def batchProcess(self, arr_to_enque, work_method, t=False):
		q = JoinableQueue()
		output = JoinableQueue()
		extra = JoinableQueue()
		third = JoinableQueue()
		if t: 
			args = ((q, output, extra, third))
		else:
			args=(q, output, extra)
		for obj in arr_to_enque:
			q.put(obj)
		processes = [Process(target=work_method, args=args, name=str(obj)) for obj in arr_to_enque]
		for p in processes:
			p.start()
		for p in processes: 
			p.join(30)
			if p.is_alive():
				print "ERROR JOINING PROCESS FOR: ", p.name
				p.terminate()
				raise Exception("Goal Conversion Error:", (self.account_id, self.project_id, exp_id, var_ids))
		print "end batch process"
		if t:
			return (output, extra, third)
		else:
			return (output, extra)
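# A hypothetical work_method compatible with batchProcess above: only the
# argument shape (three or four queues) follows from the code; the body is
# an illustrative assumption.
def example_work_method(q, output, extra, third=None):
    item = q.get()              # one queued object per spawned process
    output.put(item)            # primary result
    extra.put(('meta', item))   # secondary result
    if third is not None:
        third.put(('debug', item))
    q.task_done()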
Example #2
class Mothership(object):

    """ Monitor of producer and consumers """

    def __init__(self, producer, consumers):
        self._queue = JoinableQueue()

        self._producer_proxy = ProducerProxy(self._queue, producer)
        self._consumer_pool = list(ConsumerProxy(self._queue, consumer) \
                                   for consumer in consumers)

    def start(self):
        """ Start working """
        logger.info('Starting Producers'.center(20, '='))
        self._producer_proxy.start()

        logger.info('Starting Consumers'.center(20, '='))
        for consumer in self._consumer_pool:
            consumer.start()

        self._producer_proxy.join()
        self._queue.join()

    def __enter__(self):
        return self

    def __exit__(self, types, value, tb):
        return
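# A possible way to drive Mothership: ProducerProxy and ConsumerProxy are not
# shown above, so make_producer/make_consumer below are placeholder factories
# for whatever objects those proxies expect.
if __name__ == '__main__':
    producer = make_producer()                       # hypothetical factory
    consumers = [make_consumer() for _ in range(4)]  # hypothetical factory
    with Mothership(producer, consumers) as mothership:
        mothership.start()  # returns once the producer is done and the queue is drained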
Example #3
def cdp_no_split_single(loaded_seq_list, loaded_seq_name_list,
                        ref_file,
                        nt, cores):
    """
    Aligns a single SRNA_seq object to multiple refseq seqs in a Ref object
    at a time.  No splitting of read counts.
    """

    refs = RefSeq()
    refs.load_ref_file(ref_file)
    print(colored("------------------ALIGNING READS------------------\n", 'green'))

    workers = cores
    work_queue = JoinableQueue()
    processes = []
    mgr = Manager()
    count = 0
    counts_by_ref = mgr.dict()  # {header:[count1, count2,.......]}
    for header, seq in refs:
        work_queue.put((header, seq,))
        count += 1
        if count % 10000 == 0:
            _cdp_no_split_single_queue(counts_by_ref, loaded_seq_list, nt, processes, work_queue, workers)
    _cdp_no_split_single_queue(counts_by_ref, loaded_seq_list, nt, processes, work_queue, workers)

    _cdp_single_output(counts_by_ref.copy(), loaded_seq_name_list, ref_file, nt)
def processData(imageList,featuresDir,featuresExt,task):
  numProcs = 8
  taskQueue = JoinableQueue()
  resultQueue = Queue()
  processes = []
  for i in range(numProcs):
    t = Process(target=worker, args=(taskQueue, resultQueue, task))
    t.daemon = True
    t.start()
    processes.append(t)

  for img in imageList:
    filename = featuresDir+'/'+img+'.'+featuresExt
    idxFile = re.sub(r'\..+$',r'.idx',filename)
    content = open(filename)
    index = open(idxFile)
    taskQueue.put( (img,content.read(),index.read()) )
    #taskQueue.put( (img,filename,idxFile) )
    index.close()
    content.close()
  for i in range(len(processes)):
    taskQueue.put('stop')

  results = []
  retrieved = 0
  while retrieved < len(imageList):
    data = resultQueue.get()
    retrieved += 1
    if data != 'Ignore':
      results.append(data)
  return results
def parallelPrepareImg(img, info, name, idx):
  # Make Color Image
  if img.ndim == 2:
    img = np.tile(img[:, :, np.newaxis], (1, 1, 3))
  elif img.shape[2] == 4:
    img = img[:, :, :3]
  # Prepare processes
  numProcs = 3
  taskQueue = JoinableQueue()
  resultQueue = ProcQueue()
  processes = []
  for i in range(numProcs):
    t = Process(target=singleWindowProcess, args=(taskQueue, resultQueue, img))
    t.daemon = True
    t.start()
    processes.append(t)
  j = 0
  # Add tasks to the queue
  for b in info:
    idx.write(b[4])
    taskQueue.put( (b,j) )
    j += 1
  for i in range(len(processes)):
    taskQueue.put('stop')
  # Collect results
  data = np.zeros([len(info), 227, 227, 3])
  retrieved = 0
  while retrieved < len(info):
    j,win = resultQueue.get()
    data[j,:,:,:] = win
    retrieved += 1
  # Subtract mean and return
  data -= imagenet.IMAGENET_MEAN[14:241,14:241,:]
  return data.swapaxes(2, 3).swapaxes(1, 2)
def task_writer(task: JoinableQueue):
    for n in News.objects.all()[:50].iterator():
        task.put(n)

    for i in range(PROCESS_NUM):
        task.put("end")
    print("task writer ends")
def find_vocabulary(data_dir, stats_dir, category, min_num_images, save_description):
    print "Start find vocabulary"
    filequeue = JoinableQueue()
    photoqueue = Queue()

    init_dict = initialize_variables(None, None, False)

    # Create new processes
    num_processes = cpu_count()
    temp_dir = os.path.join(stats_dir, "database_temp", "vocab", category)
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    processes = [FindVocabularyProcess(filequeue, photoqueue, init_dict, 30.0, num_processes, temp_dir, category) for i in xrange(num_processes)]
    for p in processes:
        p.start()

    #Add the files to the process queue
    add_files_to_queue(data_dir, category, filequeue)
    #Add a poison pill for each process
    for i in xrange(num_processes):
        filequeue.put("Stop")

    for p in processes:
        p.join()

    merge_vocabulary_files(data_dir, temp_dir, min_num_images, save_description)

    print "Removing temp files"
    shutil.rmtree(temp_dir)

    print "Done with find vocabulary"
def save_transaction_list(data_dir, stats_dir, category, concept_vocabulary, save_description):
    print "Start saving transaction list"
    filequeue = JoinableQueue()

    concept_vocabulary_list, concept_vocabulary_freq = zip(*concept_vocabulary)
    init_dict = initialize_variables(concept_vocabulary_list, None, True)

    # Create new processes
    temp_dir = os.path.join(stats_dir, "transaction_list")
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir)
    else:
        print "todo"
    lock = Lock()
    num_processes = cpu_count()
    processes = [TransactionListProcess(filequeue, init_dict, 30, num_processes, temp_dir, save_description, lock) for i in xrange(num_processes)]
    for p in processes:
        p.start()

    #Add the files to the process queue
    add_files_to_queue(data_dir, category, filequeue)
    #Add a poison pill for each process
    for i in xrange(num_processes):
        filequeue.put("Stop")

    for p in processes:
        p.join()

    print "Removing temp files"
    shutil.rmtree(temp_dir)

    print "Done with saving transaction list"
Example #9
def scheduler(db,category):
    task=JoinableQueue()
    for i in range(cpu_count()):
        pid=os.fork()
        if pid==0:
            consumer(category,task)
            os._exit(0) # prevent the child process from running past this point
            # print('this line is never reached')
        elif pid < 0:
            logging.error('failed to create a child process')

    with ThreadPoolExecutor() as executor:
        cursor = db['image_match_result_{}'.format(category)].find(
            {'$or': [{'robot_processed': False}, {'robot_processed': {'$exists': False}}]}, 
            {'_id': 1, 'b_image_url': 1, 'c_image_url': 1}
        )
        for item in cursor:
            item['mark']=True # mark as a mismatch
            executor.submit(producer, item, task)
        cursor = db['item_match_result_{}'.format(category)].find(
            {'$or': [{'robot_processed': False}, {'robot_processed': {'$exists': False}}]}, 
            {'_id': 1, 'b_image_url': 1, 'c_image_url': 1}
        )
        for item in cursor:
            item['mark']=False # mark as a match
            executor.submit(producer, item, task)
    task.join()
    os.kill(0,signal.SIGKILL)
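# producer() and consumer() are not shown above; a minimal sketch of what they
# presumably look like. task.join() in scheduler() only returns if the consumer
# acknowledges every item with task_done().
def producer(item, task):
    task.put(item)   # hand one MongoDB document to a consumer process

def consumer(category, task):
    while True:
        item = task.get()
        # ... fetch b_image_url / c_image_url, compare them, write the verdict back ...
        task.task_done()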
Example #10
def test_basic():
    in_queue = JoinableQueue()

    algolia_reader = Algoliaio("MyAppID", "MyKey", 1000)
    algolia_reader.scan_and_queue(in_queue, p_index="INT_Rubriques",p_query=None, p_connect_timeout=30, p_read_timeout=60)

    assert in_queue.qsize() > 2600
Example #11
    def __init__(self,
                 network_retries=SynchronousScanner.DEFAULT_NETWORK_RETRIES,
                 network_timeout=SynchronousScanner.DEFAULT_NETWORK_TIMEOUT,
                 max_processes_nb=_DEFAULT_MAX_PROCESSES_NB,
                 max_processes_per_hostname_nb=_DEFAULT_PROCESSES_PER_HOSTNAME_NB):
        # type: (Optional[int], Optional[int], Optional[int], Optional[int]) -> None
        """Create a scanner for running scanning commands concurrently using a pool of processes.

        Args:
            network_retries (Optional[int]): How many times SSLyze should retry a connection that timed out.
            network_timeout (Optional[int]): The time until an ongoing connection times out.
            max_processes_nb (Optional[int]): The maximum number of processes to spawn for running scans concurrently.
            max_processes_per_hostname_nb (Optional[int]): The maximum number of processes that can be used for running
                scans concurrently against a single server. A lower value will reduce the chances of DOS-ing the server.
        """
        self._network_retries = network_retries
        self._network_timeout = network_timeout
        self._max_processes_nb = max_processes_nb
        self._max_processes_per_hostname_nb = max_processes_per_hostname_nb

        # Create hostname-specific queues to ensure aggressive scan commands targeting this hostname are never
        # run concurrently
        self._hostname_queues_dict = {}
        self._processes_dict = {}

        self._task_queue = JoinableQueue()  # Processes get tasks from task_queue and
        self._result_queue = JoinableQueue()  # put the result of each task in result_queue
        self._queued_tasks_nb = 0
Example #12
    def __init__(self, p_max_items_by_queue=50000, p_forkserver=False, p_log_every=10000):
        """Class creation"""
        logger = logging.getLogger('swallow')

        if p_forkserver:
            mp.set_start_method('forkserver')

        self.readers = None
        self.writer = None
        self.writer_store_args = None
        self.process = None
        self.process_args = None
        if p_max_items_by_queue is None:
            self.in_queue = JoinableQueue()
            self.out_queue = JoinableQueue()
        else:
            if (sys.platform.lower() == 'darwin'):
                logger.warn("When running Swallow on macOS, the queue size is limited to 32767 items.")
                p_max_items_by_queue = 32767
            self.in_queue = JoinableQueue(p_max_items_by_queue)
            self.out_queue = JoinableQueue(p_max_items_by_queue)

        self.counters = {
            'nb_items_processed': Value('i', 0),
            'nb_items_error': Value('i', 0),
            'nb_items_scanned': Value('i', 0),
            'nb_items_stored': Value('i', 0),
            'whole_storage_time': Value('f', 0),
            'bulk_storage_time': Value('f', 0),
            'whole_process_time': Value('f', 0),
            'real_process_time': Value('f', 0),
            'idle_process_time': Value('f', 0),
            'scan_time': Value('f', 0),
            'log_every': p_log_every
        }
Example #13
class AlarmExecutor:
    def __init__(self):
        self.queue = JoinableQueue(10)
        self.running = False
        self.t = Thread(target=self._run, name="AlarmExecutor")

    def _run(self):
        while self.running:
            try:
                alarm = self.queue.get(block=True, timeout=1)
                alarm.execute() 
                logging.debug("Alarm executed")
                self.queue.task_done()       
            except Queue.Empty:
                continue
            
    def start(self):
        logging.debug("Starting alarm executor")
        self.running = True
        self.t.start()

    def stop(self):
        if self.running:
            logging.debug("Stoppping alarm executor")
            self.running = False
            self.t.join()
        else:
            logging.debug("Attempted to stop alarm executor when it is not running")
def main():
    jobs = JoinableQueue()
    result = JoinableQueue()


    numToProcess = -1
    scores = pd.DataFrame(columns=['query','fmeasure','precision','recall',
                                   'size','maxDistance','topHits',"contextSteps"])

    print len(datasets)

    for key in datasets:
        jobs.put(key)

    processed_count = Counter()
        
    for i in xrange(NUMBER_OF_PROCESSES):
        p = Process(target=work, args=(i, jobs, result, processed_count))
        p.daemon = True
        p.start()

    #work(1, jobs, result, processed_count)

    automated_annotations = {}
    distances = {}

    jobs.join()

    dataset_index = collections.defaultdict(set)
    annotated_datasets = set()
    while not result.empty():
        dataset, classes = result.get()
        if len(classes) == 0:
            annotated_datasets.add(dataset)
        for c in classes.keys():
            dataset_index[c].add(dataset)
            owl_class = Class(c, graph=graph)
            for parent in owl_class.parents:
                dataset_index[parent.identifier].add(dataset)
        result.task_done()

    print '\n'
    
    for query, c in queries.items():
        manual = ground_truth[query]
        automated = dataset_index[c]
        hits = manual & automated
        misses = manual - automated
        precision = np.nan if len(automated) == 0 else float(len(hits)) / len(automated)
        recall = np.nan if len(manual) == 0 else float(len(hits)) / len(manual)
        if precision != 0 or recall != 0:
            fmeasure = 0 if np.isnan(precision) or np.isnan(recall) else 2 * (precision * recall) / (precision + recall)
        else:
            fmeasure = 0
        scores = scores.append(dict(query=query, size=len(manual), precision=precision, recall=recall, fmeasure=fmeasure,topHits=topHits, maxDistance=maxDistance, contextSteps = context_steps),
                        ignore_index=True)
        print "Hits for", query, c
        print '\n'.join(sorted(hits))
    print scores
    print "Annotated", len(annotated_datasets), "datasets."
    def __init__(self, available_plugins, network_retries=DEFAULT_NETWORK_RETRIES,
                 network_timeout=DEFAULT_NETWORK_TIMEOUT,
                 max_processes_nb=DEFAULT_MAX_PROCESSES_NB,
                 max_processes_per_hostname_nb=DEFAULT_PROCESSES_PER_HOSTNAME_NB):
        """
        Args:
            available_plugins (PluginsFinder): An object encapsulating the list of available plugins.
            network_retries (Optional[int]): How many times plugins should retry a connection that timed out.
            network_timeout (Optional[int]): The time until an ongoing connection times out within all plugins.
            max_processes_nb (Optional[int]): The maximum number of processes to spawn for running scans concurrently.
            max_processes_per_hostname_nb (Optional[int]): The maximum of processes that can be used for running scans
                concurrently on a single server.

        Returns:
            PluginsProcessPool: An object for queueing scan commands to be run concurrently.

        """

        self._available_plugins = available_plugins
        self._network_retries = network_retries
        self._network_timeout = network_timeout
        self._max_processes_nb = max_processes_nb
        self._max_processes_per_hostname_nb = max_processes_per_hostname_nb

        # Create hostname-specific queues to ensure aggressive scan commands targeting this hostname are never
        # run concurrently
        self._hostname_queues_dict = {}
        self._processes_dict = {}

        self._task_queue = JoinableQueue()  # Processes get tasks from task_queue and
        self._result_queue = JoinableQueue()  # put the result of each task in result_queue
        self._queued_tasks_nb = 0
Example #16
class QuickReader():
	def __init__(self, writer_name,handle_raw_assertion,add_lines_to_queue, isTest = False, num_threads = 5):
		self.writer_name = writer_name
		self.num_threads = num_threads
		self.handle_raw_assertion = handle_raw_assertion
		self.add_lines_to_queue = add_lines_to_queue
		self.queue = JoinableQueue()
		self.isTest = isTest

	def start(self):
		print "begin writing " + self.writer_name
		self.create_processes()
		self.add_lines_to_queue(self.queue)
		self.queue.join()
		print "finished writing " + self.writer_name

	def pull_lines(self,q,writer):
	    while 1:
	        raw_assertion = q.get()
	        edges = self.handle_raw_assertion(raw_assertion)
	        for edge in edges:
	            writer.write(edge)
	        q.task_done()

	def create_processes(self):
	    for i in range(self.num_threads):
	        writer = MultiWriter(self.writer_name + "_" + str(i),self.isTest)
	        p = Process(target = self.pull_lines, args = (self.queue, writer))
	        #p.daemon=True
	        p.start()
def main():

    num_page = 6000
    num_processes = 60
    num_works = num_page / num_processes
    q = JoinableQueue()
    pool = list()
    final_set = set()
    
    for index in xrange(1,num_processes+1):
        p =  Process(target=fetch_feature,args=(q,index,num_works))
        p.start()
    
    for index in xrange(1,num_processes+1):    
        final_set = final_set.union(q.get())
    
        #p.join()
    #    pool.append(p)
        
    #for p in pool:
    #   p.join()
    result_file = open('result.out','w');

    for feature in final_set:
        print feature
        result_file.write(feature+'\n')
   
    result_file.close()    
    print len(final_set)
def main():

    fetch_queue = JoinableQueue()
    reject_queue = JoinableQueue(maxsize = 1000)

    log_processor = Process(target=job_creator, args=(fetch_queue, './search_log_valid_2010_06_17'), name='log-processor')
    
    writers = [ ]
    write_queues = []

    for num in DATA_SETS:
        queue, writer = create_writer(reject_queue, num) 
        writers.append(writer)
        write_queues.append(queue)

    fetchers = [ create_fetcher(fetch_queue, write_queues, reject_queue, num) for num in xrange(NUM_FETCHERS) ]
    reject_writer = Process(target=reject, args=(reject_queue, './rejected-lines'), name='related-search-reject-writer')

    log_processor.start()
    reject_writer.start()
    start_processes(writers)
    start_processes(fetchers)

    log_processor.join()
    print 'DONE? '
    fetch_queue.join()
    for queue in write_queues:
        queue.join()
    reject_writer.join()
Example #19
def test_basic():
    in_queue = JoinableQueue()

    mongo_reader = Mongoio(p_host='localhost',p_port='27017',p_user='******',p_password='******',p_base='ACTIVITE',p_rs_xtra_nodes=['localhost:27018','localhost:27019'],p_rs_name='rs0')
    mongo_reader.scan_and_queue(in_queue,p_collection='rubriques', p_query={})

    assert in_queue.qsize() > 2600
Example #20
class emailSubsystem(object):
    def __init__(self):
        ### will move to Celery eventually; with Celery, the app would be able to periodically
        # wakeup and check on replyQueue to see which emails were sent, which were not, and
        # what to do ...

        self.emailQueue = JoinableQueue()
        self.replyQueue = JoinableQueue()

        self.worker = Process(target=sendEmailWorker, args=(self.emailQueue, self.replyQueue))

    def start(self):
        # temporarily comment out starting a new process as it seems to leave zombies
        # and causes app not to start as max process limit is reached.
        #self.worker.start()
        return

    def shutdown(self):
        # post poison pill
        # wait on the queue to be done; ie join on emailQueue
        # wait on the worker process to die; ie join on worker

        self.emailQueue.put(None)
        self.emailQueue.join()
        self.worker.join()
Example #21
 def __init__(self, config, maxCpus, maxMemory):
     AbstractBatchSystem.__init__(self, config, maxCpus, maxMemory) #Call the parent constructor
     if maxMemory != sys.maxint:
         logger.critical("A max memory has been specified for the parasol batch system class of %i, but currently this batchsystem interface does not support such limiting" % maxMemory)
     #Keep the name of the results file for the pstat2 command..
     self.parasolCommand = config.attrib["parasol_command"]
     self.parasolResultsFile = getParasolResultsFileName(config.attrib["job_tree"])
     #Reset the job queue and results (initially, we do this again once we've killed the jobs)
     self.queuePattern = re.compile("q\s+([0-9]+)")
     self.runningPattern = re.compile("r\s+([0-9]+)\s+[\S]+\s+[\S]+\s+([0-9]+)\s+[\S]+")
     self.killJobs(self.getIssuedJobIDs()) #Kill any jobs on the current stack
     logger.info("Going to sleep for a few seconds to kill any existing jobs")
     time.sleep(5) #Give batch system a second to sort itself out.
     logger.info("Removed any old jobs from the queue")
     #Reset the job queue and results
     exitValue = popenParasolCommand("%s -results=%s clear sick" % (self.parasolCommand, self.parasolResultsFile), False)[0]
     if exitValue != None:
         logger.critical("Could not clear sick status of the parasol batch %s" % self.parasolResultsFile)
     exitValue = popenParasolCommand("%s -results=%s flushResults" % (self.parasolCommand, self.parasolResultsFile), False)[0]
     if exitValue != None:
         logger.critical("Could not flush the parasol batch %s" % self.parasolResultsFile)
     open(self.parasolResultsFile, 'w').close()
     logger.info("Reset the results queue")
     #Stuff to allow the max cpus limit to work
     self.outputQueue1 = Queue()
     self.outputQueue2 = Queue()
     #worker = Thread(target=getUpdatedJob, args=(self.parasolResultsFileHandle, self.outputQueue1, self.outputQueue2))
     #worker.setDaemon(True)
     worker = Process(target=getUpdatedJob, args=(self.parasolResultsFile, self.outputQueue1, self.outputQueue2))
     worker.daemon = True
     worker.start()
     self.usedCpus = 0
     self.jobIDsToCpu = {}
Example #22
class ProcessPool(object):

    def __init__(self, size=1):
        self.size = size
        self.jobs = Queue()
        self.results = Queue()
        self.processes = []

    def start(self):
        '''start all processes'''

        for i in range(self.size):
            self.processes.append(ProcessWorker(self))

        for process in self.processes:
            process.start()

    def append_job(self, job, *args, **kwargs):
        self.jobs.put((job, args, kwargs))

    def join(self):
        '''waiting all jobs done'''
        self.jobs.join()

    def stop(self):
        '''kill all processes'''
        for process in self.processes:
            process.stop()

        for process in self.processes:  # waiting processes completing
            if process.is_alive():
                process.join()

        del self.processes[:]  # reset processes to empty
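# ProcessWorker is not included above; a rough sketch of the loop it presumably
# runs, assuming self.jobs is a JoinableQueue (jobs.join() and task_done() only
# exist on the joinable variant) and the fork start method so the pool object
# is simply inherited by the children.
from multiprocessing import Process

class ProcessWorker(Process):
    def __init__(self, pool):
        Process.__init__(self)
        self.pool = pool

    def run(self):
        while True:
            item = self.pool.jobs.get()
            if item is None:                 # sentinel queued by stop()
                self.pool.jobs.task_done()
                break
            job, args, kwargs = item
            try:
                self.pool.results.put(job(*args, **kwargs))
            finally:
                self.pool.jobs.task_done()

    def stop(self):
        # called from the parent in ProcessPool.stop(); a sentinel unblocks run()
        self.pool.jobs.put(None)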
class SimpleSynergeticServer(Process):
    
    def __init__(self, authen_key):
        Process.__init__(self)
        self.__task_queue = JoinableQueue(1)
        self.__return_queue = Queue(1)
        self.serv = Listener(('', 40000), authkey=authen_key)
    
    def run(self):
        print 'Server Works'
        copy_reg.pickle(types.MethodType, _reduce_method)
        #Start the SynergeticProcess in daemon mode
        worker_p = SynergeticProcess(self.__task_queue, self.__return_queue)
        worker_p.daemon = True
        worker_p.start()          
        while True:
            print 'wait for Client'
            pool_conn = self.serv.accept()
            print 'connection Client Accepted'
            while True:
                print 'in LOOP Simple Server'
                #There is no need for task_id in this version
                try:
                    print 'Try to recv MSG'
                    unpickled_msg = pool_conn.recv()
                    print 'MSG Received'
                except Exception as e: # EOFError:
                    print 'Fail To Receive MSG:', e
                    break 
                if unpickled_msg[0] == 'MODULES':
                    self.import_mods( unpickled_msg[1] )
                    ret = 'MODULES-READY'
                else:    
                    self.__task_queue.put(unpickled_msg)
                    ret = self.__return_queue.get()
                try:
                    print 'SEND RESPONSE'
                    try:
                        pool_conn.send( ret )
                    except EOFError:
                        print 'SENT TO POOL FAILED'
                    print 'RESPONSE SENT ', ret
                except EOFError:
                    break
            pool_conn.close()
    
    def import_mods(self, mods_d):
        for mod_name, mod_bytecode in mods_d.items():
            try:
                with open(mod_name + ".pyc", 'wb') as fobj:
                    fobj.write( mod_bytecode )
            except Exception as e:
                print("Synergeticprocessing.SimpleServer --> Module file error: %s" % e)
        for mod in mods_d:
            print 'blocking'
            __import__( mod )
            print 'imported ', mod
Example #24
def aggress(map):
    global startMap
    startMap = map

    #print "Regressing..."
    state = State()

    jobs = []

    longestSolution = Value('d', 20)
    highestScore = Value('d', 0)

    queue = JoinableQueue()

    manager = Manager()

    d = manager.dict()
    d.clear()

    l = RLock()

    if multiProc:
        queue.put((state, map, 1))

        for i in range(numProcs):
           p = Process(target = multiMain, args=(startMap, l, d, queue,highestScore))
           p.start()

        queue.join()
    else:
        a(l, highestScore, d, None, state, map, 1)
class QueueTask:
    def __init__(self):
        self.queue = JoinableQueue()
        self.event = Event()
        atexit.register( self.queue.join )

        process = Process(target=self.work)
        process.daemon = True
        process.start()


    def work(self):
        while True:
            func, args, wait_for = self.queue.get()

            for evt in wait_for: 
                evt.wait()
            func(*args)
            self.event.set()

            self.queue.task_done()


    def enqueue(self, func, args=[], wait_for=[]):
        self.event.clear()
        self.queue.put( (func, args, wait_for) )

        return self.event 
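# A brief hypothetical usage of QueueTask (greet is a placeholder). Note that
# every enqueue() returns the same shared Event, so only waiting on the most
# recent call is reliable.
def greet(name):
    print("hello", name)

tasks = QueueTask()
done = tasks.enqueue(greet, args=["world"])
done.wait()   # blocks until the daemon worker process has run greet()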
Example #26
def evaluate(points,meshToBasis,kernel,quadRule,coeffs,nprocs=None):
    """Evaluate a kernel using the given coefficients"""


    if nprocs==None: nprocs=cpu_count()

    inputQueue=JoinableQueue()

    nelements=meshToBasis.nelements

    for elem in meshToBasis: inputQueue.put(elem)

    buf=sharedctypes.RawArray('b',len(points[0])*numpy.dtype(numpy.complex128).itemsize)
    result=numpy.frombuffer(buf,dtype=numpy.complex128)
    result[:]=numpy.zeros(1,dtype=numpy.complex128)

    time.sleep(.5)
    workers=[]

    for id in range(nprocs):
        worker=EvaluationWorker(points,kernel,quadRule,coeffs,inputQueue,result)
        worker.start()
        workers.append(worker)


    inputQueue.join()
    for worker in workers: worker.join()

    return result.copy()
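# The interesting detail above is that every EvaluationWorker accumulates into
# one shared buffer. A stripped-down sketch of that pattern, assuming the fork
# start method (the default on Linux) so the children inherit the RawArray.
import numpy
from multiprocessing import Process, sharedctypes

buf = sharedctypes.RawArray('b', 4 * numpy.dtype(numpy.complex128).itemsize)
result = numpy.frombuffer(buf, dtype=numpy.complex128)
result[:] = 0

def fill(index):
    view = numpy.frombuffer(buf, dtype=numpy.complex128)
    view[index] += 1 + 1j   # each worker writes its own slot, so no lock is needed

if __name__ == '__main__':
    workers = [Process(target=fill, args=(i,)) for i in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(result)   # all four slots are filled: the memory is shared, not copied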
Example #27
def queue_info(iters=None,):
    work = JoinableQueue()

    for filename in iters:
        work.put(obj=filename)
    time.sleep(1)
    return work
Example #28
 def getdata_multiprocess(self,task_funcsiter=None,task_funcsiterparas={},
                         task_funcsconst=None,task_funcsconstparas={},processnum=None,
                         threadnum=2):
     def _start_processes(taskqueue,resultqueue,taskqueue_lk,task_funcsconst,
                          task_funcsconstparas,processnum,threadnum):
         for i in range(processnum):
             p = Process(target=self.multiprocess_task, args=(taskqueue,resultqueue,
                                      taskqueue_lk,threadnum,
                                      task_funcsconst,task_funcsconstparas
                                      ),name='P'+str(i))
             p.daemon=True
             p.start()
             
     processnum=processnum if processnum else multiprocessing.cpu_count()
     # queue that delivers tasks to the workers
     taskqueue=JoinableQueue()
     # lock/condition used when writing tasks and waking the workers
     taskqueue_lk = multiprocessing.Condition(multiprocessing.Lock())
     # queue that returns the results
     resultqueue=Queue()
     
     _start_processes(taskqueue,resultqueue,taskqueue_lk,task_funcsconst,
                         task_funcsconstparas,processnum,threadnum)
     # enqueue the tasks and wake up the worker processes
     if task_funcsconst is None:
         self._put_tasks(zip(task_funcsiter,task_funcsiterparas),taskqueue,taskqueue_lk)
     else:
         self._put_tasks(task_funcsiterparas,taskqueue,taskqueue_lk)
     logger.info('main join!')
     taskqueue.join()
     logger.info('main end!')
     return self._get_results(resultqueue)
Example #29
    def __init__(self, p_max_items_by_queue=50000, p_forkserver=False, p_log_every=10000):
        """Class creation"""
        if p_forkserver:
            mp.set_start_method('forkserver')

        self.readers = None
        self.writer = None
        self.writer_store_args = None
        self.process = None
        self.process_args = None
        if p_max_items_by_queue is None:
            self.in_queue = JoinableQueue()
            self.out_queue = JoinableQueue()
        else:
            self.in_queue = JoinableQueue(p_max_items_by_queue)
            self.out_queue = JoinableQueue(p_max_items_by_queue)

        self.counters = {
            'nb_items_processed': Value('i', 0),
            'nb_items_error': Value('i', 0),
            'nb_items_scanned': Value('i', 0),
            'nb_items_stored': Value('i', 0),
            'whole_storage_time': Value('f', 0),
            'bulk_storage_time': Value('f', 0),
            'whole_process_time': Value('f', 0),
            'real_process_time': Value('f', 0),
            'idle_process_time': Value('f', 0),
            'scan_time': Value('f', 0),
            'log_every': p_log_every
        }
    def run(self):

        # Changes the process name shown by ps for instance
        setProcTitle ("agentcluster master [version: %s] [monitoring: %d seconds]" % (__version__,self.monitoring_period) );

        try:
            logger.info ( 'Agent cluster server starting' );

            logger.info ( 'Configurations will be scanned in directories:' );
            for directory in confdir.data:
                logger.info ( '  o %s', os.path.abspath(directory) );

            self.watchdog = Watchdog(self.monitoring_period)
            self.watchdog.start()

            # Generates a deadlock to enter in sleep mode
            # Only an external signal can break this deadlock
            logger.info ( 'Agent cluster server started' );
            queue = JoinableQueue()
            queue.put(object());
            queue.join();

        except KeyboardInterrupt:
            logger.info ( 'Agent cluster server interrupted' );
        except Exception:
            logger.error ( 'Exception catched in main process: %s', sys.exc_info()[1] );
            logger.debug ( "", exc_info=True );
        finally:
            # First stop the monitoring to avoid restarting killed agents
            if self.watchdog is not None:
                self.watchdog.shutdown = True
                self.watchdog.join()
            logger.info ( 'Agent cluster server end' );
            logging.shutdown()
Example #31
        for x in range(2, int(math.sqrt(n)) + 1):
            if n % x == 0:
                self.answer = False
                return

        self.answer = True


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--file', type=str, required=True)
    parser.add_argument('-p', '--processes', type=int, default=2)

    args = parser.parse_args()

    queue = JoinableQueue()
    workers = [Worker(queue, i) for i in range(args.processes)]
    for worker in workers:
        worker.start()

    tasks_file = open(args.file, 'r')

    begin_time = time.time()
    while True:
        task_str = tasks_file.readline()
        if not task_str:
            break
        task = task_str.split('\t')
        if task[0] == 'prime':
            queue.put(PrimeTask(int(task[1])))
        elif task[0] == 'check':
Example #32
def fit_for_all(drop_non_countries=False):
    """Main function to perform fit for all countries."""
    ####################################################################
    # Read files
    train_df = pd.read_csv(TRAIN_FILE, encoding='cp1252',
                           index_col='Country Name').dropna(axis=0)
    test_df = pd.read_csv(TEST_FILE, encoding='cp1252',
                          index_col='Country Name').dropna(axis=0)

    # The test_df has one extra country. Line up train and test.
    test_df = test_df.loc[train_df.index]

    if drop_non_countries:
        train_df = train_df.drop(NON_COUNTRIES)
        test_df = test_df.drop(NON_COUNTRIES)

    # Get matrices.
    train_mat = train_df.values.T.astype(int)
    test_mat = test_df.values.T.astype(int)

    # Grab list and number of countries for convenience.
    countries = train_df.index.values
    num_countries = countries.shape[0]

    # Initialize queues for parallel processing.
    queue_in = JoinableQueue()
    queue_out = Queue()

    # Start processes.
    processes = []
    for i in range(NUM_PROCESSES):
        p = Process(target=fit_for_country_worker, args=(train_mat, test_mat,
                                                         queue_in, queue_out))
        p.start()
        processes.append(p)

    # Loop over all the countries (columns of the train matrix).
    for i in range(num_countries):
        # Put boolean array in the queue.
        queue_in.put((i, num_countries))

    # Wait for processing to finish.
    queue_in.join()

    # Track coefficients.
    best_coeff = pd.DataFrame(0.0, columns=countries, index=countries)

    # Track training scores.
    best_scores = pd.Series(0.0, index=countries)

    # Track predictions.
    predictions = pd.DataFrame(0.0, columns=test_df.columns, index=countries)

    # Map data.
    for _ in range(num_countries):
        # Grab data from the queue.
        other_countries, s, c, p = queue_out.get()

        country = countries[~other_countries][0]

        # Map.
        best_scores.loc[~other_countries] = s
        best_coeff.loc[other_countries, country] = c
        # p needs to be transformed (17x1 vs 1x17)
        predictions.loc[~other_countries, :] = p.T

    # Shut down processes.
    for p in processes:
        queue_in.put_nowait(None)
        p.terminate()

    predictions.transpose().to_csv(PRED_OUT, index_label='Id',
                                   encoding='cp1252')
    best_coeff.to_csv(COEFF_OUT, encoding='cp1252')

    # Print MSE
    print('Summary of MSE:')
    print(best_scores.describe())
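# fit_for_country_worker is referenced but not included; a rough sketch of the
# queue protocol it has to follow. The regression itself is elided and the
# shapes of s, c and p are assumptions inferred from how fit_for_all unpacks them.
import numpy as np

def fit_for_country_worker(train_mat, test_mat, queue_in, queue_out):
    while True:
        item = queue_in.get()
        if item is None:                        # sentinel queued during shutdown
            queue_in.task_done()
            break
        i, num_countries = item
        other_countries = np.arange(num_countries) != i   # True for every other country
        # ... fit country i from the other countries' series, score, predict ...
        s = 0.0                                 # best training score
        c = np.zeros(num_countries - 1)         # coefficients for the other countries
        p = np.zeros((test_mat.shape[0], 1))    # predictions; transposed by the caller
        queue_out.put((other_countries, s, c, p))
        queue_in.task_done()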
Example #33
    def fn_main(self):
        apk_list = self.fn_parse_args_and_populate_apk_list()
        # Keep track of the number of processes we create.
        num_processes = 0

        if apk_list == []:
            print('All APK files checked.')
            sys.exit(0)

        length_apk_list = len(apk_list)/NUMBER_OF_PROCESSES
        length_apk_list = int(length_apk_list)
        print(
            'Total number of APKs: ' 
            + str(len(apk_list))
            + '\nApproximate number of APKs per process: '
            + str(length_apk_list)
        )

        # Free up memory
        checked_apks = None

        # Create two process queues: 
        #  one for sending data to, and one for receiving data from,
        #  the worker process(es).
        process_extractor_send_queue = JoinableQueue()
        process_extractor_receive_queue = JoinableQueue()

        # List for keeping track of processes.
        self.process_list = []
        # Create worker processes.
        for i in range(0, NUMBER_OF_PROCESSES):
            worker_uuid_process = UUIDExtractor(
                self.trace_params,
                self.named_special_case_object,
                self.nonnamed_special_case_object
            )
            worker = Process(
                target=worker_uuid_process.fn_main,
                args=(
                    process_extractor_send_queue,
                    process_extractor_receive_queue,
                    num_processes
                )
            )
            worker.start()
            self.process_list.append(worker)
            num_processes += 1

        apks_to_check = 0
        # Send work to worker process.
        for match in apk_list:
            if os.path.isfile(match):
                apks_to_check += 1
                process_extractor_send_queue.put(str(match))
            else:
                apk_list.remove(match)

        if os.path.isfile(self.error_log):
            fo_error = open(self.error_log, 'a')
        else:
            fo_error = open(self.error_log, 'w')
        completed_apk_count = 0

        while True:
            # Get information sent by worker process.
            [analysed_file, pkg, status, result] = process_extractor_receive_queue.get()
            process_extractor_receive_queue.task_done()
            # Analyse the output string.
            if status == STATUS_ERROR:
                print('\n Error encountered with ' + analysed_file 
                      + ': ' + result + '\n')
                # Log the error to a separate file.
                fo_error.write(analysed_file+','+result+'\n')
                completed_apk_count += 1
            elif status == STATUS_LOGGING:
                print(result)
            elif status == STATUS_DONE:
                print('\n Finished analysing ' + analysed_file 
                      + ' with result ' + str(result) + '\n')
                # Write the output to temporary JSON.
                raw_filename = self.fn_get_filename_from_path(analysed_file)
                filename_no_ext = raw_filename.replace('.apk', '')
                json_obj = {}
                json_obj[filename_no_ext] = {}
                json_obj[filename_no_ext]['pkg'] = pkg
                json_obj[filename_no_ext]['uuids'] = result
                outfile = os.path.join(
                    self.io_tmp_dir,
                    raw_filename.replace('.apk', '.json')
                )
                with open(outfile, 'w') as f:
                    json.dump(json_obj, f, indent=4)
                completed_apk_count += 1
            else:
                print('Unhandled condition from worker.')
            

            # Check if any processes have become zombies.
            if len(active_children()) < NUMBER_OF_PROCESSES:
                for p in self.process_list:
                    if not p.is_alive():
                        self.process_list.remove(p)
                        # Create a new process in its place.
                        worker_uuid_process = UUIDExtractor(
                            self.trace_params,
                            self.named_special_case_object,
                            self.nonnamed_special_case_object
                        )
                        replacement_worker = Process(
                            target=worker_uuid_process.fn_main,
                            args=(
                                process_extractor_send_queue,
                                process_extractor_receive_queue,
                                num_processes
                            )
                        )
                        replacement_worker.start()
                        self.process_list.append(replacement_worker)
                        num_processes += 1

            # Check if all APKs have been analysed.            
            if completed_apk_count == apks_to_check:
                break

        print('All done.')

        # Tell child processes to stop
        for i in range(NUMBER_OF_PROCESSES):
            process_extractor_send_queue.put('STOP')

        # Collate.
        self.fn_collate_json()
Example #34
    def __init__(self,
                 config_in=None,
                 min_occur=10,
                 min_percent=5,
                 window=2,
                 threads=8,
                 period=24,
                 min_interval=2,
                 es_host='localhost',
                 es_port=9200,
                 es_timeout=480,
                 es_index='logstash-flow-*',
                 kibana_version='4',
                 verbose=True,
                 debug=True):
        """

        :param min_occur: Minimum number of triads to be considered beaconing
        :param min_percent: Minimum percentage of all connection attempts that
         must fall within the window to be considered beaconing
        :param window: Size of the window, in seconds, in which connections are grouped to determine the percentage.
         Using a large window size can give inaccurate interval times; when multiple windows contain all the
         interesting packets, the first window to match is taken as the interval
        :param threads: Number of cores to use
        :param period: Number of hours to locate beacons for
        :param min_interval: Minimum interval between events to consider for beaconing behavior
        :param es_host: IP Address of elasticsearch host (default is localhost)
        :param es_timeout: Sets timeout to 480 seconds
        :param kibana_version: 4 or 5 (query will depend on version)
        """
        #self.config_in = config_in
        if config_in is not None:
            try:
                self.config = flareConfig(config_in)
                self.es_host = self.config.get('beacon', 'es_host')
                self.es_port = int(self.config.get('beacon', 'es_port'))
                self.es_index = self.config.get('beacon', 'es_index')
                self.use_ssl = self.config.config.getboolean('beacon', 'use_ssl')
                self.MIN_OCCURRENCES = int(self.config.get('beacon','min_occur'))
                self.MIN_PERCENT = int(self.config.get('beacon','min_percent'))
                self.WINDOW = int(self.config.get('beacon','window'))
                self.NUM_PROCESSES = int(self.config.get('beacon','threads'))
                self.period = int(self.config.get('beacon','period'))
                self.min_interval = int(self.config.get('beacon', 'min_interval'))
                self.es_timeout = int(self.config.get('beacon','es_timeout'))
                self.kibana_version = self.config.get('beacon','kibana_version')
                self.beacon_src_ip = self.config.get('beacon','field_source_ip')
                self.beacon_dest_ip = self.config.get('beacon', 'field_destination_ip')
                self.beacon_destination_port = self.config.get('beacon', 'field_destination_port')
                self.beacon_timestamp = self.config.get('beacon', 'field_timestamp')
                self.beacon_flow_bytes_toserver = self.config.get('beacon', 'field_flow_bytes_toserver')
                self.beacon_flow_id = self.config.get('beacon', 'field_flow_id')
                self.beacon_event_key = self.config.get('beacon','event_key')
                self.beacon_event_type = self.config.get('beacon','event_type')
                self.filter = self.config.get('beacon','filter')
                self.verbose = self.config.config.getboolean('beacon', 'verbose')
                self.auth_user = self.config.config.get('beacon','username')
                self.auth_password = self.config.config.get('beacon', 'password')
                self.suricata_defaults = self.config.config.getboolean('beacon','suricata_defaults')
                try:
                    self.debug = self.config.config.getboolean('beacon', 'debug')
                except:
                    pass


            except Exception as e:
                print(('{red}[FAIL]{endc} Could not properly load your config!\nReason: {e}'.format(red=bcolors.FAIL, endc=bcolors.ENDC, e=e)))
                sys.exit(0)

        else:
            self.es_host = es_host
            self.es_port = es_port
            self.es_index = es_index
            self.use_ssl = False
            self.MIN_OCCURRENCES = min_occur
            self.MIN_PERCENT = min_percent
            self.WINDOW = window
            self.NUM_PROCESSES = threads
            self.period = period
            self.min_interval = min_interval
            self.kibana_version = kibana_version
            self.es_timeout = es_timeout
            self.beacon_src_ip = 'src_ip'
            self.beacon_dest_ip = 'dest_ip'
            self.beacon_destination_port = 'dest_port'
            self.beacon_timestamp = '@timestamp'
            self.beacon_flow_bytes_toserver = 'bytes_toserver'
            self.beacon_flow_id = 'flow_id'
            self.beacon_event_type = 'flow'
            self.beacon_event_key = 'event_type'
            self.filter = ''
            self.verbose = verbose
            self.suricata_defaults = False

        self.ver = {'4': {'filtered': 'query'}, '5': {'bool': 'must'}}
        self.filt = list(self.ver[self.kibana_version].keys())[0]
        self.query = list(self.ver[self.kibana_version].values())[0]
        self.whois = WhoisLookup()
        self.info = '{info}[INFO]{endc}'.format(info=bcolors.OKBLUE, endc=bcolors.ENDC)
        self.success = '{green}[SUCCESS]{endc}'.format(green=bcolors.OKGREEN, endc=bcolors.ENDC)
        self.fields = [self.beacon_src_ip, self.beacon_dest_ip, self.beacon_destination_port, self.beacon_flow_bytes_toserver, 'dest_degree', 'occurrences', 'percent', 'interval']

        try:
            _ = (self.auth_user, self.auth_password)
            self.auth = "Enabled"
        except AttributeError as e:
            self.auth = "None"

        try:
            self.vprint('{info}[INFO]{endc} Attempting to connect to elasticsearch...'.format(info=bcolors.OKBLUE, endc=bcolors.ENDC))
            if self.auth == "None":
                self.es = Elasticsearch(self.es_host, port=self.es_port, timeout=self.es_timeout, verify_certs=False, use_ssl=self.use_ssl, connection_class=RequestsHttpConnection)
            else:
                self.es = Elasticsearch(self.es_host, port=self.es_port, timeout=self.es_timeout, http_auth=(self.auth_user, self.auth_password), verify_certs=False, use_ssl=self.use_ssl, connection_class=RequestsHttpConnection)
            self.vprint('{green}[SUCCESS]{endc} Connected to elasticsearch on {host}:{port}'.format(green=bcolors.OKGREEN, endc=bcolors.ENDC, host=self.es_host, port=str(self.es_port)))
        except Exception as e:
            self.vprint(e)
            raise Exception(
                "Could not connect to ElasticSearch -- Please verify your settings are correct and try again.")

        self.q_job = JoinableQueue()
        self.l_df = Lock()
        self.l_list = Lock()
        self.high_freq = None
        self.flow_data = self.run_query()
Example #35
def writer_loop(queue: JoinableQueue):
    stop = False
    file = None
    group = None

    while not stop:
        item = queue.get()

        assert 'action' in item
        action = item['action']

        # get parameter if any
        parameters = {}
        if 'parameters' in item:
            parameters = item['parameters']

        if action == 'create_file':
            assert 'fname' in parameters

            fname = parameters['fname']

            file = h5py.File(fname, 'w', libver='latest')

            group = file.create_group('group')

        elif action == 'set_swmr_mode':
            file.swmr_mode = True

        elif action == 'create_dataset':
            assert 'name' in parameters
            assert 'value' in parameters

            name = parameters['name']
            data = parameters['value']

            group.create_dataset(name, maxshape=data.shape, data=data)

        elif action == 'update_dataset':
            assert 'name' in parameters
            assert 'value' in parameters

            name = parameters['name']
            data = parameters['value']

            group[name][:] = data

        elif action == 'create_attribute':
            assert 'name' in parameters
            assert 'value' in parameters

            name = parameters['name']
            value = parameters['value']

            if isinstance(value, str):
                value = np.string_(value)

            group.attrs.create(name, value)

        elif action == 'update_attribute':
            assert 'name' in parameters
            assert 'value' in parameters

            name = parameters['name']
            value = parameters['value']

            if isinstance(value, str):
                value = np.string_(value)

            group.attrs[name] = value

        elif action == 'flush_file':
            file.flush()

        elif action == 'flush_group':
            group.flush()

        elif action == 'close_file':
            file.close()
        elif action == 'stop':
            stop = True
        else:
            assert False

        queue.task_done()
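# A possible driver for writer_loop: the writer runs in its own process and the
# parent posts action dictionaries, using join() to make sure each batch of
# commands has been applied. File and dataset names are only for illustration.
import numpy as np
from multiprocessing import JoinableQueue, Process

if __name__ == '__main__':
    queue = JoinableQueue()
    writer = Process(target=writer_loop, args=(queue,))
    writer.start()

    queue.put({'action': 'create_file', 'parameters': {'fname': 'stream.h5'}})
    queue.put({'action': 'create_dataset',
               'parameters': {'name': 'signal', 'value': np.zeros(16)}})
    queue.put({'action': 'set_swmr_mode'})   # datasets must exist before enabling SWMR
    queue.join()                             # file is now ready for readers

    queue.put({'action': 'update_dataset',
               'parameters': {'name': 'signal', 'value': np.arange(16.0)}})
    queue.put({'action': 'close_file'})
    queue.put({'action': 'stop'})
    queue.join()
    writer.join()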
Example #36
        q.put(res)
    q.join()


def consumer(q):
    while True:
        res = q.get()
        if res is None: break
        time.sleep(1)
        print('the consumer ate %s' % res)
        q.task_done()  # signal that this item has been processed, so no extra q.put(None) sentinel is needed later


if __name__ == '__main__':
    # the shared queue
    q = JoinableQueue()

    # producers
    p1 = Process(target=producer, args=(q, ))
    p2 = Process(target=producer, args=(q, ))
    p3 = Process(target=producer, args=(q, ))

    # consumers
    c1 = Process(target=consumer, args=(q, ))
    c2 = Process(target=consumer, args=(q, ))
    # c1.daemon = True  # without daemonizing the consumers, the program would hang waiting on them
    # c2.daemon = True

    p1.start()
    p2.start()
    p3.start()
Example #37
def main():
    freeze_support()

    #--PLUGINS INITIALIZATION--
    start_time = time()
    print '\n\n\n' + _format_title('Registering available plugins')
    sslyze_plugins = PluginsFinder()
    available_plugins = sslyze_plugins.get_plugins()
    available_commands = sslyze_plugins.get_commands()
    print ''
    for plugin in available_plugins:
        print '  ' + plugin.__name__
    print '\n\n'

    # Create the command line parser and the list of available options
    sslyze_parser = CommandLineParser(available_plugins, PROJECT_VERSION)

    try:  # Parse the command line
        (command_list, target_list,
         shared_settings) = sslyze_parser.parse_command_line()
    except CommandLineParsingError as e:
        print e.get_error_msg()
        return

    #--PROCESSES INITIALIZATION--
    # Three processes per target from MIN_PROCESSES up to MAX_PROCESSES
    nb_processes = max(MIN_PROCESSES, min(MAX_PROCESSES, len(target_list) * 3))
    if command_list.https_tunnel:
        nb_processes = 1  # Let's not kill the proxy

    task_queue = JoinableQueue()  # Processes get tasks from task_queue and
    result_queue = JoinableQueue(
    )  # put the result of each task in result_queue

    # Spawn a pool of processes, and pass them the queues
    process_list = []
    for _ in xrange(nb_processes):
        priority_queue = JoinableQueue()  # Each process gets a priority queue
        p = WorkerProcess(priority_queue, task_queue, result_queue, available_commands, \
                          shared_settings)
        p.start()
        process_list.append(
            (p,
             priority_queue))  # Keep track of each process and priority_queue

    #--TESTING SECTION--
    # Figure out which hosts are up and fill the task queue with work to do
    print _format_title('Checking host(s) availability')

    targets_OK = []
    targets_ERR = []

    # Each server gets assigned a priority queue for aggressive commands
    # so that they're never run in parallel against this single server
    cycle_priority_queues = cycle(process_list)
    target_results = ServersConnectivityTester.test_server_list(
        target_list, shared_settings)
    for target in target_results:
        if target is None:
            break  # None is a sentinel here

        # Send tasks to worker processes
        targets_OK.append(target)
        (_, current_priority_queue) = cycle_priority_queues.next()

        for command in available_commands:
            if getattr(command_list, command):
                args = command_list.__dict__[command]

                if command in sslyze_plugins.get_aggressive_commands():
                    # Aggressive commands should not be run in parallel against
                    # a given server so we use the priority queues to prevent this
                    current_priority_queue.put((target, command, args))
                else:
                    # Normal commands get put in the standard/shared queue
                    task_queue.put((target, command, args))

    for exception in target_results:
        targets_ERR.append(exception)

    print ServersConnectivityTester.get_printable_result(
        targets_OK, targets_ERR)
    print '\n\n'

    # Put a 'None' sentinel in the queue to let each process know when every
    # task has been completed
    for (proc, priority_queue) in process_list:
        task_queue.put(None)  # One sentinel in the task_queue per proc
        priority_queue.put(None)  # One sentinel in each priority_queue

    # Keep track of how many tasks have to be performed for each target
    task_num = 0
    for command in available_commands:
        if getattr(command_list, command):
            task_num += 1

    # --REPORTING SECTION--
    processes_running = nb_processes

    # XML output
    xml_output_list = []

    # Each host has a list of results
    result_dict = {}
    for target in targets_OK:
        result_dict[target] = []

    # If all processes have stopped, all the work is done
    while processes_running:
        result = result_queue.get()

        if result is None:  # Getting None means that one process was done
            processes_running -= 1

        else:  # Getting an actual result
            (target, command, plugin_result) = result
            result_dict[target].append((command, plugin_result))

            if len(result_dict[target]) == task_num:  # Done with this target
                # Print the results and update the xml doc
                print _format_txt_target_result(target, result_dict[target])
                if shared_settings['xml_file']:
                    xml_output_list.append(
                        _format_xml_target_result(target, result_dict[target]))

        result_queue.task_done()

    # --TERMINATE--

    # Make sure all the processes had time to terminate
    task_queue.join()
    result_queue.join()
    #[process.join() for process in process_list] # Causes interpreter shutdown errors
    exec_time = time() - start_time

    # Output XML doc to a file if needed
    if shared_settings['xml_file']:
        result_xml_attr = {
            'httpsTunnel': str(shared_settings['https_tunnel_host']),
            'totalScanTime': str(exec_time),
            'defaultTimeout': str(shared_settings['timeout']),
            'startTLS': str(shared_settings['starttls'])
        }

        result_xml = Element('results', attrib=result_xml_attr)

        # Sort results in alphabetical order to make the XML files (somewhat) diff-able
        xml_output_list.sort(key=lambda xml_elem: xml_elem.attrib['host'])
        for xml_element in xml_output_list:
            result_xml.append(xml_element)

        xml_final_doc = Element('document',
                                title="SSLyze Scan Results",
                                SSLyzeVersion=PROJECT_VERSION,
                                SSLyzeWeb=PROJECT_URL)
        # Add the list of invalid targets
        xml_final_doc.append(
            ServersConnectivityTester.get_xml_result(targets_ERR))
        # Add the output of the plugins
        xml_final_doc.append(result_xml)

        # Hack: Prettify the XML file so it's (somewhat) diff-able
        xml_final_pretty = minidom.parseString(
            tostring(xml_final_doc, encoding='UTF-8'))
        with open(shared_settings['xml_file'], 'w') as xml_file:
            xml_file.write(
                xml_final_pretty.toprettyxml(indent="  ", encoding="utf-8"))

    print _format_title('Scan Completed in {0:.2f} s'.format(exec_time))
Example #38
class WholeSlideTiler(object):
    """Handles generation of tiles and metadata for all images in a slide."""
    def __init__(self, slide_path, outpath, img_format, tile_size, overlap,
                 limit_bounds, rotate, quality, nworkers, only_last):

        self._slide = open_slide(slide_path)  # the whole slide image
        self._outpath = outpath  # baseline name of each tiled image
        self._img_format = img_format  # image format (jpeg or png)
        self._tile_size = tile_size  # tile size. default: 256x256 pixels
        self._overlap = overlap
        self._limit_bounds = limit_bounds
        self._queue = JoinableQueue(
            2 * nworkers)  # setup multiprocessing worker queues.
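        # Bounding the queue at 2 * nworkers means put() blocks once the backlog
        # reaches that size, so tile generation is throttled to the workers' pace.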
        self._nworkers = nworkers  # number of workers
        self._only_last = only_last
        self._dzi_data = {}
        for _i in range(nworkers):
            TileWorker(self._queue, slide_path, tile_size, overlap,
                       limit_bounds, rotate, quality).start()

    def run(self):
        self._run_image()
        for name in self._slide.associated_images:
            self._run_image(name)
            # self._write_static()
        self._shutdown()

    def _run_image(self, associated=None):
        """Run a single image from self._slide."""
        if associated is None:
            image = self._slide
            outpath = self._outpath

        else:
            image = ImageSlide(self._slide.associated_images[associated])
            outpath = os.path.join(self._outpath, self._slugify(associated))

        dz = DeepZoomGenerator(image, self._tile_size, self._overlap,
                               self._limit_bounds)

        tiler = SingleImageTiler(dz, outpath, self._img_format, associated,
                                 self._queue, self._only_last)
        tiler.run()

        self._dzi_data[self._url_for(associated)] = tiler.get_dzi()

    def _url_for(self, associated):
        if associated is None:
            base = 'slide'
        else:
            base = self._slugify(associated)
        return '%s.dzi' % base

    @staticmethod
    def _copydir(src, dest):
        if not os.path.exists(dest):
            os.makedirs(dest)
        for name in os.listdir(src):
            srcpath = os.path.join(src, name)
            if os.path.isfile(srcpath):
                shutil.copy(srcpath, os.path.join(dest, name))

    @classmethod
    def _slugify(cls, text):
        text = normalize('NFKD', text.lower()).encode('ascii',
                                                      'ignore').decode()
        return re.sub('[^a-z0-9]+', '_', text)

    def _shutdown(self):
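        # One None sentinel per worker (presumably the TileWorker exit signal);
        # join() then blocks until every queued tile has been marked task_done().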
        for _i in range(self._nworkers):
            self._queue.put(None)
        self._queue.join()
Beispiel #39
0
    def __init__(self, config_location="/etc/redeem"):
        """
        config_location: provide the location to look for config files.
         - default is installed directory
         - allows for running in a local directory when debugging
        """
        firmware_version = "1.2.8~Predator"
        logging.info("Redeem initializing " + firmware_version)

        printer = Printer()
        self.printer = printer
        Path.printer = printer

        printer.firmware_version = firmware_version

        printer.config_location = config_location

        # Set up and Test the alarm framework
        Alarm.printer = self.printer
        Alarm.executor = AlarmExecutor()
        alarm = Alarm(Alarm.ALARM_TEST, "Alarm framework operational")

        # check for config files
        file_path = os.path.join(config_location, "default.cfg")
        if not os.path.exists(file_path):
            logging.error(
                file_path +
                " does not exist, this file is required for operation")
            sys.exit()  # maybe use something more graceful?

        local_path = os.path.join(config_location, "local.cfg")
        if not os.path.exists(local_path):
            logging.info(local_path + " does not exist, Creating one")
            os.mknod(local_path)
            os.chmod(local_path, 0o777)

        # Parse the config files.
        printer.config = CascadingConfigParser([
            os.path.join(config_location, 'default.cfg'),
            os.path.join(config_location, 'printer.cfg'),
            os.path.join(config_location, 'local.cfg')
        ])

        # Check the local and printer files
        printer_path = os.path.join(config_location, "printer.cfg")
        if os.path.exists(printer_path):
            printer.config.check(printer_path)
        printer.config.check(os.path.join(config_location, 'local.cfg'))

        # Get the revision and loglevel from the Config file
        level = self.printer.config.getint('System', 'loglevel')
        if level > 0:
            logging.getLogger().setLevel(level)

        # Set up additional logging, if present:
        if self.printer.config.getboolean('System', 'log_to_file'):
            logfile = self.printer.config.get('System', 'logfile')
            formatter = '%(asctime)s %(name)-12s %(levelname)-8s %(message)s'
            printer.redeem_logging_handler = logging.handlers.RotatingFileHandler(
                logfile, maxBytes=2 * 1024 * 1024)
            printer.redeem_logging_handler.setFormatter(
                logging.Formatter(formatter))
            printer.redeem_logging_handler.setLevel(level)
            logging.getLogger().addHandler(printer.redeem_logging_handler)
            logging.info("-- Logfile configured --")

        # Find out which capes are connected
        self.printer.config.parse_capes()
        self.revision = self.printer.config.replicape_revision
        if self.revision:
            logging.info("Found Replicape rev. " + self.revision)
            printer.replicape_key = printer.config.get_key()
        else:
            logging.warning("Oh no! No Replicape present!")
            self.revision = "0B3A"
        # We set it to 5 axis by default
        Printer.NUM_AXES = 5
        if self.printer.config.reach_revision:
            logging.info("Found Reach rev. " +
                         self.printer.config.reach_revision)
        if self.printer.config.reach_revision == "00A0":
            Printer.NUM_AXES = 8
        elif self.printer.config.reach_revision == "00B0":
            Printer.NUM_AXES = 7

        if self.revision in ["00A4", "0A4A", "00A3"]:
            PWM.set_frequency(100)
        elif self.revision in ["00B1", "00B2", "00B3", "0B3A"]:
            PWM.set_frequency(printer.config.getint('Cold-ends', 'pwm_freq'))

        # Init the Watchdog timer
        printer.watchdog = Watchdog()

        # Enable PWM and steppers
        printer.enable = Enable("P9_41")
        printer.enable.set_disabled()

        # Init the Paths
        printer.axis_config = printer.config.getint('Geometry', 'axis_config')

        # Init the end stops
        EndStop.inputdev = self.printer.config.get("Endstops", "inputdev")
        # Set up key listener
        Key_pin.listener = Key_pin_listener(EndStop.inputdev)

        homing_only_endstops = self.printer.config.get('Endstops',
                                                       'homing_only_endstops')

        for es in ["Z2", "Y2", "X2", "Z1", "Y1",
                   "X1"]:  # Order matches end stop inversion mask in Firmware
            pin = self.printer.config.get("Endstops", "pin_" + es)
            keycode = self.printer.config.getint("Endstops", "keycode_" + es)
            invert = self.printer.config.getboolean("Endstops", "invert_" + es)
            self.printer.end_stops[es] = EndStop(printer, pin, keycode, es,
                                                 invert)
            self.printer.end_stops[es].stops = self.printer.config.get(
                'Endstops', 'end_stop_' + es + '_stops')

        # activate all the endstops
        self.printer.set_active_endstops()

        # Init the 5 Stepper motors (step, dir, fault, DAC channel, name)
        Stepper.printer = printer
        if self.revision == "00A3":
            printer.steppers["X"] = Stepper_00A3("GPIO0_27", "GPIO1_29",
                                                 "GPIO2_4", 0, "X")
            printer.steppers["Y"] = Stepper_00A3("GPIO1_12", "GPIO0_22",
                                                 "GPIO2_5", 1, "Y")
            printer.steppers["Z"] = Stepper_00A3("GPIO0_23", "GPIO0_26",
                                                 "GPIO0_15", 2, "Z")
            printer.steppers["E"] = Stepper_00A3("GPIO1_28", "GPIO1_15",
                                                 "GPIO2_1", 3, "E")
            printer.steppers["H"] = Stepper_00A3("GPIO1_13", "GPIO1_14",
                                                 "GPIO2_3", 4, "H")
        elif self.revision == "00B1":
            printer.steppers["X"] = Stepper_00B1("GPIO0_27", "GPIO1_29",
                                                 "GPIO2_4", 11, 0, "X")
            printer.steppers["Y"] = Stepper_00B1("GPIO1_12", "GPIO0_22",
                                                 "GPIO2_5", 12, 1, "Y")
            printer.steppers["Z"] = Stepper_00B1("GPIO0_23", "GPIO0_26",
                                                 "GPIO0_15", 13, 2, "Z")
            printer.steppers["E"] = Stepper_00B1("GPIO1_28", "GPIO1_15",
                                                 "GPIO2_1", 14, 3, "E")
            printer.steppers["H"] = Stepper_00B1("GPIO1_13", "GPIO1_14",
                                                 "GPIO2_3", 15, 4, "H")
        elif self.revision == "00B2":
            printer.steppers["X"] = Stepper_00B2("GPIO0_27", "GPIO1_29",
                                                 "GPIO2_4", 11, 0, "X")
            printer.steppers["Y"] = Stepper_00B2("GPIO1_12", "GPIO0_22",
                                                 "GPIO2_5", 12, 1, "Y")
            printer.steppers["Z"] = Stepper_00B2("GPIO0_23", "GPIO0_26",
                                                 "GPIO0_15", 13, 2, "Z")
            printer.steppers["E"] = Stepper_00B2("GPIO1_28", "GPIO1_15",
                                                 "GPIO2_1", 14, 3, "E")
            printer.steppers["H"] = Stepper_00B2("GPIO1_13", "GPIO1_14",
                                                 "GPIO2_3", 15, 4, "H")
        elif self.revision in ["00B3", "0B3A"]:
            printer.steppers["X"] = Stepper_00B3("GPIO0_27", "GPIO1_29", 90,
                                                 11, 0, "X")
            printer.steppers["Y"] = Stepper_00B3("GPIO1_12", "GPIO0_22", 91,
                                                 12, 1, "Y")
            printer.steppers["Z"] = Stepper_00B3("GPIO0_23", "GPIO0_26", 92,
                                                 13, 2, "Z")
            printer.steppers["E"] = Stepper_00B3("GPIO1_28", "GPIO1_15", 93,
                                                 14, 3, "E")
            printer.steppers["H"] = Stepper_00B3("GPIO1_13", "GPIO1_14", 94,
                                                 15, 4, "H")
        elif self.revision in ["00A4", "0A4A"]:
            printer.steppers["X"] = Stepper_00A4("GPIO0_27", "GPIO1_29",
                                                 "GPIO2_4", 0, 0, "X")
            printer.steppers["Y"] = Stepper_00A4("GPIO1_12", "GPIO0_22",
                                                 "GPIO2_5", 1, 1, "Y")
            printer.steppers["Z"] = Stepper_00A4("GPIO0_23", "GPIO0_26",
                                                 "GPIO0_15", 2, 2, "Z")
            printer.steppers["E"] = Stepper_00A4("GPIO1_28", "GPIO1_15",
                                                 "GPIO2_1", 3, 3, "E")
            printer.steppers["H"] = Stepper_00A4("GPIO1_13", "GPIO1_14",
                                                 "GPIO2_3", 4, 4, "H")
        # Init Reach steppers, if present.
        if printer.config.reach_revision == "00A0":
            printer.steppers["A"] = Stepper_reach_00A4("GPIO2_2", "GPIO1_18",
                                                       "GPIO0_14", 5, 5, "A")
            printer.steppers["B"] = Stepper_reach_00A4("GPIO1_16", "GPIO0_5",
                                                       "GPIO0_14", 6, 6, "B")
            printer.steppers["C"] = Stepper_reach_00A4("GPIO0_3", "GPIO3_19",
                                                       "GPIO0_14", 7, 7, "C")
        elif printer.config.reach_revision == "00B0":
            printer.steppers["A"] = Stepper_reach_00B0("GPIO1_16", "GPIO0_5",
                                                       "GPIO0_3", 5, 5, "A")
            printer.steppers["B"] = Stepper_reach_00B0("GPIO2_2", "GPIO0_14",
                                                       "GPIO0_3", 6, 6, "B")

        # Enable the steppers and set the current, steps pr mm and
        # microstepping
        for name, stepper in self.printer.steppers.iteritems():
            stepper.in_use = printer.config.getboolean('Steppers',
                                                       'in_use_' + name)
            stepper.direction = printer.config.getint('Steppers',
                                                      'direction_' + name)
            stepper.has_endstop = printer.config.getboolean(
                'Endstops', 'has_' + name)
            stepper.set_current_value(
                printer.config.getfloat('Steppers', 'current_' + name))
            stepper.set_steps_pr_mm(
                printer.config.getfloat('Steppers', 'steps_pr_mm_' + name))
            stepper.set_microstepping(
                printer.config.getint('Steppers', 'microstepping_' + name))
            stepper.set_decay(
                printer.config.getint("Steppers", "slow_decay_" + name))
            # Add soft end stops
            printer.soft_min[Printer.axis_to_index(
                name)] = printer.config.getfloat('Endstops',
                                                 'soft_end_stop_min_' + name)
            printer.soft_max[Printer.axis_to_index(
                name)] = printer.config.getfloat('Endstops',
                                                 'soft_end_stop_max_' + name)
            slave = printer.config.get('Steppers', 'slave_' + name)
            if slave:
                printer.add_slave(name, slave)
                logging.debug("Axis " + name + " has slave " + slave)

        # Commit changes for the Steppers
        #Stepper.commit()

        Stepper.printer = printer

        # Delta printer setup
        if printer.axis_config == Printer.AXIS_CONFIG_DELTA:
            opts = [
                "Hez", "L", "r", "Ae", "Be", "Ce", "A_radial", "B_radial",
                "C_radial", "A_tangential", "B_tangential", "C_tangential"
            ]
            for opt in opts:
                Delta.__dict__[opt] = printer.config.getfloat('Delta', opt)

        # Discover and add all DS18B20 cold ends.
        paths = glob.glob("/sys/bus/w1/devices/28-*/w1_slave")
        logging.debug("Found cold ends: " + str(paths))
        for i, path in enumerate(paths):
            self.printer.cold_ends.append(ColdEnd(path, "ds18b20-" + str(i)))
            logging.info("Found Cold end " + str(i) + " on " + path)

        # Make Mosfets, temperature sensors and extruders
        heaters = ["E", "H", "HBP"]
        if self.printer.config.reach_revision:
            heaters.extend(["A", "B", "C"])
        for e in heaters:
            # Mosfets
            channel = self.printer.config.getint("Heaters", "mosfet_" + e)
            self.printer.mosfets[e] = Mosfet(channel)
            # Thermistors
            adc = self.printer.config.get("Heaters", "path_adc_" + e)
            if not self.printer.config.has_option("Heaters", "sensor_" + e):
                sensor = self.printer.config.get("Heaters", "temp_chart_" + e)
                logging.warning("Deprecated config option temp_chart_" + e +
                                " use sensor_" + e + " instead.")
            else:
                sensor = self.printer.config.get("Heaters", "sensor_" + e)
            self.printer.thermistors[e] = TemperatureSensor(
                adc, 'MOSFET ' + e, sensor)
            self.printer.thermistors[e].printer = printer

            # Extruders
            onoff = self.printer.config.getboolean('Heaters', 'onoff_' + e)
            prefix = self.printer.config.get('Heaters', 'prefix_' + e)
            max_power = self.printer.config.getfloat('Heaters',
                                                     'max_power_' + e)
            if e != "HBP":
                self.printer.heaters[e] = Extruder(self.printer.steppers[e],
                                                   self.printer.thermistors[e],
                                                   self.printer.mosfets[e], e,
                                                   onoff)
            else:
                self.printer.heaters[e] = HBP(self.printer.thermistors[e],
                                              self.printer.mosfets[e], onoff)
            self.printer.heaters[e].prefix = prefix
            self.printer.heaters[e].Kp = self.printer.config.getfloat(
                'Heaters', 'pid_Kp_' + e)
            self.printer.heaters[e].Ti = self.printer.config.getfloat(
                'Heaters', 'pid_Ti_' + e)
            self.printer.heaters[e].Td = self.printer.config.getfloat(
                'Heaters', 'pid_Td_' + e)

            # Min/max settings
            self.printer.heaters[e].min_temp = self.printer.config.getfloat(
                'Heaters', 'min_temp_' + e)
            self.printer.heaters[e].max_temp = self.printer.config.getfloat(
                'Heaters', 'max_temp_' + e)
            self.printer.heaters[
                e].max_temp_rise = self.printer.config.getfloat(
                    'Heaters', 'max_rise_temp_' + e)
            self.printer.heaters[
                e].max_temp_fall = self.printer.config.getfloat(
                    'Heaters', 'max_fall_temp_' + e)

        # Init the three fans. Argument is PWM channel number
        self.printer.fans = []
        if self.revision == "00A3":
            self.printer.fans.append(Fan(0))
            self.printer.fans.append(Fan(1))
            self.printer.fans.append(Fan(2))
        elif self.revision == "0A4A":
            self.printer.fans.append(Fan(8))
            self.printer.fans.append(Fan(9))
            self.printer.fans.append(Fan(10))
        elif self.revision in ["00B1", "00B2", "00B3", "0B3A"]:
            self.printer.fans.append(Fan(7))
            self.printer.fans.append(Fan(8))
            self.printer.fans.append(Fan(9))
            self.printer.fans.append(Fan(10))
        if printer.config.reach_revision == "00A0":
            self.printer.fans.append(Fan(14))
            self.printer.fans.append(Fan(15))
            self.printer.fans.append(Fan(7))

        # Set default value for all fans
        for i, f in enumerate(self.printer.fans):
            f.set_value(
                self.printer.config.getfloat('Fans',
                                             "default-fan-{}-value".format(i)))

        # Init the servos
        printer.servos = []
        servo_nr = 0
        while (printer.config.has_option("Servos", "servo_" + str(servo_nr) +
                                         "_enable")):
            if printer.config.getboolean("Servos",
                                         "servo_" + str(servo_nr) + "_enable"):
                channel = printer.config.get(
                    "Servos", "servo_" + str(servo_nr) + "_channel")
                pulse_min = printer.config.getfloat(
                    "Servos", "servo_" + str(servo_nr) + "_pulse_min")
                pulse_max = printer.config.getfloat(
                    "Servos", "servo_" + str(servo_nr) + "_pulse_max")
                angle_min = printer.config.getfloat(
                    "Servos", "servo_" + str(servo_nr) + "_angle_min")
                angle_max = printer.config.getfloat(
                    "Servos", "servo_" + str(servo_nr) + "_angle_max")
                angle_init = printer.config.getfloat(
                    "Servos", "servo_" + str(servo_nr) + "_angle_init")
                s = Servo(channel, pulse_min, pulse_max, angle_min, angle_max,
                          angle_init)
                printer.servos.append(s)
                logging.info("Added servo " + str(servo_nr))
            servo_nr += 1

        # Connect thermistors to fans
        for t, therm in self.printer.heaters.iteritems():
            for f, fan in enumerate(self.printer.fans):
                if not self.printer.config.has_option(
                        'Cold-ends', "connect-therm-{}-fan-{}".format(t, f)):
                    continue
                if printer.config.getboolean(
                        'Cold-ends', "connect-therm-{}-fan-{}".format(t, f)):
                    c = Cooler(therm, fan, "Cooler-{}-{}".format(t, f),
                               True)  # Use ON/OFF on these.
                    c.ok_range = 4
                    opt_temp = "therm-{}-fan-{}-target_temp".format(t, f)
                    if printer.config.has_option('Cold-ends', opt_temp):
                        target_temp = printer.config.getfloat(
                            'Cold-ends', opt_temp)
                    else:
                        target_temp = 60
                    c.set_target_temperature(target_temp)
                    c.enable()
                    printer.coolers.append(c)
                    logging.info("Cooler connects therm {} with fan {}".format(
                        t, f))

        # Connect fans to M106
        printer.controlled_fans = []
        for i, fan in enumerate(self.printer.fans):
            if not self.printer.config.has_option(
                    'Cold-ends', "add-fan-{}-to-M106".format(i)):
                continue
            if self.printer.config.getboolean('Cold-ends',
                                              "add-fan-{}-to-M106".format(i)):
                printer.controlled_fans.append(self.printer.fans[i])
                logging.info("Added fan {} to M106/M107".format(i))

        # Connect the colds to fans
        for ce, cold_end in enumerate(self.printer.cold_ends):
            for f, fan in enumerate(self.printer.fans):
                option = "connect-ds18b20-{}-fan-{}".format(ce, f)
                if self.printer.config.has_option('Cold-ends', option):
                    if self.printer.config.getboolean('Cold-ends', option):
                        c = Cooler(cold_end, fan,
                                   "Cooler-ds18b20-{}-{}".format(ce, f), False)
                        c.ok_range = 4
                        opt_temp = "cooler_{}_target_temp".format(ce)
                        if printer.config.has_option('Cold-ends', opt_temp):
                            target_temp = printer.config.getfloat(
                                'Cold-ends', opt_temp)
                        else:
                            target_temp = 60
                        c.set_target_temperature(target_temp)
                        c.enable()
                        printer.coolers.append(c)
                        logging.info(
                            "Cooler connects temp sensor ds18b20 {} with fan {}"
                            .format(ce, f))

        # Init filament sensors
        printer.filament_sensors = []

        # Init rotary encoders
        printer.rotary_encoders = []
        for ex in ["E", "H", "A", "B", "C"]:
            if not printer.config.has_option('Rotary-encoders',
                                             "enable-{}".format(ex)):
                continue
            if printer.config.getboolean("Rotary-encoders",
                                         "enable-{}".format(ex)):
                logging.debug("Rotary encoder {} enabled".format(ex))
                event = printer.config.get("Rotary-encoders",
                                           "event-{}".format(ex))
                cpr = printer.config.getint("Rotary-encoders",
                                            "cpr-{}".format(ex))
                diameter = printer.config.getfloat("Rotary-encoders",
                                                   "diameter-{}".format(ex))
                r = RotaryEncoder(event, cpr, diameter)
                printer.rotary_encoders.append(r)
                # Append as Filament Sensor
                ext_nr = Printer.axis_to_index(ex) - 3
                sensor = FilamentSensor(ex, r, ext_nr, printer)
                alarm_level = printer.config.getfloat(
                    "Filament-sensors", "alarm-level-{}".format(ex))
                logging.debug("Alarm level" + str(alarm_level))
                sensor.alarm_level = alarm_level
                printer.filament_sensors.append(sensor)

        # Make a queue of commands
        self.printer.commands = JoinableQueue(10)
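        # A maxsize of 10 means put() blocks once ten commands are buffered,
        # providing back-pressure on whatever feeds G-codes into the printer.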

        # Make a queue of commands that should not be buffered
        self.printer.sync_commands = JoinableQueue()
        self.printer.unbuffered_commands = JoinableQueue(10)

        # Bed compensation matrix
        printer.matrix_bed_comp = printer.load_bed_compensation_matrix()
        logging.debug("Loaded bed compensation matrix: \n" +
                      str(printer.matrix_bed_comp))

        for axis in printer.steppers.keys():
            i = Printer.axis_to_index(axis)
            printer.max_speeds[i] = printer.config.getfloat(
                'Planner', 'max_speed_' + axis.lower())
            printer.min_speeds[i] = printer.config.getfloat(
                'Planner', 'min_speed_' + axis.lower())
            printer.jerks[i] = printer.config.getfloat(
                'Planner', 'max_jerk_' + axis.lower())
            printer.home_speed[i] = printer.config.getfloat(
                'Homing', 'home_speed_' + axis.lower())
            printer.home_backoff_speed[i] = printer.config.getfloat(
                'Homing', 'home_backoff_speed_' + axis.lower())
            printer.home_backoff_offset[i] = printer.config.getfloat(
                'Homing', 'home_backoff_offset_' + axis.lower())
            printer.steps_pr_meter[i] = printer.steppers[
                axis].get_steps_pr_meter()
            printer.backlash_compensation[i] = printer.config.getfloat(
                'Steppers', 'backlash_' + axis.lower())

        printer.e_axis_active = printer.config.getboolean(
            'Planner', 'e_axis_active')

        dirname = os.path.dirname(os.path.realpath(__file__))

        # Create the firmware compiler
        pru_firmware = PruFirmware(dirname + "/firmware/firmware_runtime.p",
                                   dirname + "/firmware/firmware_runtime.bin",
                                   dirname + "/firmware/firmware_endstops.p",
                                   dirname + "/firmware/firmware_endstops.bin",
                                   self.printer, "/usr/bin/pasm")

        printer.move_cache_size = printer.config.getfloat(
            'Planner', 'move_cache_size')
        printer.print_move_buffer_wait = printer.config.getfloat(
            'Planner', 'print_move_buffer_wait')
        printer.min_buffered_move_time = printer.config.getfloat(
            'Planner', 'min_buffered_move_time')
        printer.max_buffered_move_time = printer.config.getfloat(
            'Planner', 'max_buffered_move_time')

        printer.max_length = printer.config.getfloat('Planner', 'max_length')

        self.printer.processor = GCodeProcessor(self.printer)
        self.printer.plugins = PluginsController(self.printer)

        # Path planner
        travel_default = False
        center_default = False
        home_default = False

        # Setting acceleration before PathPlanner init
        for axis in printer.steppers.keys():
            printer.acceleration[Printer.axis_to_index(
                axis)] = printer.config.getfloat(
                    'Planner', 'acceleration_' + axis.lower())

        self.printer.path_planner = PathPlanner(self.printer, pru_firmware)
        for axis in printer.steppers.keys():
            i = Printer.axis_to_index(axis)

            # Sometimes soft end stops aren't defined at the exact hardware boundary.
            # Add 100 mm as a search buffer.
            if printer.config.has_option('Geometry', 'travel_' + axis.lower()):
                printer.path_planner.travel_length[
                    axis] = printer.config.getfloat('Geometry',
                                                    'travel_' + axis.lower())
            else:
                printer.path_planner.travel_length[axis] = (
                    printer.soft_max[i] - printer.soft_min[i]) + .1
                if axis in ['X', 'Y', 'Z']:
                    travel_default = True

            if printer.config.has_option('Geometry', 'offset_' + axis.lower()):
                printer.path_planner.center_offset[
                    axis] = printer.config.getfloat('Geometry',
                                                    'offset_' + axis.lower())
            else:
                printer.path_planner.center_offset[axis] = (
                    printer.soft_min[i]
                    if printer.home_speed[i] > 0 else printer.soft_max[i])
                if axis in ['X', 'Y', 'Z']:
                    center_default = True

            if printer.config.has_option('Homing', 'home_' + axis.lower()):
                printer.path_planner.home_pos[axis] = printer.config.getfloat(
                    'Homing', 'home_' + axis.lower())
            else:
                printer.path_planner.home_pos[
                    axis] = printer.path_planner.center_offset[axis]
                if axis in ['X', 'Y', 'Z']:
                    home_default = True

        if printer.axis_config == Printer.AXIS_CONFIG_DELTA:
            if travel_default:
                logging.warning(
                    "Axis travel (travel_*) set by soft limits, manual setup is recommended for a delta"
                )
            if center_default:
                logging.warning(
                    "Axis offsets (offset_*) set by soft limits, manual setup is recommended for a delta"
                )
            if home_default:
                logging.warning(
                    "Home position (home_*) set by soft limits or offset_*")
                logging.info("Home position will be recalculated...")

                # convert home_pos to effector space
                Az = printer.path_planner.home_pos['X']
                Bz = printer.path_planner.home_pos['Y']
                Cz = printer.path_planner.home_pos['Z']

                delta_bot = self.printer.path_planner.native_planner.delta_bot

                z_offset = delta_bot.vertical_offset(Az, Bz,
                                                     Cz)  # vertical offset
                xyz = delta_bot.forward_kinematics(Az, Bz,
                                                   Cz)  # effector position

                # The default home_pos, provided above, is based on effector space
                # coordinates for carriage positions. We need to transform these to
                # get where the effector actually is.
                xyz[2] += z_offset
                for i, a in enumerate(['X', 'Y', 'Z']):
                    printer.path_planner.home_pos[a] = xyz[i]

                logging.info("Home position = %s" %
                             str(printer.path_planner.home_pos))

        # Read end stop value again now that PRU is running
        for _, es in self.printer.end_stops.iteritems():
            es.read_value()

        # Enable Stepper timeout
        timeout = printer.config.getint('Steppers', 'timeout_seconds')
        printer.swd = StepperWatchdog(printer, timeout)
        if printer.config.getboolean('Steppers', 'use_timeout'):
            printer.swd.start()

        # Set up communication channels
        printer.comms["USB"] = USB(self.printer)
        printer.comms["Eth"] = Ethernet(self.printer)

        if Pipe.check_tty0tty() or Pipe.check_socat():
            printer.comms["octoprint"] = Pipe(printer, "octoprint")
            printer.comms["toggle"] = Pipe(printer, "toggle")
            printer.comms["testing"] = Pipe(printer, "testing")
            printer.comms["testing_noret"] = Pipe(printer, "testing_noret")
            # Does not send "ok"
            printer.comms["testing_noret"].send_response = False
        else:
            logging.warning(
                "Neither tty0tty nor socat is installed! No virtual tty pipes enabled"
            )
Beispiel #40
0
from multiprocessing import JoinableQueue

q = JoinableQueue()
'''
q.join()      # Used by the producer: blocks until task_done() has been called
              # for every item, so the producer knows how much the consumers have processed.
q.task_done() # Used by the consumer: called once for each item taken off the queue,
              # acknowledging to join() that the item has been processed.
In more detail:
    Suppose the producer puts 100 items on the queue; join() keeps track of that count.
    Every time a consumer processes one item it must call task_done(). Once the producer
    (waiting in join()) has received 100 of these acknowledgements, it knows the consumers
    have finished processing all of the data.
'''
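# A minimal, self-contained sketch of the join()/task_done() handshake described
# above; the _demo_* names are illustrative and not part of the original example.
from multiprocessing import JoinableQueue as _DemoQueue
from multiprocessing import Process as _DemoProcess


def _demo_consumer(dq):
    while True:
        item = dq.get()        # blocks until an item is available
        print('consumed', item)
        dq.task_done()         # acknowledge that this item is fully processed


def _demo_producer(dq):
    for i in range(5):
        dq.put(i)
    dq.join()                  # blocks until task_done() was called once per put()


def _demo():
    dq = _DemoQueue()
    c = _DemoProcess(target=_demo_consumer, args=(dq,))
    c.daemon = True            # consumer dies when the main process exits
    c.start()
    _demo_producer(dq)         # returns only after the consumer has drained the queue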

from multiprocessing import Process


def consumer(q, name, color):
    while 1:
        info = q.get()
        print('%s %s took %s \033[0m' % (color, name, info))
        q.task_done()


def producer(q, product):
    for i in range(20):
        info = product + ' doll #%s' % str(i)
        q.put(info)
    q.join()  # 20 items are now tracked by the queue; this blocks until consumers have task_done()'d them all


if __name__ == '__main__':
    q = JoinableQueue(10)
Beispiel #41
0
class Toggle:
    def __init__(self):
        Clutter.init(None)
        ui = Clutter.Script()
        ui.load_from_file("ui-threads.json")
        stage = ui.get_object("stage")
        stage.connect("destroy", self.stop)
        stage.connect("destroy", lambda w: Clutter.main_quit())

        self.loader = ui.get_object("loader")
        self.loader.set_from_file("style/loading.png")
        self.t = Clutter.PropertyTransition(property_name='rotation-angle-z')
        self.t.set_from(0)
        self.t.set_to(360)
        self.t.set_duration(3000)
        self.t.set_animatable(self.loader)
        self.t.set_repeat_count(-1)
        self.t.start()

        button1 = ui.get_object("button1")
        button1.connect("button-press-event", self.execute_in_main)
        button1.set_reactive(True)

        button2 = ui.get_object("button2")
        button2.connect("button-press-event", self.idle_add_event)
        button2.set_reactive(True)

        button3 = ui.get_object("button3")
        button3.connect("button-press-event", self.threads_idle_add_event)
        button3.set_reactive(True)

        button4 = ui.get_object("button4")
        button4.connect("button-press-event", self.execute_in_thread)
        button4.set_reactive(True)

        stage = ui.get_object("stage")
        stage.set_title("Test threads")
        stage.connect("destroy", self.stop)
        stage.show_all()

        self.events = JoinableQueue(10)

        # UI events need to happen from within the
        # main thread. This was the only way I found that would do that.
        # It looks weird, but it works.
        def execute(event):
            print("Execute " + event + " from " + str(current_thread()))
            for i in range(100):
                hashlib.md5(str(list(range(100000))))
            print("Done executing")

        self.execute = execute
        stage.show()

    def execute_in_main(self, btn, other):
        self.execute("main")

    def idle_add_event(self, btn, other):
        self.events.put("glib_idle_add")

    def threads_idle_add_event(self, btn, other):
        self.events.put("threads_add_idle")

    def execute_in_thread(self, btn, other):
        self.events.put("execute_in_thread")

    def run(self):
        """ Start the process """
        self.running = True
        # Start the processes
        self.p0 = Thread(target=self.loop, args=(self.events, "Push updates"))
        self.p0.start()
        logging.info("Toggle ready")
        Clutter.main()

    def loop(self, event_queue, name):
        """ When a new event comes in, execute it """
        try:
            while self.running:
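                # The 1 s timeout on get() lets the loop re-check self.running,
                # so stop() can actually terminate this thread.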
                try:
                    event = event_queue.get(block=True, timeout=1)
                except queue.Empty:
                    continue
                if event == "glib_idle_add":
                    print("adding with Glib")
                    GLib.idle_add(self.execute, event)
                elif event == "threads_add_idle":
                    print("adding with Clutter")
                    Clutter.threads_add_idle(0, self.execute, event)
                elif event == "execute_in_thread":
                    print("Executing from thread")
                    self.execute(event)

                # Must hand it over to the main thread.
                event_queue.task_done()
        except Exception:
            logging.exception("Exception in {} loop: ".format(name))

    def stop(self, w):
        logging.debug("Stopping Toggle")
        self.running = False
        self.p0.join()
        Clutter.main_quit()
        logging.debug("Done")
Beispiel #42
0
class Servo:
    if PY2:
        range = xrange

    def __init__(self,
                 channel,
                 pulse_width_min,
                 pulse_width_max,
                 angle_min,
                 angle_max,
                 init_angle,
                 turnoff_timeout=0):
        """Define a new software controllable servo with adjustable speed control

        Keyword arguments:
        pulse_width_min -- The minimum pulse width defining the lowest angle
        pulse_width_max -- The maximum pulse width defining the biggest angle
        init_angle -- Initial angle that the servo should take when it is powered on. Range is 0 to 180deg
        turnoff_timeout -- number of seconds after which the servo is turned off if no command is received. 0 = never turns off
        """

        self.angle_min = angle_min
        self.angle_max = angle_max
        self.angle_range = angle_max - angle_min
        self.pulse_width_min = pulse_width_min
        self.pulse_width_max = pulse_width_max
        self.pulse_width_range = pulse_width_max - pulse_width_min

        self.turnoff_timeout = turnoff_timeout

        self.current_pulse_width = self.angle_to_pulse_width(init_angle)
        self.last_pulse_width = self.current_pulse_width

        self.last_angle = init_angle

        self.pulse_length = 20.0 * 10.0**-3.0  # 20 ms

        logging.debug("Angle min: {} deg".format(self.angle_min))
        logging.debug("Angle max: {} deg".format(self.angle_max))
        logging.debug("Angle tot: {} deg".format(self.angle_range))
        logging.debug("Pulse min: {} ms".format(self.pulse_width_min * 1000.0))
        logging.debug("Pulse max: {} ms".format(self.pulse_width_max * 1000.0))
        logging.debug("Pulse tot: {} ms".format(self.pulse_width_range *
                                                1000.0))

        self.queue = JoinableQueue(1000)
        self.lastCommandTime = 0

        self.t = Thread(target=self._wait_for_event, name="Servo")
        self.t.daemon = True
        self.running = True
        self.t.start()

        # Branch based on channel type.

        if type(channel) == int:  # Revision A
            self.pwm = PWM(channel, 50, self.current_pulse_width)
        else:  # Revision B
            # Set up the Shift register for enabling this servo
            if channel == "P9_14":
                shiftreg_nr = 1
                self.pwm = PWM_pin(channel, 50, self.current_pulse_width)
            elif channel == "P9_16":
                shiftreg_nr = 2
                self.pwm = PWM_pin(channel, 50, self.current_pulse_width)
            else:
                logging.warning(
                    "Tried to assign servo to an unknown channel/pin: " +
                    str(channel))
                return
            ShiftRegister.make(5)
            self.shift_reg = ShiftRegister.registers[shiftreg_nr]
        self.set_enabled()
        self.pwm.set_value(
            self.angle_to_pulse_width(init_angle) / self.pulse_length)

    def set_enabled(self, is_enabled=True):
        if is_enabled:
            self.shift_reg.add_state(0x01)
        else:
            self.shift_reg.remove_state(0x01)

    def set_angle(self, angle, speed=60, asynchronous=True):
        ''' Set the servo angle to the given value, in degree, with the given speed in deg / sec '''
        angle = max(min(self.angle_max, angle), self.angle_min)
        pulse_width = self.angle_to_pulse_width(angle)
        last_angle = self.last_angle

        logging.debug("Updating angle from {} (pw={}) to {} (pw={}) ".format(
            last_angle, self.last_pulse_width, angle, pulse_width))

        if angle == last_angle:
            return
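        # Seconds to wait between successive one-degree steps; the expression
        # below reduces to 1.0 / speed.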
        t = (math.fabs(angle - last_angle) / speed) / math.fabs(angle -
                                                                last_angle)

        if angle >= last_angle:
            increment = 1
        else:
            increment = -1

        for a in range(int(last_angle + increment), int(angle), increment):
            self.queue.put((self.angle_to_pulse_width(a), t))

        self.queue.put((self.angle_to_pulse_width(angle), t))

        self.last_pulse_width = pulse_width
        self.last_angle = angle

        if not asynchronous:
            self.queue.join()

    def turn_off(self):
        self.pwm.set_value(0)

    def stop(self):
        self.running = False
        self.t.join()
        self.turn_off()

    def _wait_for_event(self):
        while self.running:
            try:
                ev = self.queue.get(block=True, timeout=1)
            except queue.Empty:
                if self.turnoff_timeout > 0 and self.lastCommandTime > 0 and time.time(
                ) - self.lastCommandTime > self.turnoff_timeout:
                    self.lastCommandTime = 0
                    self.turn_off()
                continue
            except Exception:
                # To avoid exception printed on output
                pass

            self.current_pulse_width = ev[0]
            #logging.debug("setting pulse width to "+str(self.current_pulse_width))
            self.pwm.set_value(self.current_pulse_width / self.pulse_length)
            self.lastCommandTime = time.time()
            time.sleep(ev[1])

            self.queue.task_done()

    def angle_to_pulse_width(self, angle):
        return (
            (angle - self.angle_min) /
            self.angle_range) * self.pulse_width_range + self.pulse_width_min

    def pulse_width_to_angle(self, pulse_width):
        return (((pulse_width - self.pulse_width_min) /
                 (self.pulse_width_range)) * self.angle_range) + self.angle_min
Beispiel #43
0
    def __init__(self,
                 channel,
                 pulse_width_min,
                 pulse_width_max,
                 angle_min,
                 angle_max,
                 init_angle,
                 turnoff_timeout=0):
        """Define a new software controllable servo with adjustable speed control

        Keyword arguments:
        pulse_width_min -- The minimum pulse width defining the lowest angle
        pulse_width_max -- The maximum pulse width defining the biggest angle
        init_angle -- Initial angle that the servo should take when it is powered on. Range is 0 to 180deg
        turnoff_timeout -- number of seconds after which the servo is turned off if no command is received. 0 = never turns off
        """

        self.angle_min = angle_min
        self.angle_max = angle_max
        self.angle_range = angle_max - angle_min
        self.pulse_width_min = pulse_width_min
        self.pulse_width_max = pulse_width_max
        self.pulse_width_range = pulse_width_max - pulse_width_min

        self.turnoff_timeout = turnoff_timeout

        self.current_pulse_width = self.angle_to_pulse_width(init_angle)
        self.last_pulse_width = self.current_pulse_width

        self.last_angle = init_angle

        self.pulse_length = 20.0 * 10.0**-3.0  # 20 ms

        logging.debug("Angle min: {} deg".format(self.angle_min))
        logging.debug("Angle max: {} deg".format(self.angle_max))
        logging.debug("Angle tot: {} deg".format(self.angle_range))
        logging.debug("Pulse min: {} ms".format(self.pulse_width_min * 1000.0))
        logging.debug("Pulse max: {} ms".format(self.pulse_width_max * 1000.0))
        logging.debug("Pulse tot: {} ms".format(self.pulse_width_range *
                                                1000.0))

        self.queue = JoinableQueue(1000)
        self.lastCommandTime = 0

        self.t = Thread(target=self._wait_for_event, name="Servo")
        self.t.daemon = True
        self.running = True
        self.t.start()

        # Branch based on channel type.

        if type(channel) == int:  # Revision A
            self.pwm = PWM(channel, 50, self.current_pulse_width)
        else:  # Revision B
            # Set up the Shift register for enabling this servo
            if channel == "P9_14":
                shiftreg_nr = 1
                self.pwm = PWM_pin(channel, 50, self.current_pulse_width)
            elif channel == "P9_16":
                shiftreg_nr = 2
                self.pwm = PWM_pin(channel, 50, self.current_pulse_width)
            else:
                logging.warning(
                    "Tried to assign servo to an unknown channel/pin: " +
                    str(channel))
                return
            ShiftRegister.make(5)
            self.shift_reg = ShiftRegister.registers[shiftreg_nr]
        self.set_enabled()
        self.pwm.set_value(
            self.angle_to_pulse_width(init_angle) / self.pulse_length)
Beispiel #44
0
class TestDatasetSwmrWriteRead(TestCase):
    """ Testing SWMR functions when reading a dataset.
    Skip this test if the HDF5 library does not have the SWMR features.
    """
    def setUp(self):
        """ First setup a file with a small chunked and empty dataset.
        No data written yet.
        """

        # Note that when creating the file, the swmr=True is not required for
        # write, but libver='latest' is required.

        self.fname = self.mktemp()
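        # All HDF5 writes happen in a separate writer process driven by a
        # JoinableQueue of command dicts; calling queue.join() after each put()
        # makes the test block until the writer has executed that command.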

        self.writer_queue = JoinableQueue()
        self.writer_process = Process(target=writer_loop,
                                      args=(self.writer_queue, ),
                                      daemon=True)
        self.writer_process.start()

    def test_create_open_read_update_file(self):
        """ Update and read dataset and
         an attribute in group with SWMR mode
        """

        self.data = np.arange(13).astype('f')
        self.new_data = np.arange(13).astype('f') + 2

        writer_queue = self.writer_queue

        parameters = {'fname': self.fname}
        writer_queue.put({'action': 'create_file', 'parameters': parameters})
        writer_queue.join()

        parameters = {'name': 'data', 'value': self.data}
        writer_queue.put({
            'action': 'create_dataset',
            'parameters': parameters
        })
        writer_queue.join()

        # create attributes to test
        attributes = [
            {
                'name': 'attr_bool',
                'value': False,
                'new_value': True
            },
            {
                'name': 'attr_int',
                'value': 1,
                'new_value': 2
            },
            {
                'name': 'attr_float',
                'value': 1.4,
                'new_value': 3.2
            },
            {
                'name': 'attr_string',
                'value': 'test',
                'new_value': 'essai'
            },
        ]

        for attribute in attributes:
            attribute_name = attribute['name']
            attribute_value = attribute['value']

            parameters = {'name': attribute_name, 'value': attribute_value}
            writer_queue.put({
                'action': 'create_attribute',
                'parameters': parameters
            })
            writer_queue.join()

        # try opening the file in swmr

        file = None
        with self.assertRaises(OSError):
            file = h5py.File(self.fname, 'r', libver='latest', swmr=True)

        writer_queue.put({'action': 'set_swmr_mode'})
        writer_queue.join()

        # open file and check group
        file = h5py.File(self.fname, 'r', libver='latest', swmr=True)
        self.assertIn('group', file)

        # check attributes

        group = file['group']

        for attribute in attributes:
            attribute_name = attribute['name']
            attribute_value = attribute['value']
            attribute_new_value = attribute['new_value']

            self.assertIn(attribute_name, group.attrs)

            read_value = group.attrs[attribute_name]
            if isinstance(attribute_value, str):
                read_value = read_value.decode()
            self.assertEqual(read_value, attribute_value)

            parameters = {'name': attribute_name, 'value': attribute_new_value}
            writer_queue.put({
                'action': 'update_attribute',
                'parameters': parameters
            })
            writer_queue.join()

            read_value = group.attrs[attribute_name]
            if isinstance(attribute_value, str):
                read_value = read_value.decode()
            self.assertEqual(read_value, attribute_value)

            writer_queue.put({'action': 'flush_group'})
            writer_queue.join()

            # check that read group attribute has not changed
            read_value = group.attrs[attribute_name]
            if isinstance(attribute_value, str):
                read_value = read_value.decode()
            self.assertEqual(read_value, attribute_value)

            group.refresh()

            # check that read group attribute has changed
            read_value = group.attrs[attribute_name]
            if isinstance(attribute_value, str):
                read_value = read_value.decode()
            self.assertEqual(read_value, attribute_new_value)

        # check that dataset has been recorded
        data = group['data']
        self.assertArrayEqual(data[:], self.data)

        # update data
        parameters = {'name': 'data', 'value': self.new_data}
        writer_queue.put({
            'action': 'update_dataset',
            'parameters': parameters
        })
        writer_queue.join()

        # check that data has not been updated
        data = group['data']
        self.assertArrayEqual(data[:], self.data)

        # flush group
        writer_queue.put({'action': 'flush_group'})
        writer_queue.join()

        # check that data has not been updated
        data = group['data']
        self.assertArrayEqual(data[:], self.data)

        # refresh group, this won't update dataset
        group.refresh()

        # check that data has not been updated
        data = group['data']
        self.assertArrayEqual(data[:], self.data)

        # refresh dataset, this will update data
        data.refresh()

        # check that data has been updated
        self.assertArrayEqual(data[:], self.new_data)

        writer_queue.put({'action': 'close_file'})
        writer_queue.join()

        file.close()

        pass

    def tearDown(self):

        self.writer_queue.put({'action': "stop"})
        self.writer_queue.join()

        self.writer_process.join()
Beispiel #45
0
        foods = q.get()
        print('%s consumed %s' % (name, foods))
        q.task_done()


def producer(name, food, q):
    for i in range(4):
        time.sleep(random.randint(0, 2))
        foods = '%s produced %s %s' % (name, food, i)
        print(foods)
        q.put(foods)
    q.join()


if __name__ == '__main__':
    q = JoinableQueue(20)

    p1 = Process(target=producer, args=('egon', 'baozi', q))
    p1.start()

    p2 = Process(target=producer, args=('jason', 'baozi', q))
    p2.start()

    c = Process(target=consunmer, args=('alise', q))
    c.daemon = True
    c.start()
    c2 = Process(target=consunmer, args=('hanke', q))
    c2.daemon = True
    c2.start()

    p1.join()
Beispiel #46
0
def main(_):
    parser = argparse.ArgumentParser(description='ProjE.')
    parser.add_argument('--data',
                        dest='data_dir',
                        type=str,
                        help="Data folder",
                        default='./data/FB15k/')
    parser.add_argument('--lr',
                        dest='lr',
                        type=float,
                        help="Learning rate",
                        default=0.01)
    parser.add_argument("--dim",
                        dest='dim',
                        type=int,
                        help="Embedding dimension",
                        default=200)
    parser.add_argument("--batch",
                        dest='batch',
                        type=int,
                        help="Batch size",
                        default=100)
    parser.add_argument("--comb",
                        dest="combination_method",
                        type=str,
                        help="Combination method",
                        default='simple')
    parser.add_argument("--worker",
                        dest='n_worker',
                        type=int,
                        help="Evaluation worker",
                        default=3)
    parser.add_argument("--generator",
                        dest='n_generator',
                        type=int,
                        help="Data generator",
                        default=10)
    parser.add_argument("--eval_batch",
                        dest="eval_batch",
                        type=int,
                        help="Evaluation batch size",
                        default=500)
    parser.add_argument("--save_dir",
                        dest='save_dir',
                        type=str,
                        help="Model path",
                        default='./')
    parser.add_argument("--load_model",
                        dest='load_model',
                        type=str,
                        help="Model file",
                        default="")
    parser.add_argument("--save_per",
                        dest='save_per',
                        type=int,
                        help="Save per x iteration",
                        default=10)
    parser.add_argument("--eval_per",
                        dest='eval_per',
                        type=int,
                        help="Evaluate every x iteration",
                        default=1)
    parser.add_argument("--max_iter",
                        dest='max_iter',
                        type=int,
                        help="Max iteration",
                        default=100)
    parser.add_argument("--summary_dir",
                        dest='summary_dir',
                        type=str,
                        help="summary directory",
                        default='./ProjE_summary/')
    parser.add_argument("--keep",
                        dest='drop_out',
                        type=float,
                        help="Keep prob (1.0 keep all, 0. drop all)",
                        default=0.5)
    parser.add_argument("--optimizer",
                        dest='optimizer',
                        type=str,
                        help="Optimizer",
                        default='adam')
    parser.add_argument("--prefix",
                        dest='prefix',
                        type=str,
                        help="model_prefix",
                        default='DEFAULT')
    parser.add_argument("--loss_weight",
                        dest='loss_weight',
                        type=float,
                        help="Weight on parameter loss",
                        default=1e-5)
    parser.add_argument("--neg_weight",
                        dest='neg_weight',
                        type=float,
                        help="Sampling weight on negative examples",
                        default=0.5)

    args = parser.parse_args()

    print(args)

    model = ProjE(args.data_dir,
                  embed_dim=args.dim,
                  combination_method=args.combination_method,
                  dropout=args.drop_out,
                  neg_weight=args.neg_weight)

    train_hrt_input, train_hrt_weight, train_trh_input, train_trh_weight, \
    train_loss, train_op = train_ops(model, learning_rate=args.lr,
                                     optimizer_str=args.optimizer,
                                     regularizer_weight=args.loss_weight)
    test_input, test_head, test_tail = test_ops(model)

    with tf.Session() as session:
        tf.initialize_all_variables().run()

        saver = tf.train.Saver()

        iter_offset = 0

        if args.load_model is not None and os.path.exists(args.load_model):
            saver.restore(session, args.load_model)
            iter_offset = int(
                args.load_model.split('.')[-2].split('_')[-1]) + 1
            print("Load model from %s, iteration %d restored." %
                  (args.load_model, iter_offset))

        total_inst = model.n_train

        # training data generator
        raw_training_data_queue = Queue()
        training_data_queue = Queue()
        data_generators = list()
        for i in range(args.n_generator):
            data_generators.append(
                Process(target=data_generator_func,
                        args=(raw_training_data_queue, training_data_queue,
                              model.tr_h, model.hr_t, model.n_entity,
                              args.neg_weight)))
            data_generators[-1].start()

        evaluation_queue = JoinableQueue()
        result_queue = Queue()
        for i in range(args.n_worker):
            worker = Process(target=worker_func,
                             args=(evaluation_queue, result_queue, model.hr_t,
                                   model.tr_h))
            worker.start()

        for data_func, test_type in zip(
            [model.validation_data, model.testing_data], ['VALID', 'TEST']):
            accu_mean_rank_h = list()
            accu_mean_rank_t = list()
            accu_filtered_mean_rank_h = list()
            accu_filtered_mean_rank_t = list()

            evaluation_count = 0

            for testing_data in data_func(batch_size=args.eval_batch):
                head_pred, tail_pred = session.run([test_head, test_tail],
                                                   {test_input: testing_data})

                evaluation_queue.put((testing_data, head_pred, tail_pred))
                evaluation_count += 1

            for i in range(args.n_worker):
                evaluation_queue.put(None)

            print("waiting for worker finishes their work")
            evaluation_queue.join()
            print("all worker stopped.")
            while evaluation_count > 0:
                evaluation_count -= 1

                (mrh, fmrh), (mrt, fmrt) = result_queue.get()
                accu_mean_rank_h += mrh
                accu_mean_rank_t += mrt
                accu_filtered_mean_rank_h += fmrh
                accu_filtered_mean_rank_t += fmrt

            print(
                "[%s] INITIALIZATION [HEAD PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f"
                %
                (test_type, np.mean(accu_mean_rank_h),
                 np.mean(accu_filtered_mean_rank_h),
                 np.mean(np.asarray(accu_mean_rank_h, dtype=np.int32) < 10),
                 np.mean(
                     np.asarray(accu_filtered_mean_rank_h, dtype=np.int32) < 10
                 )))

            print(
                "[%s] INITIALIZATION [TAIL PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f"
                %
                (test_type, np.mean(accu_mean_rank_t),
                 np.mean(accu_filtered_mean_rank_t),
                 np.mean(np.asarray(accu_mean_rank_t, dtype=np.int32) < 10),
                 np.mean(
                     np.asarray(accu_filtered_mean_rank_t, dtype=np.int32) < 10
                 )))

        for n_iter in range(iter_offset, args.max_iter):
            start_time = timeit.default_timer()
            accu_loss = 0.
            accu_re_loss = 0.
            ninst = 0

            print("initializing raw training data...")
            nbatches_count = 0
            for dat in model.raw_training_data(batch_size=args.batch):
                raw_training_data_queue.put(dat)
                nbatches_count += 1
            print("raw training data initialized.")

            while nbatches_count > 0:
                nbatches_count -= 1

                hr_tlist, hr_tweight, tr_hlist, tr_hweight = training_data_queue.get(
                )

                l, rl, _ = session.run(
                    [train_loss, model.regularizer_loss, train_op], {
                        train_hrt_input: hr_tlist,
                        train_hrt_weight: hr_tweight,
                        train_trh_input: tr_hlist,
                        train_trh_weight: tr_hweight
                    })

                accu_loss += l
                accu_re_loss += rl
                ninst += len(hr_tlist) + len(tr_hlist)

                # progress report; the original guard `ninst % (5000) is not None`
                # was always true, so simply report after every batch
                print(
                    '[%d sec](%d/%d) : %.2f -- loss : %.5f rloss: %.5f ' %
                    (timeit.default_timer() - start_time, ninst,
                     total_inst, float(ninst) / total_inst, l /
                     (len(hr_tlist) + len(tr_hlist)), args.loss_weight *
                     (rl / (len(hr_tlist) + len(tr_hlist)))),
                    end='\r')
            print("")
            print("iter %d avg loss %.5f, time %.3f" %
                  (n_iter, accu_loss / ninst,
                   timeit.default_timer() - start_time))

            if n_iter % args.save_per == 0 or n_iter == args.max_iter - 1:
                save_path = saver.save(
                    session,
                    os.path.join(
                        args.save_dir, "ProjE_" + str(args.prefix) + "_" +
                        str(n_iter) + ".ckpt"))
                print("Model saved at %s" % save_path)

            if n_iter % args.eval_per == 0 or n_iter == args.max_iter - 1:

                for data_func, test_type in zip(
                    [model.validation_data, model.testing_data],
                    ['VALID', 'TEST']):
                    accu_mean_rank_h = list()
                    accu_mean_rank_t = list()
                    accu_filtered_mean_rank_h = list()
                    accu_filtered_mean_rank_t = list()

                    evaluation_count = 0

                    for testing_data in data_func(batch_size=args.eval_batch):
                        head_pred, tail_pred = session.run(
                            [test_head, test_tail], {test_input: testing_data})

                        evaluation_queue.put(
                            (testing_data, head_pred, tail_pred))
                        evaluation_count += 1

                    for i in range(args.n_worker):
                        evaluation_queue.put(None)

                    print("waiting for worker finishes their work")
                    evaluation_queue.join()
                    print("all worker stopped.")
                    while evaluation_count > 0:
                        evaluation_count -= 1

                        (mrh, fmrh), (mrt, fmrt) = result_queue.get()
                        accu_mean_rank_h += mrh
                        accu_mean_rank_t += mrt
                        accu_filtered_mean_rank_h += fmrh
                        accu_filtered_mean_rank_t += fmrt

                    print(
                        "[%s] ITER %d [HEAD PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f"
                        %
                        (test_type, n_iter, np.mean(accu_mean_rank_h),
                         np.mean(accu_filtered_mean_rank_h),
                         np.mean(
                             np.asarray(accu_mean_rank_h, dtype=np.int32) < 10
                         ),
                         np.mean(
                             np.asarray(accu_filtered_mean_rank_h,
                                        dtype=np.int32) < 10)))

                    print(
                        "[%s] ITER %d [TAIL PREDICTION] MEAN RANK: %.1f FILTERED MEAN RANK %.1f HIT@10 %.3f FILTERED HIT@10 %.3f"
                        %
                        (test_type, n_iter, np.mean(accu_mean_rank_t),
                         np.mean(accu_filtered_mean_rank_t),
                         np.mean(
                             np.asarray(accu_mean_rank_t, dtype=np.int32) < 10
                         ),
                         np.mean(
                             np.asarray(accu_filtered_mean_rank_t,
                                        dtype=np.int32) < 10)))
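
# NOTE (sketch, not part of the original source): worker_func is started above but
# its body is not shown in this excerpt. Based on how the main loop drives it --
# each queue item is a (testing_data, head_pred, tail_pred) tuple, one None marker
# is sent per worker at the end of every evaluation round, and result_queue.get()
# yields ((mrh, fmrh), (mrt, fmrt)) lists -- a compatible worker could look roughly
# like the version below. The assumed data layout (testing_data rows as (h, r, t),
# hr_t/tr_h as nested dicts of known-correct entities, *_pred rows as score-ordered
# entity ids) and the name worker_func_sketch are illustrative, not the original.
def worker_func_sketch(in_queue, out_queue, hr_t, tr_h):
    def ranks(true_id, candidates, known_correct):
        # raw rank: position of the true entity in the score-ordered candidates;
        # filtered rank: the same, ignoring other known-correct entities
        raw = filtered = 0
        for c in candidates:
            if c == true_id:
                break
            raw += 1
            if c not in known_correct:
                filtered += 1
        return raw, filtered

    while True:
        item = in_queue.get()
        if item is None:
            # end-of-round marker; keep the worker alive for later rounds
            in_queue.task_done()
            continue
        testing_data, head_pred, tail_pred = item
        mrh, fmrh, mrt, fmrt = [], [], [], []
        for i in range(len(testing_data)):
            h, r, t = testing_data[i]  # assumed column order
            raw, filt = ranks(h, head_pred[i], tr_h.get(t, {}).get(r, set()))
            mrh.append(raw)
            fmrh.append(filt)
            raw, filt = ranks(t, tail_pred[i], hr_t.get(h, {}).get(r, set()))
            mrt.append(raw)
            fmrt.append(filt)
        out_queue.put(((mrh, fmrh), (mrt, fmrt)))
        in_queue.task_done()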
Beispiel #47
0
    exit = close


if __name__ == '__main__':

    import signal
    from multiprocessing import JoinableQueue

    from ...Machine import Machine
    from ...ConfigParser import ConfigParser
    from ..OscProcessor import OscProcessor

    logging.basicConfig(level=logging.DEBUG,
                        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%Y/%m/%d %H:%M:%S')

    m = Machine()
    c = ConfigParser('../conf/default.conf')

    m.config = c
    m.commands = JoinableQueue(10)
    m.unbuffered_commands = JoinableQueue(10)
    m.synced_commands = JoinableQueue()

    m.osc_processor = OscProcessor(m)
    o = OscServer(m)
    o.start()

    signal.pause()
Beispiel #48
0
def consumer(input_q):
    while True:
        # Get an item from the queue
        item = input_q.get()
        # Process item
        print(item)
        # Signal completion
        input_q.task_done() # Signal task completion

def producer(sequence,output_q):
    for item in sequence:
        # Put the item on the queue
        output_q.put(item)

if __name__ == '__main__':
    # JoinableQueue(), a Queue subclass, additionally provides task_done() and join() methods
    from multiprocessing import Process, JoinableQueue
    q = JoinableQueue()

    # Launch the consumer process
    cons_p = Process(target=consumer, args=(q,))
    cons_p.daemon = True
    cons_p.start()

    # Run the producer function on some data
    sequence = range(100)    # Replace with useful data
    producer(sequence, q)

    # Wait for the consumer to finish
    q.join()
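
# Variation (sketch, not part of the snippet above): the consumer runs as a daemon
# process and is simply killed when the interpreter exits. If a clean shutdown is
# preferred, a None sentinel lets the consumer leave its loop on its own.
def consumer_with_sentinel(input_q):
    while True:
        item = input_q.get()
        if item is None:           # sentinel: no more work will arrive
            input_q.task_done()
            break
        print(item)                # process the item
        input_q.task_done()        # signal task completion

# usage sketch:
#   cons_p = Process(target=consumer_with_sentinel, args=(q,))
#   cons_p.start()
#   producer(sequence, q)
#   q.put(None)                    # ask the consumer to stop
#   q.join()                       # every item, including the sentinel, processed
#   cons_p.join()                  # the consumer has exited on its own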
Beispiel #49
0
from datetime import datetime, timezone
import logging
from multiprocessing import JoinableQueue
import threading
import select
import sys
import weakref

from nemubot import __version__
from nemubot.consumer import Consumer, EventConsumer, MessageConsumer
from nemubot import datastore
import nemubot.hooks

logger = logging.getLogger("nemubot")

sync_queue = JoinableQueue()


def sync_act(*args):
    sync_queue.put(list(args))


class Bot(threading.Thread):
    """Class containing the bot context and ensuring key goals"""
    def __init__(self,
                 ip="127.0.0.1",
                 modules_paths=list(),
                 data_store=datastore.Abstract(),
                 debug=False):
        """Initialize the bot context
	def insert_files(self, out,cfg, producer,return_dict, skip_header=0, rec_delim=os.linesep):
		self.opt.skip_header = skip_header
		self.opt.rec_delim = rec_delim
		log = logging.getLogger('cli')
		self.scfg, self.tcfg = cfg
		file_object_cache = FileObjectCache()
		start = time.time()
		
		stat_queue = JoinableQueue()
		
		if 1:
			put_queue = JoinableQueue(1024 * self.opt.processes)
			

			
		if 1:
			put = {'update': self.put_update}[self.opt.put]
			putter_processes = list(islice(repeatedly(Process, target=self.putter, args=(put, put_queue, stat_queue, return_dict)), self.opt.processes))
			for putter_process in putter_processes:
				putter_process.start()
		if 1:
			statter_process = Process(target=self.statter, args=(stat_queue, start))
			statter_process.start()
			
		out_names=[]
		#walk = {'filesystem': self.walk_filesystem}[self.opt.walk]
		for file in producer[0](*producer[1]):
			out_names.append(file)
			put_queue.put(file)	
			#time.sleep(3)
		out.dump_files=out_names

		for putter_process in putter_processes:
			put_queue.put(None)
		put_queue.close()
		for putter_process in putter_processes:
			putter_process.join()
			
		stat_queue.put(None)
		stat_queue.close()
		statter_process.join()
		put_queue.join_thread()
		stat_queue.join_thread()
		print 77777, counter.value()
		print 77777, self.total_ins
		print 7777, (return_dict.values())
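
	# Sketch (not part of the original source): the putter and statter workers used
	# above are not shown in this excerpt. Given the wiring -- each putter reads file
	# names from put_queue until a None sentinel, and the statter reads from stat_queue
	# until None -- a compatible putter could look roughly like this. The method name,
	# the error handling and what exactly goes on stat_queue are assumptions.
	def putter_sketch(self, put, put_queue, stat_queue, return_dict):
		log = logging.getLogger('cli')
		while True:
			filename = put_queue.get()
			if filename is None:            # one sentinel per putter process
				break
			try:
				put(filename)               # e.g. self.put_update
				stat_queue.put(filename)    # let the statter account for this file
			except Exception:
				log.exception('failed to insert %s', filename)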
Beispiel #51
0
    def run(self):
        global sync_queue

        # Recreate the sync_queue, as the daemonization process tends to disturb it
        old_sync_queue, sync_queue = sync_queue, JoinableQueue()
        while not old_sync_queue.empty():
            sync_queue.put_nowait(old_sync_queue.get())

        self._poll.register(sync_queue._reader, select.POLLIN | select.POLLPRI)

        self.stop = False

        # Relaunch events
        self._update_event_timer()

        logger.info("Starting main loop")
        while not self.stop:
            for fd, flag in self._poll.poll():
                # Handle internal socket passing orders
                if fd != sync_queue._reader.fileno() and fd in self.servers:
                    srv = self.servers[fd]

                    if flag & (select.POLLERR | select.POLLHUP
                               | select.POLLNVAL):
                        try:
                            srv.exception(flag)
                        except:
                            logger.exception(
                                "Uncatched exception on server exception")

                    if srv.fileno() > 0:
                        if flag & (select.POLLOUT):
                            try:
                                srv.async_write()
                            except:
                                logger.exception(
                                    "Uncatched exception on server write")

                        if flag & (select.POLLIN | select.POLLPRI):
                            try:
                                for i in srv.async_read():
                                    self.receive_message(srv, i)
                            except:
                                logger.exception(
                                    "Uncatched exception on server read")

                    else:
                        del self.servers[fd]

                # Always check the sync queue
                while not sync_queue.empty():
                    args = sync_queue.get()
                    action = args.pop(0)

                    logger.debug("Executing sync_queue action %s%s", action,
                                 args)

                    if action == "sckt" and len(args) >= 2:
                        try:
                            if args[0] == "write":
                                self._poll.modify(
                                    int(args[1]), select.POLLOUT
                                    | select.POLLIN | select.POLLPRI)
                            elif args[0] == "unwrite":
                                self._poll.modify(
                                    int(args[1]),
                                    select.POLLIN | select.POLLPRI)

                            elif args[0] == "register":
                                self._poll.register(
                                    int(args[1]),
                                    select.POLLIN | select.POLLPRI)
                            elif args[0] == "unregister":
                                self._poll.unregister(int(args[1]))
                        except:
                            logger.exception(
                                "Unhandled excpetion during action:")

                    elif action == "exit":
                        self.quit()

                    elif action == "launch_consumer":
                        pass  # This is treated after the loop

                    sync_queue.task_done()

            # Launch new consumer threads if necessary
            while self.cnsr_queue.qsize() > self.cnsr_thrd_size:
                # Next launch if two more items in queue
                self.cnsr_thrd_size += 2

                c = Consumer(self)
                self.cnsr_thrd.append(c)
                c.start()
        sync_queue = None
        logger.info("Ending main loop")
Beispiel #52
0
class QiushiSpider(object):
    def __init__(self):
        self.url = 'https://www.qiushibaike.com/8hr/page/{}/'
        self.headers = {
            'User-Agent':
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'
        }
        # the three main queues; process workers need JoinableQueue, not a plain Queue
        self.url_queue = JoinableQueue()  # type:Queue
        self.page_queue = JoinableQueue()  # type:Queue
        self.data_queue = JoinableQueue()  # type:Queue

    def run(self):
        # launch the worker tasks
        self.excute_task(self.add_url)
        self.excute_task(self.add_page)
        self.excute_task(self.add_data)
        self.excute_task(self.save_data)
        # wait until every queued task is done
        self.url_queue.join()
        self.page_queue.join()
        self.data_queue.join()

    def excute_task(self, task, count=1):
        """
        使用线程执行任务
        :param task:  要执行任务函数(方法)
        :param count:  开启多少个线程来执行
        """
        for i in range(count):
            t = Process(target=task)
            t.daemon = True
            t.start()

    def add_url(self):
        """获取url列表,添加到url_queue队列"""
        for i in range(1, 14):
            url = self.url.format(i)
            self.url_queue.put(url)

    def add_page(self):
        """从url_queue获取url的页面,添加到page_queue队列"""
        while True:
            url = self.url_queue.get()
            response = requests.get(url=url, headers=self.headers)
            page = response.content.decode()
            self.page_queue.put(page)
            self.url_queue.task_done()  # mark this url_queue item as done

    def add_data(self):
        """从page_queue获取页面,解析页面,添加到data_queue队列"""
        while True:
            page = self.page_queue.get()
            element = etree.HTML(page)
            divs = element.xpath('//*[@id="content-left"]/div')  # all joke entries
            data_list = []
            for div in divs:
                """遍历divs,拿到每一个段子的div,然后把想要的数据保存在data_list中"""
                item = {}
                # user avatar
                item['head_img'] = div.xpath('./div[1]/a[1]/img/@src')
                item['head_img'] = 'https:' + item['head_img'][0] if len(
                    item['head_img']) != 0 else None
                # nickname
                item['nick_name'] = div.xpath('./div[1]/a[2]/h2/text()')
                item['nick_name'] = item['nick_name'][0].strip() if len(
                    item['nick_name']) != 0 else None
                # gender
                gender_class = div.xpath('./div[1]/div/@class')
                if len(gender_class) != 0:  # check whether a gender was provided
                    # articleGender womenIcon
                    item['gender'] = re.findall(r'(\w+)Icon',
                                                gender_class[0])[0]
                else:
                    item['gender'] = None
                # joke text
                item['content'] = ''.join(div.xpath('./a/div/span//text()'))
                # number of "funny" votes
                item['vote_count'] = div.xpath(
                    './/span[@class="stats-vote"]/i/text()')[0]
                # number of comments
                item['comments_count'] = div.xpath(
                    './/span[@class="stats-comments"]/a/i/text()')[0]
                # print(item)
                data_list.append(item)
            self.data_queue.put(data_list)  # add to the data queue
            self.page_queue.task_done()  # mark this page_queue item as done

    def save_data(self):
        """从data_queue拿到数据保存"""
        while True:
            data_list = self.data_queue.get()
            with open('./test/糗事百科热门(多进程版).jsonlines', 'a',
                      encoding='utf-8') as f:
                for data in data_list:
                    json.dump(data, f, ensure_ascii=False)
                    f.write('\n')
            self.data_queue.task_done()  # mark this data_queue item as done
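
# Usage sketch (not part of the original snippet): run() starts the daemon worker
# processes and then blocks on the three JoinableQueue.join() calls, returning once
# every queued URL, page and data batch has been marked task_done().
#   if __name__ == '__main__':
#       QiushiSpider().run()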
Beispiel #53
0
# coding: utf-8
import time
from multiprocessing import freeze_support
from multiprocessing import JoinableQueue
from multiprocessing import Process
from multiprocessing import Queue
from random import random
# the multiprocessing Queue class does not support task_done() and join(); the dedicated JoinableQueue is needed for that

tasks_queue = JoinableQueue()
results_queue = Queue()


def double(n):
    return n * 2


def producer(in_queue):
    while 1:
        wt = random()
        time.sleep(wt)
        in_queue.put((double, wt))
        if wt > 0.9:
            in_queue.put(None)
            print("Stop Producer")
            break


def consumer(in_queue, out_queue):
    while 1:
        task = in_queue.get()
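        # completion sketch (not in the original snippet): the producer enqueues
        # (callable, value) tuples and a final None sentinel, so a plausible
        # continuation is:
        if task is None:
            in_queue.task_done()
            print("Stop Consumer")
            break
        func, arg = task
        out_queue.put(func(arg))   # e.g. double(wt)
        in_queue.task_done()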
Beispiel #54
0
    def __init__(self):
        self.base_filename = "raw_data/conceptnet4_flat_"
        self.num_threads = 10

        self.queue = JoinableQueue()