def getPairsThread(pairs):
    xVals = []
    res = Queue()
    processors = multiprocessing.cpu_count()
    width = len(pairs)/(processors -1)
    processes = []
    print "Initial pairs len " + str(len(pairs)) + " " + str(width)
    ##processes = [Process(target=getPairs, args=(pairs, xVals)) for i in xrange(processors)]

    available = processors -1
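    # carve pairs into one contiguous slice per worker process; each worker
    # puts its partial result list on the shared queue res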
    for i in xrange(available):
        start = width * i
        end = ((i + 1) * width)
        p = Process(target=getPairs, args=(pairs[start:end], res))
        processes.append(p)

    if (width * available < len(pairs)):
        # hand the leftover tail (when len(pairs) is not evenly divisible) to one extra process
        p = Process(target=getPairs, args=(pairs[(width * available):], res))
        processes.append(p)
    for p in processes:
        p.start()
    for p in processes:
        p.join()

    # one partial result list per process; drain them all
    while not res.empty():
        xVals += res.get()

    return xVals
Example #2
def start(repos, counter):
    """
    creates the whole user connected graph
    parameters:
    repos - comma separated repository ids
    counter - int value
    """
    unExploredUserQueue = Queue()  # queue for maintaining the unexplored users
    ExploredUserMap = {}  # map for maintaining the explored users

    global userMap, userCount, userIndexFile
    usersSet = getUsers(repos)
    print len(usersSet)

    userConnectedGraph = nx.DiGraph()
    userConnectedGraph.add_nodes_from(usersSet)
    try:
        for user in usersSet:
            if user not in userMap:
                userMap[user] = userCount
                userIndexFile.write(user + " " + str(userCount))
                userIndexFile.write("\n")
                userCount += 1
            unExploredUserQueue.put(user)
            createUserConnectedWholeGraph(userConnectedGraph, unExploredUserQueue, ExploredUserMap)
        gp.processGraph(userConnectedGraph)
    except Exception as e:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print (exc_type, fname, exc_tb.tb_lineno)
        raise  # re-raise with the original traceback
Example #3
File: Sender.py Project: YUESS/bCNC
	def __init__(self):
		# Global variables
		self.history     = []
		self._historyPos = None
		CNC.loadConfig(Utils.config)
		self.gcode = GCode()
		self.cnc   = self.gcode.cnc

		self.log         = Queue()	# Log queue returned from GRBL
		self.queue       = Queue()	# Command queue to send to GRBL
		self.pendant     = Queue()	# Command queue to be executed from Pendant
		self.serial      = None
		self.thread      = None
		self.controller  = Utils.CONTROLLER["Grbl"]

		self._posUpdate  = False	# Update position
		self._probeUpdate= False	# Update probe
		self._gUpdate    = False	# Update $G
		self._update     = None		# Generic update

		self.running     = False
		self._runLines   = 0
		self._stop       = False	# Raise to stop current run
		self._quit       = 0
		self._pause      = False	# machine is on Hold
		self._alarm      = True		# Display alarm message if true
		self._msg        = None
		self._sumcline   = 0
		self._lastFeed   = 0
		self._newFeed    = 0
Example #4
def startForCreatingWholeGraph(repos, counter):
    """
    creates the whole user connected graph
    parameters:
    repos - comma separated repository ids
    counter - int value
    """
    unExploredUserQueue = Queue() #queue for maintaining the unexplored users
    ExploredUserMap = {} #map for maintaining the explored users

    usersSet = getUsers(repos)
    print len(usersSet)
    
    userConnectedGraph = nx.DiGraph()
    #userConnectedGraph = nx.Graph()
    try:
    #add the users to the unExploredQueue
        for user in usersSet:
            unExploredUserQueue.put(user)
            createUserConnectedWholeGraph(userConnectedGraph,unExploredUserQueue, ExploredUserMap)
            
        calculateEigenCentrality(userConnectedGraph, counter)
        calculateDegreeCentrality(userConnectedGraph, counter)
        #calculateClosenessCentrality(userConnectedGraph, counter)
          
        filesList = []
        filesList.append('eigenCentrality' + str(counter))
        filesList.append('degreeCentrality' + str(counter))
        #filesList.append('closenessCentrality'+str(counter))
        plotgraphForCentralities(path, filesList)
    except Exception as e:
        print e
Example #5
File: Node.py Project: dntfg4/MST6604
    def receive_token(self, token):
        self.__token = token
        self.token_node()
        if self.is_proxy():
            q = Queue()
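            # q would collect requests to re-queue for the next token pass;
            # the re-queue branch below is commented out, so it stays empty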
            while not self.__request.empty():
                request = self.__request.get(False)
                #if request[2] < self.__token.get_counter():
                if request[0] is self:
                    child_node = self.get_child_node(request[1])
                    if child_node is not None:
                        print "Proxy MSS %d grants token to MH %d via MSS %d" % (self.__name, request[1].get_name(), child_node.get_name())
                        child_node.process_mh_token(self.__token, request)
                    else:
                        print "Proxy MSS %d cannot find MH %d...request dropped" % (self.__name, request[1].get_name())
                elif request[0] is not None:
                    request[0].process_proxy_token(self.__token, request)
                # else:
                #     q.put(request)

            if not q.empty():
                del self.__request
                self.__request = q
            else:
                del q
Example #6
class pr_subscriber(Thread):
    def __init__(self, top = None, loc = "tcp://localhost:5556"):
        Thread.__init__(self)
        self.loc = loc
        self.top = top
        self.messages = Queue()
        self.listen = True
    def run(self):
        context = zmq.Context()
        socket = context.socket(zmq.SUB)
        socket.connect(self.loc)
        if self.top:
            socket.setsockopt(zmq.SUBSCRIBE, self.top)
        while self.listen:
            string = socket.recv()
            self.messages.put(string)
    def poll(self):
        res = None
        try:
            res = self.messages.get_nowait()
        except Empty:
            return res
        return pr_message.decode(res)
    def kill(self):
        self.listen = False
Example #7
class AsyncImageGrabber(threading.Thread):
  def __init__(self, url, interval, size=3):
    super(AsyncImageGrabber, self).__init__()
    self.queue = Queue(size)  # bounded frame buffer; the size argument was previously ignored
    self.imageGrabber = ImageGrabber(url)
    self.running = True
    self.interval = interval

  # def initialize(self, filename, interval, resolution):
  #   self.recorder.initialize(filename, interval, resolution)

  def clean(self):
    self.running = False
    # while not self.queue.empty():
    #   self.queue.get()
    #   self.queue.task_done()
    # self.queue.join()

  def get(self):
    image = self.queue.get()
    self.queue.task_done()
    return image

  def run(self):
    while self.running:
      try:
        # image = imageGrabber.getFrame()
        image = self.imageGrabber.getFrame()
      except Exception as e:
        logging.error('Failed to grab image due to exception(%s)' % (str(e),))
        self.running = False
        break
      # self.queue.put(self.imageGrabber.getFrame())
      self.queue.put(image)
      time.sleep(self.interval)
Example #8
File: main.py Project: Jablons/spider
def main():
    start = {}
    start['url'] = "http://www.hao123.com"
    logop = {}
    logop['logfile'] = "log.txt"#uop['logfile']
    logop['loglevel'] = "INFO"#uop['level']
    global logger
    logger = getlog(logop)  # build the log object
    global db
    db = dbhand(logger)
    dbfile = "todayb.db"
    db.dbconnect(dbfile)
    db.initdatabase()  # initialize the database tables
    db.selecturls2()
    db.insertone(start,'urls')
    queue = Queue()
    threadnumber = 30
    lock = threading.Lock()
    for i in range(threadnumber):  # initialize the thread pool
        t1 = multigeturl(queue,'urlt_'+str(i),logger,lock)
        t1.setDaemon(True)
        t1.start()
    allurl = db.selecturls2()

    while True:
        while len(allurl) > 0:

            t = allurl.pop()
            queue.put(t)
        allurl = db.selecturls2()
        queue.join()
Example #9
def test_lock(processes=10, lock=Lock(), queue=None): 

    print_result = False 
    if queue is None:
        print_result = True 
        queue = Queue()

    procs = [] 
    for i in xrange(processes): 
        procs.append(Process(target=test_lock_process, args=(lock, i, queue,))) 

    start = time.time()
    for t in procs: 
        t.start() 
    for t in procs: 
        t.join() 
    end = time.time()
    
    print end - start
    res = [(None, 0, None)]
    i = 0
    if print_result: 
        try: 
            while True:
                res.append(queue.get(block=False))
                if res[-1][0] == res[-2][0]:                    
                    sys.stderr.write("{}:\n".format(i))
                    for r in res[-2 ** 2:]:
                        sys.stderr.write("{}\n".format(r))
                    sys.stderr.write("\n")
                i += 1
        except Empty: 
            sys.stderr.write("{}:".format(i))
Example #10
File: Sender.py Project: Sci33/bCNC
	def __init__(self):
		# Global variables
		self.history     = []
		self._historyPos = None
		CNC.loadConfig(Utils.config)
		self.gcode = GCode()
		self.cnc   = self.gcode.cnc
		self.wait  = False	# wait for commands to complete

		self.log         = Queue()	# Log queue returned from GRBL
		self.queue       = Queue()	# Command queue to send to GRBL
		self.pendant     = Queue()	# Command queue to be executed from Pendant
		self.serial      = None
		self.thread      = None

		self._posUpdate  = False
		self._probeUpdate= False
		self._gUpdate    = False
		self.running     = False
		self._runLines   = 0
		self._stop       = False	# Raise to stop current run
		self._quit       = 0
		self._pause      = False	# machine is on Hold
		self._alarm      = True
		self._msg        = None
		self._update     = None
Example #11
class WorkManager():

    def __init__(self, work_count = 10, timeout = 0, is_daemon = True):
        self.work_queue = Queue()
        self.result_queue = Queue()
        self.works = []
        # keep the constructor arguments instead of discarding them
        self.timeout = timeout
        self.is_daemon = is_daemon
        self.__init_workers__(work_count)

    def __init_workers__(self, work_count):
        for i in range(work_count):
            work = Work(self.work_queue, self.result_queue, self.is_daemon, self.timeout)
            self.works.append(work)

    def add_worker(self, callback, *args, **kwds):
        # store a (callback, args, kwargs) work item
        self.work_queue.put((callback, args, kwds))

    def start(self):
        for work in self.works:
            work.start()

    def get_result(self):
        return self.result_queue

    def wait_for_complete(self):
        while len(self.works):
            w = self.works.pop()
            w.join()
            if w.isAlive() and not self.work_queue.empty():
                self.works.append(w)

        print 'all done'
Example #12
def run_skink_server():
    root_dir = abspath(dirname(__file__))
    server = Server(root_dir=root_dir)
    server.build_dir = join(root_dir, "ci_tmp")

    server.subscribe('on_user_authentication_failed', on_user_authentication_failed_handler)

    server.context.current_project = None
    server.context.current_command = None
    server.context.current_log = ""
    server.context.build_queue = Queue.deque()
    server.context.projects_being_built = Queue.deque()

    builder = BuilderPlugin(cherrypy.engine, server)
    builder.subscribe()

    monitor = MonitorPlugin(cherrypy.engine, server)
    monitor.subscribe()

    try:
        server.start("config.ini", non_block=True)
        for plugin in SkinkPlugin.all():
            config = server.context.settings.config
            if config.has_section(plugin.__name__) and \
               config.get(plugin.__name__, "enabled") == "True":
                cherrypy.log('Plugin %s enabled!' % plugin.__name__,
                             'PLUGINS')
                instance = plugin(server)

        cherrypy.engine.block()
    except KeyboardInterrupt:
        server.stop()
Example #13
    def getSolarSystems(self, solarSystems, deuteriumSourcePlanet = None): # solarSystems is an iterable of (galaxy, solarSystem) tuples
        if deuteriumSourcePlanet:
            self.goToPlanet(deuteriumSourcePlanet)
        threads = []
        inputQueue = Queue()
        outputQueue = Queue()
        for galaxy, solarSystem in solarSystems:
            params = {'session':self.session, 'galaxy':galaxy, 'system':solarSystem }
            url = "http://%s/game/index.php?page=galaxyContent&ajax=1&%s" % (self.config.webpage, urllib.urlencode(params))
            inputQueue.put((galaxy,solarSystem,url))

        # Each day, all the new inactive planets appear at midnight, usually with plenty
        # of resources, as nobody has attacked them yet. In order to find and
        # attack them before others do, spawn a thread per 50 solar systems to scan:
        for dummy in range((len(solarSystems) / 50) +1 ):
            thread = ScanThread(inputQueue, outputQueue, self.opener)
            thread.start()
            threads.append(thread)
 

        found = []
        while True:
            try:
                output = outputQueue.get(True,1)
                if output not in found:
                    found.append(output)
                    yield output
            except Empty:
                if not filter(threading.Thread.isAlive, threads):
                    break
                
        for thread in threads:
            if thread.exception:
                raise thread.exception  
Example #14
def call_external(method, cpu_seconds = None):
    queue = None
    call_child = None
    track_child = None

    try:
        queue = multiprocessing.Queue()
        call_child = ExternalCall(method, queue)

        call_child.start()

        track_child = ExternalCall(call_child.pid, queue)

        track_child.start()

        try:
            # wait up to cpu_seconds for a result; block indefinitely when None
            return queue.get(timeout = cpu_seconds)
        except Queue.Empty:
            # Empty lives on the Queue module (Python 2), not on the queue instance
            return None
    finally:
        if queue is not None:
            queue.close()
            queue.join_thread()
        for child in (call_child, track_child):
            if child is not None and child.is_alive():
                child.terminate()
                child.join()
Example #15
File: pong.py Project: k-freeman/IRCbot
class pong(object):
    def __init__(self):
        self.queue_in=Queue()
        self.queue_out=Queue()
        thread.start_new_thread(self.run,())
        self.resttime=0
        self.lastcmd=0
    
    def run(self):
        while 1:
            recv=self.queue_in.get()
            try:
                _, msg_header, msg_payload = recv.split(":",2)
                identification, msg_type, msg_receiver = msg_header.strip(" ").split(" ")
                sender=identification.split("!")
                
                if msg_payload[0]=="\x01" and msg_payload[1:6]=="PING ":
                    self.queue_out.put("NOTICE %s :%s\n"%(sender[0],msg_payload))
                    self.lastcmd=time.time()
                
            except IndexError:
                print "IndexError"
                pass
            except ValueError: # no normal channel/private message
                print "ValueError"
                pass

    def cmd(self,msg):
        self.queue_in.put(msg)
Example #16
File: ifi.py Project: k-freeman/IRCbot
 def __init__(self):
     self.queue_in=Queue()
     self.queue_out=Queue()
     thread.start_new_thread(self.run,())
     self.h=HTMLParser.HTMLParser()
     self.resttime=10
     self.lastcmd=0
Example #17
File: abb0t.py Project: k-freeman/IRCbot
    def __init__(self, nick):
        self.queue_in = Queue()
        self.queue_out = Queue()
        thread.start_new_thread(self.run, ())
        self.resttime = 0
        self.lastcmd = 0
        self.nick = nick
        self.logfile = "msglogabb0t.gz"
        self.blacklist = ["bottle", "Abb0t", "Ezrael"]
        self.msgblacklistfile = "msgblacklist.gz"
        self.msgblacklist = gzip.open(self.msgblacklistfile).read().lower().strip("\n").split("\n")
        self.ziplines = gzip.open(self.logfile).read().strip("\n").split("\n")
        self.all_msgs = []
        tmp = []
        for line in self.ziplines:
            l = line
            l = l.lower().strip("\t\n \r,").split(";", 2)
            if l[1] not in self.blacklist and len(l[2]) > 0 and l[2][0] != "!":
                try:
                    self.all_msgs.append(abb0t.de.stemWord(l[2].decode("utf-8")))
                except UnicodeDecodeError:
                    self.all_msgs.append(abb0t.de.stemWord(l[2].decode("iso-8859-1")))
                tmp.append(line)
        self.ziplines = tmp

        # self.vectorizer = TfidfVectorizer(min_df=1)#CountVectorizer(min_df=1)
        self.vectorizer = CountVectorizer(min_df=1)
        self.X = self.vectorizer.fit_transform(self.all_msgs)
Example #18
File: log.py Project: k-freeman/IRCbot
class log(object):
    
    def __init__(self):
        self.queue_in=Queue()
        self.queue_out=Queue()
        thread.start_new_thread(self.run,())
    
    def run(self):
        while 1:
            recv=self.queue_in.get()
            try:
                _, msg_header, msg_payload = recv.split(":",2)
                identification, msg_type, msg_receiver = msg_header.strip(" ").split(" ")
                sender=identification.split("!")
                
                if msg_payload and msg_receiver[0]=="#":
                    with gzip.open("msglog.gz","a+") as log:
                        #print "logged:",msg_payload
                        log.write(str(time.time())+";"+sender[0]+";"+msg_payload+"\n")

                    
                
            except IndexError:
                print "IndexError"
                pass
            except ValueError: # no normal channel/private message
                print "ValueError"
                pass

    def cmd(self,msg):
        self.queue_in.put(msg)
Example #19
def new_queue(serialized_queue=None):
    if not serialized_queue:
        serialized_queue = mxcube.empty_queue
    queue = pickle.loads(serialized_queue)
    import Queue
    Queue.init_signals(queue)
    return queue
Example #20
File: test_job.py Project: qq18436558/rq
 def test_create_and_cancel_job(self):
     """test creating and using cancel_job deletes job properly"""
     queue = Queue(connection=self.testconn)
     job = queue.enqueue(fixtures.say_hello)
     self.assertEqual(1, len(queue.get_jobs()))
     cancel_job(job.id)
     self.assertEqual(0, len(queue.get_jobs()))
Example #21
class ReservationManager:
    """ Manages reservations for a movie theater. """

    def __init__(self):
        """ Creates a new instance of the reservation manager. """
        self.current_id = 0
        self.requests = Queue()
        self.all_reservations = SwapList(ArrayList())

    def queue_reservation(self, request):
        """ Queues a reservation for a showtime for processing. """
        self.requests.enqueue(request)

    def process_reservations(self):
        """ Processes all queued reservations and returns a read-only list containing the newly made reservations. """
        results = ArrayList()
        while not self.requests.is_empty:
            request = self.requests.dequeue()
            reserv = request.showtime.make_reservation(self.current_id, request)
            if reserv is not None:
                self.current_id += 1
                self.all_reservations.add(reserv)
                results.add(reserv)
        return results

    @property
    def reservations(self):
        """ Gets the movie theater's processed and accepted reservations. """
        return self.all_reservations
Example #22
    def generatePathWithBFS(self, tank, graph, start = 1, goal = 2):
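        # NOTE: despite the BFS name, this is Dijkstra's algorithm -- each pass
        # removes the unvisited node with the smallest tentative distance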
        dist = []
        prev = []
        Q = []
  
        for i in xrange(len(graph)):            #// Initialization
            dist.append(float('Inf'))           #// Unknown distance from source to v
            prev.append(None)                   #// Previous node in optimal path from source
            Q.append(i)                         #// All nodes initially in Q (unvisited nodes)

        dist[start] = 0                        #// Distance from source to source
        
        while Q:
            dist_u, u = min((val, idx) for (idx, val) in enumerate(dist) if idx in Q)
            Q.remove(u)
            if u == goal:
                return self.convertBFSPath(u, prev, graph)

            for neighbor in xrange(1,len(graph)):           #// where v is still in Q.
                if graph[u][neighbor] == 0:
                    continue
                alt = dist_u + distance(graph[0][u], graph[0][neighbor])
                if alt < dist[neighbor]:               #// A shorter path to v has been found
                    dist[neighbor] = alt 
                    prev[neighbor] = u 
                    print self.convertBFSPath(u, prev, graph)

        raise Exception('Shortest Path not Found')
Example #23
def threadload(f, directory, queue):
    p = subprocess.Popen('elm-doc ' + os.path.relpath(f, directory), stdout=subprocess.PIPE, cwd=directory, shell=True)
    output = p.communicate()[0].strip() or 'Could not document: ' + f
    # print output
    if output.startswith('Could not document: '):
        p = subprocess.Popen('elm -mo --print-types ' + os.path.relpath(f, directory), stdout=subprocess.PIPE, cwd=directory, shell=True)
        output = p.communicate()[0].strip() or 'Could not compile: ' + f
        types = parse_print_types(output)
        raw_values = []
        with open(f) as module:
            first_line = module.readline()
            if first_line.startswith('module ') and first_line.endswith(' where'):
                module_name = first_line.split()[1]
                module_name = module_name.split('(')[0]
            else:
                module_name = os.path.split(f)[1][:-4]
        for t in types:
            try:
                if not name(t).startswith('Could not compile: '):
                    if name(t).startswith(module_name):
                        raw_values.append(t[len(module_name)+1:])
            except IndexError:
                pass
        data = {'name': module_name, 'document': '', 'aliases': [], 'datatypes': [], 'values': [{'raw': t} for t in raw_values]}
        queue.put(data)
Example #24
def runTask(task):
    global config
    try:
        exitCode, taskStartDate = 0, datetime.datetime.now()
        # print(datetime.datetime.now()," - INFO - ","runTask for task:  ", task.taskName ," STARTING STEPS: ", task.start)
        print(datetime.datetime.now()," - INFO - ","runTask for task:  ", task.taskName , " STARTING STEPS: ", task.start,file = log)
        if isinstance(task.start, dict):
            allStartingSteps = []
            for mergeStep, startSteps in task.start.iteritems():
                allStartingSteps += startSteps
            if (len(allStartingSteps) > 1):
                branchQueue = Queue()
                for branchName in allStartingSteps:
                    branchQueue.put(task.branchesDict[branchName])
                exitCode = forkBranches(task, branchQueue)
            elif (len(allStartingSteps) == 1):
                branchName = allStartingSteps[0]
                branch = task.branchesDict[branchName]
                exitCode = getStep(task, branch)
        if exitCode is None : exitCode = -1
        print(datetime.datetime.now()," - INFO - task:  ", task.taskName , " execution time is: ", datetime.datetime.now() - taskStartDate ,file = log)
    except:
        exitCode = -1
        e = sys.exc_info() 
        print(datetime.datetime.now()," - ERROR - ","Exception occurred in runTask for task:  ", task.taskName , " error: ", e)
        print(datetime.datetime.now()," - ERROR - ","Exception occurred in runTask for task:  ", task.taskName , " error: ", e ,file = log)
        return exitCode
    return exitCode 
Example #25
def load_dependency_docs(name):
    try:
        directory = os.path.split(name)[0]
        try:
            data = []
            source_files = []
            for root, dirs, files in os.walk(directory):
                for filename in files:
                    if filename.lower().endswith('.elm'):
                        f = os.path.join(root, filename)
                        if not '_internals' in f:
                            source_files.append((f, filename))
            # use a distinct name: "queue = queue.Queue()" shadows the queue
            # module and raises UnboundLocalError
            task_queue = queue.Queue()
            global POOL
            threads = [POOL.add_task(threadload, f[0], directory, task_queue) for f in source_files]
            POOL.wait_completion()
            modules = []
            while True:
                if not task_queue.empty():
                    modules.append(task_queue.get())
                else:
                    break
            for root, dirs, files in os.walk(directory):
                for filename in files:
                    if filename.lower().endswith('.json') and filename in [f[1][:-4] + '.json' for f in source_files]:
                        x = os.path.join(root, filename.lower())
                        if '_internals' not in x:
                            with open(x) as f:
                                data.append(json.load(f))
            return modules + data
        except KeyError:
            return []
    except:
        return []
Example #26
class Stack_from_Queues(object):
	def __init__(self):
		self.in_queue = Queue()
		self.out_queue = Queue()
		self.length = 0


	def push(self, element):
		self.in_queue.enqueue(element)
		self.length += 1


	def pop(self):
		# move all but the last element to out_queue; the element left behind
		# is the most recently pushed one (the stack top)
		while(self.in_queue.get_size() > 1):
			element = self.in_queue.dequeue()
			if (element is not None):
				self.out_queue.enqueue(element)

		if (self.in_queue.get_size() == 1):
			self.length -= 1
			top = self.in_queue.dequeue()
			# swap the queues so the remaining elements are back in in_queue;
			# without this swap, consecutive pops came out in FIFO order
			self.in_queue, self.out_queue = self.out_queue, self.in_queue
			return top


	def is_empty(self):
		return self.length <= 0


	def __repr__(self):
		return "IN: " + str(self.in_queue) + "\nOUT: " + str(self.out_queue)
Example #27
 def _dbThread(self, queue):
     while 1:
         op = queue.get()
         reset_queries()
         
         
         if op is None:
             close_connection()
             queue.task_done()
             return
         
         func, args, kwargs, d, finished = op
             
         start = datetime.datetime.now()
         try:
             result = d.callback, func(*args, **kwargs)
         except:
             _rollback_on_exception()
             result = d.errback, Failure()
         delta = datetime.datetime.now() - start
         queries = ''
         if delta.total_seconds() > 0.5:
             q = []
             for conn in connections.all():
                 q.extend(conn.queries)
             queries = ': QUERIES: %r' % (q,)
             log.msg('Query took too long %s on thread %s queue %s: func =\n %r queries =\n %s' % (delta, threading.currentThread().getName(), queue.qsize(), repr.repr((func.__module__, func.func_name, args, kwargs,)), queries[:1024],))
         finished(*result)
         queue.task_done()
Example #28
 def __importTemple( self, temple, project, override = False ):
     fqueue = Queue( 0 )	# Queue() only takes a maxsize; seed it with put() instead
     fqueue.put( temple )
     temple = altsep( temple )
     project= altsep( project )
     while True:
         if fqueue.empty(): break
         src_dir = altsep( fqueue.get() )
         des_dir = src_dir.replace( temple, project )
         items = os.listdir( src_dir )
         message( "directory:" + src_dir, c = DEBUG )
         for item in items:
             src = os.path.join( src_dir, item )
             des = os.path.join( des_dir, item )
             message( "file: " + src, c = DEBUG  )
             if os.path.isdir( src ):
                 if not os.path.isdir( des ):
                     os.makedirs( des )
                     message( "create directory: " + des, c = DEBUG  )
                 fqueue.put( src )
             else:
                 if not os.path.isfile( des ) or override:
                     shutil.copy2( src, des )
                     message( "copy file: " + src, c = DEBUG  )
                 else:
                     message( "ignore file:" + src, c = DEBUG )
def testQueue():
	print("Testing Queue")
	queue=Queue()
	for i in range(1,16):
		queue.enqueue(i)
	while (queue.checkSize() != 0):
		print(queue.dequeue())
Example #30
 def on_packet(self, header, payload):
     client_ref = header.get('client_ref', '')
     queue_id, call_id = client_ref.split(':', 1)
     queue_id = int(queue_id)
     #queue_id = header.get('queue_id', '')
     queue = self.get_thread_queue(queue_id)
     queue.put((header, payload))
Example #31
import Queue
import threading
import os
import gzip
import sys

import common
import storages
from storages import *

import filecmp
import shutil

storagelist = storages.__all__

storageQueue = Queue.Queue()


class StorageThread(threading.Thread):
    '''Class of storage-related threads.'''
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
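        # each queued item is a (method_name, args) tuple; dispatch it to the
        # matching method on this thread by name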
        while True:
            method, args = self.queue.get()
            getattr(self, method)(*args)
            self.queue.task_done()

    def syncMirror(self, setting, nodeid, path):
Example #32
File: server.py Project: chrisayoub/Mesh
class myHandler(BaseHTTPRequestHandler):
	
	#Handler for the GET requests
	def do_GET(self):
		self.send_response(200)
		self.end_headers()

		if self.path == '/start':
			self.startPing()
			return
		elif self.path == '/stop':
			self.stopPing()
			return

		# Send the html message
		# This is for returning JSON for device signal strength
		data = str(os.popen(CMD).read()).strip()
		if data == '':
			return
		lines = data.split('\n')

		result = {}

		for i in range(0, len(lines), 2):
			dev = lines[i]
			signal = lines[i + 1]
			devMac = dev.split(' ')[1]
			signalVal = re.compile("[ \t]").split(signal)[4]
			signalVal = int(signalVal)
			result[devMac] = signalVal

		self.wfile.write(json.dumps(result))
		return

	q = Queue.Queue()
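	# shared across handler instances: a token sitting in q means the ping loop is active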

	def startPing(self):
		if self.q.empty():
			print('Start pinging')
			self.q.put(0)
			thr = threading.Thread(target=self.beginPing, args=[self.q])
			thr.start()

	def beginPing(self, q):
		# First, get IP of client (assume only one client)
		IP_CMD = 'cat /var/lib/misc/dnsmasq.leases'
		data = str(os.popen(IP_CMD).read()).strip()
		if data == '':
			return
		ip = data.split(' ')[2]

		PING_CMD = 'ping -c 1 -W 1 ' + ip
		print(PING_CMD)
		# It takes about 10 ms to do a local ping
		# Do this at 250 ms rate, lower than 100 ms RTT from central
		TIME = 10
		TGT = 250
		RATE = TGT - TIME
		while not q.empty(): # This value should be changed, hopefully
			# print('Ping!')
			os.popen(PING_CMD)
			time.sleep(RATE / 1000.0) # Milliseconds

	def stopPing(self):
		if not self.q.empty():
			print('Done pinging!')
			self.q.get()
Example #33
 def stage_load_dlrm(run_q, batch_q, qid, stop, sls_type, lru, libFlashRec, sparse_feature_size):
     while True:
         try:
             batch = batch_q.get(block=False)
             #print("Loading batch " + str(batch))
             listResult = slsload(sls_type, lru, libFlashRec, sparse_feature_size, lX[batch], lS_l[batch], lS_i[batch], qid)
             run_q.put((batch, listResult))
             batch_q.task_done()
         except Queue.Empty:
             if stop():
                 break
             else:
                 continue
 stop_workers = False
 run_q = Queue.Queue()
 batch_q = Queue.Queue()
 for i in xrange(run_instances):
     run_worker = Thread(target = stage_run_dlrm,
             args = (dlrm_run_instances[i], run_q, (lambda : stop_workers)))
     run_worker.setDaemon(True)
     run_worker.start()
 for i in xrange(load_instances):
     load_worker = Thread(target = stage_load_dlrm,
             args = (run_q, batch_q, i, (lambda : stop_workers),
                 args.sls_type, lru, libFlashRec, args.arch_sparse_feature_size))
     load_worker.setDaemon(True)
     load_worker.start()
 for k in xrange(args.nepochs):
     for i in xrange(nbatches):
         batch_q.put(i)
Example #34
if len(sys.argv) == 1:
    print banner
    print "[*] bypass waf"
    print "[*] python {0} url <proxy_ip> <webpath>".format(sys.argv[0])
    exit(1)

TARGET = sys.argv[1]
IPFILES = 'output/proxy_ip.txt'
WEBPATH = sys.argv[2] if len(
    sys.argv) == 3 else r"e:\Tools\PassList\Webpath\fuckyou.txt"

with open(IPFILES, 'r') as f:
    proxy_ip_list = [line.rstrip() for line in f.readlines()]

web_file = Queue.Queue()

with open(WEBPATH, 'r') as f:
    for line in f.readlines():
        web_file.put(line.rstrip())


def scan(web_file):
    HEADERS = {'user-agent': random.choice(agents_list)}
    proxy_ip = random.choice(proxy_ip_list)
    url = TARGET + '/' + web_file

    try:
        r = requests.get(url,
                         headers=HEADERS,
                         timeout=10,
Example #35
            pthreads = int(pthreads)
            pm_args.pop(pm_args.index('-pt') + 1)
            pm_args.pop(pm_args.index('-pt'))
        if '-sf' in pm_args:
            start_frame = pm_args[pm_args.index('-sf') + 1]
            if start_frame[0] == '-' or not start_frame.isdigit():
                print "ERROR: Missing or invalid start frame number (-sf option)"
                sys.exit()
            start_frame = int(start_frame)
            pm_args.pop(pm_args.index('-sf') + 1)
            pm_args.pop(pm_args.index('-sf'))

    start = datetime.today()
    # build the queue for the processing threads
    checklist = {}
    queue = Queue.Queue()
    for path in paths:
        # convert the path to absolute
        abs_path = os.path.abspath(path)

        # search for cr2 files
        print "Scanning path for CR2 files: %s" % path
        search = find_cr2_images(abs_path)

        # iterate through the found images
        for source_path in sorted(search):
            images = search[source_path]
            # add this path and the number of images
            # to the checklist
            checklist[source_path] = len(images)
Example #36
    def __init__(self, pymol_instance, skin):

        # prevent overloading
        self.initializePlugins = self._initializePlugins

        self.allow_after = 1  # easy switch for troubleshooting threads

        self.pymol = pymol_instance

        if self.pymol._ext_gui != None:

            raise RuntimeError  # only one PMGApp should ever be instantiated

        else:

            # create a FIFO so that PyMOL can send code to be executed by the GUI thread

            self.fifo = Queue.Queue(0)

            # create a pymol global so that PyMOL can find the external GUI

            self.pymol._ext_gui = self

            self.skin = None

            # initialize Tcl/Tk

            self.root = Tk()  # creates the root window for the application

            # color scheme

            self.initializeTk_colors_common()

            # operating-system dependencies

            self.initOS()

            # Pmw (Python megawidget) initialization

            Pmw.initialise(self.root)

            # Initialize the base class

            Pmw.MegaWidget.__init__(self, parent=self.root)

            # read the command line arguments regarding:
            # - the size of the root window
            # - the skin to use

            inv = sys.modules.get("pymol.invocation", None)
            if inv != None:
                if skin == None:
                    skin = inv.options.skin
                self.frameWidth = inv.options.win_x + 220
                self.frameXPos = inv.options.win_px - self.frameXAdjust
                self.frameHeight = inv.options.ext_y
                self.frameYPos = inv.options.win_py - (self.frameHeight +
                                                       self.frameYAdjust)
                self.setSkin(skin, run=0)

            # define the size of the root window

            import platform
            if sys.platform == 'darwin' and platform.mac_ver()[0] >= '10.9':
                # let OS X Maverics place the window automatically, to avoid
                # off-screen placement in multi-monitor setup
                self.root.geometry('%dx%d' %
                                   (self.frameWidth, self.frameHeight))
            else:
                self.root.geometry('%dx%d+%d+%d' %
                                   (self.frameWidth, self.frameHeight,
                                    self.frameXPos, self.frameYPos))

            # activate polling on the fifo

            if self.allow_after:
                self.root.after(1000, self.flush_fifo)

            # and let 'er rip

            self.runSkin()
Example #37
 def run_in_thread(func):
   """Runs |func| in a parallel thread, returns future (as Queue)."""
   result = Queue.Queue()
   thread = threading.Thread(target=lambda: result.put(func()))
   thread.start()
   return result
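
A minimal usage sketch: the returned Queue.Queue behaves as a one-shot future, and get() blocks until the worker thread has stored func()'s return value:

future = run_in_thread(lambda: 21 * 2)
value = future.get()  # blocks until the result is available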
Example #38
 def __init__(self, work_num=1000, thread_num=2):
     self.work_queue = Queue.Queue()
     self.threads = []
     self.__init_work_queue(work_num)
     self.__init_thread_pool(thread_num)
Example #39
WORKERS = 2

class Worker(threading.Thread):
    def __init__(self, queue):
        self.__queue = queue
        threading.Thread.__init__(self)
    
    def run(self):
        while 1:
            item = self.__queue.get()
            if item is None:
                break # reached end of queue
            
            # pretend we're doing something that takes 10-100 ms
            time.sleep(random.randint(10, 100) / 1000.0)
            
            print "task", item, "finished"
#
# run with limited queue
queue = Queue.Queue(3)

for i in range(WORKERS):
    Worker(queue).start() # start a worker
    
for item in range(10):
    print "push", item
    queue.put(item)
    
for i in range(WORKERS):
    queue.put(None) # add end-of-queue markers
Example #40
	def __init__(self) :
		#setup the map with a sample queue.
		self.qMap = dict([('127.0.0.1', Queue.Queue())])
Example #41
 def __init__(self):
     self.yy = 0
     self.qq = Queue.Queue()
     self.qq.put((self.aa, 1))
     self.yy = 1
     print 'already used port'
Example #42
def main(argv):
    if not check_output('which ceph')[0]:
        logger.error("No 'ceph' command available. Run this script from node, which has ceph access")
        return

    # TODO: Logs from down OSD
    opts = parse_args(argv)
    res_q = Queue.Queue()
    run_q = Queue.Queue()

    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        out_folder = os.tempnam()

    os.makedirs(out_folder)

    setup_loggers(getattr(logging, opts.log_level),
                  os.path.join(out_folder, "log.txt"))

    global logger_ready
    logger_ready = True

    global SSH_OPTS
    SSH_OPTS = SSH_OPTS.format(opts.ssh_conn_timeout, opts.ssh_private_key)

    global USERNAME
    USERNAME = opts.username

    collector_settings = CollectSettings()
    map(collector_settings.disable, opts.disable)

    allowed_collectors = opts.collectors.split(',')
    collectors = []

    if CephDataCollector.name in allowed_collectors:
        ceph_collector = CephDataCollector(opts, collector_settings, res_q)
        collectors.append(ceph_collector)
    else:
        ceph_collector = None

    if NodeCollector.name in allowed_collectors:
        node_collector = NodeCollector(opts, collector_settings, res_q)
        collectors.append(node_collector)
    else:
        node_collector = None

    if NodeResourseUsageCollector.name in allowed_collectors:
        node_resource_collector = NodeResourseUsageCollector(opts, collector_settings, res_q)
    else:
        node_resource_collector = None

    if CephPerformanceCollector.name in allowed_collectors:
        if CephDataCollector.name not in allowed_collectors:
            logger.error("Can't collect performance info without ceph info collected")
            exit(1)
        else:
            ceph_performance_collector = CephPerformanceCollector(opts, collector_settings, res_q)
    else:
        ceph_performance_collector = None

    nodes = discover_nodes(opts)
    nodes['master'][None] = [{}]

    for role, nodes_with_args in nodes.items():
        if role == 'node':
            continue
        logger.info("Found %s hosts with role %s", len(nodes_with_args), role)
        logger.info("Found %s services with role %s",
                    sum(map(len, nodes_with_args.values())), role)

    logger.info("Found %s hosts total", len(nodes['node']))

    good_hosts = set(get_sshable_hosts(nodes['node'].keys()))
    bad_hosts = set(nodes['node'].keys()) - good_hosts

    if len(bad_hosts) != 0:
        logger.warning("Next hosts aren't awailable over ssh and would be skipped: %s",
                       ",".join(bad_hosts))

    res_q.put((True, "bad_hosts", 'json', json.dumps(list(bad_hosts))))

    new_nodes = collections.defaultdict(lambda: {})

    for role, role_objs in nodes.items():
        if role == 'master':
            new_nodes[role] = role_objs
        else:
            for node, args in role_objs.items():
                if node in good_hosts:
                    new_nodes[role][node] = args

    nodes = new_nodes

    # collect data at the beginning
    if node_resource_collector is not None:
        for node, _ in nodes['node'].items():
            run_q.put((node_resource_collector.collect_node, "", node, {}))

    for role, nodes_with_args in nodes.items():
        for collector in collectors:
            if hasattr(collector, 'collect_' + role):
                coll_func = getattr(collector, 'collect_' + role)
                for node, kwargs_list in nodes_with_args.items():
                    for kwargs in kwargs_list:
                        run_q.put((coll_func, "", node, kwargs))

    save_results_thread = threading.Thread(target=save_results_th_func,
                                           args=(opts, res_q, out_folder))
    save_results_thread.daemon = True
    save_results_thread.start()

    t1 = time.time()
    try:
        run_all(opts, run_q)

        # collect data at the end
        if node_resource_collector is not None:
            dt = opts.usage_collect_interval - (time.time() - t1)
            if dt > 0:
                logger.info("Will wait for {0} seconds for usage data collection".format(int(dt)))
                for i in range(int(dt / 0.1)):
                    time.sleep(0.1)
            logger.info("Start final usage collection")
            for node, _ in nodes['node'].items():
                run_q.put((node_resource_collector.collect_node, "", node, {}))
            run_all(opts, run_q)

        if ceph_performance_collector is not None:
            logger.info("Start performace monitoring.")
            with ceph_collector.osd_devs_lock:
                osd_devs = ceph_collector.osd_devs.copy()

            per_node = collections.defaultdict(lambda: [])
            for node, data_dev, j_dev in osd_devs.values():
                per_node[node].extend((data_dev, j_dev))

            # start monitoring
            for node, data in per_node.items():
                run_q.put((ceph_performance_collector.start_performance_monitoring,
                          "", node, {'osd_devs': data}))
            run_all(opts, run_q)

            dt = opts.performance_collect_seconds
            logger.info("Will wait for {0} seconds for performance data collection".format(int(dt)))
            for i in range(int(dt / 0.1)):
                time.sleep(0.1)

            # collect results
            for node, data in per_node.items():
                run_q.put((ceph_performance_collector.collect_performance_data,
                          "", node, {}))
            run_all(opts, run_q)
    except Exception:
        logger.exception("When collecting data:")
    finally:
        res_q.put(None)
        # wait till all data collected
        save_results_thread.join()

    if opts.result is None:
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            out_file = os.tempnam() + ".tar.gz"
    else:
        out_file = opts.result

    check_output("cd {0} ; tar -zcvf {1} *".format(out_folder, out_file))
    logger.info("Result saved into %r", out_file)
    if opts.log_level in ('WARNING', 'ERROR', "CRITICAL"):
        print "Result saved into %r" % (out_file,)

    if not opts.dont_remove_unpacked:
        shutil.rmtree(out_folder)
    else:
        logger.info("Temporary folder %r", out_folder)
        if opts.log_level in ('WARNING', 'ERROR', "CRITICAL"):
            print "Temporary folder %r" % (out_folder,)
Example #43
import cgi
import sys
import json
import time
import urllib
import Queue
import traceback

recvQueue = Queue.Queue()
sendQueue = Queue.Queue()


# class to receive requests
class Receiver:

    # constructor
    def __init__(self, rank=None, nonMPIMode=False):
        if nonMPIMode:
            self.comm = None
            self.stat = None
            self.nRank = 0
            self.totalRanks = 1
            self.selectSource = None
        else:
            from mpi4py import MPI
            self.comm = MPI.COMM_WORLD
            self.stat = MPI.Status()
            self.nRank = self.comm.Get_rank()
            self.totalRanks = self.comm.Get_size()
            self.selectSource = MPI.ANY_SOURCE
Example #44
import os
from Queue import *
from threading import *
import glob

data = glob.glob(os.path.expanduser("~/apa/data.apa/*"))

num_worker_threads = 20
q = Queue()


def worker():
    while True:
        task = q.get()
        os.system(task)
        q.task_done()


# raw
tasks = []
for lib_id in data:
    lib_id = os.path.basename(lib_id)  # get lib_id from full path
    tasks.append(
        "pybio.bamclip -bam \"~/apa/data.apa/%s/*/*/*.bam\" -image ~/apa/data.apa/%s/%s_clipping"
        % (lib_id, lib_id, lib_id))

for i in range(num_worker_threads):
    t = Thread(target=worker)
    t.daemon = True
    t.start()

# the tasks were built but never enqueued; feed the workers and wait
for task in tasks:
    q.put(task)

q.join()
Example #45
    redispool = redis.ConnectionPool(host=Config.RedisIp, port=6379, db=0)
    redata = redis.Redis(connection_pool=redispool)
    redpipe = redata.pipeline()

    # check whether this date's data has already been processed
    if redata.hexists("checkdate", datadate):
        logger.error("The day the data has been processed")
        sys.exit(Config.EX_CODE_1)
    else:
        redata.hset("checkdate", datadate, 1)

    # append the current date to the global date list
    redata.rpush("date", datadate)

    # append the current date to uid_rdate
    queue = Queue.Queue(0)
    for i in range(Config.workers):
        worker_obj = HandleUserinfo(queue, logger, datadate, Config)
        worker_obj.setDaemon(True)
        worker_obj.start()

    user_list = list(redata.smembers("user_info"))
    for item in user_list:
        queue.put(item)

    queue.join()
    time.sleep(5)

    if isExists(logger, redpipe):
        for bvalue in redata.transaction(handleInit, "loadtable", "playtable",
                                         "endlist", "regiontable"):
 def __init__(self):
     self.pq = Queue.PriorityQueue()
     self.removed = set()
     self.count = 0
Example #47
 def __init__(self):
     _MockConnBase.__init__(self)
     self._queue = Queue.Queue()
Example #48
    templateF.close()

    o = open(os.path.join(tempPath, ".htaccess"), "w")
    accessFileF = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".htaccess"))
    accessFile = accessFileF.read()
    o.write(accessFile)
    o.close()
    accessFileF.close()

    #os.rmdir(tempPath)
    #os.system("tar cjf " + outFileBZ2 + " --totals " + " ".join(fileList))

    #os.chdir(originalPath)
    #print os.getcwd()

workQueue = Queue.Queue()

def worker():
    #while not workQueue.empty():
    while True:
        try:
            item = workQueue.get()
            if item.endswith("stats.gz") or item.endswith("stats.bz2"):
                #prof = cProfile.Profile().runctx("processStats(item)", globals(), locals())
                #stream = StringIO.StringIO()
                #stats = pstats.Stats(prof, stream=stream)
                #stats.sort_stats("time")  # Or cumulative
                #stats.print_stats()  # 80 = how many to print
                #print stream.getvalue()

                processStats(item)
Example #49
from dashboard.io_adafruit import ioAdafruitDash
import threading
import Queue
import time
import signal
import sys
import os  # needed for os.path.basename below
from other import utils

THSens = THSensor()
GyroAcc = MPU6050GryroAccSensor()
Disp = OLEDDisplay()
dashboard = ioAdafruitDash()
mAccEvent = AccEvents()
mEvent = evt
# queue
qTH = Queue.Queue(maxsize=1)
qGA = Queue.Queue(maxsize=1)
qHB = Queue.Queue(maxsize=1)
qSensorData = Queue.Queue(maxsize=1)
qEvents = Queue.Queue(maxsize=10)
# thread list
lThreadsID = []

TAG = os.path.basename(__file__)


def signalHandler(sig, frame):
    print 'You pressed ctrl+c'
    print lThreadsID
    if len(lThreadsID) == 0:
        sys.exit(0)
Example #50
 def __init__(self, grid):
     self.grid = grid
     self.q = Q.PriorityQueue()
Example #51
@app.route('/') 
def index():
#    return Response(out, mimetype='application/json')
    n = random.randint(0, 100)
    q.put(n)
    return '%s\n' % n


def worker():
    while True:
        item = q.get()
        if item is None:
            break
        print('Processing %s' % item)  # do the work e.g. update database
        cmd = ["sh","shodan.sh"]
        p = subprocess.Popen(cmd, stdout = subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdin=subprocess.PIPE)
        out,err = p.communicate()
        time.sleep(50)
        q.task_done()

if __name__ == "__main__" :
    q = Queue.Queue()
    t = Thread(target=worker)
    t.start()
    app.run(host='0.0.0.0', port=3333, debug=True)
    q.join()
    q.put(None)
    t.join()
Example #52
def main():
  parser = argparse.ArgumentParser(description='Runs clang-tidy over all files '
                                   'in a compilation database. Requires '
                                   'clang-tidy and clang-apply-replacements in '
                                   '$PATH.')
  parser.add_argument('-clang-tidy-binary', metavar='PATH',
                      default='clang-tidy',
                      help='path to clang-tidy binary')
  parser.add_argument('-clang-apply-replacements-binary', metavar='PATH',
                      default='clang-apply-replacements',
                      help='path to clang-apply-replacements binary')
  parser.add_argument('-checks', default=None,
                      help='checks filter, when not specified, use clang-tidy '
                      'default')
  parser.add_argument('-config', default=None,
                      help='Specifies a configuration in YAML/JSON format: '
                      '  -config="{Checks: \'*\', '
                      '                       CheckOptions: [{key: x, '
                      '                                       value: y}]}" '
                      'When the value is empty, clang-tidy will '
                      'attempt to find a file named .clang-tidy for '
                      'each source file in its parent directories.')
  parser.add_argument('-header-filter', default=None,
                      help='regular expression matching the names of the '
                      'headers to output diagnostics from. Diagnostics from '
                      'the main file of each translation unit are always '
                      'displayed.')
  parser.add_argument('-export-fixes', metavar='filename', dest='export_fixes',
                      help='Create a yaml file to store suggested fixes in, '
                      'which can be applied with clang-apply-replacements.')
  parser.add_argument('-j', type=int, default=0,
                      help='number of tidy instances to be run in parallel.')
  parser.add_argument('files', nargs='*', default=['.*'],
                      help='files to be processed (regex on path)')
  parser.add_argument('-fix', action='store_true', help='apply fix-its')
  parser.add_argument('-format', action='store_true', help='Reformat code '
                      'after applying fixes')
  parser.add_argument('-style', default='file', help='The style of reformat '
                      'code after applying fixes')
  parser.add_argument('-p', dest='build_path',
                      help='Path used to read a compile command database.')
  parser.add_argument('-extra-arg', dest='extra_arg',
                      action='append', default=[],
                      help='Additional argument to append to the compiler '
                      'command line.')
  parser.add_argument('-extra-arg-before', dest='extra_arg_before',
                      action='append', default=[],
                      help='Additional argument to prepend to the compiler '
                      'command line.')
  parser.add_argument('-quiet', action='store_true',
                      help='Run clang-tidy in quiet mode')
  parser.add_argument('-warnings-as-errors', default=None,
                      help="Upgrades warnings to errors. Same format as    "
                      "'-checks'.")
  args = parser.parse_args()

  db_path = 'compile_commands.json'

  if args.build_path is not None:
    build_path = args.build_path
  else:
    # Find our database
    build_path = find_compilation_database(db_path)

  try:
    invocation = [args.clang_tidy_binary, '-list-checks']
    invocation.append('-p=' + build_path)
    if args.checks:
      invocation.append('-checks=' + args.checks)
    invocation.append('-')
    subprocess.check_call(invocation)
  except:
    print("Unable to run clang-tidy.", file=sys.stderr)
    sys.exit(1)

  # Load the database and extract all files.
  database = json.load(open(os.path.join(build_path, db_path)))
  files = [make_absolute(entry['file'], entry['directory'])
           for entry in database]

  max_task = args.j
  if max_task == 0:
    max_task = multiprocessing.cpu_count()

  tmpdir = None
  if args.fix or args.export_fixes:
    check_clang_apply_replacements_binary(args)
    tmpdir = tempfile.mkdtemp()

  # Build up a big regexy filter from all command line arguments.
  file_name_re = re.compile('|'.join(args.files))

  return_code = 0
  try:
    # Spin up a bunch of tidy-launching threads.
    task_queue = queue.Queue(max_task)
    # List of files with a non-zero return code.
    failed_files = []
    for _ in range(max_task):
      t = threading.Thread(target=run_tidy,
                           args=(args, tmpdir, build_path, task_queue, failed_files))
      t.daemon = True
      t.start()

    # Fill the queue with files.
    for name in files:
      if file_name_re.search(name):
        task_queue.put(name)

    # Wait for all threads to be done.
    task_queue.join()
    if len(failed_files):
      return_code = 1

  except KeyboardInterrupt:
    # This is a sad hack. Unfortunately subprocess goes
    # bonkers with ctrl-c and we start forking merrily.
    print('\nCtrl-C detected, goodbye.')
    if tmpdir:
      shutil.rmtree(tmpdir)
    os.kill(0, 9)

  if args.export_fixes:
    print('Writing fixes to ' + args.export_fixes + ' ...')
    try:
      merge_replacement_files(tmpdir, args.export_fixes)
    except:
      print('Error exporting fixes.\n', file=sys.stderr)
      traceback.print_exc()
      return_code=1

  if args.fix:
    print('Applying fixes ...')
    try:
      apply_fixes(args, tmpdir)
    except:
      print('Error applying fixes.\n', file=sys.stderr)
      traceback.print_exc()
      return_code=1

  if tmpdir:
    shutil.rmtree(tmpdir)
  sys.exit(return_code)
Example #53
from FifteenPuzzle import FifteenPuzzle
from EightPuzzle import EightPuzzle
from Solution import solution

try:
    import Queue as Q  # ver. < 3.0
except ImportError:
    import queue as Q

frontier = Q.LifoQueue()
frontier_metrics = dict()
visited_hashes = dict()


class Node:
    def __init__(self, state, parent):
        self.parent = parent
        self.state = state
        self.G = 0
        self.metric = self.G + state.heuristic()
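        # expansion priority: accumulated path cost G plus the state's heuristic estimate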


def IDFS(limit):
    return RIDFS(limit)


def RIDFS(limit):
    if frontier.empty():
        return "failure"
    # get next un-visited node in stack
    u_node = frontier.get()[1]
Example #54
        except:
            erf = sys.__stderr__
            print>>erf, '\n' + '-'*40
            print>>erf, 'Unhandled server exception!'
            print>>erf, 'Thread: %s' % threading.currentThread().getName()
            print>>erf, 'Client Address: ', client_address
            print>>erf, 'Request: ', repr(request)
            traceback.print_exc(file=erf)
            print>>erf, '\n*** Unrecoverable, server exiting!'
            print>>erf, '-'*40
            os._exit(0)

#----------------- end class RPCServer --------------------

objecttable = {}
request_queue = Queue.Queue(0)
response_queue = Queue.Queue(0)


class SocketIO(object):

    nextseq = 0

    def __init__(self, sock, objtable=None, debugging=None):
        self.sockthread = threading.currentThread()
        if debugging is not None:
            self.debugging = debugging
        self.sock = sock
        if objtable is None:
            objtable = objecttable
        self.objtable = objtable
Example #55
    def translate(self, **kwargs):
        response = cherrypy.response
        response.headers['Content-Type'] = 'application/json'

        errors = self._check_params_translate(kwargs)
        if errors:
            cherrypy.response.status = 400
            return self._dump_json(errors)
        self.log("The server is working on: %s" % repr(kwargs["q"]))

        if "segid" in kwargs:
            segid = kwargs["segid"]
        else:
            segid = "0000"
        self.log("The server is working on segid: %s" % repr(segid))

        if self.verbose > 0:
            self.log("Request before preprocessing: %s" % repr(kwargs["q"]))
        q = self._prepro(self.filter.filter(kwargs["q"]))
        if self.verbose > 0:
            self.log("Request after preprocessing: %s" % repr(q))
        translation = ""
        name = ""  # guard: 'name' is otherwise unbound when the query is empty
        if q.strip():
            result_queue = Queue.Queue()

            key = self._get_engine_key(segid)
            self.log("KEY: %s" % repr(key))
            self.queue_translate[key].put((result_queue, segid, q))

            try:
                if self.timeout and self.timeout > 0:
                    name, translation = result_queue.get(timeout=self.timeout)
                else:
                    name, translation = result_queue.get()
                self.log("Engine name: %s" % name)
            except Queue.Empty:
                return self._timeout_error(q, 'translation')

        if self.verbose > 0:
            self.log(
                "Translation before sentence-level confidence estimation: %s" %
                translation)
            self.log("Source before sentence-level confidence estimation: %s" %
                     q)

        sentenceConfidence = None
        if self.sentence_confidence_enabled == 1:
            if self.verbose > 0:
                self.log(
                    "Translation before sentence-level confidence estimation: %s"
                    % translation)
                self.log(
                    "Source before sentence-level confidence estimation: %s" %
                    q)
            sentenceConfidence = self._get_sentence_confidence(
                "ID", q, translation)
            if self.verbose > 0:
                self.log("Sentence Confidence: %s" % sentenceConfidence)
                self.log("Translation after postprocessing: %s" % translation)

        if self.verbose > 0:
            self.log("Translation before postprocessing: %s" % translation)
        translation = self._postpro(translation)
        if self.verbose > 0:
            self.log("Translation after postprocessing: %s" % translation)

        translation, phraseAlignment = self._getPhraseAlignment(translation)
        if self.verbose > 1:
            self.log("Phrase alignment: %s" % str(phraseAlignment))
            self.log("Translation after removing phrase-alignment: %s" %
                     translation)

        translation, wordAlignment = self._getWordAlignment(translation)
        if self.verbose > 1:
            self.log("Word alignment: %s" % str(wordAlignment))
            self.log("Translation after removing word-alignment: %s" %
                     translation)

        translation = self._getOnlyTranslation(translation)
        if self.verbose > 1:
            self.log("Translation after removing additional info: %s" %
                     translation)

        translationDict = {}
        if translation:
            if re.match("_NOSUGGESTION_", name):
                translationDict["translatedText"] = " "
            else:
                translationDict["translatedText"] = translation
        if phraseAlignment:
            if re.match("_NOSUGGESTION_", name):
                translationDict["phraseAlignment"] = ""
            else:
                translationDict["phraseAlignment"] = phraseAlignment
        if wordAlignment:
            if re.match("_NOSUGGESTION_", name):
                translationDict["wordAlignment"] = ""
            else:
                translationDict["wordAlignment"] = wordAlignment
        if self.sentence_confidence_enabled == 1:
            self.log("sentence_confidence_enabled: passed")
            if sentenceConfidence:
                if re.match("_NOSUGGESTION_", name):
                    translationDict["sentence_confidence"] = ""
                else:
                    translationDict["sentence_confidence"] = sentenceConfidence
        translationDict["systemName"] = name
        translationDict["segmentID"] = segid

        answerDict = {}
        answerDict["translations"] = [translationDict]

        data = {"data": answerDict}
        #        data = {"data" : {"translations" : [translationDict]}}
        self.log("The server is returning: %s" % self._dump_json(data))
        return self._dump_json(data)
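
The method above relies on a rendezvous pattern: each request carries its own reply queue, and the caller blocks on get() with an optional timeout so a wedged engine cannot hang the server. A self-contained sketch of the pattern, with a trivial stand-in for the real engine thread (all names here are illustrative, not the server's actual API):

import threading
try:
    import Queue as Q  # Python 2
except ImportError:
    import queue as Q  # Python 3

engine_queue = Q.Queue()

def engine_worker():
    # Stand-in for the real engine thread behind queue_translate[key].
    while True:
        reply_queue, segid, text = engine_queue.get()
        reply_queue.put(("dummy-engine", text.upper()))  # fake translation

t = threading.Thread(target=engine_worker)
t.daemon = True
t.start()

reply = Q.Queue()                        # one private reply queue per request
engine_queue.put((reply, "0000", "hello"))
try:
    name, translation = reply.get(timeout=5)  # never block forever
except Q.Empty:
    name, translation = "timeout", ""
print("%s -> %s" % (name, translation))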
Example #56
        try:
            (ev, data) = recv.get(timeout=timeout)
        except Queue.Empty:
            raise Exception('operation timeout')
        try:
            tags.remove(ev)
        except ValueError:
            pass  # unrelated event; keep waiting


arg_parser = argparse.ArgumentParser(
    description='Command line python tool. hang test')
arg_parser.add_argument('--tv',
                        action='store_true',
                        help='enable tv-mode')
args = vars(arg_parser.parse_args())

recv = Queue.Queue()
umc = MediaPlayer()
start_proxy(umc, recv)

try:
    umc.load('file://dummy', "sim")
    wait_reply(recv, ['load'])
    umc.sendDebugMsg('hang')
    umc.play()
    wait_reply(recv, ['play'])
finally:
    umc.unload()
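
The snippet opens mid-way through wait_reply; a hedged reconstruction of the full helper, assuming it simply drains events until every expected tag has been seen (the loop and the default timeout are assumptions):

def wait_reply(recv, tags, timeout=30):
    # Hypothetical reconstruction -- only the tail of the real function
    # is shown above. Wait until every tag in 'tags' has arrived.
    tags = list(tags)  # copy so the caller's list is untouched
    while tags:
        try:
            (ev, data) = recv.get(timeout=timeout)
        except Queue.Empty:
            raise Exception('operation timeout')
        try:
            tags.remove(ev)
        except ValueError:
            pass  # unrelated event; keep waiting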
Example #57
    def update(self, **kwargs):
        response = cherrypy.response
        response.headers['Content-Type'] = 'application/json'

        errors = self._check_params_update(kwargs)
        if errors:
            cherrypy.response.status = 400
            return self._dump_json(errors)

        source = kwargs["segment"]
        target = kwargs["translation"]
        self.log("The server is updating, segment: %s" % repr(source))
        self.log("The server is updating, translation: %s" % repr(target))

        if "segid" in kwargs:
            segid = kwargs["segid"]
        else:
            segid = "0000"
        self.log("The server is working on segid: %s" % repr(segid))

        key = self._get_updater_key(segid)
        if self.updater[key] is None:
            answerDict = {}
            answerDict["code"] = "0"
            answerDict["string"] = ("OK, but this engine/updater combination "
                                    "does not manage user feedback")
            answerDict["systemName"] = key
            answerDict["segmentID"] = segid

            data = {"data": answerDict}
            self.log("The server is returning: %s" % self._dump_json(data))
            return self._dump_json(data)

        source = self._updater_source_prepro(self.filter.filter(source))
        source = self._removeXMLTags(source)
        source = source.encode("utf-8")
        target = self._updater_target_prepro(self.filter.filter(target))
        target = self._removeXMLTags(target)
        target = target.encode("utf-8")

        self.log("The server is updating, after preprocessing segment: %s" %
                 repr(source))
        self.log(
            "The server is updating, after preprocessing translation: %s" %
            repr(target))

        if "extra" in kwargs:
            extra = kwargs["extra"]
            self.log("The server is working on update, extra: %s" %
                     repr(extra))

        annotation = self.updater[key].update(source=source, target=target)
        self.log(
            "The server created this annotation: %s from the current segment and translation"
            % annotation)

        annotation = annotation.decode("utf-8")

        q = annotation
        if self.verbose > 0:
            self.log("Request Dummy_Input: %s" % repr(q))
        translation = ""
        result_queue = Queue.Queue()

        key = self._get_engine_key(segid)
        self.queue_translate[key].put((result_queue, segid, q))

        try:
            if self.timeout and self.timeout > 0:
                name, translation = result_queue.get(timeout=self.timeout)
            else:
                name, translation = result_queue.get()
        except Queue.Empty:
            return self._timeout_error(q, 'dummy_translation')
        if self.verbose > 0:
            self.log(
                "Request after translation of Dummy_Input (NOT USED): %s" %
                repr(translation))

        answerDict = {}
        answerDict["code"] = "0"
        answerDict["string"] = "OK"
        answerDict["systemName"] = name
        answerDict["segmentID"] = segid

        data = {"data": answerDict}
        self.log("The server is returning: %s" % self._dump_json(data))
        return self._dump_json(data)
Example #58
from SocketServer import TCPServer
from SocketServer import ThreadingMixIn
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler

#handle to the logging and database module
logger = None

#store the task information
task_status = {}

#the queue used for task scheduling
sched_queue = []

#task preprocessing, e.g., time estimation, download.
preproc_queue = Queue.Queue(100)

#the video blocks waiting to be distributed
disb_queue = Queue.Queue(5000)

#the blocks that have been dispatched but not yet finished
ongoing_blocks = []

#the ip and port information of the master server
master_ip = config.master_ip
master_rev_port = int(config.master_rev_port)
master_snd_port = int(config.master_snd_port)
master_rpc_port = int(config.master_rpc_port)

redis_ip = 'localhost'
red_con = redis.StrictRedis(host=redis_ip, port=6379, db=0)
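
The two bounded queues give the pipeline backpressure: put() blocks once preproc_queue holds 100 tasks or disb_queue holds 5000 blocks. A sketch of the consuming loops under that design, where split_into_blocks and send_to_worker are hypothetical helpers, not part of the real module:

def distribute_blocks():
    # Drain preprocessed tasks and split them into video blocks.
    while True:
        task = preproc_queue.get()
        for block in split_into_blocks(task):   # hypothetical helper
            disb_queue.put(block)               # blocks at 5000 pending
        preproc_queue.task_done()

def dispatch_blocks():
    # Hand blocks to workers and remember them until they finish.
    while True:
        block = disb_queue.get()
        ongoing_blocks.append(block)
        send_to_worker(block)                   # hypothetical RPC call
        disb_queue.task_done()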
Example #59
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Distributed processes
# Between Thread and Process, prefer Process: it is more stable, and
# processes can be distributed across multiple machines, whereas threads
# can at best use the CPUs of a single machine.

# The managers submodule also supports distributing processes across machines
import random, time
from multiprocessing.managers import BaseManager
try:
    import Queue  # Python 2
except ImportError:
    import queue as Queue  # Python 3

# queue for sending tasks
task_que = Queue.Queue()
# queue for receiving results
result_que = Queue.Queue()


class QueueManager(BaseManager):
    pass


# register to the network


def cal1():
    return task_que


def cal2():
    return result_que
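
The snippet ends before the registration its comment announces. A minimal sketch of how the two callables would typically be exposed and served; the registered names, port, and authkey below are arbitrary assumptions:

# Hedged completion: register the queue accessors and start the manager.
QueueManager.register('get_task_que', callable=cal1)
QueueManager.register('get_result_que', callable=cal2)

manager = QueueManager(address=('', 5000), authkey=b'abc')
manager.start()

# Obtain network proxies to the shared queues and enqueue a few tasks;
# remote workers would connect with the same register/connect dance.
task = manager.get_task_que()
result = manager.get_result_que()
for i in range(3):
    task.put(random.randint(0, 10000))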
Example #60
    def reset(self, **kwargs):
        response = cherrypy.response
        response.headers['Content-Type'] = 'application/json'

        errors = self._check_params_reset(kwargs)
        if errors:
            cherrypy.response.status = 400
            return self._dump_json(errors)


        # We assume that the string for resetting is the same for all MT
        # engines, and it is created by any updater.
        if "segid" in kwargs:
            segid = kwargs["segid"]
        else:
            segid = "0000"
        self.log("The server is working on segid: %s" % repr(segid))

        k = self._get_updater_key(segid)
        if updater_config[k] is None:
            answerDict = {}
            answerDict["code"] = "0"
            answerDict["string"] = ("OK, but this engine/updater combination "
                                    "does not manage this request")
            answerDict["systemName"] = k
            answerDict["segmentID"] = segid

            data = {"data": answerDict}
            self.log("The server is returning: %s" % self._dump_json(data))
            return self._dump_json(data)

        annotation = self.updater[k].reset()
        if self.verbose > 0:
            self.log("The server created this annotation: %s" % annotation)

        q = annotation
        if self.verbose > 0:
            self.log("Request Dummy_Input: %s" % repr(q))
        for k in moses.keys():
            translation = ""
            result_queue = Queue.Queue()
            self.queue_translate[k].put((result_queue, segid, q))
            try:
                if self.timeout and self.timeout > 0:
                    name, translation = result_queue.get(timeout=self.timeout)
                else:
                    name, translation = result_queue.get()
            except Queue.Empty:
                return self._timeout_error(q, 'dummy_translation')
            if self.verbose > 0:
                self.log(
                    "Request after translation of Dummy_Input (NOT USED): %s" %
                    repr(translation))

        answerDict = {}
        answerDict["code"] = "0"
        answerDict["string"] = "OK"

        data = {"data": answerDict}
        self.log("The server is returning: %s" % self._dump_json(data))
        return self._dump_json(data)