Example #1
	def handle(self, task):
		result = False
		if isinstance(task, Task):
			self.display( OUTPUT_DEBUG, 'task to handle %s' % task.id() )
			result = True
			if task.state == NEW:
				task.worker = self._rank
				task.hostname = self._name

				if isinstance( task, ControlTask ):
					self.display( OUTPUT_DEBUG, 'running command task locally' )
					task.execute()
					result = self.handle( task )

				elif task.pid in self._procs:
					if (self._inuse + task.slots) <= self._poolsize:
						self.display( OUTPUT_DEBUG, 'running task with process pool' )
						rp, lp = Pipe(False)
						lp.send( self._procs[ task.pid ] )
						lp.send( task )
						self._pipes[ task.key ] = ( rp, lp )
						tmp = pickle.dumps(reduce_connection(rp))
						h = self._pool.apply_async( handle_task, (tmp,) )

						self._pollq.append( h )
						self._inuse = self._inuse + task.slots
						self.display( OUTPUT_DEBUG, 'using: %d, task used %d' % ( self._inuse, task.slots) )
						
				
					elif self._inuse == 0 and self._poolsize < task.slots:
						self.display( OUTPUT_DEBUG, 'running oversized task with process pool' )
						rp, lp = Pipe(False)
						lp.send( self._procs[ task.pid ] )
						lp.send( task )
						self._pipes[ task.key ] = ( rp, lp )
						tmp = pickle.dumps(reduce_connection(rp))
						h = self._pool.apply_async( handle_task, (tmp,) )

						self._pollq.append( h )
						self._inuse = self._inuse + task.slots
						self.display( OUTPUT_DEBUG, 'using: %d, task used %d' % ( self._inuse, task.slots) )
		
					else:
						self.display( OUTPUT_DEBUG, 'no processes available to run task' )
						self.queueFirst( task )
						result = False
				else:
					self.display( OUTPUT_DEBUG, 'process resources not available on worker yet' )
					self.queueFirst( task )
					result = False
							
					
			else:
				self.display( OUTPUT_DEBUG, 'returning task to manager' )
				self.returnTask( task )
		else:
			self.display( OUTPUT_DEBUG, 'got invalid task to handle' )
					
		return result
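The `handle_task` callable dispatched through `apply_async` above is not shown in this example. A minimal sketch of what such a worker could look like, assuming the pickled argument is the reduced read end of the pipe and that the process entry and task arrive in the order `handle()` sends them (a hypothetical illustration, not the project's actual code):

import pickle

def handle_task(pickled_reduced_pipe):
    # Hypothetical worker body: rebuild the read end of the pipe that handle()
    # reduced and pickled, then receive the objects sent through the write end.
    rebuild, args = pickle.loads(pickled_reduced_pipe)
    rp = rebuild(*args)
    proc = rp.recv()   # the self._procs[task.pid] entry sent first
    task = rp.recv()   # the task sent second
    task.execute()     # assumed task API, mirroring the ControlTask branch above
    return task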
Example #2
def main():
    _setproctitle('structurarium.graph')
    parser = argparse.ArgumentParser(description='Run Structurarium graph server')
    parser.add_argument('--version', '-v', action='version', version=__version__)
    parser.add_argument('host')
    parser.add_argument('port', type=int)
    parser.add_argument('path')
    parser.add_argument('--authkey', '-k', action='store')
    parser.add_argument(
        '--worker',
        '-w',
        action='store',
        type=int,
        help='defaults to the number of CPUs'
    )
    args = parser.parse_args()
    listener = Listener((args.host, args.port), family='AF_INET')
    database = Graph(args.path, authkey=args.authkey)
    pool = Pool(processes=args.worker)

    print 'Running on %s:%s' % (args.host, args.port)
    while True:
        connection = listener.accept()
        connection = reduce_connection(connection)
        # pool.apply_async(connect, [database, connection])
        connect(database, connection)
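The `connect` function invoked above lives elsewhere in the project. A plausible sketch of the receiving side, assuming it rebuilds the reduced connection and serves one request using the same recv/play/send protocol shown in the single-threaded task queue loop later in this listing (illustrative only):

from pickle import loads, dumps

def connect(database, reduced_connection):
    # reduce_connection() returns a (rebuild_function, args) pair.
    rebuild, args = reduced_connection
    connection = rebuild(*args)
    command = loads(connection.recv())
    output = database.play(command)
    connection.send(dumps(output))
    connection.close()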
Example #3
    def _ask(self, msg, args=(), kwargs={}):
        i, o = Pipe()
        reduced = reduce_connection(i)
        self.inbox.put([msg, args, kwargs, reduced[1]])
        ret = o.recv()
        i.close()
        o.close()
        return ret
Example #4
def pack(conn):
    """Pickle a connection for transport over another connection.

    :param conn: connection object to pickle
    :returns: pickled connection
    """
    try:
        return pickle.dumps(reduction.reduce_connection(conn))
    except Exception as e:
        raise BrineException(e)
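The matching unpack helper is not part of this snippet. Assuming the usual (rebuild_function, args) layout produced by reduce_connection, the inverse could look like this (a hypothetical addition that mirrors pack()'s error handling):

def unpack(data):
    """Rebuild a connection that was pickled by pack() in the receiving process."""
    try:
        rebuild, args = pickle.loads(data)
        return rebuild(*args)
    except Exception as e:
        raise BrineException(e)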
Example #5
def main():
    recv_1, send_1 = Pipe(duplex=False)
    recv_2, send_2 = Pipe(duplex=False)
    recv_3, send_3 = Pipe(duplex=False)
    recv_4, send_4 = Pipe(duplex=False)
    lines = {1: send_1, 2: send_2, 3: send_3}
    encoders = Queue()

    lines[1].send({'text': '3Radio', 'style': 'center'})
    lines[2].send({'text': '------', 'style': 'center'})
    lines[3].send({'text': '', 'style': 'center'})
    send_4.send({'text': 'Loading...', 'style': 'center'})

    # print 'RE#1'
    pe1 = rotary_encoder_proc.RE_runner(encoders, 1, 8, 9, 7)
    # print 'RE#2'
    pe2 = rotary_encoder_proc.RE_runner(encoders, 2, 0, 2, 3)
    # print 'RE#3'
    pe3 = rotary_encoder_proc.RE_runner(encoders, 3, 12, 13, 14)
    # print 'LCD'
    plcd = lcd_proc.LCD_runner(recv_1, recv_2, recv_3, recv_4)

    mc = pylibmc.Client(['127.0.0.1'], binary=True,
                        behaviors={'tcp_nodelay': True,
                                   'ketama': True})
    mc['city'] = 'Moscow,ru'
    mc['TNWU_authkey'] = current_process().authkey
    mc['TNWU_reduced_connection'] = reduction.reduce_connection(send_4)
    mc['TNWU_running'] = False
    mc['TNWU_last_temp'] = '-666.666'

    amount = {1: 0, 2: 0, 3: 0}
    state = {1: False, 2: False, 3: False}

    while True:
        encoder_message = encoders.get(block=True)
        encoder_id = encoder_message['name']

        if 'rot' in encoder_message:
            amount[encoder_id] += encoder_message['rot']
            lines[encoder_id].send({'text': amount[encoder_id], 'style': 'center'})
        elif 'button' in encoder_message:
            state = button(lines[encoder_id], state, encoder_message, encoder_id)

        if stop_all(state):
            break

    pe1.terminate()
    pe2.terminate()
    pe3.terminate()
    plcd.terminate()
Example #6
    def process_request(self, request, query_num, atomic):
        """Processes a feedback request and returns its result.

        This call is used by queries for submitting individual requests. It is
        a blocking call that should be called from a separate execution thread.

        @param request: The feedback request to process.
        @param query_num: The unique query number.
        @param atomic: Whether subsequent request(s) are expected and should be
                       processed without interruption.
        """
        reply_pipe_send, reply_pipe_recv = multiprocessing.Pipe()
        reduced_reply_pipe_send = reduction.reduce_connection(reply_pipe_send)
        self._request_queue.put(
            ReqTuple(request, reduced_reply_pipe_send, query_num, atomic))
        return reply_pipe_recv.recv()
Example #7
    def rpc(self, target, topic, message, timeout=50):
        leftPipe, rightPipe = multiprocessing.Pipe()
        reduced_Pipe = reduce_connection(rightPipe)
        message = {
            "reduced_return_pipe": reduced_Pipe,
            "actual_message": message
        }
        self.parent.bemoss_publish(target='bacnetagent',
                                   topic=topic,
                                   message=message)
        if leftPipe.poll(timeout):
            result = leftPipe.recv()
            if result is None:
                raise Exception('Received None. Device communication problem')
            return result
        else:
            raise self.RPCTimeout("Time Out")
Example #8
def main():
    _setproctitle('structurarium.taskqueue')
    parser = argparse.ArgumentParser(
        description='Run Structurarium graph server'
    )
    parser.add_argument(
        '--version',
        '-v',
        action='version',
        version=__version__
    )
    parser.add_argument('host')
    parser.add_argument('port', type=int)
    parser.add_argument('path')
    parser.add_argument('--authkey', '-k')
    parser.add_argument(
        '--worker',
        '-w',
        action='store',
        type=int,
        help='defaults to the number of CPUs'
    )
    args = parser.parse_args()
    listener = Listener((args.host, args.port), family='AF_INET')
    database = TaskQueue(args.path, authkey=args.authkey)

    print 'Running on %s:%s' % (args.host, args.port)
    if args.worker > 1:
        pool = Pool(processes=args.worker)
        while True:
            connection = listener.accept()
            connection = cPickle.dumps(reduce_connection(connection))
            database.process(connection)
            pool.apply_async(process, [database, connection])
    else:
        print 'monothread'
        database.replay()
        while True:
            connection = listener.accept()
            command = loads(connection.recv())
            output = database.play(command)
            connection.send(dumps(output))
            connection.close()
Example #9
File: process.py  Project: llazzaro/WorQ
def _reduce_connection(conn):
    """Reduce a connection object so it can be pickled.

    WARNING this puts the current process' authentication key in the data
    to be pickled. Connections pickled with this function should not be
    sent over an untrusted network.

    HACK work around ``multiprocessing`` connection authentication because
    we are using ``subprocess.Popen`` instead of ``multiprocessing.Process``
    to spawn new child processes.

    This will not be necessary when ``multiprocessing.Connection`` objects
    can be pickled. See http://bugs.python.org/issue4892
    """
    obj = reduce_connection(conn)
    assert obj[0] is rebuild_connection, obj
    assert len(obj) == 2, obj
    args = (bytes(current_process().authkey),) + obj[1]
    return (_rebuild_connection, args)
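The `_rebuild_connection` counterpart referenced above is not shown. A hedged sketch of what it presumably does: install the transported authentication key on the child process, then hand off to the stock rebuild_connection (an illustration of the pattern rather than WorQ's exact code):

def _rebuild_connection(authkey, *args):
    # Adopt the parent's authkey so the duplicated handle is accepted, then
    # rebuild the connection with the standard multiprocessing machinery.
    current_process().authkey = authkey
    return rebuild_connection(*args)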
Example #10
    def dbrpc(self, command, args, kwargs, timeout=30):

        leftPipe, rightPipe = multiprocessing.Pipe()
        reduced_Pipe = reduce_connection(rightPipe)
        message = dict()
        message['command'] = command
        message['args'] = args
        message['kwargs'] = kwargs
        finalmessage = {
            "reduced_return_pipe": reduced_Pipe,
            "actual_message": message
        }
        self.publish(source=self.name,
                     destinations=['metadataagent'],
                     topic='dbrpc',
                     message=finalmessage)
        if leftPipe.poll(timeout):
            result = leftPipe.recv()
            return result
        else:
            raise self.RPCTimeout("Time Out")
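On the other side of this RPC, the metadata agent has to rebuild the reduced pipe before it can reply. A hypothetical responder for the pattern above (the handler name and the run_db_command helper are invented for illustration):

def on_dbrpc(self, message):
    # Rebuild the return pipe from the (rebuild_function, args) pair.
    rebuild, args = message['reduced_return_pipe']
    return_pipe = rebuild(*args)
    request = message['actual_message']
    result = self.run_db_command(request['command'],
                                 *request['args'],
                                 **request['kwargs'])
    return_pipe.send(result)   # unblocks leftPipe.poll()/recv() in dbrpc()
    return_pipe.close()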
Example #11
def pickle_connection(connection):
    return pickle.dumps(reduce_connection(connection))
Example #12
File: req.py  Project: shiryehoshua/POR
    def encode(self, connection):
        # reduces Connection() objects to pickleable objects
        reduced = reduce_connection(connection)
        # pickles the Connection() into a string
        return dumps(reduced)
Example #13
def processCommand(command):
    global process_count

    def find_matches(pid):
        matches = []
        l = len(pid)
        for process_name, process in processes_dict.items():
            if str(process.id)[-l:] == str(pid):
                matches.append(process_name)
        return matches

    commands = command.split(' ')
    reply = ''

    if len(commands) == 2 and commands[0].lower() == "error":
        agent_name = commands[1]  # assume the argument is agent_name
        try:
            process = processes_dict.get(agent_name)
            error = process.err
        except AttributeError:
            return ""

        return error

    elif commands[0].lower() == 'status':
        running_count = 0
        stopped_count = 0
        stop_requested_count = 0

        for process_name, process in processes_dict.items():
            if process.is_alive():
                if process.config['stopFlag'].value == 0:
                    status, code = "running", ""
                    running_count += 1
                else:
                    status, code = "stop_requested", ""
                    stop_requested_count += 1
            else:
                status, code = "stopped", process.exitcode
                stopped_count += 1
            if hasattr(process, 'pid'):
                pid = process.pid
            else:
                pid = os.getpid()

            reply += "{:<6} {:<10} {:<30} {:<6} {:<10}\n".format(
                process.id, pid, process.name, status, code)

        reply += "Total %s Agents. Running: %s. Stopped: %s. Stop_Requested: %s" % (
            len(processes_dict.keys()), running_count, stopped_count,
            stop_requested_count)
        return reply
    # elif commands[0].lower() == 'tstatus': #thread_status
    #     threads = threading.enumerate()
    #
    #     replies = []
    #     for thread in threads:
    #         if hasattr(thread, 'id'):
    #             replies.append((thread.id,thread.name))
    #         else:
    #             if hasattr(thread,'parent') and hasattr(thread.parent,'id'):
    #                 replies.append((thread.parent.id,thread.name))
    #             else:
    #                 replies.append((-1,thread.name))
    #
    #     replies = sorted(replies,key=lambda x: x[0])
    #     if len(commands) > 1:
    #         id = commands[1] #filter only this
    #         new_reply = []
    #         for rep in replies:
    #             if str(rep[0]) == str(id):
    #                 new_reply.append(rep)
    #         replies = new_reply
    #     for id, name in replies:
    #         reply +=("{:<6} {:<50}\n".format(id, name))
    #
    #     return reply
    elif len(commands) == 2 and commands[0].lower() == "tstatus":
        target = commands[1]
        leftPipe, rightPipe = multiprocessing.Pipe()
        reduced_Pipe = reduce_connection(rightPipe)
        finalmessage = {"reduced_return_pipe": reduced_Pipe}
        outQueue.put(("commandhandler", [target], 'tstatus', finalmessage))
        if leftPipe.poll(20):
            result = leftPipe.recv()
            reply = ""
            count = 0
            alive_count = 0
            for id, name, status, watchdogtimer, msg in result:

                reply += "{:<6} {:<60} {:<6} {:<5} {:<10}\n".format(
                    id, name, status, watchdogtimer, msg)
                count += 1
                if status:
                    alive_count += 1

            reply += "Total %s threads. Running: %s. Stopped: %s." % (
                count, alive_count, count - alive_count)
            return reply
        else:
            return "Timed out"

    elif len(commands) == 2 and commands[0].lower() in [
            "start", "start_agent", "stop", "stop_agent"
    ]:
        agent_name = commands[1]  #assume the argument is agent_name
        if not agent_name in processes_dict:  #if agent_name is not present
            #assume it to be pid from the end
            matches = find_matches(pid=agent_name)
            if len(matches) == 1:
                agent_name = matches[0]
            if len(matches) == 0:
                return "invalid pid"
            elif len(matches) > 1:
                reply = "\n".join([
                    str(processes_dict[process_name].id) + " " + process_name
                    for process_name in matches
                ])
                return reply
        process_name = agent_name
        process = processes_dict[process_name]
        if commands[0].lower() in ["start", "start_agent"]:
            old_agent = process.agent
            old_config = process.config
            old_config['stopFlag'].value = 0
            old_config['id'] = process_count
            p = BProcess(target=process.agent,
                         name=process.name,
                         kwargs=old_config)
            p.id = process_count
            process_count += 1
            p.agent = old_agent
            p.config = old_config
            p.start()
            processes_dict[process_name] = p
        else:
            process.config['stopFlag'].value = 1
            archived_process_list.append((datetime.datetime.utcnow(), process))
        return reply

    elif len(commands) == 3 and commands[0].lower() in [
            "launch_agent", "start"
    ]:
        agent_type = commands[1]
        agent_name = commands[2]

        agent = getAgent(agent_type)
        if agent:
            print "Launching " + agent.__name__ + " with name: " + agent_name
            inQ = Queue(AGENT_QUEUE_SIZE)
            inQueues_dict[agent_name] = inQ
            stopFlag = Value('i', 0)
            config = {
                'name': agent_name,
                'inQ': inQ,
                'outQ': outQueue,
                'logQ': logQueue,
                'stopFlag': stopFlag,
                'id': process_count
            }
            p = BProcess(target=agent, name=agent_name, kwargs=config)
            p.id = process_count
            process_count += 1
            p.agent = agent
            p.config = config
            p.daemon = True
            p.start()
            if agent_name in processes_dict:  # if there is existing process by that name, terminate it
                processes_dict[agent_name].config[
                    'stopFlag'].value = 1  # make the old process stop
                archived_process_list.append(
                    (datetime.datetime.utcnow(), processes_dict[agent_name])
                )  #TODO kill processes in archived process if still running and clear it periodically
            processes_dict[agent_name] = p
            return ""
        else:
            return agent_type + " Agent not found"

    elif commands[0].lower() in ["remove_agent", "remove"]:
        if len(commands) == 2:
            agent_name = commands[1]  #assume the argument is agent_name
        else:
            return "Invalid number of arguments"
        if not agent_name in processes_dict:  #if agent_name is not present
            #assume it to be pid from the end
            matches = find_matches(pid=agent_name)
            if len(matches) == 1:
                agent_name = matches[0]
            elif len(matches) == 0:
                return "invalid pid"
            else:
                return " ".join([
                    str(processes_dict[process_name].id) for process_name in matches
                ])

        if agent_name in processes_dict or find_matches(pid=agent_name):
            if processes_dict[agent_name].is_alive():
                if processes_dict[agent_name].config['stopFlag'].value == 1:
                    return "Wait for it to stop first"
                else:
                    return "Should stop first"
            processes_dict.pop(agent_name)  #remove it from process-dict
            inQueues_dict.pop(agent_name)
            return ""
    else:
        return "Invalid command"
Example #14
File: vaw.py  Project: virer/vaw-router
    def new_websocket_client(self, path=None):
        """
        Called after a new WebSocket connection has been established.
        """
        # Checking for a token is done in validate_connection()
        _id = None
        msg = None
        try:
            if path is not None and path != '/' and path.find("-") != -1:
                # remove first / and get mode(client or manager)
                mode      = str(path[1:path.index("-")])

                # Now extract id & pw
                tmp       = str(path[path.index("-")+1:])
                wanted_id = str(tmp[:tmp.index("-")])
                wanted_pw = str(tmp[tmp.index("-")+1:])

                # Clear tmp
                del tmp

                # Create a queue for any new client/manager
                manager = Manager()

                if mode == "manager":
                    if wanted_pw == self.session[wanted_id]["pw"]:
                        # Generate the id of the manager
                        _id = id_gen()

                        self.wanted_id = wanted_id

                        if self.pipe == None:
                            reduced = self.session[wanted_id]["pipe"]
                            self.pipe = reduced[0](*reduced[1])

                        self.connected = True
                        self.pipe.send('{ "vnc": "connect" }')
                        
                        # Init manager queue in the session
                        self.session[_id]= { "mode": "manager", "id": _id, "wanted_id": wanted_id, "wanted_pw": wanted_pw, "authenticated": True  }

                        msg = "New manager id: %s wanted client id: %s" % (_id, wanted_id)
                    else:
                        _id = None
                        self.session[_id] = None 

                elif mode == "client":
                    # Use client provided id
                    _id = wanted_id

                    # Since we are a client we don't have a wanted id
                    self.wanted_id = None

                    # Waiting for manager
                    self.connected = False

                    self.pipe, manager_conn = Pipe()
                    try:

                        # Init the client queue in the session
                        self.session[wanted_id]= { "mode": "client", "id": wanted_id, "pw": wanted_pw, "authenticated": True, "pipe": reduction.reduce_connection(manager_conn) }
                    except:
                        print "pipe error"
                        print sys.exc_type,sys.exc_value
                
                    msg = "New client id: %s" % _id
                else:
                    msg = "Error no client/manager mode detected !!!"
                    self.send_close()

                if msg != None:
                    # add log entry
                    self.log_message(msg)
        except:
            print "Error in new_websocket_client()"
            print sys.exc_type, sys.exc_value
            del self.session[_id]
            try:
                # if an exception occurred then close the socket
                self.send_close()
            except:
                pass

        # Start proxying
        try:
            if _id != None:
                self.vawrouter(_id)
            else:
                del self.session[_id]
                self.send_close()
        except:
            del self.session[_id]
            self.send_close()
            print "Error in vaw router"
            print sys.exc_type,sys.exc_value
Example #15
def compress_pipe(p):
    pp = pickle.dumps(reduce_connection(p))
    return pp
Example #16
	def reduce(self):
		self.urgent = reduce_connection(self.urgent)
		self.normal = reduce_connection(self.normal)
Example #17
    def create_task(self):
        return Task(
            wait_for_data_in_pipe,
            (pickle.dumps(reduce_connection(self.__child_pipe)),))
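The `wait_for_data_in_pipe` target is not included in the example. A minimal sketch, assuming it simply rebuilds the pickled child end of the pipe and blocks until the parent writes to it (hypothetical body):

import pickle

def wait_for_data_in_pipe(pickled_reduced_pipe):
    rebuild, args = pickle.loads(pickled_reduced_pipe)
    pipe = rebuild(*args)
    data = pipe.recv()   # blocks until self.__child_pipe is written to
    pipe.close()
    return data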