Beispiel #1
0
 def runForever(self):
     """Accept TLS clients in a loop until interrupted.

     For each accepted connection a ponger Process is spawned and the
     client is handled on the SSL-wrapped socket; the worker process and
     the connection are always cleaned up in the finally block.
     """
     try:
         while True:
             s, addr = self.bindsocket.accept()
             try:
                 p = Process(target=self.ponger)
                 self.conn = ssl.wrap_socket(s, server_side=True, certfile=self.pem, keyfile=self.pem)
                 p.start()
                 self.handleClient()
             except KeyboardInterrupt:
                 break
             finally:
                 try:
                     p.terminate()
                 except AttributeError:
                     # wrap_socket failed before p became a live Process:
                     # almost always a certificate-trust problem on the phone.
                     msg = (
                         "[Server] Error! Have you installed ca.crt on your iPhone 4S? "
                         + "If not, email ca.crt to your iPhone 4S, open the email, "
                         + "tap the attachment, and install the certificate. Then try again."
                     )
                     logger.exception(msg)
                     sys.exit(1)
                 try:
                     self.conn.shutdown(socket.SHUT_RDWR)
                     self.conn.close()
                 except Exception:
                     # Best-effort close; the peer may already be gone.
                     # (The original bare `except:` also swallowed
                     # KeyboardInterrupt/SystemExit raised during cleanup.)
                     pass
     except KeyboardInterrupt:
         pass
    def __init__(
        self, port=default_port, baudrate=default_baudrate, timeout=default_timeout, commands=None, responses=None
    ):
        """Configure serial-port settings and command/response containers."""
        Process.__init__(self)

        self.port = port
        self.baudrate = baudrate
        self.timeout = timeout
        # NOTE Other default port settings: bytesize=8, parity='N', stopbits=1, xonxoff=0, rtscts=0
        self.device = None  # the serial port itself is opened later, in run()
        self.live = False  # flag used to signal worker threads
        self.fake_id = -1  # temporary id used to make fake send-recv work

        # Use caller-supplied containers when given; otherwise build
        # process-safe defaults.
        # self.sendLock = Lock()  # to prevent multiple processes from trying to send on the same serial line
        if commands is None:
            # internal queue to receive and service commands
            self.commands = Queue(default_queue_maxsize)
        else:
            self.commands = commands
        # TODO move queue out to separate class to manage it (and responses?)
        # TODO create multiple queues for different priority levels?

        if responses is None:
            # Manager provides process-safe shared memory for responses;
            # NOTE breaks on windows
            self.manager = Manager()
            self.responses = self.manager.dict()  # responses keyed by command id
        else:
            self.responses = responses
Beispiel #3
0
def start_listener(port):
    p = Process(target=_run, args=(port,))
    processMap[port] = {"process": p, "connected": True, "alt_nodes": []}
    # TODO: alt_nodes for orchestrator rebalance detection

    print "sdk_%s: starting" % port
    p.start()
def start_detection(queue, image_label, lf, lf_label, username):
    """
    Runs the detection algorithm and grabs multiple frames for the recognizer to compare.

    Frames are pulled from *queue* until num_pics_required face pictures
    have been captured (or max_capture_attempts frames have been seen);
    on success the recognizer is launched in a separate process.
    """
    configure_folders()
    global enter_button
    global q
    # enter_button.config(state='disabled')
    cascade_fn = "../../metadata/haarcascade_frontalface_alt.xml"
    cascade = cv2.CascadeClassifier(cascade_fn)
    max_capture_attempts = 160
    num_pics_required = 30
    # (The original called configure_folders() a second time here; the
    # duplicate was removed -- assumed idempotent, TODO confirm.)
    for i in range(0, max_capture_attempts):
        frame = queue.get()
        update_video_feed(image_label, frame)
        detect_face(frame, cascade)
        update_labels(lf, lf_label, num_pics_required)

        # Use the named constant instead of the magic literal 30.
        if num_pics_captured() == num_pics_required:
            # Enough pictures: hand off to the recognizer process.
            p = Process(target=rec, args=(q, username))
            p.start()
            global status
            status = 1
            break
    if num_pics_captured() < num_pics_required:
        # Typo fix: "thecamera" -> "the camera" in the user-facing message.
        tkMessageBox.showwarning(title="Error", message="Please position in front of the camera")
        configure_folders()
Beispiel #5
0
 def __init__(self, task_queue, temp_file, head, mode="chromosome", verbosity=False):
     """Store worker configuration; *head* supplies the output header."""
     Process.__init__(self)
     self.mode = mode
     self.task_queue = task_queue
     self.temp_file = temp_file
     self.verbosity = verbosity
     self.header = head.header
Beispiel #6
0
def start_example_app_process3():
    """Launch example server 3, verify the gateway, and return the process."""
    # XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    proc = Process(target=start_example_server3)
    proc.start()
    sleep()
    test_gateway_connection()
    return proc
    def action_create_copy(self, login, password, status_id, paths, session):
        """Launch a CreateCopy worker subprocess; report failures as a dict."""
        try:
            self.logger.info(
                "FM starting subprocess worker create_copy %s %s", pprint.pformat(status_id), pprint.pformat(login)
            )

            # Decode all byte inputs once, up front.
            params = {
                "login": login.decode("UTF-8"),
                "password": password.decode("UTF-8"),
                "paths": byte_to_unicode_list(paths),
                "session": byte_to_unicode_dict(session),
            }
            worker = Process(
                target=self.run_subprocess,
                args=(self.logger, CreateCopy, status_id.decode("UTF-8"), FM.Action.CREATE_COPY, params),
            )
            worker.start()
            return {"error": False}

        except Exception as e:
            return {"error": True, "message": str(e), "traceback": traceback.format_exc()}
Beispiel #8
0
class DropboxSynchronizer(object):
    """
    Consumer class for uploading newly created snapshots asynchronously.
    """

    def __init__(self, client):
        # client must expose put_file(filename, fileobj)
        self.client = client
        self.queue = Queue()

    @staticmethod
    def serve(queue, client):
        """
        Serves the queue by retrieving current snapshot files.

        Each task is a (path, filename) tuple; a falsy task terminates
        the loop.
        """

        while True:
            task = queue.get()
            if not task:
                break
            path, filename = task
            # Context manager ensures the handle is closed even if the
            # upload raises (the original leaked the file on error).
            with open(path) as snapshot_file:
                client.put_file(filename, snapshot_file)

    def start(self):
        """
        Starts the consumer process and returns its task queue.
        """

        self.worker = Process(target=DropboxSynchronizer.serve, args=(self.queue, self.client))
        self.worker.start()
        return self.queue
def execute_code(cell_id, code):
    """Evaluate the given code in another process,
    Put the results and list of generated files into the global queue."""
    # the fs variable is inherited from the parent process
    code = displayhook_hack(code)
    curr_dir = os.getcwd()
    tmp_dir = tempfile.mkdtemp()
    print "Temp files in " + tmp_dir
    # We should at least document the side effects of
    # just setting the daemon flag and creating subprocesses
    # What things does a user/developer need to be aware of?
    oldDaemon = current_process().daemon
    current_process().daemon = False
    # Daemonic processes cannot create children
    os.chdir(tmp_dir)
    result = ""
    p = Process(target=execProcess, args=(cell_id, code))
    p.start()
    p.join()
    file_list = []
    fslock.acquire()
    for filename in os.listdir(tmp_dir):
        file_list.append(filename)
        fs_file = fs.new_file(cell_id, filename)
        with open(filename) as f:
            fs_file.write(f.read())
        fs_file.close()
    fslock.release()
    if len(file_list) > 0:
        outQueue.put((cell_id, new_stream("files", printout=False, files=file_list)))
    current_process().daemon = oldDaemon
    os.chdir(curr_dir)
    shutil.rmtree(tmp_dir)
Beispiel #10
0
 def __init__(self, queue, opts, thread_lock, stats, updates):
     """Keep references to the shared queue, lock, options and counters."""
     Process.__init__(self)
     self.opts = opts
     self.queue = queue
     self.thread_lock = thread_lock
     self.stats = stats
     self.updates = updates
def parallel_process_pool(X_train, y_train, num_tasks, nbrs, num_neighbors, error_list):
    print "%d vectors in X_train." % X_train.shape[0]
    server = multiprocessing.Manager()
    vec_list = []
    label_list = []
    for i in xrange(X_train.shape[0]):
        vec_list.append(X_train[i])
        label_list.append(y_train[i])

    num_vecs = int(X_train.shape[0] / num_tasks)
    worker_list = []
    for task in range(0, num_tasks):
        current_vec_list = []
        if task < num_tasks - 1:
            print "[%d,%d],task %d" % (task * num_vecs, (task + 1) * num_vecs, task)
            X_sub = X_train[task * num_vecs : (task + 1) * num_vecs]
        else:
            print "[%d,%d],task %d" % (task * num_vecs, (task + 1) * num_vecs, task)
            X_sub = X_train[task * num_vecs :]

        index_offset = task * num_vecs
        worker = Process(target=find_knn, args=(X_sub, y_train, index_offset, error_list))
        worker.start()
        worker_list.append(worker)
        print "Task %d invoked." % task

    for worker in worker_list:
        worker.join()
        print "one worker exit."
Beispiel #12
0
    def brute(self):
        """Brute-force the cookie PRNG seed using worker processes.

        If a previous run already succeeded, returns the recovered
        generator immediately; otherwise fills the work queue with all
        (b0, b1) byte pairs, runs the workers, rebuilds the synchronized
        PRNG, and returns True iff a worker found the seed.
        """
        # Short-circuit: seed already recovered by an earlier call.
        if self.found.value == 1:
            return self.syncRnd

        # Grab a fresh cookie value to brute-force against.
        freshcookie = self.givemecookie().split("=")[-1].strip()

        print "freshcookie =", freshcookie

        workers = []

        for b0 in range(256):  # Fill the queue
            for b1 in range(256):
                # One task per candidate (b0, b1) byte pair: 65536 total.
                self.queue.put((b0, b1))
                # time.sleep(0.01)
                # self.queue.put((13,13))

        for i in range(self.workers):
            p = Process(target=self.brute_worker, args=(freshcookie,))
            workers.append(p)
            p.start()

        for w in workers:  # Wait for workers
            w.join()

        # Rebuild the PRNG from the recovered seed and advance it to the
        # server's position (two nextLong() calls per offset step).
        self.syncRnd = GnuSHA1PRNGMimic(self.seed[:])
        self.syncRnd.setSeedL(self.syncRnd.nextLong())
        for i in range(2 * self.offset.value):
            self.syncRnd.nextLong()

        # NOTE(review): this path returns a bool while the early-exit at
        # the top returns self.syncRnd -- callers must handle both.
        return self.found.value == 1
Beispiel #13
0
def siriServer(saveKeys=False, keyPickle="keys.pickle", local="127.0.0.1"):
    """Run the fake Siri server (plus a helper DNS server) until interrupted.

    Writes the bundled PEM to a temp file, optionally pickles recovered
    iPhone keys on shutdown, and always removes the temp PEM if it was
    created.
    """
    pem = "tmp.pem"
    server = None
    p = None
    try:
        with open(pem, "w") as f:
            f.write(PEM)
        server = SiriServer(pem)
        if local == "127.0.0.1":
            local = raw_input("Enter this computer's IP address: ")
        p = Process(target=dnsServer, args=[local])
        p.start()
        logger.info("[Server] Siri server started on localhost:443")
        logger.info(
            "[Server] To recover iPhone 4S Siri auth keys, change DNS address on iPhone to %s and make a Siri request.",
            local,
        )
        server.runForever()
    except KeyboardInterrupt:
        logger.info("[Server] Shutting down Siri server")
        # Guard: p is unbound if the interrupt arrived before Process()
        # was created (the original raised NameError here).
        if p is not None:
            p.terminate()
    except socket.error as e:
        import errno

        if e.args[0] == errno.EACCES:
            logger.exception('[Server] Failed to start server. Are you root? Try: "sudo %s"' % " ".join(sys.argv))
        raise
    finally:
        # Guarded cleanup: the original unconditionally unlinked the PEM
        # and read server.keys, raising if setup failed early.
        if os.path.exists(pem):
            os.unlink(pem)
        if saveKeys and server is not None:
            logger.info("[Server] Recovered iPhone 4S keys:")
            logger.info(pprint.pformat(server.keys))
            with open(keyPickle, "wb") as f:
                pickle.dump(server.keys, f)
Beispiel #14
0
def siriClient(url="guzzoni.apple.com", keyPickle="keys.pickle", speech="input.sif"):
    """Replay a Siri session against *url* using pickled auth keys.

    Streams the HTTP/content headers and speech packets while a
    background process (p) reads the responses.
    """
    p = None
    try:
        with open(keyPickle, "rb") as f:
            keys = pickle.load(f)
        client = SiriClient(url, keys, speech, "ca.crt")
        p = Process(target=client.getResponse, args=[client])
        client.sendData(client.httpHeaders())
        logger.info("[Client] Sent HTTP headers")
        client.sendData(client.contentHeader())
        logger.info("[Client] Sent content header")
        client.sendData(client.ping())
        p.start()
        logger.info("[Client] Sent ping")
        client.sendData(client.loadAssistant())
        logger.info("[Client] Sent LoadAssistant")
        client.sendData(client.startSpeechDictation())
        logger.info("[Client] Sent StartSpeechDictation")
        idx = client.sendSpeechPackets()
        logger.info("[Client] Sent all speech packets")
        client.sendData(client.finishSpeech(idx))
        logger.info("[Client] Sent FinishSpeech")
        client.pinger()
    except KeyboardInterrupt:
        logger.info("[Client] Shutting down Siri client")
        # BUG FIX: the original also called p2.terminate(), but no p2 is
        # ever created in this function -- that raised NameError during
        # shutdown. Also guard p in case the interrupt arrived early.
        if p is not None:
            p.terminate()
Beispiel #15
0
def teach(
    in_fname,
    out_fname,
    vec_per_class=(2000, 2000),
    C=(512, 5.998047),
    sigma=(128, 7.013672),
    tau=(1e-12, 1e-12),
    max_iter=(100000, 100000),
    epsilon=(1e-3, 1e-3),
):
    """Start wrap_sfire.teach in a child process and return it (not joined).

    Defaults are tuples rather than lists so the shared default objects
    cannot be mutated between calls (the classic mutable-default bug);
    any two-element sequence is accepted.
    """
    t_in_fname = list(map(lambda x: x.encode("ascii"), in_fname))
    t_out_fname = list(map(lambda x: x.encode("ascii"), out_fname))
    # NOTE(review): `set` here must be a project helper shadowing the
    # builtin (the builtin set() does not take two arguments) -- confirm
    # its definition before further changes.
    t_vec_per_class = set(wrap_sfire.unsigned_array, vec_per_class)
    t_C = set(wrap_sfire.double_array, C)
    t_sigma = set(wrap_sfire.double_array, sigma)
    t_tau = set(wrap_sfire.double_array, tau)
    t_max_iter = set(wrap_sfire.unsigned_array, max_iter)
    t_epsilon = set(wrap_sfire.double_array, epsilon)

    child = Process(
        target=wrap_sfire.teach,
        args=(t_in_fname, t_out_fname, t_vec_per_class, t_sigma, t_C, t_tau, t_max_iter, t_epsilon),
    )
    child.start()

    return child
Beispiel #16
0
 def spawn_worker(self, node, socket_def):
     # No arguments. just call run()
     print "DEBUG: Starting worker for node %s" % node.name
     process = Process(target=node.run, args=(socket_def,), name=node.name)
     self.processes[node.name] = process
     process.start()
     print "DEBUG: Worker %s: PID %s" % (node.name, process.pid)
Beispiel #17
0
def iperf_experiment(net):
    """Run back-to-back UDP then TCP iperf tests over *net*, recording
    bandwidth (bwm-ng) and cwnd (tcpprobe) traces for each phase."""
    print "*** Running iperf experiment"

    # Get receiver and clients
    sender = net.getNodeByName("sender")
    receiver = net.getNodeByName("receiver")

    # NOTE(review): s1/s2 are fetched but never used in this function.
    s1 = net.getNodeByName("s1")
    s2 = net.getNodeByName("s2")

    port = 5001

    # Start the bandwidth and cwnd monitors in the background
    monitor = Process(target=monitor_devs_ng, args=("%s/bwm-iperf-udp.txt" % args.dir, 1.0))
    monitor.start()
    start_tcpprobe("udp")

    # Start the receiver
    receiver.cmd("iperf -s -w 256K -l 16K -u -p", port, "> %s/iperf_server-udp.txt" % args.dir, "&")

    print "*** Starting iperf udp"
    # -b %dM caps the UDP send rate at the experiment bandwidth.
    sender.sendCmd(
        "iperf -c %s -p %s -t %d -i 1 -r -w 256K -l 16K -u -b %dM -yc > %s/iperf_client-udp.txt"
        % (receiver.IP(), port, args.time, args.bw, args.dir)
    )
    sender.waitOutput(verbose=True)
    print "*** Killing iperf proc"

    receiver.cmd("kill %iperf")

    # Shut down monitors
    stop_tcpprobe()
    monitor.terminate()
    os.system("killall -9 bwm-ng")

    ### do tcp test
    monitor = Process(target=monitor_devs_ng, args=("%s/bwm-iperf-tcp.txt" % args.dir, 1.0))
    monitor.start()
    start_tcpprobe("tcp")

    # Start the receiver
    receiver.cmd("iperf -s -w 256K -l 16K -p", port, "> %s/iperf_server-tcp.txt" % args.dir, "&")
    # Unlike the UDP phase, wait until the server is actually listening.
    waitListening(sender, receiver, port)

    print "*** Starting iperf tcp"
    sender.sendCmd(
        "iperf -c %s -p %s -t %d -i 1 -r -w 256K -l 16K -yc > %s/iperf_client-tcp.txt"
        % (receiver.IP(), port, args.time, args.dir)
    )
    sender.waitOutput(verbose=True)
    print "*** Killing iperf proc"

    receiver.cmd("kill %iperf")

    # Shut down monitors
    stop_tcpprobe()
    monitor.terminate()
    os.system("killall -9 bwm-ng")

    print "*** End iperf experiment"
    def resume(self):
        """It checks for Idle processes & kill them after communicating with them also forks new one."""
        for key in self.pid_dict:
            # Pick out the downloads that still need a worker process.
            obj = self.pid_dict[key]
            if obj is not None:
                if obj.LOCK.value != 3:  # LOCK == 3 means download has completed
                    fVP = Fork_Download()
                    obj.P = 0
                    obj.timeElapsed = 0

                    # type 0: download not yet initiated (replace it);
                    # type 1: resume an initiated download.
                    # (The original duplicated an identical Process(...)
                    # construction in both branches -- only obj.type
                    # differed, so the branches are collapsed here.)
                    obj.type = 0 if obj.initiated.value == 0 else 1
                    p = Process(
                        target=fVP.forkVariousPart,
                        args=(obj, self.url, str(self.fileName), self.transmit, self.monitor),
                    )
                    p.start()
                    obj.P = p
                    self.pid_dict[key] = obj
        Timer(2, self.Monitor).start()
Beispiel #19
0
    def serve(self):
        """Start a fixed number of worker threads and put client into a queue"""

        # this is a shared state that can tell the workers to exit when set
        # as false
        self.isRunning.value = True

        # first bind and listen to the port
        self.serverTransport.listen()

        # fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                # A failed worker is logged but does not abort the server.
                logging.exception(x)

        # wait until the condition is set by stop()

        while True:

            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as x:
                logging.exception(x)

        # NOTE(review): the loop exits while still holding stopCondition
        # (Condition.wait re-acquires before returning, and there is no
        # release here) -- presumably acceptable since the server is
        # shutting down; confirm serve() is never re-entered.
        self.isRunning.value = False
Beispiel #20
0
 def transfer(self, cmd, lfn, pfn, pdst, verbose=0, background=False):
     """Copy LFN to given destination"""
     # Generic message used whenever the GRID middleware cannot report
     # the file size.
     err = "Unable to identify total size of the file,"
     err += " GRID middleware fails."
     if not background:
         bar = PrintProgress("Fetching LFN info")
     if verbose:
         print_info(cmd)
     if background:
         # here background is a list of commands
         if not isinstance(background, list):
             return "fail"
         # Asynchronous mode: remember the (unstarted) worker keyed by
         # LFN. NOTE(review): proc.start() is never called here --
         # presumably whoever services self.queue starts it; confirm.
         proc = Process(target=execute, args=(background, pfn, pdst, 0))
         self.queue[lfn] = (proc, None)
         return "accepted"
     elif verbose:
         # Synchronous verbose transfer: execute() returns (dst, size)
         # on success, falsy on failure.
         status = execute(cmd, pfn, pdst, verbose)
         if not status:
             return "fail"
         else:
             dst, dst_size = status
             size = size_format(dst_size)
             if not size or not dst_size:
                 print_error(err)
                 print "Status of transfer:\n", status
                 return "fail"
             else:
                 print "\nDone, file located at %s (%s)" % (dst, size_format(dst_size))
             return "success"
     else:
         # Synchronous transfer with a progress bar: poll the growing
         # destination file against the known source size.
         ifile = pdst
         pfn_size = get_size(pfn)
         if pfn_size and pfn_size != "null":
             tot_size = float(pfn_size)
             bar.print_msg("LFN size=%s" % size_format(tot_size))
             bar.init("Download in progress:")
             proc = Process(target=execute, args=(cmd, pfn, pdst, verbose))
             proc.start()
             while True:
                 if proc.is_alive():
                     size = get_size(ifile)
                     if not size or size == "null":
                         # Destination not visible yet; keep the bar alive.
                         bar.refresh("")
                         pass
                     else:
                         progress = float(size) * 100 / tot_size
                         bar.refresh(progress)
                         if progress == 100:
                             break
                 else:
                     break
                 time.sleep(0.5)
             bar.clear()
             # Verify the copy against the source before claiming success.
             status = check_file(pfn, pdst, verbose)
             if status:
                 return "success"
         else:
             print_error(err)
             return "fail"
     return "fail"
Beispiel #21
0
class Button:
    """GPIO push-button that debounces input and runs a callback (with
    LED feedback) in its own process."""

    def __init__(self, channel, led_pin, f):
        self.channel = channel
        self.f = f
        self.led_pin = led_pin
        self.p = Process(target=self.target_f)
        GPIO.setup(self.channel, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        GPIO.setup(self.led_pin, GPIO.OUT, initial=GPIO.LOW)

    def target_f(self):
        """Debounce, wait for a press, flash the LED around the callback."""
        while True:
            # Crude software debounce: require four consecutive LOW reads
            # (a HIGH read walks the count back down).
            stable = 0
            while stable < 4:
                if GPIO.input(self.channel) == GPIO.HIGH:
                    if stable > 0:
                        stable -= 1
                else:
                    stable += 1
            time.sleep(0.05)
            GPIO.wait_for_edge(self.channel, GPIO.RISING)
            GPIO.output(self.led_pin, GPIO.HIGH)
            self.f()
            time.sleep(1)
            GPIO.output(self.led_pin, GPIO.LOW)

    def start(self):
        self.p.start()

    def join(self):
        self.p.join()
def run_workers(num_workers, jobs, shuffle):
    """Queue up all jobs start workers with job_queue
    catch KeyboardInterrupt to allow interrupting all workers
    Not using Pool to better hande KeyboardInterrupt gracefully
    Adapted from example at:
    http://bryceboe.com/2012/02/14/python-multiprocessing-pool-and-keyboardinterrupt-revisited/
    """

    # Queue up all jobs
    job_queue = Queue()
    counter = Value("i", 0)

    # optionally shuffle queue
    if shuffle:
        jobs = list(jobs)
        random.shuffle(jobs)

    for job in jobs:
        job_queue.put(job)

    workers = []

    # range() works on both Python 2 and 3 (the original used xrange,
    # which does not exist on Python 3); the loop index is unused.
    for _ in range(num_workers):
        worker = Process(target=do_work, args=(job_queue, counter))
        worker.start()
        workers.append(worker)

    try:
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        logging.info("Received Ctrl-C, interrupting all workers")
        for worker in workers:
            worker.terminate()
            worker.join()
Beispiel #23
0
    def __init__(
        self,
        alive_event,
        work_event,
        email,
        queue,
        index_queue,
        passed,
        robots,
        unavailable_urls,
        agent_name=DEFAULT_AGENT_NAME,
        headers=DEFAULT_HEADERS,
        url_filter=lambda x: True,
    ):
        """Store crawler state and build the robots-aware URL opener.

        headers is only read (a fresh header list is built from it), so
        the shared DEFAULT_HEADERS default object is never mutated.
        """
        Process.__init__(self)
        self.alive_event = alive_event
        self.work_event = work_event
        self.email = email
        self.queue = queue
        self.index_queue = index_queue
        self.passed = passed
        self.robots = robots
        self.unavailable_urls = unavailable_urls
        self.agent_name = agent_name
        self.url_filter = url_filter
        self.is_working = False

        self.handler = request.build_opener(RobotHandler(agent_name, robots))
        # list(headers.items()) already yields fresh (k, v) tuples; the
        # original's copy.copy() + list-comprehension round trip was
        # redundant and has been removed.
        handler_headers = list(headers.items())
        handler_headers.append(("User-Agent", agent_name))
        handler_headers.append(("From", email))
        self.handler.addheaders = handler_headers

        self.html_parser = HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
        self.connection = None
Beispiel #24
0
    def test_multi_processing_handler(self):
        """Round-trip one warning through MultiProcessingHandler/Subscriber."""
        from multiprocessing import Process, Queue
        from logbook.queues import MultiProcessingHandler, MultiProcessingSubscriber

        record_queue = Queue(-1)
        collector = logbook.TestHandler()
        subscriber = MultiProcessingSubscriber(record_queue)

        def send_back():
            # Emit one warning from the child, always popping the handler.
            handler = MultiProcessingHandler(record_queue)
            handler.push_thread()
            try:
                logbook.warn("Hello World")
            finally:
                handler.pop_thread()

        child = Process(target=send_back)
        child.start()
        child.join()

        collector.push_thread()
        try:
            subscriber.dispatch_once()
            self.assert_(collector.has_warning("Hello World"))
        finally:
            collector.pop_thread()
Beispiel #25
0
def run_http_server(redirect_uri=None, port_range=(10000, 10010)):
    """Returns (modified) redirect_uri

    Starts _run_http_server in a child process, waits for it to report
    its bound port, and returns (final_uri, parent_pipe, process).
    Raises ValueError for a non-local host, and terminates the child
    before raising when no port could be bound.
    """
    from multiprocessing import Process, Pipe
    from urllib.parse import urlsplit, urlunsplit

    if redirect_uri is None:
        redirect_uri = "http://localhost"
    p = urlsplit(redirect_uri)
    # Ensure hostname is localhost or 127.0.0.1
    if p.hostname != "127.0.0.1" and p.hostname != "localhost":
        raise ValueError("url must have host of 127.0.0.1 or localhost! Got: {}".format(p.hostname))
    if p.port is not None:
        port_range = (int(p.port), int(p.port))
    parent_port_pipe, child_port_pipe = Pipe()
    parent_pipe, child_pipe = Pipe()
    httpd_p = Process(target=_run_http_server, args=(child_port_pipe, child_pipe, port_range))
    httpd_p.start()
    # NOTE(review): Connection.poll() takes a timeout in *seconds*, so
    # 3000 waits 50 minutes -- possibly meant to be 3; confirm intent.
    if parent_port_pipe.poll(3000):
        final_port = parent_port_pipe.recv()
    else:
        # Don't leak the child process on failure (the original left it
        # running after raising).
        httpd_p.terminate()
        raise Exception("Timeout waiting for HTTP server process to start")
    if final_port == 0:
        # Could not find a port; nothing is bound, so stop the child too.
        httpd_p.terminate()
        raise Exception("Could not find open port")
    netloc = "{0}:{1}".format(p.hostname, final_port)
    if p.path:
        path = p.path
    else:
        path = "/"
    p = p._replace(netloc=netloc, path=path)
    return (urlunsplit(p), parent_pipe, httpd_p)
Beispiel #26
0
    def test_subscriber_group(self):
        """Fan records from two queues into one SubscriberGroup."""
        from multiprocessing import Process, Queue
        from logbook.queues import MultiProcessingHandler, MultiProcessingSubscriber, SubscriberGroup

        a_queue = Queue(-1)
        b_queue = Queue(-1)
        test_handler = logbook.TestHandler()
        subscriber = SubscriberGroup([MultiProcessingSubscriber(a_queue), MultiProcessingSubscriber(b_queue)])

        def make_send_back(message, queue):
            # Bind message/queue now; each child emits exactly one warning.
            def send_back():
                handler = MultiProcessingHandler(queue)
                handler.push_thread()
                try:
                    logbook.warn(message)
                finally:
                    handler.pop_thread()

            return send_back

        for _ in range(10):
            children = [
                Process(target=make_send_back("foo", a_queue)),
                Process(target=make_send_back("bar", b_queue)),
            ]
            for child in children:
                child.start()
            for child in children:
                child.join()
            messages = [subscriber.recv().message for _ in range(2)]
            self.assertEqual(sorted(messages), ["bar", "foo"])
 def __init__(self, func):
     """Wrap *func* as a pipeline process node.

     The function's positional arity determines how many tokens are
     popped from the input buffer per invocation.
     """
     Process.__init__(self)
     self.in_buffer = None
     self.out_buffer = None
     self.func = func
     # number of tokens popped from the input buffer each time.
     # inspect.getargspec was removed in Python 3.11; prefer
     # getfullargspec and fall back only on very old interpreters.
     argspec = getattr(inspect, "getfullargspec", inspect.getargspec)
     self.n_args = len(argspec(func).args)
Beispiel #28
0
def send_to_queue(funcpath, func, args):
    """
    Just use the multiprocessing class
    to send the job in the background
    """
    worker = Process(target=Queuefy, args=(funcpath, func, args))
    worker.start()
Beispiel #29
0
    def __init__(self, fileLocs, test):
        # Result-definition worker: stores its inputs and immediately
        # starts itself. NOTE(review): starting the Process inside
        # __init__ means callers never observe an un-started instance --
        # confirm that is intended.
        self.fileLocs = fileLocs
        self.test = test
        # queue used by __defineResult to hand results back
        self.queue = Queue()

        Process.__init__(self, target=self.__defineResult, name="ResultDefiner")
        self.start()
Beispiel #30
0
 def StartProcesses(self):
     """Launch the GUI and ORServer child processes; always returns True."""
     gui = Process(target=self.__startGUI__)
     self.guiprocess = gui
     gui.start()
     self.pipeGUI.send(["StartViewer", None])
     self.orprocess = Process(target=ORServer, args=(self.pipeOR,))
     self.orprocess.start()
     return True