Code example #1
File: PyInterpreter.py Project: walidsadat/MrPython
 def __init__(self, root, mode, filename):
     self.comm, there = mp.Pipe()
     self.process = mp.Process(target=run_process, args=(there, mode, filename))
     self.root = root
Code example #2
    def async_(self, cbl, fun):
        """
        return a pair (pipe, process) so that the process writes
        `fun(a)` to the pipe for each element `a` in the iterable returned
        by the callable `cbl`.

        :param cbl: a function returning something iterable
        :type cbl: callable
        :param fun: a unary translation function
        :type fun: callable
        :rtype: (:class:`multiprocessing.Pipe`,
                :class:`multiprocessing.Process`)
        """
        # create two unix pipes to redirect the worker's stdout and
        # stderr
        stdout = os.pipe()
        stderr = os.pipe()

        # create a multiprocessing pipe for the results
        pipe = multiprocessing.Pipe(False)
        receiver, sender = pipe

        process = FillPipeProcess(cbl(), stdout[1], stderr[1], pipe, fun)
        process.start()
        self.processes.append(process)
        logging.debug('Worker process %s spawned', process.pid)

        def threaded_wait():
            # wait(2) for the process to die
            process.join()

            if process.exitcode < 0:
                msg = 'received signal {0}'.format(-process.exitcode)
            elif process.exitcode > 0:
                msg = 'returned error code {0}'.format(process.exitcode)
            else:
                msg = 'exited successfully'

            logging.debug('Worker process %s %s', process.pid, msg)
            self.processes.remove(process)

        # XXX: it would be much nicer to run this as a coroutine than a thread,
        # except that this code is executed before the eventloop is started.
        #
        # spawn a thread to collect the worker process once it dies
        # preventing it from hanging around as a zombie
        threading.Thread(target=threaded_wait).start()

        # TODO: avoid this if logging level > debug
        def threaded_reader(prefix, fd):
            with os.fdopen(fd) as handle:
                for line in handle:
                    logging.debug('Worker process %s said on %s: %s',
                                  process.pid, prefix, line.rstrip())

        # spawn two threads that read from the stdout and stderr pipes
        # and write anything that appears there to the log
        threading.Thread(target=threaded_reader,
                         args=('stdout', stdout[0])).start()
        os.close(stdout[1])
        threading.Thread(target=threaded_reader,
                         args=('stderr', stderr[0])).start()
        os.close(stderr[1])

        # closing the sending end in this (receiving) process guarantees
        # that the appropriate EOFError is raised here upon .recv in the walker
        sender.close()
        return receiver, process
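
The contract in the docstring above reduces to a small standalone pattern: a worker writes fun(a) for each a in cbl() to a one-way pipe, and the parent closes its copy of the sending end so that recv() eventually raises EOFError. A minimal, self-contained sketch (FillPipeProcess is replaced by a plain function; all names are illustrative):

import multiprocessing

def _source():
    return range(5)

def _square(a):
    return a * a

def _fill(sender, cbl, fun):
    # Stand-in for FillPipeProcess: write fun(a) for each a, then close.
    for a in cbl():
        sender.send(fun(a))
    sender.close()

if __name__ == '__main__':
    receiver, sender = multiprocessing.Pipe(False)
    proc = multiprocessing.Process(target=_fill, args=(sender, _source, _square))
    proc.start()
    sender.close()  # parent's copy; guarantees EOFError below once the worker exits
    try:
        while True:
            print(receiver.recv())
    except EOFError:
        pass
    proc.join()
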
Code example #3
import telegrambot
import discordbot
import redditbot
import statsupdate
import time
import logging
import coloredlogs
import os
import multiprocessing

logging.getLogger().disabled = True
logger = logging.getLogger(__name__)
os.environ[
    "COLOREDLOGS_LOG_FORMAT"] = "%(asctime)s %(levelname)s %(name)s %(message)s"
coloredlogs.install(level="DEBUG", logger=logger)

discord_telegram_pipe = multiprocessing.Pipe()
discord = multiprocessing.Process(target=discordbot.process,
                                  args=(discord_telegram_pipe[0], ))
telegram = multiprocessing.Process(target=telegrambot.process,
                                   args=(discord_telegram_pipe[1], ))
reddit = multiprocessing.Process(target=redditbot.process)
stats = multiprocessing.Process(target=statsupdate.process)

if __name__ == "__main__":
    logger.info("Starting Discord Bot process...")
    discord.start()
    logger.info("Starting Telegram Bot process...")
    telegram.start()
    logger.info("Starting Reddit Bot process...")
    reddit.start()
    if not __debug__:
Code example #4
                                             ('image', 'mask', 'class_id'),
                                             True)

    unique(file.root.classes, classes)

    files = list(
        filter(
            lambda x: not x.endswith('.mask.png'),
            glob.iglob(os.path.join(args.directory, '**/*.png'),
                       recursive=True)))
    count = len(files)
    random.shuffle(files)

    pn, lock, processes = ceil(count /
                               args.processes), multiprocessing.Lock(), []
    conn_in, conn_out = multiprocessing.Pipe(False)

    for i in range(args.processes):
        processes.append(
            start_process(worker,
                          (i, files[i * pn:(i + 1) * pn], classes,
                           file.root.classes[:], colors, lock, conn_out)))

    files = writer(file, filters, conn_in, lock)

    for i in processes:
        print()

    if args.save_path:
        utils.save_path(file, filters, files)
    utils.split_dataset(file, len(files), args.train)
Code example #5
    def init_bootstrap(self, tuples):
        """
        starts a bootstrap iteration
        """
        if tuples is not None:
            f = open(tuples, "r")
            print "Loading pre-processed sentences", tuples
            self.processed_tuples = cPickle.load(f)
            f.close()
            print len(self.processed_tuples), "tuples loaded"

        self.curr_iteration = 0
        while self.curr_iteration <= self.config.number_iterations:
            print "=========================================="
            print "\nStarting iteration", self.curr_iteration
            print "\nLooking for seed matches of:"
            for s in self.config.positive_seed_tuples:
                print s.e1, '\t', s.e2

            # Looks for sentences matching the seed instances
            count_matches, matched_tuples = self.match_seeds_tuples()

            if len(matched_tuples) == 0:
                print "\nNo seed matches found"
                sys.exit(0)

            else:
                print "\nNumber of seed matches found"
                sorted_counts = sorted(count_matches.items(),
                                       key=operator.itemgetter(1),
                                       reverse=True)
                for t in sorted_counts:
                    print t[0][0], '\t', t[0][1], t[1]
                print "\n", len(matched_tuples), "tuples matched"

                # Cluster the matched instances: generate patterns
                print "\nClustering matched instances to generate patterns"
                if len(self.patterns) == 0:
                    self.cluster_tuples(matched_tuples)
                    # Eliminate patterns supported by fewer than 'min_pattern_support' tuples
                    new_patterns = [
                        p for p in self.patterns
                        if len(p.tuples) > self.config.min_pattern_support
                    ]
                    self.patterns = new_patterns

                else:
                    # Parallelize single-pass clustering
                    # Each tuple must be compared with each extraction pattern
                    # Map:
                    # - Divide the tuples into smaller lists, according to the number of CPUs
                    # - Pass each CPU a sublist of tuples and all the patterns; the comparison is done by each CPU
                    # Merge:
                    # - Each CPU sends the updated patterns and new patterns to the parent process
                    # - Merge patterns based on Pattern_id
                    # - Cluster new created patterns with single-pass clustering

                    # make a copy of the extraction patterns to be passed to each process
                    patterns = [
                        list(self.patterns) for _ in range(self.num_cpus)
                    ]

                    # distribute tuples per different CPUs
                    chunks = [list() for _ in range(self.num_cpus)]
                    n_tuples_per_child = int(
                        math.ceil(float(len(matched_tuples)) / self.num_cpus))

                    print "\n#CPUS", self.num_cpus, '\t', "Tuples per CPU", n_tuples_per_child
                    chunk_n = 0
                    chunck_begin = 0
                    chunck_end = n_tuples_per_child
                    while chunk_n < self.num_cpus:
                        chunks[chunk_n] = matched_tuples[
                            chunck_begin:chunck_end]
                        chunck_begin = chunck_end
                        chunck_end += n_tuples_per_child
                        chunk_n += 1

                    count = 0
                    for c in chunks:
                        print "CPU_" + str(count), len(c), "Patterns", len(
                            patterns[count])
                        count += 1

                    pipes = [
                        multiprocessing.Pipe(False)
                        for _ in range(self.num_cpus)
                    ]
                    processes = [
                        multiprocessing.Process(
                            target=self.cluster_tuples_parallel,
                            args=(patterns[i], chunks[i], pipes[i][1]))
                        for i in range(self.num_cpus)
                    ]

                    print "\nRunning", len(processes), " processes"
                    for proc in processes:
                        proc.start()

                    # Receive and merge all patterns by 'pattern_id'
                    # Newly created patterns (new pattern_id) go into 'child_patterns' and are then merged
                    # by single-pass clustering between patterns
                    child_patterns = list()
                    for i in range(len(pipes)):
                        data = pipes[i][0].recv()
                        patterns = data[1]
                        for p_updated in patterns:
                            pattern_exists = False
                            for p_original in self.patterns:
                                if p_original.id == p_updated.id:
                                    p_original.tuples.update(p_updated.tuples)
                                    pattern_exists = True
                                    break

                            if pattern_exists is False:
                                child_patterns.append(p_updated)

                    for proc in processes:
                        proc.join()

                    print "\n SELF Patterns:"
                    for p in self.patterns:
                        p.merge_all_tuples_bet()
                        print '\n' + str(p.id)
                        if self.config.alpha == 0 and self.config.gamma == 0:
                            for bet_words in p.bet_uniques_words:
                                print "BET", bet_words

                    print "\n Child Patterns:"
                    for p in child_patterns:
                        p.merge_all_tuples_bet()
                        print '\n' + str(p.id)
                        if self.config.alpha == 0 and self.config.gamma == 0:
                            for bet_words in p.bet_uniques_words:
                                print "BET", bet_words

                    print len(child_patterns), "newly created patterns"

                    # merge/aggregate similar patterns generated by the child processes
                    # start comparing smaller ones with greater ones
                    child_patterns.sort(key=lambda y: len(y.tuples),
                                        reverse=False)
                    count = 0
                    new_list = list(self.patterns)
                    for p1 in child_patterns:
                        print "\nNew Patterns", len(
                            child_patterns), "Processed", count
                        print "New List", len(new_list)
                        print "Pattern:", p1.id, "Tuples:", len(p1.tuples)
                        max_similarity = 0
                        max_similarity_cluster = None
                        for p2 in new_list:
                            if p1 == p2:
                                continue
                            score = self.similarity_cluster(p1, p2)
                            if score > max_similarity:
                                max_similarity = score
                                max_similarity_cluster = p2
                        if max_similarity >= self.config.threshold_similarity:
                            for t in p1.tuples:
                                max_similarity_cluster.tuples.add(t)
                        else:
                            new_list.append(p1)
                        count += 1

                    # add merged patterns to main patterns structure
                    for p in new_list:
                        if p not in self.patterns:
                            self.patterns.append(p)

                if self.curr_iteration == 0 and len(self.patterns) == 0:
                    print "No patterns generated"
                    sys.exit(0)

                print "\n", len(self.patterns), "patterns generated"

                # merge equal tuples inside patterns to make fewer comparisons when collecting instances
                for p in self.patterns:
                    # if only the BET context is being used, merge only based on BET contexts
                    if self.config.alpha == 0 and self.config.gamma == 0:
                        p.merge_all_tuples_bet()

                if PRINT_PATTERNS is True:
                    print "\nPatterns:"
                    for p in self.patterns:
                        print '\n' + str(p.id)
                        if self.config.alpha == 0 and self.config.gamma == 0:
                            for bet_words in p.bet_uniques_words:
                                print "BET", bet_words
                        else:
                            for t in p.tuples:
                                print "BEF", t.bef_words
                                print "BET", t.bet_words
                                print "AFT", t.aft_words
                                print "========"

                # Look for sentences with occurrences of the seeds' semantic types (e.g., ORG - LOC)

                # This was already collected and is stored in: self.processed_tuples
                #
                # Measure the similarity of each occurrence with each extraction pattern
                # and store each pattern that has a similarity higher than a given threshold
                #
                # Each candidate tuple will then have a number of patterns that extracted it
                # each with an associated degree of match.
                print "\nNumber of tuples to be analyzed:", len(
                    self.processed_tuples)

                print "\nCollecting instances based on", len(
                    self.patterns), "extraction patterns"
                # create copies of generated extraction patterns to be passed to each process
                patterns = [list(self.patterns) for _ in range(self.num_cpus)]

                # copy all tuples into a Queue shared by all processes
                manager = multiprocessing.Manager()
                queue = manager.Queue()
                for t in self.processed_tuples:
                    queue.put(t)

                # each distinct process receives as arguments:
                #   - a list, copy of all the original extraction patterns
                #   - a Queue of the tuples
                #   - a pipe to return the collected tuples and updated patterns to the parent process

                pipes = [
                    multiprocessing.Pipe(False) for _ in range(self.num_cpus)
                ]
                processes = [
                    multiprocessing.Process(target=self.find_instances,
                                            args=(patterns[i], queue,
                                                  pipes[i][1]))
                    for i in range(self.num_cpus)
                ]

                print "Running", len(processes), " processes"
                for proc in processes:
                    proc.start()

                # structures to store each process altered patterns and collected tuples
                patterns_updated = list()
                collected_tuples = list()

                for i in range(len(pipes)):
                    data = pipes[i][0].recv()
                    child_pid = data[0]
                    patterns = data[1]
                    tuples = data[2]
                    print child_pid, "patterns", len(patterns), "tuples", len(
                        tuples)
                    patterns_updated.extend(patterns)
                    collected_tuples.extend(tuples)

                for proc in processes:
                    proc.join()

                # Extraction patterns aggregation happens here:
                for p_updated in patterns_updated:
                    for p_original in self.patterns:
                        if p_original.id == p_updated.id:
                            p_original.positive += p_updated.positive
                            p_original.negative += p_updated.negative
                            p_original.unknown += p_updated.unknown

                # Index the patterns in a hashtable for later use
                for p in self.patterns:
                    self.patterns_index[p.id] = p

                # update all patterns confidence
                for p in self.patterns:
                    p.update_confidence(self.config)

                if PRINT_PATTERNS is True:
                    print "\nPatterns:"
                    for p in self.patterns:
                        print p.id
                        """
                        for t in p.tuples:
                            print "BEF", t.bef_words
                            print "BET", t.bet_words
                            print "AFT", t.aft_words
                            print "========"
                        """
                        print "Positive", p.positive
                        print "Negative", p.negative
                        print "Pattern Confidence", p.confidence
                        print "\n"

                # Candidate tuples aggregation happens here:
                print "Collecting generated candidate tuples"
                for e in collected_tuples:
                    t = e[0]
                    pattern_best = e[1]
                    sim_best = e[2]

                    # if this tuple was already extracted, check whether this extraction pattern is
                    # already associated with it; if not, associate this pattern and its similarity score with it
                    if t in self.candidate_tuples:
                        t_patterns = self.candidate_tuples[t]
                        if t_patterns is not None:
                            if pattern_best not in [x[0] for x in t_patterns]:
                                self.candidate_tuples[t].append(
                                    (self.patterns_index[pattern_best.id],
                                     sim_best))

                    # If this tuple was not extracted before, associate this pattern with the instance
                    # and the similarity score
                    else:
                        self.candidate_tuples[t].append(
                            (self.patterns_index[pattern_best.id], sim_best))

                # update tuple confidence based on patterns confidence
                print "\n\nCalculating tuples confidence"
                for t in self.candidate_tuples.keys():
                    confidence = 1
                    t.confidence_old = t.confidence
                    for p in self.candidate_tuples.get(t):
                        confidence *= 1 - (p[0].confidence * p[1])
                    t.confidence = 1 - confidence

                    # use past confidence values to calculate new tuple confidence
                    # if parameter Wupdt < 0.5 the system trusts new examples less on each iteration
                    # which will lead to more conservative patterns and have a damping effect.
                    if self.curr_iteration > 0:
                        t.confidence = t.confidence * self.config.wUpdt + t.confidence_old * (
                            1 - self.config.wUpdt)

                # sort tuples by confidence and print
                if PRINT_TUPLES is True:
                    extracted_tuples = self.candidate_tuples.keys()
                    tuples_sorted = sorted(extracted_tuples,
                                           key=lambda tl: tl.confidence,
                                           reverse=True)
                    for t in tuples_sorted:
                        print t.sentence
                        print t.e1, t.e2
                        print t.confidence
                        print "\n"

                # update seed set of tuples to use in next iteration
                # seeds = { T | conf(T) > instance_confidance }
                print "Adding tuples to seed with confidence >=" + str(
                    self.config.instance_confidance)
                for t in self.candidate_tuples.keys():
                    if t.confidence >= self.config.instance_confidance:
                        seed = Seed(t.e1, t.e2)
                        self.config.positive_seed_tuples.add(seed)

                # increment the number of iterations
                self.curr_iteration += 1

        self.write_relationships_to_disk()
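
The map/merge protocol spelled out in the comments above (chunk the tuples, give each worker its own one-way pipe, receive every result before joining) boils down to the following self-contained pattern; the squaring worker is only a stand-in for cluster_tuples_parallel:

import multiprocessing

def _work_chunk(chunk, conn):
    # Stand-in for cluster_tuples_parallel: process a chunk, send the result back.
    conn.send([x * x for x in chunk])
    conn.close()

if __name__ == '__main__':
    data = list(range(10))
    num_cpus = 2
    size = (len(data) + num_cpus - 1) // num_cpus
    chunks = [data[i * size:(i + 1) * size] for i in range(num_cpus)]
    pipes = [multiprocessing.Pipe(False) for _ in range(num_cpus)]
    procs = [multiprocessing.Process(target=_work_chunk,
                                     args=(chunks[i], pipes[i][1]))
             for i in range(num_cpus)]
    for p in procs:
        p.start()
    # recv() before join(): a worker blocked on a full pipe could never be joined
    results = [pipes[i][0].recv() for i in range(num_cpus)]
    for p in procs:
        p.join()
    print(results)
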
Code example #6
    def __init__(
            self,
            vehicle_name="Drone1",
            max_altitude=12,
            min_altitude=.45,
            time_to_exec_hover=1,
            image_mask_FC_FR_FL=[True, True, True
                                 ],  # Front Center, Front right, front left
            sim_mode="both_rgb",
            IMG_HEIGHT=128,
            IMG_WIDTH=128,
            IMG_STEP=3,
            reward_function=drone_forest_racer_rewarding_function):

        self.reward_function = reward_function
        self.mode = sim_mode
        self.time_to_exec_hover = time_to_exec_hover

        self.scaling_factor = .30  # Used as a constant gain factor for the action throttle.
        self.action_duration = .10  # (ms) Each action lasts this many seconds

        # The number of INERTIAL state variables to keep track of
        self.count_inertial_state_variables = 18  # PosX, PosY, PosZ, Vx, Vy, Vz, R, P, Y, Ax, Ay, Az, Rd, Pd, Yd, Rdd, Pdd, Ydd
        self.count_drone_actions = 14  # 6 Linear, 6 angular, 1 hover, 1 No Op (Don't change anything)

        # Simulator Image setup
        self.IMG_HEIGHT = IMG_HEIGHT
        self.IMG_WIDTH = IMG_WIDTH
        isRGB = False
        IMG_CHANNELS = 1
        if 'rgb' in self.mode:
            isRGB = True
            IMG_CHANNELS = 3
        isNormal = False
        if 'normal' in self.mode:
            isNormal = True
        self.IMG_CHANNELS = IMG_CHANNELS
        self.IMG_STEP = IMG_STEP
        self.IMG_VIEWS = np.sum(np.array(image_mask_FC_FR_FL, dtype=np.int))
        # Initialize the container that holds the sequence of images from the simulator
        self.obs4 = np.zeros(
            (self.IMG_HEIGHT, self.IMG_WIDTH,
             self.IMG_CHANNELS * self.IMG_STEP * self.IMG_VIEWS))

        # Initialize the current inertial state
        self.current_inertial_state = np.array(
            np.zeros(self.count_inertial_state_variables))

        # Initialize the IMAGE variables -- We Take in Front Center, Right, Left
        self.images_rgb = None
        self.images_rgba = None
        self.image_mask_rgb = np.array(
            [[0 + 3 * i, 1 + 3 * i, 2 + 3 * i]
             for m, i in zip(image_mask_FC_FR_FL, range(3)) if m]).reshape(-1)
        self.image_mask_rgba = np.array(
            [[0 + 4 * i, 1 + 4 * i, 2 + 4 * i]
             for m, i in zip(image_mask_FC_FR_FL, range(3)) if m]).reshape(-1)
        self.image_mask_FC_FR_FL = image_mask_FC_FR_FL

        # Set max altitude the quadcopter can hover
        self.max_altitude = max_altitude
        self.min_altitude = min_altitude

        # Connect to the AirSim simulator and begin:
        print('Initializing Client')
        self.client = client.MultirotorClient()
        self.client.confirmConnection()
        self.client.enableApiControl(True)
        self.client.armDisarm(True)
        print('Initialization Complete!')
        print("Setting Camera Views")
        orien = Vector3r(0, 0, 0)
        self.client.simSetCameraOrientation(0, orien)  #radians
        orien = Vector3r(0, .12, -np.pi / 9)
        self.client.simSetCameraOrientation(1, orien)
        orien = Vector3r(0, .12, np.pi / 9)
        self.client.simSetCameraOrientation(2, orien)
        # Reset Collision Flags
        print("Setting Camera Views DONE!")

        # Set up GUI Video Feeder
        self.gui_data = {'obs': None, 'state': None, 'meta': None}
        self.vehicle_name = vehicle_name
        num_video_feeds = np.sum(
            np.array(self.image_mask_FC_FR_FL, dtype=np.int)) * IMG_STEP
        GUIConn, self.simEnvDataConn = multiprocessing.Pipe()
        self.app = AirSimGUI.QuadcopterGUI(GUIConn,
                                           vehicle_names=[vehicle_name],
                                           num_video_feeds=num_video_feeds,
                                           isRGB=isRGB,
                                           isNormal=isNormal)

        # Timing Operations Initialize
        self.time_to_do_action = 0
        self.time_to_grab_images = 0
        self.time_to_grab_states = 0
        self.time_to_calc_reward = 0
        self.time_to_step = 0
        self.extra_metadata = None
Code example #7
def consumer(conn):
    while conn.poll():
        msg = conn.recv()
        print("\nReceived the data: {}".format(msg))
        myFile.write("\n" + str(msg))
    myFile.close()


if __name__ == "__main__":
    #beautify data and check data saving efficiency (delta t avg)

    # messages to be sent
    my_queue = queue.Queue()

    # creating a pipe
    producer_conn, consumer_conn = multiprocessing.Pipe()

    # sensor name, data value, timestamp
    #while True:
    print("BEGINNING OF LOOP")

    while my_queue.qsize() < 10:
        print("READING SENSORS")
        currentVal1 = ReadChannel(he_channel1)
        currentVal2 = ReadChannel(he_channel2)

        if (currentVal1 < 20):
            magnet1 = True
        else:
            magnet1 = False
Code example #8
File: com_pipe.py Project: qlurkin/qlurkin.github.io
import multiprocessing as mp


def compute(child):
    child.send('Hey !')
    child.close()


if __name__ == '__main__':
    parent, child = mp.Pipe()
    proc = mp.Process(target=compute, args=(child, ))
    proc.start()
    print(parent.recv())

    proc.join()  # Wait for the process to finish
    print('Finished with exit code', proc.exitcode)
Code example #9
 def __init__(self):
     self.receiver, self.sender = multiprocessing.Pipe(duplex=False)
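
With duplex=False the first connection returned is receive-only and the second is send-only, which is why the line above unpacks them as (receiver, sender). A quick sketch of that one-way contract (names illustrative):

import multiprocessing

def _producer(sender):
    sender.send('ping')
    sender.close()

if __name__ == '__main__':
    receiver, sender = multiprocessing.Pipe(duplex=False)
    proc = multiprocessing.Process(target=_producer, args=(sender,))
    proc.start()
    print(receiver.recv())  # 'ping'; calling send() on `receiver` would raise OSError
    proc.join()
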
Code example #10
import os
import sys
import traceback
import signal
import time
import syslog
import multiprocessing

from ansible.module_utils._text import to_text

PY3 = sys.version_info[0] == 3

syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))

# pipe for communication between forked process and parent
ipc_watcher, ipc_notifier = multiprocessing.Pipe()


def notice(msg):
    syslog.syslog(syslog.LOG_NOTICE, msg)


def daemonize_self():
    # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError:
        e = sys.exc_info()[1]
Code example #11
def run_sh(inpfile, outfile, source_lang, target_lang, k, lid_output, sampling,
           lang1_code, lang2_code, rcm_file, linguistic_theory):
    logger = logging.getLogger(__name__)
    working_dir = "{}/cm_text_gnenerator/generator/"
    errfile = '{}.err'.format(inpfile)
    shutil.rmtree("{}onetweet-{}-{}.txt".format(working_dir, source_lang,
                                                target_lang),
                  ignore_errors=True)
    shutil.rmtree(outfile, ignore_errors=True)
    count = 0
    outputs = []
    out_string = ""
    with open_file(inpfile, 'r') as inpfile_f:
        for line in inpfile_f.read().split('\n'):
            if line != "":
                out_string += line + '\n'
            else:
                arguments = out_string.split('\n')
                arguments.append(linguistic_theory)
                out_string = ""

                source, dest = multiprocessing.Pipe()
                p = multiprocessing.Process(
                    target=run_in_try,
                    args=(
                        cm_text_generator.bench_Merged.main,
                        source,
                        arguments,
                    ))
                p.start()
                t = 10
                p.join(t)
                ret = 'fail'
                if p.exitcode is not None and p.exitcode >= 0:
                    ret = dest.recv()
                dest.close()
                p.terminate()

                if type(ret) != str and len(ret) > 0:
                    # random sample only if k != -1 and sampling is not spf
                    if k != -1 and len(ret) >= k and sampling != 'spf':
                        ret = random.sample(ret, k)
                    # word level language tagging
                    if lid_output == 1:
                        ret = lang_tag(ret, arguments[3], source_lang,
                                       target_lang)
                    # spf based sampling
                    if sampling == 'spf':
                        langtags = [lang1_code.upper(), lang2_code.upper()]

                        spf_mean, spf_std = rcm_std_mean.main(
                            rcm_file, langtags)
                        ret = spf_sampling.rank(ret, langtags, spf_mean,
                                                spf_std)

                        if len(ret) >= k:
                            ret = ret[:k]

                    # final generated cm to be added for each input sentence pair
                    outputs.append(ret)
    return outputs
Code example #12
import multiprocessing
import random
import time, os


def proc_send(pipe, urls):
    for url in urls:
        print 'Process (%s) send: %s' % (os.getpid(), url)
        pipe.send(url)
        time.sleep(random.random())


def proc_recv(pipe):
    while True:
        print "Process(%s) rev: %s" % (os.getpid(), pipe.recv())
        time.sleep(random.random())


if __name__ == '__main__':
    pipe = multiprocessing.Pipe()
    p1 = multiprocessing.Process(target=proc_send,
                                 args=(pipe[0],
                                       ['url_' + str(i) for i in range(10)]))
    p2 = multiprocessing.Process(target=proc_recv, args=(pipe[1], ))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
Code example #13
File: 08_12_dppo_breakout.py Project: hccho2/RL-GYM
 def __init__(self, seed):
     self.child, parent = multiprocessing.Pipe()
     self.process = multiprocessing.Process(target=worker_process, args=(parent, seed))
     self.process.start()
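
The worker_process target is not shown in this excerpt; in DPPO-style setups it is typically a command loop serving requests that arrive on its end of the pipe. A hedged, self-contained sketch (the random sampling stands in for stepping the Breakout environment, which the real worker would do):

import multiprocessing
import random

def worker_process(remote, seed):
    # Illustrative command loop; the actual worker would step an Atari
    # environment here instead of sampling random numbers.
    rng = random.Random(seed)
    while True:
        cmd = remote.recv()
        if cmd == 'close':
            remote.close()
            break
        remote.send(rng.random())

if __name__ == '__main__':
    child, parent = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=worker_process, args=(parent, 47))
    proc.start()
    child.send('sample')
    print(child.recv())
    child.send('close')
    proc.join()
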
Code example #14
                if not msg:
                    break
                print('child: recv {!r}'.format(msg))
                s.send(msg)


def server(address, in_p, out_p, worker_pid):
    in_p.close()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
    s.bind(address)
    s.listen(5)
    while True:
        client, addr = s.accept()
        print('server: got connection from', addr)
        send_handle(out_p, client.fileno(), worker_pid)
        client.close()


if __name__ == '__main__':
    c1, c2 = multiprocessing.Pipe()
    worker_p = multiprocessing.Process(target=worker, args=(c1, c2))
    worker_p.start()

    server_p = multiprocessing.Process(target=server,
                                       args=(('localhost', 8080), c1, c2,
                                             worker_p.pid))

    server_p.start()
    c1.close()
    c2.close()
Code example #15
        self.rc1 = RCOsc(self.pit, sharp=0.8, mul=self.amp).mix(1)
        self.rc2 = RCOsc(self.pit * 0.99, sharp=0.8, mul=self.amp).mix(1)
        self.mix = Mix([self.rc1, self.rc2], voices=2)
        self.rev = STRev(Denorm(self.mix), [.1, .9], 2, bal=0.30).out()

        while True:
            if self.pipe.poll():
                data = self.pipe.recv()
                self.server.addMidiEvent(*data)
            time.sleep(0.001)

        self.server.stop()


if __name__ == '__main__':
    main1, child1 = multiprocessing.Pipe()
    main2, child2 = multiprocessing.Pipe()
    main3, child3 = multiprocessing.Pipe()
    main4, child4 = multiprocessing.Pipe()
    mains = [main1, main2, main3, main4]
    p1, p2, p3, p4 = Proc(child1), Proc(child2), Proc(child3), Proc(child4)
    p1.start()
    p2.start()
    p3.start()
    p4.start()

    playing = {0: [], 1: [], 2: [], 3: []}
    currentcore = 0

    def callback(status, data1, data2):
        global currentcore
Code example #16
 def __init__(self):
     self._pipe, child_pipe = multiprocessing.Pipe()
     self._process = multiprocessing.Process(target=worker,
                                             args=(child_pipe, ),
                                             daemon=True)
     self._process.start()
Code example #17
File: multiproc.py Project: cole-brown/veredi-code
def set_up(
    proc_name: str,
    config: Configuration,
    context: VerediContext,
    entry_fn: StartProcFn,
    t_proc_to_sub: Type['ProcToSubComm'] = ProcToSubComm,
    t_sub_to_proc: Type['SubToProcComm'] = SubToProcComm,
    finalize_fn: FinalizeInitFn = None,
    initial_log_level: Optional[log.Level] = None,
    debug_flags: Optional[DebugFlag] = None,
    unit_testing: Optional[bool] = False,
    proc_test: Optional[ProcTest] = None,
    shutdown: Optional[multiprocessing.Event] = None
) -> Optional[ProcToSubComm]:
    '''
    Get a process ready for _run_proc().

    If `t_proc_to_sub` and/or `t_sub_to_proc` are not default, those classes
    will be instantiated instead of ProcToSubComm / SubToProcComm.

    If `unit_testing`, creates the ut_pipe side-channel.

    If `finalize_fn`, sends both ProcToSubComm and SubToProcComm objects in to
    be processed just before set-up is complete.

    `shutdown` is an optional param in case caller wants multiple sub-processes
    to share the same shutdown flag.

    Returns a `t_proc_to_sub` (default: ProcToSubComm) object. When ready to
    start/run the subprocess, call start() on it.
    '''
    logger = log.get_logger(proc_name, min_log_level=initial_log_level)
    log_dotted = label.normalize(_DOTTED_FUNCS, 'set_up')

    if proc_test and proc_test.has(ProcTest.DNE):
        # This process 'Does Not Exist' right now.
        # Should we downgrade this to debug, or error out more heavily?
        # (i.e. exception?)
        log.group_multi(_LOG_INIT,
                        log_dotted,
                        "'{}' has {}. Skipping creation.",
                        proc_name,
                        proc_test,
                        veredi_logger=logger,
                        log_minimum=log.Level.ERROR,
                        log_success=False)
        return None

    # ------------------------------
    # Create multiproc IPC stuff.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating inter-process communication...",
                    proc_name,
                    veredi_logger=logger)

    # The official us<->them IPC pipe.
    child_pipe, parent_pipe = multiprocessing.Pipe()

    # The side-channel/unit-test us<->them IPC pipe.
    ut_child_pipe, ut_parent_pipe = None, None
    if unit_testing:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Creating unit-testing "
                        "inter-process communication...",
                        proc_name,
                        veredi_logger=logger)
        ut_child_pipe, ut_parent_pipe = multiprocessing.Pipe()
        context.add('proc-test', proc_test)

    # multiproc shutdown flag
    if not shutdown:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Creating shutdown inter-process "
                        "event flag...",
                        proc_name,
                        veredi_logger=logger)
        shutdown = multiprocessing.Event()

    # ------------------------------
    # Create the process's private info.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating process comms objects...",
                    proc_name,
                    veredi_logger=logger)

    # Info for the proc itself to own.
    comms = t_sub_to_proc(name=proc_name,
                          config=config,
                          entry_fn=entry_fn,
                          pipe=child_pipe,
                          shutdown=shutdown,
                          debug_flags=debug_flags,
                          ut_pipe=ut_child_pipe)

    # ---
    # Updated Context w/ start-up info (SubToProcComm, etc).
    # ---
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Saving into the ConfigContext...",
                    proc_name,
                    veredi_logger=logger)
    ConfigContext.set_log_level(context, initial_log_level)
    ConfigContext.set_subproc(context, comms)

    # ------------------------------
    # Create the Process, ProcToSubComm
    # ------------------------------
    subp_args = [context]
    subp_kwargs = {}

    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Creating the sub-process object...",
                    proc_name,
                    veredi_logger=logger)

    # Create the process object (doesn't start the process).
    subprocess = multiprocessing.Process(
        # _subproc_entry() is always the target; it will do some setup and then
        # call the actual target: `entry_fn`.
        target=_subproc_entry,
        name=proc_name,
        args=subp_args,
        kwargs=subp_kwargs)

    # Info for the caller about the proc and how to talk to it.
    proc = t_proc_to_sub(name=proc_name,
                         process=subprocess,
                         pipe=parent_pipe,
                         shutdown=shutdown,
                         ut_pipe=ut_parent_pipe)

    # ------------------------------
    # Use Finalize Callback, if supplied.
    # ------------------------------
    if finalize_fn:
        log.group_multi(_LOG_INIT,
                        log_dotted, "'{}': Finalize function supplied. "
                        "Calling {}...",
                        proc_name,
                        finalize_fn,
                        veredi_logger=logger)
        finalize_fn(proc, comms)

    # ------------------------------
    # Return ProcToSubComm for caller to use to communicate to sub-proc.
    # ------------------------------
    log.group_multi(_LOG_INIT,
                    log_dotted,
                    "'{}': Set-up complete.",
                    proc_name,
                    veredi_logger=logger)
    return proc
Code example #18
        # performance timer
        timeend = timeit.default_timer()
        timepass = (int((timeend - starttime) * 1000)) / 1000

        frame_out = ui.detection_gui(frame2, sampleresolution, NumObjectsDetected, object_size, ROI_List, timepass, (rows, cols), bg_detect_sensitivity)

        cv2.imshow("Detector", frame_out)
        
        if cv2.waitKey(1) & 0xFF == 27:
            mainPipe.send(PROCESS_SHUTDOWN)
            print("Algorithm Processor: EXIT REQUEST")


if __name__ == "__main__":
    AlgorithmSide, CameraSide = multiprocessing.Pipe()
    MainSide, AlgorithmToMainSide = multiprocessing.Pipe()
    p1 = multiprocessing.Process(target=CameraProcess, args=(AlgorithmSide,))
    p2 = multiprocessing.Process(target=AlgorithmProcess, args=(CameraSide, AlgorithmToMainSide))
    p1.start()
    p2.start()
    while True:
        if MainSide.recv() == PROCESS_SHUTDOWN:
            print("Main Process: EXIT REQUEST")
            break
    p1.terminate()
    print("Camera Process Terminated")
    p2.terminate()
    print("Algorithm Process Terminated")
    print("Have a good day :)")
    sys.exit(0)
Code example #19
    def crawl(self):

        pip = multiprocessing.Pipe()
        pipe1 = pip[0]
        pipe2 = pip[1]

        print("爬虫进程开始运行")
        while (True):
            try:
                if not self.task.empty():
                    airline = self.task.get()

                    con = pymysql.connect(host='111.231.143.45', user='******', passwd='woshinibaba', db='flight',
                                          port=3306,
                                          charset='utf8')
                    cur = con.cursor()

                    table_date = time.strftime("%Y_%m_%d", time.localtime())
                    try:
                        cur.execute('''CREATE TABLE Flight_%s''' % table_date + ''' (
                                          airline      varchar(255)     NOT NULL,
                                          flight_id    varchar(255)     NOT NULL,
                                          model        varchar(255)     NOT NULL,
                                          dept_date    date             NOT NULL,
                                          dept_time    varchar(255)     NOT NULL,
                                          dept_city    varchar(255)     NOT NULL,
                                          dept_airport varchar(255)     NOT NULL,
                                          arv_date     date             NOT NULL,
                                          arv_time     varchar(255)     NOT NULL,
                                          arv_city     varchar(255)     NOT NULL,
                                          arv_airport  varchar(255)     NOT NULL,
                                          isstop       float            NOT NULL,
                                          tran_city    varchar(255)     NOT NULL,
                                          tran_arvdate date             NOT NULL,
                                          tran_arvtime varchar(255)     NOT NULL,
                                          tran_depdate date             NOT NULL,
                                          tran_deptime varchar(255)     NOT NULL,
                                          flight_day   float            NOT NULL,
                                          ontime_Rate  float            NOT NULL,
                                          price_1      float            NOT NULL,
                                          price_2      float            NOT NULL,
                                          price_3      float            NOT NULL
                                          );''')
                    except:
                        print("已经存在数据库")

                    if airline == 'end':
                        print('Control node told the crawler node to stop working...')
                        # then notify the other nodes to stop working
                        # self.result.put({'confirmed_airline': 'end', 'data': 'end'})
                        return

                    print('get: <<<<<<<<' + airline + '>>>>>>>>>>>')

                    target = airline.split('|')
                    date_list = []
                    for i in range(180):
                        date_list.append((datetime.date.today() + datetime.timedelta(days=i + 1)).strftime("%Y-%m-%d"))


                    d_city = target[1]
                    a_city = target[2]

                    browser_proc = multiprocessing.Process(target = self.camouflage_broewser, args = (pipe1, date_list[0], d_city, a_city))
                    browser_proc.start()

                    cookie = pipe2.recv()

                    for i in range(len(date_list)):
                        print('Crawler node parsing: travel date %s | departure city %s | arrival city %s' % (date_list[i], d_city, a_city))
                        self.mainWork(date_list[i], d_city, a_city, cookie,con ,cur, pipe2)
                        time.sleep(random.random() + 4)
                        if self.fail_flag > 5:
                            break

                    pipe2.send('ok')
                    browser_proc.join()
                    print("浏览器已经关闭,线程同步")
                    if self.fail_flag > 5:
                        self.result.put(airline)
                        print("[!]通知控制节点重新爬取:  " + airline)
                        self.fail_flag = 0


            except (EOFError) as e:
                print("连接工作节点失败")
                return
            except (Exception) as e:
                print(e)
                print('Crawl failed')
Code example #20

import multiprocessing as mp
import time


def proc_func(my_pipe):
    try:
        for i in range(10):
            time.sleep(1)
            my_pipe.send(f"{i}: from {mp.current_process().name}")
            if i == 7:
                my_pipe.send("CLOSED")
                break
    except OSError as xe:
        print("Pipe was closed")


if __name__ == '__main__':
    first_dialog_pipe, second_dialog_pipe = mp.Pipe(duplex=True)
    new_process = mp.Process(target=proc_func,
                             name="new_process",
                             args=(second_dialog_pipe, ))

    new_process.start()

    try:
        while True:
            if first_dialog_pipe.poll(0.2):
                print('data:', end=' ')
                text = first_dialog_pipe.recv()
                print(text)
                if text == "CLOSED":
                    break
            else:
Code example #21
    def AddNewActor(self, pubsub_evt):
        """
        Create surface actor, save into project and send it to viewer.
        """
        slice_, mask, surface_parameters = pubsub_evt.data
        matrix = slice_.matrix
        filename_img = slice_.matrix_filename
        spacing = slice_.spacing

        algorithm = surface_parameters['method']['algorithm']
        options = surface_parameters['method']['options']

        surface_name = surface_parameters['options']['name']
        quality = surface_parameters['options']['quality']
        fill_holes = surface_parameters['options']['fill']
        keep_largest = surface_parameters['options']['keep_largest']

        mode = 'CONTOUR'  # 'GRAYSCALE'
        min_value, max_value = mask.threshold_range
        colour = mask.colour

        try:
            overwrite = surface_parameters['options']['overwrite']
        except KeyError:
            overwrite = False
        mask.matrix.flush()

        if quality in const.SURFACE_QUALITY.keys():
            imagedata_resolution = const.SURFACE_QUALITY[quality][0]
            smooth_iterations = const.SURFACE_QUALITY[quality][1]
            smooth_relaxation_factor = const.SURFACE_QUALITY[quality][2]
            decimate_reduction = const.SURFACE_QUALITY[quality][3]

        #if imagedata_resolution:
        #imagedata = iu.ResampleImage3D(imagedata, imagedata_resolution)

        pipeline_size = 4
        if decimate_reduction:
            pipeline_size += 1
        if (smooth_iterations and smooth_relaxation_factor):
            pipeline_size += 1
        if fill_holes:
            pipeline_size += 1
        if keep_largest:
            pipeline_size += 1

        ## Update progress value in GUI
        UpdateProgress = vu.ShowProgress(pipeline_size)
        UpdateProgress(0, "Creating 3D surface...")

        language = ses.Session().language

        if (prj.Project().original_orientation == const.CORONAL):
            flip_image = False
        else:
            flip_image = True

        n_processors = multiprocessing.cpu_count()

        pipe_in, pipe_out = multiprocessing.Pipe()
        o_piece = 1
        piece_size = 2000

        n_pieces = int(round(matrix.shape[0] / piece_size + 0.5, 0))

        q_in = multiprocessing.Queue()
        q_out = multiprocessing.Queue()

        p = []
        for i in xrange(n_processors):
            sp = surface_process.SurfaceProcess(
                pipe_in, filename_img, matrix.shape, matrix.dtype,
                mask.temp_file, mask.matrix.shape, mask.matrix.dtype, spacing,
                mode, min_value, max_value, decimate_reduction,
                smooth_relaxation_factor, smooth_iterations, language,
                flip_image, q_in, q_out, algorithm != 'Default', algorithm,
                imagedata_resolution)
            p.append(sp)
            sp.start()

        for i in xrange(n_pieces):
            init = i * piece_size
            end = init + piece_size + o_piece
            roi = slice(init, end)
            q_in.put(roi)
            print "new_piece", roi

        for i in p:
            q_in.put(None)

        none_count = 1
        while 1:
            msg = pipe_out.recv()
            if (msg is None):
                none_count += 1
            else:
                UpdateProgress(msg[0] / (n_pieces * pipeline_size), msg[1])

            if none_count > n_pieces:
                break

        polydata_append = vtk.vtkAppendPolyData()
        #  polydata_append.ReleaseDataFlagOn()
        t = n_pieces
        while t:
            filename_polydata = q_out.get()

            reader = vtk.vtkXMLPolyDataReader()
            reader.SetFileName(filename_polydata)
            #  reader.ReleaseDataFlagOn()
            reader.Update()
            #  reader.GetOutput().ReleaseDataFlagOn()

            polydata = reader.GetOutput()
            #  polydata.SetSource(None)

            polydata_append.AddInputData(polydata)
            del reader
            del polydata
            t -= 1

        polydata_append.Update()
        #  polydata_append.GetOutput().ReleaseDataFlagOn()
        polydata = polydata_append.GetOutput()
        #polydata.Register(None)
        #  polydata.SetSource(None)
        del polydata_append

        if algorithm == 'ca_smoothing':
            normals = vtk.vtkPolyDataNormals()
            normals_ref = weakref.ref(normals)
            normals_ref().AddObserver(
                "ProgressEvent", lambda obj, evt: UpdateProgress(
                    normals_ref(), "Creating 3D surface..."))
            normals.SetInputData(polydata)
            #  normals.ReleaseDataFlagOn()
            #normals.SetFeatureAngle(80)
            #normals.AutoOrientNormalsOn()
            normals.ComputeCellNormalsOn()
            #  normals.GetOutput().ReleaseDataFlagOn()
            normals.Update()
            del polydata
            polydata = normals.GetOutput()
            #  polydata.SetSource(None)
            del normals

            clean = vtk.vtkCleanPolyData()
            #  clean.ReleaseDataFlagOn()
            #  clean.GetOutput().ReleaseDataFlagOn()
            clean_ref = weakref.ref(clean)
            clean_ref().AddObserver(
                "ProgressEvent", lambda obj, evt: UpdateProgress(
                    clean_ref(), "Creating 3D surface..."))
            clean.SetInputData(polydata)
            clean.PointMergingOn()
            clean.Update()

            del polydata
            polydata = clean.GetOutput()
            #  polydata.SetSource(None)
            del clean

            #  try:
            #  polydata.BuildLinks()
            #  except TypeError:
            #  polydata.BuildLinks(0)
            #  polydata = ca_smoothing.ca_smoothing(polydata, options['angle'],
            #  options['max distance'],
            #  options['min weight'],
            #  options['steps'])

            mesh = cy_mesh.Mesh(polydata)
            cy_mesh.ca_smoothing(mesh, options['angle'],
                                 options['max distance'],
                                 options['min weight'], options['steps'])
            #  polydata = mesh.to_vtk()

            #  polydata.SetSource(None)
            #  polydata.DebugOn()
        else:
            #smoother = vtk.vtkWindowedSincPolyDataFilter()
            smoother = vtk.vtkSmoothPolyDataFilter()
            smoother_ref = weakref.ref(smoother)
            smoother_ref().AddObserver(
                "ProgressEvent", lambda obj, evt: UpdateProgress(
                    smoother_ref(), "Creating 3D surface..."))
            smoother.SetInputData(polydata)
            smoother.SetNumberOfIterations(smooth_iterations)
            smoother.SetRelaxationFactor(smooth_relaxation_factor)
            smoother.SetFeatureAngle(80)
            #smoother.SetEdgeAngle(90.0)
            #smoother.SetPassBand(0.1)
            smoother.BoundarySmoothingOn()
            smoother.FeatureEdgeSmoothingOn()
            #smoother.NormalizeCoordinatesOn()
            #smoother.NonManifoldSmoothingOn()
            #  smoother.ReleaseDataFlagOn()
            #  smoother.GetOutput().ReleaseDataFlagOn()
            smoother.Update()
            del polydata
            polydata = smoother.GetOutput()
            #polydata.Register(None)
            #  polydata.SetSource(None)
            del smoother

        if decimate_reduction:
            print "Decimating", decimate_reduction
            decimation = vtk.vtkQuadricDecimation()
            #  decimation.ReleaseDataFlagOn()
            decimation.SetInputData(polydata)
            decimation.SetTargetReduction(decimate_reduction)
            decimation_ref = weakref.ref(decimation)
            decimation_ref().AddObserver(
                "ProgressEvent", lambda obj, evt: UpdateProgress(
                    decimation_ref(), "Creating 3D surface..."))
            #decimation.PreserveTopologyOn()
            #decimation.SplittingOff()
            #decimation.BoundaryVertexDeletionOff()
            #  decimation.GetOutput().ReleaseDataFlagOn()
            decimation.Update()
            del polydata
            polydata = decimation.GetOutput()
            #polydata.Register(None)
            #  polydata.SetSource(None)
            del decimation

        #to_measure.Register(None)
        #  to_measure.SetSource(None)

        if keep_largest:
            conn = vtk.vtkPolyDataConnectivityFilter()
            conn.SetInputData(polydata)
            conn.SetExtractionModeToLargestRegion()
            conn_ref = weakref.ref(conn)
            conn_ref().AddObserver(
                "ProgressEvent", lambda obj, evt: UpdateProgress(
                    conn_ref(), "Creating 3D surface..."))
            conn.Update()
            #  conn.GetOutput().ReleaseDataFlagOn()
            del polydata
            polydata = conn.GetOutput()
            #polydata.Register(None)
            #  polydata.SetSource(None)
            del conn

        #Filter used to detect and fill holes. Only fills boundary-edge holes.
        #TODO: Hey! This piece of code is the same from
        #polydata_utils.FillSurfaceHole, we need to review this.
        if fill_holes:
            filled_polydata = vtk.vtkFillHolesFilter()
            #  filled_polydata.ReleaseDataFlagOn()
            filled_polydata.SetInputData(polydata)
            filled_polydata.SetHoleSize(300)
            filled_polydata_ref = weakref.ref(filled_polydata)
            filled_polydata_ref().AddObserver(
                "ProgressEvent", lambda obj, evt: UpdateProgress(
                    filled_polydata_ref(), "Creating 3D surface..."))
            filled_polydata.Update()
            #  filled_polydata.GetOutput().ReleaseDataFlagOn()
            del polydata
            polydata = filled_polydata.GetOutput()
            #polydata.Register(None)
            #  polydata.SetSource(None)
            #  polydata.DebugOn()
            del filled_polydata

        to_measure = polydata

        # If InVesalius is running without GUI
        if wx.GetApp() is None:
            proj = prj.Project()
            #Create Surface instance
            if overwrite:
                surface = Surface(index=self.last_surface_index)
                proj.ChangeSurface(surface)
            else:
                surface = Surface(name=surface_name)
                index = proj.AddSurface(surface)
                surface.index = index
                self.last_surface_index = index
            surface.colour = colour
            surface.polydata = polydata

        # With GUI
        else:
            normals = vtk.vtkPolyDataNormals()
            #  normals.ReleaseDataFlagOn()
            normals_ref = weakref.ref(normals)
            normals_ref().AddObserver(
                "ProgressEvent", lambda obj, evt: UpdateProgress(
                    normals_ref(), "Creating 3D surface..."))
            normals.SetInputData(polydata)
            normals.SetFeatureAngle(80)
            normals.AutoOrientNormalsOn()
            #  normals.GetOutput().ReleaseDataFlagOn()
            normals.Update()
            del polydata
            polydata = normals.GetOutput()
            #polydata.Register(None)
            #  polydata.SetSource(None)
            del normals

            # Improve performance
            stripper = vtk.vtkStripper()
            #  stripper.ReleaseDataFlagOn()
            stripper_ref = weakref.ref(stripper)
            stripper_ref().AddObserver(
                "ProgressEvent", lambda obj, evt: UpdateProgress(
                    stripper_ref(), "Creating 3D surface..."))
            stripper.SetInputData(polydata)
            stripper.PassThroughCellIdsOn()
            stripper.PassThroughPointIdsOn()
            #  stripper.GetOutput().ReleaseDataFlagOn()
            stripper.Update()
            del polydata
            polydata = stripper.GetOutput()
            #polydata.Register(None)
            #  polydata.SetSource(None)
            del stripper

            # Map polygonal data (vtkPolyData) to graphics primitives.
            mapper = vtk.vtkPolyDataMapper()
            mapper.SetInputData(polydata)
            mapper.ScalarVisibilityOff()
            #  mapper.ReleaseDataFlagOn()
            mapper.ImmediateModeRenderingOn()  # improve performance

            # Represent an object (geometry & properties) in the rendered scene
            actor = vtk.vtkActor()
            actor.SetMapper(mapper)
            del mapper
            # Create Surface instance
            if overwrite:
                surface = Surface(index=self.last_surface_index)
            else:
                surface = Surface(name=surface_name)
            surface.colour = colour
            surface.polydata = polydata
            del polydata

            # Set actor colour and transparency
            actor.GetProperty().SetColor(colour)
            actor.GetProperty().SetOpacity(1 - surface.transparency)

            prop = actor.GetProperty()

            interpolation = int(ses.Session().surface_interpolation)

            prop.SetInterpolation(interpolation)

            proj = prj.Project()
            if overwrite:
                proj.ChangeSurface(surface)
            else:
                index = proj.AddSurface(surface)
                surface.index = index
                self.last_surface_index = index

            session = ses.Session()
            session.ChangeProject()

            measured_polydata = vtk.vtkMassProperties()
            #  measured_polydata.ReleaseDataFlagOn()
            measured_polydata.SetInputData(to_measure)
            volume = float(measured_polydata.GetVolume())
            area = float(measured_polydata.GetSurfaceArea())
            surface.volume = volume
            surface.area = area
            self.last_surface_index = surface.index
            del measured_polydata
            del to_measure

            Publisher.sendMessage('Load surface actor into viewer', actor)

            # If overwriting, remove the previous surface actor from the viewer
            if overwrite and self.actors_dict.keys():
                old_actor = self.actors_dict[self.last_surface_index]
                Publisher.sendMessage('Remove surface actor from viewer',
                                      old_actor)

            # Save actor for future management tasks
            self.actors_dict[surface.index] = actor

            Publisher.sendMessage(
                'Update surface info in GUI',
                (surface.index, surface.name, surface.colour, surface.volume,
                 surface.area, surface.transparency))

            # When the progress finishes, the bar is cleared.
            UpdateProgress = vu.ShowProgress(1)
            UpdateProgress(0, "Ready")
            Publisher.sendMessage('Update status text in GUI', "Ready")

            Publisher.sendMessage('End busy cursor')
            del actor
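
The method above is one long VTK pipeline: each filter takes the previous stage's polydata via SetInputData(), runs on Update(), and hands a new polydata out of GetOutput(). A minimal sketch of the same connect-update-swap pattern, using a sphere source as a stand-in input (the InVesalius progress observers and project bookkeeping are omitted):

import vtk

# Stand-in input; the real pipeline starts from image-derived polydata.
source = vtk.vtkSphereSource()
source.Update()
polydata = source.GetOutput()

# Decimate, recompute normals, then map to an actor, mirroring the
# decimation -> normals -> mapper -> actor steps above.
decimation = vtk.vtkQuadricDecimation()
decimation.SetInputData(polydata)
decimation.SetTargetReduction(0.5)
decimation.Update()
polydata = decimation.GetOutput()

normals = vtk.vtkPolyDataNormals()
normals.SetInputData(polydata)
normals.SetFeatureAngle(80)
normals.AutoOrientNormalsOn()
normals.Update()
polydata = normals.GetOutput()

mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(polydata)
mapper.ScalarVisibilityOff()

actor = vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetColor(1.0, 0.0, 0.0)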
Code example #22
File: pipeserver.py Project: motobig/pypilot
def NonBlockingPipe(name, recvfailok=False):
    pipe = multiprocessing.Pipe()
    return (NonBlockingPipeEnd(pipe[0], name + '[0]', recvfailok),
            NonBlockingPipeEnd(pipe[1], name + '[1]', recvfailok))
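
NonBlockingPipeEnd is defined elsewhere in pypilot. As a rough illustration only, a wrapper with that shape might poll the connection so recv() returns None instead of blocking (a hypothetical sketch, not pypilot's actual class):

class NonBlockingPipeEnd(object):
    # Hypothetical sketch: wrap one Connection end and make recv()
    # return None when no data is queued, instead of blocking.
    def __init__(self, conn, name, recvfailok):
        self.conn = conn
        self.name = name
        self.recvfailok = recvfailok  # if True, swallow recv errors

    def send(self, value):
        self.conn.send(value)

    def recv(self):
        try:
            if self.conn.poll():   # data ready?
                return self.conn.recv()
            return None            # nothing queued; do not block
        except Exception:
            if self.recvfailok:
                return None
            raise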
Code example #23
    def __init__(self):
        self._closed = False
        self._reader, self._writer = mp.Pipe(duplex=False)
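
With duplex=False, mp.Pipe() returns a one-way channel: the first connection can only receive and the second can only send, which matches the reader/writer naming above. For example:

import multiprocessing as mp

reader, writer = mp.Pipe(duplex=False)
writer.send('ping')
print(reader.recv())   # 'ping'
# Sending on the read-only end (reader.send) would raise OSError.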
Code example #24
    def __init__(self, task_fn, log_dir=None):
        self.pipe, worker_pipe = mp.Pipe()
        self.worker = _ProcessWrapper(worker_pipe, task_fn, log_dir)
        self.worker.start()
        # Handshake: ask the worker process for its environment specs.
        self.pipe.send([_ProcessWrapper.SPECS, None])
        self.state_dim, self.action_dim, self.name = self.pipe.recv()
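
The constructor blocks on recv() until the worker answers the SPECS request, so construction doubles as a liveness check. The worker side of such a request/response protocol might look like this (hypothetical; the real _ProcessWrapper handles more message types):

SPECS = 0  # assumed message tag; the real constant lives on _ProcessWrapper

def worker(pipe, task_fn):
    task = task_fn()
    while True:
        op, arg = pipe.recv()
        if op == SPECS:
            # Answer the handshake with (state_dim, action_dim, name).
            pipe.send((task.state_dim, task.action_dim, task.name))
        else:
            break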
Code example #25
import multiprocessing


def square_list(mylist, q):
    # Assumed helper (its body is not shown in the fragment): push each
    # square onto the queue, then a None sentinel to mark the end.
    for num in mylist:
        q.put(num * num)
    q.put(None)


def print_queue(q):
    # Assumed helper: consume queue items until the sentinel arrives.
    while True:
        item = q.get()
        if item is None:
            break
        print("Queue got:", item)


def sender(conn, msgs):
    # Assumed helper: send each message, then close our end of the pipe.
    for msg in msgs:
        conn.send(msg)
    conn.close()


def receiver(conn):
    p = multiprocessing.current_process()
    while True:
        msg = conn.recv()
        if msg == "END":
            print(f"{p.name}: {msg} received, stopping")
            break
        print(f"{p.name}: Received the message: {msg}")


if __name__ == "__main__":
    mylist = [1, 2, 3, 4]
    q = multiprocessing.Queue()

    p1 = multiprocessing.Process(target=square_list, args=(mylist, q))
    p2 = multiprocessing.Process(target=print_queue, args=(q, ))

    p1.start()
    p2.start()
    p1.join()
    p2.join()

    msgs = ["hello", "hey", "hru?", "END"]
    parent_conn, child_conn = multiprocessing.Pipe()

    p3 = multiprocessing.Process(target=sender, args=(parent_conn, msgs))
    p4 = multiprocessing.Process(target=receiver, args=(child_conn, ))

    p3.start()
    p4.start()
    p3.join()
    p4.join()
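
Note the division of labour: the Queue carries data between p1 and p2 without pairing endpoints, while the Pipe gives p3 and p4 one dedicated connection each. Since Connection objects support poll(), the receiver could also bound its waiting time rather than block forever:

def receiver_with_timeout(conn):
    # Wait at most one second per message instead of blocking forever.
    while conn.poll(timeout=1.0):
        msg = conn.recv()
        if msg == "END":
            break
        print("Received:", msg)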
Code example #26
from storage_controller import StorageController

import multiprocessing as mp
import threading
import time
import random
import numpy as np
import pytest
import os.path
import glob
import subprocess

storage_receiver, storage_sender = mp.Pipe(duplex=False)
filepath_receiver, filepath_sender = mp.Pipe(duplex=False)
file_header_receiver, file_header_sender = mp.Pipe(duplex=False)
reading_to_be_stored_event = mp.Event()
filepath_available_event = mp.Event()
file_header_available_event = mp.Event()

reading = bytearray([
    0xad, 0xde, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xff, 0xee, 0xff, 0xee,
    0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee,
    0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee,
    0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee,
    0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee,
    0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee, 0xff, 0xee
])

chunk_size = 20
active_channels = 0xffff
start_time = 1478300446552583
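
Each pipe here is paired with an mp.Event so a producer can announce that data is waiting without the consumer parking in recv(). A minimal sketch of that handshake, assuming this is how the StorageController test exercises the pair (its actual loop may differ):

def produce(sender, event, data):
    sender.send(data)
    event.set()                 # signal: a reading is waiting

def consume(receiver, event, timeout=1.0):
    if event.wait(timeout):     # block until signalled (or time out)
        event.clear()
        return receiver.recv()
    return None

# e.g. produce(storage_sender, reading_to_be_stored_event, bytes(reading))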
Code example #27
def fork(pkg, function, dirty, fake):
    """Fork a child process to do part of a spack build.

    Args:

        pkg (PackageBase): package whose environment we should set up the
            forked process for.
        function (callable): argless function to run in the child
            process.
        dirty (bool): If True, do NOT clean the environment before
            building.
        fake (bool): If True, skip package setup b/c it's not a real build

    Usage::

        def child_fun():
            # do stuff
        build_env.fork(pkg, child_fun)

    Forked processes are run with the build environment set up by
    spack.build_environment.  This allows package authors to have full
    control over the environment, etc. without affecting other builds
    that might be executed in the same spack call.

    If something goes wrong, the child process catches the error and
    passes it to the parent wrapped in a ChildError.  The parent is
    expected to handle (or re-raise) the ChildError.
    """

    def child_process(child_pipe, input_stream):
        # We are in the child process. Python sets sys.stdin to
        # open(os.devnull) to prevent our process and its parent from
        # simultaneously reading from the original stdin. But, we assume
        # that the parent process is not going to read from it till we
        # are done with the child, so we undo Python's precaution.
        if input_stream is not None:
            sys.stdin = input_stream

        try:
            if not fake:
                setup_package(pkg, dirty=dirty)
            return_value = function()
            child_pipe.send(return_value)
        except StopIteration as e:
            # StopIteration is used to stop installations
            # before the final stage, mainly for debug purposes
            tty.msg(e)
            child_pipe.send(None)

        except BaseException:
            # catch ANYTHING that goes wrong in the child process
            exc_type, exc, tb = sys.exc_info()

            # Need to unwind the traceback in the child because traceback
            # objects can't be sent to the parent.
            tb_string = traceback.format_exc()

            # build up some context from the offending package so we can
            # show that, too.
            package_context = get_package_context(tb)

            build_log = None
            if hasattr(pkg, 'log_path'):
                build_log = pkg.log_path

            # make a pickleable exception to send to parent.
            msg = "%s: %s" % (exc_type.__name__, str(exc))

            ce = ChildError(msg,
                            exc_type.__module__,
                            exc_type.__name__,
                            tb_string, build_log, package_context)
            child_pipe.send(ce)

        finally:
            child_pipe.close()

    parent_pipe, child_pipe = multiprocessing.Pipe()
    input_stream = None
    try:
        # Forward sys.stdin when appropriate, to allow toggling verbosity
        if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
            input_stream = os.fdopen(os.dup(sys.stdin.fileno()))

        p = multiprocessing.Process(
            target=child_process, args=(child_pipe, input_stream))
        p.start()

    except InstallError as e:
        e.pkg = pkg
        raise

    finally:
        # Close the input stream in the parent process
        if input_stream is not None:
            input_stream.close()

    child_result = parent_pipe.recv()
    p.join()

    # let the caller know which package went wrong.
    if isinstance(child_result, InstallError):
        child_result.pkg = pkg

    # If the child process raised an error, print its output here rather
    # than waiting until the call to SpackError.die() in main(). This
    # allows exception handling output to be logged from within Spack.
    # see spack.main.SpackCommand.
    if isinstance(child_result, ChildError):
        child_result.print_context()
        raise child_result

    return child_result
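
The essential trick in fork() is that live exception and traceback objects cannot cross the process boundary, so the child catches everything, flattens the traceback to a string, and ships a picklable error through the pipe for the parent to re-raise. Stripped of the spack specifics, the pattern reduces to something like this (a sketch, not spack's API):

import multiprocessing
import traceback

def _child(pipe, fn):
    try:
        pipe.send(('ok', fn()))
    except BaseException:
        # Traceback objects are not picklable; send the formatted text.
        pipe.send(('err', traceback.format_exc()))
    finally:
        pipe.close()

def run_in_child(fn):
    # fn must be picklable under the 'spawn' start method.
    parent, child = multiprocessing.Pipe()
    p = multiprocessing.Process(target=_child, args=(child, fn))
    p.start()
    status, payload = parent.recv()
    p.join()
    if status == 'err':
        raise RuntimeError(payload)
    return payload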
Code example #28
File: visualiser.py Project: Riya5915/HRL

import pickle
import time
import multiprocessing as mp

import cloudpickle


class PickleWrapper():
    # Pickle via cloudpickle (handles lambdas) but load via plain pickle.
    def __init__(self, var):
        self.var = var

    def __getstate__(self):
        return cloudpickle.dumps(self.var)

    def __setstate__(self, var):
        self.var = pickle.loads(var)


if __name__ == '__main__':
    to_pickle = PickleWrapper(lambda: Plotter())
    parent_conn, child_conn = mp.Pipe()
    args = (
        to_pickle,
        child_conn,
    )
    ctx = mp.get_context('spawn')
    process = ctx.Process(target=worker, args=args, daemon=True)
    process.start()
    child_conn.close()

    parent_conn.send(('add_active_policy', [['TR'], {}]))

    time.sleep(5)

    parent_conn.send(('remove_active_policy', [['TR'], {}]))
    parent_conn.send(('add_active_policy', [['TL'], {}]))
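
cloudpickle is the point of the wrapper: the stdlib pickler refuses lambdas such as the Plotter factory above, and the spawn context pickles every Process argument. Assuming cloudpickle is installed, the difference is easy to see:

import pickle
import cloudpickle

f = lambda: 42
blob = cloudpickle.dumps(f)   # works: serialises the code object itself
g = pickle.loads(blob)        # plain pickle can still load the blob back
print(g())                    # 42
# pickle.dumps(f) would raise a PicklingError instead.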
Code example #29
    def __init__(self, env_name, crop_size, n_agents, **kwargs):
        # Keep one end of the pipe; hand the other to the worker process.
        self.child, parent = multiprocessing.Pipe()
        self.process = multiprocessing.Process(
            target=worker_process,
            args=(parent, env_name, crop_size, n_agents, kwargs))
        self.process.start()
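
The matching worker_process presumably builds the environment in the child and then serves commands from the pipe. A hypothetical minimal counterpart (the fragment does not show the real worker or its message set, and make_env is an assumed factory):

def worker_process(conn, env_name, crop_size, n_agents, kwargs):
    env = make_env(env_name, crop_size, n_agents, **kwargs)  # assumed factory
    while True:
        cmd, data = conn.recv()
        if cmd == 'step':
            conn.send(env.step(data))
        elif cmd == 'reset':
            conn.send(env.reset())
        elif cmd == 'close':
            conn.close()
            break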
Code example #30
    def __init__(self, init_func):
        self._init_func = init_func
        self.proc = None

        parent_chan, self.child_chan = multiprocessing.Pipe()
        BlockingChannel.__init__(self, parent_chan)