import os
import signal
import sys
from multiprocessing import Value

# NTHREADS, ThreadSafeFile and estimate_covariates are defined in the surrounding module.
def fit_regression_models(expression, expression_indices):
    # both the predictor and the response matrices use the first six columns
    pred_expression = expression[:, (0, 1, 2, 3, 4, 5)]
    resp_expression = expression[:, (0, 1, 2, 3, 4, 5)]

    shared_index = Value('i', 0)
    pids = []
    with ThreadSafeFile("output_noshift.txt", "w") as ofp:
        for p_index in range(NTHREADS):
            pid = os.fork()
            if pid == 0:
                # child: claim row indices from the shared counter until exhausted
                while True:
                    with shared_index.get_lock():
                        i = shared_index.value
                        if i >= len(expression): break
                        shared_index.value += 1
                    alpha, nonzero_coefs = estimate_covariates(
                        pred_expression, resp_expression, i)
                    output_str = "{}\t{}\t{}\n".format(
                        expression_indices[i], alpha, "\t".join(
                            str(expression_indices[x]) for x in nonzero_coefs))
                    print(output_str, end="")
                    ofp.write(output_str)
                os._exit(0)  # children must not fall back into the parent's loop
            else:
                pids.append(pid)
        try:
            for pid in pids:
                os.waitpid(pid, 0)
        except:
            # on any error, terminate the remaining children before re-raising
            for pid in pids:
                os.kill(pid, signal.SIGTERM)
            raise
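# --- Hedged usage sketch (not from the original module) ---
# The same fork-plus-shared-counter pattern in isolation: a Value('i', 0)
# hands out work indices to forked children. NTHREADS, WORK_ITEMS and
# run_workers are illustrative names only, and os.fork() makes this POSIX-only.
import os
import sys
from multiprocessing import Value

NTHREADS = 4
WORK_ITEMS = list(range(20))

def run_workers():
    shared_index = Value('i', 0)
    pids = []
    for _ in range(NTHREADS):
        pid = os.fork()
        if pid == 0:  # child: claim indices until the list is exhausted
            while True:
                with shared_index.get_lock():
                    i = shared_index.value
                    if i >= len(WORK_ITEMS):
                        break
                    shared_index.value += 1
                sys.stdout.write("pid {} handled item {}\n".format(
                    os.getpid(), WORK_ITEMS[i]))
            os._exit(0)  # leave the parent's code path entirely
        pids.append(pid)
    for pid in pids:  # parent waits for every child
        os.waitpid(pid, 0)

if __name__ == '__main__':
    run_workers()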
Example #2
import itertools
from ctypes import c_char_p
from multiprocessing import Pool, Value, cpu_count

# checkSecret, shutDownPool and outputSecret are defined elsewhere in the module.
def check(token, characters, maxlength):
    global sharedToken
    global sharedN
    global sharedC

    # shared state is created before the pool so that forked workers inherit it
    sharedToken = Value(c_char_p, token.encode())
    sharedN = Value('i', 0)
    sharedC = Value('i', 0)

    cores = cpu_count()
    pool = Pool(cores)

    print("Number of cores in use: " + str(cores))

    for n in range(0, maxlength):
        with sharedN.get_lock():
            sharedN.value = n
        generator = itertools.product(characters, repeat=n)
        for secret in pool.imap_unordered(checkSecret, generator, 5000):
            # a hit may be the empty string, so test it explicitly rather than by truthiness alone
            if secret or secret == "":
                shutDownPool(pool)
                outputSecret(secret)
                return secret

    # every candidate length was exhausted without a match
    shutDownPool(pool)
    outputSecret(None)
    return None
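# --- Hedged usage sketch (not from the original module) ---
# A self-contained version of the Pool + itertools.product brute-force pattern
# above. To stay start-method agnostic it passes the target through the pool
# initializer instead of a shared c_char_p Value; TARGET, CHARSET, try_candidate
# and brute_force are illustrative names only.
import itertools
from multiprocessing import Pool, cpu_count

TARGET = b"dbg"      # hypothetical secret to recover
CHARSET = "abcdefg"

_token = None

def _init_worker(token):
    # runs once per worker process and stores the target locally
    global _token
    _token = token

def try_candidate(chars):
    candidate = "".join(chars)
    return candidate if candidate.encode() == _token else None

def brute_force(maxlength):
    with Pool(cpu_count(), initializer=_init_worker, initargs=(TARGET,)) as pool:
        for n in range(0, maxlength + 1):
            gen = itertools.product(CHARSET, repeat=n)
            for hit in pool.imap_unordered(try_candidate, gen, 500):
                if hit is not None:
                    return hit
    return None

if __name__ == '__main__':
    print(brute_force(3))   # expected to print "dbg"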
from ctypes import c_double
from multiprocessing import Value
from time import sleep


class BankAccount(object):
    def __init__(self, owner, initial_deposit):
        # the balance lives in shared memory so several processes can update it
        self.amount = Value(c_double, initial_deposit)
        self.owner = owner

    def __str__(self):
        return "%s: %s€" % (self.owner, self.amount.value)

    def virement(self, money):
        # "virement" (transfer): a read-modify-write of the shared balance,
        # performed while holding the Value's lock for the whole sequence
        with self.amount.get_lock():
            # get current amount
            current_amount = self.amount.value

            # network slow time
            sleep(0.01)

            # doing computation
            current_amount += money

            # hard drive latency
            sleep(0.01)

            self.amount.value = current_amount
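# --- Hedged usage sketch (not from the original example) ---
# Two processes depositing into the same BankAccount at once. Because virement()
# holds the Value's lock for the entire read-compute-write sequence, no deposit
# is lost; deposit_many and the amounts below are illustrative.
from multiprocessing import Process

def deposit_many(account, times, amount):
    for _ in range(times):
        account.virement(amount)

if __name__ == '__main__':
    account = BankAccount("Alice", 0.0)
    workers = [Process(target=deposit_many, args=(account, 10, 5.0))
               for _ in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(account)   # "Alice: 100.0€" (2 workers x 10 deposits x 5.0)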
Example #4
    class Main_System(object):
        # Main_System does not keep its own copy of the network: only the copy
        # inside the TrainDQN class is modified by training, so its state_dict()
        # is passed to the subprocesses.
        def __init__(self, train, num_of_processes, others=''):
            self.train = train
            self.processes = []
            self.actor_update_time = 10.
            self.lr_step = 0
            self.pending_training_updates = Value('d', 0., lock=True)
            # RawValue is also read and written through ".value"; the object itself is just the underlying c_double / c_int
            self.episode = RawValue('i', 0)
            self.t_done = Value('d', 0., lock=True)
            self.last_achieved_time = RawValue('d', 0.)
            # set the data going to subprocesses:
            self.train.memory.start_proxy_process(
                (self.pending_training_updates, self.episode, self.t_done,
                 self.last_achieved_time), self.train.transitions_storage,
                (self.train.batch_size, self.train.memory.tree.data_size))
            # the following creates manager threads that never finish on their own and can keep the process from exiting cleanly
            spawn = mp.get_context('spawn')
            self.manager = spawn.Manager()
            self.MemoryInputQueue = self.manager.Queue()
            self.end_event = self.manager.Event()
            self.pause_event = self.manager.Event()
            self.learning_in_progress_event = self.manager.Event()
            # actors
            # unidirectional pipe: messages sent on ActorUpdatePipe are received on ActorReceivePipe
            self.ActorReceivePipe, self.ActorUpdatePipe = spawn.Pipe(False)
            seed = random.randrange(0, 9999999)
            self.worker_manager = spawn.Process(
                target=worker_manager,
                args=(copy.deepcopy(train.net).cpu(),
                      (self.MemoryInputQueue, self.ActorReceivePipe,
                       self.end_event, self.pause_event), num_of_processes,
                      seed, others))
            # store and manage experience (including priority updates and sampling of replays);
            # all arguments passed here are consumed by the fork initialization in the RL module.
            self.train.memory.set_memory_source(
                self.MemoryInputQueue, (self.pause_event, self.end_event,
                                        self.learning_in_progress_event))
            self.backup_period = self.train.backup_period

        def __call__(self, num_of_episodes):
            started = False
            self.worker_manager.start()
            last_time = time.time()
            last_idle_time = 0.
            updates_done = 0.
            # RL.py assumes batch_size == 256 and that each experience is learned 8 times;
            # the factor below rescales the update count when those settings change.
            # If training is disabled, 'inf' makes the training condition below always False.
            downscaling_of_default_num_updates = (
                8. / args.n_times_per_sample) * (
                    args.batch_size / 256.) if args.train else float('inf')

            while self.episode.value < num_of_episodes or (
                    self.episode.value < args.maximum_trails_before_giveup
                    and not self.learning_in_progress_event.is_set()):
                something_done = False  # tracks whether any work gets done in this pass of the event loop
                remaining_updates = self.pending_training_updates.value - updates_done
                if remaining_updates >= 1. * downscaling_of_default_num_updates:
                    if (remaining_updates >= 150. * downscaling_of_default_num_updates
                            and not self.pause_event.is_set()):
                        self.pause_event.set()
                        print('Wait for training')
                    loss = self.train()
                    # if training is parallelized into a separate process, the following block should be deleted
                    if loss is not None:
                        updates_done += 1. * downscaling_of_default_num_updates
                        something_done = True  # one training step is done
                        if not started: started = True
                        # to limit how often get_lock() is called, the shared
                        # "pending_training_updates" counter is only reset periodically
                        if (updates_done >= 200. * downscaling_of_default_num_updates
                                or self.pause_event.is_set()):
                            with self.pending_training_updates.get_lock():
                                self.pending_training_updates.value -= updates_done
                                updates_done = 0.
                if (remaining_updates < 50. * downscaling_of_default_num_updates
                        and self.pause_event.is_set()):
                    self.pause_event.clear()
                if self.t_done.value >= self.actor_update_time:
                    self.scale_up_actor_update_time(self.last_achieved_time.value)
                    if not self.ActorReceivePipe.poll() and started and not args.LQG:
                        self.ActorUpdatePipe.send(self.train.net.state_dict())
                        with self.t_done.get_lock():
                            self.t_done.value = 0.
                        something_done = True
                if something_done:
                    # report how long the training process has been idle
                    if last_idle_time != 0. and time.time() - last_time > 40.:
                        print('trainer pending for {:.1f} seconds out of {:.1f}'.format(
                            last_idle_time, time.time() - last_time))
                        last_idle_time = 0.
                        last_time = time.time()
                # if nothing is done, wait.
                if not something_done:
                    time.sleep(0.01)
                    last_idle_time += 0.01
                self.adjust_learning_rate()
            self.end_event.set()
            self.worker_manager.join()
            return

        def scale_up_actor_update_time(self, achieved_time):
            changed = False
            if achieved_time > 80. and self.actor_update_time <= 150.:
                self.actor_update_time = 1000.
                changed = True
            elif achieved_time > 20. and self.actor_update_time <= 50.:
                self.actor_update_time = 150.
                changed = True
            elif achieved_time > 10. and self.actor_update_time <= 25.:
                self.actor_update_time = 50.
                changed = True
            elif achieved_time > 5. and self.actor_update_time <= 10.:
                self.actor_update_time = 25.
                changed = True
            if changed and args.train:
                print('actor_update_time adjusted to {:.1f}'.format(
                    self.actor_update_time))

        def adjust_learning_rate(self):
            # the learning rate schedule is defined in "arguments.py"
            if (self.episode.value > args.lr_schedule[self.lr_step][0]
                    and self.last_achieved_time.value == t_max):
                args.lr = min(args.lr_schedule[self.lr_step][1], args.lr)
                self.lr_step += 1
                if args.train:
                    for param_group in self.train.optim.param_groups:
                        param_group['lr'] = args.lr
                    print(colored('learning rate set to {:.2g}'.format(args.lr),
                                  attrs=['bold']))
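# --- Hedged usage sketch (not from the original class) ---
# The shared-counter pattern Main_System relies on, reduced to its core: a
# locked Value('d') absorbs increments from several processes, while a RawValue
# is left unsynchronized. The actor function and the counts below are
# illustrative names only.
import multiprocessing as mp

def actor(pending_updates, episode, n_steps):
    for _ in range(n_steps):
        with pending_updates.get_lock():    # many writers -> locked update
            pending_updates.value += 1.0
    episode.value += 1                      # unlocked: concurrent increments may be lost

if __name__ == '__main__':
    ctx = mp.get_context('spawn')           # same start method the class above uses
    pending_updates = ctx.Value('d', 0.0, lock=True)
    episode = ctx.RawValue('i', 0)
    procs = [ctx.Process(target=actor, args=(pending_updates, episode, 1000))
             for _ in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    # pending_updates is exactly 4000.0; episode may be below 4 because RawValue
    # provides no synchronization for read-modify-write
    print(pending_updates.value, episode.value)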