Example #1
 def test_history_recording_simple_model(self):
     """Test that history in memory matches with that recorded for test one-dimensional model."""
     self.param, self.like = onedmodel()
     model = Model(self.like, self.param)
     step = Dream(model=model, model_name='test_history_recording')
     history_arr = mp.Array('d', [0] * 4 * step.total_var_dimension)
     n = mp.Value('i', 0)
     nchains = mp.Value('i', 3)
     pydream.Dream_shared_vars.history = history_arr
     pydream.Dream_shared_vars.count = n
     pydream.Dream_shared_vars.nchains = nchains
     test_history = np.array([[1], [3], [5], [7]])
     for chainpoint in test_history:
         for point in chainpoint:
             step.record_history(nseedchains=0,
                                 ndimensions=step.total_var_dimension,
                                 q_new=point,
                                 len_history=len(history_arr))
     history_arr_np = np.frombuffer(
         pydream.Dream_shared_vars.history.get_obj())
     history_arr_np_reshaped = history_arr_np.reshape(
         np.shape(test_history))
     self.assertIs(np.array_equal(history_arr_np_reshaped, test_history),
                   True)
     remove('test_history_recording_DREAM_chain_history.npy')
     remove('test_history_recording_DREAM_chain_adapted_crossoverprob.npy')
     remove('test_history_recording_DREAM_chain_adapted_gammalevelprob.npy')
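The pattern this test relies on is that np.frombuffer(shared_arr.get_obj()) creates a NumPy view over the same shared memory, so the recorded history can be inspected without copying. A minimal standalone illustration of that pattern (the variable names here are illustrative, not taken from the test):

import multiprocessing as mp
import numpy as np

shared = mp.Array('d', [0.0] * 4)        # typecode 'd' -> C double / float64
view = np.frombuffer(shared.get_obj())   # zero-copy view over the shared buffer
view[:] = [1.0, 2.0, 3.0, 4.0]           # writes go straight through to the Array
assert list(shared[:]) == [1.0, 2.0, 3.0, 4.0]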
Example #2
 def test_history_recording_multidim_model(self):
     """Test that history in memory matches with that recorded for test multi-dimensional model."""
     self.param, self.like = multidmodel()
     model = Model(self.like, self.param)
     dream = Dream(model=model, model_name='test_history_recording')
     history_arr = mp.Array('d', [0] * 4 * dream.total_var_dimension * 3)
     n = mp.Value('i', 0)
     nchains = mp.Value('i', 3)
     pydream.Dream_shared_vars.history = history_arr
     pydream.Dream_shared_vars.count = n
     pydream.Dream_shared_vars.nchains = nchains
     test_history = np.array([[[1, 2, 3, 4], [3, 4, 5, 6], [5, 6, 7, 8]],
                              [[7, 8, 9, 10], [9, 12, 18, 20],
                               [11, 14, 18, 8]],
                              [[13, 14, 18, 4], [15, 17, 11, 8],
                               [17, 28, 50, 4]],
                              [[19, 21, 1, 18], [21, 19, 19, 11],
                               [23, 4, 3, 2]]])
     for chainpoint in test_history:
         for point in chainpoint:
             dream.record_history(nseedchains=0,
                                  ndimensions=dream.total_var_dimension,
                                  q_new=point,
                                  len_history=len(history_arr))
     history_arr_np = np.frombuffer(
         pydream.Dream_shared_vars.history.get_obj())
     history_arr_np_reshaped = history_arr_np.reshape(
         np.shape(test_history))
     self.assertIs(np.array_equal(history_arr_np_reshaped, test_history),
                   True)
     remove('test_history_recording_DREAM_chain_history.npy')
     remove('test_history_recording_DREAM_chain_adapted_crossoverprob.npy')
     remove('test_history_recording_DREAM_chain_adapted_gammalevelprob.npy')
Example #3
def fit_two_poolable(file, two_pulse_fit, pulse_params, height_th, sigma0):
    counter = mp.Value('i', 0)
    try:
        time = pu.time_vector(file)
        signal = trcp.trace_extr(file, height_th)
        # fname = file.split('/')[-1].split('.trc')[0]
        fname = file
        # print('success')
        result = pfp.fit_two_cw(time, signal, two_pulse_fit, pulse_params,
                                height_th, sigma0)
        # Extract results
        results_summary, keys = results_extr(result)
        df = results_packager(results_summary, keys, fname)
        # Sends Telegram Message to Update Status
        # counter.value+=1
        # times_toupdate = 20
        # text_tosend = '\nProcess finished:'+ '%.1f'%(counter.value/len(tasks)*100) + "%"
        # print text_tosend
        # try:
        #     if counter.value % int(len(tasks)/times_toupdate) == 0:
        #         sender_bot.sendMessage(chat_id=uid,text=text_tosend)
        # except:
        #     pass

        # Extracts useful data for saving
        return df
    except Exception:
        # print(fname + ' failed')  # e.g. if the file is corrupt
        # raise
        return None
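fit_two_poolable catches its own failures and returns either a results DataFrame or None, which suggests it is meant to be mapped over many trace files from a process pool. A hypothetical usage sketch, assuming a list of paths named files and the fit settings already in scope:

import functools
import multiprocessing as mp

worker = functools.partial(fit_two_poolable,
                           two_pulse_fit=two_pulse_fit,
                           pulse_params=pulse_params,
                           height_th=height_th,
                           sigma0=sigma0)
with mp.Pool() as pool:
    # None entries correspond to files that failed to load or fit
    dataframes = [df for df in pool.map(worker, files) if df is not None]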
Example #4
def start_train(resume):

    urllib.request.urlretrieve(const.url + '/network_file',
                               'deliverables/network.py')
    urllib.request.urlretrieve(const.url + '/config_file',
                               'deliverables/input_params.py')
    urllib.request.urlretrieve(const.url + '/observation_file',
                               'deliverables/observation.py')
    urllib.request.urlretrieve(const.url + '/curriculum_file',
                               'deliverables/curriculum.py')

    num_workers = mp.cpu_count() - 1
    should_stop = mp.Value(c_bool, False)

    while True:
        worker_processes = []

        # create_worker(0, should_stop)

        # Start process 1 - n, running in other processes
        for w_num in range(0, num_workers):
            process = mp.Process(target=create_worker,
                                 args=(w_num, should_stop))
            process.start()
            sleep(0.5)
            worker_processes.append(process)

        try:
            for p in worker_processes:
                p.join()
        except KeyboardInterrupt:
            should_stop.value = True
            break

    print("Looks like we're done")
Example #5
 def test_proposal_generation_snooker(self):
     """Test that proposal generation with a snooker update returns values of the expected shape."""
     self.param, self.like = multidmodel()
     model = Model(self.like, self.param)
     step = Dream(model=model)
     history_arr = mp.Array('d', list(range(120)))
     n = mp.Value('i', 0)
     pydream.Dream_shared_vars.history = history_arr
     pydream.Dream_shared_vars.count = n
     step.nseedchains = 20
     q0 = np.array([2, 3, 4, 5])
     proposed_pt, snooker_logp, z = step.generate_proposal_points(
         n_proposed_pts=1,
         q0=q0,
         CR=1,
         DEpairs=1,
         gamma_level=1,
         snooker=True)
     self.assertEqual(len(proposed_pt), step.total_var_dimension)
     proposed_pts, snooker_logp, z = step.generate_proposal_points(
         n_proposed_pts=5,
         q0=q0,
         CR=1,
         DEpairs=1,
         gamma_level=1,
         snooker=True)
     self.assertEqual(len(proposed_pts), 5)
Example #6
 def test_chain_sampling_multidim_model(self):
     """Test that sampling from DREAM history for multi-dimensional model when the history is known matches with expected possible samples."""
     self.params, self.like = multidmodel()
     model = Model(likelihood=self.like, sampled_parameters=self.params)
     dream = Dream(model=model)
     history_arr = mp.Array('d', [0] * 2 * dream.total_var_dimension)
     n = mp.Value('i', 0)
     pydream.Dream_shared_vars.history = history_arr
     pydream.Dream_shared_vars.count = n
     chains_added_to_history = []
     for i in range(2):
         start = i * dream.total_var_dimension
         end = start + dream.total_var_dimension
         chain = dream.draw_from_prior(model.sampled_parameters)
         pydream.Dream_shared_vars.history[start:end] = chain
         chains_added_to_history.append(chain)
     sampled_chains = dream.sample_from_history(
         nseedchains=2, DEpairs=1, ndimensions=dream.total_var_dimension)
     sampled_chains = np.array(sampled_chains)
     chains_added_to_history = np.array(chains_added_to_history)
     self.assertIs(
         np.array_equal(
             chains_added_to_history[chains_added_to_history[:,
                                                             0].argsort()],
             sampled_chains[sampled_chains[:, 0].argsort()]), True)
Example #7
 def __init__(self, value=0):
     """a counter where the value can be shared by multiple processes
     """
     try:  #XXX: is multiprocess ever useful here vs multiprocessing?
         import multiprocess as mp
     except ImportError:
         import multiprocessing as mp
     self.val = mp.Value('i', value)
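Only __init__ is shown above; to be safe across processes, updates to the counter have to go through the Value's lock. A sketch of the methods such a class would typically add (the method names are assumptions, not part of the original):

 def increment(self, n=1):
     """Atomically add n to the shared value."""
     with self.val.get_lock():
         self.val.value += n

 def value(self):
     """Return the current shared value."""
     return self.val.value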
Example #8
    def __init__(self, num_of_processes=2):
        super(MultiprocessTool, self).__init__()
        global global_last_id
        global_last_id = mp.Value('i', 0)
        self.num_of_processes = num_of_processes

        lock = mp.Lock()
        self.lock = lock
Example #9
 def test_crossover_prob_estimation(self):
     """Test that crossover probabilities are updated as expected when changing or not changing parameter locations and giving points that give a greater jump distance."""
     self.param, self.like = multidmodel()
     model = Model(self.like, self.param)
     dream = Dream(model=model, save_history=False)
     starting_crossover = dream.CR_probabilities
     crossover_probabilities = mp.Array('d', starting_crossover)
     n = mp.Value('i', 0)
     nCR = dream.nCR
     CR_vals = dream.CR_values
     ncrossover_updates = mp.Array('d', [0] * nCR)
     current_position_arr = mp.Array(
         'd', [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4])
     dream.nchains = 5
     delta_m = mp.Array('d', [0] * nCR)
     dream.chain_n = 0
     pydream.Dream_shared_vars.cross_probs = crossover_probabilities
     pydream.Dream_shared_vars.count = n
     pydream.Dream_shared_vars.ncr_updates = ncrossover_updates
     pydream.Dream_shared_vars.current_positions = current_position_arr
     pydream.Dream_shared_vars.delta_m = delta_m
     q0 = np.array([1, 2, 3, 4])
     q_new = np.array([1, 2, 3, 4])
     new_cr_probs = dream.estimate_crossover_probabilities(
         dream.total_var_dimension, q0, q_new, CR_vals[0])
     self.assertEqual(np.array_equal(new_cr_probs, starting_crossover),
                      True)
     q_new = np.array([1.2, 2.2, 3.3, 4.4])
     new_cr_probs = dream.estimate_crossover_probabilities(
         dream.total_var_dimension, q0, q_new, CR_vals[0])
     self.assertEqual(np.array_equal(new_cr_probs, starting_crossover),
                      True)
     q_new = np.array([2, 3, 4, 5])
     new_cr_probs = dream.estimate_crossover_probabilities(
         dream.total_var_dimension, q0, q_new, CR_vals[1])
     self.assertEqual(np.array_equal(new_cr_probs, starting_crossover),
                      True)
     q_new = np.array([11, -15, 20, 9])
     new_cr_probs = dream.estimate_crossover_probabilities(
         dream.total_var_dimension, q0, q_new, CR_vals[2])
     self.assertEqual(np.array_equal(new_cr_probs, starting_crossover),
                      False)
     self.assertGreater(new_cr_probs[2], starting_crossover[2])
     self.assertAlmostEqual(np.sum(new_cr_probs), 1.0, places=1)
     old_cr_probs = new_cr_probs
     for i, q_new in zip(list(range(5)), [
             np.array([15]),
             np.array([17]),
             np.array([19]),
             np.array([21]),
             np.array([23])
     ]):
         new_cr_probs = dream.estimate_crossover_probabilities(
             dream.total_var_dimension, q0, q_new, CR_vals[1])
     self.assertEqual(np.array_equal(new_cr_probs, old_cr_probs), False)
Example #10
 def __init__(self, db, username, password, host):
     super(CampaignRuleConstraintMigrator, self).__init__()
     self.db = db
     self.username = username
     self.password = password
     self.host = host
     self.conn = psycopg2.connect(database=self.db,
                                  user=self.username,
                                  password=self.password,
                                  host=self.host)
     self.cur = self.conn.cursor()
     global global_last_id
     global_last_id = mp.Value('i', 0)
Example #11
def test_sharedvalues():
    values = [('i', 10), ('h', -2), ('d', 1.25)]
    arrays = [('i', range(100)), ('d', [0.25 * i for i in range(100)]),
              ('H', range(1000))]

    shared_values = [processing.Value(id, v) for id, v in values]
    shared_arrays = [processing.Array(id, a) for id, a in arrays]

    p = processing.Process(target=sharedvalues_func,
                           args=(values, arrays, shared_values, shared_arrays))
    p.start()
    p.join()

    assert p.exitcode == 0
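sharedvalues_func is not shown above; presumably the child process simply verifies that every shared Value and Array still holds the plain data it was built from, roughly along these lines (a sketch, not the original helper):

def sharedvalues_func(values, arrays, shared_values, shared_arrays):
    # Child-side check: each shared object must match its plain counterpart
    for (typecode, value), shared in zip(values, shared_values):
        assert shared.value == value
    for (typecode, array), shared in zip(arrays, shared_arrays):
        assert list(shared[:]) == list(array)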
Example #12
def test_semaphore():
    sema = processing.Semaphore(3)
    mutex = processing.RLock()
    running = processing.Value('i', 0)

    processes = [
        processing.Process(target=semaphore_func, args=(sema, mutex, running))
        for i in range(10)
    ]

    for p in processes:
        p.start()

    for p in processes:
        p.join()
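semaphore_func is likewise not shown; a plausible sketch is that each of the ten processes waits on the semaphore (so at most three run concurrently) and reports in and out through the shared counter under the mutex:

import random
import time

def semaphore_func(sema, mutex, running):
    with sema:                      # at most 3 holders at any moment
        with mutex:
            running.value += 1
            print(running.value, 'processes currently running')
        time.sleep(random.random())
        with mutex:
            running.value -= 1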
Example #13
def main():
    enable = mp.Value('i', 0)
    while True:
        # check whether to start
        sign = input("Enter a control command: ")
        print('sign = {}'.format(sign))
        if sign == '1':  #bytes([0x01])
            print('connect')
            enable.value = 1
            mp.Process(target=motionControl, args=(enable, )).start()
            mp.Process(target=displayImage, args=(enable, )).start()

        elif sign == '2':
            print('disconnect')
            enable.value = 0
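motionControl and displayImage are not shown; a minimal sketch of such a worker, assuming all it has to do is poll the shared flag and stop once the parent clears it:

import time

def motionControl(enable):
    # Hypothetical worker: run while the shared flag is 1, exit once cleared
    while enable.value == 1:
        # ... perform one control step ...
        time.sleep(0.1)
    print('motionControl stopped')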
Example #14
 def __init__(self, db, username, password, host):
     super(GeosFiller, self).__init__()
     self.db = db
     self.username = username
     self.password = password
     self.host = host
     self.conn = psycopg2.connect(
         database=self.db,
         user=self.username,
         password=self.password,
         host=self.host
     )
     self.cur = self.conn.cursor()
     global global_last_id
     global_last_id = mp.Value('i', 0)
Example #15
def test_value():
    TASKS = 10
    running = processing.Value('i', TASKS)
    mutex = processing.Lock()

    for i in range(TASKS):
        processing.Process(target=value_func, args=(running, mutex)).start()

    while running.value > 0:
        time.sleep(0.08)
        mutex.acquire()
        print(running.value, end=' ')
        sys.stdout.flush()
        mutex.release()

    print()
    print('No more running processes')
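value_func is not shown either; given the polling loop above, each task presumably decrements the shared counter under the lock when it finishes, roughly:

import random
import time

def value_func(running, mutex):
    # Hypothetical task body: sleep briefly, then decrement the shared counter
    random.seed()
    time.sleep(random.random() * 2)
    with mutex:
        running.value -= 1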
Example #16
def main():

    enable = mp.Value('i', 0)
    imgQueue = mp.Queue(0)
    imgQueueBin = mp.Queue(0)

    while True:
        sign = receive()
        if sign == 1:
            print('connect')
            enable.value = 1
            mp.Process(target=motionControl,
                       args=(enable, imgQueue, imgQueueBin)).start()
            mp.Process(target=displayImage,
                       args=(enable, imgQueue, imgQueueBin)).start()
        elif sign == 2:
            print('disconnect')
            enable.value = 0
Example #17
 def test_proposal_generation_nosnooker_CR66(self):
     """Test proposal generation without a snooker update with a single or multiple proposed points and a crossover value of 2/3 gives 2/3 of all dimensions changed on average as expected."""
     self.param, self.like = multidmodel()
     model = Model(self.like, self.param)
     step = Dream(model=model)
     history_arr = mp.Array('d', list(range(120)))
     n = mp.Value('i', 0)
     pydream.Dream_shared_vars.history = history_arr
     pydream.Dream_shared_vars.count = n
     step.nseedchains = 20
     q0 = np.array([2, 3, 4, 5])
     dims_kept = 0
     for iteration in range(100000):
         proposed_pt = step.generate_proposal_points(n_proposed_pts=1,
                                                     q0=q0,
                                                     CR=.66,
                                                     DEpairs=1,
                                                     gamma_level=1,
                                                     snooker=False)
         if iteration == 1:
             self.assertEqual(len(proposed_pt), 1)
         dims_change_vec = np.squeeze(q0 == proposed_pt)
         for dim in dims_change_vec:
             if dim:
                 dims_kept += 1
     frac_kept = dims_kept / (step.total_var_dimension * 100000.0)
     self.assertAlmostEqual(frac_kept, 1 - .66, places=1)
     dims_kept = 0
     for iteration in range(10000):
         proposed_pts = step.generate_proposal_points(n_proposed_pts=5,
                                                      q0=q0,
                                                      CR=.66,
                                                      DEpairs=1,
                                                      gamma_level=1,
                                                      snooker=False)
         if iteration == 1:
             self.assertEqual(len(proposed_pts), 5)
         for pt in proposed_pts:
             dims_change_vec = (q0 == pt)
             for dim in dims_change_vec:
                 if dim:
                     dims_kept += 1
     frac_kept = dims_kept / (step.total_var_dimension * 10000.0 * 5)
     self.assertAlmostEqual(frac_kept, 1 - .66, places=1)
Example #18
ORDER = neopixel.GRB

pixels = neopixel.NeoPixel(pixel_pin,
                           num_pixels,
                           brightness=0.2,
                           auto_write=False,
                           pixel_order=ORDER)

app = Flask(__name__)
#clock globals
totsec = 86400
#ctr = 0
loc_now = True

#manager = Manager()
ctr = multiprocess.Value('i', 0)
tf = multiprocess.Value('i', 1)


@app.route("/search/<query>")
def coord_query(query):
    """
    Web API server query
    """
    global tf
    if query is None: return jsonify(success=False)
    query = query.replace("&&", " ")

    lat, long = google_coords(query)
    if (lat is not None and long is not None):
        res = coords_request(lat, long)
Example #19
    def __call__(self, queue, results):
        """ Run worker.

        Parameters
        ----------
        queue : multiprocessing.Queue
            queue of jobs for worker
        results : multiprocessing.Queue
            queue for feedback
        """
        _devices = [item['device'] for item in self.devices]
        self.logger.info('Start {} [id:{}] (devices: {})'.format(
            self.worker_name, os.getpid(), _devices))

        try:
            job = queue.get()
        except Exception as exception:  #pylint:disable=broad-except
            self.logger.error(exception)
        else:
            while job is not None:
                try:
                    finished = False
                    self.logger.info(self.worker_name +
                                     ' is creating process for Job ' +
                                     str(job[0]))
                    for trial in range(self.trials):
                        one_job_queue = mp.JoinableQueue()
                        one_job_queue.put(job)
                        feedback_queue = mp.JoinableQueue()
                        last_update_time = mp.Value('d', time.time())

                        task = mp.Process(target=self._run_task,
                                          args=(one_job_queue, feedback_queue,
                                                trial, last_update_time))
                        task.start()
                        pid = feedback_queue.get()
                        final_signal = Signal(worker=self.worker_name,
                                              job=job[0],
                                              iteration=0,
                                              n_iters=job[1].n_iters,
                                              trial=trial,
                                              done=False,
                                              exception=None)

                        while True:
                            try:
                                signal = feedback_queue.get(timeout=1)
                            except EmptyException:
                                signal = None
                            if signal is None:
                                execution_time = (time.time() -
                                                  last_update_time.value) / 60
                                if self.timeout is not None and execution_time > self.timeout:
                                    p = psutil.Process(pid)
                                    p.terminate()
                                    message = f'Job {job[0]} [{pid}] failed in {self.worker_name} because of timeout'
                                    self.logger.info(message)
                                    final_signal.exception = TimeoutError(
                                        message)
                                    results.put(copy(final_signal))
                                    break
                            if signal is not None and signal.done:
                                finished = True
                                final_signal = signal
                                break
                            if signal is not None:
                                final_signal = signal
                                results.put(copy(final_signal))
                        if finished:
                            break
                except Exception as exception:  #pylint:disable=broad-except
                    self.logger.error(exception)
                    final_signal.exception = exception
                    results.put(copy(final_signal))
                if final_signal.done:
                    results.put(copy(final_signal))
                else:
                    final_signal.exception = RuntimeError(
                        'Job {} [{}] failed {} times in {}'.format(
                            job[0], pid, self.trials, self.worker_name))
                    final_signal.done = True
                    results.put(copy(final_signal))
                queue.task_done()
                job = queue.get()
            queue.task_done()
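The timeout check above only works if the child keeps refreshing last_update_time; _run_task is not shown, but a heartbeat along these lines is implied. A sketch of just that aspect (the loop body and queue handling are assumptions, and the real method clearly also puts Signal progress objects on feedback_queue, which this sketch omits):

import os
import time

def _run_task(self, one_job_queue, feedback_queue, trial, last_update_time):
    # Report our pid first: the parent calls feedback_queue.get() right after start()
    feedback_queue.put(os.getpid())
    job = one_job_queue.get()
    for iteration in range(job[1].n_iters):
        # ... run one iteration of the job ...
        last_update_time.value = time.time()   # heartbeat the parent polls
    one_job_queue.task_done()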
Example #20
 def __init__(self, val=0):
     self.val = multiprocess.Value('i', val)
Example #21
def _setup_mp_dream_pool(nchains, niterations, step_instance, start_pt=None):

    min_njobs = (2 * len(step_instance.DEpairs)) + 1
    if nchains < min_njobs:
        raise Exception(
            'Dream should be run with at least (2*DEpairs)+1 chains.  For the current algorithmic settings, set njobs>=%s.'
            % str(min_njobs))
    if step_instance.history_file != False:
        old_history = np.load(step_instance.history_file)
        len_old_history = len(old_history.flatten())
        nold_history_records = int(len_old_history / step_instance.total_var_dimension)
        step_instance.nseedchains = nold_history_records
        if niterations < step_instance.history_thin:
            arr_dim = (
                (np.floor(nchains * niterations / step_instance.history_thin) +
                 nchains) *
                step_instance.total_var_dimension) + len_old_history
        else:
            arr_dim = np.floor(((
                (nchains * niterations) * step_instance.total_var_dimension) /
                                step_instance.history_thin)) + len_old_history
    else:
        if niterations < step_instance.history_thin:
            arr_dim = (
                (np.floor(nchains * niterations / step_instance.history_thin) +
                 nchains) * step_instance.total_var_dimension
            ) + (step_instance.nseedchains * step_instance.total_var_dimension)
        else:
            arr_dim = np.floor(
                ((nchains * niterations / step_instance.history_thin) *
                 step_instance.total_var_dimension)
            ) + (step_instance.nseedchains * step_instance.total_var_dimension)

    min_nseedchains = 2 * len(step_instance.DEpairs) * nchains

    if step_instance.nseedchains < min_nseedchains:
        raise Exception(
            'The size of the seeded starting history is insufficient.  Increase nseedchains>=%s.'
            % str(min_nseedchains))

    current_position_dim = nchains * step_instance.total_var_dimension
    history_arr = mp.Array('d', [0] * int(arr_dim))
    if step_instance.history_file != False:
        history_arr[0:len_old_history] = old_history.flatten()
    nCR = step_instance.nCR
    ngamma = step_instance.ngamma
    crossover_setting = step_instance.CR_probabilities
    crossover_probabilities = mp.Array('d', crossover_setting)
    ncrossover_updates = mp.Array('d', [0] * nCR)
    delta_m = mp.Array('d', [0] * nCR)
    gamma_level_setting = step_instance.gamma_probabilities
    gamma_probabilities = mp.Array('d', gamma_level_setting)
    ngamma_updates = mp.Array('d', [0] * ngamma)
    delta_m_gamma = mp.Array('d', [0] * ngamma)
    current_position_arr = mp.Array('d', [0] * current_position_dim)
    shared_nchains = mp.Value('i', nchains)
    n = mp.Value('i', 0)
    tf = mp.Value('c', b'F')

    if step_instance.crossover_burnin is None:
        step_instance.crossover_burnin = int(np.floor(niterations / 10))

    if start_pt is not None:
        if step_instance.start_random:
            print(
                'Warning: start position provided but random_start set to True.  Overrode random_start value and starting walk at provided start position.'
            )
            step_instance.start_random = False

    p = DreamPool(nchains,
                  initializer=_mp_dream_init,
                  initargs=(
                      history_arr,
                      current_position_arr,
                      shared_nchains,
                      crossover_probabilities,
                      ncrossover_updates,
                      delta_m,
                      gamma_probabilities,
                      ngamma_updates,
                      delta_m_gamma,
                      n,
                      tf,
                  ))
    #p = mp.pool.ThreadPool(nchains, initializer=_mp_dream_init, initargs=(history_arr, current_position_arr, shared_nchains, crossover_probabilities, ncrossover_updates, delta_m, gamma_probabilities, ngamma_updates, delta_m_gamma, n, tf, ))
    #p = mp.Pool(nchains, initializer=_mp_dream_init, initargs=(history_arr, current_position_arr, shared_nchains, crossover_probabilities, ncrossover_updates, delta_m, gamma_probabilities, ngamma_updates, delta_m_gamma, n, tf, ))

    return p
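_mp_dream_init is used as the pool initializer but not shown; consistent with the Dream_shared_vars assignments in Examples #1, #2, and #9, it presumably publishes each shared object as a module-level attribute so every worker in the pool can reach it. A sketch under that assumption (the gamma-related and history_seeded attribute names are guesses):

import pydream.Dream_shared_vars

def _mp_dream_init(arr, cp_arr, nchains, crossover_probs, ncrossover_updates,
                   delta_m, gamma_probs, ngamma_updates, delta_m_gamma, val,
                   switch):
    # Runs once in each pool worker: expose the shared objects globally
    pydream.Dream_shared_vars.history = arr
    pydream.Dream_shared_vars.current_positions = cp_arr
    pydream.Dream_shared_vars.nchains = nchains
    pydream.Dream_shared_vars.cross_probs = crossover_probs
    pydream.Dream_shared_vars.ncr_updates = ncrossover_updates
    pydream.Dream_shared_vars.delta_m = delta_m
    pydream.Dream_shared_vars.gamma_level_probs = gamma_probs       # name assumed
    pydream.Dream_shared_vars.ngamma_updates = ngamma_updates       # name assumed
    pydream.Dream_shared_vars.delta_m_gamma = delta_m_gamma         # name assumed
    pydream.Dream_shared_vars.count = val
    pydream.Dream_shared_vars.history_seeded = switch               # name assumed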