Code example #1
    def __init__(self,
                 imageroot,
                 callback,
                 imagelistfile=None,
                 bs=1,
                 shuffle=False,
                 nthread=4,
                 name='',
                 imagesize=128,
                 pathinfo=False,
                 maxlistnum=None):
        self.callback = callback  # callback(name, filename, pindex, cacheobj) -> (image, label) as np.array
        self.bs = bs
        self.shuffle = shuffle
        self.nthread = nthread
        self.name = name

        self.arrimage = Array(ctypes.c_float,
                              10 * bs * 3 * imagesize * imagesize)
        self.arrlabel = Array(ctypes.c_float,
                              10 * bs * 3 * imagesize * imagesize)

        # The following two lines do NOT work in Python 3 (true division yields a float, but reshape needs an int)
        # self.nparrimage = np.frombuffer(self.arrimage.get_obj(),np.float32).reshape(10,len(self.arrimage)/10)
        # self.nparrlabel = np.frombuffer(self.arrlabel.get_obj(),np.float32).reshape(10,len(self.arrlabel)/10)

        # The Python 3 equivalent: cast the divided length to int
        self.nparrimage = np.frombuffer(self.arrimage.get_obj(),
                                        np.float32).reshape(
                                            10, int(len(self.arrimage) / 10))
        self.nparrlabel = np.frombuffer(self.arrlabel.get_obj(),
                                        np.float32).reshape(
                                            10, int(len(self.arrlabel) / 10))

        self.filelist = Queue()
        self.result = Queue()
        self.freearr = Queue()
        self.imagenum = 0
        self.finishnum = 0
        self.zfile = None
        self.pathinfo = pathinfo

        for i in range(10):
            self.freearr.put(i)

        self.flist = []
        if imagelistfile is None and os.path.isdir(imageroot):
            for (dirpath, dirnames, filenames) in os.walk(imageroot):
                for filename in filenames:
                    self.flist.append(dirpath + '/' + filename)
        else:
            if os.path.isdir(imageroot): imageroot = imageroot + '/'
            else:
                imageroot = imageroot + ':'
                if '.zip:' in imageroot:
                    import zipfile
                    zipfilepath = imageroot.split(':')[0]
                    if zipfilepath in ImageDataset.zipcache:
                        self.zfile = ImageDataset.zipcache[zipfilepath]
                    else:
                        self.zfile = zipfile.ZipFile(zipfilepath)
                        ImageDataset.zipcache[zipfilepath] = self.zfile

            if '.zip:' in imageroot and imagelistfile is None:
                for zf in self.zfile.filelist:
                    self.flist.append(imageroot + zf.filename)
            elif '.zip:' in imagelistfile:
                with self.zfile.open(imagelistfile.split(':')[1]) as f:
                    lines = f.readlines()
                for line in lines:
                    self.flist.append(imageroot +
                                      line)  # zippath:filename classname
            else:
                with open(imagelistfile) as f:
                    lines = f.readlines()
                for line in lines:
                    self.flist.append(
                        imageroot + line
                    )  # root/filepath classname || zippath:filename classname

        self.imagenum = len(self.flist)
        if self.shuffle: random.shuffle(self.flist)
        for filepath in self.flist:
            self.filelist.put(filepath)
            if maxlistnum is not None: maxlistnum -= 1
            if maxlistnum == 0: break

        for i in range(nthread):
            self.filelist.put('FINISH')
            p = Process(target=dataset_handle,
                        args=(self.name, self.filelist, self.result,
                              self.callback, self.bs, i, self.freearr,
                              self.arrimage, self.arrlabel, self.zfile))
            p.start()
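The inline comments in example #1 boil down to one portable pattern: wrap the shared Array's buffer in a NumPy view, and use integer division when computing the reshape. A minimal self-contained sketch of that pattern (the sizes here are illustrative, not the source's):

import ctypes
from multiprocessing import Array

import numpy as np

buf = Array(ctypes.c_float, 10 * 4)  # 10 slots of 4 floats each (illustrative)
view = np.frombuffer(buf.get_obj(), np.float32).reshape(10, len(buf) // 10)
view[0, :] = 1.0  # writes land directly in the shared memory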
Code example #2
    process_time = Process(target=process_time, args=(), name='TIME')  # note: re-binding 'process_time' shadows the target function of the same name
    # process_guid = Process(target=process_guid,args=(),name='GUID')

    process_time.start()
    # process_guid.start()

    global tWait, tSolve, tDist, eta
    while met.value == 0:
        pass

    state0 = np.array([
        position[0].value, position[1].value, position[2].value,
        velocity[0].value, velocity[1].value, velocity[2].value,
        np.log(mass.value), 0, 0, 0, 0
    ])
    tWait, tSolve, tDist = pdg.findPath(pdg.delta_t,
                                        state0,
                                        initialSearch=True)

    while tSolve > 0:
        t0 = met.value
        path = pdg.findPath(pdg.delta_t,
                            state0,
                            tWait=tWait,
                            tSolve=tSolve,
                            tDist=tDist)
        eta = Array('d', path)
        tSolve -= met.value - t0
        new_eta.value = 1

    process_time.terminate()
Code example #3
File: lplowmem.py Project: ssmeena/Label-Propagation
    for result in results:
        for entry in result.get():
            output.write(str(entry[0])+delimeter+str(entry[1])+'\n')
    output.close()



                
if __name__ == '__main__':
    # Parse the command line options
    filename, iterations, THREADS, SizeHint = ParseOptions(sys.argv)

    # A shared array between multiple processes of type int. It is used since lookup for an array is O(1)
    # Also note that range automatically initializes the Label[key]=key
    # The lock is set to false since when data is written to Label only main process is active and pool is closed 
    Label = Array('i',range(SizeHint),lock=False)    


    # Load the data into an adjacency list
    start = time.time()
    for iteration in range(1,iterations+1):
        iterstart = time.time()
        print "started iteration",iteration
        pool = Pool(processes=THREADS,initializer=initProcess,initargs=(Label,))

        Adj = open(filename)     # Open the file
        Buffer = []
        count  = 0
        #Contains all result_async objects
        results = []
        for line in Adj:
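The excerpt above stops as the adjacency file is being read, but the shared-memory move is already visible: the lock-free Label array is handed to every pool worker once, via initializer=initProcess. The source's initProcess is not shown here; a hedged sketch of what such an initializer typically looks like:

def initProcess(shared_label):
    # Publish the shared Array as a module-level global so pool workers
    # can read and write the same memory without it being re-pickled per task.
    global Label
    Label = shared_label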
Code example #4
File: array.py Project: leinian85/year2019
"""
array.py
Store a group of values in shared memory.
"""

from multiprocessing import Process, Array

# Create shared memory
# shm = Array('i', [1, 2, 3, 4])
# shm = Array('i', 5)  # initially allocates space for 5 integers
shm = Array('c', b'hello')  # byte string


def fun():
    # the shared memory object created by Array is iterable
    for i in shm:
        print(i)
    shm[0] = b'H'  # modify the shared memory


p = Process(target=fun)
p.start()
p.join()
for i in shm:
    print(i)

print(shm.value)  # b'Hello' -- the child's write is visible in the parent
Code example #5
#!/usr/bin/env python
# -*- coding:utf-8 -*-

from multiprocessing import Process, Array, RLock


def Foo(lock, temp, i):
    """
    Add i to the 0th element.
    """
    import time
    time.sleep(1)
    lock.acquire()
    temp[0] += i
    for item in temp:
        print(i, '----->', item)
    lock.release()


lock = RLock()
temp = Array('i', [11, 22, 33, 44])

for i in range(100):
    p = Process(target=Foo, args=(
        lock,
        temp,
        i,
    ))
    p.start()
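A note on the design choice in example #5: an element update such as temp[0] += i is a read-modify-write and is not atomic, so the explicit lock around it is what keeps the 100 processes from losing updates; a plain Lock would suffice here since the lock is never re-acquired by the same process, but RLock works equally well.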
Code example #6
def solve(obj_func: Callable,
          par_lower_limit: List[float],
          par_upper_limit: List[float],
          eq_func: Optional[Callable] = None,
          eq_values: Optional[List[float]] = None,
          ineq_func: Optional[Callable] = None,
          ineq_lower_bounds: Optional[List[float]] = None,
          ineq_upper_bounds: Optional[List[float]] = None,
          number_of_restarts: int = 1,
          number_of_simulations: int = 20000,
          number_of_processes: Optional[int] = None,
          start_guess_sampling: Union[None, List[Distribution], Sampling] = None,
          seed: Union[None, int] = None,
          evaluation_type: Union[EvaluationType, int] = EvaluationType.OBJECTIVE_FUNC_EXCLUDE_INEQ,
          pysolnp_rho: float = 1.0,
          pysolnp_max_major_iter: int = 10,
          pysolnp_max_minor_iter: int = 10,
          pysolnp_delta: float = 1e-05,
          pysolnp_tolerance: float = 0.0001,
          debug: bool = False) -> Results:
    # Represent the problem with the below object
    model = ProblemModel(obj_func=obj_func,
                         par_lower_limit=par_lower_limit,
                         par_upper_limit=par_upper_limit,
                         number_of_restarts=number_of_restarts,
                         number_of_simulations=number_of_simulations,
                         eq_func=eq_func,
                         eq_values=eq_values,
                         ineq_func=ineq_func,
                         ineq_lower_bounds=ineq_lower_bounds,
                         ineq_upper_bounds=ineq_upper_bounds,
                         rho=pysolnp_rho,
                         max_major_iter=pysolnp_max_major_iter,
                         max_minor_iter=pysolnp_max_minor_iter,
                         delta=pysolnp_delta,
                         tolerance=pysolnp_tolerance,
                         debug=debug,
                         number_of_processes=number_of_processes,
                         start_guess_sampling=start_guess_sampling,
                         evaluation_type=evaluation_type)

    # Validate the inputs for the problem model
    model.validate()

    if start_guess_sampling is None or type(start_guess_sampling) is list:
        # Generate samples using the DefaultSampling object
        sampling = DefaultSampling(parameter_lower_bounds=par_lower_limit,
                                   parameter_upper_bounds=par_upper_limit,
                                   sample_properties=start_guess_sampling,
                                   seed=seed)
    elif isinstance(start_guess_sampling, Sampling):
        if seed is not None and debug is True:
            print(f"Warning: Seed value {seed} ignored due to user sampling override")
        # User provided Sampling instance
        sampling = start_guess_sampling
    else:
        raise ValueError(
            f"Provided parameter start_guess_sampling was not of expected type. Expected None, List[Distribution] or Sampling.")

    parameter_guesses = sampling.generate_all_samples(
        number_of_samples=model.number_of_evaluations,
        sample_size=model.sample_size)

    if debug is True:
        if any(guess is None for guess in parameter_guesses):
            print(f"Some of the random samples provided failed to generate, is your Sampling class setup correctly?")

    if number_of_processes:
        par_lower_limit = Array(c_double, model.par_lower_limit, lock=False)
        par_upper_limit = Array(c_double, model.par_upper_limit, lock=False)
        eq_values = Array(c_double, model.eq_values, lock=False) if model.has_eq_bounds else None
        ineq_lower_bounds = Array(c_double, model.ineq_lower_bounds, lock=False) if model.has_ineq_bounds else None
        ineq_upper_bounds = Array(c_double, model.ineq_upper_bounds, lock=False) if model.has_ineq_bounds else None
        pysolnp_delta = Value(c_double, model.delta, lock=False)
        pysolnp_rho = Value(c_double, model.rho, lock=False)
        pysolnp_max_major_iter = Value(c_int, model.max_major_iter, lock=False)
        pysolnp_max_minor_iter = Value(c_int, model.max_minor_iter, lock=False)
        pysolnp_tolerance = Value(c_double, model.tolerance, lock=False)
        pysolnp_debug = Value(c_bool, model.debug, lock=False)
        evaluation_type = Value(c_int, model.evaluation_type.value, lock=False)
        number_of_parameters = Value(c_int, model.number_of_parameters, lock=False)

        parameter_guesses = Array(c_double, parameter_guesses, lock=False)
        eval_results = Array(c_double, model.number_of_evaluations)  # Results from the eval function
        restart_results = Array(c_double,
                                model.number_of_restarts * model.number_of_parameters)  # Results from pysolnp restarts
        restart_convergence = Array(c_bool, model.number_of_restarts)
        initargs = (
            obj_func,
            par_lower_limit,
            par_upper_limit,
            eq_func,
            eq_values,
            ineq_func,
            ineq_lower_bounds,
            ineq_upper_bounds,
            parameter_guesses,
            pysolnp_delta,
            pysolnp_rho,
            pysolnp_max_major_iter,
            pysolnp_max_minor_iter,
            pysolnp_tolerance,
            pysolnp_debug,
            evaluation_type,
            number_of_parameters,
            eval_results,
            restart_results,
            restart_convergence
        )
        with Pool(processes=number_of_processes,
                  initializer=initialize_worker_process_resources,
                  initargs=initargs) as pool:

            pool.map(evaluate_starting_guess, range(model.number_of_evaluations))

            if debug is True:
                __debug_message_eval_functions(model=model, eval_results=eval_results)

            best_evaluations = __get_best_solutions(results=eval_results, number_of_results=model.number_of_restarts)
            solve_guess_indices = [index for index, value in best_evaluations]
            # The found optimums are stored in parameter_guesses
            pool.starmap(pysolnp_solve, enumerate(solve_guess_indices))

    else:
        eval_results = [None] * model.number_of_evaluations
        restart_results = [None] * model.number_of_restarts * model.number_of_parameters
        restart_convergence = [None] * model.number_of_restarts

        initialize_worker_process_resources(
            obj_func=obj_func,
            par_lower_limit=model.par_lower_limit,
            par_upper_limit=model.par_upper_limit,
            eq_func=eq_func if model.has_eq_bounds else None,
            eq_values=model.eq_values if model.has_eq_bounds else None,
            ineq_func=ineq_func if model.has_ineq_bounds else None,
            ineq_lower_bounds=model.ineq_lower_bounds if model.has_ineq_bounds else None,
            ineq_upper_bounds=model.ineq_upper_bounds if model.has_ineq_bounds else None,
            parameter_guesses=parameter_guesses,
            pysolnp_delta=model.delta,
            pysolnp_rho=model.rho,
            pysolnp_max_major_iter=model.max_major_iter,
            pysolnp_max_minor_iter=model.max_minor_iter,
            pysolnp_tolerance=model.tolerance,
            pysolnp_debug=model.debug,
            evaluation_type=model.evaluation_type.value,
            number_of_parameters=model.number_of_parameters,
            eval_results=eval_results,
            restart_results=restart_results,
            restart_convergence=restart_convergence
        )

        for index in range(model.number_of_evaluations):
            evaluate_starting_guess(simulation_index=index)

        if debug is True:
            __debug_message_eval_functions(model=model, eval_results=eval_results)

        best_evaluations = __get_best_solutions(results=eval_results, number_of_results=model.number_of_restarts)
        solve_guess_indices = [index for index, value in best_evaluations]
        # The found optimums are stored in parameter_guesses
        for solve_index, guess_index in enumerate(solve_guess_indices):
            pysolnp_solve(solve_index=solve_index, guess_index=guess_index)

    # For each restart, get the resulting parameters
    solutions = [(restart_results[index * model.number_of_parameters: (index + 1) * model.number_of_parameters],
                  restart_convergence[index]) for index in range(model.number_of_restarts)]

    # Each Result represents a solution to the restart (might have not converged)
    all_results = [
        Result(parameters=solution, obj_value=obj_func(solution), converged=converged)
        for solution, converged in solutions]

    # pysolnp might not have converged for some solutions; if no converged solution exists, print a warning message.
    if len([solution for solution in all_results if solution.converged]) == 0:
        print(f"Not able to find any feasible solution in {number_of_restarts} restarts.")

    return Results(results=all_results, starting_guesses=parameter_guesses)
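Example #6 leans on one idea throughout: allocate lock=False shared ctypes objects up front and hand them to the pool workers once, through initargs, instead of pickling them into every task. A stripped-down sketch of that pattern (all names below are hypothetical, not pysolnp's):

from ctypes import c_double
from multiprocessing import Array, Pool


def _init_worker(shared_results):
    global results
    results = shared_results  # visible to every worker from now on


def _evaluate(i):
    results[i] = float(i) ** 2  # write into shared memory; nothing is returned or pickled


if __name__ == "__main__":
    shared = Array(c_double, 8)
    with Pool(2, initializer=_init_worker, initargs=(shared,)) as pool:
        pool.map(_evaluate, range(8))
    print(shared[:])  # [0.0, 1.0, 4.0, ..., 49.0]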
"""
"Multiprocessing" section example showing how
to use sharedctypes submodule to share data
between multiple processes.

"""
from multiprocessing import Process, Value, Array


def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]


if __name__ == "__main__":
    num = Value("d", 0.0)
    arr = Array("i", range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print(num.value)
    print(arr[:])
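A short note on this canonical example: 'd' and 'i' are the array typecodes for C double and C int, and because num and arr live in shared memory the child's writes are visible to the parent, so the expected output is 3.1415927 followed by [0, -1, -2, -3, -4, -5, -6, -7, -8, -9].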
Code example #8
            page = get_page(requrl.format(nm), 20)
            ouhtml.write(page)

if __name__ == "__main__":
    parser = ap.ArgumentParser(
            description="Load seqget pages."
            )
    parser.add_argument(
            "innms", metavar="IN_NMS", type=ap.FileType("r"),
            help="input list of REBASE R-M names"
            )
    parser.add_argument(
            "-o", metavar="SEQGET_PATH", dest="oupath", required=True,
            help="output path, use {} placeholder"
            )
    parser.add_argument(
            "-p", metavar="N", dest="p_number", default=10, type=int,
            help="number of page loading subprocesses, default is 10"
            )
    args = parser.parse_args()
    with args.innms as innms:
        nms = sorted(innms.read().strip().split("\n"))
    print "Loading %d pages ..." % len(nms)
    sh_nms = Array(ctypes.c_char_p, nms)
    sh_index = Value("i", 0)
    l = Lock()
    for i in range(args.p_number):
        Process(target=download_pages,
                args=(args.oupath, sh_nms, sh_index, l)).start()

Code example #9
def train(nn_type, num_neurons):
    output_in_file(filePathMainInfo, "a", "\n" + nn_type + "\n")
    for i in range(repeat):
        output_in_file(filePathMainInfo, "a", "\tAttempt " + str(i + 1) + "\n")

        validation_final_results_age = []
        validation_final_results_country_part = []
        actual_epochs_final_results = []
        for validation_k in range(k):
            training = []
            validation = []
            for j in range(k):
                if j == validation_k:
                    validation += posts_k_fold[j]
                else:
                    training += posts_k_fold[j]
            X_train, y1_train = create_x_y(training, "age")
            X_train, y2_train = create_x_y(training, "country_part")
            X_train = pad_sequences(X_train,
                                    maxlen=max_len_post,
                                    padding='post')
            X_validation, y1_validation = create_x_y(validation, "age")
            X_validation, y2_validation = create_x_y(validation,
                                                     "country_part")
            X_validation = pad_sequences(X_validation,
                                         maxlen=max_len_post,
                                         padding='post')
            validation_results = Array('d', [-1] * 4)
            actual_epochs_results = Array('i', [-1] * 2)
            for early_stopping in ["age", "country_part"]:
                output_in_file(filePathMainInfo, "a",
                               "\t\tEarly-stopping " + early_stopping + "\n")
                p = Process(target=run_train_one_k_fold,
                            args=(
                                nn_type,
                                task,
                                filePathMainInfo,
                                max_len_post,
                                num_neurons,
                                i,
                                validation_k,
                                X_train,
                                [y1_train, y2_train],
                                X_validation,
                                [y1_validation, y2_validation],
                                embedding_matrix,
                                batch_size,
                                early_stopping_wait,
                                early_stopping,
                                validation_results,
                                actual_epochs_results,
                            ))
                p.start()
                p.join()
            for_age = 0
            for_country_part = 0
            if validation_results[:][0] > validation_results[:][2]:
                for_age += 1
            elif validation_results[:][0] < validation_results[:][2]:
                for_country_part += 1
            if validation_results[:][1] > validation_results[:][3]:
                for_age += 1
            elif validation_results[:][1] < validation_results[:][3]:  # fixed: the original repeated the '>' comparison
                for_country_part += 1
            if for_age > for_country_part:
                output_in_file(filePathMainInfo, "a",
                               "\t\tEarly-stopping is Age\n\n")
                validation_final_results_age.append(validation_results[:][0])
                validation_final_results_country_part.append(
                    validation_results[:][1])
                actual_epochs_final_results.append(actual_epochs_results[:][0])
            else:
                output_in_file(filePathMainInfo, "a",
                               "\t\tEarly-stopping is Country-part\n\n")
                validation_final_results_age.append(validation_results[:][2])
                validation_final_results_country_part.append(
                    validation_results[:][3])
                actual_epochs_final_results.append(actual_epochs_results[:][1])

        path = "../models/" + str(task) + "/K-fold/" + str(
            max_len_post) + "/neurons_" + str(num_neurons) + "/" + str(
                nn_type) + "/Attempt " + str(i + 1) + "/"
        shutil.rmtree(path)
        result_acc_age = numpy.mean(validation_final_results_age)
        result_acc_country_part = numpy.mean(
            validation_final_results_country_part)
        better_results_acc = []
        for j in range(len(validation_final_results_age)):
            better_results_acc.append(validation_final_results_age[j] +
                                      validation_final_results_country_part[j])
        num_epochs = actual_epochs_final_results[better_results_acc.index(
            max(better_results_acc))] + 1
        training = []
        for j in range(k):
            training += posts_k_fold[j]
        X_train, y1_train = create_x_y(training, "age")
        X_train, y2_train = create_x_y(training, "country_part")
        X_train = pad_sequences(X_train, maxlen=max_len_post, padding='post')
        p = Process(target=run_train_final,
                    args=(
                        nn_type,
                        filePathMainInfo,
                        task,
                        max_len_post,
                        num_neurons,
                        X_train,
                        [y1_train, y2_train],
                        embedding_matrix,
                        batch_size,
                        i,
                        [result_acc_age, result_acc_country_part],
                        num_epochs,
                    ))
        p.start()
        p.join()
Code example #10
    tmp1 = np.random.random(3)  # random floats
    tmp2 = np.random.uniform(low=0 / 10, high=1 / 10,
                             size=5)  # uniform distribution over [0, 0.1)

    print("np.random.random: ", tmp1)
    print("np.random.uniform: ", tmp2)

    '''
    2. Convert a numpy array to a ctypes array
    '''
    # using ctypes: a C array type; wrapping it in Array adds synchronization
    tmp = np.ctypeslib.as_ctypes(tmp2)
    print("ctypes tmp:", tmp)  # <c_double_Array_5 object at 0x119c8cb00>
    print("dir tmp:", dir(tmp), tmp._type_)
    tmp = Array(tmp._type_, tmp, lock=False)
    # still a c_double array underneath; wrapping it in Array lets you control cross-process access safety
    print("Array tmp: ", tmp)
    print("Array tmp[0]: ", tmp[0])

    '''
    3. Using the numpy axis argument
    An axis is a level (dimension) of the array.
    With axis=i, numpy operates along the direction in which the i-th index varies -- comparable to GROUP BY in SQL.

    axis=0 operates along axis 0, i.e. on each column; axis=1 operates along axis 1, i.e. on each row.
    
    '''

    num1 = np.array([[1, 2, 3, 4], [2, 3, 4, 5]])
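As a quick check of the axis rule just described (this continuation is mine, not the source's): num1 has shape (2, 4), so

    print(np.sum(num1, axis=0))  # [3 5 7 9] -- one sum per column
    print(np.sum(num1, axis=1))  # [10 14]   -- one sum per row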
Code example #11
from multiprocessing import Process, Array
import time

# Create shared memory
shm = Array('i', [1, 2, 3, 4, 5])


def fun():
    for i in shm:
        print(i, end=" ")
    shm[2] = 10000
    print("")


p = Process(target=fun)
p.start()
p.join()
for x in shm:
    print(x, end=" ")
print("")  ##
Code example #12
    def __init__(self,
                 episode_length_time,
                 port='/dev/ttyUSB0',
                 ir_window=20,
                 ir_history=1,
                 obs_history=1,
                 dt=0.015,
                 auto_unwind=True,
                 rllab_box=False,
                 **kwargs):
        """Constructor of the environment.

        Args:
            episode_length_time:  the duration of an episode, as a float in seconds
            port:                 the serial port to the Create2 (e.g. '/dev/ttyUSB0')
            ir_window:            the number of IR readings to include in the calculation
            ir_history:           the number of IR packets to keep (not observation history)
            obs_history:          the number of observations to keep as history
            dt:                   the cycle time in seconds
            auto_unwind:          whether to execute the automatic cable-unwind code
            rllab_box:            whether we are using an rllab algorithm (vs. gym/baselines)
            **kwargs:             any other arguments to be passed to the base class
        """
        self._ir_window = ir_window
        self._ir_history = ir_history
        self._obs_history = obs_history
        self._episode_step_ = Value('i', 0)
        self._episode_length_time = episode_length_time
        self._episode_length_step = int(episode_length_time / dt)
        self._internal_timing = 0.015
        self._total_rotation = 0
        self._max_rotation = 720
        self._auto_unwind = auto_unwind
        self._min_battery = 1700
        self._max_battery = 2600

        # get the opcode for our main action (only 1 action)
        self._main_op = 'drive_direct'
        self._extra_ops = ['safe', 'seek_dock', 'drive']
        main_opcode = create2_config.OPCODE_NAME_TO_CODE[self._main_op]
        extra_opcodes = [
            create2_config.OPCODE_NAME_TO_CODE[op] for op in self._extra_ops
        ]

        # store the previous action to be shared across processes
        self._prev_action_ = np.frombuffer(Array('i', 2).get_obj(), dtype='i')

        # create factory with common arguments for making an observation dimension
        observation_factory = Create2ObservationFactory(
            main_op=self._main_op,
            dt=dt,
            obs_history=self._obs_history,
            ir_window=self._ir_window,
            ir_history=self._ir_history,
            internal_timing=self._internal_timing,
            prev_action=self._prev_action_)

        # the definition of the observed state and the associated custom modification (if any)
        # before passing to the learning algorithm
        self._observation_def = [
            observation_factory.make_dim('light bump left signal'),
            observation_factory.make_dim('light bump front left signal'),
            observation_factory.make_dim('light bump center left signal'),
            observation_factory.make_dim('light bump center right signal'),
            observation_factory.make_dim('light bump front right signal'),
            observation_factory.make_dim('light bump right signal'),
            observation_factory.make_dim('infrared character omni'),
            observation_factory.make_dim('infrared character left'),
            observation_factory.make_dim('infrared character right'),
            observation_factory.make_dim('bumps and wheel drops'),
            observation_factory.make_dim('charging sources available'),
            observation_factory.make_dim('previous action')
        ]

        # extra packets we need for proper reset and charging
        self._extra_sensor_packets = [
            'angle', 'battery charge', 'oi mode', 'stasis', 'distance',
            'cliff left', 'cliff front left', 'cliff front right',
            'cliff right'
        ]
        main_sensor_packet_ids = [
            d.packet_id for d in self._observation_def
            if d.packet_id is not None
        ]
        extra_sensor_packet_ids = [
            create2_config.PACKET_NAME_TO_ID[nm]
            for nm in self._extra_sensor_packets
        ]

        # TODO: move this out to some base class?
        if rllab_box:
            from rllab.spaces import Box as RlBox  # use this for rllab TRPO
            Box = RlBox
        else:
            from gym.spaces import Box as GymBox  # use this for baselines algos
            Box = GymBox

        # go thru the main opcode (just direct_drive in this case) and add the range of each param
        # XXX should the action space include the opcode? what about op that doesn't have parameters?
        self._action_space = Box(
            low=np.array([
                r[0] for r in create2_config.OPCODE_INFO[main_opcode]
                ['params'].values()
            ]),
            high=np.array([
                r[1] for r in create2_config.OPCODE_INFO[main_opcode]
                ['params'].values()
            ]))

        # loop thru the observation dimension and get the lows and highs
        self._observation_space = Box(
            low=np.concatenate([d.lows for d in self._observation_def]),
            high=np.concatenate([d.highs for d in self._observation_def]))

        self._comm_name = 'Create2'
        buffer_len = int(
            max([
                self._ir_window * self._ir_history, dt / self._internal_timing
            ]) + 1)
        communicator_setups = {
            self._comm_name: {
                'Communicator': Create2Communicator,
                # have to read in this number of packets every time to support
                # all operations
                'num_sensor_packets': buffer_len,
                'kwargs': {
                    'sensor_packet_ids':
                    main_sensor_packet_ids + extra_sensor_packet_ids,
                    'opcodes': [main_opcode] + extra_opcodes,
                    'port': port,
                    'buffer_len': 2 * buffer_len,
                }
            }
        }

        super(Create2DockerEnv,
              self).__init__(communicator_setups=communicator_setups,
                             action_dim=len(self._action_space.low),
                             observation_dim=len(self._observation_space.low),
                             dt=dt,
                             **kwargs)
Code example #13
    def start(self, function, n, *args, **kwargs):
        """"
        Starts the Parallelization of the Parallelization Class.
        
        Parameters:
        ----------
        function:                          (List) List of functions that should be executed, if always the same 
                                           function should be used, only the function name has to be assigned. 
                                           At iteration i function[i] of the list will be executed, together with the 
                                           belonging list element of args[i].
        n:                                 (int) Number of times the function should be called.
        (optional) *args:                  (Tuple of Lists) Parameter for executed function. If always the same parameter
                                           should be used, then a list containing one parameter [parameter] has to be
                                           assigned. Otherwise args[i] will be passed through to the function.
        (optional) **kwargs:               (Dictionary of Lists) Optional Keyword Parameter for the function. The Value 
                                           should always contain a list. If always the same parameter should be used,
                                           then a list containing one parameter kwarg=[parameter] has to be assigned. 
        (optional) multiple_return_values: (Boolean) **** ATTENTION: If defined, this variable needs to be the last one
                                           in the calling function, after args and kwargs! ****
                                           True, if the number of variables returned by 'function()' is greater than one.
                                           If a list of functions with a different number of returned variables per
                                           function is called, multiple_return_values should be 'False'.
                                       
        Returns:
        --------
        List of return values from the executed function, separated by variable. Each variable's list has length n.
                            
        Example:
        -------
        models = p.start(TensorModel, 4, [gtab1, gtab2, gtab3, gtab4], multiple_return_values=True)
        fits, prediction = p.start([i.fit for i in models], 4, [data1, data2, data3, data4], [TE], sphere=[sphere])
        """

        multiple_return_values = False
        if 'multiple_return_values' in kwargs:
            multiple_return_values = kwargs['multiple_return_values']
            kwargs.pop('multiple_return_values')

        #Multicore Calculation
        self.number_of_cores = cpu_count()
        if self.maximum_number_of_cores < self.number_of_cores:
            self.number_of_cores = self.maximum_number_of_cores

        self.calculations = Array('i', self.number_of_cores)
        self.percent = Value('i', 0)

        self.percent.value = 0
        self.starting_time = time.time()
        q = Queue()
        processes = []

        if self.display == True:
            print 'Parallelization starts on', self.number_of_cores, 'CPUs.'

        for i in range(self.number_of_cores):
            self.calculations[i] = 0
            processes.append(
                Process(target=self.worker,
                        args=(q, i, self.number_of_cores, function, n, args,
                              kwargs)))
            processes[-1].start()

        return_values = [None] * n

        for i in range(n):
            results = q.get()
            vox = results[0]
            return_values[vox] = results[1]

        # Exit the completed processes
        for i in range(self.number_of_cores):
            processes[i].join()

        if self.display == True:
            sys.stdout.write(
                '{:3.0f}%  {:4.0f}:{:02.0f} min remaining\n'.format(
                    100.0, 0, 0))
            time_needed = (time.time() - self.starting_time) / 60.0
            time_needed_seconds = int(time_needed % 1 * 60.0)
            time_needed_minutes = int(time_needed)
            sys.stdout.write(
                '\nTotal Time needed: {:5.0f}:{:02.0f} min\n'.format(
                    time_needed_minutes, time_needed_seconds))

        # Transpose returned values
        if multiple_return_values == True:
            return_values_separated = np.asarray(return_values)
            return_values_transformed = (return_values_separated.T).tolist()
            return_values = tuple(return_values_transformed)

        return return_values
Code example #14
                if human_waiting.value:
                    break

            client.cancel_goal()

            move_base_pub = rospy.Publisher('move_base/cancel',\
                                            actionlib_msgs.msg.GoalID)

            # for unknown reasons, we have to publish the message at least twice
            # to make it work
            for i in range(5):
                move_base_pub.publish(rospy.Time.now(), '')
                rospy.sleep(0.2)

    return 1


if __name__ == '__main__':

    human_waiting = Value('b', False)
    curr_goal = Array('c', "This is a very very very long string for nothing.")

    p1 = Process(target=gui_thread, args=(human_waiting, curr_goal))
    p2 = Process(target=platform_thread, args=(human_waiting, curr_goal))

    p1.start()
    p2.start()

    p1.join()
    p2.join()
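A note on this pattern: Array('c', <string>) sizes the shared buffer from its initializer, so curr_goal can later hold any goal string up to that length; that is why the placeholder string is deliberately long. Under Python 3 the initializer would have to be a bytes literal (b"...").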
Code example #15
File: HackRF.py Project: starling021/uh
def iq_to_bytes(samples: np.ndarray):
    arr = Array("B", 2 * len(samples), lock=False)
    numpy_view = np.frombuffer(arr, dtype=np.uint8)
    numpy_view[:] = samples.flatten(order="C")
    return arr
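In the source this helper packs (n, 2) int8/uint8 I/Q pairs into a raw, lock-free byte array that another process can read without copying. A hedged usage sketch (the sample values are made up, and iq_to_bytes is assumed to be callable as shown -- in the source it is a helper method):

import numpy as np

iq = np.array([[1, 2], [3, 4]], dtype=np.uint8)  # two I/Q sample pairs
arr = iq_to_bytes(iq)                            # -> Array("B", 4, lock=False)
print(bytes(arr))                                # b'\x01\x02\x03\x04'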
Code example #16
            Spec1.setTriggerMode(0)  # It is set for free running mode
            Spec1.setIntegrationTime(
                Integration_Time * 1000
            )  # Integration time is in microseconds when using the library
            Spec_Is_Read = Value('i', 0)
            Spec_Is_Read.value = 0
            Spec_Is_Done.value = 0
            No_Spec_Sample = int(
                round(DurationOfReading * 1000 / (Integration_Time))
            )  # Number of samples for spectrometer to read.
            Full_Spec_Records = np.zeros(shape=(len(
                Spec1.Handle.wavelengths()), No_Spec_Sample),
                                         dtype=float)
            Full_Spec_Records2 = Array(
                'd',
                np.zeros(shape=(len(Spec1.Handle.wavelengths()) *
                                No_Spec_Sample, 1),
                         dtype=float))
            Spec_Time = Array('d',
                              np.zeros(shape=(No_Spec_Sample, 1), dtype=float))
            Spec_Index = Array('i', np.zeros(shape=(1, 1), dtype=int))

        ######################################################################################################
        if (DAQ1.Error == 0):
            DAQ_Is_Read.value = 0
            StreamPort = ['AIN0', 'AIN1']
            #while 1==1:
            print('Which working mode do you want for the analogue input?')
            print(
                'Hint: using the internal buffer is faster and can go up to 100 kHz but is less stable (DAQT7 may crash)'
            )
Code example #17
def Plan(planners, domain, problem, pwd, verbose):
    '''
    calls multiple planners to solve a given problem; as soon as
    the first planner finds a plan, terminates all other planners and returns
    '''
    try:
        # if there is only one planner, there is no need for multiprocessing
        if len(set(planners)) == 1:
            plan = call_planner_sp(planners[0], domain, problem, args_profiles[planners[0]][0], pwd, verbose)
            if plan == -1:
                if not verbose: 
                    print(color.fg_red('[some error by external planner -- run again with parameter \'-v 2\']'))
                sys.exit(0)
            return plan

        # create a shared Queue to store the output plan
        returned_plan = Queue()

        # a shared Array to store the failed planners
        failed_planners = Array('I', len(planners))

        # store the running processes
        process_lst = []

        # run in multiprocessing
        for pidx, planner in enumerate(planners):
            # proc = Process(target=call_planner_mp, \
            #     args=(planner, domain, problem, args_profiles[planner][0], pwd, returned_plan, failed_planners, verbose),
            #     daemon=True)
            proc = Process(target=call_planner_mp, \
                args=(planner, domain, problem, args_profiles[planner][0], pwd, returned_plan, failed_planners, verbose))
            proc.daemon = True
            process_lst.append(proc)
            proc.start()

        # wait until one process completes and returns a plan
        while returned_plan.empty():
            # if all processes (planners) failed to solve the problem
            if sum(failed_planners) == len(planners):
                print(color.fg_red('[error by all external planners: run with \'-v 2\']'))
                sys.exit(0)

        # kill running planners (subprocesses) if they are running
        kill_jobs(pwd, planners)

        # make sure processes terminate gracefully
        while process_lst:
            proc = process_lst.pop()
            while proc.is_alive():
                try:
                    proc.terminate()
                    proc.join()
                except: pass

        # return the plan 
        return returned_plan.get()
    # make sure all processes are terminated when KeyboardInterrupt received
    except KeyboardInterrupt:
        if len(planners) > 1:
            kill_jobs(pwd, planners)
            print(color.bg_red('ALL JOBS TERMINATED'))
        raise
Code example #18
        import cv2.cv2 as cv
    except ImportError:
        import cv2 as cv

    # create the Viewport window
    viewport = 'Viewport'
    cv.namedWindow(viewport, cv.WINDOW_GUI_NORMAL)
    cv.setWindowProperty(viewport, cv.WND_PROP_AUTOSIZE, cv.WINDOW_NORMAL)
    cv.setWindowProperty(viewport, cv.WND_PROP_ASPECT_RATIO,
                         cv.WINDOW_FREERATIO)

    set_start_method('spawn')  # windows default

    img_queue = Queue(maxsize=3)  # always up to date with a buffer of 3 frames
    # shared memory array with 4 places (left, upper, right, lower)
    monitor = Array('i', (1, 1, 2, 2))

    # start making screenshots
    screen_capture = ScreenCapture(img_queue, monitor)
    screen_capture.start()

    # use mss to get the width and height of the monitor(s)
    mon: dict = mss.mss().monitors[0]
    mon_width = mon.get('width')
    mon_height = mon.get('height')

    while screen_capture.is_alive():
        try:
            # get the image from the queue that is put in by ScreenCapture
            img = img_queue.get_nowait()
            # show the image in the viewport window
Code example #19
lock = Lock()


def doubler(number):  # was doubler(item); the body reads 'number', so take it as the parameter
    lock.acquire()
    try:
        result = number**2
        proc_name = current_process().name
        print('{0} is squared to {1} by: {2}'.format(number, result,
                                                     proc_name))
    finally:
        lock.release()


if __name__ == '__main__':
    numbers = []
    for i in range(1, 11):
        numbers.append(i)
    toShare = Array('i', 10)  # allocated but never used in this excerpt
    procs = []
    proc = Process(target=doubler, args=(10, ))  # never started; the loop below re-binds proc

    for index, number in enumerate(numbers):
        proc = Process(target=doubler, args=(number, ))
        procs.append(proc)
        proc.start()

    for proc in procs:
        proc.join()
Code example #20
    def locate(self, sensor_positions, multitrack):
        s = sensor_positions.shape
        len = s[0]  # note: shadows the built-in len for the rest of this method

        time_delays = numpy.zeros((len, 1))

        starts = time.time()

        if self.proc_numer == 1:
            for p in range(len):
                time_delays[p] = helpers.time_delay_function(
                    multitrack[0, ], multitrack[p, ])
        else:
            pp = ProcessParallel()

            outs = Array('d', range(len))

            ranges = []

            for result in helpers.per_delta(0, len, len / self.proc_numer):
                ranges.append(result)

            for start, end in ranges:
                pp.add_task(helpers.time_delay_function_optimized,
                            (start, end, outs, multitrack))

            pp.start_all()
            pp.join_all()

            for idx, res in enumerate(outs):
                time_delays[idx] = res

        ends = time.time()

        logging.info('%.15f passed for trial.', ends - starts)

        Amat = numpy.zeros((len, 1))
        Bmat = numpy.zeros((len, 1))
        Cmat = numpy.zeros((len, 1))
        Dmat = numpy.zeros((len, 1))

        for i in range(2, len):
            x1 = sensor_positions[0, 0]
            y1 = sensor_positions[0, 1]
            z1 = sensor_positions[0, 2]
            x2 = sensor_positions[1, 0]
            y2 = sensor_positions[1, 1]
            z2 = sensor_positions[1, 2]
            xi = sensor_positions[i, 0]
            yi = sensor_positions[i, 1]
            zi = sensor_positions[i, 2]
            Amat[i] = (1 / (340.29 * time_delays[i])) * (-2 * x1 + 2 * xi) - (
                1 / (340.29 * time_delays[1])) * (-2 * x1 + 2 * x2)
            Bmat[i] = (1 / (340.29 * time_delays[i])) * (-2 * y1 + 2 * yi) - (
                1 / (340.29 * time_delays[1])) * (-2 * y1 + 2 * y2)
            Cmat[i] = (1 / (340.29 * time_delays[i])) * (-2 * z1 + 2 * zi) - (
                1 / (340.29 * time_delays[1])) * (-2 * z1 + 2 * z2)
            Sum1 = (x1**2) + (y1**2) + (z1**2) - (xi**2) - (yi**2) - (zi**2)
            Sum2 = (x1**2) + (y1**2) + (z1**2) - (x2**2) - (y2**2) - (z2**2)
            Dmat[i] = 340.29 * (time_delays[i] - time_delays[1]) + (
                1 / (340.29 * time_delays[i])) * Sum1 - (
                    1 / (340.29 * time_delays[1])) * Sum2

        M = numpy.zeros((len + 1, 3))
        D = numpy.zeros((len + 1, 1))
        for i in range(len):
            M[i, 0] = Amat[i]
            M[i, 1] = Bmat[i]
            M[i, 2] = Cmat[i]
            D[i] = Dmat[i]

        M = numpy.array(M[2:len, :])
        D = numpy.array(D[2:len])

        D = numpy.multiply(-1, D)

        Minv = linalg.pinv(M)

        T = numpy.dot(Minv, D)
        x = T[0]
        y = T[1]
        z = T[2]

        return x, y, z
Code example #21
File: ffs_shoot.py Project: sulcgroup/anm-oxdna
    print >> sys.stderr, "0 starting configurations! aborting"
    sys.exit(2)

# check that we can write to the success pattern
try:
    checkfile = open(success_pattern + '0', 'w')
    checkfile.close()
    os.remove(success_pattern + '0')
except:
    print >> sys.stderr, "could not write to success_pattern", success_pattern
    sys.exit(3)

success_lock = Lock()
success_count = Value('i', initial_success_count)
attempt_count = Value('i', 0)
success_from = Array('i', len(starting_confs))  # zeroed by default
attempt_from = Array('i', len(starting_confs))  # zeroed by default
undetermined_lock = Lock()
undetermined_count = Value('i', 0)

# write the condition file
if os.path.exists('conditions.txt'):
    log("Main: Warning: overwriting conditions file")
condition_file = open('conditions.txt', "w")
condition_file.write("action = stop_or\n")
condition_file.write("condition1 = {\n%s %s %s\n}\n" %
                     (lambda_f_name, lambda_f_compar, str(lambda_f_value)))
condition_file.write("condition2 = {\n%s %s %s\n}\n" %
                     (lambda_m_name, lambda_m_compar, str(lambda_m_value)))
condition_file.close()
Code example #22
    def __init__(self, initval=[0]):
        self.lst = Array('i', initval)
        self.lock = Lock()
Code example #23
        try:
            os.makedirs(args.directory)
        except:
            print('\tERROR: Failed to create the directory', file=sys.stderr)
            sys.exit(1)

    return


if __name__ == '__main__':
    args = parser.parse_args()
    process_args(args)

    #  0 -> process is not doing anything
    #  1 -> process is working
    process_states = Array('i', [0] * args.max_threads)
    link_queue = Queue()
    visited = Manager().dict()
    processes = []

    # add one thing to the queue
    link_queue.put('index.html')
    visited['index.html'] = True

    # start all of the processes
    for i in range(args.max_threads):
        print('Creating and starting process %d' % i)
        p = Process(target=crawler_process,
                    args=(process_states, link_queue, visited, args, i))
        p.start()
        processes.append(p)
Code example #24
def backbone(A,
             kind='ultrametric',
             start=None,
             offset=None,
             nprocs=None,
             quiet=False):
    """ Compute the graph backbone.

    The graph backbone is the set of edges whose weight does not change after
    the closure operation. These edges respect the triangular inequality (kind
    = 'metric') or the maxmin inequality (kind = 'ultrametric'), and are
    therefore part of the shortest/bottleneck paths of the graph.

    Parameters
    ----------

    A : array_like
        Adjacency matrix. Will be converted to CSR

    kind : str
        the type of closure to compute: either 'metric' or 'ultrametric'
        (default).

    start : int
        Optional; only compute the closure on the submatrix starting at this
        index. Default is 0.

    offset : int
        Optional; only compute the closure on the submatrix ending at this
        offset. The default is N, where A is an (N, N) matrix.

    nprocs : int
        Optional; distribute the computation over `nprocs` workers. Default is
        90% of the available CPUs/cores.

    Returns
    -------
    A scipy.sparse.coo_matrix.
    """
    A = sp.csr_matrix(A)
    N = A.shape[0]
    fromi, toi = _fromto(start, offset, N)
    nprocs = _nprocs(nprocs)
    indptr = Array(c_int, A.indptr)
    indices = Array(c_int, A.indices)
    data = Array(c_double, A.data)
    initargs = (kind, indptr, indices, data, A.shape)
    if not quiet:
        print '{}: launching pool of {} workers.'.format(now(), nprocs)
    pool = Pool(processes=nprocs,
                initializer=_init_worker,
                initargs=initargs,
                maxtasksperchild=max_tasks_per_worker)
    try:
        with closing(pool):
            result = pool.map_async(_backbone_worker, xrange(fromi, toi))
            while not result.ready():
                result.wait(1)
        pool.join()
        if result.successful():
            coords = result.get()
        else:
            print >> sys.stderr, "There was an error in the pool: `%s`" % (
                result._value)
            sys.exit(2)  # ERROR occurred
    except KeyboardInterrupt:
        print "^C"
        pool.terminate()
        sys.exit(1)  # SIGINT received
    if not quiet:
        print '{}: done'.format(now())
    coords = np.asarray(reduce(list.__add__, coords))
    if len(coords) > 0:
        d = np.ones(len(coords))
        B = sp.coo_matrix((d, (coords[:, 0], coords[:, 1])), shape=A.shape)
    else:
        # empty matrix
        B = sp.coo_matrix(A.shape)
    return B
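A hedged usage sketch for backbone(); note the surrounding code is Python 2 (print statements, xrange), so the call is assumed to run under that interpreter, and the matrix values are made up:

import numpy as np

A = np.array([[0.0, 0.5, 0.1],
              [0.5, 0.0, 0.4],
              [0.1, 0.4, 0.0]])
B = backbone(A, kind='metric', nprocs=2)  # returns a scipy.sparse.coo_matrix
print(B.toarray())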
Code example #25
# from multiprocessing import Process, Lock
#
# def f(l, i):
#     l.acquire()
#     print ('hello world')
#     l.release()
#
# if __name__ == '__main__':
#     lock = Lock()
#
#     for num in range(10):
#         Process(target=f, args=(lock, num)).start()


from multiprocessing import Process, Value, Array

def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]

if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))
    # print(arr)
    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print(num.value)
    print(arr[:])
Code example #26
#now, fill up the order book
#bids will be placed from lowest ask to -infinity and asks will be placed from highest bid to +infinity
#print(begin_time)
end_time = begin_time + 20  #build initial LOB for 20 seconds
num_possible_orders_i = 2
k = 0
curr_time = time.time()
curr_time_from_beg = curr_time - begin_time
lob = LimitOrderBook()
id = 0
while (curr_time <= end_time):
    #while(k < 10):
    curr_time_from_beg = curr_time - begin_time
    threads = [None] * num_possible_orders_i
    #times = [None] * num_possible_orders_i
    times = Array('d', range(num_possible_orders_i))
    for i in range(num_possible_orders_i):
        threads[i] = multiprocessing.Process(
            target=nextTime_process,
            args=(times, i, rate_parameters_per_process[2:4]))
        threads[i].start()

    for i in range(len(threads)):
        threads[i].join()
    #print("here")
    min_time = min(times)
    '''print(times[:])
    print(min_time)
    print(index_of_minimum(times))'''
    '''print("Iteration %d, Limit Order Buy Time = %f, Limit Order Sell Time = %f, Minimum Time = %f, "
          "Index of minimum time = %d"
Code example #27
import os
import sys
import time
import numpy
import multiprocessing
from multiprocessing import Process, Value, Array

size = 2000000
number_processes = 8
chunk_size = size // number_processes  # integer division: slice bounds must be ints in Python 3

a = Array('d', [1.0] * size)
b = Array('d', [1.0] * size)

dot = Value('d', 0.0)
processes = []


def dotproduct(dot, a, b):
    # note: += on a Value is a read-modify-write and is not atomic across workers
    dot.value += sum(x * y for x, y in zip(a, b))


time1 = time.time()

for i in range(number_processes):
    lb = i * chunk_size
    ub = lb + chunk_size
    # note: slicing a shared Array copies values into a plain list, so each
    # worker receives a private copy of its chunk rather than shared memory
    p = Process(target=dotproduct, args=(
        dot,
        a[lb:ub],
        b[lb:ub],
Code example #28
    def __init__(self):

        self.obj = MyTCPServer()
        self.flag = Value('i', 1)  # initialize shared-memory value
        self.error = Array('c', 128)  # initialize shared-memory array
        self.start_flag = 0  # flag for whether the server has started; 0 means not started
Code example #29
File: main.py Project: AlexKjes/somsom
def genetic_brute(nProcs=4):

    proc_times = [0] * nProcs
    procs = [None] * nProcs
    case = 1
    optimum = 7542
    run_counter = Value('i', 0)
    proc_activity = Array('i', nProcs)
    """
    0: ownerId, 1: best_score, 2: worst_score, 3: average_score, 4: times_tested, 5: execution_time
    6: node_factor, 7: lr0, 8: lambda_n, 9:lambda_lr, 10: sigma, 11: radius_factor, 12: translate_x, 13: translate_y
    """
    current_best = Array('d', [
        -1.0, 999999999.9, 999999999.9, 99999999.9, 0.0, 99999.9, 2.5, 0.1,
        10000.0, 100000.0, 30.0, 1.0, 1.0, 1.0
    ])

    for i in range(nProcs):
        procs[i] = (Process(target=worker,
                            args=(i, current_best, case, optimum, run_counter,
                                  proc_activity)))
        proc_times[i] = (time.time())
        procs[i].start()

    log_timer = time.time()

    while True:
        iter_time = time.time()
        if time.time() - log_timer > 30:
            log_timer = iter_time
            print('---------------------------------------------')
            print('{0} Self organizing maps trained'.format(run_counter.value))
            print('Process {0} has the best run so far'.format(
                int(current_best[OWNER])))
            print('Best score: {0}\nWorst score: {1}'.format(
                round(current_best[BEST_SCORE] * 100) / 100,
                round(current_best[WORST_SCORE] * 100) / 100))
            print('Average score: {}'.format(
                (round(current_best[AVERAGE_SCORE] * 100) / 100)))
            print('Average time: {0} over {1} runs'.format(
                round(current_best[AVERAGE_TIME] * 100) / 100,
                int(current_best[TIMES_TESTED])))
            print('Variables:\n    Node Factor: {0}\n    Learning Rate: {1}'.
                  format(current_best[NODE_FACTOR],
                         current_best[LEARNING_RATE]))
            print('    Lambda_n: {0}\n    Lambda_lr: {1}'.format(
                current_best[LAMBDA_N], current_best[LAMBDA_LR]))
            print('    Sigma: {0}\n    Radius factor: {1}'.format(
                current_best[SIGMA], current_best[RADIUS_FACTOR]))
            print('    Translate x: {0}\n    Translate y: {1}'.format(
                current_best[TRANSLATE_X], current_best[TRANSLATE_Y]))
            print('cptString: {0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}'.format(
                current_best[NODE_FACTOR], current_best[LEARNING_RATE],
                current_best[LAMBDA_N], current_best[LAMBDA_LR],
                current_best[SIGMA], current_best[RADIUS_FACTOR],
                current_best[TRANSLATE_X], current_best[TRANSLATE_Y]))
            print('---------------------------------------------')

        for i, (t, p) in enumerate(zip(proc_times, procs)):
            if iter_time - proc_times[i] > 120 and proc_activity[i]:
                print("Process {} timed out and is killed".format(i))
                p.terminate()
                procs[i] = Process(target=worker,
                                   args=(i, current_best, case, optimum,
                                         run_counter, proc_activity))
                proc_times[i] = iter_time
            else:
                proc_activity[i] = 0  # fixed: the original re-bound the shared Array name to a local list
                proc_times[i] = iter_time

        time.sleep(2)
Code example #30
    def __init__(self):
        super().__init__()
        self.sample_index = 0

    def did_connect(self, aidlab):
        print("Connected to: ", aidlab.address)

    def did_disconnect(self, aidlab):
        print("Disconnected from: ", aidlab.address)

    def did_receive_ecg(self, aidlab, timestamp, values):
        global result, buffer_size
        self.sample_index += 1
        result[self.sample_index % buffer_size] = values[0]


if __name__ == '__main__':
    # create a process for the plot
    result = Array('d', buffer_size)
    Process(target=chart, args=(result,)).start()

    signals = ["ecg"]

    main_manager = MainManager()
    main_manager.connect(signals)

    # Start the connection
    while True:
        pass