def __init__(self, n, mass, RxInit, RyInit, VxInit, VyInit, AxInit, AyInit,
             interactions):
    """Allocate shared-memory state for a system of N+1 bodies.

    Position (R), velocity (V) and acceleration (A) live in
    multiprocessing ``Array`` buffers; ``bR``/``bV``/``bA`` are numpy
    views over the same memory, reshaped to (3, N+1), so writes through
    the views are visible to child processes. Column 0 is seeded with
    the initial x/y components; the z row is left at zero.

    Changes from the original: the commented-out pure-numpy variant was
    deleted and the throwaway ``arrR``/``arrV``/``arrA`` locals were
    collapsed into chained calls (views are unchanged).
    """
    self.N = n
    # Mass is a single shared double; lock=False since it is only set here.
    self.M = Value('d', mass, lock=False)
    self.R = Array('d', 3 * (self.N + 1))
    self.V = Array('d', 3 * (self.N + 1))
    self.A = Array('d', 3 * (self.N + 1))
    # numpy views share memory with the shared Arrays ('d' -> float64).
    self.bR = np.frombuffer(self.R.get_obj()).reshape((3, self.N + 1))
    self.bR[0, 0] = RxInit
    self.bR[1, 0] = RyInit
    self.bV = np.frombuffer(self.V.get_obj()).reshape((3, self.N + 1))
    self.bV[0, 0] = VxInit
    self.bV[1, 0] = VyInit
    self.bA = np.frombuffer(self.A.get_obj()).reshape((3, self.N + 1))
    self.bA[0, 0] = AxInit
    self.bA[1, 0] = AyInit
    self.Interactions = interactions
Exemple #2
0
def main(args):
    """Read chromosome sizes and regions, then scan all regions in parallel.

    Fix: the local variable ``chr`` shadowed the builtin; renamed to
    ``chrom``.

    Parameters
    ----------
    args : dict
        Expects keys 'fithr', 'rdthr', 'abthr', 'ncores', 'file',
        'chromfile', 'regionfile', 'outputfile'.
    """
    # Shared with worker processes / helpers via module global.
    global shared_arrays

    # Thresholds and worker count, with defaults when unset/empty.
    fithr = int(args['fithr'])
    rdthr = int(args['rdthr']) if args['rdthr'] else 0
    abthr = int(args['abthr']) if args['abthr'] else 15
    ncores = int(args['ncores']) if args['ncores'] else 1
    files = args['file']

    # Data structures
    chr_length, shared_arrays = {}, {}
    regions = []

    # chrom.sizes file: one "<chrom>\t<length>" per non-empty line.
    with open(args['chromfile']) as fi:
        for line in fi:
            line = line.rstrip()
            if line:
                chrom, length = line.split('\t')  # avoid shadowing builtin chr
                chr_length.setdefault(chrom, int(length))

    # Region list: one region string per non-empty line, validated first.
    with open(args['regionfile']) as fi:
        for line in fi:
            line = line.rstrip()  # line is a region
            if line:
                check_region(line, chr_length)
                regions.append(line)

    # One boolean bitarray per variant class per chromosome;
    # +1 so positions can be indexed 1-based.
    for chrom, length in chr_length.items():
        shared_arrays.setdefault(
            chrom, {
                'snv': Array(ctypes.c_bool, length + 1),
                'ins': Array(ctypes.c_bool, length + 1),
                'del': Array(ctypes.c_bool, length + 1)
            })

    # Multiprocessing
    # -> TODO, sometimes when a process die the master hang in wait, to check
    with Pool(ncores) as pool:
        # results are unused; workers fill the shared bitarrays in place.
        results = pool.map(partial(run_region, files, fithr, rdthr, abthr),
                           regions)

    # Persist bitarrays.
    bitarrays_toHDF5(args['outputfile'])
Exemple #3
0
 def __init__(self):
     """Set up shared-memory queue storage and its synchronization locks."""
     # One fixed-size raw byte block per queue slot (no per-Array lock).
     self.array = [
         Array('c', MyQueue.QUEUE_BLOCK_SIZE, lock=False)
         for _ in range(MyQueue.QUEUE_SIZE)
     ]
     # Shared buffer holding the queue's name.
     self.name = Array('c', MyQueue.MAX_QUEUE_NAME, lock=False)
     # Cursors and occupancy flag; -1 means "unset".
     self.send_pos = Value('i', -1, lock=False)
     self.recv_pos = Value('i', -1, lock=False)
     self.occupied_flag = Value('i', -1, lock=False)
     # Independent locks for the sending and receiving sides.
     self.slock = multiprocessing.Lock()
     self.rlock = multiprocessing.Lock()
Exemple #4
0
 def __init__(self, cam_num, debug=False):
     """Initialise the camera-tracking process state."""
     Process.__init__(self)
     self.cam = Camera(cam_num, threaded=False)
     # Shared vector pairs, initialised to the (-1, -1) sentinel.
     sentinel_pair = [(-1, -1), (-1, -1)]
     self.puck_locations = Array(Vector, sentinel_pair)
     self.puck_velocity = Array(Vector, sentinel_pair)
     self.gun_positions = Array(Vector, sentinel_pair)
     self.debug = debug
     self.field_crop_boundary = []
     self.field_post_crop_limits = [5000, 0]  # [left, right]
     self.crop_points = []
     self.lighting_constant = 250
Exemple #5
0
 def __init__(self, ip="127.0.0.1", body_id=0):
     """Spawn the Qualisys client process with shared-memory output slots.

     The child process (self.qualisys_process) receives the shared
     buffers below and fills them with the tracked body's state;
     lock=False, so access is unsynchronized.

     Change from the original: removed the dead commented-out
     ``shared_timestamp`` assignment.
     """
     # Shared c_double buffers for the body state.
     self.shared_bodyPosition = Array(c_double, 3, lock=False)
     self.shared_bodyVelocity = Array(c_double, 3, lock=False)
     self.shared_bodyOrientationQuat = Array(c_double, 4, lock=False)
     self.shared_bodyOrientationMat9 = Array(c_double, 9, lock=False)
     self.shared_bodyAngularVelocity = Array(c_double, 3, lock=False)
     self.shared_timestamp = Value(c_double, lock=False)
     args = (ip, body_id, self.shared_bodyPosition, self.shared_bodyVelocity,
             self.shared_bodyOrientationQuat, self.shared_bodyOrientationMat9,
             self.shared_bodyAngularVelocity, self.shared_timestamp)
     self.p = Process(target=self.qualisys_process, args=args)
     self.p.start()
Exemple #6
0
def main2():
    """Demonstrate sharing scalars, a string and structs with a child process."""
    lock = Lock()

    num = V2('i', 7)
    ratio = V2(c_double, 1.0 / 3.0, lock=False)
    text = Array('c', b'hello world', lock=lock)
    points = Array(Point, [(1.875, -6.25), (-5.75, 2.0), (2.375, 9.5)],
                   lock=lock)

    # Let the child mutate the shared objects, then print what it left.
    worker = Process(target=modify, args=(num, ratio, text, points))
    worker.start()
    worker.join()

    print(num.value)
    print(ratio.value)
    print(text.value)
    print([(p.x, p.y) for p in points])
 def __init__(self, size=100):
     """Create a shared, lock-protected array of Session slots."""
     self.lock = Lock()  # guards concurrent access to the sessions array
     self.size = size    # number of Session slots
     self.sessions = Array(Session, self.size, lock=self.lock)
Exemple #8
0
    def __init__(self, app):
        """Cache the configured super-user names in a shared byte buffer."""
        usernames = app.config.get("SUPER_USERS", [])
        joined = ",".join(usernames)

        # Reserve room for one more username (plus separator) on top of
        # the current list.
        self._max_length = len(joined) + MAX_USERNAME_LENGTH + 1
        self._array = Array("c", self._max_length, lock=True)
        self._array.value = joined.encode("utf8")
Exemple #9
0
def graph2memory(G):
    """Extract each node's "knowledge" attribute from a graph and place
    it in shared memory.

    Cushion function so that >p_to_c can be applied to a graph.

    Parameters
    ----------
    G : networkx graph

    Returns
    -------
    Shared Memory Array

    """

    knowledge_dict = {}
    mapper = cpmap()
    G_new = deepcopy(G)
    for n in G_new:
        # NOTE(review): ``.node[...]`` is the networkx 1.x API (renamed
        # ``.nodes[...]`` in 2.x) — confirm the pinned networkx version.
        knowledge_dict[n] = G_new.node[n]["knowledge"]

    lock = Lock()
    ctypes_knowledge_array = mapper.p_to_c_convert(knowledge_dict)

    # Store the converted data in shared memory.
    K = Array(DataStructure.KnowledgeStruct, ctypes_knowledge_array, lock=lock)
    return K
Exemple #10
0
 def runCmd(self,
            exe,
            bypass,
            tail=None,
            code=None):
     """Run *exe* through the shell, capturing its exit status and error tail.

     Returns (self.tail, exit_code).

     Fix: the original signature built ``tail=Array(...)`` and
     ``code=Value(...)`` as defaults, which Python evaluates once at
     definition time — every call without explicit arguments shared the
     same buffers. Fresh objects are now created per call.
     """
     if tail is None:
         tail = Array('c', ' ' * 10000)
     if code is None:
         code = Value('i', 0)
     if bypass:
         proc = sp.Popen(exe,
                         bufsize=1024,
                         stdout=sp.PIPE,
                         stderr=sp.PIPE,
                         shell=True)
         # Drain stdout on a thread so the pipe cannot fill and stall.
         t1 = threading.Thread(target=self.bufferScreen,
                               args=(proc.stdout, ))
         t1.start()
         t1.join()
         proc.wait()
         code.value = proc.returncode
         if code.value != 0 and tail.value.strip() == '':
             tail.value = 'I was only able to capture the following execution error while executing the following:\n'+exe+'\n... you may wish to re-run without bypass option.'+ \
                '\n'+'~'*18+'\n'+str(proc.stderr.read().strip())+'\n'+'~'*18
             self.tail = self.tail + '\n' + tail.value
     else:
         code.value = sp.call(exe, shell=True)
         if code.value != 0:
             tail.value = '... The following command failed for the reason above (or below)\n' + exe + '\n'
             self.tail = self.tail + '\n' + tail.value
     return self.tail, code.value
    def __init__(self):
        """Set up pulser state, the hardware server process, and its readers."""
        super(PulserHardware, self).__init__()
        self._shutter = 0
        self._trigger = 0
        self.xem = None
        # NOTE(review): this binds the QMutex *class*, not an instance —
        # confirm whether ``QtCore.QMutex()`` was intended here.
        self.Mutex = QtCore.QMutex
        self._adcCounterMask = 0
        self._integrationTime = Q(100, 'ms')

        # Channels shared with the server process: data, control pipe,
        # log records, and a locked shared-memory array.
        self.dataQueue = multiprocessing.Queue()
        self.clientPipe, self.serverPipe = multiprocessing.Pipe()
        self.loggingQueue = multiprocessing.Queue()
        self.sharedMemoryArray = Array(c_longlong,
                                       self.sharedMemorySize,
                                       lock=True)

        # The server process owns the hardware; reader threads pump its
        # data and log output back into this process.
        self.serverProcess = self.serverClass(self.dataQueue, self.serverPipe,
                                              self.loggingQueue,
                                              self.sharedMemoryArray)
        self.serverProcess.start()

        self.queueReader = QueueReader(self, self.dataQueue)
        self.queueReader.start()

        self.loggingReader = LoggingReader(self.loggingQueue)
        self.loggingReader.start()
        self.ppActive = False
        self._pulserConfiguration = None
Exemple #12
0
 def __init__(self, size=100):
     """Create a shared, lock-protected array of Command slots."""
     self.lock = Lock()  # guards concurrent access to the commands array
     self.size = size    # number of Command slots
     self.commands = Array(Command, self.size, lock=self.lock)
Exemple #13
0
    def __init__(self, app):
        """Cache the configured super-user names in a shared 'c' byte buffer.

        Fix: ``Array('c', ...).value`` expects bytes; assigning a str
        raises TypeError on Python 3. Encode as UTF-8, matching the
        sibling implementation of this class elsewhere in the codebase.
        """
        usernames = app.config.get('SUPER_USERS', [])
        usernames_str = ','.join(usernames)

        # Room for one extra username beyond the current list.
        self._max_length = len(usernames_str) + MAX_USERNAME_LENGTH + 1
        self._array = Array('c', self._max_length, lock=True)
        self._array.value = usernames_str.encode('utf8')
Exemple #14
0
def serial_ML(args):
    """Baseline: train the model sequentially in a single process.

    Loads the user's model file once (cached in the module-global
    ``model_module``), publishes its initial weights through an unlocked
    shared array, trains on each datum in order, and prints wall time.
    """
    print(args.name)
    global model_module
    if model_module is None:
        # Import the user-supplied model file as a module, exactly once.
        spec = importlib.util.spec_from_file_location(
            "module.name", os.path.abspath(args.model_file))
        model_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(model_module)

    init_weights = model_module.init()
    data = model_module.get_data()

    # w is a numpy view over the shared coefficient buffer, so
    # train_wrapper's updates are visible through both names.
    global coef_shared, w
    coef_shared = Array(c_double, init_weights.flat, lock=False)
    w = np.frombuffer(coef_shared)

    T0 = time.time()
    for i in range(len(data)):
        train_wrapper(data[i])
    T1 = time.time()

    print('sequential job finished in', T1 - T0, 's')
    model_module.finish(w)
Exemple #15
0
 def __init__(self, robot, dt=0.01):
     """Launch the display process, sharing the robot configuration vector."""
     self.dt = dt
     # One c_double per configuration coordinate; lock=False, so the
     # buffer is accessed without synchronization.
     self.shared_q_viewer = Array(c_double, robot.nq, lock=False)
     self.p = Process(target=self.display_process,
                      args=(robot, self.shared_q_viewer))
     self.p.start()
Exemple #16
0
 def __init__(self, domain_factory, shm_proxy, nb_domains=os.cpu_count()):
     """Build vectorized shared-memory registers for nb_domains parallel domains.

     Each type registered by ``shm_proxy.register()`` (as a (type, count)
     pair) is materialized ``count`` times per domain; bookkeeping maps
     record where each type's arrays live in the flat register.
     """
     super().__init__(domain_factory, nb_domains)
     self._conditions = [Condition() for i in range(nb_domains)]
     self._shm_proxy = shm_proxy
     self._shm_registers = {
     }  # Maps from registered method parameter types to vectorized array ranges
     self._shm_types = {}  # Maps from register index to type
     self._shm_sizes = {
     }  # Maps from register method parameter types to number of arrays encoding each type
     self._shm_arrays = []  # Methods' vectorized parameters
     self._rsize = 0  # Total size of the register (updated below)
     self._shm_names = [None] * nb_domains  # Vectorized methods' names
     self._shm_params = [
         None
     ] * nb_domains  # Indices of methods' vectorized parameters
     for i in range(nb_domains):
         j = 0
         for r in shm_proxy.register():
             # r[0] is the registered type, r[1] its occurrence count.
             for k in range(r[1]):
                 m = shm_proxy.initialize(r[0])
                 if type(m) == list or type(m) == tuple:
                     if i == 0 and k == 0:  # do it once for all the domains and redundant initializers
                         self._shm_sizes[r[0].__name__] = len(m)
                         self._shm_registers[r[0].__name__] = (
                             j, j + (r[1] * len(m)))
                         self._shm_types.update({
                             kk: r[0]
                             for kk in range(j, j + (r[1] * len(m)), len(m))
                         })
                         self._rsize += (r[1] * len(m))
                     self._shm_arrays.extend(m)
                     j += len(m)
                 else:
                     if i == 0 and k == 0:  # do it once for all the domains and redundant initializers
                         self._shm_sizes[r[0].__name__] = 1
                         self._shm_registers[r[0].__name__] = (j, j + r[1])
                         self._shm_types.update(
                             {kk: r[0]
                              for kk in range(j, j + r[1])})
                         self._rsize += r[1]
                     self._shm_arrays.append(m)
                     j += 1
         # Per-domain slots: a 100-byte method-name buffer and an int
         # vector of parameter indices (-1 = unset).
         self._shm_names[i] = Array('c', bytearray(100))
         self._shm_params[i] = Array(
             'i', [-1] * sum(r[1] for r in shm_proxy.register()))
     logger.info(rf'Using {nb_domains} parallel shared memory domains')
Exemple #17
0
def shared_array(shape):
    """Return a float64 numpy array of *shape* backed by shared memory.

    The underlying buffer is a locked multiprocessing Array of c_double,
    so the returned view can be shared with child processes.
    """
    import numpy as np
    from ctypes import c_double
    from multiprocessing.sharedctypes import Array

    n_items = int(np.prod(shape))
    shared = Array(c_double, n_items)
    # Zero-copy numpy view over the shared buffer, shaped as requested.
    view = np.frombuffer(shared.get_obj(), dtype=float)
    return view.reshape(shape)
Exemple #18
0
 def __init__(self, size_w):
     """Initialise weights ~ N(0, 1)/sqrt(size_w) in unlocked shared memory."""
     init_values = (np.random.normal(size=(size_w, 1)) * 1. /
                    np.sqrt(size_w)).flat
     shared = Array(c_double, init_values, lock=False)
     # Column-vector numpy view over the shared buffer.
     view = np.frombuffer(shared)
     self.w = view.reshape((len(view), 1))
Exemple #19
0
 def geometric_plan(self):
     """Plan a full geometric trajectory for the current symbolic plans.

     Ranks every candidate plan by the cost of a linear-interpolation
     trajectory through its waypoints, then runs trajectory optimization
     in rank order and commits to the first plan that succeeds.

     Returns
     -------
     bool
         True if some plan's optimization succeeded, False otherwise.
     """
     if not self.plans:
         HumoroLGP.logger.warn(
             'Symbolic plan is empty. Cannot plan trajectory!')
         return False
     # prepare workspace
     self.place_human()
     workspace = self.workspace.get_pyrieef_ws()
     self.ranking = []
     self.chosen_plan_id = None
     # compute plan costs
     for i, plan in enumerate(self.plans):
         waypoints, waypoint_manifolds = self.get_waypoints(plan)
         trajectory = linear_interpolation_waypoints_trajectory(waypoints)
         objective = TrajectoryConstraintObjective(
             dt=1 / self.fps, enable_viewer=self.enable_viewer)
         objective.set_problem(workspace=workspace,
                               trajectory=trajectory,
                               waypoint_manifolds=waypoint_manifolds,
                               goal_manifold=waypoint_manifolds[-1][0])
         self.objectives.append(objective)
         self.ranking.append((objective.cost(), i))
     # rank the plans
     self.ranking.sort(key=operator.itemgetter(0))
     # optimize the objective according to self.ranking
     for r in self.ranking:
         if self.enable_viewer:
             # Viewer mode: optimize in a child process while the viewer
             # runs here; results come back via shared status/traj.
             self.viewer.initialize_viewer(self.objectives[r[1]],
                                           self.objectives[r[1]].trajectory)
             status = Value(c_bool, True)
             traj = Array(
                 c_double,
                 self.objectives[r[1]].n * (self.objectives[r[1]].T + 2))
             p = Process(target=self.objectives[r[1]].optimize,
                         args=(status, traj))
             p.start()
             self.viewer.run()
             p.join()
             success = status.value
             # First two entries hold q_init; the remainder is the path.
             traj = Trajectory(q_init=np.array(traj[:2]),
                               x=np.array(traj[2:]))
         else:
             success, traj = self.objectives[r[1]].optimize()
         if success:  # choose this plan
             self.plan = self.plans[r[1]]
             self.chosen_plan_id = r[1]
             if self.verbose:
                 for a in self.plan[1]:
                     HumoroLGP.logger.info(a.name + ' ' +
                                           ' '.join(a.parameters))
             robot = self.workspace.get_robot_link_obj()
             robot.paths.append(traj)
             return True
     HumoroLGP.logger.warn('All plan geometrical optimization infeasible!')
     return False
Exemple #20
0
 def add_process(self, name, target, args):
     """Register and create (but not start) a monitored child process.

     A shared 3-int status array is appended to *args* so the child can
     report heartbeat/feedback and receive commands.

     Fixes: replaced the Python-2-only ``xrange`` list build with
     ``[0] * 3`` and the ``.keys()`` membership anti-pattern with a
     direct dict test.
     """
     new_share = Array('i', [0] * 3)
     new_share[conf.DAEMON_HEATBEAT] = 3
     new_share[conf.DAEMON_FEEDBACK] = 0
     new_share[conf.DAEMON_COMMAND] = 0
     self.share[name] = new_share
     self.processes[name] = Process(target=target, args=args + (new_share,))
     # Remember original launch parameters only for first registration.
     if name not in self.process_args:
         self.process_args[name] = (target, args)
Exemple #21
0
 def __init__(self):
     """Initialise queues, lock and the shared target Point."""
     super(Genove23, self).__init__()
     self.admin = None
     self.queue = JoinableQueue()
     self.exitQ = Queue()
     self.lock = Lock()
     # One shared Point slot holding a pair of empty byte strings.
     empty = str('').encode('utf-8')
     self.target = Array(Point, [(empty, empty)], lock=self.lock)
Exemple #22
0
 def __init__(self, learning_rate, lambda_reg, batch_size, dim, lock=False):
     """Store training hyper-parameters and allocate the shared weights."""
     # Hyper-parameters.
     self.__learning_rate = learning_rate
     self.__lambda_reg = lambda_reg
     self.__batch_size = batch_size
     self.__dim = dim
     self.__persistence = 30  # fixed persistence constant
     # Shared state: dim doubles, synchronized only if lock is truthy.
     self.__lock = lock
     self.__w = Array(c_double, dim, lock=lock)
     self.__log = Queue()
Exemple #23
0
    def __init__(self, buffer_size, dataset_size):
        """Allocate the shared data buffer and its put/get cursors.

        Improvement: ``Array('d', n)`` zero-fills natively, so the
        temporary ``[0.0] * n`` list is no longer materialized.
        """
        self.buffer_size = buffer_size
        self.dataset_size = dataset_size
        self.put_index = Value('i', 0)  # next slot to write
        self.get_index = Value('i', 0)  # next slot to read
        self.put_lock = mp.Lock()

        self.cdatasets = Array('d', self.buffer_size * self.dataset_size)
        # NOTE(review): reaches into multiprocessing internals
        # (_obj._wrapper) — confirm this survives the target Python version.
        self.cbuffer = self.cdatasets._obj._wrapper
Exemple #24
0
 def __init__(self, filepath=None, size=100):
     """Shared, lock-protected task list.

     Fix: the *filepath* parameter was ignored (``self.filepath`` was
     hard-coded to None), so file sync could never be enabled.
     """
     # Lock handler
     self.lock = Lock()
     # Maximum number of task
     self.size = size
     # Shared Array of Task
     self.tasks = Array(Task, self.size, lock=self.lock)
     # File path; if None, task_list is not sync'ed to file
     self.filepath = filepath
Exemple #25
0
 def __init__(self, maxlen: int = 100, timeframe: float = 10.0):
     """Shared buffer of up to *maxlen* Cell entries over a rolling timeframe."""
     assert maxlen > 0
     self.__lock = RLock()
     # maxlen Cell slots initialised to (0.0, 0.0); every shared object
     # below reuses the same re-entrant lock.
     self.__timestamps = Array(Cell, [(0.0, 0.0)] * maxlen,
                               lock=self.__lock)
     # Cursor state: current index, window start, and occupied length.
     self.__index = Value('i', 0, lock=self.__lock)
     self.__start = Value('i', 0, lock=self.__lock)
     self.__length = Value('i', 0, lock=self.__lock)
     self.__maxlen = maxlen
     self.__timeframe = timeframe
Exemple #26
0
    def __init__(self, nROIs, nVols, nSkipVols, nfbDataFolder):
        """Allocate shared real-time storage (nROIs * nVols doubles)."""
        super().__init__(nROIs, autostart=False)

        self.nfbDataFolder = nfbDataFolder
        self.nROIs = nROIs
        self.nVols = nVols

        # Flat zero-initialised buffer; the sample counter starts past
        # the skipped volumes.
        total = self.nROIs * self.nVols
        self.rtdata = Array(c_double, [0] * total)
        self.nData = Value('i', self.nROIs * nSkipVols)

        self.start_process()
Exemple #27
0
    def __init__(self, dataset_size):
        """Shared two-slot dataset buffer, pre-filled with ones.

        Improvement: ``Array('d', n)`` zero-fills natively, so the
        temporary ``[0.0] * n`` initializer list is no longer built.
        """
        self.dataset_size = dataset_size
        self.index = Value('i', 0)
        self.get_lock = mp.Lock()
        self.index_lock = mp.Lock()

        self.cdatasets = Array('d', 2 * self.dataset_size)
        # NOTE(review): relies on multiprocessing internals
        # (_obj._wrapper) — confirm validity on the target Python version.
        self.cbuffer = self.cdatasets._obj._wrapper

        # Fill both slots with ones.
        init_array = numpy.ones((dataset_size, ), dtype='float32')
        self.put(init_array)
        self.put(init_array)
Exemple #28
0
    def __init__(self, nROIs, nBlocks, nfbDataFolder):
        """Allocate shared storage for per-block ROI data."""
        super().__init__(nROIs * nBlocks, autostart=False)

        self.nfbDataFolder = nfbDataFolder
        self.nROIs = nROIs
        self.nBlocks = nBlocks

        # NOTE(review): the buffer is nROIs * nBlocks**2 doubles while
        # the parent is initialised with nROIs * nBlocks — confirm the
        # extra factor of nBlocks is intended and not a typo.
        self.rtdata = Array(c_double,
                            [0] * self.nROIs * self.nBlocks * self.nBlocks)
        self.nData = Value('i', 0)

        self.start_process()
Exemple #29
0
    def simulation(self):
        """Start simulation.

        Runs self.GEN generations over a Barabasi-Albert agent graph,
        passing each generation's knowledge to the next through
        shared-memory arrays, and pickles the per-generation results.
        (Python 2 code: uses ``xrange`` and text-mode pickle files.)
        """
        # Build the agent graph structure.
        self.G = nx.barabasi_albert_graph(agent_size, 2)
        # self.G = nx.complete_graph(agent_size)
        adding_node_attributes(self.G)
        plot_figure(self.G, "penatest", "png")

        # Prepare shared memory (first generation only).
        SharedKnowledgeStructArray = init_knowledge_array(agent_size)

        # The rule table's vowels and high-frequency words are common to
        # all generations, so they were dumped once; reload them here.
        f = open('rules.dump')
        rules = pickle.load(f)
        rd = pickle.dumps(rules)
        f.close()

        f = open('most_freq.dump')
        freq_verbs = pickle.load(f)
        f.close()

        f = open('verb_freq.dump')
        freq_of_verbs = pickle.load(f)
        fv = pickle.dumps(freq_of_verbs)
        f.close()

        result = {}
        # The first generation is handled separately.
        print("-- Generation %i --" % 0)
        knowledge_dict = self.server(
            SharedKnowledgeStructArray, rd, freq_verbs, fv, 0)

        # c_knowledge_array = mapper.p_to_c_convert(knowledge_dict)
        result[0] = deepcopy(knowledge_dict)

        lock = Lock()

        for g in xrange(self.GEN-1):
            g = g + 1
            print("-- Generation %i --" % g)
            c_knowledge_array = mapper.p_to_c_convert(knowledge_dict)
            # Store the converted knowledge in shared memory.
            K = Array(DataStructure.KnowledgeStruct,
                      c_knowledge_array, lock=lock)
            knowledge_dict = self.server(K, rd, freq_verbs, fv, g)
            # print type(knowledge_dict)
            result[g] = deepcopy(knowledge_dict)

        f = open("BA12Agent_100gen_100utter.dump", "w")
        pickle.dump(result, f)
        f.close()
def main():
    """Wire up shared state and run the Tkinter UI process (Python 2 code)."""
    #global udpProcess # try to kill updprocess using startTkinter
    lock = Lock()
    n = Array('i', [0] * 10,
              lock=lock)  #Packet Storage Array for transfer between processes
    msgIDs = Array(
        'i', [0] * 10,
        lock=lock)  #Message ID Storage Array for transfer between processes
    # Logging control flags (ints used as booleans).
    startLogging = Value('i', 0, lock=lock)
    stopLogging = Value('i', 1, lock=lock)
    print 'Start Bool: ' + str(startLogging.value) + '\n'
    print 'Stop Bool: ' + str(stopLogging.value) + '\n'
    # Created for the (commented-out) UDP process; currently unused here.
    UDPmaster = udpConnection()
    #udpProcess = Process(name = 'UDP Process', target = UDP, args=(n,startLogging,stopLogging,UDPmaster,msgIDs))
    TkinterProcess = Process(name='Tkinter Process',
                             target=startTkinter,
                             args=(n, startLogging, stopLogging, msgIDs))
    # broadcastProcess = Process(name='Broadcasting Process', target=broadcast)
    #udpProcess.start()
    TkinterProcess.start()
    #udpProcess.join()
    TkinterProcess.join()