Example #1
def shared_array(shape):
    import numpy as np
    from multiprocessing.sharedctypes import Array
    from ctypes import c_double
    # Allocate the memory in shared space.
    memory = Array(c_double, int(np.prod(shape)))
    # Create and return a structure to access the shared memory (numpy array).
    return np.frombuffer(memory.get_obj(), dtype=float).reshape(shape)
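A minimal usage sketch (not part of the original snippet), assuming a fork-based start method such as the Linux default so the child process inherits the shared buffer; the fill worker is illustrative:

import numpy as np
from multiprocessing import Process

def fill(arr, value):
    arr[:] = value  # writes go straight into the shared ctypes buffer behind the view

if __name__ == '__main__':
    a = shared_array((3, 4))
    p = Process(target=fill, args=(a, 7.0))
    p.start()
    p.join()
    print(a)  # all entries show 7.0, written by the child process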
Example #2
def main(args):
    ''' '''
    # Global variables
    global shared_arrays

    # Variables
    fithr = int(args['fithr'])
    rdthr = int(args['rdthr']) if args['rdthr'] else 0
    abthr = int(args['abthr']) if args['abthr'] else 15
    ncores = int(args['ncores']) if args['ncores'] else 1
    files = args['file']

    # Data structures
    chr_length, shared_arrays = {}, {}
    regions = []

    # Reading chrom.sizes file
    with open(args['chromfile']) as fi:
        for line in fi:
            line = line.rstrip()
            if line:
                chr, length = line.split('\t')
                chr_length.setdefault(chr, int(length))
            #end if
        #end for
    #end with

    # Getting regions
    with open(args['regionfile']) as fi:
        for line in fi:
            line = line.rstrip()  # line is a region
            if line:
                check_region(line, chr_length)
                regions.append(line)
            #end if
        #end for
    #end with

    # Initializing bitarrays data structure
    for chr, length in chr_length.items():  # length + 1 so bitarray positions can be indexed from 1
        shared_arrays.setdefault(
            chr, {
                'snv': Array(ctypes.c_bool, length + 1),
                'ins': Array(ctypes.c_bool, length + 1),
                'del': Array(ctypes.c_bool, length + 1)
            })
    #end for

    # Multiprocessing
    # -> TODO: sometimes when a process dies, the master hangs in wait; needs checking
    with Pool(ncores) as pool:
        results = pool.map(partial(run_region, files, fithr, rdthr, abthr),
                           regions)
    #end with

    # Writing bitarrays to files
    bitarrays_toHDF5(args['outputfile'])
Example #3
 def __init__(self):
     self.array = [Array('c', MyQueue.QUEUE_BLOCK_SIZE, lock=False) \
                   for q in range(MyQueue.QUEUE_SIZE)]
     self.name = Array('c', MyQueue.MAX_QUEUE_NAME, lock=False)
     self.send_pos = Value('i', -1, lock=False)
     self.recv_pos = Value('i', -1, lock=False)
     self.occupied_flag = Value('i', -1, lock=False)
     self.slock = multiprocessing.Lock()
     self.rlock = multiprocessing.Lock()
Example #4
 def __init__(self, cam_num, debug=False):
     Process.__init__(self)
     self.cam = Camera(cam_num, threaded=False)
     self.puck_locations = Array(Vector, [(-1, -1), (-1, -1)])
     self.puck_velocity = Array(Vector, [(-1, -1), (-1, -1)])
     self.gun_positions = Array(Vector, [(-1, -1), (-1, -1)])
     self.debug = debug
     self.field_crop_boundary = list()
     self.field_post_crop_limits = [5000, 0]  # [left, right]
     self.crop_points = list()
     self.lighting_constant = 250
Example #5
class Frame(object):
    def __init__(self, width, height, channels, array_type_code):
        self.__lock = RLock()
        self.__header = Value(Header,
                              width,
                              height,
                              channels,
                              0,
                              lock=self.__lock)
        self.__image = Array(array_type_code,
                             self.__header.width * self.__header.height *
                             channels,
                             lock=self.__lock)
        self.__latch = StateLatch(State.READY, self.__lock)

    def copy(self, dst):
        memmove(addressof(dst.image.get_obj()),
                addressof(self.__image.get_obj()),
                sizeof(self.__image.get_obj()))
        memmove(addressof(dst.header.get_obj()),
                addressof(self.__header.get_obj()),
                sizeof(self.__header.get_obj()))

    def clear(self):
        self.__header.epoch = 0
        memset(addressof(self.__image.get_obj()), 0,
               sizeof(self.__image.get_obj()))
        memset(addressof(self.__header.detections), 0,
               sizeof(self.__header.detections))

    @property
    def lock(self):
        return self.__lock

    @property
    def header(self):
        return self.__header

    @property
    def image(self):
        return self.__image

    @property
    def latch(self):
        return self.__latch

    def get_numpy_image(self, dtype=None):
        """# Get numpy image from buffer.
        """
        image_shape = (self.header.height, self.header.width,
                       self.header.channels)
        image_np = frombuffer(self.image.get_obj(), dtype).reshape(image_shape)
        return image_shape, image_np
Example #6
 def __init__(self, width, height, channels, array_type_code):
     self.__lock = RLock()
     self.__header = Value(Header,
                           width,
                           height,
                           channels,
                           0,
                           lock=self.__lock)
     self.__image = Array(array_type_code,
                          self.__header.width * self.__header.height *
                          channels,
                          lock=self.__lock)
     self.__latch = StateLatch(State.READY, self.__lock)
Example #7
class Msg(object):
    """
    Data structure encapsulating a message.
    """

    def __init__(self, size):
        self.s_e = Semaphore(1)
        self.s_f = Semaphore(0)
        self.s_buf = Array(ct.c_ubyte, size)

    def send(self, func):
        self.s_e.acquire()
        self.s_buf.acquire()
        send_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_f.release()
        return send_result

    def recv(self, func):
        self.s_f.acquire()
        self.s_buf.acquire()
        recv_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_e.release()
        return recv_result
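A hypothetical round-trip sketch (the producer/consumer helpers below are illustrative, not from the original project), assuming the Msg class above with a buffer of at least 5 bytes:

from multiprocessing import Process

def producer(msg):
    def write_payload(buf):            # buf is the raw c_ubyte array
        payload = b'hello'
        buf[:len(payload)] = list(payload)
        return len(payload)
    msg.send(write_payload)

def consumer(msg):
    def read_payload(buf):
        return bytes(buf[:5])
    print(msg.recv(read_payload))      # expected: b'hello'

if __name__ == '__main__':
    m = Msg(16)
    c = Process(target=consumer, args=(m,))
    p = Process(target=producer, args=(m,))
    c.start(); p.start()
    p.join(); c.join()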
Example #8
 def __init__(self, ip="127.0.0.1", body_id=0):
     # shared c_double array
     self.shared_bodyPosition = Array(c_double, 3, lock=False)
     self.shared_bodyVelocity = Array(c_double, 3, lock=False)
     self.shared_bodyOrientationQuat = Array(c_double, 4, lock=False)
     self.shared_bodyOrientationMat9 = Array(c_double, 9, lock=False)
     self.shared_bodyAngularVelocity = Array(c_double, 3, lock=False)
     self.shared_timestamp = Value(c_double, lock=False)
     #self.shared_timestamp = -1
     args = (ip, body_id, self.shared_bodyPosition, self.shared_bodyVelocity,
             self.shared_bodyOrientationQuat, self.shared_bodyOrientationMat9,
             self.shared_bodyAngularVelocity, self.shared_timestamp)
     self.p = Process(target=self.qualisys_process, args=args)
     self.p.start()
Example #9
class Msg(object):
    """
    TODO: Not documenting this class because it may go away.
    """

    def __init__(self, size):
        self.s_e = Semaphore(1)
        self.s_f = Semaphore(0)
        self.s_buf = Array(ct.c_ubyte, size)

    def send(self, func):
        self.s_e.acquire()
        self.s_buf.acquire()
        send_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_f.release()
        return send_result

    def recv(self, func):
        self.s_f.acquire()
        self.s_buf.acquire()
        recv_result = func(self.s_buf._obj)
        self.s_buf.release()
        self.s_e.release()
        return recv_result
Example #10
    def __init__(self, app):
        usernames = app.config.get("SUPER_USERS", [])
        usernames_str = ",".join(usernames)

        self._max_length = len(usernames_str) + MAX_USERNAME_LENGTH + 1
        self._array = Array("c", self._max_length, lock=True)
        self._array.value = usernames_str.encode("utf8")
Example #11
    def __init__(self):
        super(PulserHardware, self).__init__()
        self._shutter = 0
        self._trigger = 0
        self.xem = None
        self.Mutex = QtCore.QMutex
        self._adcCounterMask = 0
        self._integrationTime = Q(100, 'ms')

        self.dataQueue = multiprocessing.Queue()
        self.clientPipe, self.serverPipe = multiprocessing.Pipe()
        self.loggingQueue = multiprocessing.Queue()
        self.sharedMemoryArray = Array(c_longlong,
                                       self.sharedMemorySize,
                                       lock=True)

        self.serverProcess = self.serverClass(self.dataQueue, self.serverPipe,
                                              self.loggingQueue,
                                              self.sharedMemoryArray)
        self.serverProcess.start()

        self.queueReader = QueueReader(self, self.dataQueue)
        self.queueReader.start()

        self.loggingReader = LoggingReader(self.loggingQueue)
        self.loggingReader.start()
        self.ppActive = False
        self._pulserConfiguration = None
Example #12
 def __init__(self, size=100):
     # Lock handler.
     self.lock = Lock()
     # Array size.
     self.size = size
     # Array of Session.
     self.sessions = Array(Session, self.size, lock=self.lock)
Example #13
def main2():
    lock = Lock()

    n = V2('i', 7)
    x = V2(c_double, 1.0 / 3.0, lock=False)
    s = Array('c', b'hello world', lock=lock)
    A = Array(Point, [(1.875, -6.25), (-5.75, 2.0), (2.375, 9.5)], lock=lock)

    p = Process(target=modify, args=(n, x, s, A))
    p.start()
    p.join()

    print(n.value)
    print(x.value)
    print(s.value)
    print([(a.x, a.y) for a in A])
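main2 closely mirrors the sharedctypes demo from the multiprocessing documentation, but the helpers it relies on are not shown here; V2 is presumably a Value-like wrapper. A plausible reconstruction of the missing Point and modify (the originals may differ):

from ctypes import Structure, c_double

class Point(Structure):
    _fields_ = [('x', c_double), ('y', c_double)]

def modify(n, x, s, A):
    # Every argument lives in shared memory, so the parent sees these mutations.
    n.value **= 2
    x.value **= 2
    s.value = s.value.upper()
    for a in A:
        a.x **= 2
        a.y **= 2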
Example #14
 def __init__(self, size=100):
     # Lock handler.
     self.lock = Lock()
     # Array size.
     self.size = size
     # Array of Command.
     self.commands = Array(Command, self.size, lock=self.lock)
Example #15
 def runCmd(self,
            exe,
            bypass,
            tail=Array('c', ' ' * 10000),
            code=Value('i', 0)):
     if bypass:
         proc = sp.Popen(exe,
                         bufsize=1024,
                         stdout=sp.PIPE,
                         stderr=sp.PIPE,
                         shell=True)
         t1 = threading.Thread(target=self.bufferScreen,
                               args=(proc.stdout, ))
         t1.start()
         t1.join()
         proc.wait()
         code.value = proc.returncode
         if code.value != 0 and tail.value.strip() == '':
             tail.value = 'I was only able to capture the following execution error while executing the following:\n'+exe+'\n... you may wish to re-run without bypass option.'+ \
                '\n'+'~'*18+'\n'+str(proc.stderr.read().strip())+'\n'+'~'*18
             self.tail = self.tail + '\n' + tail.value
     else:
         code.value = sp.call(exe, shell=True)
         if code.value != 0:
             tail.value = '... The following command failed for the reason above (or below)\n' + exe + '\n'
             self.tail = self.tail + '\n' + tail.value
     return self.tail, code.value
Example #16
    def __init__(self, app):
        usernames = app.config.get('SUPER_USERS', [])
        usernames_str = ','.join(usernames)

        self._max_length = len(usernames_str) + MAX_USERNAME_LENGTH + 1
        self._array = Array('c', self._max_length, lock=True)
        self._array.value = usernames_str
Example #17
def graph2memory(G):
    """グラフ構造を受け取りknowledeだけ取り出し共有メモリに入れる
    >p_to_cを使えるようにするクッション関数

    Parameters
    ----------
    G : networkx graph

    Returns
    -------
    Shared Memory Array

    """

    knowledge_dict = {}
    mapper = cpmap()
    G_new = deepcopy(G)
    for n in G_new:
        knowledge_dict[n] = G_new.node[n]["knowledge"]

    lock = Lock()
    ctypes_knowledge_array = mapper.p_to_c_convert(knowledge_dict)

    # Store the data in shared memory
    K = Array(DataStructure.KnowledgeStruct, ctypes_knowledge_array, lock=lock)
    return K
Example #18
def serial_ML(args):
    """
    baseline: train the model sequentially
    """
    print(args.name)
    global model_module
    if model_module is None:
        spec = importlib.util.spec_from_file_location(
            "module.name", os.path.abspath(args.model_file))
        model_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(model_module)

    init_weights = model_module.init()
    data = model_module.get_data()

    global coef_shared, w
    coef_shared = Array(c_double, init_weights.flat, lock=False)
    w = np.frombuffer(coef_shared)

    T0 = time.time()
    for i in range(len(data)):
        train_wrapper(data[i])
    T1 = time.time()

    print('sequential job finished in', T1 - T0, 's')
    model_module.finish(w)
Example #19
 def __init__(self, robot, dt=0.01):
     # a shared c_double array
     self.dt = dt
     self.shared_q_viewer = Array(c_double, robot.nq, lock=False)
     self.p = Process(target=self.display_process,
                      args=(robot, self.shared_q_viewer))
     self.p.start()
Example #20
 def __init__(self, size_w):
     coef_shared = Array(c_double,
                         (np.random.normal(size=(size_w, 1)) * 1. /
                          np.sqrt(size_w)).flat,
                         lock=False)
     w = np.frombuffer(coef_shared)
     w = w.reshape((len(w), 1))
     self.w = w
Example #21
 def __init__(self, domain_factory, shm_proxy, nb_domains=os.cpu_count()):
     super().__init__(domain_factory, nb_domains)
     self._conditions = [Condition() for i in range(nb_domains)]
     self._shm_proxy = shm_proxy
     self._shm_registers = {
     }  # Maps from registered method parameter types to vectorized array ranges
     self._shm_types = {}  # Maps from register index to type
     self._shm_sizes = {
     }  # Maps from register method parameter types to number of arrays encoding each type
     self._shm_arrays = []  # Methods' vectorized parameters
     self._rsize = 0  # Total size of the register (updated below)
     self._shm_names = [None] * nb_domains  # Vectorized methods' names
     self._shm_params = [
         None
     ] * nb_domains  # Indices of methods' vectorized parameters
     for i in range(nb_domains):
         j = 0
         for r in shm_proxy.register():
             for k in range(r[1]):
                 m = shm_proxy.initialize(r[0])
                 if type(m) == list or type(m) == tuple:
                     if i == 0 and k == 0:  # do it once for all the domains and redundant initializers
                         self._shm_sizes[r[0].__name__] = len(m)
                         self._shm_registers[r[0].__name__] = (
                             j, j + (r[1] * len(m)))
                         self._shm_types.update({
                             kk: r[0]
                             for kk in range(j, j + (r[1] * len(m)), len(m))
                         })
                         self._rsize += (r[1] * len(m))
                     self._shm_arrays.extend(m)
                     j += len(m)
                 else:
                     if i == 0 and k == 0:  # do it once for all the domains and redundant initializers
                         self._shm_sizes[r[0].__name__] = 1
                         self._shm_registers[r[0].__name__] = (j, j + r[1])
                         self._shm_types.update(
                             {kk: r[0]
                              for kk in range(j, j + r[1])})
                         self._rsize += r[1]
                     self._shm_arrays.append(m)
                     j += 1
         self._shm_names[i] = Array('c', bytearray(100))
         self._shm_params[i] = Array(
             'i', [-1] * sum(r[1] for r in shm_proxy.register()))
     logger.info(rf'Using {nb_domains} parallel shared memory domains')
Example #22
 def geometric_plan(self):
     '''
     This function plans the full geometric trajectory at initialization.
     '''
     if not self.plans:
         HumoroLGP.logger.warn(
             'Symbolic plan is empty. Cannot plan trajectory!')
         return False
     # prepare workspace
     self.place_human()
     workspace = self.workspace.get_pyrieef_ws()
     self.ranking = []
     self.chosen_plan_id = None
     # compute plan costs
     for i, plan in enumerate(self.plans):
         waypoints, waypoint_manifolds = self.get_waypoints(plan)
         trajectory = linear_interpolation_waypoints_trajectory(waypoints)
         objective = TrajectoryConstraintObjective(
             dt=1 / self.fps, enable_viewer=self.enable_viewer)
         objective.set_problem(workspace=workspace,
                               trajectory=trajectory,
                               waypoint_manifolds=waypoint_manifolds,
                               goal_manifold=waypoint_manifolds[-1][0])
         self.objectives.append(objective)
         self.ranking.append((objective.cost(), i))
     # rank the plans
     self.ranking.sort(key=operator.itemgetter(0))
     # optimize the objective according to self.ranking
     for r in self.ranking:
         if self.enable_viewer:
             self.viewer.initialize_viewer(self.objectives[r[1]],
                                           self.objectives[r[1]].trajectory)
             status = Value(c_bool, True)
             traj = Array(
                 c_double,
                 self.objectives[r[1]].n * (self.objectives[r[1]].T + 2))
             p = Process(target=self.objectives[r[1]].optimize,
                         args=(status, traj))
             p.start()
             self.viewer.run()
             p.join()
             success = status.value
             traj = Trajectory(q_init=np.array(traj[:2]),
                               x=np.array(traj[2:]))
         else:
             success, traj = self.objectives[r[1]].optimize()
         if success:  # choose this plan
             self.plan = self.plans[r[1]]
             self.chosen_plan_id = r[1]
             if self.verbose:
                 for a in self.plan[1]:
                     HumoroLGP.logger.info(a.name + ' ' +
                                           ' '.join(a.parameters))
             robot = self.workspace.get_robot_link_obj()
             robot.paths.append(traj)
             return True
     HumoroLGP.logger.warn('All plan geometrical optimization infeasible!')
     return False
Example #23
 def __init__(self):
     super(Genove23, self).__init__()
     self.admin = None
     self.queue = JoinableQueue()
     self.exitQ = Queue()
     self.lock = Lock()
     self.target = Array(
         Point, [(str('').encode('utf-8'), str('').encode('utf-8'))],
         lock=self.lock)
Example #24
 def add_process(self, name, target, args):
     new_share = Array('i', [0 for _ in xrange(3)])
     new_share[conf.DAEMON_HEATBEAT] = 3
     new_share[conf.DAEMON_FEEDBACK] = 0
     new_share[conf.DAEMON_COMMAND] = 0
     self.share[name] = new_share
     self.processes[name] = Process(target=target, args=args + (new_share,))
     if name not in self.process_args.keys():
         self.process_args[name] = (target, args)
Example #25
 def __init__(self, learning_rate, lambda_reg, batch_size, dim, lock=False):
     self.__learning_rate = learning_rate
     self.__lambda_reg = lambda_reg
     self.__batch_size = batch_size
     self.__dim = dim
     self.__persistence = 30
     self.__lock = lock
     self.__w = Array(c_double, dim, lock=lock)
     self.__log = Queue()
Example #26
    def __init__(self, buffer_size, dataset_size):
        self.buffer_size = buffer_size
        self.dataset_size = dataset_size
        self.put_index = Value('i', 0)
        self.get_index = Value('i', 0)
        self.put_lock = mp.Lock()

        self.cdatasets = Array('d', [0.0] * self.buffer_size * self.dataset_size)
        self.cbuffer = self.cdatasets._obj._wrapper
Example #27
 def __init__(self, filepath=None, size=100):
     # Lock handler
     self.lock = Lock()
     # Maximum number of tasks
     self.size = size
     # Shared Array of Task
     self.tasks = Array(Task, self.size, lock=self.lock)
     # File path, if None, task_list is not sync'ed to file
     self.filepath = filepath
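Several of these snippets (Examples #12, #14, #23 and #27, for instance) pass a ctypes Structure subclass such as Session, Command or Task as the type argument, but never show its definition. A minimal illustrative sketch of how a struct-typed shared array behaves (the field layout is hypothetical):

from ctypes import Structure, c_int, c_char
from multiprocessing import Array, Lock

class Task(Structure):
    # Hypothetical layout; the real Task struct in the source project will differ.
    _fields_ = [('id', c_int), ('state', c_int), ('name', c_char * 32)]

lock = Lock()
tasks = Array(Task, 100, lock=lock)  # 100 zero-initialized Task records in shared memory
tasks[0].id = 1
tasks[0].name = b'demo'              # ctypes returns struct views, so this edits shared memory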
Example #28
def main():
    url = 'http://192.168.1.1/videostream.cgi'
    user = '******'
    pwd = ''
    auth = requests.auth.HTTPDigestAuth(user, pwd)
    r = requests.get(url, auth=auth, stream=True, timeout=9)

    h = 480
    w = 640
    ch = 3
    img = Array('B', np.zeros(h * w * ch, dtype=np.uint8))
    arr = np.frombuffer(img.get_obj(), dtype=np.uint8)
    img_lock = Event()

    stream = Process(target=stream_video, args=(r, img, img_lock))
    stream.start()

    fps = 60.0
    fourcc = cv2.VideoWriter_fourcc(*'X264')
    out = cv2.VideoWriter('output.mp4', fourcc, fps, (640, 480))

    prev = datetime.now()
    time_interval = 1.0 / fps

    while True:
        now = td_to_ms(datetime.now() - prev)
        if now >= time_interval:
            if not img_lock.is_set():
                img_lock.set()
                frame = arr.reshape((h, w, ch))
                img_lock.clear()
                cv2.imshow('camera', frame)
                out.write(frame)
                prev = datetime.now()
                time_interval = (2.0 / fps) - now

        if cv2.waitKey(1) == 27:
            break

    stream.terminate()
    out.release()
    cv2.destroyAllWindows()

    return
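The stream_video worker started above is not included; a hypothetical sketch of what it might do, assuming the camera serves 640x480 BGR frames over an MJPEG multipart stream (the marker parsing is illustrative):

import numpy as np
import cv2

def stream_video(response, img, img_lock):
    buf = b''
    for chunk in response.iter_content(chunk_size=4096):
        buf += chunk
        start = buf.find(b'\xff\xd8')  # JPEG start-of-image marker
        end = buf.find(b'\xff\xd9')    # JPEG end-of-image marker
        if start != -1 and end != -1 and end > start:
            jpg, buf = buf[start:end + 2], buf[end + 2:]
            frame = cv2.imdecode(np.frombuffer(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)
            if frame is None or frame.size != len(img):
                continue  # drop frames that do not match the shared buffer size
            if not img_lock.is_set():  # crude handshake mirroring the reader loop
                img_lock.set()
                np.frombuffer(img.get_obj(), dtype=np.uint8)[:] = frame.reshape(-1)
                img_lock.clear()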
Example #29
 def __init__(self, maxlen: int = 100, timeframe: float = 10.0):
     assert maxlen > 0
     self.__lock = RLock()
     self.__timestamps = Array(Cell, [(0.0, 0.0)] * maxlen,
                               lock=self.__lock)
     self.__index = Value('i', 0, lock=self.__lock)
     self.__start = Value('i', 0, lock=self.__lock)
     self.__length = Value('i', 0, lock=self.__lock)
     self.__maxlen = maxlen
     self.__timeframe = timeframe
Example #30
    def __init__(self, nROIs, nVols, nSkipVols, nfbDataFolder):
        super().__init__(nROIs, autostart=False)

        self.nfbDataFolder = nfbDataFolder
        self.nROIs = nROIs
        self.nVols = nVols

        self.rtdata = Array(c_double, [0] * self.nROIs * self.nVols)
        self.nData = Value('i', self.nROIs * nSkipVols)

        self.start_process()
Example #31
    def __init__(self, dataset_size):
        self.dataset_size = dataset_size
        self.index = Value('i', 0)
        self.get_lock = mp.Lock()
        self.index_lock = mp.Lock()

        self.cdatasets = Array('d', [0.0] * 2 * self.dataset_size)
        self.cbuffer = self.cdatasets._obj._wrapper

        init_array = numpy.ones((dataset_size, ), dtype='float32')
        self.put(init_array)
        self.put(init_array)
Example #32
    def simulation(self):
        """start simulation.
        """
        # Build the graph structure
        self.G = nx.barabasi_albert_graph(agent_size, 2)
        # self.G = nx.complete_graph(agent_size)
        adding_node_attributes(self.G)
        plot_figure(self.G, "penatest", "png")

        # Prepare the shared memory (first generation only)
        SharedKnowledgeStructArray = init_knowledge_array(agent_size)

        # The vowels and high-frequency words described by the rule table are shared
        # across all generations, so dump them once and load them back when needed.
        f = open('rules.dump')
        rules = pickle.load(f)
        rd = pickle.dumps(rules)
        f.close()

        f = open('most_freq.dump')
        freq_verbs = pickle.load(f)
        f.close()

        f = open('verb_freq.dump')
        freq_of_verbs = pickle.load(f)
        fv = pickle.dumps(freq_of_verbs)
        f.close()

        result = {}
        # Run the first generation separately
        print("-- Generation %i --" % 0)
        knowledge_dict = self.server(
            SharedKnowledgeStructArray, rd, freq_verbs, fv, 0)

        # c_knowledge_array = mapper.p_to_c_convert(knowledge_dict)
        result[0] = deepcopy(knowledge_dict)

        lock = Lock()

        for g in xrange(self.GEN-1):
            g = g + 1
            print("-- Generation %i --" % g)
            c_knowledge_array = mapper.p_to_c_convert(knowledge_dict)
            # Store the data in shared memory
            K = Array(DataStructure.KnowledgeStruct,
                      c_knowledge_array, lock=lock)
            knowledge_dict = self.server(K, rd, freq_verbs, fv, g)
            # print type(knowledge_dict)
            result[g] = deepcopy(knowledge_dict)

        f = open("BA12Agent_100gen_100utter.dump", "w")
        pickle.dump(result, f)
        f.close()
Example #33
    sharedSoln[0:,column][0] = wss
    
#Setup the test data:
w = pysal.lat2W(10, 10) #A contiguity weights object
z = np.random.random_sample((w.n, 2)) #Each location is assigned two attributes
p = np.ones((w.n, 1), float) #The region that each location belongs to.
floor = 3 #The minimum bound or value for each region

#Grab the number of available cores
cores = mp.cpu_count()
cores = cores *2 #Hyperthreading and testing on a dual core MBP
#Grab the length of p
numP = len(p)+1
#Setup a shared mem array for solutions with dim = numP * cores
lock = mp.Lock()
cSoln = Array(ctypes.c_double, numP*cores, lock=lock)
numSoln = np.frombuffer(cSoln.get_obj())
numSoln.shape = (numP,cores)
numSoln[:] = 1
#initSolnSpace(numSoln) #initialize the solution space as a shared memory array
'''The soln space is an array that holds node id as the index and membership as the attribute.'''

neighbordict = dict(w.neighbors) #This is interesting - we cannot pass a class instance through apply_async and need to convert it to a dict.

pool = mp.Pool(processes=cores) #Create a pool of workers, one for each core
for job in range(cores): #Prep to assign each core a job
    pool.apply_async(initialize, args=(job,z,w,neighbordict,floor,p,numP, cores)) #Async apply each job
pool.close()
pool.join()

sharedSoln = np.frombuffer(cSoln.get_obj())
z = random_init.random_sample((w.n, 2))
#print z.max(), z.min(), z.std() #Comment out to verify that the 'random' seed is identical over tests
p = np.ones((w.n, 1), float) 
floor_variable = p
floor = 3

'''START TIMING HERE - AFTER TEST DATA GENERATED'''
time0 = time.time()

#Multiprocessing setup
cores = mp.cpu_count()
cores = cores * 2
numP = len(p)+1
#Shared memory solution space
lockSoln = mp.Lock()
cSoln = Array(ctypes.c_double, numP*cores, lock=lockSoln)
numSoln = np.frombuffer(cSoln.get_obj())
numSoln.shape = (numP,cores)
numSoln[:] = -1
#Shared memory update flag space
lockflag = mp.Lock()
c_updateflag = Array(ctypes.c_int, 3*(cores*2), lock=lockflag) #Do I need different locks? #Why double cores again?
updateflag = np.frombuffer(c_updateflag.get_obj())
updateflag.shape=(3,cores)
updateflag[0] = 1 #True for first iteration. - whether the answer was updated
updateflag[1] = 0 #Iteration counter per core.
for index in range(len(updateflag[2])): #Define the tabu list length for each core.
    updateflag[2][index] = tabulength(numP)
_init_shared(updateflag)

neighbordict = dict(w.neighbors) #Class instances are not picklable.
Example #35
 def __init__(self, size):
     self.s_e = Semaphore(1)
     self.s_f = Semaphore(0)
     self.s_buf = Array(ct.c_ubyte, size)

					ejecutandose[2][0].ejecutarAccion(deltaT)
					if (ejecutandose[2][0].isAlive()):
						ejecutandose[2].append(ejecutandose[2][0])
					del ejecutandose[2][0]

					ejecutandose[2] = sorted(ejecutandose[2], key=lambda Proceso: Proceso.prioridad_base)
					ejecutandose[2].reverse()
			time.sleep(deltaT)
			
			os.system(['clear','cls'][os.name == 'nt'])
			print "Tiempo: "+str(datetime.datetime.fromtimestamp(tiempoMaquina))
		tiempoMaquina+=1
		#print "Tiempo: "+str(datetime.datetime.fromtimestamp(tiempoMaquina))



consola = Array('c', 'oliafadgwhtjefjdlgkdsgkgsjkfghjskfghskfhghsfkghsfkjoliafadgwhtjefjdlgkdsgkgsjkfghjskfghskfhghsfkghsfkj')	

p1 = Process(target=funcion, args=(1,100,consola))
p1.start()

texto = ""
while(texto <> "salir"):
	texto = raw_input("")
	consola.value = texto
# for p in procesos:
# 	p.writeInfo()