Example #1
File: sampling.py  Project: cdiener/cobrapy
def shared_np_array(shape, data=None, integer=False):
    """Create a new numpy array that resides in shared memory.

    Parameters
    ----------
    shape : tuple of ints
        The shape of the new array.
    data : numpy.array
        Data to copy to the new array. Has to have the same shape.
    integer : boolean
        Whether to use an integer array. Defaults to False which means
        float array.
    """
    size = np.prod(shape)
    if integer:
        array = Array(ctypes.c_int64, int(size))
        np_array = np.frombuffer(array.get_obj(), dtype="int64")
    else:
        array = Array(ctypes.c_double, int(size))
        np_array = np.frombuffer(array.get_obj())
    np_array = np_array.reshape(shape)

    if data is not None:
        if len(shape) != len(data.shape):
            raise ValueError("`data` must have the same dimensions "
                             "as the created array.")
        same = all(x == y for x, y in zip(shape, data.shape))
        if not same:
            raise ValueError("`data` must have the same shape "
                             "as the created array.")
        np_array[:] = data

    return np_array
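A minimal usage sketch (hypothetical shapes and data; assumes the same imports the function above relies on, i.e. numpy as np, ctypes, and multiprocessing.Array):

template = np.arange(6, dtype="float64").reshape(2, 3)
shared = shared_np_array((2, 3), data=template)   # float array backed by shared memory
shared[0, 0] = 42.0                               # writes go to the shared buffer
counts = shared_np_array((4,), integer=True)      # int64 array, zero-initialized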
Example #2
def get_predict(args, ortho, model):
    xp = cuda.cupy if args.gpu >= 0 else np
    args.h_limit, args.w_limit = ortho.shape[0], ortho.shape[1]
    args.canvas_h = args.h_limit
    args.canvas_w = args.w_limit

    # to share 'canvas' between different threads
    canvas_ = Array(ctypes.c_float, args.canvas_h * args.canvas_w * args.channels)
    canvas = np.ctypeslib.as_array(canvas_.get_obj())
    canvas = canvas.reshape((args.canvas_h, args.canvas_w, args.channels))

    # prepare queues and threads
    patch_queue = Queue(maxsize=5)
    preds_queue = Queue()
    patch_worker = Process(target=create_minibatch, args=(args, ortho, patch_queue))
    canvas_worker = Process(target=tile_patches, args=(args, canvas, preds_queue))
    patch_worker.start()
    canvas_worker.start()

    while True:
        minibatch = patch_queue.get()
        if minibatch is None:
            break
        minibatch = Variable(xp.asarray(minibatch, dtype=xp.float32), volatile=True)
        preds = model(minibatch, None).data
        if args.gpu >= 0:
            preds = xp.asnumpy(preds)
        for pred in preds:
            preds_queue.put(pred)

    preds_queue.put(None)
    patch_worker.join()
    canvas_worker.join()

    return canvas
Example #3
def conv_single_image(image):
    shared_array_base = Array(ctypes.c_double, image.size)
    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(image.shape)
    shared_array[:] = image

    return shared_array
Example #4
    def steps_multiprocessing(self, number_of_steps, plot, plot_every_n):
        """ 
        Equal to take_steps but using multiprocesing.

        Parameters
        ----------
        number_of_steps : float
                 Total number of time steps.
        plot : object
                 make_plot Object.
        plot_every_n : float
                 Every few time steps are going on a plot.
        """
        
        shared_array_base = Array(ctypes.c_double, len(self.bodies)*2)
        shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
        shared_array = shared_array.reshape(2, len(self.bodies))

        counter = Value(ctypes.c_int64, 0)
        end_plot = Value(ctypes.c_int8, 1)

        old_counter = 1
        rk_fun = Process(target = self.rk_fun_task, args=(number_of_steps, plot_every_n, shared_array, end_plot, counter))
        plot_fun = Process(target = self.plot_fun_task, args=(old_counter, shared_array, end_plot, counter, plot))

        rk_fun.start()
        plot_fun.start()

        rk_fun.join()
        plot_fun.join()
Example #5
def run(args):
    # create dummy environment to be able to create model
    env = gym.make(args.environment)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    print("Observation space:", env.observation_space)
    print("Action space:", env.action_space)

    # create main model
    model = create_model(env, args)
    model.summary()
    env.close()

    # for better compatibility with Theano and Tensorflow
    multiprocessing.set_start_method('spawn')

    # create shared buffer for sharing weights
    blob = pickle.dumps(model.get_weights(), pickle.HIGHEST_PROTOCOL)
    shared_buffer = Array('c', len(blob))
    shared_buffer.raw = blob

    # force runner processes to use cpu
    os.environ["CUDA_VISIBLE_DEVICES"] = ""

    # create fifos and threads for all runners
    fifos = []
    for i in range(args.num_runners):
        fifo = Queue(args.queue_length)
        fifos.append(fifo)
        process = Process(target=runner, args=(shared_buffer, fifo, args))
        process.start()

    # start trainer in main thread
    trainer(model, fifos, shared_buffer, args)
Example #6
    def initialize(self):
        # Create thread safe arrays.
        self.prev_values = Array('d', self.prev_values, lock=False)
        self.next_values = Array('d', self.next_values, lock=False)

        for key in self.records:
            self.records[key] = manager.list()
        for key in self.spikes:
            self.spikes[key] = manager.list()
Example #7
class SynapseEnvironment:
    def __init__(self, noise=0.0):
        def beta(maximum, rate=1.0):
            return betav(maximum, noise=noise, rate=rate)
        self.beta = beta

        self.prev_concentrations = []
        self.next_concentrations = []

    def initialize(self):
        # Create thread safe arrays.
        self.prev_concentrations = Array('d', self.prev_concentrations, lock=False)
        self.next_concentrations = Array('d', self.next_concentrations, lock=False)
        self.dirty = Value('b', True, lock=False)
        
    def register(self, baseline_concentration):
        pool_id = len(self.prev_concentrations)
        self.prev_concentrations.append(baseline_concentration)
        self.next_concentrations.append(baseline_concentration)
        return pool_id

    def get_concentration(self, pool_id):
        try: self.dirty
        except: self.initialize()
        return self.prev_concentrations[pool_id]

    def set_concentration(self, pool_id, new_concentration):
        try: self.dirty.value = True
        except: self.initialize()
        self.next_concentrations[pool_id] = new_concentration

    def add_concentration(self, pool_id, molecules):
        try: self.dirty.value = True
        except: self.initialize()
        self.next_concentrations[pool_id] += molecules

    def remove_concentration(self, pool_id, molecules):
        try: self.dirty.value = True
        except: self.initialize()
        self.next_concentrations[pool_id] -= molecules
        self.next_concentrations[pool_id] = \
            max(0.0, self.next_concentrations[pool_id])

    def step(self):
        """
        Cycles the environment.
        Returns whether the environment is stable (not dirty, no changes)
        """
        try: self.dirty
        except: self.initialize()
        if self.dirty.value:
            self.dirty.value = False
            for i in xrange(len(self.prev_concentrations)):
                self.prev_concentrations[i]=self.next_concentrations[i]
            return False
        else: return True
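A brief sketch of how this double-buffered API might be driven (pools are registered before initialize(); writes land in next_concentrations until step() copies them to prev_concentrations):

env = SynapseEnvironment(noise=0.0)
pool_id = env.register(1.0)             # baseline concentration, before initialize()
env.initialize()                        # freeze the lists into shared Arrays
env.add_concentration(pool_id, 0.5)     # write goes to next_concentrations
env.step()                              # copy next -> prev and clear the dirty flag
print(env.get_concentration(pool_id))   # 1.5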
Example #8
 def __init__(self, size=1000, data_type="int32"):
     self.data_type = data_type
     self.head = Value("i", 0)
     self.ring_buffer = Array(data_type[0], range(size))
     self.size = size
     for i in range(size):
         self.ring_buffer[i] = 0  # probably really slow but not done often
Example #9
def calculatePearsonCorrelationMatrixMultiprocessing(matrix, axis=0, symmetrical=True, getpvalmat=False):

    if axis == 1:
        matrix = matrix.T

    nRows = matrix.shape[0]

    # create shared array that can be used from multiple processes
    output_r_arr = Array(ctypes.c_double, matrix.shape[0] * matrix.shape[0])
    # then in each new process create a new numpy array using:
    output_r = np.frombuffer(output_r_arr.get_obj())  # mp_arr and arr share the same memory
    # make it two-dimensional
    output_r = output_r.reshape((matrix.shape[0], matrix.shape[0]))  # b and arr share the same memory
    # output_r = np.zeros((nRows,nRows))  # old version

    output_p_arr = Array(ctypes.c_double, matrix.shape[0] * matrix.shape[0])
    output_p = np.frombuffer(output_p_arr.get_obj())
    output_p = output_p.reshape((matrix.shape[0], matrix.shape[0]))

    print 'Calculating Pearson R for each row, multithreaded'
    print mp.cpu_count(), 'processes in pool'

    pool = None
    try:
        pool = mp.Pool(mp.cpu_count(),
                       initializer=_init_pool,
                       initargs=(matrix, output_r_arr, output_p_arr,
                                 nRows, symmetrical))

        # bar = tqdm(total=nRows*nRows/2)
        # tqdm.write('Calculating Pearson R for each row, multithreaded')
        for result in tqdm(pool.imap_unordered(_f, range(0, nRows)), total=nRows):
            # bar.update(result)
            pass
        # bar.close()
    finally:  # To make sure processes are closed in the end, even if errors happen
        pool.close()
        pool.join()

    print output_r

    if getpvalmat:
        return output_r, output_p
    else:
        return output_r
Example #10
 def initialize(self, nChannels, nSamples, windowSize=1, nptype='float32'):
     '''
     Initializes the buffer with a new raw array
     
     Parameters
     ----------
     nChannels : int
         dimensionality of a single sample
     nSamples : int
         the buffer capacity in samples
     windowSize : int, optional
         the size of the window to be used for reading the
         data. A pocket of this size will be created.
     nptype : string, optional
         the type of the data to be stored
                        
     '''
     self.__initialized = True
     
     # checking parameters
     if nChannels < 1:
         self.logger.warning('nChannels must be a positive integer, setting to 1')
         nChannels = 1
     if nSamples < 1:
         self.logger.warning('nSamples must be a positive integer, setting to 1')
         nSamples = 1
     if windowSize < 1:
         self.logger.warning('windowSize must be a positive integer, setting to 1')
         windowSize = 1
     
     # initializing
     sizeBytes = c.sizeof(BufferHeader) + \
                 (nSamples + windowSize) * nChannels * np.dtype(nptype).itemsize
     
     raw = Array('c', sizeBytes)
     hdr = BufferHeader.from_buffer(raw.get_obj())
     
     hdr.bufSizeBytes = nSamples * nChannels * np.dtype(nptype).itemsize
     hdr.pocketSizeBytes = windowSize * nChannels * np.dtype(nptype).itemsize
     hdr.dataType = datatypes.get_code(nptype)
     hdr.nChannels = nChannels
     hdr.nSamplesWritten = 0
     
     self.initialize_from_raw(raw.get_obj())
Example #11
class NeuronEnvironment:
    def __init__(self, noise=0.0):
        def beta(maximum, rate=1.0):
            return betav(maximum, noise=noise, rate=rate)
        self.beta = beta

        self.prev_voltages = []
        self.next_voltages = []
        self.dirty = Value('b', True, lock=False)

    def initialize(self):
        # Create thread safe arrays.
        self.prev_voltages = Array('d', self.prev_voltages, lock=False)
        self.next_voltages = Array('d', self.next_voltages, lock=False)

    def register(self, baseline_voltage=0.0):
        neuron_id = len(self.prev_voltages)
        self.prev_voltages.append(baseline_voltage)
        self.next_voltages.append(baseline_voltage)
        return neuron_id

    def get_voltage(self, neuron_id):
        return self.prev_voltages[neuron_id]

    def set_voltage(self, neuron_id, new_voltage):
        self.dirty.value = True
        self.next_voltages[neuron_id] = new_voltage

    def adjust_voltage(self, neuron_id, delta):
        self.dirty.value = True
        self.next_voltages[neuron_id] += delta

    def step(self):
        """
        Cycles the environment.
        Returns whether the environment is stable (not dirty, no changes)
        """
        if self.dirty.value:
            self.dirty.value = False
            for i in xrange(len(self.prev_voltages)):
                self.prev_voltages[i]=self.next_voltages[i]
            return False
        else: return True
Example #12
    def _calculate_phi(self, x):
        C = self.workers
        neurons = self.neurons
        mu = self.mu
        sigmas = self.sigmas
        phi = self.phi = None
        n = self.n


        def heavy_lifting(c, phi):
            s = jobs[c][1] - jobs[c][0]
            for k, i in enumerate(xrange(jobs[c][0], jobs[c][1])):
                for j in xrange(neurons):
                    # phi[i, j] = metrics(x[i,:], mu[j])**3)
                    # phi[i, j] = plateSpine(x[i,:], mu[j]))
                    # phi[i, j] = invMultiQuadric(x[i,:], mu[j], sigmas[j]))
                    phi[i, j] = multiQuadric(x[i,:], mu[j], sigmas[j])
                    # phi[i, j] = gaussian(x[i,:], mu[j], sigmas[j]))
                if k % 1000 == 0:
                    percent = true_divide(k, s)*100
                    print c, ': {:2.2f}%'.format(percent)
            print c, ': Done'
        
        # distributing the work between 4 workers
        shared_array = Array(c_double, n * neurons)
        phi = frombuffer(shared_array.get_obj())
        phi = phi.reshape((n, neurons))

        jobs = []
        workers = []

        p = n / C
        m = n % C
        for c in range(C):
            jobs.append((c*p, (c+1)*p + (m if c == C-1 else 0)))
            worker = Process(target = heavy_lifting, args = (c, phi))
            workers.append(worker)
            worker.start()

        for worker in workers:
            worker.join()

        return phi
Example #13
    def initialize(self, n_channels, n_samples, np_dtype='float32'):
        """Initializes the buffer with a new array."""
        logger.debug('Initializing {}x{} {} buffer.'.format(n_channels, n_samples, np_dtype))

        # check parameters
        if n_channels < 1 or n_samples < 1:
            logger.error('n_channels and n_samples must be a positive integer')
            raise SharedBufferError(1)

        size_bytes = ct.sizeof(SharedBufferHeader) + n_samples * n_channels * np.dtype(np_dtype).itemsize
        raw = Array('c', size_bytes)
        hdr = SharedBufferHeader.from_buffer(raw.get_obj())

        hdr.bufSizeBytes = size_bytes - ct.sizeof(SharedBufferHeader)
        hdr.dataType = DataTypes.get_code(np_dtype)
        hdr.nChannels = n_channels
        hdr.nSamples = n_samples
        hdr.position = 0

        self.initialize_from_raw(raw.get_obj())
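A hedged, self-contained sketch of the header-over-shared-bytes pattern used above. SharedBufferHeader is defined elsewhere in the project, so the structure fields below are purely illustrative:

import ctypes as ct
import numpy as np
from multiprocessing import Array

class DemoHeader(ct.Structure):  # illustrative stand-in for SharedBufferHeader
    _fields_ = [("nChannels", ct.c_uint32),
                ("nSamples", ct.c_uint32),
                ("position", ct.c_uint32)]

n_channels, n_samples = 4, 16
payload_bytes = n_samples * n_channels * np.dtype("float32").itemsize
raw = Array("c", ct.sizeof(DemoHeader) + payload_bytes)

hdr = DemoHeader.from_buffer(raw.get_obj())   # struct view over the leading bytes
hdr.nChannels, hdr.nSamples, hdr.position = n_channels, n_samples, 0

data = np.frombuffer(raw.get_obj(), dtype="float32",
                     offset=ct.sizeof(DemoHeader)).reshape(n_samples, n_channels)
data[:] = 0.0  # header and payload views share the same shared-memory block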
Example #14
File: train.py  Project: cybermatt/deeppose
def load_data(args, input_q, minibatch_q):
    c = args.channel
    s = args.size
    d = args.joint_num * 2

    input_data_base = Array(ctypes.c_float, args.batchsize * c * s * s)
    input_data = np.ctypeslib.as_array(input_data_base.get_obj())
    input_data = input_data.reshape((args.batchsize, c, s, s))

    label_base = Array(ctypes.c_float, args.batchsize * d)
    label = np.ctypeslib.as_array(label_base.get_obj())
    label = label.reshape((args.batchsize, d))

    x_queue, o_queue = Queue(), Queue()
    workers = [Process(target=transform,
                       args=(args, x_queue, args.datadir, args.fname_index,
                             args.joint_index, o_queue))
               for _ in range(args.batchsize)]
    for w in workers:
        w.start()

    while True:
        x_batch = input_q.get()
        if x_batch is None:
            break

        # data augmentation
        for x in x_batch:
            x_queue.put(x)
        j = 0
        while j != len(x_batch):
            a, b = o_queue.get()
            input_data[j] = a
            label[j] = b
            j += 1
        minibatch_q.put([input_data, label])

    for _ in range(args.batchsize):
        x_queue.put(None)
    for w in workers:
        w.join()
Example #15
    def __init__(self):

        self.all_curves = Listing.index_all_curves()
        index_file = open("outputs/index_file.txt", "w")
        for index, item in enumerate(self.all_curves):
            index_file.write("%i,%s\n" % (index, str(item)))
        index_file.close()
        self.n = len(self.all_curves)

        self.total_costs_matrix_base = Array(ctypes.c_double, self.n*self.n)
        self.total_costs_matrix = numpy.ctypeslib.as_array(
                             self.total_costs_matrix_base.get_obj())
        self.total_costs_matrix = self.total_costs_matrix.reshape(self.n,self.n)
Example #16
    def test_continuous_send_dialog(self):
        self.add_signal_to_form("esaver.coco")
        self.__add_first_signal_to_generator()

        port = self.get_free_port()

        gframe = self.form.generator_tab_controller
        expected = np.zeros(gframe.total_modulated_samples, dtype=np.complex64)
        expected = gframe.modulate_data(expected)
        current_index = Value("L", 0)
        buffer = Array("f", 4 * len(expected))

        process = Process(target=receive, args=(port, current_index, 2 * len(expected), buffer))
        process.daemon = True
        process.start()
        time.sleep(1)  # ensure server is up

        ContinuousModulator.BUFFER_SIZE_MB = 10

        continuous_send_dialog = self.__get_continuous_send_dialog()
        continuous_send_dialog.device.set_client_port(port)
        continuous_send_dialog.device_settings_widget.ui.spinBoxNRepeat.setValue(2)
        continuous_send_dialog.ui.btnStart.click()
        QTest.qWait(1000)
        time.sleep(1)
        process.join(1)

        # CI sometimes swallows a sample
        self.assertGreaterEqual(current_index.value, len(expected) - 1)

        buffer = np.frombuffer(buffer.get_obj(), dtype=np.complex64)
        for i in range(len(expected)):
            self.assertEqual(buffer[i], expected[i], msg=str(i))

        continuous_send_dialog.ui.btnStop.click()
        continuous_send_dialog.ui.btnClear.click()
        QTest.qWait(1)

        continuous_send_dialog.close()
Example #17
File: camstream.py  Project: napratin/lumos
class CameraStreamer(Process):
  def __init__(self, stayAliveObj=None, frameCountObj=None, imageObj=None, imageShapeObj=None):
    Process.__init__(self)
    print "CameraStreamer.__init__(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
    # * Store references to and/or create shared objects
    self.stayAliveObj = stayAliveObj if stayAliveObj is not None else Value(c_bool, True)
    self.frameCountObj = frameCountObj if frameCountObj is not None else Value('i', 0)
    self.imageShapeObj = imageShapeObj if imageShapeObj is not None else Array('i', (camera_frame_height, camera_frame_width, camera_frame_depth))
    if imageObj is not None:
      # ** Use supplied shared image object
      self.imageObj = imageObj
    else:
      # ** Create shared image object
      image = np.zeros((camera_frame_height, camera_frame_width, camera_frame_depth), dtype=np.uint8)  # create an image
      imageShape = image.shape  # store original shape
      imageSize = image.size  # store original size (in bytes)
      image.shape = imageSize  # flatten numpy array
      self.imageObj = Array(c_ubyte, image)  # create a synchronized shared array object
  
  def run(self):
    print "CameraStreamer.run(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
    # * Interpret shared objects properly (NOTE this needs to happen in the child process)
    self.image = ctypeslib.as_array(self.imageObj.get_obj())  # get flattened image array
    self.image.shape = ctypeslib.as_array(self.imageShapeObj.get_obj())  # restore original shape
    
    # * Open camera and set desired capture properties
    self.camera = cv2.VideoCapture(0)
    if self.camera.isOpened():
      result_width = self.camera.set(cv.CV_CAP_PROP_FRAME_WIDTH, camera_frame_width)
      result_height = self.camera.set(cv.CV_CAP_PROP_FRAME_HEIGHT, camera_frame_height)
      print "CameraStreamer.run(): Camera frame size set to {width}x{height} (result: {result_width}, {result_height})".format(width=camera_frame_width, height=camera_frame_height, result_width=result_width, result_height=result_height)
    else:
      print "CameraStreamer.run(): Unable to open camera; aborting..."
      self.stayAliveObj.value = False
      return
    
    # * Keep reading frames into shared image until stopped or read error occurs
    while self.stayAliveObj.value:
      try:
        #print "CameraStreamer.run(): Frame # {}, stay alive? {}".format(self.frameCountObj.value, self.stayAliveObj.value)  # [debug]
        isOkay, frame = self.camera.read()
        if not isOkay:
          self.stayAliveObj.value = False
        self.frameCountObj.value = self.frameCountObj.value + 1
        self.image[:] = frame
      except KeyboardInterrupt:
        self.stayAliveObj.value = False
    
    # * Clean-up
    self.camera.release()
Example #18
File: camstream.py  Project: napratin/lumos
 def __init__(self, stayAliveObj=None, frameCountObj=None, imageObj=None, imageShapeObj=None):
   Process.__init__(self)
   print "CameraStreamer.__init__(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
   # * Store references to and/or create shared objects
   self.stayAliveObj = stayAliveObj if stayAliveObj is not None else Value(c_bool, True)
   self.frameCountObj = frameCountObj if frameCountObj is not None else Value('i', 0)
   self.imageShapeObj = imageShapeObj if imageShapeObj is not None else Array('i', (camera_frame_height, camera_frame_width, camera_frame_depth))
   if imageObj is not None:
     # ** Use supplied shared image object
     self.imageObj = imageObj
   else:
     # ** Create shared image object
     image = np.zeros((camera_frame_height, camera_frame_width, camera_frame_depth), dtype=np.uint8)  # create an image
     imageShape = image.shape  # store original shape
     imageSize = image.size  # store original size (in bytes)
     image.shape = imageSize  # flatten numpy array
     self.imageObj = Array(c_ubyte, image)  # create a synchronized shared array object
Example #19
class Buffer(object):
    def __init__(self, size=1000, data_type="int32"):
        self.data_type = data_type
        self.head = Value("i", 0)
        self.ring_buffer = Array(data_type[0], range(size))
        self.size = size
        for i in range(size):
            self.ring_buffer[i] = 0  # probably really slow but not done often

    def get_head_value(self):
        return self.ring_buffer[self.head.value]

    def get_buffer(self):
        buf = np.frombuffer(self.ring_buffer.get_obj(), dtype=self.data_type)
        return np.concatenate((buf[self.head.value + 1 :], buf[0 : self.head.value]))

    def push(self, v):
        self.head.value = self.head.value + 1
        if self.head.value == self.size:
            self.head.value = 0
        self.ring_buffer[self.head.value] = v  # randint(0,10)
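A short usage sketch of the ring buffer above (assumes numpy and multiprocessing.Value/Array are imported as in the snippet):

buf = Buffer(size=8, data_type="int32")
for v in (3, 1, 4, 1, 5):
    buf.push(v)
print(buf.get_head_value())  # 5, the most recently pushed value
print(buf.get_buffer())      # unfilled zero slots followed by 3, 1, 4, 1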
Example #20
 def __init__(self, dtype: np.generic, shape: Tuple[int]) -> None:
     self.arr = Array(_NP_TO_CT[dtype.type],
                      int(np.prod(shape)))  # type: ignore
     self.dtype = dtype
     self.shape = shape
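_NP_TO_CT is defined elsewhere in the source project; a plausible minimal mapping, plus a helper that reinterprets the wrapper's buffer as a NumPy array, might look like this (an illustrative sketch, not the project's actual code):

import ctypes
import numpy as np
from multiprocessing import Array

_NP_TO_CT = {np.float32: ctypes.c_float,   # assumed mapping, for illustration only
             np.float64: ctypes.c_double,
             np.int32: ctypes.c_int32,
             np.uint8: ctypes.c_uint8}

def as_numpy(shared):
    """Reinterpret the shared buffer as a NumPy array of the stored dtype/shape."""
    flat = np.frombuffer(shared.arr.get_obj(), dtype=shared.dtype)
    return flat.reshape(shared.shape)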
Example #21
# Value or Array can be used to store data in a shared memory map.
# Unlike a numpy array, this Array can only be one-dimensional, not multi-dimensional.
# Like Value, the data type must be specified, otherwise an error is raised.

from multiprocessing import Process, Value, Array


def f(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]


if __name__ == "__main__":
    num = Value("d", 0.0)
    arr = Array("i", range(10))

    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()

    print(num.value)
    print(arr[:])
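Since (as noted above) the multiprocessing Array itself is one-dimensional, a common pattern, used by several examples on this page, is to wrap it in a multi-dimensional NumPy view; a minimal sketch:

import numpy as np
from multiprocessing import Array

shared = Array("d", 12)                                # 12 doubles, zero-initialized
view = np.frombuffer(shared.get_obj()).reshape(3, 4)   # 2-D view over the same memory
view[1, 2] = 7.0
print(shared[1 * 4 + 2])                               # 7.0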
Example #22
class MjRenderPool:
    """
    Utilizes a process pool to render a MuJoCo simulation across
    multiple GPU devices. This can scale the throughput linearly
    with the number of available GPUs. Throughput can also be
    slightly increased by using more than one worker per GPU.
    """

    DEFAULT_MAX_IMAGE_SIZE = 512 * 512  # in pixels

    def __init__(self, model, device_ids=1, n_workers=None,
                 max_batch_size=None, max_image_size=DEFAULT_MAX_IMAGE_SIZE,
                 modder=None):
        """
        Args:
        - model (PyMjModel): MuJoCo model to use for rendering
        - device_ids (int/list): list of device ids to use for rendering.
            One or more workers will be assigned to each device, depending
            on how many workers are requested.
        - n_workers (int): number of parallel processes in the pool. Defaults
            to the number of device ids.
        - max_batch_size (int): maximum number of states that can be rendered
            in batch using .render(). Defaults to the number of workers.
        - max_image_size (int): maximum number pixels in images requested
            by .render()
        - modder (Modder): modder to use for domain randomization.
        """
        self._closed, self.pool = False, None

        if not (modder is None or inspect.isclass(modder)):
            raise ValueError("modder must be a class")

        if isinstance(device_ids, int):
            device_ids = list(range(device_ids))
        else:
            assert isinstance(device_ids, list), (
                "device_ids must be list of integer")

        n_workers = n_workers or 1
        self._max_batch_size = max_batch_size or (len(device_ids) * n_workers)
        self._max_image_size = max_image_size

        array_size = self._max_image_size * self._max_batch_size

        self._shared_rgbs = Array(ctypes.c_uint8, array_size * 3)
        self._shared_depths = Array(ctypes.c_float, array_size)

        self._shared_rgbs_array = np.frombuffer(
            self._shared_rgbs.get_obj(), dtype=ctypes.c_uint8)
        assert self._shared_rgbs_array.size == (array_size * 3), (
            "Array size is %d, expected %d" % (
                self._shared_rgbs_array.size, array_size * 3))
        self._shared_depths_array = np.frombuffer(
            self._shared_depths.get_obj(), dtype=ctypes.c_float)
        assert self._shared_depths_array.size == array_size, (
            "Array size is %d, expected %d" % (
                self._shared_depths_array.size, array_size))

        worker_id = Value(ctypes.c_int)
        worker_id.value = 0

        if get_start_method() != "spawn":
            raise RuntimeError(
                "Start method must be set to 'spawn' for the "
                "render pool to work. That is, you must add the "
                "following to the _TOP_ of your main script, "
                "before any other imports (since they might be "
                "setting it otherwise):\n"
                "  import multiprocessing as mp\n"
                "  if __name__ == '__main__':\n"
                "    mp.set_start_method('spawn')\n")

        self.pool = Pool(
            processes=len(device_ids) * n_workers,
            initializer=MjRenderPool._worker_init,
            initargs=(
                model.get_mjb(),
                worker_id,
                device_ids,
                self._shared_rgbs,
                self._shared_depths,
                modder))

    @staticmethod
    def _worker_init(mjb_bytes, worker_id, device_ids,
                     shared_rgbs, shared_depths, modder):
        """
        Initializes the global state for the workers.
        """
        s = RenderPoolStorage()

        with worker_id.get_lock():
            proc_worker_id = worker_id.value
            worker_id.value += 1
        s.device_id = device_ids[proc_worker_id % len(device_ids)]

        s.shared_rgbs_array = np.frombuffer(
            shared_rgbs.get_obj(), dtype=ctypes.c_uint8)
        s.shared_depths_array = np.frombuffer(
            shared_depths.get_obj(), dtype=ctypes.c_float)

        # avoid a circular import
        from mujoco_py import load_model_from_mjb, MjRenderContext, MjSim
        s.sim = MjSim(load_model_from_mjb(mjb_bytes))
        # attach a render context to the sim (needs to happen before
        # modder is called, since it might need to upload textures
        # to the GPU).
        MjRenderContext(s.sim, device_id=s.device_id)

        if modder is not None:
            s.modder = modder(s.sim, random_state=proc_worker_id)
            s.modder.whiten_materials()
        else:
            s.modder = None

        global _render_pool_storage
        _render_pool_storage = s

    @staticmethod
    def _worker_render(worker_id, state, width, height,
                       camera_name, randomize):
        """
        Main target function for the workers.
        """
        s = _render_pool_storage

        forward = False
        if state is not None:
            s.sim.set_state(state)
            forward = True
        if randomize and s.modder is not None:
            s.modder.randomize()
            forward = True
        if forward:
            s.sim.forward()

        rgb_block = width * height * 3
        rgb_offset = rgb_block * worker_id
        rgb = s.shared_rgbs_array[rgb_offset:rgb_offset + rgb_block]
        rgb = rgb.reshape(height, width, 3)

        depth_block = width * height
        depth_offset = depth_block * worker_id
        depth = s.shared_depths_array[depth_offset:depth_offset + depth_block]
        depth = depth.reshape(height, width)

        rgb[:], depth[:] = s.sim.render(
            width, height, camera_name=camera_name, depth=True,
            device_id=s.device_id)

    def render(self, width, height, states=None, camera_name=None,
               depth=False, randomize=False, copy=True):
        """
        Renders the simulations in batch. If no states are provided,
        the max_batch_size will be used.

        Args:
        - width (int): width of image to render.
        - height (int): height of image to render.
        - states (list): list of MjSimStates; updates the states before
            rendering. Batch size will be number of states supplied.
        - camera_name (str): name of camera to render from.
        - depth (bool): if True, also return depth.
        - randomize (bool): calls modder.rand_all() before rendering.
        - copy (bool): return a copy rather than a reference

        Returns:
        - rgbs: NxHxWx3 numpy array of N images in batch of width W
            and height H.
        - depth: NxHxW numpy array of N images in batch of width W
            and height H. Only returned if depth=True.
        """
        if self._closed:
            raise RuntimeError("The pool has been closed.")

        if (width * height) > self._max_image_size:
            raise ValueError(
                "Requested image larger than maximum image size. Create "
                "a new RenderPool with a larger maximum image size.")
        if states is None:
            batch_size = self._max_batch_size
            states = [None] * batch_size
        else:
            batch_size = len(states)

        if batch_size > self._max_batch_size:
            raise ValueError(
                "Requested batch size larger than max batch size. Create "
                "a new RenderPool with a larger max batch size.")

        self.pool.starmap(
            MjRenderPool._worker_render,
            [(i, state, width, height, camera_name, randomize)
             for i, state in enumerate(states)])

        rgbs = self._shared_rgbs_array[:width * height * 3 * batch_size]
        rgbs = rgbs.reshape(batch_size, height, width, 3)
        if copy:
            rgbs = rgbs.copy()

        if depth:
            depths = self._shared_depths_array[:width * height * batch_size]
            depths = depths.reshape(batch_size, height, width)
            if copy:
                depths = depths.copy()
            return rgbs, depths
        else:
            return rgbs

    def close(self):
        """
        Closes the pool and terminates child processes.
        """
        if not self._closed:
            if self.pool is not None:
                self.pool.close()
                self.pool.join()
            self._closed = True

    def __del__(self):
        self.close()
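A hypothetical usage sketch of the pool above (assumes mujoco_py is installed and that "model.xml" is a valid MJCF file; the path is illustrative):

import multiprocessing as mp

if __name__ == "__main__":
    mp.set_start_method("spawn")               # required by the check in __init__ above
    from mujoco_py import load_model_from_path
    model = load_model_from_path("model.xml")  # illustrative model path
    pool = MjRenderPool(model, device_ids=1, n_workers=2)
    rgbs = pool.render(128, 128)               # batch of HxWx3 uint8 images
    pool.close()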
Example #23
import numpy as np
import ctypes
import logging
from multiprocessing import Array, Value


def wait():
    try:
        raw_input("Press Enter to stop...")
    except EOFError:
        pass


if __name__ == "__main__":
    logging.basicConfig(filename="log.log", level=logging.DEBUG)

    sharedArrayBase = Array(ctypes.c_uint8, 256 * 576 * 720)
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, 576, 720)
    startTime = Value("d", 0.0)

    sharedArrayBase2 = Array(ctypes.c_uint8, 256 * 576 * 720)
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, 576, 720)
    startTime2 = Value("d", 0.0)

    bc = BufferedCapture(sharedArray, startTime, sharedArray2, startTime2)

    c = Compression(sharedArray, startTime, sharedArray2, startTime2, 499)

    bc.startCapture()
    c.start()
Example #24
        TEST_SENTENCES.append(line[:-1])
    # print(TEST_SENTENCES)
    maxlen = 30

    p = vlc.MediaPlayer('./AudioWAV_1600/StateMachine recordings/welcome.mp3')
    blockPrint()
    p.play()
    enablePrint()
    time.sleep(12)

    q = Queue()
    mutex = Lock()

    shared_folder = Value('i', 0)
    flag = Value('i', 0)
    pred_arr = Array('i', range(24))
    sm_bool = Array('i', range(24))
    visual_vocal_arr = Array('d', range(144))
    correct = Value('i', 0)
    text_arr = Array('d', range(120))
    text_pred_arr = Array('i', range(24))
    current_visual_seq = Value('i', 0)
    current_text_seq = Value('i', 0)
    correct = Value('i', 0)
    current_ov_seq = Value('i', -1)
    total = 24

    # p3 = Process(target=sendtoard, args=((q),mutex))
    p4 = Process(target=webcam, args=(
        shared_folder,
        flag,
Example #25
if __name__ == '__main__':
    print("--------------Value, Array 共享内存")

from multiprocessing import Process, Value, Array


def fav(n, a):
    n.value = 3.1415927
    for i in range(len(a)):
        a[i] = -a[i]


if __name__ == '__main__':
    num = Value('d', 0.0)
    arr = Array('i', range(10))

    p = Process(target=fav, args=(num, arr))
    p.start()
    p.join()

    print(num.value)
    print(arr[:])

if __name__ == '__main__':
    print("--------------Manager 服务进程管理")
# Manager() 返回的管理器支持类型: list 、 dict 、 Namespace 、 Lock 、 RLock 、 Semaphore 、 BoundedSemaphore 、 Condition 、 Event 、 Barrier 、 Queue 、 Value 和 Array 。

from multiprocessing import Process, Manager
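The snippet breaks off after the import; a minimal Manager example along the lines described in the comment above might look like this (an illustrative sketch mirroring the standard-library documentation pattern; the function name is hypothetical):

def manager_demo(d, l):
    d["pi"] = 3.1415927
    l.reverse()

if __name__ == "__main__":
    with Manager() as manager:
        d = manager.dict()
        l = manager.list(range(10))
        p = Process(target=manager_demo, args=(d, l))
        p.start()
        p.join()
        print(d)
        print(l)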

Example #26
import numpy as np
import re, subprocess, os, time, sys
from threading import Timer
from multiprocessing import Array, Value
import ctypes

PUNCTUATION = (".", "?", "!")
PUNCTUATION_PROBS = (0.7, 0.25, 0.05)
default_re = re.compile("$")
bag_lines = Array(ctypes.c_char_p, 500)
num_baglines = Value(ctypes.c_ushort, 0)


class RWMC(dict):
    def __init__(self,
                 filename=None,
                 timeout=216000,
                 emote_regex=default_re,
                 min_len=50000,
                 commands_regex=default_re):
        self.timeout = timeout
        self.filename = filename
        self.emote_regex = emote_regex
        self.commands_regex = commands_regex
        self.min_len = min_len
        if timeout is not None:
            assert timeout >= 0
        if filename:
            assert os.path.exists(filename)
            logs = subprocess.Popen(["tail", "-n", "5000", filename], \
                stdout = subprocess.PIPE)
Example #27
def main(argv):

    procs = 1  # number of child processes
    comprBool = -1  # to compress set to 1. to decompress set to 0
    fullCheck = 0  # case 1 check for all files
    fichArgs = ""  # files to iterate

    try:
        opts, args = getopt.getopt(argv, "cdp:t", ["numprocs="])
    except getopt.GetoptError:
        print 'pzip -c|-d [-p n] [-t] {ficheiros}'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-c':
            comprBool = 1
        elif opt == '-d':
            comprBool = 0
        elif opt == '-t':
            fullCheck = 1
        elif opt in ("-p", "--numprocs"):
            try:
                procs = int(arg)
            except:
                print "Insert whole number"
                return

    if procs < 1:
        print "Insert Int bigger than 0"
        return

    if comprBool == -1:
        print "Insert -c (compress) or -d (decompress)"
        return 0

    fichArgs = []
    if args == []:
        print "Insert File Names: (CTRL + D to Finish)"
        #linha = sys.stdin.readline()
        #fichArgs = linha.split()
        for line in sys.stdin:
            temp = line
            temp2 = temp.rstrip('\n')
            fichArgs.append(temp2)
    else:
        fichArgs = args

    fichList = []

    if fullCheck == 1:  # with -t: stop at the first missing file
        for ficheiro in fichArgs:
            if os.path.isfile(ficheiro):
                fichList.append(ficheiro)
            else:
                print "O ficheiro '" + ficheiro + "' nao existe."
                break

    elif fullCheck == 0:  # without -t: process every existing file, skipping missing ones
        for ficheiro in fichArgs:
            if os.path.isfile(ficheiro):
                fichList.append(ficheiro)

    # print "\n"
    # print "-t: " + str(fullCheck)
    # print "lista de ficheiros: " + str(fichList)
    # print "numero de processo: " + str(procs)
    # print "\n"

    tamanhoLista = len(fichList)
    listaPartilhada = Array(c_char_p, tamanhoLista)

    for p in range(tamanhoLista):
        listaPartilhada[p] = fichList[p]

    # print "lista partilhada copiada: "
    # for p in listaPartilhada:
    #     print p
    # print "\n"

    intPartilhado = Value(c_int, -1)

    vazio = Semaphore(1)

    def filho():
        while (intPartilhado.value < len(listaPartilhada) - 1):
            vazio.acquire()
            intPartilhado.value += 1
            temp = listaPartilhada[intPartilhado.value]
            # print temp
            # print intPartilhado.value
            vazio.release()
            if comprBool == 0:
                # print "process number " + str(os.getpid()) + " will decompress " + temp
                time.sleep(0.1)
                decompress(temp)
            elif comprBool == 1:
                print "process number " + str(
                    os.getpid()) + " will compress " + temp
                time.sleep(0.1)
                compress(temp)
        return 0

    filhos = []
    for i in range(procs):
        newP = Thread(target=filho)
        filhos.append(newP)
        newP.start()
    for p in filhos:
        p.join()

    print "Numero de ficheiros processados: " + str(intPartilhado.value + 1)
Example #28
from multiprocessing import Process, Array, freeze_support
import time
import random


def fun(target):
    for i in target:
        print(i, end=' ')
    print(target.value)


if __name__ == "__main__":
    freeze_support()
    # create shared memory
    # the shared Array here holds the five bytes of b"hello"
    shm = Array("c", b"hello")
    p = Process(target=fun, args=(shm, ))
    p.start()
    p.join()
Example #29
    def __init__(self,
                 env_fns,
                 spaces=None,
                 worker_id=0,
                 base_port=5005,
                 curriculum=None,
                 seed=0,
                 docker_training=False,
                 no_graphics=False,
                 brain_name="CustomBrain"):
        """
        If you don't specify observation_space, we'll have to create a dummy
        environment to get it.
        """
        if spaces:
            observation_space, action_space = spaces
        else:
            print('Creating dummy env object to get spaces')
            dummy = env_fns[0](0)
            observation_space, action_space = dummy.observation_space, dummy.action_space
            dummy.close()
            del dummy
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
        self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(
            observation_space)
        self.obs_bufs = [{
            k: Array(_NP_TO_CT[self.obs_dtypes[k].type],
                     int(np.prod(self.obs_shapes[k])))
            for k in self.obs_keys
        } for _ in env_fns]
        self.parent_pipes = []
        self.procs = []
        self.agents = []
        for idx, (env_fn, obs_buf) in enumerate(zip(env_fns, self.obs_bufs)):
            wrapped_fn = CloudpickleWrapper(env_fn)
            parent_pipe, child_pipe = Pipe()
            proc = Process(target=_subproc_worker,
                           args=(child_pipe, parent_pipe, wrapped_fn, obs_buf,
                                 self.obs_shapes, self.obs_dtypes,
                                 self.obs_keys, idx))
            proc.daemon = True
            self.procs.append(proc)
            self.parent_pipes.append(parent_pipe)
            proc.start()
            child_pipe.close()
            self.agents += [str(idx)]
        self._n_agents = len(self.agents)
        self.waiting_step = False

        class Viewer(object):
            def __init__(self):
                return

            def close(self):
                return

        self.viewer = Viewer()
        self._brains = {}
        brain_name = brain_name if isinstance(
            action_space, gym.spaces.Discrete) else "Continuous" + brain_name
        self._brain_names = [brain_name]
        self._external_brain_names = []
        self.num_actions = action_space.shape[0]
        resolution = None
        if isinstance(observation_space, gym.spaces.Box):
            assert len(observation_space.shape
                       ) == 3, "Only H,W,C continuous observations possible"
            resolution = [{
                "height": observation_space.shape[0],
                "width": observation_space.shape[1],
                "blackAndWhite": observation_space.shape[2]
            }]

        self._brains[brain_name] = \
            BrainParameters(brain_name, {
                "vectorObservationSize": 0 if isinstance(observation_space, gym.spaces.Box) else observation_space.n,
                "numStackedVectorObservations": len(env_fns) if isinstance(observation_space, gym.spaces.Discrete) else 0,
                "cameraResolutions": resolution,
                "vectorActionSize": action_space.n if isinstance(action_space, gym.spaces.Discrete) else action_space.shape[0],
                "vectorActionDescriptions": ["", ""],
                "vectorActionSpaceType": 0 if isinstance(action_space, gym.spaces.Discrete) else 1,
                "vectorObservationSpaceType": 0 if isinstance(observation_space,gym.spaces.Discrete) else 1
            })
        self._external_brain_names += [brain_name]
        self._num_brains = len(self._brain_names)
        self._num_external_brains = len(self._external_brain_names)
        self._curriculum = Curriculum(curriculum, None)
        self._global_done = None
        self.viewer = None
Example #30
def main(verbose=False, n_processes=None, given_array=None, order='asc'):
    """
    Create environment for simulating the distributed n-2 rounds alternative OET
    sorting. Each Process node has it's own resource. Each of the non-terminal
    node is connected to two adjacent processes with a synchronized duplex Pipe
    connection.
    """

    num_process = 0
    process_list = []

    if not given_array:

        num_process = int(random.uniform(2.0, 20.0))
        if n_processes:
            num_process = n_processes

        connection_list = [Pipe() for i in range(num_process - 1)]

        shared_array = None
        if verbose:
            shared_array = Array('i', (num_process + 1) * num_process)

        process_list = [
            ProcessNode(None,
                        connection_list[0][0],
                        shared_array=shared_array,
                        order=order)
        ]
        for i in range(num_process - 2):
            p = ProcessNode(connection_list[i][1],
                            connection_list[i + 1][0],
                            shared_array=shared_array,
                            order=order)
            process_list.append(p)
        process_list.append(
            ProcessNode(connection_list[num_process - 2][1],
                        shared_array=shared_array,
                        order=order))

    else:

        num_process = len(given_array)

        connection_list = [Pipe() for i in range(num_process - 1)]

        shared_array = None
        if verbose:
            shared_array = Array('i', (num_process + 1) * num_process)

        process_list = [
            ProcessNode(None,
                        connection_list[0][0],
                        shared_array=shared_array,
                        data=given_array[0],
                        order=order)
        ]

        for i in range(num_process - 2):
            p = ProcessNode(connection_list[i][1],
                            connection_list[i + 1][0],
                            shared_array=shared_array,
                            data=given_array[i],
                            order=order)
            process_list.append(p)

        process_list.append(
            ProcessNode(connection_list[num_process - 2][1],
                        shared_array=shared_array,
                        data=given_array[-1],
                        order=order))

    # Start time
    start = time.perf_counter()

    for p in process_list:
        p.start()

    for p in process_list:
        p.join()

    # Elapsed time
    time_elapsed = time.perf_counter() - start

    if verbose:
        print("number of process {}".format(num_process))

        print_factor = 1
        if num_process <= 10 and num_process >= 5:
            print_factor = 2
        else:
            print_factor = 5

        for i in range(0, num_process):
            if i == 0:
                print("Initial : ", end=" ")
                for j in range(0, num_process):
                    print("P{}({})".format(j + 1,
                                           shared_array[i * num_process + j]),
                          end=" ")
                print("\n")
            elif (i == (num_process - 1)) or (i % print_factor == 0):
                print("Round {} : ".format(i), end=" ")
                for j in range(0, num_process):
                    print("P{}({})".format(j + 1,
                                           shared_array[i * num_process + j]),
                          end=" ")
                print("\n")

    return time_elapsed
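A hedged usage sketch based only on the signature above (ProcessNode and its Pipe protocol are defined elsewhere in the project; the input list is arbitrary):

if __name__ == "__main__":
    elapsed = main(verbose=True, given_array=[7, 3, 9, 1], order="asc")
    print("elapsed: {:.4f}s".format(elapsed))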
Example #31
# reading datasets

with open('documents_gst_pickle.dictionary', 'rb') as documents_pickle:
    codes = pickle.load(documents_pickle)

# mapping codes to dict with { id: content }

codes_id = {}

for index, code in enumerate(codes):
    codes_id[index] = codes[code]

# preparing shared array, the integer is count of documents in the dataset

uncompared = Array('i', 30)

# mark all documents as uncompared by storing each document id in the shared array

for ind, code in enumerate(codes_id):
    uncompared[ind] = code


def comparison(uncompared):
    global codes_id
    first = -1
    should_continue = True  # are some uncompared documents?

    while should_continue:
        # find uncompared document
        with mutex:
Example #32
    def main(self, args):
        if self.enable_camera_preview:
            #variable to compute preview framerate
            self.loop_count = 1
            self.loop_time = 0
            self.loop_start = 0
            self.total_time = 0
            self.preview_fps = 0

            self.camera_not_started = True
            # initialize VideoFrameCapture object
            cap = VideoFrameCapture(int(args.video_device),
                                    float(args.frame_width),
                                    float(args.frame_height),
                                    float(args.framerate))
            shape = cap.get_frame_size()
            self.camera_not_started = False

            # define shared variables
            self.shared_array_base = Array(ctypes.c_uint8,
                                           shape[0] * shape[1] * shape[2])
            self.frame = np.ctypeslib.as_array(
                self.shared_array_base.get_obj())
            self.frame = self.frame.reshape(shape[0], shape[1], shape[2])
            self.grabbing_fps = Value('f', 0.0)

            # start processes which run in parallel
            self.preview_synchro_event = Event()
            self.preview_process = Process(name='camera_streaming',
                                           target=camera_streaming,
                                           args=(cap, self.shared_array_base,
                                                 self.preview_synchro_event,
                                                 self.grabbing_fps))
            # launch capture process
            self.preview_process.daemon = True
            self.preview_process.start()

        # initialize NeuralNetwork object
        self.nn = NeuralNetwork(args.model_file, args.label_file,
                                float(args.input_mean), float(args.input_std))
        shape = self.nn.get_img_size()

        # define shared variables
        self.nn_processing_start = Value(ctypes.c_bool, False)
        self.nn_processing_finished = Value(ctypes.c_bool, False)
        self.nn_img_shared_array = Array(ctypes.c_uint8,
                                         shape[0] * shape[1] * shape[2])
        self.nn_img = np.ctypeslib.as_array(self.nn_img_shared_array.get_obj())
        self.nn_img = self.nn_img.reshape(shape[0], shape[1], shape[2])
        self.nn_inference_time = Value('f', 0)
        self.nn_inference_fps = Value('f', 0.0)

        self.nn_result_locations_shared_array = Array(ctypes.c_float,
                                                      1 * 10 * 4)
        self.nn_result_locations = np.ctypeslib.as_array(
            self.nn_result_locations_shared_array.get_obj())
        self.nn_result_locations = self.nn_result_locations.reshape(1, 10, 4)

        self.nn_result_classes_shared_array = Array(ctypes.c_float, 1 * 10)
        self.nn_result_classes = np.ctypeslib.as_array(
            self.nn_result_classes_shared_array.get_obj())
        self.nn_result_classes = self.nn_result_classes.reshape(1, 10)

        self.nn_result_scores_shared_array = Array(ctypes.c_float, 1 * 10)
        self.nn_result_scores = np.ctypeslib.as_array(
            self.nn_result_scores_shared_array.get_obj())
        self.nn_result_scores = self.nn_result_scores.reshape(1, 10)

        # start processes which run in parallel
        self.nn_synchro_event = Event()
        self.nn_process = Process(
            name='nn_processing',
            target=nn_processing,
            args=(self.nn, self.nn_img_shared_array, self.nn_processing_start,
                  self.nn_processing_finished, self.nn_inference_time,
                  self.nn_result_locations_shared_array,
                  self.nn_result_classes_shared_array,
                  self.nn_result_scores_shared_array, self.nn_synchro_event,
                  self.nn_inference_fps))
        # launch nn process
        self.nn_process.daemon = True
        self.nn_process.start()

        # wait the nn process to start
        self.nn_synchro_event.wait()

        if self.enable_camera_preview:
            self.preview_synchro_event.wait()

            # define the crop parameters that will be used to crop the input preview frame to
            # the requested NN input image size
            self.y1 = int(0)
            self.y2 = int(self.frame.shape[0])
            self.x1 = int((self.frame.shape[1] - self.frame.shape[0]) / 2)
            self.x2 = int(self.x1 + self.frame.shape[0])

            # set the following variable to True to trig the first NN inference
            self.nn_processing_finished.value = True

            # hide the progress bar
            GLib.source_remove(self.timeout_id)
            self.progressbar.hide()

            GLib.idle_add(self.camera_preview)
        else:
            # hide the progress bar
            GLib.source_remove(self.timeout_id)
            self.progressbar.hide()

            if not self.enable_camera_preview:
                self.button = Gtk.Button.new_with_label("Next inference")
                self.vbox.pack_start(self.button, False, False, 15)
                self.button.connect("clicked", self.still_picture)
                self.button.show_all()
Example #33
class MainUIWindow(Gtk.Window):
    def __init__(self, args):
        Gtk.Window.__init__(self, title=os.path.basename(args.model_file))

        if args.image == "":
            self.enable_camera_preview = True
        else:
            self.enable_camera_preview = False

        self.maximize()
        self.screen_width = self.get_screen().get_width()
        self.screen_height = self.get_screen().get_height()

        if self.screen_width == 720:
            self.picture_width = 480
            self.picture_height = 360
        else:
            self.picture_width = 320
            self.picture_height = 240

        self.set_position(Gtk.WindowPosition.CENTER)
        self.connect('destroy', Gtk.main_quit)

        self.vbox = Gtk.VBox()
        self.add(self.vbox)

        self.progressbar = Gtk.ProgressBar()
        self.vbox.pack_start(self.progressbar, False, False, 15)

        self.hbox = Gtk.HBox()
        self.vbox.pack_start(self.hbox, False, False, 15)

        self.image = Gtk.Image()
        self.hbox.pack_start(self.image, False, False, 15)

        self.label = Gtk.Label()
        self.label.set_size_request(400, -1)  # -1 to keep height automatic
        self.label.set_alignment(0, 0)
        self.label.set_line_wrap(True)
        self.label.set_line_wrap_mode(Gtk.WrapMode.WORD)
        self.hbox.pack_start(self.label, False, False, 15)

        self.timeout_id = GLib.timeout_add(50, self.on_timeout)

    def on_timeout(self):
        self.progressbar.pulse()
        return True

    def update_preview(self, inference_time, display_fps, grab_fps,
                       inference_fps):
        str_inference_time = str("{0:0.1f}".format(inference_time))
        str_display_fps = str("{0:.1f}".format(display_fps))
        str_grab_fps = str("{0:.1f}".format(grab_fps))
        str_inference_fps = str("{0:.1f}".format(inference_fps))

        self.label.set_markup(
            "<span font='10' color='#002052FF'><b>display   @%sfps\n</b></span>"
            "<span font='10' color='#002052FF'><b>inference @%sfps\n\n\n</b></span>"
            "<span font='15' color='#002052FF'><b>inference time: %sms\n</b></span>"
            % (str_grab_fps, str_inference_fps, str_inference_time))

    def update_still(self, inference_time):
        str_inference_time = str("{0:0.1f}".format(inference_time))

        self.label.set_markup(
            "<span font='15' color='#002052FF'><b>inference time: %sms\n</b></span>"
            % (str_inference_time))

    def update_frame(self, frame, labels):
        img = Image.fromarray(frame)
        draw = ImageDraw.Draw(img)

        # draw the top 3 detections (blue, red, green) when their score is > 0.5
        colors = [(0, 0, 255), (255, 0, 0), (0, 255, 0)]
        for det, color in enumerate(colors):
            if self.nn_result_scores[0][det] <= 0.5:
                continue
            y0 = int(self.nn_result_locations[0][det][0] * frame.shape[0])
            x0 = int(self.nn_result_locations[0][det][1] * frame.shape[1])
            y1 = int(self.nn_result_locations[0][det][2] * frame.shape[0])
            x1 = int(self.nn_result_locations[0][det][3] * frame.shape[1])
            label = labels[int(self.nn_result_classes[0][det])]
            accuracy = self.nn_result_scores[0][det] * 100
            draw.rectangle([(x0, y0), (x1, y1)], outline=color)
            draw.rectangle([(x0, y0),
                            (x0 + ((len(label) + 4) * char_text_width),
                             y0 + 14)], color)
            draw.text((x0 + 2, y0 + 2),
                      label + " " + str(int(accuracy)) + "%", (255, 255, 255))

        data = img.tobytes()
        data = GLib.Bytes.new(data)
        pixbuf = GdkPixbuf.Pixbuf.new_from_bytes(
            data, GdkPixbuf.Colorspace.RGB, False, 8, frame.shape[1],
            frame.shape[0], frame.shape[2] * frame.shape[1])
        self.image.set_from_pixbuf(pixbuf.copy())

    # termination function
    def terminate(self):
        print("Main: termination")
        if self.enable_camera_preview:
            if self.camera_not_started:
                return
            self.preview_process.terminate()
        self.nn_process.terminate()

    # get random file in a directory
    def getRandomFile(self, path):
        """
        Returns a random filename, chosen among the files of the given path.
        """
        files = os.listdir(path)
        index = random.randrange(0, len(files))
        return files[index]

    # GTK camera preview function
    def camera_preview(self):
        # crop the preview frame to fit the NN input size
        frame_crop = self.frame[self.y1:self.y2, self.x1:self.x2]
        frame_crop_RGB = cv2.cvtColor(frame_crop, cv2.COLOR_BGR2RGB)
        frame_crop_RGB_resize = cv2.resize(
            frame_crop_RGB, (self.nn_img.shape[1], self.nn_img.shape[0]))

        if self.nn_processing_finished.value:
            self.nn_processing_finished.value = False
            # grab a new frame
            self.nn_img[:, :, :] = frame_crop_RGB_resize
            # display the cropped image that will feed the NN
            #cv2.imshow("nn_img", self.nn_img)
            # request NN processing
            self.nn_processing_start.value = True

        # compute preview FPS
        loop_stop = timer()
        self.loop_time = loop_stop - self.loop_start
        self.loop_start = loop_stop
        self.total_time = self.total_time + self.loop_time
        if self.loop_count == 15:
            self.preview_fps = self.loop_count / self.total_time
            self.loop_count = 0
            self.total_time = 0
        self.loop_count = self.loop_count + 1

        # write information on the GTK UI
        inference_time = self.nn_inference_time.value * 1000
        inference_fps = self.nn_inference_fps.value
        display_fps = self.preview_fps
        grab_fps = self.grabbing_fps.value

        self.update_preview(inference_time, display_fps, grab_fps,
                            inference_fps)

        # update the preview frame
        labels = self.nn.get_labels()
        self.update_frame(frame_crop_RGB, labels)

        return True

    # GTK still picture function
    def still_picture(self, button):
        #input("Press Enter to process new inference...")
        self.nn_processing_finished.value = False
        # get randomly a picture in the directory
        rfile = self.getRandomFile(args.image)
        print("Picture ", args.image + "/" + rfile)
        img = Image.open(args.image + "/" + rfile)

        # display the picture on the screen
        prev_frame = cv2.resize(np.array(img),
                                (self.picture_width, self.picture_height))

        # execute the inference
        nn_frame = cv2.resize(prev_frame,
                              (self.nn_img.shape[1], self.nn_img.shape[0]))
        self.nn_img[:, :, :] = nn_frame
        self.nn_processing_start.value = True
        while not self.nn_processing_finished.value:
            pass

        # write information on the GTK UI
        inference_time = self.nn_inference_time.value * 1000

        self.update_still(inference_time)

        # update the preview frame
        labels = self.nn.get_labels()
        self.update_frame(prev_frame, labels)

        return True

    def main(self, args):
        if self.enable_camera_preview:
            # variables used to compute the preview framerate
            self.loop_count = 1
            self.loop_time = 0
            self.loop_start = 0
            self.total_time = 0
            self.preview_fps = 0

            self.camera_not_started = True
            # initialize VideoFrameCapture object
            cap = VideoFrameCapture(int(args.video_device),
                                    float(args.frame_width),
                                    float(args.frame_height),
                                    float(args.framerate))
            shape = cap.get_frame_size()
            self.camera_not_started = False

            # define shared variables
            self.shared_array_base = Array(ctypes.c_uint8,
                                           shape[0] * shape[1] * shape[2])
            self.frame = np.ctypeslib.as_array(
                self.shared_array_base.get_obj())
            self.frame = self.frame.reshape(shape[0], shape[1], shape[2])
            self.grabbing_fps = Value('f', 0.0)

            # start processes which run in parallel
            self.preview_synchro_event = Event()
            self.preview_process = Process(name='camera_streaming',
                                           target=camera_streaming,
                                           args=(cap, self.shared_array_base,
                                                 self.preview_synchro_event,
                                                 self.grabbing_fps))
            # launch capture process
            self.preview_process.daemon = True
            self.preview_process.start()

        # initialize NeuralNetwork object
        self.nn = NeuralNetwork(args.model_file, args.label_file,
                                float(args.input_mean), float(args.input_std))
        shape = self.nn.get_img_size()

        # define shared variables
        self.nn_processing_start = Value(ctypes.c_bool, False)
        self.nn_processing_finished = Value(ctypes.c_bool, False)
        self.nn_img_shared_array = Array(ctypes.c_uint8,
                                         shape[0] * shape[1] * shape[2])
        self.nn_img = np.ctypeslib.as_array(self.nn_img_shared_array.get_obj())
        self.nn_img = self.nn_img.reshape(shape[0], shape[1], shape[2])
        self.nn_inference_time = Value('f', 0)
        self.nn_inference_fps = Value('f', 0.0)

        self.nn_result_locations_shared_array = Array(ctypes.c_float,
                                                      1 * 10 * 4)
        self.nn_result_locations = np.ctypeslib.as_array(
            self.nn_result_locations_shared_array.get_obj())
        self.nn_result_locations = self.nn_result_locations.reshape(1, 10, 4)

        self.nn_result_classes_shared_array = Array(ctypes.c_float, 1 * 10)
        self.nn_result_classes = np.ctypeslib.as_array(
            self.nn_result_classes_shared_array.get_obj())
        self.nn_result_classes = self.nn_result_classes.reshape(1, 10)

        self.nn_result_scores_shared_array = Array(ctypes.c_float, 1 * 10)
        self.nn_result_scores = np.ctypeslib.as_array(
            self.nn_result_scores_shared_array.get_obj())
        self.nn_result_scores = self.nn_result_scores.reshape(1, 10)

        # start processes which run in parallel
        self.nn_synchro_event = Event()
        self.nn_process = Process(
            name='nn_processing',
            target=nn_processing,
            args=(self.nn, self.nn_img_shared_array, self.nn_processing_start,
                  self.nn_processing_finished, self.nn_inference_time,
                  self.nn_result_locations_shared_array,
                  self.nn_result_classes_shared_array,
                  self.nn_result_scores_shared_array, self.nn_synchro_event,
                  self.nn_inference_fps))
        # launch nn process
        self.nn_process.daemon = True
        self.nn_process.start()

        # wait for the nn process to start
        self.nn_synchro_event.wait()

        if self.enable_camera_preview:
            self.preview_synchro_event.wait()

            # define the crop parameters that will be used to crop the input preview frame to
            # the requested NN input image size
            self.y1 = int(0)
            self.y2 = int(self.frame.shape[0])
            self.x1 = int((self.frame.shape[1] - self.frame.shape[0]) / 2)
            self.x2 = int(self.x1 + self.frame.shape[0])

            # set the following variable to True to trigger the first NN inference
            self.nn_processing_finished.value = True

            # hide the progress bar
            GLib.source_remove(self.timeout_id)
            self.progressbar.hide()

            GLib.idle_add(self.camera_preview)
        else:
            # hide the progress bar
            GLib.source_remove(self.timeout_id)
            self.progressbar.hide()

            # still-picture mode: add a button to request the next inference
            self.button = Gtk.Button.new_with_label("Next inference")
            self.vbox.pack_start(self.button, False, False, 15)
            self.button.connect("clicked", self.still_picture)
            self.button.show_all()

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True

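
The preview loop above coordinates with the NN worker purely through shared state: an Array holds the current frame and two boolean Value flags (nn_processing_start, nn_processing_finished) implement a request/acknowledge handshake. Below is a minimal, self-contained sketch of that handshake pattern; the names and the stand-in "inference" (a mean over the pixels) are illustrative only, not part of the application above.

import ctypes

import numpy as np
from multiprocessing import Array, Process, Value


def worker(img_shared, start_flag, done_flag, shape):
    # Wrap the shared buffer once; writes made by the producer are visible here.
    img = np.ctypeslib.as_array(img_shared.get_obj()).reshape(shape)
    while True:
        if start_flag.value:              # the producer requested an "inference"
            start_flag.value = False
            result = float(img.mean())    # stand-in for the real NN call
            print("mean pixel value:", result)
            done_flag.value = True        # tell the producer it may send more


if __name__ == "__main__":
    shape = (4, 4, 3)
    img_shared = Array(ctypes.c_uint8, shape[0] * shape[1] * shape[2])
    start_flag = Value(ctypes.c_bool, False)
    done_flag = Value(ctypes.c_bool, True)    # let the first frame through

    proc = Process(target=worker,
                   args=(img_shared, start_flag, done_flag, shape),
                   daemon=True)
    proc.start()

    img = np.ctypeslib.as_array(img_shared.get_obj()).reshape(shape)
    for frame in range(3):
        while not done_flag.value:        # busy-wait, as the GTK code above does
            pass
        done_flag.value = False
        img[:] = frame                    # "grab" a new frame into shared memory
        start_flag.value = True           # request processing
    while not done_flag.value:            # wait for the last frame to finish
        pass
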
################## SET PROCESSES ##################
runflag = True
# 'f' marks the pipe end handed to the function, 'l' the end used in the main loop
#pipes for carr
pipecl1, pipecf1 = Pipe(False)
#pipes for uarr
#pipeuf1, pipeul1 = Pipe()
#pipes for socket objects
#pipesockf1, pipesockl1 = Pipe()
#set process
arr1 = np.array([0,0,0],dtype=np.float64)
uarray1 = Array('d',3)
#uarray1 = sharedctypes.synchronized(arr1)

uflag1 = Value('B',1)
proc1 = Process(target=socketcomm,args=(port1,pipecf1,uflag1,uarray1))

# 'f' marks the pipe end handed to the function, 'l' the end used in the main loop
#pipes for carr
pipecl2, pipecf2 = Pipe(False)
#pipes for uarr
#pipeuf2, pipeul2 = Pipe()
#pipes for socket objects
#pipesockf2, pipesockl2 = Pipe()
#set process

arr2 = np.array([0,0,0],dtype=np.float64)
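
socketcomm and port1/port2 are defined elsewhere in this file; the pattern being set up here is a one-way Pipe (Pipe(False) returns a receive-only and a send-only end), a Value flag, and a shared Array handed to each worker process. A small self-contained sketch of that combination, with an invented stand-in for socketcomm, might look like this:

from multiprocessing import Array, Pipe, Process, Value


def socket_worker(conn, uflag, uarray):
    # Invented stand-in for socketcomm: fill the shared array, flip the flag,
    # then report through the send-only end of the pipe.
    uarray[0], uarray[1], uarray[2] = 1.0, 2.0, 3.0
    uflag.value = 0
    conn.send("ready")
    conn.close()


if __name__ == "__main__":
    # Pipe(False): the first end can only recv(), the second can only send()
    pipe_recv, pipe_send = Pipe(False)
    uarray = Array('d', 3)
    uflag = Value('B', 1)

    proc = Process(target=socket_worker, args=(pipe_send, uflag, uarray))
    proc.start()
    print(pipe_recv.recv(), uflag.value, uarray[:])   # ready 0 [1.0, 2.0, 3.0]
    proc.join()
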
예제 #35
0
class CostComputation(object):
    """Computes the cost matrix."""

    def __init__(self):

        self.all_curves = Listing.index_all_curves()
        with open("outputs/index_file.txt", "w") as index_file:
            for index, item in enumerate(self.all_curves):
                index_file.write("%i,%s\n" % (index, str(item)))
        self.n = len(self.all_curves)

        self.total_costs_matrix_base = Array(ctypes.c_double, self.n*self.n)
        self.total_costs_matrix = numpy.ctypeslib.as_array(
                             self.total_costs_matrix_base.get_obj())
        self.total_costs_matrix = self.total_costs_matrix.reshape(self.n,self.n)


    def set_total_costs_matrix(self, i, j, def_param=None):
        def_param = self.total_costs_matrix_base
        curve_name_i = self.all_curves[i][0]
        curve_type_i = self.all_curves[i][1]
        curve_file_i = fits.open(os.getcwd()+'/memoria/'+
                        'inputs/'+curve_type_i+'/'+curve_name_i+'.fits',
                        memmap=False)

        curve_data_i = Extractor.get_values(curve_file_i)

        curve_file_i.close()

        curve_name_j = self.all_curves[j][0]
        curve_type_j = self.all_curves[j][1]
        curve_file_j = fits.open(os.getcwd()+'/memoria/'+
                        'inputs/'+curve_type_j+'/'+curve_name_j+'.fits',
                        memmap=False)

        curve_data_j = Extractor.get_values(curve_file_j)

        curve_file_j.close()

        x,y = curve_data_i, curve_data_j

        dtw = DTW(x,y)
        cost_matrix = dtw.compute_cost_matrix(DTW.euclidean_distance)
        acc_cost_matrix, cost = dtw.compute_acc_cost_matrix(cost_matrix)

        self.total_costs_matrix[i,j] = cost


    def write_cost_matrix(self):
        begin = timeit.default_timer()

        pool = Pool(processes=cpu_count())
        iterable = []
        for i in range(self.n):
            for j in range(i+1,self.n):
                iterable.append((i,j))
        pool.starmap(self.set_total_costs_matrix, iterable)

        self.total_costs_matrix.dump(os.getcwd()+'/memoria/outputs/cost_matrix')

        end = timeit.default_timer()
        print(end - begin)
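
One caveat with the method above: synchronized Array objects generally cannot be shipped through a Pool's task queue (they are meant to be shared by inheritance), so the class relies on the forked workers inheriting self.total_costs_matrix_base. A more explicit, portable variant of the same idea passes the shared buffer to each worker once through the Pool initializer; the sketch below uses invented names and a trivial stand-in for the DTW cost.

import ctypes

import numpy as np
from multiprocessing import Array, Pool, cpu_count

_shared = {}


def _init_worker(matrix_base, n):
    # Runs once in every worker: wrap the shared buffer as an n x n view.
    _shared["matrix"] = np.ctypeslib.as_array(matrix_base.get_obj()).reshape(n, n)


def _fill_cell(i, j):
    # Stand-in for the DTW cost; the write lands directly in shared memory.
    _shared["matrix"][i, j] = abs(i - j)


if __name__ == "__main__":
    n = 4
    matrix_base = Array(ctypes.c_double, n * n)
    pairs = [(i, j) for i in range(n) for j in range(i + 1, n)]

    with Pool(processes=cpu_count(),
              initializer=_init_worker,
              initargs=(matrix_base, n)) as pool:
        pool.starmap(_fill_cell, pairs)

    print(np.ctypeslib.as_array(matrix_base.get_obj()).reshape(n, n))
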
예제 #36
0
    def run(self):
        manager = Manager()
        joystickState = manager.dict()
        joystickStateNum = Value('d', 0.0)
        acousticStateNum = Value('d', 0.0)
        heartbeatValue = Array('i', [0] * 3)

        acousticProcess = Process(target=self.acousticPlayer,
                                  args=(joystickStateNum, acousticStateNum))
        joystickReaderProcess = Process(target=self.joystickReader,
                                        args=(joystickState, joystickStateNum))
        joystickUpdaterProcess = Process(target=self.joystickUpdater,
                                         args=(joystickState, joystickStateNum,
                                               acousticStateNum,
                                               heartbeatValue))

        try:
            # Start the processes
            joystickReaderProcess.start()
            joystickUpdaterProcess.start()
            acousticProcess.start()

            # control heartbeat
            maxBrightness = 50
            increment = 1
            increasing = True
            brightness = 0
            while True:
                # If all processes are running, show blue/green heartbeat
                if acousticProcess.is_alive(
                ) and joystickReaderProcess.is_alive(
                ) and joystickUpdaterProcess.is_alive():
                    heartbeatValue[0] = 0
                    heartbeatValue[1] = maxBrightness - brightness
                    heartbeatValue[2] = brightness
                # Otherwise show red/orange heartbeat
                else:
                    heartbeatValue[0] = 25
                    heartbeatValue[1] = 0
                    heartbeatValue[2] = brightness // 2  # integer division: the Array is typed 'i'
                # if joystick updater is running, then it will set the LEDs
                if not joystickUpdaterProcess.is_alive():
                    self._leds.setPixel(0, *heartbeatValue)
                    self._leds.show()

                brightness += increment * (1 if increasing else -1)
                if brightness > maxBrightness - 2:
                    increasing = False
                if brightness < 2:
                    increasing = True

                sleep(0.05)
        except:
            pass

        # Something went wrong: kill the process
        acousticProcess.terminate()
        joystickReaderProcess.terminate()
        joystickUpdaterProcess.terminate()
        # show a red LED then exit
        self._leds.clearStrip()
        self._leds.setPixel(0, 25, 0, 0)
        self._leds.show()
예제 #37
0
    def multi_fit(self, data, target, epochs=1, eval_x=None, eval_y=None):

        hash_x = data.copy()
        # Remove id if in the columns
        if self.id in hash_x:
            hash_x.drop(self.id, axis=1, inplace=True)

        if eval_x is not None:
            # val_hash_x = self.multi_hash(eval_x)
            val_hash_x = eval_x.copy()
            # Remove id if in the columns
            if self.id in val_hash_x:
                val_hash_x.drop(self.id, axis=1, inplace=True)

        start = datetime.now()
        count = len(data)
        n_ = Array('d', self.n, lock=False)
        z_ = Array('d', self.z, lock=False)
        lock_ = Lock()
        for e_ in range(epochs):
            # Compute predictions
            loss_train = Value('d', 0, lock=False)
            full_data = np.hstack((target.values.reshape(-1,
                                                         1), hash_x.values))
            # Here we use Process directly since Pool.map does not support shared objects;
            # z and n will be updated wildly, no locking is enforced (see the sketch after this example)
            processes = [
                Process(target=self._train_samples,
                        args=(partial_data, z_, n_, lock_, loss_train))
                for partial_data in np.array_split(full_data, self.cpus)
            ]

            for p in processes:
                p.start()

            while processes:
                processes.pop().join()

            if eval_x is not None:
                # Compute validation losses
                p = Pool(self.cpus)
                # Shared memory is not supported by map, and z_ and n_ should be read-only here,
                # so we create np.arrays from the shared memory objects
                oof_v = p.map(
                    functools.partial(self._predict_samples,
                                      z_=np.array(z_),
                                      n_=np.array(n_)),
                    np.array_split(val_hash_x.values, self.cpus))
                val_preds = np.hstack(oof_v)
                p.close()
                p.join()
                val_logloss = log_loss(eval_y, val_preds)
                val_auc = roc_auc_score(eval_y, val_preds)
                # Display current training and validation losses
                # t_logloss stands for current train_logloss, v for valid
                print(
                    'time_used:%s\tepoch: %-4drows:%d\tt_logloss:%.5f\tv_logloss:%.5f\tv_auc:%.6f'
                    % (datetime.now() - start, e_, count + 1,
                       (loss_train.value / count), val_logloss, val_auc))
                del val_preds
                del oof_v
                gc.collect()
            else:
                print('time_used:%s\tepoch: %-4drows:%d\tt_logloss:%.5f' %
                      (datetime.now() - start, e_, count + 1,
                       (loss_train.value / count)))

            # del loss_v
            # print(z_)
            gc.collect()

        self.n = np.array(n_)
        self.z = np.array(z_)
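
The sketch referred to above: the weight vectors are plain (lock=False) shared arrays, each worker process updates them in place without synchronization, and the parent copies them back into numpy arrays once the workers have joined. The names and the toy update rule are illustrative, not the class's real training step.

import numpy as np
from multiprocessing import Array, Process


def update_weights(rows, z_, n_):
    # z_ and n_ are lock=False shared arrays: the += below is unsynchronized,
    # so concurrent updates to the same index may race (hogwild-style).
    for row in rows:
        idx = int(row[0])
        n_[idx] += 1.0
        z_[idx] += row[1]


if __name__ == "__main__":
    n_features, n_workers = 8, 4
    z_ = Array('d', n_features, lock=False)
    n_ = Array('d', n_features, lock=False)

    rng = np.random.default_rng(0)
    data = np.column_stack([rng.integers(0, n_features, 100),
                            rng.normal(size=100)])

    procs = [Process(target=update_weights, args=(chunk, z_, n_))
             for chunk in np.array_split(data, n_workers)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

    # Afterwards the shared buffers are read back into ordinary numpy arrays.
    print(np.array(n_))
    print(np.array(z_))
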
예제 #38
0
파일: array.py 프로젝트: lhcwm/note
from multiprocessing import Process,Array
import time

# create the shared memory
# shm = Array('i', [1, 2, 3, 4, 6])
# or specify only the size of the space to allocate
# shm = Array('i', 5)
# store a byte string
shm = Array('c', b'hello')

def fun():
    for i in shm:
        print(i)
    shm[4] = b'k'

p=Process(target=fun)
p.start()
p.join()
print(shm.value)  # print the stored byte string
for i in shm:
    print(i)
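
A related detail not shown above: when the Array is created with the default lock=True (as here), the wrapper also carries a lock that can be taken explicitly with get_lock() for read-modify-write updates. A small sketch, using the integer form from the commented-out line:

from multiprocessing import Array, Process


def increment(shm):
    # take the Array's built-in lock for the read-modify-write loop
    with shm.get_lock():
        for i in range(len(shm)):
            shm[i] += 1


if __name__ == "__main__":
    shm = Array('i', [1, 2, 3, 4, 6])
    p = Process(target=increment, args=(shm,))
    p.start()
    p.join()
    print(shm[:])   # [2, 3, 4, 5, 7]
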
예제 #39
0
def add_all(fr, to, index, array):

    s = 0
    for n in range(fr, to + 1):
        s += n
    print(f'index:{index} start={fr:,} end={to:,} sum={s}')
    array[index] = s


print(f'{__name__}')
if __name__ == '__main__':
    print('-- process --')
    num = 100
    start_time = time.time()
    wk_num = 10
    result = Array('q', wk_num)
    worker = [None] * wk_num

    end_num = 10_000 * num
    division = end_num // wk_num

    for i in range(wk_num):
        start = i * division
        end = (i + 1) * division - 1
        #print(f'start={start:,} end={end:,}')
        worker[i] = Process(target=add_all, args=(start, end, i, result))
        worker[i].start()
    wk_sum = 0
    for i in range(wk_num):
        worker[i].join()
        #print(result)
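
The snippet above is cut off before the partial sums stored in result are combined; a complete, self-contained version of the same idea (totalling the shared 'q' array after the joins and checking it against the closed form) could look like this:

from multiprocessing import Array, Process


def add_all(fr, to, index, array):
    # store this worker's partial sum in its own slot of the shared array
    array[index] = sum(range(fr, to + 1))


if __name__ == '__main__':
    wk_num = 10
    end_num = 1_000_000
    division = end_num // wk_num
    result = Array('q', wk_num)

    workers = [Process(target=add_all,
                       args=(i * division, (i + 1) * division - 1, i, result))
               for i in range(wk_num)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()

    wk_sum = sum(result)                            # the shared array iterates like a list
    print(wk_sum == end_num * (end_num - 1) // 2)   # True: sum of 0 .. end_num - 1
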
예제 #40
0
def photometry(event, pcf, photdir, mute, owd):

    tini = time.time()

    # Create photometry log
    logname = event.logname
    log = le.Logedit(photdir + "/" + logname, logname)
    log.writelog("\nStart " + photdir + " photometry: " + time.ctime())

    parentdir = os.getcwd() + "/"
    os.chdir(photdir)

    # Parse the attributes from the control file to the event:
    attrib = vars(pcf)
    keys = attrib.keys()
    for key in keys:
        setattr(event, key, attrib.get(key))

    maxnimpos, npos = event.maxnimpos, event.npos
    # allocating frame parameters:
    event.fp.aplev = np.zeros((npos, maxnimpos))
    event.fp.aperr = np.zeros((npos, maxnimpos))
    event.fp.nappix = np.zeros((npos, maxnimpos))
    event.fp.skylev = np.zeros((npos, maxnimpos))
    event.fp.skyerr = np.zeros((npos, maxnimpos))
    event.fp.nskypix = np.zeros((npos, maxnimpos))
    event.fp.nskyideal = np.zeros((npos, maxnimpos))
    event.fp.status = np.zeros((npos, maxnimpos))
    event.fp.good = np.zeros((npos, maxnimpos))

    # For interpolated aperture photometry, we need to "interpolate" the
    # mask, which requires float values. Thus, we convert the mask to
    # floats (this needs to be done before processes are spawned or memory
    # usage balloons).
    if event.mask.dtype != float:
        event.mask = event.mask.astype(float)

    # Aperture photometry:
    if event.phottype == "aper":  # not event.dooptimal or event.from_aper is None:

        # Multiprocessing setup:
        # Shared memory arrays allow only 1D arrays :(
        aplev = Array("d", np.zeros(npos * maxnimpos))  # aperture flux
        aperr = Array("d", np.zeros(npos * maxnimpos))  # aperture error
        nappix = Array("d",
                       np.zeros(npos * maxnimpos))  # number of aperture pixels
        skylev = Array("d", np.zeros(npos * maxnimpos))  # sky level
        skyerr = Array("d", np.zeros(npos * maxnimpos))  # sky error
        nskypix = Array("d",
                        np.zeros(npos * maxnimpos))  # number of sky pixels
        nskyideal = Array("d", np.zeros(
            npos * maxnimpos))  # ideal number of sky pixels
        status = Array("d", np.zeros(npos * maxnimpos))  # apphot return status
        good = Array("d", np.zeros(npos * maxnimpos))  # good flag
        # Size of chunk of data each core will process:
        chunksize = maxnimpos // event.ncores + 1

        event.aparr = np.ones(npos * maxnimpos) * event.photap + event.offset

        print("Number of cores: " + str(event.ncores))
        # Start multiprocessing:
        processes = []
        for nc in range(event.ncores):
            start = nc * chunksize  # Starting index to process
            end = (nc + 1) * chunksize  # Ending   index to process
            proc = Process(target=do_aphot,
                           args=(start, end, event, log, mute, aplev, aperr,
                                 nappix, skylev, skyerr, nskypix, nskyideal,
                                 status, good, 0))
            processes.append(proc)
            proc.start()

        # Make sure all processes finish their work:
        for nc in range(event.ncores):
            processes[nc].join()

        # Put the results in the event. I need to reshape them:
        event.fp.aplev = np.asarray(aplev).reshape(npos, maxnimpos)
        event.fp.aperr = np.asarray(aperr).reshape(npos, maxnimpos)
        event.fp.nappix = np.asarray(nappix).reshape(npos, maxnimpos)
        event.fp.skylev = np.asarray(skylev).reshape(npos, maxnimpos)
        event.fp.skyerr = np.asarray(skyerr).reshape(npos, maxnimpos)
        event.fp.nskypix = np.asarray(nskypix).reshape(npos, maxnimpos)
        event.fp.nskyideal = np.asarray(nskyideal).reshape(npos, maxnimpos)
        event.fp.status = np.asarray(status).reshape(npos, maxnimpos)
        event.fp.good = np.asarray(good).reshape(npos, maxnimpos)

        # raw photometry (no sky subtraction):
        event.fp.apraw = (event.fp.aplev + (event.fp.skylev * event.fp.nappix))

        # Print results into the log if it wasn't done before:
        for pos in range(npos):
            for i in range(event.nimpos[pos]):
                log.writelog(
                    '\nframe =%7d       ' % i + 'pos   =%5d       ' % pos +
                    'y =%7.3f       ' % event.fp.y[pos, i] +
                    'x =%7.3f' % event.fp.x[pos, i] + '\n' +
                    'aplev =%11.3f   ' % event.fp.aplev[pos, i] +
                    'aperr =%9.3f   ' % event.fp.aperr[pos, i] +
                    'nappix =%6.2f' % event.fp.nappix[pos, i] + '\n' +
                    'skylev=%11.3f   ' % event.fp.skylev[pos, i] +
                    'skyerr=%9.3f   ' % event.fp.skyerr[pos, i] +
                    'nskypix=%6.2f   ' % event.fp.nskypix[pos, i] +
                    'nskyideal=%6.2f' % event.fp.nskyideal[pos, i] + '\n' +
                    'status=%7d       ' % event.fp.status[pos, i] +
                    'good  =%5d' % event.fp.good[pos, i],
                    mute=True)

    elif event.phottype == "var":  # variable aperture radius

        # Multiprocessing setup:
        # Shared memory arrays allow only 1D arrays :(
        aplev = Array("d", np.zeros(npos * maxnimpos))  # aperture flux
        aperr = Array("d", np.zeros(npos * maxnimpos))  # aperture error
        nappix = Array("d",
                       np.zeros(npos * maxnimpos))  # number of aperture pixels
        skylev = Array("d", np.zeros(npos * maxnimpos))  # sky level
        skyerr = Array("d", np.zeros(npos * maxnimpos))  # sky error
        nskypix = Array("d",
                        np.zeros(npos * maxnimpos))  # number of sky pixels
        nskyideal = Array("d", np.zeros(
            npos * maxnimpos))  # ideal number of sky pixels
        status = Array("d", np.zeros(npos * maxnimpos))  # apphot return status
        good = Array("d", np.zeros(npos * maxnimpos))  # good flag
        # Size of chunk of data each core will process:
        chunksize = maxnimpos // event.ncores + 1

        event.aparr = event.fp.noisepix[0]**.5 * event.photap + event.offset

        print("Number of cores: " + str(event.ncores))
        # Start multiprocessing:
        processes = []
        for nc in range(event.ncores):
            start = nc * chunksize  # Starting index to process
            end = (nc + 1) * chunksize  # Ending   index to process
            proc = Process(target=do_aphot,
                           args=(start, end, event, log, mute, aplev, aperr,
                                 nappix, skylev, skyerr, nskypix, nskyideal,
                                 status, good, 0))
            processes.append(proc)
            proc.start()

        # Make sure all processes finish their work:
        for nc in range(event.ncores):
            processes[nc].join()

        # Put the results in the event. I need to reshape them:
        event.fp.aplev = np.asarray(aplev).reshape(npos, maxnimpos)
        event.fp.aperr = np.asarray(aperr).reshape(npos, maxnimpos)
        event.fp.nappix = np.asarray(nappix).reshape(npos, maxnimpos)
        event.fp.skylev = np.asarray(skylev).reshape(npos, maxnimpos)
        event.fp.skyerr = np.asarray(skyerr).reshape(npos, maxnimpos)
        event.fp.nskypix = np.asarray(nskypix).reshape(npos, maxnimpos)
        event.fp.nskyideal = np.asarray(nskyideal).reshape(npos, maxnimpos)
        event.fp.status = np.asarray(status).reshape(npos, maxnimpos)
        event.fp.good = np.asarray(good).reshape(npos, maxnimpos)

        # raw photometry (no sky subtraction):
        event.fp.apraw = (event.fp.aplev + (event.fp.skylev * event.fp.nappix))

        # Print results into the log if it wasn't done before:
        for pos in range(npos):
            for i in range(event.nimpos[pos]):
                log.writelog(
                    '\nframe =%7d       ' % i + 'pos   =%5d       ' % pos +
                    'y =%7.3f       ' % event.fp.y[pos, i] +
                    'x =%7.3f' % event.fp.x[pos, i] + '\n' +
                    'aplev =%11.3f   ' % event.fp.aplev[pos, i] +
                    'aperr =%9.3f   ' % event.fp.aperr[pos, i] +
                    'nappix =%6.2f' % event.fp.nappix[pos, i] + '\n' +
                    'skylev=%11.3f   ' % event.fp.skylev[pos, i] +
                    'skyerr=%9.3f   ' % event.fp.skyerr[pos, i] +
                    'nskypix=%6.2f   ' % event.fp.nskypix[pos, i] +
                    'nskyideal=%6.2f' % event.fp.nskyideal[pos, i] + '\n' +
                    'status=%7d       ' % event.fp.status[pos, i] +
                    'good  =%5d' % event.fp.good[pos, i],
                    mute=True)

    elif event.phottype == "ell":  # elliptical
        # Multiprocessing setup:
        # Shared memory arrays allow only 1D arrays :(
        aplev = Array("d", np.zeros(npos * maxnimpos))  # aperture flux
        aperr = Array("d", np.zeros(npos * maxnimpos))  # aperture error
        nappix = Array("d",
                       np.zeros(npos * maxnimpos))  # number of aperture pixels
        skylev = Array("d", np.zeros(npos * maxnimpos))  # sky level
        skyerr = Array("d", np.zeros(npos * maxnimpos))  # sky error
        nskypix = Array("d",
                        np.zeros(npos * maxnimpos))  # number of sky pixels
        nskyideal = Array("d", np.zeros(
            npos * maxnimpos))  # ideal number of sky pixels
        status = Array("d", np.zeros(npos * maxnimpos))  # apphot return status
        good = Array("d", np.zeros(npos * maxnimpos))  # good flag
        # Size of chunk of data each core will process:
        chunksize = maxnimpos // event.ncores + 1

        print("Number of cores: " + str(event.ncores))
        # Start multiprocessing:
        processes = []
        for nc in range(event.ncores):
            start = nc * chunksize  # Starting index to process
            end = (nc + 1) * chunksize  # Ending   index to process
            proc = Process(target=do_aphot,
                           args=(start, end, event, log, mute, aplev, aperr,
                                 nappix, skylev, skyerr, nskypix, nskyideal,
                                 status, good, 0))
            processes.append(proc)
            proc.start()

        # Make sure all processes finish their work:
        for nc in range(event.ncores):
            processes[nc].join()

        # Put the results in the event. I need to reshape them:
        event.fp.aplev = np.asarray(aplev).reshape(npos, maxnimpos)
        event.fp.aperr = np.asarray(aperr).reshape(npos, maxnimpos)
        event.fp.nappix = np.asarray(nappix).reshape(npos, maxnimpos)
        event.fp.skylev = np.asarray(skylev).reshape(npos, maxnimpos)
        event.fp.skyerr = np.asarray(skyerr).reshape(npos, maxnimpos)
        event.fp.nskypix = np.asarray(nskypix).reshape(npos, maxnimpos)
        event.fp.nskyideal = np.asarray(nskyideal).reshape(npos, maxnimpos)
        event.fp.status = np.asarray(status).reshape(npos, maxnimpos)
        event.fp.good = np.asarray(good).reshape(npos, maxnimpos)

        # raw photometry (no sky subtraction):
        event.fp.apraw = (event.fp.aplev + (event.fp.skylev * event.fp.nappix))

        # Print results into the log if it wasn't done before:
        for pos in range(npos):
            for i in range(event.nimpos[pos]):
                log.writelog(
                    '\nframe =%7d       ' % i + 'pos   =%5d       ' % pos +
                    'y =%7.3f       ' % event.fp.y[pos, i] +
                    'x =%7.3f' % event.fp.x[pos, i] + '\n' +
                    'aplev =%11.3f   ' % event.fp.aplev[pos, i] +
                    'aperr =%9.3f   ' % event.fp.aperr[pos, i] +
                    'nappix =%6.2f' % event.fp.nappix[pos, i] + '\n' +
                    'skylev=%11.3f   ' % event.fp.skylev[pos, i] +
                    'skyerr=%9.3f   ' % event.fp.skyerr[pos, i] +
                    'nskypix=%6.2f   ' % event.fp.nskypix[pos, i] +
                    'nskyideal=%6.2f' % event.fp.nskyideal[pos, i] + '\n' +
                    'status=%7d       ' % event.fp.status[pos, i] +
                    'good  =%5d' % event.fp.good[pos, i],
                    mute=True)

    elif event.phottype == "psffit":
        event.fp.aplev = event.fp.flux
        event.fp.skylev = event.fp.psfsky
        event.fp.good = np.zeros((event.npos, event.maxnimpos))
        for pos in range(event.npos):
            event.fp.good[pos, 0:event.nimpos[pos]] = 1

    elif event.phottype == "optimal":
        # utils for profile construction:
        pshape = np.array([2 * event.otrim + 1, 2 * event.otrim + 1])
        subpsf = np.zeros(np.asarray(pshape, int) * event.expand)
        x = np.indices(pshape)

        clock = t.Timer(np.sum(event.nimpos),
                        progress=np.array([0.05, 0.1, 0.25, 0.5, 0.75, 1.1]))

        for pos in range(npos):
            for i in range(event.nimpos[pos]):

                # Integer part of center of subimage:
                cen = np.rint([event.fp.y[pos, i], event.fp.x[pos, i]])
                # Center in the trimmed image:
                loc = (event.otrim, event.otrim)
                # Do the trim:
                img, msk, err = ie.trimimage(event.data[i, :, :, pos],
                                             *cen,
                                             *loc,
                                             mask=event.mask[i, :, :, pos],
                                             uncd=event.uncd[i, :, :, pos])

                # Center of star in the subimage:
                ctr = (event.fp.y[pos, i] - cen[0] + event.otrim,
                       event.fp.x[pos, i] - cen[1] + event.otrim)

                # Make profile:
                # Index of the position in the supersampled PSF:
                pix = pf.pos2index(ctr, event.expand)
                profile, pctr = pf.make_psf_binning(event.psfim, pshape,
                                                    event.expand,
                                                    [pix[0], pix[1], 1.0, 0.0],
                                                    event.psfctr, subpsf)

                #subtract the sky level:
                img -= event.fp.psfsky[pos, i]
                # optimal photometry calculation:
                immean, uncert, good = op.optphot(img,
                                                  profile,
                                                  var=err**2.0,
                                                  mask=msk)

                event.fp.aplev[pos, i] = immean
                event.fp.aperr[pos, i] = uncert
                event.fp.skylev[pos, i] = event.fp.psfsky[pos, i]
                event.fp.good[pos, i] = good

                # Report progress:
                clock.check(np.sum(event.nimpos[0:pos]) + i,
                            name=event.centerdir)

    # START PREFLASH EDIT :::::::::::::::::::::::::::::::::::::

    # Do aperture on preflash data:
    if event.havepreflash:
        print("\nStart preflash photometry:")
        premaxnimpos = event.premaxnimpos
        preaplev = Array("d", np.zeros(npos * premaxnimpos))
        preaperr = Array("d", np.zeros(npos * premaxnimpos))
        prenappix = Array("d", np.zeros(npos * premaxnimpos))
        preskylev = Array("d", np.zeros(npos * premaxnimpos))
        preskyerr = Array("d", np.zeros(npos * premaxnimpos))
        preskynpix = Array("d", np.zeros(npos * premaxnimpos))
        preskyideal = Array("d", np.zeros(npos * premaxnimpos))
        prestatus = Array("d", np.zeros(npos * premaxnimpos))
        pregood = Array("d", np.zeros(npos * premaxnimpos))

        # Start process:
        mute = False
        proc = Process(target=do_aphot,
                       args=(0, event.prenimpos[0], event, log, mute, preaplev,
                             preaperr, prenappix, preskylev, preskyerr,
                             preskynpix, preskyideal, prestatus, pregood, 1))
        proc.start()
        proc.join()

        # Put the results in the event. I need to reshape them:
        event.prefp.aplev = np.asarray(preaplev).reshape(npos, premaxnimpos)
        event.prefp.aperr = np.asarray(preaperr).reshape(npos, premaxnimpos)
        event.prefp.nappix = np.asarray(prenappix).reshape(npos, premaxnimpos)
        event.prefp.status = np.asarray(prestatus).reshape(npos, premaxnimpos)
        event.prefp.skylev = np.asarray(preskylev).reshape(npos, premaxnimpos)
        event.prefp.good = np.asarray(pregood).reshape(npos, premaxnimpos)

        # raw photometry (no sky subtraction):
        event.prefp.aplev = (event.prefp.aplev +
                             (event.prefp.skylev * event.prefp.nappix))
        # END PREFLASH EDIT :::::::::::::::::::::::::::::::::::::::

    if event.method in ["bpf"]:
        event.ispsf = False

    # PSF aperture correction:
    if event.ispsf and event.phottype == "aper":
        log.writelog('Calculating PSF aperture:')
        event.psfim = event.psfim.astype(np.float64)

        imerr = np.ones(np.shape(event.psfim))
        imask = np.ones(np.shape(event.psfim))
        skyfrac = 0.1

        event.aperfrac, ape, event.psfnappix, event.psfskylev, sle, \
             event.psfnskypix, event.psfnskyideal, event.psfstatus  \
                       = ap.apphot_c(event.psfim, imerr, imask,
                                     event.psfctr[0], event.psfctr[1],
                                     event.photap * event.psfexpand,
                                     event.skyin  * event.psfexpand,
                                     event.skyout * event.psfexpand,
                                     skyfrac, event.apscale, event.skymed)

        event.aperfrac += event.psfskylev * event.psfnappix

        event.fp.aplev /= event.aperfrac
        event.fp.aperr /= event.aperfrac

        log.writelog('Aperture contains %f of PSF.' % event.aperfrac)

    if event.ispsf and event.phottype == "var":
        log.writelog('Calculating PSF aperture:')
        event.psfim = event.psfim.astype(np.float64)

        imerr = np.ones(np.shape(event.psfim))
        imask = np.ones(np.shape(event.psfim))
        skyfrac = 0.1

        avgap = np.mean(event.aparr)

        event.aperfrac, ape, event.psfnappix, event.psfskylev, sle, \
             event.psfnskypix, event.psfnskyideal, event.psfstatus  \
                       = ap.apphot_c(event.psfim, imerr, imask,
                                     event.psfctr[0], event.psfctr[1],
                                     avgap        * event.psfexpand,
                                     event.skyin  * event.psfexpand,
                                     event.skyout * event.psfexpand,
                                     skyfrac, event.apscale, event.skymed)

        event.aperfrac += event.psfskylev * event.psfnappix

        event.fp.aplev /= event.aperfrac
        event.fp.aperr /= event.aperfrac

        log.writelog('Aperture contains %f of PSF.' % event.aperfrac)

    if event.ispsf and event.phottype == "ell":
        log.writelog('Calculating PSF aperture:')
        event.psfim = event.psfim.astype(np.float64)

        imerr = np.ones(np.shape(event.psfim))
        imask = np.ones(np.shape(event.psfim))
        skyfrac = 0.1

        avgxwid = np.mean(event.fp.xsig * event.photap)
        avgywid = np.mean(event.fp.ysig * event.photap)
        avgrot = np.mean(event.fp.rot)

        event.aperfrac, ape, event.psfnappix, event.psfskylev, sle, \
             event.psfnskypix, event.psfnskyideal, event.psfstatus  \
                       = ap.elphot_c(event.psfim, imerr, imask,
                                     event.psfctr[0], event.psfctr[1],
                                     avgxwid * event.psfexpand,
                                     avgywid * event.psfexpand,
                                     avgrot,
                                     event.skyin  * event.psfexpand,
                                     event.skyout * event.psfexpand,
                                     skyfrac, event.apscale, event.skymed)

        event.aperfrac += event.psfskylev * event.psfnappix

        event.fp.aplev /= event.aperfrac
        event.fp.aperr /= event.aperfrac

        log.writelog('Aperture contains %f of PSF.' % event.aperfrac)

    # Sadly we must do photometry for every aperture used
    # Possibly use a range and interpolate? Might be an option
    # for the future to speed this up.
    # This is commented out, as it seems to just remove the corrections
    # made by variable or elliptical photometry
    # if event.ispsf and (event.phottype == "var" or event.phottype == "ell"):
    #   log.writelog('Calculating PSF aperture. This may take some time.')
    #   event.psfim = event.psfim.astype(np.float64)

    #   imerr = np.ones(np.shape(event.psfim))
    #   imask = np.ones(np.shape(event.psfim))
    #   skyfrac = 0.1

    #   aperfrac     = Array("d", np.zeros(npos*maxnimpos))# psf flux
    #   aperfracerr  = Array("d", np.zeros(npos*maxnimpos))# psf flux error
    #   psfnappix    = Array("d", np.zeros(npos*maxnimpos))# psf aperture pix num
    #   psfsky       = Array("d", np.zeros(npos*maxnimpos))# psf sky level
    #   psfskyerr    = Array("d", np.zeros(npos*maxnimpos))# psf sky error
    #   psfnskypix   = Array("d", np.zeros(npos*maxnimpos))# psf sky pix num
    #   psfnskyideal = Array("d", np.zeros(npos*maxnimpos))# psf ideal sky pix num
    #   psfstatus    = Array("d", np.zeros(npos*maxnimpos))# psf return status
    #   psfgood      = Array("d", np.zeros(npos*maxnimpos))# psf good flag

    #   processes=[]
    #   for nc in range(event.ncores):
    #     start =  nc    * chunksize
    #     end   = (nc+1) * chunksize
    #     proc = Process(target=do_aphot_psf, args=(start, end, event, log, mute,
    #                                               aperfrac, aperfracerr,
    #                                               psfnappix,
    #                                               psfsky, psfskyerr,
    #                                               psfnskypix, psfnskyideal,
    #                                               psfstatus, psfgood))

    #     processes.append(proc)
    #     proc.start()

    #   for nc in range(event.ncores):
    #     processes[nc].join()

    #   # Reshape
    #   event.aperfrac     = np.asarray(aperfrac    ).reshape(npos,maxnimpos)
    #   event.aperfracerr  = np.asarray(aperfracerr ).reshape(npos,maxnimpos)
    #   event.psfnappix    = np.asarray(psfnappix   ).reshape(npos,maxnimpos)
    #   event.psfsky       = np.asarray(psfsky      ).reshape(npos,maxnimpos)
    #   event.psfskyerr    = np.asarray(psfskyerr   ).reshape(npos,maxnimpos)
    #   event.psfnskypix   = np.asarray(psfnskypix  ).reshape(npos,maxnimpos)
    #   event.psfnskyideal = np.asarray(psfnskyideal).reshape(npos,maxnimpos)
    #   event.psfstatus    = np.asarray(psfstatus   ).reshape(npos,maxnimpos)
    #   event.psfgood      = np.asarray(psfgood     ).reshape(npos,maxnimpos)

    #   event.aperfrac += event.psfsky * event.psfnappix

    #   event.fp.aplev /= event.aperfrac
    #   event.fp.aperr /= event.aperfrac

    #   log.writelog('Aperture contains average %f of PSF.'%np.mean(event.aperfrac))

    # save
    print("\nSaving ...")
    # denoised data:
    if event.denphot:
        killdata = 'dendata'
    else:
        killdata = 'data'
    me.saveevent(event,
                 event.eventname + "_pht",
                 delete=[killdata, 'uncd', 'mask'])

    # Print time elapsed and close log:
    cwd = os.getcwd() + "/"
    log.writelog("Output files (" + event.photdir + "):")
    log.writelog("Data:")
    log.writelog(" " + cwd + event.eventname + "_pht.dat")
    log.writelog("Log:")
    log.writelog(" " + cwd + logname)

    dt = t.hms_time(time.time() - tini)
    log.writeclose("\nEnd Photometry. Time (h:m:s):  %s " % dt + "  (" +
                   photdir + ")")
    print("--------------  ------------\n")

    os.chdir(owd)

    if event.runp5:
        os.system("python3 poet.py p5 %s/%s" %
                  (event.centerdir, event.photdir))
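
The "only 1D Arrays" comment above drives the pattern repeated in each branch of this function: allocate one flat shared Array per quantity, let every worker fill its own index range, then rebuild the (npos, maxnimpos) view with np.asarray(...).reshape(...) once the joins are done. A stripped-down sketch of that round trip, with invented values standing in for the photometry results:

import numpy as np
from multiprocessing import Array, Process


def fill_chunk(start, end, npos, maxn, flat):
    # each worker writes only its own [start, end) slice of every position row
    for pos in range(npos):
        for i in range(start, end):
            flat[pos * maxn + i] = 100 * pos + i   # stand-in for an aperture flux


if __name__ == "__main__":
    npos, maxn, ncores = 2, 12, 3
    aplev = Array("d", np.zeros(npos * maxn))      # flat 1D shared array
    chunksize = maxn // ncores + 1

    procs = [Process(target=fill_chunk,
                     args=(nc * chunksize, min((nc + 1) * chunksize, maxn),
                           npos, maxn, aplev))
             for nc in range(ncores)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

    # back to a (npos, maxn) array once all workers are done
    print(np.asarray(aplev).reshape(npos, maxn))
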
예제 #41
0
    def __init__(self, size: int):
        self.__data = Array("f", 2 * size)
        self.size = size
        self.__current_index = Value("L", 0)
예제 #42
0
    print(f"Appending {number}")
    # multiprocessing.Array does not have an append method, so we have to use this
    # syntax (in fact the Array has already 10 elements, so we just have to assign them here)
    arr[number] = number


def parse_args():
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=RawDescriptionHelpFormatter)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    shared_variable = Array(typecode_or_type="i", size_or_initializer=10)

    processes = []
    for i in range(10):
        process_name = f"Subprocess-{i}"
        proc = Process(target=target,
                       args=(i, shared_variable),
                       name=process_name)
        processes.append(proc)

    print(f"State of the shared variable BEFORE processing:")
    for item in shared_variable:
        print(item)
    for proc in processes:
        proc.start()
예제 #43
0
def baumWelchP(bitext_sd, s_count, t_table,
               sd_count):  #L is the number of observations

    N = maxTargetSentenceLength(bitext_sd)
    print('N', N)
    N_max = N
    Y = bitext_sd

    (indexMap, biword) = map_bitext_to_int(sd_count)
    sd_size = len(indexMap)
    lastLikelihood = 0

    #a = zeros((N+1,N+1))
    a_array = Array(ct.c_double, ((N + 1) * (N + 1)))
    a_array2 = np.frombuffer(
        a_array.get_obj())  # a_array2 and a_array share the same memory
    a = a_array2.reshape((N + 1, N + 1))  # a and a_array share the same memory

    #pi = zeros(N+1)
    pi = Array('d', N + 1)

    logLikelihood = Value('d', 0.0)

    lastLogLikelihood = Value('d', 0.0)

    L = len(Y)
    #N = len(Y[0][1]) #first sentence x length
    #(a,pi) = initializeUniformly(N)

    for iterations in range(0, 10):
        #E step
        #c = defaultdict(int)
        startTime = time.time()
        print('iteration', iterations)
        logLikelihood.value = 0.0
        #totalGammaOverAllObservations = zeros(N+1)
        totalGammaOverAllObservations = Array('d', [0] * (N + 1))

        #totalGammaDeltaOverAllObservations_t = zeros((N+1,sd_size))
        totalGammaDeltaOverAllObservations_t = Array('d', [0] * ((N + 1) *
                                                                 (sd_size)))

        #totalGammaDeltaOverAllObservations_t_overall_states = zeros(sd_size)
        totalGammaDeltaOverAllObservations_t_overall_states = Array(
            'd', [0] * sd_size)

        totalGammaDeltaOverAllObservations_t_overall_states_over_dest = defaultdict(
            int)

        #totalGamma1OverAllObservations = zeros(N+1)
        totalGamma1OverAllObservations = Array('d', [0] * (N + 1))

        #totalC_j_Minus_iOverAllObservations = zeros((N+1,N+1))
        totalC_j_Minus_iOverAllObservations_array = Array(
            ct.c_double, (N + 1) * (N + 1))
        totalC_j_Minus_iOverAllObservations_array2 = np.frombuffer(
            totalC_j_Minus_iOverAllObservations_array.get_obj())
        totalC_j_Minus_iOverAllObservations = totalC_j_Minus_iOverAllObservations_array2.reshape(
            (N + 1, N + 1))
        for i in range(N + 1):
            for j in range(N + 1):
                totalC_j_Minus_iOverAllObservations[i, j] = 0.0

        #totalC_l_Minus_iOverAllObservations = zeros(N+1)
        totalC_l_Minus_iOverAllObservations = Array('d', [0] * (N + 1))

        intervals = 10
        jobs = []
        lock = RLock()
        length_of_interval = L // intervals
        for i in range(0, intervals - 1):
            start = i * length_of_interval
            end = (i + 1) * length_of_interval
            #print start
            #print end
            p = Process(target=Expectation2,
                        args=(lock, t_table, N, Y, sd_size, indexMap,
                              iterations, totalGammaOverAllObservations,
                              totalGammaDeltaOverAllObservations_t,
                              totalGamma1OverAllObservations,
                              totalC_j_Minus_iOverAllObservations,
                              totalC_l_Minus_iOverAllObservations, start, end,
                              a, pi, logLikelihood, lastLogLikelihood))
            p.start()
            jobs.append(p)

        start = (intervals - 1) * length_of_interval
        end = L
        p = Process(target=Expectation2,
                    args=(lock, t_table, N, Y, sd_size, indexMap, iterations,
                          totalGammaOverAllObservations,
                          totalGammaDeltaOverAllObservations_t,
                          totalGamma1OverAllObservations,
                          totalC_j_Minus_iOverAllObservations,
                          totalC_l_Minus_iOverAllObservations, start, end, a,
                          pi, logLikelihood, lastLogLikelihood))
        p.start()
        jobs.append(p)
        for p in jobs:
            p.join()

        endTime = time.time()

        #print N
        print("%.2gs" % (endTime - startTime))
        #N = len(totalGamma1OverAllObservations)-1
        #print N
        print('last , new ', lastLogLikelihood.value, logLikelihood.value)
        #print 'likelihood difference ', (logLikelihood.value - lastLogLikelihood.value)
        lastLogLikelihood.value = logLikelihood.value

        totalGammaOverAllObservationsOverAllStates = 0.0
        startTime = time.time()
        for i in range(1, N + 1):
            totalGammaOverAllObservationsOverAllStates += totalGammaOverAllObservations[
                i]

        print('likelihood ', logLikelihood.value)
        #lastLikelihood = liklihood
        N = len(totalGamma1OverAllObservations) - 1
        #print N

        # Create expected_counts(d,c) to be consistent with the Berg-Kirkpatrick et al.
        # To make it more memory efficient just keep either totalGammaDeltaOverAllObservations_t_overall_states or expected_counts
        expected_counts = defaultdict(int)

        for i in range(1, N + 1):
            for k in range(sd_size):

                address = i * sd_size + k
                totalGammaDeltaOverAllObservations_t_overall_states[
                    k] += totalGammaDeltaOverAllObservations_t[address]
                (f, e) = biword[k]
                totalGammaDeltaOverAllObservations_t_overall_states_over_dest[
                    e] += totalGammaDeltaOverAllObservations_t[address]

        for k in range(sd_size):
            (f, e) = biword[k]
            expected_counts[(
                f, e)] = totalGammaDeltaOverAllObservations_t_overall_states[k]

        totalGammaOverAllObservationsOverAllStates = 0.0
        for i in range(1, N + 1):
            totalGammaOverAllObservationsOverAllStates += totalGammaOverAllObservations[
                i]

        # M Step
        # We can clear a, b and pi here and then set the values for them
        a = zeros((2 * N + 1, 2 * N + 1))
        pi = zeros(2 * N + 1)
        t_table = defaultdict(int)
        for i in range(1, N + 1):
            #pi[i] = totalGamma1OverAllObservations[i]*(1.0/(2*L))
            pi[i] = 1.0 / (2 * N)

            for j in range(1, N + 1):
                a[(i, j)] = totalC_j_Minus_iOverAllObservations[
                    (i, j)] / totalC_l_Minus_iOverAllObservations[i]

        for i in range(1, N + 1):
            #pi[i + N] = totalGamma1OverAllObservations[i]*(1.0/(2*L))
            pi[i + N] = 1.0 / (2 * N)

        for i in range(1, N + 1):
            for j in range(1, N + 1):
                a[i][i + N] = p0H
                a[i + N][i + N] = p0H

        for i in range(1, N + 1):
            for j in range(1, N + 1):
                a[i + N][j] = (1 - p0H) * a[i][j]

        check_probability(a, N)
        #smooth_transition_probabilities()

        for k in range(sd_size):
            (f, e) = biword[k]
            t_table[(
                f, e
            )] = totalGammaDeltaOverAllObservations_t_overall_states[
                k] / totalGammaDeltaOverAllObservations_t_overall_states_over_dest[
                    e]

        print(iterations)

    return (a, t_table, pi)
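
The frombuffer trick used for a above is worth isolating: a numpy view created with np.frombuffer(shared.get_obj()) aliases the shared buffer, so whatever the worker processes write through the view is visible to the parent, and the RLock handed to each worker serializes the read-modify-write updates. A minimal sketch (invented counts, not the HMM quantities above):

import ctypes

import numpy as np
from multiprocessing import Array, Process, RLock


def accumulate(lock, shared, n, updates):
    # the view aliases the shared buffer; the lock serializes the += updates
    counts = np.frombuffer(shared.get_obj()).reshape(n, n)
    with lock:
        for i, j in updates:
            counts[i, j] += 1.0


if __name__ == "__main__":
    n = 3
    shared = Array(ctypes.c_double, n * n)
    lock = RLock()

    jobs = [Process(target=accumulate,
                    args=(lock, shared, n, [(k % n, (k + 1) % n)] * 5))
            for k in range(4)]
    for p in jobs:
        p.start()
    for p in jobs:
        p.join()

    print(np.frombuffer(shared.get_obj()).reshape(n, n))
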
예제 #44
0
파일: fit_pb.py 프로젝트: piyanatk/sim
import numpy as np
import matplotlib

matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.modeling import fitting, models
from multiprocessing import Array

from opstats.utils.settings import MWA_FREQ_EOR_ALL_80KHZ


CENPIX = 3740
DEGPIX = 0.0160428
ANG = np.arange(-CENPIX, CENPIX) * DEGPIX

freqs = MWA_FREQ_EOR_ALL_80KHZ
shared_array_base = Array('d', freqs.size)
shared_array = np.frombuffer(shared_array_base.get_obj())
beam_dir = '/data3/piyanat/runs/fhd_uvlt50/output_data/'
beamxx_files = [
    beam_dir + 'vis_interp_delta_21cm_l128_0.000h_{:.3f}MHz_Beam_XX.fits'
    .format(f) for f in freqs
    ]
beamyy_files = [
    beam_dir + 'vis_interp_delta_21cm_l128_0.000h_{:.3f}MHz_Beam_YY.fits'
    .format(f) for f in freqs
]


def make_ibeam_cross(xx_beam, yy_beam):
    """Combine XX and YY beam into Stokes I beam and return the cross section.
    Assume a perfect array feed, i.e. I = (XX + YY) / 2.
    """
예제 #45
0
def main():
    """
    Doppler Gesture detector
    """
    global CHUNK
    global RATE
    global CHANNELS

    # Read in command-line arguments and switches
    parser = argparse.ArgumentParser(description='Plays a tone (20kHz default) and then looks for doppler shifts within a window range')
    parser.add_argument('--tone', '-t', dest='tone', action='store', type=int,
                        default=TONE, help='Tone (Hz)')
    parser.add_argument('--window', '-w', dest='window', action='store', type=int,
                        default=WINDOW, help='Window range (Hz)')
    parser.add_argument('--channels', '-c', dest='channels', action='store', type=int,
                        default=CHANNELS, help='Number of channels (1 or 2)')
    parser.add_argument('--size', '-s', dest='size', action='store', type=int,
                        default=CHUNK, help='Sample size')
    parser.add_argument('--rate', '-r', dest='rate', action='store', type=int,
                        default=RATE, help='Sample rate (Hz)')
    args = parser.parse_args()

    CHUNK = args.size
    RATE = args.rate
    CHANNELS = args.channels

    # Verify arguments

    # Check that the args.channels argument has the correct number of channels.
    if args.channels not in [1, 2]:
        print("Invalid number of channels. Please enter as 1 or 2")
        sys.exit(-1)

    if CHANNELS == 2:
        shared_array_base = Array(ctypes.c_double, 2*CHUNK)
    else:
        shared_array_base = Array(ctypes.c_double, CHUNK)

    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())

    if CHANNELS == 2:
        shared_array.dtype = complex

    shared_array = shared_array.reshape(1, CHUNK)

    sync_event = Event()

    # Initialize all processes and then start them
    tonePlayer_p = Process(target=tonePlayer, args=(
        args.tone,
        sync_event,))
    tonePlayer_p.daemon = True

    recorder_p = Process(target=recorder, args=(
        shared_array,
        args.tone,
        args.window,
        sync_event,))
    recorder_p.daemon = True

    if PLOTTER:
        plotter_p = Process(target=pydoppler.plotter, args=(
            shared_array,
            args.channels,
            args.tone,
            args.window,
            args.rate,))
        plotter_p.daemon = True

    if AMBIGUITY:
        ambiguity_p = Process(target=pydoppler.plotamb, args=(
            shared_array,
            args.channels,
            args.tone,
            args.window,
            args.rate,))
        ambiguity_p.daemon = True

    if WATERFALL:
        waterfall_p = Process(target=pydoppler.waterfall, args=(
            shared_array,
            args.channels,
            args.tone,
            args.window,
            args.rate,))
        waterfall_p.daemon = True

    recorder_p.start()
    tonePlayer_p.start()

    if PLOTTER:
        plotter_p.start()
    if AMBIGUITY:
        ambiguity_p.start()
    if WATERFALL:
        waterfall_p.start()

    tonePlayer_p.join()
    recorder_p.join()
    if PLOTTER:
        plotter_p.join()
    if AMBIGUITY:
        ambiguity_p.join()
    if WATERFALL:
        waterfall_p.join()
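
The CHANNELS == 2 branch above leans on the fact that two consecutive float64 values can be reinterpreted in place as one complex128, so after the dtype change the complex view still aliases the same shared buffer. A tiny standalone illustration of that reinterpretation (CHUNK here is just a small made-up value):

import ctypes

import numpy as np
from multiprocessing import Array

CHUNK = 4
shared_array_base = Array(ctypes.c_double, 2 * CHUNK)   # two doubles per sample
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())

shared_array.dtype = complex            # reinterpret float64 pairs as complex128
shared_array = shared_array.reshape(1, CHUNK)

shared_array[0, 0] = 3 + 4j             # the write lands in the shared buffer ...
print(shared_array_base[0], shared_array_base[1])   # ... as 3.0 4.0
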
예제 #46
0
class CRDataLoader:
    def __init__(self, dataset, shuffle=False, num_parallel_batch=2, noise_channel=False):
        # parameters settings
        self.dataset = dataset
        self.config_dict = self.dataset.config_dict
        self.n_class = dataset.dataset.object_cfg.n_class
        self.batch_size = self.config_dict['train_cfg']['batch_size']
        self.radar_configs = self.dataset.dataset.sensor_cfg.radar_cfg
        self.model_configs = self.config_dict['model_cfg']
        self.ramap_rsize = self.radar_configs['ramap_rsize']
        self.ramap_asize = self.radar_configs['ramap_asize']
        self.n_chirps = self.dataset.n_chirps

        if noise_channel:
            self.n_class = self.n_class + 1
        else:
            self.n_class = self.n_class

        self.length = len(dataset) // self.batch_size + (1 if len(dataset) % self.batch_size != 0 else 0)
        self.loading_seq = [i for i in range(len(dataset))]
        if shuffle:
            random.shuffle(self.loading_seq)
        self.restart = False

        assert num_parallel_batch > 0 and type(num_parallel_batch) == int

        self.win_size = dataset.win_size
        n_shradar = num_parallel_batch * self.batch_size * 2 * dataset.win_size * self.n_chirps * self.ramap_rsize \
                    * self.ramap_asize
        self.shradar = Array(ctypes.c_double, n_shradar)
        n_shconf = num_parallel_batch * self.batch_size * self.n_class * dataset.win_size * self.ramap_rsize \
                   * self.ramap_asize
        self.shconf = Array(ctypes.c_double, n_shconf)
        self.num_parallel_batch = num_parallel_batch

    def __len__(self):
        return self.length

    def __iter__(self):
        # NOTE: these two-slot buffers assume num_parallel_batch == 2
        data_dict_stack = [None, None]
        procs = [None, None]

        random.shuffle(self.loading_seq)
        cur_loading_seq = self.loading_seq[:self.batch_size]
        data_dict_stack[0] = self.dataset.getBatch(cur_loading_seq)
        procs[0] = Process(target=self.getBatchArray,
                           args=(self.shradar, self.shconf, data_dict_stack[0], cur_loading_seq, 0))
        procs[0].start()

        index_num = self.num_parallel_batch - 1
        for i in range(self.__len__()):
            index_num = (index_num + 1) % self.num_parallel_batch
            procs[index_num].join()
            procs[index_num] = None

            if i < self.length - self.num_parallel_batch:
                cur_loading_seq = self.loading_seq[
                                  self.batch_size * (i + self.num_parallel_batch - 1): self.batch_size * (
                                          i + self.num_parallel_batch)]
            else:
                cur_loading_seq = self.loading_seq[self.batch_size * (i + self.num_parallel_batch - 1):]

            if i < self.length - self.num_parallel_batch + 1:
                stack_id_next = (index_num + 1) % self.num_parallel_batch
                data_dict_stack[stack_id_next] = self.dataset.getBatch(cur_loading_seq)
                procs[stack_id_next] = Process(target=self.getBatchArray,
                                               args=(self.shradar, self.shconf, data_dict_stack[stack_id_next],
                                                     cur_loading_seq, (index_num + 1) % self.num_parallel_batch))
                procs[stack_id_next].start()

            shradarnp = np.frombuffer(self.shradar.get_obj())
            if self.n_chirps == 1:
                shradarnp = shradarnp.reshape(self.num_parallel_batch, self.batch_size, 2, self.win_size,
                                              self.ramap_rsize, self.ramap_asize)
            else:
                shradarnp = shradarnp.reshape(self.num_parallel_batch, self.batch_size, 2, self.win_size,
                                              self.n_chirps, self.ramap_rsize, self.ramap_asize)
            shconfnp = np.frombuffer(self.shconf.get_obj())
            shconfnp = shconfnp.reshape(self.num_parallel_batch, self.batch_size, self.n_class, self.win_size,
                                        self.ramap_rsize, self.ramap_asize)

            if i < self.length - 1:
                data_length = self.batch_size
            else:
                data_length = len(self.dataset) - self.batch_size * i

            data_dict_return = dict(
                status=data_dict_stack[index_num]['status'],
                image_paths=data_dict_stack[index_num]['image_paths'],
                radar_data=torch.from_numpy(shradarnp[index_num, :data_length, :, :, :, :]),
                anno=dict(
                    obj_infos=data_dict_stack[index_num]['anno']['obj_infos'],
                    confmaps=torch.from_numpy(shconfnp[index_num, :data_length, :, :, :, :]),
                )
            )
            yield data_dict_return

    def __getitem__(self, index):
        if self.restart:
            random.shuffle(self.loading_seq)
        if index == self.length - 1:
            self.restart = True
            results = self.dataset.getBatch(self.loading_seq[self.batch_size * index:])
        else:
            results = self.dataset.getBatch(self.loading_seq[self.batch_size * index: self.batch_size * (index + 1)])
        results = list(results)
        for i in range(2):
            results[i] = torch.from_numpy(results[i])
        return results

    def getBatchArray(self, shradar, shconf, data_dict, loading_seq, index):
        shradarnp = np.frombuffer(shradar.get_obj())
        if self.n_chirps == 1:
            shradarnp = shradarnp.reshape(self.num_parallel_batch, self.batch_size, 2, self.win_size,
                                          self.ramap_rsize, self.ramap_asize)
        else:
            shradarnp = shradarnp.reshape(self.num_parallel_batch, self.batch_size, 2, self.win_size, self.n_chirps,
                                          self.ramap_rsize, self.ramap_asize)
        shconfnp = np.frombuffer(shconf.get_obj())
        shconfnp = shconfnp.reshape(self.num_parallel_batch, self.batch_size, self.n_class, self.win_size,
                                    self.ramap_rsize, self.ramap_asize)
        shradarnp[index, :len(loading_seq), :, :, :, :] = data_dict['radar_data']
        shconfnp[index, :len(loading_seq), :, :, :, :] = data_dict['anno']['confmaps']
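
Stripped of the radar-specific shapes, CRDataLoader is a double-buffering loader: while the consumer reads batch i out of one slot of the shared Array, a worker process fills the other slot with batch i+1, and the two slots swap every iteration. A minimal sketch of that pattern with made-up toy shapes (N_SLOTS, BATCH, FEAT and fill_slot are illustrative names, not taken from the class above):

import ctypes
from multiprocessing import Array, Process

import numpy as np

N_SLOTS, BATCH, FEAT = 2, 3, 4  # two buffer slots, each holding a (BATCH, FEAT) batch


def fill_slot(shared_arr, slot, batch_idx):
    # Child process: write one fake "batch" into its slot of the shared buffer.
    view = np.frombuffer(shared_arr.get_obj()).reshape(N_SLOTS, BATCH, FEAT)
    view[slot] = batch_idx  # stand-in for real data loading


if __name__ == "__main__":
    shared = Array(ctypes.c_double, N_SLOTS * BATCH * FEAT)
    n_batches = 5

    # Pre-fill slot 0 with batch 0 before entering the consumer loop.
    p = Process(target=fill_slot, args=(shared, 0, 0))
    p.start()

    for i in range(n_batches):
        slot = i % N_SLOTS
        p.join()                      # wait for the batch we are about to consume
        if i + 1 < n_batches:         # start producing the next batch into the other slot
            p = Process(target=fill_slot, args=(shared, (i + 1) % N_SLOTS, i + 1))
            p.start()
        view = np.frombuffer(shared.get_obj()).reshape(N_SLOTS, BATCH, FEAT)
        print("batch", i, "->", view[slot, 0, 0])
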
예제 #47
0
    # Reading and selecting parameters to execute
    param = pd.read_csv(args.param_file, header=0, index_col=0)
    if args.job_id is not None:
        param_sel = param.loc[args.job_id]  # Select param by job_id number
    else:
        param_sel = param.iloc[0]  # Select the first data row.
    input_file = param_sel.iloc[0]
    filter_directory = param_sel.iloc[1]
    output_file = param_sel.iloc[2]

    # Read input data cube
    data_da = xr.open_dataarray(input_file)
    data_array = data_da.values

    # Create shared memory array to store filtered data cube
    filtered_data_array_base = Array('d', data_array.size)
    filtered_data_array = np.frombuffer(filtered_data_array_base.get_obj())
    filtered_data_array.shape = data_array.shape

    # Read in list of filter files
    filter_files = glob('{:s}/*.nc'.format(filter_directory))
    filter_files.sort()
    nbins = len(filter_files)

    # Attributes for output files
    # Temporarily read the first filter file to pull its metadata
    da0 = xr.open_dataarray(filter_files[0])
    extra_attrs = {'filter_type': 'wedge',
                   'filter_bandwidth': da0.attrs['filter_bandwidth'],
                   'image_bandwidth': da0.attrs['channel_bandwidth'],
                   'theta': da0.attrs['theta'],
예제 #48
0
        Process.__init__(self)

    # Override Process.run(); this body executes in the child once start() is called
    def run(self):
        for i in range(len(self.intArr)):
            self.intArr[i] = 2 * i       # plain list: modified only in the child's copy
        for i in range(len(self.intArr)):
            self.sharedArr[i] = 2 * i    # shared Array: the parent sees these writes
        time.sleep(0.02)

    def getArr(self):
        return self.intArr

    def getSharedArr(self):
        return self.sharedArr


# %% Main running function
if __name__ == '__main__':
    arr = [1] * 5
    sharedArray = Array('i', 5)
    pr = simpleProcess(arr, sharedArray)
    pr.start()
    print("id of a subprocess:", pr.pid)
    pr.join()
    # The plain list was copied into the child, so the parent still sees the original values
    result = pr.getArr()
    # Slicing the shared Array returns a plain Python list of the values written by the child
    resultShared = pr.getSharedArr()[:]
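
The point of this example is the asymmetry between the two containers: intArr is an ordinary list, so the doubling in run() only happens in the child's pickled copy, while sharedArr lives in shared memory and the child's writes are visible to the parent. A small self-contained check of that behaviour (a sketch, not part of the original snippet):

from multiprocessing import Array, Process


def double_both(plain, shared):
    # Runs in the child: both containers get modified here...
    for i in range(len(plain)):
        plain[i] = 2 * i
    for i in range(len(shared)):
        shared[i] = 2 * i


if __name__ == "__main__":
    plain = [1] * 5
    shared = Array('i', 5)

    p = Process(target=double_both, args=(plain, shared))
    p.start()
    p.join()

    # ...but only the shared Array reflects the change back in the parent.
    print(plain)      # [1, 1, 1, 1, 1]  (the child modified its own copy)
    print(shared[:])  # [0, 2, 4, 6, 8]
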
예제 #49
0
    def __init__(self, model, device_ids=1, n_workers=None,
                 max_batch_size=None, max_image_size=DEFAULT_MAX_IMAGE_SIZE,
                 modder=None):
        """
        Args:
        - model (PyMjModel): MuJoCo model to use for rendering
        - device_ids (int/list): list of device ids to use for rendering.
            One or more workers will be assigned to each device, depending
            on how many workers are requested.
        - n_workers (int): number of parallel processes in the pool. Defaults
            to the number of device ids.
        - max_batch_size (int): maximum number of states that can be rendered
            in batch using .render(). Defaults to the number of workers.
        - max_image_size (int): maximum number pixels in images requested
            by .render()
        - modder (Modder): modder to use for domain randomization.
        """
        self._closed, self.pool = False, None

        if not (modder is None or inspect.isclass(modder)):
            raise ValueError("modder must be a class")

        if isinstance(device_ids, int):
            device_ids = list(range(device_ids))
        else:
            assert isinstance(device_ids, list), (
                "device_ids must be a list of integers")

        n_workers = n_workers or 1
        self._max_batch_size = max_batch_size or (len(device_ids) * n_workers)
        self._max_image_size = max_image_size

        array_size = self._max_image_size * self._max_batch_size

        self._shared_rgbs = Array(ctypes.c_uint8, array_size * 3)
        self._shared_depths = Array(ctypes.c_float, array_size)

        self._shared_rgbs_array = np.frombuffer(
            self._shared_rgbs.get_obj(), dtype=ctypes.c_uint8)
        assert self._shared_rgbs_array.size == (array_size * 3), (
            "Array size is %d, expected %d" % (
                self._shared_rgbs_array.size, array_size * 3))
        self._shared_depths_array = np.frombuffer(
            self._shared_depths.get_obj(), dtype=ctypes.c_float)
        assert self._shared_depths_array.size == array_size, (
            "Array size is %d, expected %d" % (
                self._shared_depths_array.size, array_size))

        worker_id = Value(ctypes.c_int)
        worker_id.value = 0

        if get_start_method() != "spawn":
            raise RuntimeError(
                "Start method must be set to 'spawn' for the "
                "render pool to work. That is, you must add the "
                "following to the _TOP_ of your main script, "
                "before any other imports (since they might be "
                "setting it otherwise):\n"
                "  import multiprocessing as mp\n"
                "  if __name__ == '__main__':\n"
                "    mp.set_start_method('spawn')\n")

        self.pool = Pool(
            processes=len(device_ids) * n_workers,
            initializer=MjRenderPool._worker_init,
            initargs=(
                model.get_mjb(),
                worker_id,
                device_ids,
                self._shared_rgbs,
                self._shared_depths,
                modder))
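
The render pool hands its shared RGB and depth buffers to the workers through the Pool initializer rather than as task arguments, since synchronized Arrays generally cannot be pickled onto a Pool's task queue. A generic sketch of that initializer pattern, independent of mujoco_py (_worker_init, _fill and the array size are illustrative names, not the library's API):

import ctypes
from multiprocessing import Array, Pool

import numpy as np

_shared = None  # set in each worker by the initializer


def _worker_init(shared_arr):
    # Stash the shared buffer in a module-level global of the worker process.
    global _shared
    _shared = shared_arr


def _fill(args):
    # Each task writes its own slot of the shared buffer.
    index, value = args
    view = np.frombuffer(_shared.get_obj())
    view[index] = value


if __name__ == "__main__":
    shared = Array(ctypes.c_double, 8)
    with Pool(processes=2, initializer=_worker_init, initargs=(shared,)) as pool:
        pool.map(_fill, [(i, float(i) ** 2) for i in range(8)])
    print(np.frombuffer(shared.get_obj()))  # [ 0.  1.  4.  9. 16. 25. 36. 49.]
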
예제 #50
0
            if prev_y is None:
                prev_y = y
            if prev_x is None:
                prev_x = x
            diff_x = x - prev_x
            diff_y = y - prev_y
            threads.append((diff_x, diff_y))
            threadnum += 1
            mutex.release()


status = True
while status:
    cap = cv2.VideoCapture(0)

    action_buffer = Array('i', range(0))
    # Initialise pygame
    pygame.init()

    # Set up the window size, background image and title
    # Window size in pixels
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))

    # Window title
    pygame.display.set_caption('劲椎病治疗')

    # Background image
    background = pygame.image.load('resources/image/background.png').convert()

    # "Game Over" background image
    game_over = pygame.image.load('resources/image/gameover.png')
예제 #51
0
    def __init__(self):
        self.ui_running = Value("b", 1)
        Gtk.Window.__init__(self, title="EMPR PC Visualiser")
        self.set_default_size(800, 600)
        self.set_border_width(10)
        self.lights = {}
        self.packet_last_value = Array("B", [0] * 512)

        #Container Box
        main_vertical_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL,
                                    spacing=10)
        main_vertical_box.set_homogeneous(False)
        #Menu Box
        menu_horizontal_box = Gtk.Box(spacing=10)
        menu_horizontal_box.set_homogeneous(False)
        main_vertical_box.pack_start(menu_horizontal_box, False, True, 0)
        #Menu
        label = Gtk.Label("Menu")
        menu_horizontal_box.pack_start(label, False, True, 0)

        self.toolbox_button = button = Gtk.Button(label="Open Toolbox")
        button.connect("clicked", self.on_open_toolbox)
        label.set_mnemonic_widget(button)
        menu_horizontal_box.pack_start(button, False, True, 0)

        button = Gtk.Button(label="Take Single Capture For All")
        button.connect("clicked", self.btn_single_capture)
        label.set_mnemonic_widget(button)
        menu_horizontal_box.pack_start(button, False, True, 0)

        self.cont_cap_button = button = Gtk.Button(
            label="Start Continuous Capture For All")
        button.connect("clicked", self.btn_multi_capture)
        label.set_mnemonic_widget(button)
        menu_horizontal_box.pack_start(button, False, True, 0)

        self.stop_cont_cap_button = button = Gtk.Button(
            label="Stop Continuous Capture For All")
        button.connect("clicked", self.btn_stop_multi_capture)
        label.set_mnemonic_widget(button)
        menu_horizontal_box.pack_start(button, False, True, 0)

        button = Gtk.Button(label="About and Help")
        button.connect("clicked", self.btn_help)
        label.set_mnemonic_widget(button)
        menu_horizontal_box.pack_start(button, False, True, 0)

        #Separator
        separator = Gtk.Separator()
        main_vertical_box.pack_start(separator, False, True, 0)

        #Canvas/Stage
        colour = Gdk.color_parse("#222222")
        rgba = Gdk.RGBA.from_color(colour)
        self.stage = stage = Gtk.Layout()
        stage.set_vexpand(True)
        stage.set_hexpand(True)
        stage.override_background_color(0, rgba)

        main_vertical_box.pack_start(stage, True, True, 0)

        self.add(main_vertical_box)

        self.show_all()

        self.stage_width = stage.get_allocation().width
        self.stage_height = stage.get_allocation().height

        self.timeout_id = GObject.timeout_add(75, self.on_timeout, None)
예제 #52
0
    number_of_days = int(sys.argv[2])

    queue_lock = Lock()
    counter_lock = Lock()
    write_lock = Lock()

    home_queue = Queue()
    market_queue = Queue()
    energy_exchange_queue = Queue()

    clock_ready = Event()
    weather_ready = Event()

    home_counter = Value('i', 0)
    market_ready = Value('b', False)
    temperature = Array('f', range(2))
    season = Array('f', range(2))

    console_connection, market_connection = Pipe()
    console_connection_2, weather_connection = Pipe()

    homes = []

    for i in range(number_of_homes):
        renewable_energy = random.randint(0, 2)
        # 0 = normal home, 1 = solar, 2 = wind
        policy = random.randint(0, 2)
        home_process = Process(
            target=home,
            args=(
                number_of_homes,
예제 #53
0
    def __init__(self):
        self.marker_shape = Array('d', [-1, -1, -1, -1])
예제 #54
0
파일: HackRF.py 프로젝트: zjywlive/urh
    def iq_to_bytes(samples: np.ndarray):
        arr = Array("B", 2 * len(samples), lock=False)
        numpy_view = np.frombuffer(arr, dtype=np.uint8)
        numpy_view[:] = samples.flatten(order="C")
        return arr
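
Given that the function allocates two bytes per sample and copies with flatten(order="C"), it appears to expect samples laid out as N byte pairs. A hypothetical call with a small (N, 2) uint8 array, not taken from the urh sources, just to show the packing:

import numpy as np
from multiprocessing import Array


def iq_to_bytes(samples: np.ndarray):
    # Same body as the snippet above, repeated so this sketch runs on its own.
    arr = Array("B", 2 * len(samples), lock=False)
    numpy_view = np.frombuffer(arr, dtype=np.uint8)
    numpy_view[:] = samples.flatten(order="C")
    return arr


# Hypothetical input: four samples, each a pair of unsigned bytes.
samples = np.array([[0, 255], [1, 254], [2, 253], [3, 252]], dtype=np.uint8)
packed = iq_to_bytes(samples)
print(len(packed))     # 8 == 2 * len(samples)
print(bytes(packed))   # b'\x00\xff\x01\xfe\x02\xfd\x03\xfc'
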
예제 #55
0
pygame.init()
clock = pygame.time.Clock()
screen = pygame.display.set_mode((width, height))

# Initialise the physics stuff
from Physics import World
Jacks_sweet_thread = World(random_plane, send)

from timeit import default_timer as current_time
from multiprocessing import Process, Pool
from time import sleep

import ctypes
import numpy as np

pixel_array_base = Array(ctypes.c_int, width*height)
pixel_array = np.ctypeslib.as_array(pixel_array_base.get_obj())
pixel_array = pixel_array.reshape(width, height)

from pygametranslator import Translator
Jacks_sweet_threads = Translator(recv, pixel_array)

# Start things
Jacks_sweet_thread.start()
Jacks_sweet_threads.start()
update_interval = 1/60
running = True
previous_update = current_time()
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
예제 #56
0
    def flush_array(self):
        # Reset the shared buffer by copying from a freshly allocated (zero-initialised) Array
        self.array[:] = Array(ctypes.c_byte, 1024)
예제 #57
0
파일: array.py 프로젝트: LLjh2017/pythonnet
from multiprocessing import Process, Array
import time

# Create shared memory and copy a list into it:
# shm = Array('i', [1, 2, 3, 4, 5])
# Or reserve space for 5 integers in shared memory:
# shm = Array('i', 5)
# Store a byte string in shared memory:
shm = Array('c', b'Hello')

def fun():
    for i in shm:
        print(i)

p = Process(target=fun)
p.start()
p.join()
print(shm.value)
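
The child in the snippet above only reads the shared string; a small variant, sketched here rather than taken from the original file, shows the writes flowing the other way, with the child overwriting the shared characters and the parent observing the change through shm.value:

from multiprocessing import Array, Process


def shout(buf):
    # Runs in the child: overwrite the shared characters in place.
    buf.value = b'HELLO'


if __name__ == "__main__":
    shm = Array('c', b'Hello')
    p = Process(target=shout, args=(shm,))
    p.start()
    p.join()
    print(shm.value)  # b'HELLO' -- the parent sees the child's write
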
예제 #58
0
            print(error_message(str(e)))

            try:
                print(error_message('Second try'))
                sleep(10)
                extract_page(browser, writer)
            except Exception as e2:
                print(error_message('Second try failed'))
                print(error_message(str(e2)))

                # with error.get_lock():
                #     error.value = 1
                # sleep(24 * 60 * 60)
                # raise e2


if __name__ == '__main__':
    number_of_threads = 4

    error = Value('i', 0)
    current_documents = Array('i', [0] * number_of_threads)

    pool = Pool(processes=number_of_threads)

    pool.map(thread_function, range(681700, 683364 + 1, 10), chunksize=1)
    # pool.map(thread_function, range(1, 200 + 1, 10), chunksize = 1)

    pool.close()

    print(f'Finished - Time: {int(timer() - start_time)}s')
예제 #59
0
parser.add_argument('--offset', type=int, default=8)
parser.add_argument('--pad', type=int, default=24)  # (64 / 2) - (16 / 2)
parser.add_argument('--steps', type=int, default=256)
parser.add_argument('--relax', type=int, default=3)
parser.add_argument('--n_thread', type=int, default=8)
args = parser.parse_args()
print(args)

result_dir = args.result_dir
n_iter = int(result_dir.split('_')[-1])
label_dir = args.map_dir
result_fns = sorted(glob.glob('%s/*.npy' % result_dir))
n_results = len(result_fns)
eval_dir = '%s/evaluation_%d' % (result_dir, n_iter)

all_positive_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_positive = np.ctypeslib.as_array(all_positive_base.get_obj())
all_positive = all_positive.reshape((n_results, args.channel, args.steps))

all_prec_tp_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_prec_tp = np.ctypeslib.as_array(all_prec_tp_base.get_obj())
all_prec_tp = all_prec_tp.reshape((n_results, args.channel, args.steps))

all_true_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_true = np.ctypeslib.as_array(all_true_base.get_obj())
all_true = all_true.reshape((n_results, args.channel, args.steps))

all_recall_tp_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
예제 #60
0
from multiprocessing import Process, Queue, Array
from queue import Empty
from numpy import frombuffer, exp, ndarray, float64
from time import perf_counter
from ..datatypes import Flags

QUEUE = type(Queue())
ARRAY = type(Array('d', 10))
STOP: float = 1  # Queue-get timeout in seconds for process termination.


class SmootherParams:
    def __init__(self, coeff_queue: QUEUE, smooth_coeffs: ARRAY) -> None:
        self.__coeff_queue = self.__queue_type_checked(coeff_queue)
        self.__smooth_coeffs = self.__array_type_checked(smooth_coeffs)

    @property
    def coeff_queue(self) -> QUEUE:
        return self.__coeff_queue

    @property
    def smooth_coeffs(self) -> ARRAY:
        return self.__smooth_coeffs

    @staticmethod
    def __queue_type_checked(value: QUEUE) -> QUEUE:
        if type(value) is not QUEUE:
            raise TypeError('Coeff. queue must be a multiprocessing Queue!')
        if value._closed:
            raise OSError('Coefficient queue must be open on instantiation!')
        return value
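
The QUEUE = type(Queue()) and ARRAY = type(Array('d', 10)) aliases at the top of this module exist because multiprocessing.Queue and multiprocessing.Array are factory functions rather than classes, so they cannot be used directly as the second argument of type/isinstance checks. A short illustration of what the aliases resolve to (a sketch, not part of the module):

from multiprocessing import Array, Queue

QUEUE = type(Queue())           # multiprocessing.queues.Queue
ARRAY = type(Array('d', 10))    # multiprocessing.sharedctypes.SynchronizedArray

if __name__ == "__main__":
    q, a = Queue(), Array('d', 3)
    print(type(q) is QUEUE)     # True
    print(type(a) is ARRAY)     # True
    # The factories themselves are not classes, so this would fail:
    # isinstance(q, Queue)  ->  TypeError
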