Code Example #1
File: sampling.py Project: cdiener/cobrapy
def shared_np_array(shape, data=None, integer=False):
    """Create a new numpy array that resides in shared memory.

    Parameters
    ----------
    shape : tuple of ints
        The shape of the new array.
    data : numpy.array
        Data to copy to the new array. Has to have the same shape.
    integer : boolean
        Whether to use an integer array. Defaults to False which means
        float array.
    """
    size = np.prod(shape)
    if integer:
        array = Array(ctypes.c_int64, int(size))
        np_array = np.frombuffer(array.get_obj(), dtype="int64")
    else:
        array = Array(ctypes.c_double, int(size))
        np_array = np.frombuffer(array.get_obj())
    np_array = np_array.reshape(shape)

    if data is not None:
        if len(shape) != len(data.shape):
            raise ValueError("`data` must have the same dimensions"
                             "as the created array.")
        same = all(x == y for x, y in zip(shape, data.shape))
        if not same:
            raise ValueError("`data` must have the same shape"
                             "as the created array.")
        np_array[:] = data

    return np_array
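
A minimal usage sketch for the helper above (hypothetical values; assumes the module's own imports, i.e. numpy as np, ctypes, and multiprocessing.Array):

data = np.arange(12, dtype=np.float64).reshape(3, 4)
shared = shared_np_array((3, 4), data=data)
# Processes forked after this point see the same underlying buffer, so
# in-place writes like shared[0, 0] = 42.0 are visible to all of them.
print(shared.sum())  # 66.0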
Code Example #2
def get_predict(args, ortho, model):
    xp = cuda.cupy if args.gpu >= 0 else np
    args.h_limit, args.w_limit = ortho.shape[0], ortho.shape[1]
    args.canvas_h = args.h_limit
    args.canvas_w = args.w_limit

    # to share 'canvas' between different processes
    canvas_ = Array(ctypes.c_float, args.canvas_h * args.canvas_w * args.channels)
    canvas = np.ctypeslib.as_array(canvas_.get_obj())
    canvas = canvas.reshape((args.canvas_h, args.canvas_w, args.channels))

    # prepare queues and threads
    patch_queue = Queue(maxsize=5)
    preds_queue = Queue()
    patch_worker = Process(target=create_minibatch, args=(args, ortho, patch_queue))
    canvas_worker = Process(target=tile_patches, args=(args, canvas, preds_queue))
    patch_worker.start()
    canvas_worker.start()

    while True:
        minibatch = patch_queue.get()
        if minibatch is None:
            break
        minibatch = Variable(xp.asarray(minibatch, dtype=xp.float32), volatile=True)
        preds = model(minibatch, None).data
        if args.gpu >= 0:
            preds = xp.asnumpy(preds)
        [preds_queue.put(pred) for pred in preds]

    preds_queue.put(None)
    patch_worker.join()
    canvas_worker.join()

    return canvas
Code Example #3
File: Gravitation.py Project: fnbellomo/GProject
    def steps_multiprocessing(self, number_of_steps, plot, plot_every_n):
        """ 
        Equal to take_steps but using multiprocesing.

        Parameters
        ----------
        number_of_steps : float
                 Total number of time steps.
        plot : object
                 make_plot Object.
        plot_every_n : float
                 Every few time steps are going on a plot.
        """
        
        shared_array_base = Array(ctypes.c_double, len(self.bodies)*2)
        shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
        shared_array = shared_array.reshape(2, len(self.bodies))

        counter = Value(ctypes.c_int64, 0)
        end_plot = Value(ctypes.c_int8, 1)

        old_counter = 1
        rk_fun = Process(target = self.rk_fun_task, args=(number_of_steps, plot_every_n, shared_array, end_plot, counter))
        plot_fun = Process(target = self.plot_fun_task, args=(old_counter, shared_array, end_plot, counter, plot))

        rk_fun.start()
        plot_fun.start()

        rk_fun.join()
        plot_fun.join()
Code Example #4
File: snake_grow.py Project: CellProfiler/cellstar
def conv_single_image(image):
    shared_array_base = Array(ctypes.c_double, image.size)
    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(image.shape)
    shared_array[:] = image

    return shared_array
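
A brief usage sketch (hypothetical; assumes numpy as np plus the snippet's imports). Because the returned array is backed by a multiprocessing.Array, workers forked afterwards mutate the same pixels:

image = np.random.rand(480, 640)
shared_image = conv_single_image(image)
# e.g. Process(target=denoise_inplace, args=(shared_image,)).start(),
# where denoise_inplace is a hypothetical worker writing into the array.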
Code Example #5
def calculatePearsonCorrelationMatrixMultiprocessing(matrix, axis=0, symmetrical=True, getpvalmat=False):

    if axis == 1:
        matrix = matrix.T

    nRows = matrix.shape[0]

    # create shared array that can be used from multiple processes
    output_r_arr = Array(ctypes.c_double, matrix.shape[0] * matrix.shape[0])
    # then in each new process create a new numpy array using:
    output_r = np.frombuffer(output_r_arr.get_obj())  # mp_arr and arr share the same memory
    # make it two-dimensional
    output_r = output_r.reshape((matrix.shape[0], matrix.shape[0]))  # b and arr share the same memory
    # output_r = np.zeros((nRows,nRows))  # old version

    output_p_arr = Array(ctypes.c_double, matrix.shape[0] * matrix.shape[0])
    output_p = np.frombuffer(output_p_arr.get_obj())
    output_p = output_p.reshape((matrix.shape[0], matrix.shape[0]))

    print 'Calculating Pearson R for each row, multithreaded'
    print mp.cpu_count(), 'processes in pool'

    pool = None
    try:
        pool = mp.Pool(mp.cpu_count(),
                       initializer=_init_pool,
                       initargs=(matrix, output_r_arr, output_p_arr,
                                 nRows, symmetrical))

        # bar = tqdm(total=nRows*nRows/2)
        # tqdm.write('Calculating Pearson R for each row, multithreaded')
        for result in tqdm(pool.imap_unordered(_f, range(0, nRows)), total=nRows):
            # bar.update(result)
            pass
        # bar.close()
    finally:  # To make sure processes are closed in the end, even if errors happen
        if pool is not None:
            pool.close()
            pool.join()

    print output_r

    if getpvalmat:
        return output_r, output_p
    else:
        return output_r
Code Example #6
File: ringbuffer.py Project: belevtsoff/rdaclient.py
 def initialize(self, nChannels, nSamples, windowSize=1, nptype='float32'):
     '''
     Initializes the buffer with a new raw array
     
     Parameters
     ----------
     nChannels : int
         dimensionality of a single sample
     nSamples : int
         the buffer capacity in samples
     windowSize : int, optional
         the size of the window to be used for reading the
         data; a pocket of this size will be created
     nptype : string, optional
         the type of the data to be stored
                        
     '''
     self.__initialized = True
     
     # checking parameters
     if nChannels < 1:
         self.logger.warning('nChannels must be a positive integer, setting to 1')
         nChannels = 1
     if nSamples < 1:
         self.logger.warning('nSamples must be a positive integer, setting to 1')
         nSamples = 1
     if windowSize < 1:
         self.logger.warning('windowSize must be a positive integer, setting to 1')
         windowSize = 1
     
     # initializing
     sizeBytes = c.sizeof(BufferHeader) + \
                 (nSamples + windowSize) * nChannels * np.dtype(nptype).itemsize
     
     raw = Array('c', sizeBytes)
     hdr = BufferHeader.from_buffer(raw.get_obj())
     
     hdr.bufSizeBytes = nSamples * nChannels * np.dtype(nptype).itemsize
     hdr.pocketSizeBytes = windowSize * nChannels * np.dtype(nptype).itemsize
     hdr.dataType = datatypes.get_code(nptype)
     hdr.nChannels = nChannels
     hdr.nSamplesWritten = 0
     
     self.initialize_from_raw(raw.get_obj())
Code Example #7
    def initialize(self, n_channels, n_samples, np_dtype='float32'):
        """Initializes the buffer with a new array."""
        logger.debug('Initializing {}x{} {} buffer.'.format(n_channels, n_samples, np_dtype))

        # check parameters
        if n_channels < 1 or n_samples < 1:
            logger.error('n_channels and n_samples must be a positive integer')
            raise SharedBufferError(1)

        size_bytes = ct.sizeof(SharedBufferHeader) + n_samples * n_channels * np.dtype(np_dtype).itemsize
        raw = Array('c', size_bytes)
        hdr = SharedBufferHeader.from_buffer(raw.get_obj())

        hdr.bufSizeBytes = size_bytes - ct.sizeof(SharedBufferHeader)
        hdr.dataType = DataTypes.get_code(np_dtype)
        hdr.nChannels = n_channels
        hdr.nSamples = n_samples
        hdr.position = 0

        self.initialize_from_raw(raw.get_obj())
Code Example #8
def main(argv):
    file_name = argv[0]
    cost = file_read(file_name)
    my_nodes = []
    threads = []
    global old_time
    old_time = time.time()
    global ans_mat
    global ans_mat_base
    global upd_mat
    global upd_mat_base
    ans_mat_base = Array(ctypes.c_float, (num_nodes + 1) * (num_nodes + 1))
    ans_mat = np.ctypeslib.as_array(ans_mat_base.get_obj())
    ans_mat = ans_mat.reshape((num_nodes + 1), (num_nodes + 1))
    for i in xrange(num_nodes + 1):
        for j in xrange(num_nodes + 1):
            ans_mat[i][j] = float('inf')
    upd_mat_base = Array(ctypes.c_int, num_nodes + 1)
    upd_mat = np.ctypeslib.as_array(upd_mat_base.get_obj())
    for i in xrange(num_nodes):
        x = Node(i + 1, cost[(i + 1)], near[(i)])
        t = Process(target=x.node_server)
        threads.append(t)
        my_nodes.append(x)
    for i in xrange(num_nodes):
        t = Process(target=my_nodes[i].node_client)
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    new_file = "output_" + file_name
    f = open(new_file, 'w')
    f.write(str(num_nodes) + '\n')
    for i in xrange(num_nodes):
        st = "" + str(num_nodes - 1) + " "
        for j in range(1, num_nodes + 1):
            if (i + 1) != j:
                st += str(j) + " " + str(ans_mat[i][j]) + " "
        f.write(st + '\n')
    f.close()
Code Example #9
File: train.py Project: cybermatt/deeppose
def load_data(args, input_q, minibatch_q):
    c = args.channel
    s = args.size
    d = args.joint_num * 2

    input_data_base = Array(ctypes.c_float, args.batchsize * c * s * s)
    input_data = np.ctypeslib.as_array(input_data_base.get_obj())
    input_data = input_data.reshape((args.batchsize, c, s, s))

    label_base = Array(ctypes.c_float, args.batchsize * d)
    label = np.ctypeslib.as_array(label_base.get_obj())
    label = label.reshape((args.batchsize, d))

    x_queue, o_queue = Queue(), Queue()
    workers = [Process(target=transform,
                       args=(args, x_queue, args.datadir, args.fname_index,
                             args.joint_index, o_queue))
               for _ in range(args.batchsize)]
    for w in workers:
        w.start()

    while True:
        x_batch = input_q.get()
        if x_batch is None:
            break

        # data augmentation
        for x in x_batch:
            x_queue.put(x)
        j = 0
        while j != len(x_batch):
            a, b = o_queue.get()
            input_data[j] = a
            label[j] = b
            j += 1
        minibatch_q.put([input_data, label])

    for _ in range(args.batchsize):
        x_queue.put(None)
    for w in workers:
        w.join()
Code Example #10
File: multtest.py Project: choice17/upload
def main_arr():
    arr = Array('f', W * H * C)
    arrs = Array('f', [W, H, C, time.time()])
    q = Queue()
    print(W * H * C)
    Process(target=worker_arr, args=(arr, arrs, q)).start()
    for num in range(NUM):
        while q.qsize() == 0:
            time.sleep(0.01)
            continue
        q.get()
        b = np.frombuffer(arr.get_obj(), dtype=np.float32)
Code Example #11
    def __init__(self, model, processes, thinning=100, solver=None,
                 seed=None, **solver_kwargs):
        super(OptGPSampler, self).__init__(model, thinning, seed=seed)
        self.generate_fva_warmup(solver, **solver_kwargs)
        self.np = processes

        # This maps our saved center into shared memory,
        # meaning they are synchronized across processes
        shared_center = Array(ctypes.c_double, len(model.reactions))
        self.center = np.frombuffer(shared_center.get_obj())
        # Has to be like this because we want a copy
        self.center[:] = self.warmup.mean(axis=0)
Code Example #12
    def test_continuous_send_dialog(self):
        self.add_signal_to_form("esaver.complex16s")
        self.__add_first_signal_to_generator()

        port = util.get_free_port()

        gframe = self.form.generator_tab_controller  # type: GeneratorTabController
        for msg in gframe.table_model.protocol.messages:
            msg.pause = 5000

        expected = IQArray(None, np.float32, gframe.total_modulated_samples)
        expected = gframe.modulate_data(expected)
        current_index = Value("L", 0)
        buffer = Array("f", 4 * len(expected))
        ready = Value("i", 0)

        process = Process(target=receive,
                          args=(port, current_index, len(expected), buffer,
                                ready))
        process.daemon = True
        process.start()
        n = 0
        while ready.value == 0 and n < 50:  # ensure server is up
            time.sleep(0.1)
            n += 1

        self.assertTrue(ready.value)

        ContinuousModulator.BUFFER_SIZE_MB = 10

        continuous_send_dialog = self.__get_continuous_send_dialog()
        continuous_send_dialog.device.set_client_port(port)
        continuous_send_dialog.device_settings_widget.ui.spinBoxNRepeat.setValue(
            2)
        continuous_send_dialog.ui.btnStart.click()
        process.join(10)

        # CI sometimes swallows a sample
        self.assertGreaterEqual(current_index.value, len(expected) - 1)

        buffer = np.frombuffer(buffer.get_obj(), dtype=np.float32)
        buffer = buffer.reshape((len(buffer) // 2, 2))

        for i in range(len(expected)):
            self.assertEqual(buffer[i, 0], expected[i, 0], msg=str(i))
            self.assertEqual(buffer[i, 1], expected[i, 1], msg=str(i))

        continuous_send_dialog.ui.btnStop.click()
        continuous_send_dialog.ui.btnClear.click()
        QTest.qWait(1)

        continuous_send_dialog.close()
Code Example #13
def shared_np_array(shape, dtype='float'):
    """Form shared memory 1D numpy array"""
    from multiprocessing import Array
    arr_len = int(np.prod(shape))
    if dtype == 'float':
        shared_array_base = Array(ctypes.c_double, arr_len)
    elif dtype == 'int':
        shared_array_base = Array(ctypes.c_int, arr_len)
    else:
        sys.exit('I don\'t know what happened here either')

    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(*shape)
    return shared_array
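
A usage sketch (hypothetical shapes; assumes numpy as np, ctypes, and sys imported as in the snippet):

weights = shared_np_array((10, 10))            # 'float' -> ctypes.c_double
counts = shared_np_array((100,), dtype='int')  # 'int'   -> ctypes.c_int
# Fork worker processes only after creating the arrays, so they inherit them.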
Code Example #15
File: camstream.py Project: napratin/lumos
class CameraStreamer(Process):
  def __init__(self, stayAliveObj=None, frameCountObj=None, imageObj=None, imageShapeObj=None):
    Process.__init__(self)
    print "CameraStreamer.__init__(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
    # * Store references to and/or create shared objects
    self.stayAliveObj = stayAliveObj if stayAliveObj is not None else Value(c_bool, True)
    self.frameCountObj = frameCountObj if frameCountObj is not None else Value('i', 0)
    self.imageShapeObj = imageShapeObj if imageShapeObj is not None else Array('i', (camera_frame_height, camera_frame_width, camera_frame_depth))
    if imageObj is not None:
      # ** Use supplied shared image object
      self.imageObj = imageObj
    else:
      # ** Create shared image object
      image = np.zeros((camera_frame_height, camera_frame_width, camera_frame_depth), dtype=np.uint8)  # create an image
      imageShape = image.shape  # store original shape
      imageSize = image.size  # store original size (number of elements)
      image.shape = imageSize  # flatten numpy array
      self.imageObj = Array(c_ubyte, image)  # create a synchronized shared array object
  
  def run(self):
    print "CameraStreamer.run(): [pid: {}, OS pid: {}]".format(self.pid, os.getpid())
    # * Interpret shared objects properly (NOTE this needs to happen in the child process)
    self.image = ctypeslib.as_array(self.imageObj.get_obj())  # get flattened image array
    self.image.shape = ctypeslib.as_array(self.imageShapeObj.get_obj())  # restore original shape
    
    # * Open camera and set desired capture properties
    self.camera = cv2.VideoCapture(0)
    if self.camera.isOpened():
      result_width = self.camera.set(cv.CV_CAP_PROP_FRAME_WIDTH, camera_frame_width)
      result_height = self.camera.set(cv.CV_CAP_PROP_FRAME_HEIGHT, camera_frame_height)
      print "CameraStreamer.run(): Camera frame size set to {width}x{height} (result: {result_width}, {result_height})".format(width=camera_frame_width, height=camera_frame_height, result_width=result_width, result_height=result_height)
    else:
      print "CameraStreamer.run(): Unable to open camera; aborting..."
      self.stayAliveObj.value = False
      return
    
    # * Keep reading frames into shared image until stopped or read error occurs
    while self.stayAliveObj.value:
      try:
        #print "CameraStreamer.run(): Frame # {}, stay alive? {}".format(self.frameCountObj.value, self.stayAliveObj.value)  # [debug]
        isOkay, frame = self.camera.read()
        if not isOkay:
          self.stayAliveObj.value = False
        self.frameCountObj.value = self.frameCountObj.value + 1
        self.image[:] = frame
      except KeyboardInterrupt:
        self.stayAliveObj.value = False
    
    # * Clean-up
    self.camera.release()
Code Example #16
File: predict.py Project: WeiyvWang/Road-Extraction
def get_predict(gpu, sat_size, map_size, offset, channels, ortho, model,
                batchsize):

    xp = cuda.cupy if gpu >= 0 else np
    h_limit, w_limit = ortho.shape[0], ortho.shape[1]
    h_num = int(np.floor(h_limit / map_size))
    w_num = int(np.floor(w_limit / map_size))
    canvas_h = h_num * map_size - \
        (sat_size - map_size) + offset - 1
    canvas_w = w_num * map_size - \
        (sat_size - map_size) + offset - 1

    # to share 'canvas' between different processes
    canvas_ = Array(ctypes.c_float, canvas_h * canvas_w * channels)
    canvas = np.ctypeslib.as_array(canvas_.get_obj())
    canvas = canvas.reshape((canvas_h, canvas_w, channels))

    # prepare queues and threads
    patch_queue = Queue(maxsize=5)
    preds_queue = Queue()
    patch_worker = Process(target=create_minibatch,
                           args=(sat_size, map_size, offset, h_limit, w_limit,
                                 batchsize, ortho, patch_queue))
    canvas_worker = Process(target=tile_patches,
                            args=(sat_size, map_size, offset, h_limit, w_limit,
                                  canvas, preds_queue))
    patch_worker.start()
    canvas_worker.start()

    while True:
        minibatch = patch_queue.get()
        if minibatch is None:
            break
        with chainer.using_config('train', False):
            minibatch = Variable(xp.asarray(minibatch, dtype=xp.float32))
            preds = model(minibatch).data
        if gpu >= 0:
            preds = xp.asnumpy(preds)
        [preds_queue.put(pred) for pred in preds]

    preds_queue.put(None)
    patch_worker.join()
    canvas_worker.join()

    canvas = canvas[offset - 1:canvas_h - (offset - 1),
                    offset - 1:canvas_w - (offset - 1)]
    canvas /= offset

    return canvas
Code Example #17
File: multtest.py Project: choice17/upload
def main_arr_l():
    arr = Array('f', W * H * C)
    arrs = Array('f', [W, H, C, time.time()])
    lock = Lock()
    Process(target=worker_arr_l, args=(
        arr,
        arrs,
        lock,
    )).start()
    for num in range(NUM):
        time.sleep(0.1)
        ret = lock.acquire()
        b = np.frombuffer(arr.get_obj(), dtype=np.float32)
        print('b', b[0], ret)  #, np.frombuffer(arrs.get_obj(),np.float32))
        lock.release()
Code Example #18
def analysis(pointxy, values, methodOfAnalysis):
    global file, singleValues, distances, Usethisarray
    t = values.split(" ")
    COL = int(t[1])
    ROW = int(t[0])
    list = []
    for r in range(ROW):
        for c in range(COL):
            list.append((r, c, pointxy[0], pointxy[1]))

    shared_array_base = Array(c_double, ROW * COL)
    singleValues = as_array(shared_array_base.get_obj())
    singleValues = singleValues.reshape(COL, ROW)

    shared_array = Array(c_double, ROW * COL * 20)
    distances = as_array(shared_array.get_obj())
    distances = distances.reshape(COL, ROW, 20)

    with ProcessPoolExecutor() as executor:
        if methodOfAnalysis == "strain":
            executor.map(multiprocessing_func, list)
        else:
            executor.map(intensity, list)
    entry.delete(0, tk.END)
Code Example #19
class Node():
    def __init__(self, node_num, cur_dv, nar):
        self.node_num = node_num
        self.cur_dv_base = Array(ctypes.c_float, (num_nodes + 1))
        self.cur_dv = np.ctypeslib.as_array(self.cur_dv_base.get_obj())
        for r in range(0, num_nodes + 1):
            self.cur_dv[r] = cur_dv[r]
        self.host = ""
        s1 = socket.socket()
        s1.bind(("", 0))
        self.server_port = s1.getsockname()[1]
        ser_port.append(self.server_port)
        s1.close()
        self.neigh_ar = nar
        self.connected = {}
        upd_mat[self.node_num] = 1

    def node_server(self):
        s = socket.socket()
        s.bind((self.host, self.server_port))
        s.listen(5)
        global num_nodes
        while True:
            if (time.time() - old_time) > 14:
                break
            st_tim = time.time()
            fl = 0
            s.settimeout(1)
            conn = 0
            try:
                conn, addr = s.accept()
                data = conn.recv(10000)
            except socket.timeout, e:
                if conn:
                    conn.close()
                fl = 1
            except socket.error, e:
                if conn:
                    conn.close()
                fl = 1
            if fl == 1:
                break
            data = json.loads(data)
            self.update_matrix(data["node"], data["arr"])
            conn.close()
Code Example #20
File: predict.py Project: ncmatson/OSTE
def get_predict(args, ortho, model):
    #xp = cuda.cupy if args.gpu >= 0 else np
    xp = np
    args.h_limit, args.w_limit = ortho.shape[0], ortho.shape[1]
    h_num = int(np.floor(args.h_limit / args.map_size))
    w_num = int(np.floor(args.w_limit / args.map_size))
    args.canvas_h = h_num * args.map_size - \
        (args.sat_size - args.map_size) + args.offset - 1
    args.canvas_w = w_num * args.map_size - \
        (args.sat_size - args.map_size) + args.offset - 1

    # to share 'canvas' between different processes
    canvas_ = Array(ctypes.c_float,
                    args.canvas_h * args.canvas_w * args.channels)
    canvas = np.ctypeslib.as_array(canvas_.get_obj())
    canvas = canvas.reshape((args.canvas_h, args.canvas_w, args.channels))

    # prepare queues and threads
    patch_queue = Queue(maxsize=5)
    preds_queue = Queue()
    patch_worker = Process(target=create_minibatch,
                           args=(args, ortho, patch_queue))
    canvas_worker = Process(target=tile_patches,
                            args=(args, canvas, preds_queue))
    patch_worker.start()
    canvas_worker.start()

    while True:
        minibatch = patch_queue.get()
        if minibatch is None:
            break
        minibatch = Variable(xp.asarray(minibatch, dtype=xp.float32),
                             volatile=True)
        preds = model(minibatch, None).data
        #if args.gpu >= 0:
        #preds = xp.asnumpy(preds)
        #print('gpu arg called, but no functionality provided')
        [preds_queue.put(pred) for pred in preds]

    preds_queue.put(None)
    patch_worker.join()
    canvas_worker.join()

    canvas = canvas[args.offset - 1:args.canvas_h - (args.offset - 1),
                    args.offset - 1:args.canvas_w - (args.offset - 1)]
    canvas /= args.offset

    return canvas
Code Example #21
File: test_shm.py Project: USnark772/RSCompanionV3
def image_processor(index: int, shared_array: Array, shared_dim: tuple,
                    sem1: Semaphore, sem2: Semaphore, line: Array,
                    running: Value):
    shm_size = EDIT_HEIGHT * shared_dim[1] * shared_dim[2]
    np_arr = np.frombuffer(shared_array.get_obj(),
                           count=int(shm_size),
                           dtype=DTYPE).reshape(
                               (EDIT_HEIGHT, shared_dim[1], shared_dim[2]))
    out_name = profile_outdir + "test_shm_proc_" + str(
        index) + "_image_processor" + ".prof"
    pflr = cProfile.Profile()
    pflr.enable()
    while running.value != 0:
        sem1.acquire()
        np.copyto(np_arr, add_overlay(np_arr, line.value.decode(STR_ENCODING)))
        sem2.release()
    pflr.disable()
    pflr.dump_stats(out_name)
Code Example #22
File: colors.py Project: yedell/color-challenge
def display_image(array_a: mp.Array, event_start: mp.Event,
                  event_quit: mp.Event, event_next_img: mp.Event,
                  width: mp.Value, height: mp.Value):
    """
    Continually reads from array_a & displays image w/ OpenCV.imshow()
    Waits for <q> key press to close window or <ENTER> to display next image
    - array_a: shared memory that contains single image to display
    - event_start: event signal to indicate when array_a has an image
    - event_quit: event signal to stop upon shutdown
    - event_next_img: event signal to display next image
    - width: image width (in pixels)
    - height: image height (in pixels)
    
    Return values: None
    """
    # TODO: get rid of this, I feel like I could make do with fewer event signals...
    event_start.wait()  # waits/blocks until array_a has an image

    while not event_quit.is_set():
        with array_a.get_lock():
            # reads image from array_a as np.ndarray with C type unsigned int
            image = np.frombuffer(array_a.get_obj(),
                                  dtype="I").reshape(height.value, width.value,
                                                     3)
            color_text = RGB_COLORS[tuple(image[0][0])]
            # converts RGB image to BGR for OpenCV and displays image in new window
            cv2.imshow(f"Random Color Image Viewer",
                       cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB))

            print(f"Image viewer showing: {color_text}")
            print(f"Press <Enter> to view next image or 'q' to quit...")

        key = cv2.waitKey(0)
        if key == ord('q'):  # if 'q' is pressed, quit
            event_quit.set()
            event_next_img.set()
            break
        elif key == 13:  # if <ENTER> key is pressed, go to next image
            event_next_img.set()
            print("\n*********\n <Enter> key pressed \n*********\n")
            # TODO: eliminate this sleep function!
            time.sleep(
                0.01
            )  # Atm, necessary to block to synchronize with other process.
Code Example #23
File: rbfnnpy.py Project: ethan-jiang-1/basic_models
    def _calculate_phi(self, x):
        C = self.workers
        neurons = self.neurons
        mu = self.mu
        sigmas = self.sigmas
        phi = self.phi = None
        n = self.n


        def heavy_lifting(c, phi):
            s = jobs[c][1] - jobs[c][0]
            for k, i in enumerate(range(int(math.ceil(jobs[c][0])), int(math.ceil(jobs[c][1])))):
                for j in range(neurons):
                    # phi[i, j] = metrics(x[i,:], mu[j])**3)
                    # phi[i, j] = plateSpine(x[i,:], mu[j]))
                    # phi[i, j] = invMultiQuadric(x[i,:], mu[j], sigmas[j]))
                    phi[i, j] = multiQuadric(x[i,:], mu[j], sigmas[j])
                    # phi[i, j] = gaussian(x[i,:], mu[j], sigmas[j]))
                if k % 1000 == 0:
                    percent = true_divide(k, s)*100
                    print(c, ': {:2.2f}%'.format(percent))
            print(c, ': Done')
        
        # distributing the work between 4 workers
        shared_array = Array(c_double, n * neurons)
        phi = frombuffer(shared_array.get_obj())
        phi = phi.reshape((n, neurons))

        jobs = []
        workers = []

        p = n // C
        m = n % C
        for c in range(C):
            jobs.append((c*p, (c+1)*p + (m if c == C-1 else 0)))
            worker = Process(target = heavy_lifting, args = (c, phi))
            workers.append(worker)
            worker.start()

        for worker in workers:
            worker.join()

        return phi
Code Example #24
File: rbfnnpy.py Project: eugeniashurko/rbfnnpy
    def _calculate_phi(self, x):
        C = self.workers
        neurons = self.neurons
        mu = self.mu
        sigmas = self.sigmas
        phi = self.phi = None
        n = self.n


        def heavy_lifting(c, phi):
            s = jobs[c][1] - jobs[c][0]
            for k, i in enumerate(xrange(jobs[c][0], jobs[c][1])):
                for j in xrange(neurons):
                    # phi[i, j] = metrics(x[i,:], mu[j])**3)
                    # phi[i, j] = plateSpine(x[i,:], mu[j]))
                    # phi[i, j] = invMultiQuadric(x[i,:], mu[j], sigmas[j]))
                    phi[i, j] = multiQuadric(x[i,:], mu[j], sigmas[j])
                    # phi[i, j] = gaussian(x[i,:], mu[j], sigmas[j]))
                if k % 1000 == 0:
                    percent = true_divide(k, s)*100
                    print c, ': {:2.2f}%'.format(percent)
            print c, ': Done'
        
        # distributing the work between 4 workers
        shared_array = Array(c_double, n * neurons)
        phi = frombuffer(shared_array.get_obj())
        phi = phi.reshape((n, neurons))

        jobs = []
        workers = []

        p = n / C
        m = n % C
        for c in range(C):
            jobs.append((c*p, (c+1)*p + (m if c == C-1 else 0)))
            worker = Process(target = heavy_lifting, args = (c, phi))
            workers.append(worker)
            worker.start()

        for worker in workers:
            worker.join()

        return phi
Code Example #25
def _update_progress(shared: Array, N: int) -> None:
    pbar_widgets = [
        f"{Fore.GREEN}Computing level variance: {Fore.RESET}",
        f"{Fore.BLUE}",
        Percentage(),
        f" {Fore.RESET}",
        " ",
        Timer(),
        f" {Fore.YELLOW}",
        AnimatedMarker(),
        f"{Fore.RESET}",
    ]
    pbar = ProgressBar(widgets=pbar_widgets, maxval=N).start()
    progress = np.frombuffer(shared.get_obj())
    done = int(progress[0])
    while done < N:  # type: ignore
        done = int(progress[0])
        pbar.update(done)
    pbar.finish()
Code Example #26
File: tensorboard_logger.py Project: raznem/spp-rl
    def _from_mp_array_to_tensor(self, mp_arr: mp.Array) -> torch.tensor:
        """
        Convert multiprocessing.Array with recorded frames into torch.tensor.
        Additionally removes trailing black frames and moves the channel axis.
        Original shape: B x H x W x C
        Output shape:   B x C x H x W

        Args:
            mp_arr (mp.Array): recorded frames in the shared memory.

        Returns:
            torch.tensor: tensor with data ready to use in tensorboard writer
        """
        arr = np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
        arr = arr.reshape(self.frames_shape)
        arr = self._remove_black_frames(arr)
        arr = torch.tensor(arr)
        arr = torch.unsqueeze(arr.permute(0, 3, 1, 2), dim=0)
        return arr
Code Example #27
File: utils.py Project: zijuzhang/gcc-nmf
class SharedMemoryCircularBuffer():
    def __init__(self, shape, initValue=0):
        self.array = Array(ctypes.c_double, int(prod(shape)))
        self.values = frombuffer(self.array.get_obj()).reshape(shape)
        self.values[:] = initValue

        self.numValues = self.values.shape[-1]

        self.index = Value(ctypes.c_int)
        self.index.value = 0

    def set(self, newValues, index=None):
        index = self.index.value if index is None else index
        numNewValues = newValues.shape[-1]
        if index + numNewValues < self.numValues:
            self.values[..., index:index + numNewValues] = newValues
            self.index.value = index + numNewValues
            return self.index.value
        else:
            numAtEnd = self.numValues - index
            numAtStart = numNewValues - numAtEnd

            self.values[..., index:] = newValues[..., :numAtEnd]
            self.values[..., :numAtStart] = newValues[..., numAtEnd:]
            self.index.value = numAtStart
            return self.index.value

    def get(self, index=None):
        index = (self.index.value - 1) % self.numValues if index is None else (
            index % self.numValues)
        #print(self.numValues, self.values.shape)
        return self.values[..., index]

    def getUnraveledArray(self):
        return concatenate([
            self.values[:, self.index.value:],
            self.values[:, :self.index.value]
        ],
                           axis=-1)

    def size(self):
        return self.values.shape[-1]
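
A short usage sketch of the circular buffer (hypothetical sizes; assumes numpy as np in addition to the module's own imports):

buf = SharedMemoryCircularBuffer((2, 8))  # 2 channels, capacity 8 samples
buf.set(np.ones((2, 3)))                  # write 3 samples; index moves to 3
latest = buf.get()                        # most recently written column
# Writes that run past the end wrap around to the start of the buffer.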
Code Example #28
    def test_continuous_send_dialog(self):
        self.add_signal_to_form("esaver.coco")
        self.__add_first_signal_to_generator()

        port = self.get_free_port()

        gframe = self.form.generator_tab_controller
        expected = np.zeros(gframe.total_modulated_samples, dtype=np.complex64)
        expected = gframe.modulate_data(expected)
        current_index = Value("L", 0)
        buffer = Array("f", 4 * len(expected))

        process = Process(target=receive,
                          args=(port, current_index, 2 * len(expected),
                                buffer))
        process.daemon = True
        process.start()
        time.sleep(1)  # ensure server is up

        ContinuousModulator.BUFFER_SIZE_MB = 10

        continuous_send_dialog = self.__get_continuous_send_dialog()
        continuous_send_dialog.device.set_client_port(port)
        continuous_send_dialog.device_settings_widget.ui.spinBoxNRepeat.setValue(
            2)
        continuous_send_dialog.ui.btnStart.click()
        QTest.qWait(1000)
        time.sleep(1)
        process.join(1)

        # CI sometimes swallows a sample
        self.assertGreaterEqual(current_index.value, len(expected) - 1)

        buffer = np.frombuffer(buffer.get_obj(), dtype=np.complex64)
        for i in range(len(expected)):
            self.assertEqual(buffer[i], expected[i], msg=str(i))

        continuous_send_dialog.ui.btnStop.click()
        continuous_send_dialog.ui.btnClear.click()
        QTest.qWait(1)

        continuous_send_dialog.close()
Code Example #29
def run(frame: Array, target_in_sight: Condition):
    archive_path = Path("/mnt/nas/data/birdthings")
    archive_path.mkdir(exist_ok=True, parents=True)
    while True:
        with target_in_sight:
            target_in_sight.wait()

        with frame.get_lock():
            now = datetime.now()
            if not (7 < now.hour < 18):
                continue
            datepart, timepart = str(now).split(" ")
            dest = (
                archive_path / datepart / timepart.split(":")[0] / (timepart + ".jpeg")
            )
            dest.parent.mkdir(exist_ok=True, parents=True)
            Image.frombytes("RGB", camera.RESOLUTION, frame.get_obj()).save(
                dest, format="jpeg"
            )
            logging.info(f'archived image {dest}')
Code Example #30
def setup_variables(mode):
    if mode == PROD or mode == TEST:
        params = parse_options("params-prod.yaml")
    else:
        params = parse_options("params-simu.yaml")

    if 'simulation' in params:
        width = params["simulation"]["capture_width"]
        height = params["simulation"]["capture_height"]
    elif 'ai_video_streamer' in params:
        width = params["ai_video_streamer"]["capture_width"]
        height = params["ai_video_streamer"]["capture_height"]

    image_size = (width, height, 3)
    arr_size = width * height * 3

    shared_array = Array(c_uint8, arr_size)
    shared_image = np.frombuffer(
        shared_array.get_obj(),
        dtype=np.uint8)
    shared_image = np.reshape(shared_image, image_size)

    shared_state = Value(c_bool, True)

    process_manager = Manager()
    shared_data = process_manager.dict({
            "actualDuration": -1,
            "actualDurationFPS": -1,
            "totalDuration": -1,
            "totalDurationFPS": -1,
            "imageCaptureDuration": -1,
            "obsCreationDuration": -1,
            "brainDuration": -1,
            "frontendDuration": -1,
            "status": "Initialized",
            "lowerObs": [],
            "upperObs": [],
            "angles": []
        })

    return shared_image, shared_array, shared_state, shared_data
Code Example #31
File: counters.py Project: wenlintan/trap-control
class Buffer(object):
    def __init__(self, size=1000, data_type="int32"):
        self.data_type = data_type
        self.head = Value("i", 0)
        self.ring_buffer = Array(data_type[0], range(size))
        self.size = size
        for i in range(size):
            self.ring_buffer[i] = 0  # probably really slow but not done often

    def get_head_value(self):
        return self.ring_buffer[self.head.value]

    def get_buffer(self):
        buf = np.frombuffer(self.ring_buffer.get_obj(), dtype=self.data_type)
        return np.concatenate((buf[self.head.value + 1 :], buf[0 : self.head.value]))

    def push(self, v):
        self.head.value = self.head.value + 1
        if self.head.value == self.size:
            self.head.value = 0
        self.ring_buffer[self.head.value] = v  # randint(0,10)
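
A minimal usage sketch (hypothetical; assumes numpy as np and multiprocessing's Value and Array imported as in the snippet):

buf = Buffer(size=5)
for v in (1, 2, 3):
    buf.push(v)
print(buf.get_head_value())  # 3
print(buf.get_buffer())      # ring contents unrolled, oldest values first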
Code Example #32
File: camera.py Project: dflemstr/birdthing
def run(frame: Array, new_frame: Condition):
    with picamera.PiCamera(
            resolution=RESOLUTION) as camera, picamera.array.PiRGBArray(
                camera, size=RESOLUTION) as data_container:
        stream = camera.capture_continuous(data_container,
                                           format="rgb",
                                           use_video_port=True)

        f: picamera.array.PiArrayOutput
        for f in stream:
            with frame.get_lock():
                image = numpy.frombuffer(frame.get_obj(),
                                         dtype=numpy.uint8).reshape(
                                             (RESOLUTION[1], RESOLUTION[0], 3))
                numpy.copyto(image, f.array)

            with new_frame:
                new_frame.notify_all()

            data_container.seek(0)
            data_container.truncate()
Code Example #34
class GridMapper(Process):
    def __init__(self, params: GridMapperParams) -> None:
        super().__init__()
        self.__params = self.__params_type_checked(params)
        self.__stop = Stop()
        self.__output = Array('d', self.__params.grid.size)
        self.__kde = KernelDensity(bandwidth=self.__params.kernel.bandwidth,
                                   kernel=self.__params.kernel.name,
                                   **KDE_PARAMETERS)
        self.__grid = self.__grid_from_params()

    @property
    def stop(self):
        return self.__stop

    @property
    def output(self) -> ARRAY:
        return self.__output

    def run(self) -> None:
        while not self.__stop.is_set():
            if self.__params.data:
                n_points = len(self.__params.data)
                self.__kde.fit(self.__params.data.values())
                density_on_grid = exp(self.__kde.score_samples(self.__grid))
                with self.__output.get_lock():
                    self.__output.get_obj()[:] = n_points * density_on_grid

    def __grid_from_params(self) -> ndarray:
        x_line = linspace(*self.__params.bounds.x_range, self.__params.grid.x)
        y_line = linspace(*self.__params.bounds.y_range, self.__params.grid.y)
        x_grid, y_grid = meshgrid(x_line, y_line)
        return column_stack((x_grid.ravel(), y_grid.ravel()))

    @staticmethod
    def __params_type_checked(value: GridMapperParams) -> GridMapperParams:
        if type(value) is not GridMapperParams:
            raise TypeError('Parameters must be of type <GridMapperParams>!')
        return value
Code Example #36
def align_allreal_vs_allrandom(rd_filename, out_filename):
    """Align all the real sequences against all the random ones.

    Arguments:
    - `rd_filename`: random database filename
    - `out_filename`: output file name -
                   the full score matrix (float32) is saved with np.save
    """
    nrdic = load_random_db(rd_filename)
    id_list = sorted(nrdic.keys())
    assert len(realdic) == len(nrdic), "databases must be same length"
    total = np.max(nrdic.keys())
    shared_sco_base = Array(ctypes.c_float, total * total)
    scomat_ = np.frombuffer(shared_sco_base.get_obj(), dtype=np.float32)
    global scomat
    scomat = scomat_.reshape((total, total))
    align_func = partial(align_ess_vs_real, nrdic=nrdic)
    pool = Pool(processes=6)
    pool.map(align_func, id_list)

    np.save(out_filename, scomat)
Code Example #37
    def _to_share_obj(val):
        dtype  = val.dtype

        if dtype == np.int32:
            c_type = ctypes.c_int32
        elif dtype == np.uint8:
            c_type = ctypes.c_uint8
        elif dtype == np.int64:
            c_type = ctypes.c_longlong
        elif dtype == np.float32:
            c_type = ctypes.c_float
        elif dtype == np.float64:
            c_type = ctypes.c_double
        else:
            raise ValueError(f'dtype `{dtype}` not implemented.')

        #https://research.wmz.ninja/articles/2018/03/on-sharing-large-arrays-when-using-pythons-multiprocessing.html
        X = Array(c_type, val.size)
        X_np = np.frombuffer(X.get_obj(), dtype = dtype).reshape(val.shape)
        np.copyto(X_np, val)

        return X, val.shape, dtype
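
The returned triple is what a consumer process needs to rebuild the view over the same memory. A hypothetical worker-side counterpart (name invented for illustration; assumes numpy as np):

def _from_share_obj(X, shape, dtype):
    # Rebuild the numpy view over the shared buffer inside the child process.
    return np.frombuffer(X.get_obj(), dtype=dtype).reshape(shape)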
Code Example #39
    def compress(self, workflows, floatarray, start, feeder, cpus=None):
        if cpus is None:
            cpus = cpu_count()
        # Generate shared memory object
        mapping = {'float32':ctypes.c_float, 'float64':ctypes.c_double}
        shared_array_base = Array(mapping.get(str(floatarray.dtype)), floatarray.size)
        shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
        shared_array[:] = floatarray.flat[:]
        shared_array = shared_array.reshape(floatarray.shape)

        with futures.ProcessPoolExecutor(max_workers=cpus) as executor:
            jobs = {executor.submit(x.compress, shared_array, start, feeder): str(x) for wfID,x in enumerate(workflows)}
            try:
                for done in futures.as_completed(jobs):
                    name = jobs[done]
                    print('Compression: WF id:{} DONE!'.format(name))
                    try:
                        result = done.result()
                        yield (name, result)
                    except Exception:
                        yield (name, "Error")
            except KeyboardInterrupt:
                _ = [k.cancel() for k in jobs.keys()]
Code Example #40
File: kernel.py Project: kannab98/modeling
    def __multiple_kernels_(kernel, X, Y, model_coeffs):

        for j in range(N):
            # Slice the coefficient arrays along the K axis; the != 1 condition is for phi (it does not depend on band)
            host_constants = tuple([
                model_coeffs[i][edge[j]:edge[j + 1]]
                for i in range(len(model_coeffs))
            ])
            # Create shared array
            arr_share = Array('d', 6 * X.size)
            # arr_share and arr share the same memory
            arr[j] = np.frombuffer(arr_share.get_obj()).reshape((6, X.size))

            X0 = X.flatten()
            Y0 = Y.flatten()
            process[j] = Process(target=init,
                                 args=(kernel, arr[j], X0, Y0, host_constants))
            process[j].start()

        for j in range(N):
            process[j].join()

        return arr
Code Example #41
def align_allran_vs_allran(rd_filename, out_filename):
    """Aling all the sequences in the database
    
    Arguments:
    - `rd_filename`: random database filename
    - `out_filename`: output file name -
                   output is a .npz with 3 arrays
                   ids1 (int16), ids2 (int16) , scores(float32)
    """
    nrdic = load_random_db(rd_filename)
    total = np.max(nrdic.keys())
    i1, i2 = np.triu_indices(total)
    i1 = np.int16(i1 + 1)
    i2 = np.int16(i2 + 1)
    indices = np.vstack((i1, i2)).T
    # scomat = np.zeros((total, total), dtype = np.float16)
    # alternative scomat
    shared_sco_base = Array(ctypes.c_float, total * total)
    # scomat = np.ctypeslib.as_array(shared_sco_base.get_obj())
    global scomat
    scomat_ = np.frombuffer(shared_sco_base.get_obj(), dtype=np.float32)
    scomat = scomat_.reshape((total, total))
    align_func = partial(fill_mat, nrdic=nrdic)
    pool = Pool(processes=4)
    pool.map(align_func, indices, chunksize=1000)

    ids1 = np.zeros(len(indices))
    ids2 = np.zeros(len(indices))
    scores = np.zeros(len(indices))
    for i in xrange(len(indices)):
        id1, id2 = indices[i]
        sco = scomat[id1 - 1, id2 - 1]
        ids1[i] = id1
        ids2[i] = id2
        scores[i] = sco
    np.savez(out_filename, ids1=ids1, ids2=ids2, scores=scores)
Code Example #42
    def __init__(self, num_agents):
        self.num_agents = num_agents

        self.lock = Lock()
        self.ep_lock = Lock()
        global_policy = Array('d',
                              GRID_HEIGHT * GRID_WIDTH * len(ACTIONS),
                              lock=self.lock)
        self.global_policy = np.frombuffer(global_policy.get_obj(),
                                           dtype='d').reshape(
                                               GRID_HEIGHT, GRID_WIDTH,
                                               len(ACTIONS))
        self.global_step_num = Value('i', 0)
        self.global_step_max = T_MAX
        self.episodes = Queue()

        self.processes = [
            Process(target=self.instantiate_agent) for _ in range(num_agents)
        ]
        for process in self.processes:
            process.start()
        for process in self.processes:
            process.join()
        self.episodes.put(STOP)
Code Example #43
File: fhdpost4.py Project: piyanatk/sim
from multiprocessing import Pool, Manager, Array

import numpy as np
from astropy.io import fits
import pandas as pd
from scipy.stats import skew, kurtosis


# Setting for the script
_mgr = Manager()
_config = _mgr.Namespace()
_config.freq, _config.redshift, _config.ion_frac = np.genfromtxt(
    '/data3/piyanat/model/21cm/interpolated/interp_delta_21cm_f_z_xi.csv',
    delimiter=',', unpack=True)
_shared_arr = Array('d', 5 * 7 * len(_config.freq))
_stats_arr = np.frombuffer(_shared_arr.get_obj())
_stats_arr.shape = (5, 7, len(_config.freq))


def _stats(arr, mask=None):
    if mask is not None:
        data = arr[mask]
    else:
        data = arr.ravel()
    return (data.min(), data.max(), data.mean(), data.std(), data.var(),
            skew(data), kurtosis(data))


def _cal_stats(i):
    f = _config.freq[i]
    z = _config.redshift[i]
Code Example #44
File: test_parallel.py Project: sixy6e/stash
            yend = nrows
        for xstep in xstart:
            if xstep + xtile < ncols:
                xend = xstep + xtile
            else:
                xend = ncols
            l.append((ystep,yend,xstep, xend))
    #et = datetime.datetime.now()
    #print 'get_tile2 time taken: ', et - st
    return l 

if __name__ == '__main__':
    a = numpy.random.randint(0,101, (100,100))
    b = Array('i', 100*100) 
    # 'i' holds 32-bit ints, so read the buffer back with a matching dtype
    out = numpy.frombuffer(b.get_obj(), dtype=numpy.int32).reshape(100, 100)
    print out.shape
    print 'a'
    print a
    print 'out'
    print out
    tiles = get_tile2(a, 12,12)
    pool = Pool(processes=2)
    #pool.apply(func, args=(a,tiles, out))
    result=[pool.apply_async(func, (a, tile, out)) for tile in tiles]
    print 'a'
    print a
    print 'out'
    print out
    print 'result'
Code Example #45
File: fhdpost3.py Project: piyanatk/sim
    parser = argparse.ArgumentParser()
    parser.add_argument('--root_dir', type=str,
                        default='/data3/piyanat/runs/post_uvlt50_I_intnorm/',
                        help='root directory of simulation data')
    parser.add_argument('--bins', type=int, default=60,
                        help='Number of bins for the PDF')
    parser.add_argument('--nprocs', type=int, default=8,
                        help='number of processes to spawn')
    args = parser.parse_args()
    _config.freq = s.MWA_FREQ_EOR_ALL_80KHZ
    _config.root_dir = args.root_dir
    _config.pdf_dir = args.root_dir + 'pdf/'
    _config.maps_dir = args.root_dir + 'maps/'
    _config.pdf_bin = args.bins
    check_dir(_config.pdf_dir)
    check_dir(_config.maps_dir)
    _shared_arr_base = Array('d', len(_config.freq) * 5 * _config.pdf_bin * 2)
    _shared_arr = np.frombuffer(_shared_arr_base.get_obj())
    _shared_arr.shape = (len(_config.freq), 5, _config.pdf_bin, 2)
    pool = Pool(args.nprocs)
    pool.map(_make_pdf, range(_config.freq.size))
    pool.close()
    pool.join()
    p4d = Panel4D(_shared_arr,
                  labels=['{:.3f}'.format(f) for f in _config.freq],
                  items=['model', 'gauss', 'fhd', 'fhd_scaled', 'res'],
                  major_axis=np.arange(_config.pdf_bin),
                  minor_axis=['pdf', 'bin_centers'])
    p4d.to_hdf(_config.pdf_dir + 'pdf_p4d.h5', key='pdf', mode='w',
               format='table')
Code Example #46
File: cost_computation.py Project: tomasyany/spectra
class CostComputation(object):
    """Computes the cost matrix."""

    def __init__(self):

        self.all_curves = Listing.index_all_curves()
        index_file = open("outputs/index_file.txt", "w")
        for index, item in enumerate(self.all_curves):
            index_file.write("%i,%s\n" % (index, str(item)))
        index_file.close()
        self.n = len(self.all_curves)

        self.total_costs_matrix_base = Array(ctypes.c_double, self.n*self.n)
        self.total_costs_matrix = numpy.ctypeslib.as_array(
                             self.total_costs_matrix_base.get_obj())
        self.total_costs_matrix = self.total_costs_matrix.reshape(self.n,self.n)


    def set_total_costs_matrix(self, i, j):
        curve_name_i = self.all_curves[i][0]
        curve_type_i = self.all_curves[i][1]
        curve_file_i = fits.open(os.getcwd()+'/memoria/'+
                        'inputs/'+curve_type_i+'/'+curve_name_i+'.fits',
                        memmap=False)

        curve_data_i = Extractor.get_values(curve_file_i)

        curve_file_i.close()

        curve_name_j = self.all_curves[j][0]
        curve_type_j = self.all_curves[j][1]
        curve_file_j = fits.open(os.getcwd()+'/memoria/'+
                        'inputs/'+curve_type_j+'/'+curve_name_j+'.fits',
                        memmap=False)

        curve_data_j = Extractor.get_values(curve_file_j)

        curve_file_j.close()

        x,y = curve_data_i, curve_data_j

        dtw = DTW(x,y)
        cost_matrix = dtw.compute_cost_matrix(DTW.euclidean_distance)
        acc_cost_matrix, cost = dtw.compute_acc_cost_matrix(cost_matrix)

        self.total_costs_matrix[i,j] = cost


    def write_cost_matrix(self):
        begin = timeit.default_timer()

        pool = Pool(processes=cpu_count())
        iterable = []
        for i in range(self.n):
            for j in range(i+1,self.n):
                iterable.append((i,j))
        pool.starmap(self.set_total_costs_matrix, iterable)

        self.total_costs_matrix.dump(os.getcwd()+'/memoria/outputs/cost_matrix')

        end = timeit.default_timer()
        print(end - begin)
Code Example #47
File: alazar_parallel.py Project: SchusterLab/slab
    def acquire_parallel(self, worker_cls, worker_args, result_shape, plot=False):
        """
        :param worker: Function which the subordinate threads execute as their target
        :param proc_fun: Function used by the subordinate threads to process their buffers
        :param result_shape: Shape of the buffer which is the result of the entire acquisition.
        """
        from multiprocessing import Array, Value, Event
        from slab.plotting import ScriptPlotter
        import time

        acquire_buffer_time = self.samples_per_buffer / (self.samples_per_second)
        print('Acquire buffer time %.2e' % acquire_buffer_time)
        print('Inter-buffer time %.2e' % self.seconds_per_buffer)
        print('Duty Cycle', acquire_buffer_time / self.seconds_per_buffer)

        try:
            # I don't know why this needs to happen again?
            channel = 0
            if self.ch1_enabled:
                channel |= 1
            if self.ch2_enabled:
                channel |= 2
            pretriggers = C.c_long(0)
            flags = U32(513)
            ret = self.Az.AlazarBeforeAsyncRead(self.handle,U32(channel),pretriggers,
                                       U32(self.config.samplesPerRecord), 
                                       U32(self.config.recordsPerBuffer),
                                       U32(self.config.recordsPerAcquisition),
                                       flags)            
            
            # Initialize buffers
            buffers = [Array(U8, self.bytes_per_buffer) for _ in range(self.buffer_count)]
            for b in buffers:
                ret = self.Az.AlazarPostAsyncBuffer(self.handle, b.get_obj(), U32(self.bytes_per_buffer))
                self.assert_az(ret, 0, 'Initial Post Buffer')

            res_buffer = Array(C.c_longdouble, result_shape)

            # Initialize threads
            bufs_merged = Value(U32, 1)
            buf_ready_events = [Event() for _ in range(self.buffer_count)]
            buf_post_events = [Event() for _ in range(self.buffer_count)]
            workers = [worker_cls(*(worker_args + (self.config, b, bre, bpe, res_buffer, bufs_merged)))
                       for b, bre, bpe in zip(buffers, buf_ready_events, buf_post_events)]

            for w in workers:
                w.start()
            time.sleep(1)

            import atexit
            atexit.register(lambda: [w.terminate() for w in workers])


            # Initialize things used during capture
            if plot:
                plotter = ScriptPlotter()
                plotter.init_plot('Data', rank=1, accum=False)
            buffers_acquired, buffers_completed, plot_count = 0, 0, 0
            start_time = time.time()

            # Begin capture
            ret = self.Az.AlazarStartCapture(self.handle)
            self.assert_az(ret, 0, "Start Capture")
            unready_count = 0
            while buffers_completed < self.buffers_per_acquisition:

                # Post all completed buffers
                while buf_post_events[buffers_completed % self.buffer_count].is_set():
                    buf_post_events[buffers_completed % self.buffer_count].clear()
                    buf = buffers[buffers_completed % self.buffer_count]
                    with buf.get_lock():
                        ret = self.Az.AlazarPostAsyncBuffer(self.handle, buf.get_obj(), U32(self.bytes_per_buffer))
                        self.assert_az(ret, buffers_acquired, 'Post Buffer')
                    buffers_completed += 1

                # The current buffer rotates in a ring
                buf_idx = buffers_acquired % self.buffer_count
                buf = buffers[buf_idx]

                # Pull data to buffer
                with buf.get_lock():
                    ret = self.Az.AlazarWaitAsyncBufferComplete(self.handle, buf.get_obj(), U32(self.timeout))
                    if ret == 573:
                        unready_count += 1
                        continue # BufferNotReady, go back and try to post some buffers.
                    else:
                        self.assert_az(ret, buffers_acquired, 'Wait Buffer Complete')

                buffers_acquired += 1

                # Tell worker thread to begin processing
                buf_ready_events[buf_idx].set()

                # If a second has elapsed, replot the avg_buffer
                if (time.time() - start_time) / self.seconds_per_plot > plot_count:
                    if plot:
                        with res_buffer.get_lock():
                            plotter.msg(buffers_acquired, buffers_completed, bufs_merged.value)
                            plotter.plot(np.frombuffer(res_buffer.get_obj()), 'Data')
                        plot_count += 1
                    else:
                        print(buffers_acquired, buffers_completed)
                        plot_count += 1
        finally:
            pass
        
#            self.Az.AlazarAbortAsyncRead(self.handle)
#            if buffers_completed:
#                final_time = time.time()
#                print 'Unready Count', unready_count
#                total_time = final_time - start_time
#                print 'Total time', total_time
#                actual_time_per_buffer = total_time / buffers_completed
#                print 'Time per buffer %.2e' % actual_time_per_buffer
#                errf = lambda a, b: abs(a - b) / min(a, b)
#                print 'Perceived overhead %.1f%%' % (errf(actual_time_per_buffer, seconds_per_buffer) * 100)
#            else:
#                print 'No buffers completed'

        res = np.frombuffer(res_buffer.get_obj())
        return res
コード例 #48
ファイル: CaptureAndCompress.py プロジェクト: apevec/RMS
import ctypes
import logging

import numpy as np
from multiprocessing import Array, Value

# BufferedCapture and Compression come from the RMS project's capture and
# compression modules; their import lines were not part of this snippet.


def wait():
    try:
        raw_input("Press Enter to stop...")
    except EOFError:
        pass


if __name__ == "__main__":
    logging.basicConfig(filename="log.log", level=logging.DEBUG)

    sharedArrayBase = Array(ctypes.c_uint8, 256 * 576 * 720)
    sharedArray = np.ctypeslib.as_array(sharedArrayBase.get_obj())
    sharedArray = sharedArray.reshape(256, 576, 720)
    startTime = Value("d", 0.0)

    sharedArrayBase2 = Array(ctypes.c_uint8, 256 * 576 * 720)
    sharedArray2 = np.ctypeslib.as_array(sharedArrayBase2.get_obj())
    sharedArray2 = sharedArray2.reshape(256, 576, 720)
    startTime2 = Value("d", 0.0)

    bc = BufferedCapture(sharedArray, startTime, sharedArray2, startTime2)

    c = Compression(sharedArray, startTime, sharedArray2, startTime2, 499)

    bc.startCapture()
    c.start()
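
    # The wait() helper above is defined but never called in this snippet;
    # a plausible ending (an assumption, not original code) blocks until
    # Enter is pressed and then stops both workers.
    wait()
    bc.stopCapture()  # assumed counterpart of startCapture()
    c.terminate()     # Compression is assumed to subclass Process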
コード例 #49
def main():
    """
    Doppler Gesture detector
    """
    global CHUNK
    global RATE
    global CHANNELS

    # Read in command-line arguments and switches
    parser = argparse.ArgumentParser(description='Plays a tone (20kHz default) and then looks for doppler shifts within a window range')
    parser.add_argument('--tone', '-t', dest='tone', action='store', type=int,
                        default=TONE, help='Tone (Hz)')
    parser.add_argument('--window', '-w', dest='window', action='store', type=int,
                        default=WINDOW, help='Window range (Hz)')
    parser.add_argument('--channels', '-c', dest='channels', action='store', type=int,
                        default=CHANNELS, help='Number of channels (1 or 2)')
    parser.add_argument('--size', '-s', dest='size', action='store', type=int,
                        default=CHUNK, help='Sample size')
    parser.add_argument('--rate', '-r', dest='rate', action='store', type=int,
                        default=RATE, help='Sample rate (Hz)')
    args = parser.parse_args()

    CHUNK = args.size
    RATE = args.rate
    CHANNELS = args.channels

    # Verify arguments: the channel count must be 1 or 2.
    if args.channels not in [1, 2]:
        print("Invalid number of channels. Please enter 1 or 2.")
        sys.exit(-1)

    if CHANNELS == 2:
        shared_array_base = Array(ctypes.c_double, 2*CHUNK)
    else:
        shared_array_base = Array(ctypes.c_double, CHUNK)

    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())

    if CHANNELS == 2:
        shared_array.dtype = complex

    shared_array = shared_array.reshape(1, CHUNK)

    sync_event = Event()

    # Initialize all processes and then start them
    tonePlayer_p = Process(target=tonePlayer, args=(
        args.tone,
        sync_event,))
    tonePlayer_p.daemon = True

    recorder_p = Process(target=recorder, args=(
        shared_array,
        args.tone,
        args.window,
        sync_event,))
    recorder_p.daemon = True

    if PLOTTER:
        plotter_p = Process(target=pydoppler.plotter, args=(
            shared_array,
            args.channels,
            args.tone,
            args.window,
            args.rate,))
        plotter_p.daemon = True

    if AMBIGUITY:
        ambiguity_p = Process(target=pydoppler.plotamb, args=(
            shared_array,
            args.channels,
            args.tone,
            args.window,
            args.rate,))
        ambiguity_p.daemon = True

    if WATERFALL:
        waterfall_p = Process(target=pydoppler.waterfall, args=(
            shared_array,
            args.channels,
            args.tone,
            args.window,
            args.rate,))
        waterfall_p.daemon = True

    recorder_p.start()
    tonePlayer_p.start()

    if PLOTTER:
        plotter_p.start()
    if AMBIGUITY:
        ambiguity_p.start()
    if WATERFALL:
        waterfall_p.start()

    tonePlayer_p.join()
    recorder_p.join()
    if PLOTTER:
        plotter_p.join()
    if AMBIGUITY:
        ambiguity_p.join()
    if WATERFALL:
        waterfall_p.join()
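

# Module-level context assumed by main() but not shown in this snippet.
# The 20 kHz tone matches the parser description; the other values are
# illustrative assumptions, not the project's actual defaults.
CHUNK = 2048         # assumed sample size
RATE = 44100         # assumed sample rate (Hz)
CHANNELS = 1
TONE = 20000         # '20kHz default' per the argparse description
WINDOW = 1000        # assumed doppler search window (Hz)
PLOTTER = True       # feature toggles referenced in main()
AMBIGUITY = False
WATERFALL = False

if __name__ == '__main__':
    main()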
コード例 #50
update_c(c1.ravel(),c2.ravel()) #########

print "Running main loop..."
while runflag:
    #print uflag1.value
    #print uflag2.value
    dat1 = uflag1.value
    dat2 = uflag2.value
    frame = vs.read() #for testing
    #datrecv1 = pipeul1.recv() #Blocking
    #datrecv2 = pipeul2.recv() #Blocking
    #if datrecv1[0] and datrecv2[0]:
    if (dat1 == 2) and (dat2 == 2):
        #pos3d =  calc3d(datrecv2[2].ravel(),datrecv1[2].ravel()) ########
        with uarray1.get_lock():
            arr1 = np.frombuffer(uarray1.get_obj())
        with uarray2.get_lock():
            arr2 = np.frombuffer(uarray2.get_obj())
        pos3d =  calc3d(arr1,arr2)
        #print np.asarray(pos3d)
        imgpts, jac = cv2.projectPoints(np.float32([np.asarray(pos3d)]).reshape(-1,3), rvecs, tvecs, mtx, dist)
        cv2.rectangle(frame,(int(imgpts[0,0,0]) - 2,int(imgpts[0,0,1]) - 2),(int(imgpts[0,0,0]) + 2 ,int(imgpts[0,0,1]) + 2),(255,0,0),1)
    #elif not datrecv1[1] or not datrecv2[1]:
    elif (dat1 == 0) or (dat2 == 0):
        runflag = False
    cv2.imshow('frame', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

print "Waiting for both processes to stop..."
#while pipeul1.poll(0.5) or pipeul2.poll(0.5):
コード例 #51
clock = pygame.time.Clock()
screen = pygame.display.set_mode((width, height))

# Initialise the physics stuff
from Physics import World
Jacks_sweet_thread = World(random_plane, send)

from timeit import default_timer as current_time
from multiprocessing import Array, Process, Pool
from time import sleep

import ctypes
import numpy as np

pixel_array_base = Array(ctypes.c_int, width*height)
pixel_array = np.ctypeslib.as_array(pixel_array_base.get_obj())
pixel_array = pixel_array.reshape(width, height)

from pygametranslator import Translator
Jacks_sweet_threads = Translator(recv, pixel_array)

# Start things
Jacks_sweet_thread.start()
Jacks_sweet_threads.start()
update_interval = 1/60
running = True
previous_update = current_time()
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
コード例 #52
import ctypes
from multiprocessing import Array

import numpy as np


def shared_zeros(n1, n2):
    # Create a 2D numpy array backed by shared memory so it can be modified
    # from different processes.
    shared_array_base = Array(ctypes.c_double, n1 * n2)
    shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
    shared_array = shared_array.reshape(n1, n2)
    return shared_array
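

# Usage sketch (an addition): with a fork start method the shared buffer is
# inherited, so writes made in a child process are visible in the parent.
if __name__ == '__main__':
    from multiprocessing import Process

    grid = shared_zeros(2, 3)

    def fill(value):
        grid[:] = value  # lands in shared memory, not a per-process copy

    p = Process(target=fill, args=(7.0,))
    p.start()
    p.join()
    print(grid)  # [[7. 7. 7.] [7. 7. 7.]] on fork-based platforms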
コード例 #53
class MjRenderPool:
    """
    Utilizes a process pool to render a MuJoCo simulation across
    multiple GPU devices. This can scale the throughput linearly
    with the number of available GPUs. Throughput can also be
    slightly increased by using more than one worker per GPU.
    """

    DEFAULT_MAX_IMAGE_SIZE = 512 * 512  # in pixels

    def __init__(self, model, device_ids=1, n_workers=None,
                 max_batch_size=None, max_image_size=DEFAULT_MAX_IMAGE_SIZE,
                 modder=None):
        """
        Args:
        - model (PyMjModel): MuJoCo model to use for rendering
        - device_ids (int/list): list of device ids to use for rendering.
            One or more workers will be assigned to each device, depending
            on how many workers are requested.
        - n_workers (int): number of workers to spawn per device. The pool
            size is len(device_ids) * n_workers. Defaults to 1.
        - max_batch_size (int): maximum number of states that can be rendered
            in batch using .render(). Defaults to the number of workers.
        - max_image_size (int): maximum number of pixels in images requested
            by .render()
        - modder (Modder): modder to use for domain randomization.
        """
        self._closed, self.pool = False, None

        if not (modder is None or inspect.isclass(modder)):
            raise ValueError("modder must be a class")

        if isinstance(device_ids, int):
            device_ids = list(range(device_ids))
        else:
            assert isinstance(device_ids, list), (
                "device_ids must be a list of integers")

        n_workers = n_workers or 1
        self._max_batch_size = max_batch_size or (len(device_ids) * n_workers)
        self._max_image_size = max_image_size

        array_size = self._max_image_size * self._max_batch_size

        self._shared_rgbs = Array(ctypes.c_uint8, array_size * 3)
        self._shared_depths = Array(ctypes.c_float, array_size)

        self._shared_rgbs_array = np.frombuffer(
            self._shared_rgbs.get_obj(), dtype=ctypes.c_uint8)
        assert self._shared_rgbs_array.size == (array_size * 3), (
            "Array size is %d, expected %d" % (
                self._shared_rgbs_array.size, array_size * 3))
        self._shared_depths_array = np.frombuffer(
            self._shared_depths.get_obj(), dtype=ctypes.c_float)
        assert self._shared_depths_array.size == array_size, (
            "Array size is %d, expected %d" % (
                self._shared_depths_array.size, array_size))

        worker_id = Value(ctypes.c_int)
        worker_id.value = 0

        if get_start_method() != "spawn":
            raise RuntimeError(
                "Start method must be set to 'spawn' for the "
                "render pool to work. That is, you must add the "
                "following to the _TOP_ of your main script, "
                "before any other imports (since they might be "
                "setting it otherwise):\n"
                "  import multiprocessing as mp\n"
                "  if __name__ == '__main__':\n"
                "    mp.set_start_method('spawn')\n")

        self.pool = Pool(
            processes=len(device_ids) * n_workers,
            initializer=MjRenderPool._worker_init,
            initargs=(
                model.get_mjb(),
                worker_id,
                device_ids,
                self._shared_rgbs,
                self._shared_depths,
                modder))

    @staticmethod
    def _worker_init(mjb_bytes, worker_id, device_ids,
                     shared_rgbs, shared_depths, modder):
        """
        Initializes the global state for the workers.
        """
        s = RenderPoolStorage()

        with worker_id.get_lock():
            proc_worker_id = worker_id.value
            worker_id.value += 1
        s.device_id = device_ids[proc_worker_id % len(device_ids)]

        s.shared_rgbs_array = np.frombuffer(
            shared_rgbs.get_obj(), dtype=ctypes.c_uint8)
        s.shared_depths_array = np.frombuffer(
            shared_depths.get_obj(), dtype=ctypes.c_float)

        # avoid a circular import
        from mujoco_py import load_model_from_mjb, MjRenderContext, MjSim
        s.sim = MjSim(load_model_from_mjb(mjb_bytes))
        # attach a render context to the sim (needs to happen before
        # modder is called, since it might need to upload textures
        # to the GPU).
        MjRenderContext(s.sim, device_id=s.device_id)

        if modder is not None:
            s.modder = modder(s.sim, random_state=proc_worker_id)
            s.modder.whiten_materials()
        else:
            s.modder = None

        global _render_pool_storage
        _render_pool_storage = s

    @staticmethod
    def _worker_render(worker_id, state, width, height,
                       camera_name, randomize):
        """
        Main target function for the workers.
        """
        s = _render_pool_storage

        forward = False
        if state is not None:
            s.sim.set_state(state)
            forward = True
        if randomize and s.modder is not None:
            s.modder.randomize()
            forward = True
        if forward:
            s.sim.forward()

        rgb_block = width * height * 3
        rgb_offset = rgb_block * worker_id
        rgb = s.shared_rgbs_array[rgb_offset:rgb_offset + rgb_block]
        rgb = rgb.reshape(height, width, 3)

        depth_block = width * height
        depth_offset = depth_block * worker_id
        depth = s.shared_depths_array[depth_offset:depth_offset + depth_block]
        depth = depth.reshape(height, width)

        rgb[:], depth[:] = s.sim.render(
            width, height, camera_name=camera_name, depth=True,
            device_id=s.device_id)

    def render(self, width, height, states=None, camera_name=None,
               depth=False, randomize=False, copy=True):
        """
        Renders the simulations in batch. If no states are provided,
        the max_batch_size will be used.

        Args:
        - width (int): width of image to render.
        - height (int): height of image to render.
        - states (list): list of MjSimStates; updates the states before
            rendering. Batch size will be number of states supplied.
        - camera_name (str): name of camera to render from.
        - depth (bool): if True, also return depth.
        - randomize (bool): calls modder.randomize() before rendering.
        - copy (bool): return a copy rather than a reference

        Returns:
        - rgbs: NxHxWx3 numpy array of N images in batch of width W
            and height H.
        - depth: NxHxW numpy array of N images in batch of width W
            and height H. Only returned if depth=True.
        """
        if self._closed:
            raise RuntimeError("The pool has been closed.")

        if (width * height) > self._max_image_size:
            raise ValueError(
                "Requested image larger than maximum image size. Create "
                "a new RenderPool with a larger maximum image size.")
        if states is None:
            batch_size = self._max_batch_size
            states = [None] * batch_size
        else:
            batch_size = len(states)

        if batch_size > self._max_batch_size:
            raise ValueError(
                "Requested batch size larger than max batch size. Create "
                "a new RenderPool with a larger max batch size.")

        self.pool.starmap(
            MjRenderPool._worker_render,
            [(i, state, width, height, camera_name, randomize)
             for i, state in enumerate(states)])

        rgbs = self._shared_rgbs_array[:width * height * 3 * batch_size]
        rgbs = rgbs.reshape(batch_size, height, width, 3)
        if copy:
            rgbs = rgbs.copy()

        if depth:
            depths = self._shared_depths_array[:width * height * batch_size]
            depths = depths.reshape(batch_size, height, width)
            if copy:
                depths = depths.copy()
            return rgbs, depths
        else:
            return rgbs

    def close(self):
        """
        Closes the pool and terminates child processes.
        """
        if not self._closed:
            if self.pool is not None:
                self.pool.close()
                self.pool.join()
            self._closed = True

    def __del__(self):
        self.close()
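

# Usage sketch (an assumption, not part of mujoco_py): the spawn start
# method must be selected before other imports, as the constructor's error
# message explains; 'model.xml' is a placeholder path.
if __name__ == '__main__':
    import multiprocessing as mp
    mp.set_start_method('spawn')

    from mujoco_py import load_model_from_path
    model = load_model_from_path('model.xml')  # placeholder model file

    render_pool = MjRenderPool(model, device_ids=2)
    rgbs = render_pool.render(255, 255)  # (batch, H, W, 3) array of images
    render_pool.close()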
コード例 #54
def findStandard(img, downSample=True):
    print 'Downsample'
    if downSample:
        s = img.shape
        img = cv2.resize(img, (int(s[1] * 2560 / s[0]), 2560))

    total = numpy.zeros(img.shape).astype(numpy.uint8)
    
    labimg = cv2.cvtColor(img, cv2.COLOR_BGR2LAB).astype(numpy.float32)
    
    thresh = 200
    margin = 0.2  # the black separating line on the standard is roughly 0.2 of the square width
    squares = []
    output = numpy.copy(img)
    # generate a circular mask for the Hough grid
    a, b = 30, 30
    rad = 30
    y, x = numpy.ogrid[-a:61-a, -b:61-b]
    mask = x*x + y*y <= rad*rad
    horizontalOffsets = []
    verticalOffsets = []
    # for calculating orientation
    if HIGH_MEMORY:
        
        size = labimg.size
        print labimg.size, labimg.shape
        sharedlabimg_base = Array(ctypes.c_float, size)
        p = Pool(initializer=initProcess,initargs=(sharedlabimg_base,))
        sharedlabimg = numpy.frombuffer(sharedlabimg_base.get_obj(), dtype=numpy.float32)
        sharedlabimg = sharedlabimg.reshape(labimg.shape)
        print labimg.size, labimg.shape
        print sharedlabimg.size, sharedlabimg.shape
        sharedlabimg[:,:,:]=labimg[:,:,:]
        '''
        #test to see what sharedlabimg looks like after that
        sharedlabimg = numpy.frombuffer(sharedlabimg_base.get_obj(), dtype=numpy.float32)
        sharedlabimg = sharedlabimg.reshape(labimg.shape)
        cv2.imshow('test2', labimg[::6,::6])
        cv2.imshow('test', sharedlabimg[::6,::6])
        if cv2.waitKey(0) == ord('q'):
            quit()
            '''
            
        nMap = p.map(calcDistance, [(c,labimg.shape) for c in labStandardColors.reshape((labStandardColors.shape[0]*labStandardColors.shape[1],labStandardColors.shape[2]))])
        
    #print 'Find squares of each color'
    for r, row in enumerate(standardColors):
        for c, color in enumerate(row):           
            
            labColor = labStandardColors[r][c]
            print 'Calculating color ', labColor
            #print 'Calculate lab distance'
            if not HIGH_MEMORY:
                n = numpy.linalg.norm(labimg-labColor, axis=2)
            else:
                n = nMap[r*len(row)+c]
            n = n * 255 / n.max()
            n = n.astype(numpy.uint8)
            
            #print 'Threshold'
            #n = cv2.adaptiveThreshold(n, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, int(n.shape[1]*0.02) | 1, 6)
            ret, n = cv2.threshold(n, 50, 255, cv2.THRESH_BINARY_INV)
            #print 'Morphology'
            n = cv2.morphologyEx(n, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT,(3,3)))
            #cv2.imshow(str(i*4+c), cv2.resize(n, dsize=(0,0), fx=0.2, fy=0.2))                
            #print 'Contours'
            contours, h = cv2.findContours(n, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            # sometimes findContours doesn't return numpy arrays
            for i, contour in enumerate(contours):
                contours[i] = numpy.array(contour)
            toDraw = []
            indices = []
            #print 'Process contours'
            for i, contour in enumerate(contours):    
                s = Square()
                s, count = s.processContour(contour, i, contours, h, minWidth=(labimg.shape[1] / 100))
                if s:
                    contours[i] = s
            curSquares = []            
            for square in contours:
                if isinstance(square, Square):                    
                    square.color = (int(color[0]), int(color[1]), int(color[2]))
                    curSquares.append(square)
            labels = numpy.zeros((img.shape[0], img.shape[1])).astype(numpy.uint8)               
            means = []     
            #print 'Calculate LAB'
            for i in range(0,len(curSquares)):            
                cv2.drawContours(labels, [curSquares[i].contour], -1, i+1, -1)
                roi = cv2.boundingRect(curSquares[i].contour)                                
                mean = cv2.mean(labimg[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]] , numpy.array(labels[roi[1]:roi[1]+roi[3], roi[0]:roi[0]+roi[2]] == i+1).astype(numpy.uint8))
                
                curSquares[i].labColor = (mean[0], mean[1], mean[2])
                ##print curSquares[i].labColor, numpy.linalg.norm(curSquares[i].labColor-labColor)
                #cv2.drawContours(total, [curSquares[i].contour], -1, (255,255,255), -1)
                means.append((numpy.linalg.norm(curSquares[i].labColor-labColor), curSquares[i]))
            
            means.sort(key=itemgetter(0), reverse=True)
            colorYield[r][c] += len(means)
            ##print r, c, colorYield[r][c]
            #print 'Add squares, calculate horizontal offsets and vertical offset'
            if len(means) > 0:                                
                for mean in means:
                    square = mean[1]   
                    #check if there is already a square there
                    fail = False
                    for anotherSquare in squares:
                        if numpy.linalg.norm(anotherSquare.center-square.center) < MIN_DIST:
                            #too close!
                            fail = True
                            ##print 'Too close'
                            break
                    if fail:
                        continue
                           
                    
                    squares.append(square)
                    points = mean[1].contour
                    #draw estimated location of colorchecker
                    t = square.center
                    
                    horizontalOffset = ((points[1][0] - points[0][0]) / 2 + (points[2][0] - points[3][0]) / 2) * 1.3
                    verticalOffset = ((points[2][0] - points[1][0]) / 2 + (points[3][0] - points[0][0]) / 2) * 1.3
                    swap = False
                    if len(horizontalOffsets) == 0:
                        swap = abs(horizontalOffset[0]*(1) + horizontalOffset[1]*(0)) < abs(verticalOffset[0]*(1) + verticalOffset[1]*(0))
                        if swap:
                            horizontalOffsets.append(verticalOffset)
                            verticalOffsets.append(horizontalOffset)    
                            horizontalOffset = horizontalOffsets[-1]
                            verticalOffset = verticalOffsets[-1]
                            
                        else:
                            horizontalOffsets.append(horizontalOffset)
                            verticalOffsets.append(verticalOffset)
                        if horizontalOffset[0] < 0:
                            horizontalOffset = -horizontalOffset
                            horizontalOffsets[-1] = -horizontalOffsets[-1]
                        if verticalOffset[1] < 0:
                            verticalOffset = -verticalOffset
                            verticalOffsets[-1] = -verticalOffsets[-1]
                    else:
                        #check to see which one we're closer to, 
                        swap = numpy.abs(horizontalOffset[0]*horizontalOffsets[0][0] + horizontalOffset[1]*horizontalOffsets[0][1]) < numpy.abs(verticalOffset[0]*horizontalOffsets[0][0] + verticalOffset[1]*horizontalOffsets[0][1])
                        ##print horizontalOffset[0]*horizontalOffsets[0][0] + horizontalOffset[1]*horizontalOffsets[0][1], verticalOffset[0]*horizontalOffsets[0][0] + verticalOffset[1]*horizontalOffsets[0][1], horizontalOffsets[0]
                        if swap:
                            horizontalOffsets.append(verticalOffset)
                            verticalOffsets.append(horizontalOffset)                            
                            horizontalOffset = horizontalOffsets[-1]
                            verticalOffset = verticalOffsets[-1]
                        else:
                            horizontalOffsets.append(horizontalOffset)
                            verticalOffsets.append(verticalOffset)    
                        if projectAonB(horizontalOffset, horizontalOffsets[0]) < 0:
                            horizontalOffset = -horizontalOffset
                            horizontalOffsets[-1] = -horizontalOffsets[-1]                        
                        if projectAonB(verticalOffset, verticalOffsets[0]) < 0:
                            verticalOffset = -verticalOffset
                            verticalOffsets[-1] = -verticalOffsets[-1]
            #print 'Done'
    # calculate the estimated location of the colorchecker
    #print 'Calculating offsets'
    horizontalOffsets = numpy.array(horizontalOffsets)
    verticalOffsets = numpy.array(verticalOffsets)
    h, v = numpy.mean(horizontalOffsets, axis=0), numpy.mean(verticalOffsets, axis=0)
    diagonalOffsetDistance = numpy.max(numpy.array([numpy.linalg.norm(h+v), numpy.linalg.norm(v-h)]))
    ##print h,v,diagonalOffsetDistance
    averagePerimeter = numpy.mean(numpy.array([s.perimeter for s in squares]))
    averagePosition = numpy.mean(numpy.array([s.center for s in squares]), axis=0)
    cv2.circle(total, (int(averagePosition[0]), int(averagePosition[1])), 5, (255,128,255), 5)
    meanHO, meanVO = numpy.mean(horizontalOffsets, axis=0), numpy.mean(verticalOffsets, axis=0)
    ##print len(horizontalOffsets), len(verticalOffsets), len(squares)
    a = numpy.array([
        [horizontalOffsets[count], verticalOffsets[count], squares[count]]
        for count in range(0, len(squares))
        if numpy.dot(horizontalOffsets[count], meanHO) / numpy.linalg.norm(horizontalOffsets[count]) / numpy.linalg.norm(meanHO) > MAX_VECTERROR
        and numpy.dot(verticalOffsets[count], meanVO) / numpy.linalg.norm(verticalOffsets[count]) / numpy.linalg.norm(meanVO) > MAX_VECTERROR
        and abs(squares[count].perimeter - averagePerimeter) / averagePerimeter < MAX_NORMALIZED_PERIMETER_ERROR
        and numpy.linalg.norm(averagePosition - squares[count].center) < MAX_NUMBER_SQUARES_FROM_MEAN * diagonalOffsetDistance])
    if len(a) > 0:
        horizontalOffsets = a[:,0]
        verticalOffsets = a[:,1]
        squares = a[:,2]
        h, v = numpy.mean(horizontalOffsets, axis=0), numpy.mean(verticalOffsets, axis=0)
        ##print h, v
        hx = h[0]
        hy = h[1]
        vx = v[0]
        vy = v[1]
        
        basis = numpy.linalg.inv(numpy.matrix([[hx,vx], [hy,vy]]))
        for square in squares:     
            cv2.circle(total, (square.center[0], square.center[1]), 5, (255,255,255), 5)
            cv2.drawContours(total, [square.contour], -1, square.color, 5)    
            #change basis vectors
            
            target = numpy.matrix([[square.center[0]], [square.center[1]]])
            out = basis * target  
            square.gridX = out.item((0,0))
            square.gridY = out.item((1,0))            
        squares = sorted(squares, key=lambda square: square.gridX*square.gridX + square.gridY*square.gridY)
        offsetX = sorted(squares, key=lambda square: square.gridX)[0].gridX
        offsetY = sorted(squares, key=lambda square: square.gridY)[0].gridY
        maxX = 6
        maxY = 4
        squareDict = {}
        topLeftSquare = None
        topLeft = 24
        topRightSquare = None
        topRight = 24
        bottomLeftSquare = None
        bottomLeft = 24
        bottomRightSquare = None
        bottomRight = 24
        totalGX = 0
        totalGY = 0
        totalXOff = 0
        totalYOff = 0
        for square in squares: 
            ##print square.gridX, square.gridY
            square.gridX -= offsetX
            square.gridY -= offsetY    
        count = 0
        tsquares = None
        residuals = 0
        bestresiduals = 1000000
        besttsquares = None
        bestmaxX = 0
        bestmaxY = 0
        #print 'Find corner squares, residuals and offsets'
        # smart finding of maxX and maxY gives the best possible chance of finding a fit
        while count < 4:
            minX = numpy.mean([square.gridX for square in squares if square.gridX >= count+0.5  and square.gridX <= count+1.5]) / (count+1)
            minY = numpy.mean([square.gridY for square in squares if square.gridY >= count+0.5 and square.gridY <= count+1.5]) / (count+1)
            count += 1
            if math.isnan(minX) or math.isnan(minY):
                continue
            
            maxX = 0
            maxY = 0
            residuals = 0
            tsquares = deepcopy(squares)
            for square in tsquares:    
                tx = square.gridX
                ty = square.gridY
                residuals += abs(square.gridX/minX-round(square.gridX/minX)) + abs(square.gridY/minY-round(square.gridY/minY))
                square.gridX = round(square.gridX/minX)
                square.gridY = round(square.gridY/minY)                
                gridX = int(square.gridX)
                gridY = int(square.gridY)    
                ##print tx, ty, minX, minY, gridX, gridY                
                if int(square.gridX) > maxX:
                    maxX = int(square.gridX)
                    totalXOff = tx
                if int(square.gridY) > maxY:
                    maxY = int(square.gridY)
                    totalYOff = ty
                if not gridY in squareDict:
                    squareDict[gridY] = {}
                if not gridX in squareDict[gridY]:
                    squareDict[gridY][gridX] = square                
                if 4-gridX + 6-gridY < bottomRight:
                    bottomRight = 4-gridX + 6-gridY
                    bottomRightSquare = square
                if gridX + gridY < topLeft:
                    topLeft = gridX + gridY
                    topLeftSquare = square
                if 4-gridX + gridY < topRight:
                    topRight = 4-gridX + gridY
                    topRightSquare = square
                if gridX + 6-gridY < bottomLeft:
                    bottomLeft = gridX + 6-gridY
                    bottomLeftSquare = square   
            if residuals < bestresiduals and ((maxX < 6 and maxY < 4) or (maxX < 4 and maxY < 6)):
                bestresiduals = residuals
                besttsquares = tsquares
                bestmaxX = maxX
                bestmaxY = maxY
            ##print maxX, maxY, 'max'
        
        #compare to base case
        maxX = 0
        maxY = 0
        residuals = 0
        #print 'Find more residuals'
        tsquares = deepcopy(squares)
        for square in tsquares:    
            tx = square.gridX
            ty = square.gridY
            ##print tx,ty
            residuals += abs(square.gridX-round(square.gridX)) + abs(square.gridY-round(square.gridY))
            square.gridX = round(square.gridX)
            square.gridY = round(square.gridY)
            gridX = int(square.gridX)
            gridY = int(square.gridY)     
            if int(square.gridX) > maxX:
                maxX = int(square.gridX)
                totalXOff = tx
            if int(square.gridY) > maxY:
                maxY = int(square.gridY)
                totalYOff = ty
            if not gridY in squareDict:
                squareDict[gridY] = {}
            if not gridX in squareDict[gridY]:
                squareDict[gridY][gridX] = square                
            if 4-gridX + 6-gridY < bottomRight:
                bottomRight = 4-gridX + 6-gridY
                bottomRightSquare = square
            if gridX + gridY < topLeft:
                topLeft = gridX + gridY
                topLeftSquare = square
            if 4-gridX + gridY < topRight:
                topRight = 4-gridX + gridY
                topRightSquare = square
            if gridX + 6-gridY < bottomLeft:
                bottomLeft = gridX + 6-gridY
                bottomLeftSquare = square   
        if residuals < bestresiduals and ((maxX < 6 and maxY < 4) or (maxX < 4 and maxY < 6)):
            bestresiduals = residuals
            besttsquares = tsquares
            bestmaxX = maxX
            bestmaxY = maxY
        squares = besttsquares    
        maxX = bestmaxX
        maxY = bestmaxY
        #print 'Found final maxX and maxY'      
        if maxX != 0:
            ax = totalXOff / float(maxX)
        else:
            ax = 1
        if maxY != 0:
            ay = totalYOff / float(maxY)
        else:
            ay = 1
        recalculatedHorizontalOffset = ax * h
        recalculatedVerticalOffset = ay * v
        ##print recalculatedHorizontalOffset, recalculatedVerticalOffset
        #connect them all 
        for square in squares:  
            for nsquare in squares:
                if abs(nsquare.gridX-square.gridX)+abs(nsquare.gridY-square.gridY) == 1:
                    cv2.line(total, square.tupleCenter, nsquare.tupleCenter, 255, 5)
        #make fake squares
        #print 'Make fake squares'
        for cy in range(-6,6):
            for cx in range(-6, 6):
                if cy in squareDict and cx in squareDict[cy]:
                    pass
                else:
                    if not cy in squareDict:
                        squareDict[cy] = {}
                    s = Square()                    
                    nearestIndex = sorted([(abs(square.gridX-cx)+abs(square.gridY-cy), i) for i, square in enumerate(squares)], key=itemgetter(0))[0][1]
                    s.center = ((cx-squares[nearestIndex].gridX)*recalculatedHorizontalOffset+(cy-squares[nearestIndex].gridY)*recalculatedVerticalOffset+squares[nearestIndex].center).astype(int)
                    
                    if s.center[0] > 0 and s.center[1] > 0 and s.center[0] < labimg.shape[1] and s.center[1] < labimg.shape[0]:
                        s.labColor = labimg[s.center[1], s.center[0]]
                        squareDict[cy][cx] = s
                        cv2.circle(total, (s.center[0], s.center[1]), 5, (255,255,255), 5)
                        ##print cx,cy, 'make'
        possibilities = [] 
        #print 'Check possibilities'
        for i, rotatedPossible in enumerate(labRotated):
            width = rotatedPossible.shape[1]
            height = rotatedPossible.shape[0]
            tmaxX = maxX
            tmaxY = maxY
            for y in range(0,height-tmaxY):
                for x in range(0,width-tmaxX):
                    terror = 0
                    count = 0
                    for cy in range(0,tmaxY+1):
                        for cx in range(0,tmaxX+1):
                            if cy in squareDict and cx in squareDict[cy]:
                                square = squareDict[cy][cx]
                                labColor = rotatedPossible[y+cy][x+cx]
                                terror += numpy.linalg.norm(square.labColor-labColor)                                   
                                count += 1
                           
                    ##print terror, count, width, height, tmaxX, tmaxY, i, x, y
                    possibilities.append([1/float(count), terror/float(count), (y,x,i)])
        #print 'Find best possibilities'    
        possibilities.sort(key=itemgetter(0,1))        
        rotMatrix = numpy.array([[0,-1],[1,0]])
        if len(possibilities) > 0:
            ans = possibilities[0][2]
            col = numpy.array([[0,0],[0,5],[3,5],[3,0]])
            regPoints = numpy.matrix(numpy.transpose(col))
            ##print numpy.linalg.matrix_power(rotMatrix, ans[2])            
            regPoints = numpy.array(numpy.transpose(numpy.linalg.matrix_power(rotMatrix, ans[2])*regPoints))            
            regPoints[:,0] -= numpy.min(regPoints[:,0])
            regPoints[:,1] -= numpy.min(regPoints[:,1])
            ##print regPoints
            position = squares[0].center
            xoff = squares[0].gridX
            yoff = squares[0].gridY
            for i, regPoint in enumerate(regPoints):
                regPoint -= numpy.array([ans[1], ans[0]])
                regPoint -= numpy.array([xoff, yoff])
                regPoint = ((regPoint[1]*recalculatedVerticalOffset +regPoint[0]*recalculatedHorizontalOffset)+position).astype(int)
                color = (int(standardColors[col[i][1],col[i][0]][0]),int(standardColors[col[i][1],col[i][0]][1]),int(standardColors[col[i][1],col[i][0]][2]))   
                #snap to squares
                snap = []
                for square in squares:
                    if numpy.linalg.norm(square.center-regPoint) < MIN_DIST*5:
                        snap.append(square)
                for cy in squareDict:
                    for cx in squareDict[cy]:
                        square = squareDict[cy][cx]
                        if numpy.linalg.norm(square.center-regPoint) < MIN_DIST*5:
                            snap.append(square) 
                if len(snap) > 0:
                    regPoint = sorted(snap, key = lambda square:numpy.linalg.norm(square.center-regPoint))[0].center
                regPoints[i] = regPoint                        
                cv2.circle(total, (regPoint[0], regPoint[1]), 20, color ,20)
            pt = cv2.getPerspectiveTransform(numpy.array(regPoints).astype(numpy.float32), numpy.array([[50,50], [50,550], [350,550], [350,50]]).astype(numpy.float32))
            
            total = cv2.warpPerspective(img, pt, (400,600))
            
        else:
            print 'NO POSSIBILITIES FOUND', width, height, maxX, maxY, possibilities
            total = cv2.resize(total, dsize=(0,0), fx=0.2, fy=0.2)

    return total
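

# Minimal invocation sketch (an addition): run the detector on a BGR image
# loaded with OpenCV; 'standard.jpg' is a placeholder path.
if __name__ == '__main__':
    img = cv2.imread('standard.jpg')
    warped = findStandard(img)
    cv2.imwrite('standard_warped.jpg', warped)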
コード例 #55
ファイル: fit_pb.py プロジェクト: piyanatk/sim
import numpy as np
from multiprocessing import Array

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.modeling import fitting, models

from opstats.utils.settings import MWA_FREQ_EOR_ALL_80KHZ


CENPIX = 3740
DEGPIX = 0.0160428
ANG = np.arange(-CENPIX, CENPIX) * DEGPIX

freqs = MWA_FREQ_EOR_ALL_80KHZ
shared_array_base = Array('d', freqs.size)
shared_array = np.frombuffer(shared_array_base.get_obj())
beam_dir = '/data3/piyanat/runs/fhd_uvlt50/output_data/'
beamxx_files = [
    beam_dir + 'vis_interp_delta_21cm_l128_0.000h_{:.3f}MHz_Beam_XX.fits'
    .format(f) for f in freqs
    ]
beamyy_files = [
    beam_dir + 'vis_interp_delta_21cm_l128_0.000h_{:.3f}MHz_Beam_YY.fits'
    .format(f) for f in freqs
]


def make_ibeam_cross(xx_beam, yy_beam):
    """Combine XX and YY beam into Stokes I beam and return the cross section.
    Assume perfect array feed, i.e. I = (XX + YY) / 2. 2 in the denominator
    is there to renormalized the beam peak to 1.
コード例 #56
ファイル: apply_filters3.py プロジェクト: piyanatk/sim
    param = pd.read_csv(args.param_file, header=0, index_col=0)
    if args.job_id is not None:
        param_sel = param.loc[args.job_id]  # Select param by job_id number
    else:
        param_sel = param.iloc[0]  # Select the first data row.
    input_file = param_sel.iloc[0]
    filter_directory = param_sel.iloc[1]
    output_file = param_sel.iloc[2]

    # Read input data cube
    data_da = xr.open_dataarray(input_file)
    data_array = data_da.values

    # Create shared memory array to store filtered data cube
    filtered_data_array_base = Array('d', data_array.size)
    filtered_data_array = np.frombuffer(filtered_data_array_base.get_obj())
    filtered_data_array.shape = data_array.shape

    # Read in list of filter files
    filter_files = glob('{:s}/*.nc'.format(filter_directory))
    filter_files.sort()
    nbins = len(filter_files)

    # Attributes for output files
    # Temporary read in the first filter to read filter information
    da0 = xr.open_dataarray(filter_files[0])
    extra_attrs = {'filter_type': 'wedge',
                   'filter_bandwidth': da0.attrs['filter_bandwidth'],
                   'image_bandwidth': da0.attrs['channel_bandwidth'],
                   'theta': da0.attrs['theta'],
                   'theta_unit': da0.attrs['theta_unit'],
コード例 #57
    def post_process(self):
        """

        :return:
        """

        nb_features = len(self.timeline)
        nb_lambda = len(self.lambdas)
        density = [0]*nb_features
        decision = [0]*nb_features
        decision_smooth = 5
        processes = []
        result = Queue(10000)
        maxproc = 10
        boundaries = []
        self.features = log(self.features)
        determinants = Array(c_double, nb_features*self.winsizemax)
        reste = [l for l in self.lambdas]

        while len(reste) > 0:
            l = reste[0]
            processes = [p for p in processes if p.is_alive()]
            if len(processes) < maxproc:
                p = Process(target=scalable_bic_segmentation, name='lambda %.2f' % l, args=(self.features, l, 120,
                                                                                            self.winsizemax,
                                                                                            self.enlargment_step,
                                                                                            result, determinants ))
                reste.remove(l)
                processes += [p]
                p.start()

            if not result.empty():
                while not result.empty():
                    boundaries += [result.get()]

        # map() is lazy in Python 3, so join each process explicitly.
        for p in processes:
            p.join()
        while not result.empty():
            boundaries += [result.get()]

        for _, _, t in sorted(boundaries):
            density[t] += 1.0/float(nb_lambda)

        tmp = [d for d in density]

        while max(tmp) > 0:
            i = argmax(tmp)
            start, stop = max([0, i-decision_smooth]), min([nb_features, i+decision_smooth])
            decision[i] = sum(tmp[start:stop])
            for p in range(start, stop):
                tmp[p] = 0

        precompute = frombuffer(determinants.get_obj()).reshape((nb_features, self.winsizemax))
        precompute[precompute > 0] = 1

        segments = []
        current_start = 0

        for i, v in enumerate(decision):
            if v > self.thvote:
                segments += [(current_start, i, len(segments) % 2)]
                current_start = i

        if current_start < (nb_features-1):
            segments += [(current_start, nb_features-1, len(segments) % 2)]
        if self.regroup:
            segments = bic_clustering(self.features, segments, self.lambdas)

        segments = sorted(map(lambda x: (x[0]*self.wStep, x[1]*self.wStep, x[2]), segments), key=lambda x:x[0])

        segs = self.new_result(data_mode='label', time_mode='segment')
        label = set([v[2] for v in segments])
        segs.data_object.label_metadata.label = {lab: str(lab) for lab in label}
        segs.data_object.time = array([s[0] for s in segments])
        segs.data_object.duration = array([s[1] - s[0] for s in segments])
        segs.data_object.label = array([s[2] for s in segments])
        self.add_result(segs)
コード例 #58
ファイル: RingBuffer.py プロジェクト: jopohl/urh
class RingBuffer(object):
    """
    A RingBuffer containing complex values.
    """
    def __init__(self, size: int):
        self.__data = Array("f", 2*size)
        self.size = size
        self.__left_index = Value("L", 0)
        self.__right_index = Value("L", 0)
        self.__length = Value("L", 0)

    def __len__(self):
        return self.__length.value

    @property
    def left_index(self):
        return self.__left_index.value

    @left_index.setter
    def left_index(self, value):
        self.__left_index.value = value % self.size

    @property
    def right_index(self):
        return self.__right_index.value

    @right_index.setter
    def right_index(self, value):
        self.__right_index.value = value % self.size

    @property
    def is_empty(self) -> bool:
        return len(self) == 0

    @property
    def space_left(self):
        return self.size - len(self)

    @property
    def data(self):
        return np.frombuffer(self.__data.get_obj(), dtype=np.complex64)

    @property
    def view_data(self):
        """
        Get a representation of the ring buffer for plotting. This is expensive, so it should only be used in frontend
        :return:
        """
        left, right = self.left_index, self.left_index + len(self)
        if left > right:
            left, right = right, left

        data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
        return np.concatenate((data[left:right], data[right:], data[:left]))

    def clear(self):
        self.left_index = 0
        self.right_index = 0

    def will_fit(self, number_values: int) -> bool:
        return number_values <= self.space_left

    def push(self, values: np.ndarray):
        """
        Push values to buffer. If buffer can't store all values a ValueError is raised
        """
        n = len(values)
        if len(self) + n > self.size:
            raise ValueError("Too much data to push to RingBuffer")

        slice_1 = np.s_[self.right_index:min(self.right_index + n, self.size)]
        slice_2 = np.s_[:max(self.right_index + n - self.size, 0)]
        with self.__data.get_lock():
            data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
            data[slice_1] = values[:slice_1.stop - slice_1.start]
            data[slice_2] = values[slice_1.stop - slice_1.start:]
            self.right_index += n

        self.__length.value += n

    def pop(self, number: int, ensure_even_length=False):
        """
        Pop number of elements. If there are not enough elements, all remaining elements are returned and the
        buffer is cleared afterwards. If buffer is empty, an empty numpy array is returned.

        If number is -1 (or any other value below zero) the complete buffer is returned.
        """
        if ensure_even_length:
            number -= number % 2

        if len(self) == 0 or number == 0:
            return np.array([], dtype=np.complex64)

        if number < 0:
            # take everything
            number = len(self)
        else:
            number = min(number, len(self))

        with self.__data.get_lock():
            data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)

            result = np.empty(number, dtype=np.complex64)

            if self.left_index + number > len(data):
                end = len(data) - self.left_index
            else:
                end = number

            result[:end] = data[self.left_index:self.left_index + end]
            if end < number:
                result[end:] = data[:number-end]

        self.left_index += number
        self.__length.value -= number

        return result
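

# Usage sketch (an addition, not part of urh): the buffer stores complex64
# samples and pop() returns them in FIFO order.
if __name__ == '__main__':
    ring = RingBuffer(size=8)
    ring.push(np.arange(4, dtype=np.complex64))
    print(ring.pop(2))  # [0.+0.j 1.+0.j]
    print(len(ring))    # 2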
コード例 #59
ファイル: evaluate.py プロジェクト: BuggyMcBugFace/ssai-cnn
parser.add_argument('--steps', type=int, default=256)
parser.add_argument('--relax', type=int, default=3)
parser.add_argument('--n_thread', type=int, default=8)
args = parser.parse_args()
print(args)

result_dir = args.result_dir
n_iter = int(result_dir.split('_')[-1])
label_dir = args.map_dir
result_fns = sorted(glob.glob('%s/*.npy' % result_dir))
n_results = len(result_fns)
eval_dir = '%s/evaluation_%d' % (result_dir, n_iter)

all_positive_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_positive = np.ctypeslib.as_array(all_positive_base.get_obj())
all_positive = all_positive.reshape((n_results, args.channel, args.steps))

all_prec_tp_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_prec_tp = np.ctypeslib.as_array(all_prec_tp_base.get_obj())
all_prec_tp = all_prec_tp.reshape((n_results, args.channel, args.steps))

all_true_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_true = np.ctypeslib.as_array(all_true_base.get_obj())
all_true = all_true.reshape((n_results, args.channel, args.steps))

all_recall_tp_base = Array(
    ctypes.c_double, n_results * args.channel * args.steps)
all_recall_tp = np.ctypeslib.as_array(all_recall_tp_base.get_obj())
コード例 #60
ファイル: RingBuffer.py プロジェクト: Cyber-Forensic/urh
class RingBuffer(object):
    """
    A RingBuffer containing complex values.
    """
    def __init__(self, size: int):
        self.__data = Array("f", 2*size)
        self.size = size
        self.__current_index = Value("L", 0)

    @property
    def current_index(self):
        return self.__current_index.value

    @current_index.setter
    def current_index(self, value):
        self.__current_index.value = value

    @property
    def is_empty(self) -> bool:
        return self.current_index == 0

    @property
    def space_left(self):
        return self.size - self.current_index

    @property
    def data(self):
        return np.frombuffer(self.__data.get_obj(), dtype=np.complex64)

    def __getitem__(self, index):
        return self.data[index]

    def __repr__(self):
        return "RingBuffer " + str(self.data)

    def __increase_current_index_by(self, n: int):
        self.current_index += n
        if self.current_index > self.size:
            self.current_index = self.size

    def clear(self):
        self.current_index = 0

    def will_fit(self, number_values: int) -> bool:
        return number_values <= self.space_left

    def push(self, values: np.ndarray):
        """
        Push values to buffer. If buffer can't store all values a ValueError is raised
        :param values: 
        :return: 
        """
        n = len(values)

        with self.__data.get_lock():
            data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
            data[self.current_index:self.current_index+n] = values

        self.__increase_current_index_by(n)

    def pop(self, number: int) -> np.ndarray:
        """
        Pop number of elements. If there are not enough elements, all remaining elements are returned and the
        buffer is cleared afterwards. If buffer is empty, an empty numpy array is returned.
        """
        if number > self.current_index:
            number = self.current_index

        with self.__data.get_lock():
            self.current_index -= number
            result = np.copy(self.data[0:number])
            data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
            data[:] = np.roll(data, -number)

        return result