Example #1
def producer(conn):
    # os.environ["PYTHONWARNINGS"] = "ignore"
    feed_shm_name = '{}_{}_{}'.format('test', os.getpid(),
                                      threading.current_thread().ident)
    print('input shm name : {}'.format(feed_shm_name))

    feed_shm = SharedMemory(name=feed_shm_name, create=True, size=2 * 4)

    feed_shm_arr = np.ndarray((1, 2), dtype=np.float32, buffer=feed_shm.buf)
    input_arr = np.random.random((1, 2)).astype(np.float32)
    feed_shm_arr[:] = input_arr[:]

    conn.send(feed_shm_name)
    result_shm_name = conn.recv()
    result_shm = SharedMemory(name=result_shm_name)
    result_shm_arr = np.ndarray((1, 2),
                                dtype=np.float32,
                                buffer=result_shm.buf)
    print('Output array : {}'.format(result_shm_arr))

    conn.send('exit')
    del result_shm_arr
    result_shm.close()

    conn.recv()
    del feed_shm_arr
    feed_shm.close()
    feed_shm.unlink()

    print('clean and exit')

    return
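A matching consumer for this producer might look like the sketch below. It mirrors the handshake above (receive the input buffer name, publish a result buffer, wait for 'exit', then acknowledge so the producer can unlink its segment); the `consumer` name, the `_out` suffix, and the doubling computation are placeholders, not part of the original example.

def consumer(conn):
    # attach to the producer's input buffer
    feed_shm_name = conn.recv()
    feed_shm = SharedMemory(name=feed_shm_name)
    feed_arr = np.ndarray((1, 2), dtype=np.float32, buffer=feed_shm.buf)

    # allocate a result buffer and tell the producer its name
    result_shm = SharedMemory(name=feed_shm_name + '_out', create=True, size=2 * 4)
    result_arr = np.ndarray((1, 2), dtype=np.float32, buffer=result_shm.buf)
    result_arr[:] = feed_arr * 2  # placeholder computation
    conn.send(result_shm.name)

    conn.recv()  # wait for 'exit' from the producer
    del feed_arr, result_arr
    feed_shm.close()
    result_shm.close()
    result_shm.unlink()
    conn.send('done')  # let the producer clean up its own buffer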
Example #2
    def __init__(self, args: RemoteArgs, progress_cb, is_master):
        self._args = args
        self._progress_cb = progress_cb
        self._make_verifier()

        shm = SharedMemory(args.shm_name, False)
        try:
            self._setup_shm(shm.buf)
            self._process()
            self._done_workers.fetch_add(1)
        except:
            # set to a negative value to mark failure
            self._read_idx.fetch_add(-args.inputs.shape[0]**2)
            raise
        finally:
            self._read_idx = None
            self._done_workers = None
            self._nr_adv_verifier = None
            self._nr_adv_ref = None
            self._output = None
            shm.close()
            # manual cleanup due to a bug (https://bugs.python.org/issue39959 )
            if not is_master and sys.version_info <= (3, 8, 2):
                from multiprocessing import shared_memory
                if shared_memory._USE_POSIX:
                    from multiprocessing.resource_tracker import unregister
                    unregister(shm._name, "shared_memory")
Example #3
def chunk_worker(left_spec, right_spec, pairs_slice):

    # re-attach to the SharedMemory blocks
    left_shm = SharedMemory(left_spec['name'])
    right_shm = SharedMemory(right_spec['name'])
    # Create the np.recarray from the buffer of the shared memory
    left_arr = np.recarray(shape=left_spec['shape'],
                           dtype=left_spec['dtype'],
                           buf=left_shm.buf)
    right_arr = np.recarray(shape=right_spec['shape'],
                            dtype=right_spec['dtype'],
                            buf=right_shm.buf)

    #print (left_arr[0][2], right_arr[0][2])

    results = []
    for idx_pair in pairs_slice:
        geom1 = wkt.loads(left_arr[idx_pair[0]][2])
        geom2 = wkt.loads(right_arr[idx_pair[1]][2])

        pt1, pt2 = ops.nearest_points(geom1, geom2)

        dist, a1, a2 = V_inv((pt1.y, pt1.x), (pt2.y, pt2.x))

        dist = dist * 1000  #m

        results.append((idx_pair[0], idx_pair[1], dist))

    return results
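The left_spec/right_spec dictionaries are expected to describe shared recarrays created in the parent process. A minimal sketch of that parent-side setup (make_shared_recarray and the executor wiring are hypothetical helpers, not part of the original project):

def make_shared_recarray(records):
    # copy a list of records into a freshly created, named shared recarray
    arr = np.rec.fromrecords(records)
    shm = SharedMemory(create=True, size=arr.nbytes)
    shared = np.recarray(shape=arr.shape, dtype=arr.dtype, buf=shm.buf)
    shared[:] = arr[:]
    return shm, {'name': shm.name, 'shape': arr.shape, 'dtype': arr.dtype}

# left_shm, left_spec = make_shared_recarray(left_records)
# right_shm, right_spec = make_shared_recarray(right_records)
# with ProcessPoolExecutor() as exe:
#     futures = [exe.submit(chunk_worker, left_spec, right_spec, chunk)
#                for chunk in pair_chunks]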
Example #4
def random_drop(vocab, embedding: Embedding, percent, vocab_size):
    weights_mem = SharedMemory(embedding.embedding_memory_name)
    W = np.ndarray(shape=embedding.embedding_memory_shape,
                   dtype=embedding.embedding_memory_dtype,
                   buffer=weights_mem.buf)

    w2i_mem = SharedMemory(embedding.w2i_memory_name)
    w2i = embedding.buff_to_dict(w2i_mem, embedding.w2i_memory_size)

    dropped_words = {}
    for c in vocab:
        if c not in dropped_words:
            dropped_words[c] = []

        existing_words = []
        for word in vocab[c]:  # iterating over words
            try:
                _ = W[w2i[word]]
                existing_words.append(word)
            except KeyError:
                continue

        size = len(existing_words)
        rm_num = math.floor(size * percent)
        vocab_size -= rm_num
        for i in range(rm_num):
            rng = random.randint(0, size - i - 1)
            dropped_words[c].append(existing_words[rng])
            del existing_words[rng]
        vocab[c] = existing_words

    return dropped_words
Example #5
def sensor_read_process(raw_shm: shared_memory.SharedMemory,
                        proc_shm: shared_memory.SharedMemory, r_lock: Lock,
                        p_lock: Lock):
    script_dir = Path(__file__).parent
    device_file = script_dir.parent.joinpath("um7_A500CNP8.json")
    assert device_file.exists(
    ), f"Device file with connection info: {device_file} does not exist!"
    um7 = UM7Serial(device=str(device_file))

    for packet in um7.recv_broadcast(flush_buffer_on_start=False):
        packet_bytes = bytes(json.dumps(packet.__dict__), encoding='utf-8')
        assert len(packet_bytes) <= BUFFER_SIZE, \
            f"Packet cannot be serialized, increase BUFFER_SIZE to at least {len(packet_bytes)}"
        if isinstance(packet, UM7AllRawPacket):
            r_lock.acquire()
            raw_shm.buf[:] = b' ' * BUFFER_SIZE
            raw_shm.buf[:len(packet_bytes)] = packet_bytes
            r_lock.release()
            # logging.warning(f"[SR][RAW] -> {packet}")
        elif isinstance(packet, UM7AllProcPacket):
            p_lock.acquire()
            proc_shm.buf[:] = b' ' * BUFFER_SIZE
            proc_shm.buf[:len(packet_bytes)] = packet_bytes
            p_lock.release()
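Consumers of these buffers can take the same lock, copy the padded bytes out, and decode the JSON. A minimal reader sketch under the same conventions (the function name is hypothetical, and it assumes the writer has already filled the buffer at least once):

def read_latest_packet(shm: shared_memory.SharedMemory, lock: Lock):
    # copy under the lock so the writer cannot overwrite mid-read
    with lock:
        payload = bytes(shm.buf)
    text = payload.decode('utf-8').rstrip(' \x00')
    return json.loads(text) if text else None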
Example #6
def attach_shm_array(
    token: tuple[str, str, tuple[str, str]],
    size: int = _default_size,
    readonly: bool = True,
) -> ShmArray:
    '''
    Attach to an existing shared memory array previously
    created by another process using ``open_shared_array``.

    No new shared mem is allocated but wrapper types for read/write
    access are constructed.

    '''
    token = _Token.from_msg(token)
    key = token.shm_name

    if key in _known_tokens:
        assert _Token.from_msg(_known_tokens[key]) == token, "WTF"

    # attach to array buffer and view as per dtype
    shm = SharedMemory(name=key)
    shmarr = np.ndarray((size, ), dtype=token.dtype, buffer=shm.buf)
    shmarr.setflags(write=int(not readonly))

    first = SharedInt(
        shm=SharedMemory(
            name=token.shm_first_index_name,
            create=False,
            size=4,  # std int
        ), )
    last = SharedInt(
        shm=SharedMemory(
            name=token.shm_last_index_name,
            create=False,
            size=4,  # std int
        ), )

    # make sure we can read
    first.value

    sha = ShmArray(
        shmarr,
        first,
        last,
        shm,
    )
    # read test
    sha.array

    # Stash key -> token knowledge for future queries
    # via `maybe_open_shm_array()` but only after we know
    # we can attach.
    if key not in _known_tokens:
        _known_tokens[key] = token

    # "close" attached shm on process teardown
    tractor._actor._lifetime_stack.callback(sha.close)

    return sha
Example #7
    def __init__(self,
                 chunk_size: int=DEFAULT_CHUNK_SIZE,
                 maxsize: int=DEFAULT_MAXSIZE,
                 serializer=None,
                 integrity_check: bool=False,
                 deadlock_check: bool=False,
                 deadlock_immanent_check: bool=True,
                 watermark_check: bool = False,
                 use_semaphores: bool = True,
                 verbose: bool=False):
        ctx = mp.get_context() # TODO: What is the proper type hint here?

        super().__init__(maxsize, ctx=ctx)

        self.qid: int = self.__class__.qid_counter
        self.__class__.qid_counter += 1

        self.verbose: bool = verbose
        if self.verbose:
            print("Starting ShmQueue qid=%d pid=%d chunk_size=%d maxsize=%d." % (self.qid, os.getpid(), chunk_size, maxsize), file=sys.stderr, flush=True) # ***

        self.chunk_size: int = min(chunk_size, self.__class__.MAX_CHUNK_SIZE) \
            if chunk_size > 0 else self.__class__.MAX_CHUNK_SIZE

        self.maxsize: int = maxsize if maxsize > 0 else self.__class__.DEFAULT_MAXSIZE

        self.serializer = serializer or pickle

        self.integrity_check: bool = integrity_check
        self.deadlock_check: bool = deadlock_check
        self.deadlock_immanent_check: bool = deadlock_immanent_check
        self.watermark_check: bool = watermark_check
        self.chunk_watermark: int = 0

        self.mid_counter: int = 0

        self.producer_lock = ctx.Lock()
        self.free_list_lock = ctx.Lock()
        self.msg_list_lock = ctx.Lock()

        self.use_semaphores: bool = use_semaphores
        if not use_semaphores:
            # Put the None case first to make mypy happier.
            self.free_list_semaphore: typing.Optional[typing.Any] = None # TODO: what is the type returned by ctx.Semaphore(0)?
            self.msg_list_semaphore: typing.Optional[typing.Any] = None
        else:
            self.free_list_semaphore = ctx.Semaphore(0)
            self.msg_list_semaphore = ctx.Semaphore(0)
        
        self.list_heads: SharedMemory = SharedMemory(create=True, size=self.__class__.LIST_HEAD_SIZE * 2)
        self.init_list_head(self.__class__.FREE_LIST_HEAD)
        self.init_list_head(self.__class__.MSG_LIST_HEAD)

        self.block_locks: typing.List[typing.Any] = [ctx.Lock() for _ in range(maxsize)] # one independent lock per block. TODO: what is the type returned by ctx.Lock()?
        self.data_blocks: typing.List[SharedMemory] = []
        block_id: int
        for block_id in range(maxsize):
            self.data_blocks.append(SharedMemory(create=True, size=self.__class__.META_BLOCK_SIZE + self.chunk_size))
            self.add_free_block(block_id)
Example #8
async def teardown(**kwargs):
    object_ids = kwargs.get('object_ids')
    for object_id in object_ids:
        try:
            shm = SharedMemory(name=object_id)
            shm.unlink()
            await asyncio.sleep(0)
        except FileNotFoundError:
            pass
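The object_ids passed here are simply the names of previously created segments; a setup counterpart could record them at allocation time (a sketch, not the library's actual API):

def setup(sizes):
    # allocate one named segment per requested size and keep both the handles and the names
    shms = [SharedMemory(create=True, size=s) for s in sizes]
    return shms, [shm.name for shm in shms]

# later: await teardown(object_ids=names)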
Example #9
def open_shm_array(
    key: Optional[str] = None,
    size: int = _default_size,
    dtype: Optional[np.dtype] = None,
    readonly: bool = False,
) -> ShmArray:
    """Open a memory shared ``numpy`` using the standard library.

    This call unlinks (aka permanently destroys) the buffer on teardown
    and thus should be used from the parent-most accessor (process).
    """
    # create new shared mem segment for which we
    # have write permission
    a = np.zeros(size, dtype=dtype)
    a['index'] = np.arange(len(a))

    shm = SharedMemory(name=key, create=True, size=a.nbytes)
    array = np.ndarray(a.shape, dtype=a.dtype, buffer=shm.buf)
    array[:] = a[:]
    array.setflags(write=int(not readonly))

    token = _make_token(key=key, dtype=dtype)

    # create single-entry arrays for storing the first and last indices
    first = SharedInt(shm=SharedMemory(
        name=token.shm_first_index_name,
        create=True,
        size=4,  # std int
    ))

    last = SharedInt(shm=SharedMemory(
        name=token.shm_last_index_name,
        create=True,
        size=4,  # std int
    ))

    last.value = first.value = int(_secs_in_day)

    shmarr = ShmArray(
        array,
        first,
        last,
        shm,
    )

    assert shmarr._token == token
    _known_tokens[key] = shmarr.token

    # "unlink" created shm on process teardown by
    # pushing teardown calls onto actor context stack
    tractor._actor._lifetime_stack.callback(shmarr.close)
    tractor._actor._lifetime_stack.callback(shmarr.destroy)

    return shmarr
Example #10
    def alloc(self, sm_name=None, create=True):
        sm_name = sm_name or _make_filename()
        try:
            sm = SharedMemory(sm_name, create, self.__total_size)
        except (FileExistsError, FileNotFoundError):
            # retry with the create flag flipped: attach if it exists, create if it does not
            sm = SharedMemory(sm_name, not create, self.__total_size)
        buffer = sm.buf
        res = Dict()
        offset = 0
        for name, length, c_type, size in self.__vars:
            res[name] = np.ndarray((length, ), c_type, buffer, offset=offset)
            offset += size
        return sm, res
Example #11
def category_center(vocab, embedding: Embedding, percent, vocab_size):
    weights_mem = SharedMemory(embedding.embedding_memory_name)
    W = np.ndarray(shape=embedding.embedding_memory_shape,
                   dtype=embedding.embedding_memory_dtype,
                   buffer=weights_mem.buf)

    w2i_mem = SharedMemory(embedding.w2i_memory_name)
    w2i = embedding.buff_to_dict(w2i_mem, embedding.w2i_memory_size)

    dropped_words = {}
    for c in vocab:  # c is the category
        if c not in dropped_words:  # populating dropped_words with empty lists
            dropped_words[c] = []
        mean = None
        for word in vocab[c]:  # iterating over words
            try:
                # getting coefficients of the words of c
                if mean is None:
                    mean = np.array([W[w2i[word]]])
                else:
                    vector = [W[w2i[word]]]
                    mean = np.append(mean, vector, axis=0)
            except KeyError:
                continue
        # getting the category center
        mean = np.mean(mean, axis=0)
        ranks = None
        # calculating distances from the category centers and sorting by that afterwards
        for i, word in enumerate(vocab[c]):
            try:
                if ranks is None:
                    ranks = np.array([[i, _distance(mean, W[w2i[word]])]])
                else:
                    ranks = np.append(ranks,
                                      [[i, _distance(mean, W[w2i[word]])]],
                                      axis=0)
            except KeyError:
                continue
        ranks = ranks[ranks[:, 1].argsort(), :]
        drop = math.floor(ranks.shape[0] * percent)
        vocab_size -= drop

        word_list = []
        for i in range(drop):
            index = int(ranks[-i - 1, 0])
            dropped_words[c].append(vocab[c][index])
            word_list.append(vocab[c][index])
        for w in word_list:
            vocab[c].remove(w)

    return dropped_words
Example #12
    def __init__(self, name, queue, out_queue, shm, max_retries=5,
                 logging_queue=None, dl_timeout=10):
        super().__init__(name=name)
        self.q = queue
        self.o_q = out_queue
        self.session = requests.session()
        self.session.headers.update({
            'User-Agent': 'EpicGamesLauncher/10.18.6-14188424+++Portal+Release-Live Windows/10.0.18363.1.256.64bit'
        })
        self.max_retries = max_retries
        self.shm = SharedMemory(name=shm)
        self.log_level = logging.getLogger().level
        self.logging_queue = logging_queue
        self.dl_timeout = float(dl_timeout) if dl_timeout else 10.0
Example #13
def work_with_shared_memory(shm_name, shape, dtype):
    print(f'With SharedMemory: {current_process()=}')
    # Locate the shared memory by its name
    shm = SharedMemory(shm_name)
    # Create the np.recarray from the buffer of the shared memory
    np_array = np.recarray(shape=shape, dtype=dtype, buf=shm.buf)
    return np.nansum(np_array.val)
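The parent side that pairs with this worker typically copies an existing structured array into a freshly created segment and hands over only the name, shape, and dtype. A sketch under that assumption (`df.to_records()` and the executor wiring are illustrative, not part of the snippet):

from concurrent.futures import ProcessPoolExecutor

a = df.to_records(index=False)  # or any structured NumPy array with a 'val' field
shm = SharedMemory(create=True, size=a.nbytes)
shm_arr = np.recarray(shape=a.shape, dtype=a.dtype, buf=shm.buf)
np.copyto(shm_arr, a)  # copy the data into shared memory
with ProcessPoolExecutor() as exe:
    total = exe.submit(work_with_shared_memory, shm.name, a.shape, a.dtype).result()
shm.close()
shm.unlink()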
Example #14
    def _write_init(self):
        # keep last 8 bytes to record actual memory size
        self.shm = shm = SharedMemory(name=self._object_id,
                                      create=True,
                                      size=self._size + 8)
        self._write_actual_size()
        self._buffer = self._mv = shm.buf
Example #15
def _init_memory_worker(recording, arrays, shm_names, shapes, dtype):
    # create a local dict per worker
    worker_ctx = {}
    if isinstance(recording, dict):
        from spikeinterface.core import load_extractor
        worker_ctx['recording'] = load_extractor(recording)
    else:
        worker_ctx['recording'] = recording

    worker_ctx['dtype'] = np.dtype(dtype)

    if arrays is None:
        # create them from the shared memory names
        from multiprocessing.shared_memory import SharedMemory
        arrays = []
        # keep shm alive
        worker_ctx['shms'] = []
        for i in range(len(shm_names)):
            shm = SharedMemory(shm_names[i])
            worker_ctx['shms'].append(shm)
            arr = np.ndarray(shape=shapes[i], dtype=dtype, buffer=shm.buf)
            arrays.append(arr)

    worker_ctx['arrays'] = arrays

    return worker_ctx
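The shm_names/shapes arguments are produced in the main process, which keeps the segments alive for the lifetime of the job. A sketch of that allocation step (shapes, dtype, and the variable names are placeholders):

dtype = 'float32'  # placeholder dtype
shms, shm_names, shapes = [], [], []
for shape in [(100_000, 4), (100_000, 4)]:  # placeholder shapes, one per output array
    nbytes = int(np.prod(shape)) * np.dtype(dtype).itemsize
    shm = SharedMemory(create=True, size=nbytes)
    shms.append(shm)  # keep a reference so the segment stays alive
    shm_names.append(shm.name)
    shapes.append(shape)
# each worker then calls: _init_memory_worker(recording, None, shm_names, shapes, dtype)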
Example #16
def load(q_comb, layerNo, layerSN):
    # layerNo: the number in the height axis used to be load
    all_temp_tif = glob.glob("temp*.tif")
    shm_list = list()
    shared_img_list = list()
    core_no = mp.cpu_count() - 2

    for n in range(core_no):
        shm_list.append(SharedMemory(create=True, size=2 * layerNo * 5 * 9261))
        shared_img_list.append(
            np.ndarray(shape=(layerNo, 9261, 5),
                       dtype="uint16",
                       buffer=shm_list[n].buf))

    for idx, tif_name in enumerate(all_temp_tif):
        img = TFF.imread(tif_name,
                         key=range(layerSN, layerSN + core_no * layerNo))
        for n in range(core_no):
            shared_img_list[n] = np.ndarray(shape=(layerNo, img.shape[1],
                                                   img.shape[2]),
                                            dtype=img.dtype,
                                            buffer=shm_list[n].buf)
            np.copyto(shared_img_list[n],
                      img[n * layerNo:(n + 1) * layerNo, :, :])
        for idx, a_pipe in enumerate(q_comb):
            a_pipe[0].send([
                tif_name, shm_list[idx].name, shared_img_list[idx].shape, idx
            ])
Example #17
def is_p(i, j, config: Config, embedding_memory: MemoryInfo, lamb: int):
    lambdas = {l + 1: 0 for l in range(lamb)}

    # Embedding
    weights_mem = SharedMemory(embedding_memory.name)
    w = np.ndarray(shape=embedding_memory.shape, buffer=weights_mem.buf)

    V_1 = np.array([w[:, j]])
    V_2 = np.array([np.arange(V_1.shape[1])])
    V = np.append(V_1, V_2, axis=0)
    V_sorted = V[:, V[0, :].argsort()]

    S = set(config.semantic_categories.categories.vocab[
        config.semantic_categories.categories.i2c[i]])
    n_i = len(config.semantic_categories.categories.vocab[
        config.semantic_categories.categories.i2c[i]])

    for l in range(lamb):
        v_p = V_p(V_sorted, n_i, l + 1, config)
        v_n = V_n(V_sorted, n_i, l + 1, config)

        IS_p = len(S.intersection(v_p)) / n_i * 100
        IS_n = len(S.intersection(v_n)) / n_i * 100

        lambdas[l + 1] = max(IS_p, IS_n)
    return lambdas
Example #18
    def load_file(self, *args, **kwargs):
        shm = SharedMemory(create=True, size=5 * np.dtype('<U64').itemsize)

        # get array metadata, shared memory buffer name, dtype, and shape
        array_metadata = np.ndarray(shape=(5,), dtype=np.dtype('<U64'), buffer=shm.buf)

        importer_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '_inscopix_importer.py')

        file_path = self.widget.line_edit_path.text()

        cmd = ['python', importer_path, '--isx-path', file_path, '--shm-meta-array-name', shm.name]
        print(cmd)
        proc = Popen(
            cmd,
            env=os.environ.copy()
        )
        proc.wait()

        # read the metadata
        name = array_metadata[0]
        dtype = array_metadata[1]
        shape = array_metadata[2]

        # open the shared buffer
        existing_shm = SharedMemory(name=array_metadata[0])

        shape = tuple(map(int, shape.split(',')))

        _imgseq = np.ndarray(shape, dtype=np.dtype(dtype), buffer=existing_shm.buf)

        imgseq = np.zeros(shape=shape, dtype=np.dtype(dtype))
        imgseq[:] = _imgseq[:]

        d = \
            {
                'fps': float(array_metadata[3]),
                'origin': array_metadata[4],
                'date': '00000000_000000'
            }

        imgdata = ImgData(imgseq, d)

        self.vi.viewer.workEnv = ViewerWorkEnv(imgdata)
        self.vi.update_workEnv()

        existing_shm.close()
Example #19
    def __init__(
            self,
            points,
            iD,
            kD,
            vectors_UID,
            tree_UID,
            maxsize,
            pipe,
            lock,
            distance_function=(lambda v: np.sum(v**2)),
            vector_function=(lambda p, n: p - n),
            square_distances=True,
    ):
        super(Horse, self).__init__()

        self.distance_function = distance_function
        self.vector_function = vector_function

        self.points = points

        if square_distances:
            self.iD = iD**2
            self.kD = kD**2
        else:
            self.iD = iD
            self.kD = kD

        self.maxsize = maxsize

        self.reached_bool = np.zeros(len(points), dtype=bool)
        self.reached_points = 0

        self.closest = np.ones(len(points), dtype=int)
        self.L = np.ones(len(points)) * np.inf
        self.dv = np.zeros((len(points), 3))

        self.vectors_sm = SharedMemory(name=vectors_UID)
        self.tree_sm = SharedMemory(name=tree_UID)

        self.nodes = as_numpy_arr((self.maxsize, 3), self.tree_sm)
        self.vectors = as_numpy_arr((self.maxsize, 3), self.vectors_sm)

        self.pipe = pipe
        self.lock = lock
        self.batch = None
Example #20
    async def object_info(self, object_id) -> ObjectInfo:
        shm = SharedMemory(name=object_id)
        if _is_windows:
            return WinShmObjectInfo(size=shm.size,
                                    object_id=object_id,
                                    shm=shm)
        else:
            return ObjectInfo(size=shm.size, object_id=object_id)
Example #21
def read_test(x, y, shm_name, shm_shape, shm_dtype):
    # Locate the shared memory by its name
    shm = SharedMemory(shm_name)
    # Create the np.ndarray from the buffer of the shared memory
    np_array = np.ndarray(shape=shm_shape, dtype=shm_dtype, buffer=shm.buf)
    result = np_array[x][y]
    print("RESULT:", result)
    return result
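A caller for read_test only needs to publish the array once and pass along its name plus shape and dtype. A sketch with made-up data (the executor wiring is an assumption):

from concurrent.futures import ProcessPoolExecutor

data = np.arange(12, dtype=np.int64).reshape(3, 4)
shm = SharedMemory(create=True, size=data.nbytes)
np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf)[:] = data  # copy into shared memory
with ProcessPoolExecutor() as exe:
    print(exe.submit(read_test, 1, 2, shm.name, data.shape, data.dtype).result())
shm.close()
shm.unlink()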
Example #22
def read_from_shared(x, y, shm_name, shm_shape, shm_dtype):
    print("SWONEEE")
    # Locate the shared memory by its name
    shm = SharedMemory(shm_name)
    # Create the np.recarray from the buffer of the shared memory
    np_array = np.recarray(shape=shm_shape, dtype=shm_dtype, buf=shm.buf)
    return np.nansum(np_array.val)
    """
Example #23
    def init_from_data_buffer(self, data_buffer):
        self.buffer = data_buffer

        self.shms = traverse_lists(
            self.buffer, lambda b: [
                SharedMemory(create=True, size=b.nbytes)
                for _ in range(self.queue_size)
            ])
Example #24
def getMM(size, create=True):
  if isWindows:
    from mmap import mmap
    return mmap(-1, size, tagname=mmName)
  else: # requires Python >= 3.8
    from multiprocessing.shared_memory import SharedMemory
    shm = SharedMemory(mmName, create, size)
    return shm
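Note that the two branches return different objects (an mmap on Windows, a SharedMemory wrapper elsewhere), so callers pick the writable buffer accordingly. A small usage sketch under that assumption:

mm = getMM(1024, create=True)        # first process creates the mapping
buf = mm if isWindows else mm.buf    # mmap supports slicing directly; SharedMemory exposes .buf
buf[:5] = b'hello'
# a second process would attach with getMM(1024, create=False)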
Example #25
    def find_adv_batch(self, model: nn.Module, inputs: torch.Tensor,
                       inputs_adv_ref: torch.Tensor, labels: torch.Tensor,
                       epsilon: float, max_nr_adv):

        self._start_workers()

        with ensure_training_state(model, False):
            model_outputs: torch.Tensor = model(inputs_adv_ref)
            eval_model = model.cvt_to_eval()

        correct_mask = torch.eq(model_outputs.argmax(dim=1), labels)
        idx_remap = np.arange(inputs.shape[0], dtype=np.int32)
        np.random.shuffle(idx_remap)
        assert inputs.dtype == torch.float32

        shm_size = BUFFER_COUNTER_SIZE + 4 * inputs.numel()
        shm = SharedMemory(size=shm_size, create=True)
        shm.buf[:BUFFER_COUNTER_SIZE] = b'\0' * BUFFER_COUNTER_SIZE
        shm_name = shm.name

        args = RemoteArgs(eval_model, inputs, inputs_adv_ref, labels,
                          correct_mask, epsilon, max_nr_adv, shm_name,
                          shm_size, idx_remap)

        try:
            return self._work(shm.buf, args)
        except:
            self.close()
            raise
        finally:
            shm.close()
            shm.unlink()
Example #26
    def init_shared_memory(self, name, shape, dtype, config):
        log = config.logger
        matrix = np.zeros(shape, dtype=dtype)
        memory = SharedMemory(name, create=True, size=matrix.nbytes)
        log.info(
            f"Memory allocated for {name} with shape {shape} and dtype {dtype}. Overall size in memory: "
            f"{matrix.nbytes/1024/1024:.2f} MBytes ({matrix.nbytes} bytes)")
        del matrix
        self._memories.append(memory)
Example #27
    def __init__(self, queue, out_queue, base_path, shm, cache_path=None, logging_queue=None):
        super().__init__(name='FileWorker')
        self.q = queue
        self.o_q = out_queue
        self.base_path = base_path
        self.cache_path = cache_path if cache_path else os.path.join(base_path, '.cache')
        self.shm = SharedMemory(name=shm)
        self.log_level = logging.getLogger().level
        self.logging_queue = logging_queue
Example #28
def is_i(task: int, config: Config, embedding_memory: MemoryInfo,
         distance_memory: MemoryInfo, lamb):
    # Distance space
    dist_mem = SharedMemory(distance_memory.name)
    distance_matrix = np.ndarray(shape=distance_memory.shape,
                                 buffer=dist_mem.buf)

    return is_p(task, j_star(task, distance_matrix), config, embedding_memory,
                lamb)
Example #29
def animate(i, ax, line, rd, queue, a_queue, lines_poiis, constants):
    if i < iterations - 1:
        frame_i = i + 1
    else:
        frame_i = i % iterations

    ax.set_title(str(frame_i))
    start_time = time.perf_counter()

    if not rd.finished_loading and not a_queue.empty():
        data_shared_info = a_queue.get(block=True)
        rd.data_shared = SharedMemory(data_shared_info['name'])
        rd.data = np.ndarray(shape=data_shared_info['shape'],
                             dtype=data_shared_info['dtype'],
                             buffer=rd.data_shared.buf)
        time0 = time.perf_counter() - start_time
        start_time = time.perf_counter()
        time1 = time.perf_counter() - start_time
        start_time = time.perf_counter()
        rd.finished_loading = True
        a_queue.task_done()
        a_queue.join()
        time2 = time.perf_counter() - start_time
        print("Put data!")
        # clear_process = Process(target=clear_queue, args=(queue,))
        # clear_process.start()
        # rd.clearer = clear_process
        # print(f"{time0:0.8f}" + " " + f"{time1:0.8f}" + " " + f"{time2:0.8f}")

    start_time = time.perf_counter()
    if not rd.finished_loading and frame_i % chunksize == 0:
        rd.set_reading_chunk(queue.get(block=True))
        queue.task_done()
    time4 = time.perf_counter() - start_time
    start_time = time.perf_counter()
    row = rd.get_row(frame_i)
    row = row.reshape(int(len(row) / 2), 2).transpose()
    time5 = time.perf_counter() - start_time
    # print(f"{time4:0.8f}" + " " + f"{time5:0.8f}")

    t = frame_i * constants['dt']
    T = constants['omega'] * t
    o_x = constants['radius_gal'] * np.cos(T)
    o_y = constants['radius_gal'] * np.sin(T)
    lines_poiis['o_line'].set_xdata([0, o_x])
    lines_poiis['o_line'].set_ydata([0, o_y])

    rd.update_poii_data(row)
    x_poiis = rd.poiis_x
    y_poiis = rd.poiis_y
    s_i = min(len(rd.poiis_x[0]), 300)
    for i, poii_line in enumerate(lines_poiis['lines']):
        poii_line.set_xdata(x_poiis[i][-s_i:])
        poii_line.set_ydata(y_poiis[i][-s_i:])

    line.set_xdata(row[0])
    line.set_ydata(row[1])
Example #30
def score(config: Config,
          embedding_memory: MemoryInfo,
          distance_memory: MemoryInfo,
          lamb=5):
    """
    Calculating interpretability scores
    :param config: Config object
    :param embedding_memory: Memory info about the embedding
    :param distance_memory: Memory info about the distance matrix
    :param lamb: the number of lambda values to compute, starting from 1
    :return:
    """
    IS_i = []
    number_of_processes = config.project.processes
    pool = multiprocessing.Pool(processes=number_of_processes)

    # Results
    r = np.zeros([lamb, embedding_memory.shape[-1]], dtype=np.float64)
    results_name = f"{config.memory_prefix}_lambdas_per_dim"
    results_mem = SharedMemory(name=results_name, create=True, size=r.nbytes)
    buf = np.ndarray(r.shape, dtype=r.dtype, buffer=results_mem.buf)
    buf[:, :] = r[:, :]
    del r

    relaxation_memory = MemoryInfo()
    relaxation_memory.name = results_name
    relaxation_memory.shape = buf.shape

    task_manager = Manager()
    task_queue = task_manager.Queue()
    progress_queue = task_manager.Queue()

    for i in range(len(config.semantic_categories.categories.i2c)):
        task_queue.put(i)

    inputs = []
    for i in range(number_of_processes):
        inputs.append([
            config, embedding_memory, distance_memory, task_queue,
            progress_queue, relaxation_memory, lamb
        ])

    progress = Process(target=_progress_bar,
                       args=(progress_queue, task_queue.qsize()))
    progress.start()

    with pool as p:
        _ = p.starmap(score_dist, inputs)

    progress.join()

    res = np.mean(buf, axis=1)
    IS_i = [res[i] for i in range(res.shape[0])]

    return IS_i