import gc

import numpy as np
import SharedArray as sa


def find_border_points(indices):
    params = sa.attach('shm://params')
    eps = params[0]
    minpts = params[1]
    features = sa.attach("shm://features")
    core_points_index = sa.attach("shm://core_points")
    start_index = indices[0]
    end_index = indices[1]
    print('finding border points entered thread', start_index, end_index)
    distances = []

    sample = features[start_index:end_index + 1, :]

    border_points = []

    for point_index in range(sample.shape[0]):
        if point_index % 1000 == 0:
            gc.collect()
            print(start_index, " collecting garbage, at index: ", point_index,
                  ' remaining indices: ', sample.shape[0] - point_index)
        point = sample[point_index, :]
        distances = np.sqrt(np.sum((features - point)**2, axis=1))
        # distances are measured against the full feature array, so argwhere
        # already returns global indices; no start_index offset is needed
        candidates = np.argwhere(distances <= eps)
        # if it is not a core point and is in the vicinity of a core point
        if np.intersect1d(candidates,
                          core_points_index).shape[0] >= 1 and not (
                              start_index + point_index) in core_points_index:
            border_points.append(start_index + point_index)
        del distances
    print('exiting border finding: ', start_index, end_index,
          end_index - start_index + 1, '\n\n')

    return border_points
Example #2
def load_shared(args):
    i, array_fundus_name, array_vessel_name, array_lm_name, fundus, vessel, lm, is_train = args
    array_fundus = SharedArray.attach(array_fundus_name)
    array_vessel = SharedArray.attach(array_vessel_name)
    array_lm = SharedArray.attach(array_lm_name)
    array_fundus[i], array_vessel[i], array_lm[i] = utils.load_augmented_lm(
        fundus, vessel, lm, augment=is_train)
Example #3
def find_core_points(indices):
    params = sa.attach('shm://params')
    eps = params[0]
    minpts = params[1]
    features = sa.attach("shm://features")
    start_index = indices[0]
    end_index = indices[1]
    sample = features[start_index:end_index + 1, :]
    print('entered thread for core points, start index: ', start_index,
          ' end index: ', end_index, ' sample shape ', sample.shape)
    distances = []
    core_points = []
    # nearest_neighbours = {}
    for point_index in range(sample.shape[0]):
        if point_index % 1000 == 0:
            print(start_index, ' collected garbage', ' at index: ',
                  point_index, ' remaining indices: ',
                  sample.shape[0] - point_index)
            gc.collect()
        point = sample[point_index, :]
        distances = np.sqrt(np.sum((features - point)**2, axis=1))
        candidates = np.argwhere(distances <= eps)
        if candidates.shape[0] > minpts:
            core_points.append(start_index + point_index)
            # nearest_neighbours[start_index+point_index] = set(list(candidates.flatten()))
    print('core point search complete, exiting: ', start_index, end_index,
          '\n\n')
    features = None
    start_index = None
    end_index = None
    sample = None
    gc.collect()
    # return core_points,nearest_neighbours
    # return core_points,nearest_neighbours
    return (core_points, {})
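
find_core_points and the earlier find_border_points both attach to shared arrays that a parent process must publish first. A minimal driver sketch, assuming the two workers come from the same DBSCAN-style pipeline; the data, eps/minpts values, chunking, and pool size below are illustrative, not taken from the original project:

import multiprocessing as mp

import numpy as np
import SharedArray as sa


def publish(name, values):
    """Create (or replace) a named shared array and copy values into it."""
    try:
        sa.delete(name)
    except Exception:
        pass  # no stale segment to remove
    shared = sa.create(name, values.shape, dtype=values.dtype)
    shared[:] = values


if __name__ == '__main__':
    features = np.random.rand(10000, 8)              # illustrative feature matrix
    publish('shm://features', features)
    publish('shm://params', np.array([0.3, 10.0]))   # [eps, minpts]

    # Contiguous, inclusive index ranges, one per worker.
    n_workers = 4
    bounds = np.linspace(0, len(features), n_workers + 1, dtype=int)
    chunks = [(bounds[k], bounds[k + 1] - 1) for k in range(n_workers)]

    with mp.Pool(n_workers) as pool:
        core_results = pool.map(find_core_points, chunks)
    core_points = sorted(i for cores, _ in core_results for i in cores)

    # Border detection reads the core-point indices from shared memory.
    publish('shm://core_points', np.array(core_points))
    with mp.Pool(n_workers) as pool:
        border_points = sorted(
            i for chunk in pool.map(find_border_points, chunks) for i in chunk)
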
Example #4
def load(data_name):

    try:
        Xtr = sa.attach('shm://%s_Xtr' % (data_name))
        Ytr = sa.attach('shm://%s_Ytr' % (data_name))
        Ytr_p = sa.attach('shm://%s_Ytr_pitch' % (data_name))
        Ytr_s = sa.attach('shm://%s_Ytr_stream' % (data_name))
    except:
        # load cqt
        trdata = h5py.File('../ex_data/tr.h5', 'r')
        Xtr = sa.create('shm://%s_Xtr' % (data_name), (trdata['x'].shape),
                        dtype='float32')
        Xtr[:] = trdata['x'][:]
        #load instrument label
        Ytr = sa.create('shm://%s_Ytr' % (data_name), (trdata['yi'].shape),
                        dtype='float32')
        Ytr[:] = trdata['yi'][:]
        #load pitch label
        Ytr_p = sa.create('shm://%s_Ytr_pitch' % (data_name),
                          (trdata['yp'].shape),
                          dtype='float32')
        Ytr_p[:] = trdata['yp'][:]
        #load pianoroll label
        Ytr_s = sa.create('shm://%s_Ytr_stream' % (data_name),
                          (trdata['ys'].shape),
                          dtype='float32')
        Ytr_s[:] = trdata['ys'][:]

    return Xtr, Ytr, Ytr_p, Ytr_s
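
The attach-or-create pattern above leaves the arrays resident in shared memory after the process exits, so they have to be removed explicitly. A small cleanup sketch using the same naming scheme (the suffix list simply mirrors the loader above):

import SharedArray as sa


def cleanup(data_name):
    # Shared segments persist (e.g. under /dev/shm) until deleted explicitly.
    for suffix in ('Xtr', 'Ytr', 'Ytr_pitch', 'Ytr_stream'):
        try:
            sa.delete('shm://%s_%s' % (data_name, suffix))
        except Exception:
            pass  # that segment was never created
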
Example #5
def make_dataset(data_path, mode=None):
    try:
        mixture_array = sa.attach(f"shm://{mode}_mixture_array")
        vocal_array = sa.attach(f"shm://{mode}_vocal_array")

    except:
        mus = musdb.DB(root=data_path, is_wav=True, subsets=mode)
        mixture_list = list()
        vocal_list = list()
        for track in tqdm(mus):
            #mixture_list.append(track.audio.sum(axis=-1))
            mixture_list.append(norm(track.audio)[0])
            #vocal_list.append(track.targets['vocals'].audio.sum(axis=-1))
            vocal_list.append(norm(track.targets['vocals'].audio)[0])
        mixture_array = np.concatenate(mixture_list)
        vocal_array = np.concatenate(vocal_list)

        assert mixture_array.shape == vocal_array.shape

        mixture_array_sa = sa.create(f"shm://{mode}_mixture_array",
                                     mixture_array.shape)
        vocal_array_sa = sa.create(f"shm://{mode}_vocal_array",
                                   vocal_array.shape)
        mixture_array_sa[::] = mixture_array
        vocal_array_sa[::] = vocal_array

    return dict(mixture_array=mixture_array, vocal_array=vocal_array)
Example #6
def load_shared(args):
    i, array_fundus_rescale, array_fundus_rescale_mean_subtract, fundus, is_train = args
    array_fundus_rescale = SharedArray.attach(array_fundus_rescale)
    array_fundus_rescale_mean_subtract = SharedArray.attach(
        array_fundus_rescale_mean_subtract)
    array_fundus_rescale[i], array_fundus_rescale_mean_subtract[
        i] = utils.load_augmented([fundus], is_train)
Example #7
def load_shared(args):
    i, fundus_array_name, vessel_array_name, seg_array_name, augment, fundus_fname, vessel_fname, seg_fname = args
    fundus_array = SharedArray.attach(fundus_array_name)
    vessel_array = SharedArray.attach(vessel_array_name)
    seg_array = SharedArray.attach(seg_array_name)
    fundus_array[i], vessel_array[i], seg_array[
        i] = utils.load_augmented_fundus_vessel([fundus_fname], [vessel_fname],
                                                [seg_fname], augment)
Example #8
def __dist_wrapper(data):
    func, mat_buf, dist_buf, s, e, start, allowed_missing = data
    mat = sa.attach(mat_buf)
    dist = sa.attach(dist_buf)
    if e > s:
        d = func(mat[:, 1:], s, e, allowed_missing)
        dist[s:e] = d
    del mat, dist
Example #9
def shm_step(step):
    shm_y = SharedArray.attach("shm://y")
    shm_t = SharedArray.attach("shm://t")
    shm_i_app = SharedArray.attach("shm://i_app")
    ind = step.ind
    inputs = {"Current": shm_i_app[ind]}
    step_solution = step.do_step(shm_y[:, ind], shm_t[ind], inputs)
    shm_y[:, ind] = step_solution.y[:, -1]
    shm_t[ind] = step_solution.t[-1]
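
This worker (and the fuller variant in Example #26 below, which additionally attaches 'shm://V') assumes 'shm://y', 'shm://t', and 'shm://i_app' exist before the pool starts. A minimal setup sketch with purely illustrative sizes and values:

import numpy as np
import SharedArray as sa

n_states, n_cells = 100, 8                          # illustrative problem size
initial = {
    'shm://y': np.zeros((n_states, n_cells)),       # state vector per cell
    'shm://t': np.zeros(n_cells),                   # current time per cell
    'shm://i_app': np.linspace(0.5, 2.0, n_cells),  # applied current per cell
}
for name, values in initial.items():
    arr = sa.create(name, values.shape, dtype=values.dtype)
    arr[:] = values
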
Example #10
def load_shared(args):
    
    i, array_ex, array_he, array_ma, array_se, array_fundus_rescale_mean_subtract, fundus, features_home, is_train = args
    array_fundus_rescale_mean_subtract = SharedArray.attach(array_fundus_rescale_mean_subtract)
    array_ex = SharedArray.attach(array_ex)
    array_he = SharedArray.attach(array_he)
    array_ma = SharedArray.attach(array_ma)
    array_se = SharedArray.attach(array_se)
    array_ex[i], array_he[i], array_ma[i], array_se[i], array_fundus_rescale_mean_subtract[i] = utils.load_features_fundus([fundus], feature_shape_ex_he, feature_shape_ma,
                                                                                                                            feature_shape_se, features_home, is_train)
Example #11
    def put_bytes_mpu_mp_shm(self, s3_bucket, s3_key, array_name, block_size, new_session=False):
        """Put bytes into a S3 object using Multi-Part upload in parallel with shared memory

        :param str s3_bucket: name of the s3 bucket.
        :param str s3_key: name of the s3 key.
        :param str array_name: name of the shared memory array holding the data to upload.
        :param int block_size: block size for upload.
        :param bool new_session: Flag to create a new session or reuse existing session.
            True: create new session
            False: reuse existing session
        :return: Multi-part upload response
        """

        def work_put_shm(block_number, array_name, s3_bucket, s3_key, block_size, mpu):
            part_number = block_number + 1
            start = block_number * block_size
            end = (block_number + 1) * block_size
            shared_array = sa.attach(array_name)
            data_chunk = io.BytesIO(shared_array.data[start:end])

            s3 = boto3.session.Session().resource('s3')
            # s3 = boto3.resource('s3')
            response = s3.meta.client.upload_part(Bucket=s3_bucket,
                                                  Key=s3_key,
                                                  UploadId=mpu['UploadId'],
                                                  PartNumber=part_number,
                                                  Body=data_chunk)

            return dict(PartNumber=part_number, ETag=response['ETag'])

        if not self.enable_s3:
            data = sa.attach(array_name)
            return self.put_bytes(s3_bucket, s3_key, data, new_session)

        s3 = self.s3_resource(new_session)

        mpu = s3.meta.client.create_multipart_upload(Bucket=s3_bucket, Key=s3_key)

        shared_array = sa.attach(array_name)
        num_blocks = int(np.ceil(shared_array.nbytes / float(block_size)))
        parts_dict = dict(Parts=[])
        blocks = range(num_blocks)

        results = self.pool.map(work_put_shm, blocks, repeat(array_name), repeat(s3_bucket),
                                repeat(s3_key), repeat(block_size), repeat(mpu))

        for result in results:
            parts_dict['Parts'].append(result)

        mpu_response = s3.meta.client.complete_multipart_upload(Bucket=s3_bucket,
                                                                Key=s3_key,
                                                                UploadId=mpu['UploadId'],
                                                                MultipartUpload=parts_dict)
        return mpu_response
Example #12
def worker_init(model):
    global worker_model
    global worker_Y
    global worker_X
    global worker_U
    worker_model = model
    worker_Y = sa.attach(model.sharedprefix + 'Y_obs')
    try:
        worker_X = sa.attach(model.sharedprefix + 'X')
        worker_U = sa.attach(model.sharedprefix + 'U')
    except:
        worker_X = None
        worker_U = None
Example #13
File: dataset.py Project: astanic/hri
    def __init__(self, name, split, meta_data, use_shared_memory,
                 use_self_edges, n_children, n_atoms, debug, bw, seq_len):

        self.meta_data = meta_data
        self.use_self_edges = use_self_edges
        self.name = name
        self.n_children = n_children
        self.n_atoms = n_atoms

        if use_shared_memory:
            # Data is already in SharedArray in the RAM
            sa_name = name + split
            self.images = SharedArray.attach("shm://" + sa_name + '_images')
            loc = SharedArray.attach("shm://" + sa_name + '_loc')
            vel = SharedArray.attach("shm://" + sa_name + '_vel')
            self.edges = SharedArray.attach("shm://" + sa_name + '_edges')
            self.full_adj = SharedArray.attach("shm://" + sa_name + '_full_adj').copy()
        else:
            dataset_path = os.path.join(ROOT_DATA_DIR, name)
            # Load the data from the disk to the RAM
            dataset = torch.load(os.path.join(dataset_path, split + '.pt'))
            self.images = dataset['images']
            loc = dataset['loc']
            vel = dataset['vel']
            self.edges = dataset['edges']
            # full_adj is the adjacency matrix of the max possible graph
            # graphs of the samples in the dataset are generated by starting
            # from this full_adj matrix, and then dropping certain edges
            self.full_adj = dataset["full_adj"].copy()

        self.n_seq = self.images.shape[0]
        if debug:
            self.n_seq = self.n_seq // 10
            self.images = self.images[:self.n_seq]
            loc = loc[:self.n_seq]
            vel = vel[:self.n_seq]
            self.edges = self.edges[:self.n_seq]

        if seq_len < 50:
            self.images = self.images[:, :seq_len]
            loc = loc[:, :seq_len]
            vel = vel[:, :seq_len]

        if bw:
            self.images = np.amax(self.images, axis=-1, keepdims=True)

        self.hierarchy_nodes_list = create_hierarchy_nodes_list(
            self.name, self.n_children, self.n_atoms)

        self.preprocess_data(split, loc, vel)
Example #14
def load():
    avg, std = np.load('data/cqt_avg_std.npy')
    try:
        Xtr = sa.attach('shm://%s_Xtr' % (data_name))
        Ytr = sa.attach('shm://%s_Ytr' % (data_name))
    except:
        vadata = h5py.File('ex_data/' + data_name + '/va.h5', 'r')
        trdata = h5py.File('ex_data/' + data_name + '/tr.h5', 'r')
        Xtr = sa.create('shm://%s_Xtr' % (data_name), (trdata['x'].shape),
                        dtype='float32')
        Xtr[:] = trdata['x'][:]
        Ytr = sa.create('shm://%s_Ytr' % (data_name), (trdata['y'].shape),
                        dtype='float32')
        Ytr[:] = trdata['y'][:]

    return Xtr, Ytr, avg, std
Example #15
File: metrics.py Project: zxgdhd/musegan
    def load_data(filepath, location):
        """Load and return the training data."""
        print('[*] Loading data...')

        # Load data from SharedArray
        if location == 'sa':
            import SharedArray as sa
            data = sa.attach(filepath)

        # Load data from hard disk
        elif location == 'hd':
            if os.path.isabs(filepath):
                data = np.load(filepath)
            else:
                root = os.path.dirname(
                    os.path.dirname(os.path.realpath(__file__)))
                data = np.load(
                    os.path.abspath(
                        os.path.join(root, 'training_data', filepath)))

        else:
            raise ValueError("Unrecognized value for `location`")

        # Reshape data
        data = data.reshape(-1, config['num_timestep'], config['num_pitch'],
                            config['num_track'])

        return data
Example #16
    def __create_or_link(self, name, shape, type):
        try:
            data = SharedArray.attach("shm://%s" % name)
            return (data, True)
        except:
            data = SharedArray.create("shm://%s" % name, shape=shape, dtype=type)
            return (data, False)
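
A possible call site for __create_or_link, shown only as an illustration: the name, shape, dtype, and zero-fill below are assumptions, and numpy is assumed to be imported as np. The returned flag tells the caller whether the segment already existed, so only the creating process initializes it.

    def get_frame_buffer(self):
        frame, existed = self.__create_or_link('frame_buffer', (480, 640, 3), np.uint8)
        if not existed:
            frame[:] = 0        # first creator zero-fills; later callers just attach
        return frame
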
Example #17
def load_data():
    """Load and return the training data."""
    print('[*] Loading data...')

    # Load data from SharedArray
    if CONFIG['data']['training_data_location'] == 'sa':
        import SharedArray as sa
        x_train = sa.attach(CONFIG['data']['training_data'])

    # Load data from hard disk
    elif CONFIG['data']['training_data_location'] == 'hd':
        if os.path.isabs(CONFIG['data']['training_data']):
            x_train = np.load(CONFIG['data']['training_data'])
        else:
            # resolve the relative path against this script's directory
            filepath = os.path.abspath(os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'training_data',
                CONFIG['data']['training_data']))
            x_train = np.load(filepath)

    else:
        raise ValueError("Unrecognized value for `training_data_location`")

    # Reshape data
    x_train = x_train.reshape(
        -1, CONFIG['model']['num_bar'], CONFIG['model']['num_timestep'],
        CONFIG['model']['num_pitch'], CONFIG['model']['num_track'])
    print('Training set size:', len(x_train))

    return x_train
Example #18
    def __init__(self, security, db='bcolz', freq='day'):
        assert db in ['bcolz', 'shm']
        assert freq in ['day', 'minute']

        self.security = security
        self.db = db
        self.freq = freq

        if db == 'bcolz':
            if freq == 'day':
                p = jqdata.get_config().get_bcolz_day_path(security)
            else:
                p = jqdata.get_config().get_bcolz_minute_path(security)
            ct = retry_bcolz_open(p)
            self.table = _BenchTable(ct.cols['close'][:], ct.cols['factor'][:],
                                     ct.cols['date'][:])
        else:
            if freq == 'day':
                p = jqdata.get_config().get_day_shm_path(security.code)
                arr = SharedArray.attach("file://" + p, readonly=True)
                date_idx = 0
                close_idx = security.day_column_names.index('close') + 1
                factor_idx = security.day_column_names.index('factor') + 1
                self.table = _BenchTable(arr[:, close_idx], arr[:, factor_idx],
                                         arr[:, date_idx])
            else:
                raise Exception("unsupport db=shm, freq=minute")
Example #19
    def worker(self, worker_num, genome_location, block_size, verbose):
        initialize_result = magenta.initialize_device(worker_num)
        if initialize_result != 0:
            raise Exception('Initialization of CUDA device ' + str(worker_num) +
                            ' returned error code: ' + str(initialize_result))
        print("Initialization worked!")
        magenta.load_genome(genome_location, block_size, verbose)
        print("Genome loaded!")

        flag_array = sa.attach("shm://pore_flags")

        print("Shared memory flags attached")
        #DCT TODO: Get location of all 512 buffer locations in shared memory from wrapper
        print(len(self))
        while True:
            if len(self) > 0:
                print("In loop")
                try:
                    item, args, kwargs = self.pop()
                    args = args + (flag_array, )
                except Exception as e:
                    print(e)
                try:
                    #DCT TODO: Pass in buffer location for specific channel being used. Add this as an argument to the job
                    # print("Adding job to queue")
                    # sys.stdout.flush()
                    item(*args, **kwargs)
                except Exception as e:
                    print(e)
                    print("Timeout!")
Example #20
    def load_data(filepath, location):
        """Load and return the training data."""
        print('[*] Loading data...')

        # Load data from SharedArray
        if location == 'sa':
            import SharedArray as sa
            data = sa.attach(filepath)

        # Load data from hard disk
        elif location == 'hd':
            if os.path.isabs(filepath):
                data = np.load(filepath)
            else:
                root = os.path.dirname(os.path.dirname(
                    os.path.realpath(__file__)))
                data = np.load(os.path.abspath(os.path.join(
                    root, 'training_data', filepath)))

        else:
            raise ValueError("Unrecognized value for `location`")

        # Reshape data
        data = data.reshape(-1, config['num_timestep'], config['num_pitch'],
                            config['num_track'])

        return data
Example #21
def load_data():
    """Load and return the training data."""
    print('[*] Loading data...')

    # Load data from SharedArray
    if CONFIG['data']['training_data_location'] == 'sa':
        import SharedArray as sa
        x_train = sa.attach(CONFIG['data']['training_data'])

    # Load data from hard disk
    elif CONFIG['data']['training_data_location'] == 'hd':
        if os.path.isabs(CONFIG['data']['training_data']):
            x_train = np.load(CONFIG['data']['training_data'])
        else:
            # resolve the relative path against this script's directory
            filepath = os.path.abspath(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             'training_data',
                             CONFIG['data']['training_data']))
            x_train = np.load(filepath)

    else:
        raise ValueError("Unrecognized value for `training_data_location`")

    # Reshape data
    x_train = x_train.reshape(-1, CONFIG['model']['num_bar'],
                              CONFIG['model']['num_timestep'],
                              CONFIG['model']['num_pitch'],
                              CONFIG['model']['num_track'])
    print('Training set size:', len(x_train))

    return x_train
Example #22
File: main.py Project: jeansami/GAN
def load_data():
    """Load and return the training data."""
    print('[*] Loading data...')

    # Load data from SharedArray
    if CONFIG['data']['training_data_location'] == 'sa':
        import SharedArray as sa
        x_train = sa.attach(CONFIG['data']['training_data'])

    # Load data from hard disk
    elif CONFIG['data']['training_data_location'] == 'hd':
        if os.path.isabs(CONFIG['data']['training_data']):
            x_train = np.load(CONFIG['data']['training_data'])
        else:
            filepath = 'C:\\Users\\jeany\\AI\\musegan-master\\v2\\training_data\\lastfm_alternative_8b_phrase.npy'
            x_train = np.load(filepath)

    # Reshape data
    x_train = x_train.reshape(-1, CONFIG['model']['num_bar'],
                              CONFIG['model']['num_timestep'],
                              CONFIG['model']['num_pitch'],
                              CONFIG['model']['num_track'])
    print('Training set size:', len(x_train))

    return x_train
Example #23
def main():

    os.environ['CUDA_VISIBLE_DEVICES'] = config['gpu']
    config_tf = tf.ConfigProto()
    config_tf.gpu_options.allow_growth = True

    with tf.Session(config=config_tf) as sess:
        x_train = sa.attach(config['path_x'])
        y_train = sa.attach(config['path_y'])

        model_file = importlib.import_module(config['path_model'])
        model = model_file.Net(config)

        trainer = Trainer(sess, model, config)
        trainer.load()
        trainer.train(x_train, y_train)
Example #24
    def shard_array_to_s3_mp(self, array, indices, s3_bucket, s3_keys):
        """Shard array to S3 in parallel.

        :param ndarray array: array to be put into S3
        :param list indices: indices corresponding to the s3 keys
        :param str s3_bucket: S3 bucket to use
        :param list s3_keys: List of S3 keys corresponding to the indices.
        """
        def work_shard_array_to_s3(s3_key, index, array_name, s3_bucket):
            array = sa.attach(array_name)
            if sys.version_info >= (3, 5):
                data = bytes(array[index].data)
            else:
                data = bytes(np.ascontiguousarray(array[index]).data)

            if self.enable_compression:
                cctx = zstd.ZstdCompressor(level=9, write_content_size=True)
                data = cctx.compress(data)

            self.s3aio.s3io.put_bytes(s3_bucket, s3_key, data)

        array_name = '_'.join(['SA3IO', str(uuid.uuid4()), str(os.getpid())])
        sa.create(array_name, shape=array.shape, dtype=array.dtype)
        shared_array = sa.attach(array_name)
        shared_array[:] = array
        results = self.pool.map(work_shard_array_to_s3, s3_keys, indices,
                                repeat(array_name), repeat(s3_bucket))

        sa.delete(array_name)
Example #25
def f_load(m_name, fp):
    try:
        out = sa.attach(m_name)
    except:
        out = np.load(fp)
        X = sa.create(m_name, (out.shape), dtype='float32')
        X[:] = out
    return out.astype('float32')
Example #26
    def shm_step(args):
        ind, tstep = args
        shm_y = SharedArray.attach('shm://y')
        shm_t = SharedArray.attach('shm://t')
        shm_i_app = SharedArray.attach('shm://i_app')
        shm_V = SharedArray.attach('shm://V')
        sol_init.y[:, -1] = shm_y[:, ind]
        sol_init.t[-1] = shm_t[ind]
        inputs = {"Current": shm_i_app[ind]}
        step_solution = step_solver.step(sol_init,
                                         model,
                                         dt=dt,
                                         npts=2,
                                         inputs=inputs,
                                         save=False)
        shm_y[:, ind] = step_solution.y[:, -1]
        shm_t[ind] = step_solution.t[-1]
Example #27
    async def get_data_pointer(self):
        while self.data is None:
            try:
                self.data = sa.attach(
                    "shm://env_state")  # FileNotFoundError TODO
            except FileNotFoundError as e:
                pass
            await asyncio.sleep(1)
Example #28
async def generator(request):
    frame = sa.attach(frame_buffer)
    current = frame.copy()

    detection_queue = Topics.start_listener(TopicNames.detection)
    detection_sub = CallbackListener(detection_queue, daemon=True, run=True)

    fails = 0

    while True:
        # oh boy : D, web dev is so eezy
        # https://github.com/encode/starlette/pull/320
        # https://github.com/encode/starlette/issues/297
        # https://github.com/tiangolo/fastapi/issues/410
        if await request.is_disconnected():
            print("video stream disconnected : D")
            detection_sub.running = False
            break
        if not np.array_equal(current, frame):
            # print("frame")
            fails = 0
            current = frame.copy()
            if TopicNames.detection in detection_sub.messages:
                last_detection: DetectionPacket = detection_sub.messages[
                    TopicNames.detection]

                for face in last_detection.points:
                    cv.rectangle(current, (face.x, face.y),
                                 (face.x + face.w, face.y + face.h),
                                 face.color, 2)
                    cv.putText(
                        current,
                        '%d-%d   %.1f, %.1f' %
                        (face.pid, face.age, face.dx, face.dy),
                        (face.x, face.y - 8),
                        cv.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        (255, 255, 0),
                        1,
                        cv.LINE_AA,
                    )
                    cv.putText(
                        current,
                        'ms %d' % face.ms,
                        (face.x + 4, face.y - 8 + face.h),
                        cv.FONT_HERSHEY_SIMPLEX,
                        0.5,
                        (255, 255, 0),
                        1,
                        cv.LINE_AA,
                    )
            ret, jpg = cv.imencode('.jpg', current,
                                   (cv.IMWRITE_JPEG_QUALITY, 60))
            yield (b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' +
                   jpg.tobytes() + b'\r\n')
        else:
            fails += 1
        await asyncio.sleep(0.032 if fails < 5 else 1)
Example #29
    def work_get(block_number, array_name, s3_bucket, s3_key, s3_max_size, block_size):
        start = block_number * block_size
        end = (block_number + 1) * block_size
        if end > s3_max_size:
            end = s3_max_size
        d = self.get_byte_range(s3_bucket, s3_key, start, end, True)
        # d = np.frombuffer(d, dtype=np.uint8, count=-1, offset=0)
        shared_array = sa.attach(array_name)
        shared_array[start:end] = d
Example #30
    def work_load_data(array_name, index, datasets):
        data = sa.attach(array_name)
        _fuse_measurement(
            data[index],
            datasets,
            geobox,
            measurement,
            fuse_func=fuse_func,
            skip_broken_datasets=skip_broken_datasets)
Example #31
def attach_or_create(name):
    name = "shm://" + name

    try:
        return SharedArray.attach(name)
    except Exception:
        pass

    return SharedArray.create(name, BUFFER_SIZE, dtype=np.uint8)
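
A short usage sketch for attach_or_create; the array name and the bytes written are illustrative, and BUFFER_SIZE is assumed to be defined next to the function:

buf = attach_or_create('frame_buffer')       # first caller creates, later callers attach
buf[:4] = (1, 2, 3, 4)                       # every attached process sees the same bytes
print(attach_or_create('frame_buffer')[:4])
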
Example #32
def get_silhouette2(data):
    dist_buf, tag = data
    s = np.unique(tag)
    if 2 <= s.size < tag.shape[0]:
        dist = sa.attach(dist_buf)
        ss = silhouette_score(dist.astype(float), tag, metric='precomputed')
        return ss
    else:
        return 0.
Example #33
def load_data(data_source, data_filename):
    """Load and return the training data."""
    if data_source == 'sa':
        import SharedArray as sa
        return sa.attach(data_filename)
    if data_source == 'npy':
        return load_data_from_npy(data_filename)
    if data_source == 'npz':
        return load_data_from_npz(data_filename)
    raise ValueError("Expect `data_source` to be one of 'sa', 'npy', 'npz'. "
                     "But get " + str(data_source))
Example #34
def animation_system():
    static_data = square
    static_data_attributes = VAO.TEXTURED_DATA_2D

    instance_data = sa.attach('animation_gfx')
    instance_data_attributes = ANIMATION_DATA

    shader = boa_gfx.gl_shader.shader_manager.get_shader('animation.shader')
    texture = TextureArray.from_directory('./textures/animations')

    system = ParticleSystem(static_data, static_data_attributes,
                            instance_data, instance_data_attributes,
                            shader, texture)
    return system
Example #35
def creature_system():
    static_data = square
    static_data_attributes = VAO.TEXTURED_DATA_2D

    instance_data = sa.attach('creature_gfx')
    instance_data_attributes = CREATURE_DATA

    shader = boa_gfx.gl_shader.shader_manager.get_shader('creature.shader')
    texture = TextureArray.from_directory('./textures/creatures')

    system = ParticleSystem(static_data, static_data_attributes,
                            instance_data, instance_data_attributes,
                            shader, texture)
    return system
Example #36
def food_system():
    static_data = square
    static_data_attributes = VAO.TEXTURED_DATA_2D

    instance_data = sa.attach('food_gfx')
    instance_data_attributes = FOOD_DATA

    shader = boa_gfx.gl_shader.shader_manager.get_shader('food.shader')
    texture = Texture.from_file('./textures/food/food.png')

    system = ParticleSystem(static_data, static_data_attributes,
                            instance_data, instance_data_attributes,
                            shader, texture)
    return system
Example #37
def load_shared(args):
    i, array_name, fname, kwargs = args
    array = SharedArray.attach(array_name)
    array[i] = data.load_augment(fname, **kwargs)
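
This generic worker fills one row of a pre-allocated shared array per call. A minimal driver sketch; the file list, array shape, and empty kwargs are illustrative assumptions (the real kwargs depend on data.load_augment):

import uuid
import multiprocessing as mp

import SharedArray as sa

fnames = ['img_000.png', 'img_001.png', 'img_002.png']    # illustrative file list
array_name = 'shm://' + uuid.uuid4().hex                  # unique name per run
images = sa.create(array_name, (len(fnames), 3, 448, 448), dtype='float32')

jobs = [(i, array_name, fname, {}) for i, fname in enumerate(fnames)]
with mp.Pool(2) as pool:
    pool.map(load_shared, jobs)    # each worker writes images[i] in place

# ... consume images ...
sa.delete(array_name)              # release the shared segment when finished
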
Example #38
    def add_data_sa(self, path_new, key='train'):
        self.x[key] = sa.attach(path_new)
        print('data size:', self.x[key].shape)