    def transform(self, Xb, yb):
        shared_array_name = str(uuid4())
        try:
            # Allocate a shared NCHW batch buffer that the worker processes fill.
            shared_array = SharedArray.create(
                shared_array_name,
                [len(Xb), 3, self.config.get('w'), self.config.get('h')],
                dtype=np.float32)

            fnames, labels = super(SharedIterator, self).transform(Xb, yb)
            args = []

            for i, fname in enumerate(fnames):
                kwargs = {k: self.config.get(k) for k in ['w', 'h']}
                if not self.deterministic:
                    kwargs.update({k: self.config.get(k) 
                                   for k in ['aug_params', 'sigma']})
                kwargs['transform'] = getattr(self, 'tf', None)
                kwargs['color_vec'] = getattr(self, 'color_vec', None)
                args.append((i, shared_array_name, fname, kwargs))

            self.pool.map(load_shared, args)
            Xb = np.array(shared_array, dtype=np.float32)

        finally:
            SharedArray.delete(shared_array_name)

        if labels is not None:
            labels = labels[:, np.newaxis]

        return Xb, labels
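Example #2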
def find_border_points(indices):
    params = sa.attach('shm://params')
    eps = params[0]
    minpts = params[1]
    features = sa.attach("shm://features")
    core_points_index = sa.attach("shm://core_points")
    start_index = indices[0]
    end_index = indices[1]
    print('finding border points: entered thread', start_index, end_index)

    sample = features[start_index:end_index + 1, :]

    border_points = []

    for point_index in range(sample.shape[0]):
        if point_index % 1000 == 0:
            gc.collect()
            print(start_index, " collecting garbage, at index: ", point_index,
                  ' remaining indices: ', sample.shape[0] - point_index)
        point = sample[point_index, :]
        distances = np.sqrt(np.sum((features - point)**2, axis=1))
        # distances is computed against the full feature array, so argwhere
        # already yields global row indices; no start_index offset is needed
        candidates = np.argwhere(distances <= eps)
        # a border point lies within eps of a core point but is not itself one
        if np.intersect1d(candidates, core_points_index).shape[0] >= 1 \
                and (start_index + point_index) not in core_points_index:
            border_points.append(start_index + point_index)
        del distances
    print('exiting border finding: ', start_index, end_index,
          end_index - start_index + 1, '\n\n')

    return border_points
Example #3
    def run(self):
        """
        # TODO: write description
        """
        try:
            self.t0 = time.time()
            self.t1 = self.t0
            q = self.channel.queue_declare(queue='detector')
            self.channel.queue_declare(queue='time_logs')
            # Crude backpressure: wait if the detector queue is backed up.
            if q.method.message_count >= 59:
                time.sleep(1)

            frame_num, timestamp, images_list = self.batch_generator.__next__()
            self.log_time("Took next batch:")

            sh_mem_address = f"shm://{self.module_name}_{frame_num}"
            try:
                shared_mem = sa.create(sh_mem_address, np.shape(images_list))
            except FileExistsError:
                # A stale segment from a previous run: drop and recreate it.
                sa.delete(sh_mem_address)
                shared_mem = sa.create(sh_mem_address, np.shape(images_list))
            self.log_time('Created shared memory')
            shared_mem[:] = np.array(images_list)
            self.log_time('Copied to shared memory:')
            self.channel.basic_publish(exchange='',
                                       routing_key='detector',
                                       body=sh_mem_address)

            self.log_time('Published message:')
            ########################################################################
            del frame_num, timestamp, images_list

            self.log_time('Full time:', from_start=True)
        except StopIteration:  # no more frames left in videos_provider
            print('stop iter')
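The consuming side is not shown here; a minimal pika sketch under the same conventions, assuming `channel` is an open blocking channel and that the consumer frees each segment after copying the batch out (`run_detector` is a hypothetical downstream step):

import numpy as np
import SharedArray as sa

def on_batch(channel, method, properties, body):
    address = body.decode()                # "shm://<module_name>_<frame_num>"
    images = np.array(sa.attach(address))  # copy the batch out of shared memory
    sa.delete(address)                     # free the segment once copied
    run_detector(images)                   # hypothetical downstream processing

channel.basic_consume(queue='detector', on_message_callback=on_batch,
                      auto_ack=True)
channel.start_consuming()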
Example #4
    def __create_or_link(self, name, shape, dtype):
        try:
            # Attach if another process has already created the segment.
            data = SharedArray.attach("shm://%s" % name)
            return (data, True)
        except FileNotFoundError:
            data = SharedArray.create("shm://%s" % name, shape=shape, dtype=dtype)
            return (data, False)
Example #5
def load_shared(args):
    i, array_fundus_rescale, array_fundus_rescale_mean_subtract, fundus, is_train = args
    array_fundus_rescale = SharedArray.attach(array_fundus_rescale)
    array_fundus_rescale_mean_subtract = SharedArray.attach(
        array_fundus_rescale_mean_subtract)
    array_fundus_rescale[i], array_fundus_rescale_mean_subtract[i] = \
        utils.load_augmented([fundus], is_train)
Example #7
def make_dataset(data_path, mode=None):
    try:
        mixture_array = sa.attach(f"shm://{mode}_mixture_array")
        vocal_array = sa.attach(f"shm://{mode}_vocal_array")

    except FileNotFoundError:
        mus = musdb.DB(root=data_path, is_wav=True, subsets=mode)
        mixture_list = list()
        vocal_list = list()
        for track in tqdm(mus):
            #mixture_list.append(track.audio.sum(axis=-1))
            mixture_list.append(norm(track.audio)[0])
            #vocal_list.append(track.targets['vocals'].audio.sum(axis=-1))
            vocal_list.append(norm(track.targets['vocals'].audio)[0])
        mixture_array = np.concatenate(mixture_list)
        vocal_array = np.concatenate(vocal_list)

        assert mixture_array.shape == vocal_array.shape

        mixture_array_sa = sa.create(f"shm://{mode}_mixture_array",
                                     mixture_array.shape)
        vocal_array_sa = sa.create(f"shm://{mode}_vocal_array",
                                   vocal_array.shape)
        mixture_array_sa[:] = mixture_array
        vocal_array_sa[:] = vocal_array

    return dict(mixture_array=mixture_array, vocal_array=vocal_array)
Example #8
def load_shared(args):
    i, array_fundus_name, array_vessel_name, array_lm_name, fundus, vessel, lm, is_train = args
    array_fundus = SharedArray.attach(array_fundus_name)
    array_vessel = SharedArray.attach(array_vessel_name)
    array_lm = SharedArray.attach(array_lm_name)
    array_fundus[i], array_vessel[i], array_lm[i] = utils.load_augmented_lm(
        fundus, vessel, lm, augment=is_train)
Example #9
    def __exit__(self, *args):

        for array in self._shared:
            try:
                sa.delete(array)
            except FileNotFoundError:
                pass
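Example #10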
    def __init__(self, file_path, file_name):
        print('class', DBSCAN.eps, DBSCAN.minpts)
        self.core_points = []
        self.core_point_labels = []
        self.core_points_index = []
        self.border_points_index = []
        self.border_points = []
        self.border_point_labels = []
        self.noise_points = []
        # self.nearest_neighbours = {}      # use for small values, space complexity is O(n^2)
        self.n_threads = cpu_count()
        self.features = []
        self.labels = []
        self.features, self.labels = process_dataset(file_path, file_name)
        # limit the size of the dataset
        size = 10000
        self.features = self.features[:size, :]
        self.labels = self.labels[:size]
        print('features: \n', self.features.shape)
        try:
            sa.delete("shm://features")
        except FileNotFoundError:
            print('no existing shared features array to delete')
        self.shared_memory = sa.create("shm://features", self.features.shape)

        # copy the array into shared memory in one vectorized assignment
        self.shared_memory[:] = self.features
        self.clusters = []
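Example #11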
def find_core_points(indices):
    params = sa.attach('shm://params')
    eps = params[0]
    minpts = params[1]
    features = sa.attach("shm://features")
    start_index = indices[0]
    end_index = indices[1]
    sample = features[start_index:end_index + 1, :]
    print('entered thread for core points, start index: ', start_index,
          ' end index: ', end_index, ' sample shape ', sample.shape)
    core_points = []
    # nearest_neighbours = {}
    for point_index in range(sample.shape[0]):
        if point_index % 1000 == 0:
            gc.collect()
            print(start_index, 'collected garbage, at index:', point_index,
                  'remaining indices:', sample.shape[0] - point_index)
        point = sample[point_index, :]
        distances = np.sqrt(np.sum((features - point)**2, axis=1))
        candidates = np.argwhere(distances <= eps)
        if candidates.shape[0] > minpts:
            core_points.append(start_index + point_index)
            # nearest_neighbours[start_index+point_index] = set(list(candidates.flatten()))
    print('core point search complete, exiting: ', start_index, end_index,
          '\n\n')
    features = None
    start_index = None
    end_index = None
    sample = None
    gc.collect()
    # return core_points, nearest_neighbours
    return (core_points, {})
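The worker above assumes a driver process has already published 'shm://params' and 'shm://features' and split the row range across workers. A minimal driver sketch under those assumptions (the chunking and clean-up strategy are illustrative, not from the source):

from multiprocessing import Pool, cpu_count

import numpy as np
import SharedArray as sa

def run_core_point_search(features, eps, minpts):
    # Publish the parameters and features that the workers attach to.
    params = sa.create("shm://params", 2)
    params[0], params[1] = eps, minpts
    shared = sa.create("shm://features", features.shape)
    shared[:] = features

    # One inclusive (start, end) row range per worker.
    n = features.shape[0]
    bounds = np.linspace(0, n, cpu_count() + 1, dtype=int)
    ranges = [(s, e - 1) for s, e in zip(bounds[:-1], bounds[1:]) if e > s]

    try:
        with Pool(len(ranges)) as pool:
            results = pool.map(find_core_points, ranges)
        core_points = [p for chunk, _ in results for p in chunk]
    finally:
        sa.delete("shm://params")
        sa.delete("shm://features")
    return core_points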
Example #12
    def delete_created_arrays(self):
        """Delete all created shared memory arrays.

          Arrays are prefixed by 'S3' or 'DCCORE'.
        """
        for a in self.list_created_arrays():
            sa.delete(a)
Example #13
def main():
    """Main function"""
    filepath, name, prefix, dtype = parse_arguments()

    if name is None:
        name = os.path.splitext(os.path.basename(filepath))[0]
        if prefix is not None:
            name = prefix + '_' + name

    print("Loading data from '{}'.".format(filepath))
    if filepath.endswith('.npy'):
        data = np.load(filepath)
        data = data.astype(dtype)
        print("Saving data to shared memory.")
        sa.delete(name)
        sa_array = sa.create(name, data.shape, data.dtype)
        np.copyto(sa_array, data)
    else:
        with np.load(filepath) as loaded:
            print("Saving data to shared memory.")
            sa_array = sa.create(name, loaded['shape'], dtype)
            # index with a tuple of per-dimension index arrays
            sa_array[tuple(loaded['nonzero'])] = True

    print("Successfully saved: (name='{}', shape={}, dtype={})".format(
        name, sa_array.shape, sa_array.dtype))
Example #14
def to_shared_memory(obj, name):
    logging.info("Writing to shared memory %s" % name)
    meta_information = {}
    for property_name in obj.properties:
        data = getattr(obj, property_name)

        if data is None:
            data = np.zeros(0)

        # Wrap single ints in arrays
        if data.shape == ():
            data = np.array([data], dtype=data.dtype)

        data_type = data.dtype
        data_shape = data.shape
        meta_information[property_name] = (data_type, data_shape)

        # Make shared memory and copy data to buffer
        #logging.info("Field %s has shape %s and type %s" % (property_name, data_shape, data_type))
        try:
            sa.delete(name + "_" + property_name)
            logging.info("Deleted already shared memory")
        except FileNotFoundError:
            logging.info("No existing shared memory, can create new one")

        shared_array = sa.create(name + "_" + property_name, data_shape, data_type)
        shared_array[:] = data

    with open(name + "_meta.shm", "wb") as f:
        pickle.dump(meta_information, f)
    logging.info("Done writing to shared memory")
Example #15
    def shard_array_to_s3_mp(self, array, indices, s3_bucket, s3_keys):
        """Shard array to S3 in parallel.

        :param ndarray array: array to be put into S3
        :param list indices: indices corresponding to the s3 keys
        :param str s3_bucket: S3 bucket to use
        :param list s3_keys: List of S3 keys corresponding to the indices.
        """
        def work_shard_array_to_s3(s3_key, index, array_name, s3_bucket):
            array = sa.attach(array_name)
            if sys.version_info >= (3, 5):
                data = bytes(array[index].data)
            else:
                data = bytes(np.ascontiguousarray(array[index]).data)

            if self.enable_compression:
                cctx = zstd.ZstdCompressor(level=9, write_content_size=True)
                data = cctx.compress(data)

            self.s3aio.s3io.put_bytes(s3_bucket, s3_key, data)

        array_name = generate_array_name('SA3IO')
        sa.create(array_name, shape=array.shape, dtype=array.dtype)
        shared_array = sa.attach(array_name)
        shared_array[:] = array
        results = self.pool.map(work_shard_array_to_s3, s3_keys, indices,
                                repeat(array_name), repeat(s3_bucket))

        sa.delete(array_name)
Example #16
    def transform(self, Xb, yb):
        shared_array_name = str(uuid4())
        fnames, labels = Xb, yb
        args = []
        da_args = self.da_args()
        for i, fname in enumerate(fnames):
            args.append((i, shared_array_name, fname, da_args))

        if self.num_image_channels is None:
            test_img = data.load_augment(fnames[0], **da_args)
            self.num_image_channels = test_img.shape[-1]

        try:
            shared_array = SharedArray.create(
                shared_array_name,
                [len(Xb), self.w, self.h, self.num_image_channels],
                dtype=np.float32)

            self.pool.map(load_shared, args)
            Xb = np.array(shared_array, dtype=np.float32)

        finally:
            SharedArray.delete(shared_array_name)

        # if labels is not None:
        #     labels = labels[:, np.newaxis]

        return Xb, labels
Example #17
def create_new_sa_array(name, shape, dtype):
    try:
        sa.delete(name)
    except FileNotFoundError:
        pass
    finally:
        sa_array = sa.create(name, shape, dtype=dtype)
    return sa_array
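A quick usage sketch (the array name and shape are illustrative; `sa` is `import SharedArray as sa`, as elsewhere on this page):

import numpy as np

# Producer: (re)create the segment and fill it.
scores = create_new_sa_array("shm://scores", (4, 4), np.float32)
scores[:] = np.eye(4, dtype=np.float32)

# Consumer, possibly in another process: attach by name, zero-copy.
view = sa.attach("shm://scores")
assert view[0, 0] == 1.0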
Example #18
def CreateShared(Name, shape, dtype):
    try:
        a = SharedArray.create(Name, shape, dtype=dtype)
    except OSError:
        print(ModColor.Str("File %s exists, deleting" % Name), file=log)
        DelArray(Name)
        a = SharedArray.create(Name, shape, dtype=dtype)
    return a
Example #19
def f_load(m_name, fp):
    try:
        out = sa.attach(m_name)
    except FileNotFoundError:
        out = np.load(fp)
        X = sa.create(m_name, out.shape, dtype='float32')
        X[:] = out
    return out.astype('float32')
Example #20
def Lock(array):
    global _locking
    if _locking:
        try:
            SharedArray.mlock(array)
        except Exception:
            print("Warning: Cannot lock memory. Try updating your kernel "
                  "security settings.", file=log)
            _locking = False
Example #22
def __dist_wrapper(data):
    func, mat_buf, dist_buf, s, e, start, allowed_missing = data
    mat = sa.attach(mat_buf)
    dist = sa.attach(dist_buf)
    if e > s:
        d = func(mat[:, 1:], s, e, allowed_missing)
        dist[s:e] = d
    del mat, dist
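Example #23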
def load_shared(args):
    i, fundus_array_name, vessel_array_name, seg_array_name, augment, fundus_fname, vessel_fname, seg_fname = args
    fundus_array = SharedArray.attach(fundus_array_name)
    vessel_array = SharedArray.attach(vessel_array_name)
    seg_array = SharedArray.attach(seg_array_name)
    fundus_array[i], vessel_array[i], seg_array[i] = \
        utils.load_augmented_fundus_vessel([fundus_fname], [vessel_fname],
                                           [seg_fname], augment)
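Example #24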
def shm_step(step):
    shm_y = SharedArray.attach("shm://y")
    shm_t = SharedArray.attach("shm://t")
    shm_i_app = SharedArray.attach("shm://i_app")
    ind = step.ind
    inputs = {"Current": shm_i_app[ind]}
    step_solution = step.do_step(shm_y[:, ind], shm_t[ind], inputs)
    shm_y[:, ind] = step_solution.y[:, -1]
    shm_t[ind] = step_solution.t[-1]
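shm_step assumes 'shm://y', 'shm://t' and 'shm://i_app' already exist; a setup sketch under that assumption (the shapes and helper name are illustrative):

import numpy as np
import SharedArray as sa

def create_step_arrays(n_states, n_steps, i_app):
    # One state column per step, plus matching time and applied-current slots.
    shm_y = sa.create("shm://y", (n_states, n_steps))
    shm_t = sa.create("shm://t", n_steps)
    shm_i_app = sa.create("shm://i_app", n_steps)
    shm_i_app[:] = i_app
    return shm_y, shm_t, shm_i_app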
Example #25
def attach_or_create(name):
    name = "shm://" + name

    try:
        return SharedArray.attach(name)
    except FileNotFoundError:
        pass

    return SharedArray.create(name, BUFFER_SIZE, dtype=np.uint8)
Example #26
    def init_mem(self, create=False):
        self.is_shared_memory = True
        import SharedArray as sa
        num_samples = int(self.BUFFER_DURATION * self.fs)
        if create:
            self.data = sa.create(self.SHARED_MEM_NAME, num_samples,
                                  np.float32)
            atexit.register(self.cleanup_mem)  # Run cleanup on exit
        else:
            self.data = sa.attach(self.SHARED_MEM_NAME)
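Example #27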
def load_shared(args):
    (i, array_ex, array_he, array_ma, array_se,
     array_fundus_rescale_mean_subtract, fundus, features_home, is_train) = args
    array_fundus_rescale_mean_subtract = SharedArray.attach(
        array_fundus_rescale_mean_subtract)
    array_ex = SharedArray.attach(array_ex)
    array_he = SharedArray.attach(array_he)
    array_ma = SharedArray.attach(array_ma)
    array_se = SharedArray.attach(array_se)
    (array_ex[i], array_he[i], array_ma[i], array_se[i],
     array_fundus_rescale_mean_subtract[i]) = utils.load_features_fundus(
        [fundus], feature_shape_ex_he, feature_shape_ma, feature_shape_se,
        features_home, is_train)
Example #28
def Unlock(array):
    global _locking
    if _locking:
        try:
            SharedArray.munlock(array)
        except Exception:
            print(
                "Warning: Cannot unlock memory. Try updating your kernel "
                "security settings.",
                file=log)
            _locking = False
Example #29
    def transform(self, fundus, vessel, coords):
        shared_array_fundus_name = str(uuid4())
        shared_array_vessel_name = str(uuid4())
        shared_array_lm_name = str(uuid4())
        try:
            shared_array_fundus = SharedArray.create(
                shared_array_fundus_name, [len(fundus), img_h, img_w, 3],
                dtype=np.float32)
            shared_array_vessel = SharedArray.create(
                shared_array_vessel_name, [len(fundus), img_h, img_w, 1],
                dtype=np.float32)
            shared_array_lm = SharedArray.create(shared_array_lm_name,
                                                 [len(fundus), 4],
                                                 dtype=np.float32)

            args = []

            for i in range(len(fundus)):
                args.append((i, shared_array_fundus_name,
                             shared_array_vessel_name, shared_array_lm_name,
                             fundus[i], vessel[i], coords[i], self.is_train))

            self.pool.map(load_shared, args)
            fundus_img = np.array(shared_array_fundus, dtype=np.float32)
            vessel_img = np.array(shared_array_vessel, dtype=np.float32)
            coords_arr = np.array(shared_array_lm, dtype=np.float32)
        finally:
            SharedArray.delete(shared_array_fundus_name)
            SharedArray.delete(shared_array_vessel_name)
            SharedArray.delete(shared_array_lm_name)

        return fundus_img, vessel_img, coords_arr, fundus
Example #30
    def transform(self, fundus, vessel, grade):
        shared_array_fundus_mean_subt_name = str(uuid4())
        shared_array_fundus_z_name = str(uuid4())
        shared_array_vessel_name = str(uuid4())
        
        try:
            shared_array_fundus_mean_subt = SharedArray.create(
                shared_array_fundus_mean_subt_name, [len(fundus), img_h, img_w, 3], dtype=np.float32)
            shared_array_fundus_z = SharedArray.create(
                shared_array_fundus_z_name, [len(fundus), img_h, img_w, 3], dtype=np.float32)
            shared_array_vessel = SharedArray.create(
                shared_array_vessel_name, [len(fundus), img_h, img_w, 1], dtype=np.float32)
            
            n_grades = len(grade)
            if self.grade_type == "DR":
                grade_onehot = np.zeros((n_grades, n_grade_dr))
            elif self.grade_type == "DME":
                grade_onehot = np.zeros((n_grades, n_grade_dme))
            else:
                raise ValueError("unknown grade_type: %s" % self.grade_type)
            for i in range(n_grades):
                grade_onehot[i, grade[i]] = 1

            args = []
            for i, _ in enumerate(fundus):
                args.append((i, shared_array_fundus_mean_subt_name,
                             shared_array_fundus_z_name,
                             shared_array_vessel_name, fundus[i], vessel[i],
                             self.is_train, self.normalize))

            self.pool.map(load_shared, args)
            fundus_mean_subt_img = np.array(shared_array_fundus_mean_subt, dtype=np.float32)
            fundus_z_img = np.array(shared_array_fundus_z, dtype=np.float32)
            vessel_img = np.array(shared_array_vessel, dtype=np.float32)
        finally:
            SharedArray.delete(shared_array_fundus_mean_subt_name)
            SharedArray.delete(shared_array_fundus_z_name)
            SharedArray.delete(shared_array_vessel_name)

        return fundus, fundus_mean_subt_img, fundus_z_img, vessel_img, grade_onehot
Example #31
    def put_bytes_mpu_mp_shm(self, s3_bucket, s3_key, array_name, block_size, new_session=False):
        """Put bytes into a S3 object using Multi-Part upload in parallel with shared memory

        :param str s3_bucket: name of the s3 bucket.
        :param str s3_key: name of the s3 key.
        :param str array_name: name of the shared memory array holding the data.
        :param int block_size: block size for upload.
        :param bool new_session: Flag to create a new session or reuse existing session.
            True: create new session
            False: reuse existing session
        :return: Multi-part upload response
        """

        def work_put_shm(block_number, array_name, s3_bucket, s3_key, block_size, mpu):
            part_number = block_number + 1
            start = block_number * block_size
            end = (block_number + 1) * block_size
            shared_array = sa.attach(array_name)
            data_chunk = io.BytesIO(shared_array.data[start:end])

            s3 = boto3.session.Session().resource('s3')
            # s3 = boto3.resource('s3')
            response = s3.meta.client.upload_part(Bucket=s3_bucket,
                                                  Key=s3_key,
                                                  UploadId=mpu['UploadId'],
                                                  PartNumber=part_number,
                                                  Body=data_chunk)

            return dict(PartNumber=part_number, ETag=response['ETag'])

        if not self.enable_s3:
            data = sa.attach(array_name)
            return self.put_bytes(s3_bucket, s3_key, data, new_session)

        s3 = self.s3_resource(new_session)

        mpu = s3.meta.client.create_multipart_upload(Bucket=s3_bucket, Key=s3_key)

        shared_array = sa.attach(array_name)
        num_blocks = int(np.ceil(shared_array.nbytes / float(block_size)))
        parts_dict = dict(Parts=[])
        blocks = range(num_blocks)

        results = self.pool.map(work_put_shm, blocks, repeat(array_name), repeat(s3_bucket),
                                repeat(s3_key), repeat(block_size), repeat(mpu))

        for result in results:
            parts_dict['Parts'].append(result)

        mpu_response = s3.meta.client.complete_multipart_upload(Bucket=s3_bucket,
                                                                Key=s3_key,
                                                                UploadId=mpu['UploadId'],
                                                                MultipartUpload=parts_dict)
        return mpu_response
Example #32
def get_publisher(channel: str, shape: tuple, dtype) -> np.ndarray:
    # Create an array in shared memory.
    short_name = channel.split("://")[-1]
    mapping = {e.name.decode(): e for e in sa.list()}
    if short_name in mapping:
        array = mapping[short_name]
        if array.dtype == dtype and array.dims == shape:
            return sa.attach(channel)
        sa.delete(short_name)

    return sa.create(channel, shape, dtype)
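The subscribing side only needs an attach; a minimal round-trip sketch, assuming the publisher above has already created the channel (names are illustrative):

import numpy as np
import SharedArray as sa

def get_subscriber(channel: str) -> np.ndarray:
    # Attach to the array the publisher created; writes are shared.
    return sa.attach(channel)

pub = get_publisher("shm://poses", (16, 3), np.float32)
sub = get_subscriber("shm://poses")
pub[0] = 1.0  # visible to every attached process
assert sub[0, 0] == 1.0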
Example #33
    def load_data(filepath, location):
        """Load and return the training data."""
        print('[*] Loading data...')

        # Load data from SharedArray
        if location == 'sa':
            import SharedArray as sa
            data = sa.attach(filepath)

        # Load data from hard disk
        elif location == 'hd':
            if os.path.isabs(filepath):
                data = np.load(filepath)
            else:
                root = os.path.dirname(os.path.dirname(
                    os.path.realpath(__file__)))
                data = np.load(os.path.abspath(os.path.join(
                    root, 'training_data', filepath)))

        else:
            raise ValueError("Unrecognized value for `location`")

        # Reshape data
        data = data.reshape(-1, config['num_timestep'], config['num_pitch'],
                            config['num_track'])

        return data
Example #34
def load_data():
    """Load and return the training data."""
    print('[*] Loading data...')

    # Load data from SharedArray
    if CONFIG['data']['training_data_location'] == 'sa':
        import SharedArray as sa
        x_train = sa.attach(CONFIG['data']['training_data'])

    # Load data from hard disk
    elif CONFIG['data']['training_data_location'] == 'hd':
        if os.path.isabs(CONFIG['data']['training_data']):
            x_train = np.load(CONFIG['data']['training_data'])
        else:
            filepath = os.path.abspath(os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'training_data',
                CONFIG['data']['training_data']))
            x_train = np.load(filepath)

    # Reshape data
    x_train = x_train.reshape(
        -1, CONFIG['model']['num_bar'], CONFIG['model']['num_timestep'],
        CONFIG['model']['num_pitch'], CONFIG['model']['num_track'])
    print('Training set size:', len(x_train))

    return x_train
Example #35
def load_data(data_source, data_filename):
    """Load and return the training data."""
    if data_source == 'sa':
        import SharedArray as sa
        return sa.attach(data_filename)
    if data_source == 'npy':
        return load_data_from_npy(data_filename)
    if data_source == 'npz':
        return load_data_from_npz(data_filename)
    raise ValueError("Expect `data_source` to be one of 'sa', 'npy', 'npz'. "
                     "But get " + str(data_source))
Example #36
def animation_system():
    static_data = square
    static_data_attributes = VAO.TEXTURED_DATA_2D

    instance_data = sa.attach('animation_gfx')
    instance_data_attributes = ANIMATION_DATA

    shader = boa_gfx.gl_shader.shader_manager.get_shader('animation.shader')
    texture = TextureArray.from_directory('./textures/animations')

    system = ParticleSystem(static_data, static_data_attributes,
                            instance_data, instance_data_attributes,
                            shader, texture)
    return system
Example #37
def creature_system():
    static_data = square
    static_data_attributes = VAO.TEXTURED_DATA_2D

    instance_data = sa.attach('creature_gfx')
    instance_data_attributes = CREATURE_DATA

    shader = boa_gfx.gl_shader.shader_manager.get_shader('creature.shader')
    texture = TextureArray.from_directory('./textures/creatures')

    system = ParticleSystem(static_data, static_data_attributes,
                            instance_data, instance_data_attributes,
                            shader, texture)
    return system
Example #38
def food_system():
    static_data = square
    static_data_attributes = VAO.TEXTURED_DATA_2D

    instance_data = sa.attach('food_gfx')
    instance_data_attributes = FOOD_DATA

    shader = boa_gfx.gl_shader.shader_manager.get_shader('food.shader')
    texture = Texture.from_file('./textures/food/food.png')

    system = ParticleSystem(static_data, static_data_attributes,
                            instance_data, instance_data_attributes,
                            shader, texture)
    return system
Example #39
def main():
    """Main function"""
    filepath, name, prefix = parse_arguments()

    data = np.load(filepath)

    if name is None:
        name = os.path.splitext(os.path.basename(filepath))[0]
        if prefix is not None:
            name = prefix + '_' + name

    sa_array = sa.create(name, data.shape, data.dtype)
    np.copyto(sa_array, data)

    print("Successfully saved: {}, {}, {}".format(name, data.shape, data.dtype))
Example #40
def main():
    """Main function"""
    filepath, name, prefix, dtype = parse_arguments()

    if name is None:
        name = os.path.splitext(os.path.basename(filepath))[0]
        if prefix is not None:
            name = prefix + '_' + name

    print("Loading data from '{}'.".format(filepath))
    if filepath.endswith('.npy'):
        data = np.load(filepath)
        data = data.astype(dtype)
        print("Saving data to shared memory.")
        sa_array = sa.create(name, data.shape, data.dtype)
        np.copyto(sa_array, data)
    else:
        with np.load(filepath) as loaded:
            print("Saving data to shared memory.")
            sa_array = sa.create(name, loaded['shape'], dtype)
            sa_array[tuple(loaded['nonzero'])] = True

    print("Successfully saved: (name='{}', shape={}, dtype={})".format(
        name, sa_array.shape, sa_array.dtype))
Example #41
    def _setup_arrays(self, inshape):
        """ Setup instance variabels and arrays for processing """
        self.inshape = inshape

        # use shared array
        if self.wfunc is None:
            self.id = str(uuid.uuid4())
            self.arrout = sa.create(self.id, self.inshape)
            self.arrout[:] = np.nan
            self.chunks = self.chunk(self.inshape)

            # TODO - allow custom write function
            #self.arrout = np.empty((self.nbandsout, self.inshape[1], self.inshape[2]))

            def wfunc(output, chunk):
                self.arrout[:, chunk[1]:chunk[1] + chunk[3], chunk[0]:chunk[0] + chunk[2]] = output

            self.wfunc = wfunc
Example #42
    def add_data_sa(self, path_new, key='train'):
        self.x[key] = sa.attach(path_new)
        print('data size:', self.x[key].shape)
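Example #43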
def load_shared(args):
    i, array_name, fname, kwargs = args
    array = SharedArray.attach(array_name)
    array[i] = data.load_augment(fname, **kwargs)
Example #44
def cleanup():
    print('Cleaning up')
    sa.delete('creature_gfx')
Example #45
    def _cleanup(self):
        if self.wfunc is None:
            sa.delete(self.id)