示例#1
0
    def _setup_screen(self, config):
        """ Prepare screen-input generation or retrieval.

        Builds the 2D image source for the configured input type and an
        interpolator mapping image values onto the screen grid, so that
        self.get_screen_intensity_step can later produce video frames.
        """

        self._image2d = image2Dfactory(self._input_type)(config)

        # project the screen grid points into image-plane coordinates
        grid_x, grid_y = self.grid[0], self.grid[1]
        imagx, imagy = self.screen_to_image_map.map(grid_x, grid_y)

        # the interpolator approximates values at arbitrary plane points
        # from the values known on a rectangular grid
        xmin, xmax = imagx.min(), imagx.max()
        ymin, ymax = imagy.min(), imagy.max()
        image_grid = self._image2d.get_grid(xmin, xmax, ymin, ymax)
        self._interpolator = ImageTransform(image_grid, [imagx, imagy])

        self.screen_write_step = config['Retina']['screen_write_step']
示例#2
0
    def get_intensities(self, file, config=None):
        """ Compute per-photoreceptor input intensities from an image file.

            file: mat file is assumed but in case more file formats need to be
                  supported an additional field should be included in config
            config: {'still_image': True(default)/False,
                     'steps': <simulation steps>,
                     'dt': <time step>, 'output_file': <filename>}
            returns: numpy array with height the number of simulation
                     steps and width the number of neurons.
                     The order of neurons is the same as that returned
                     in get_positions, with 'R1toR6' option for 'include'
                     configuration parameter
            raises: KeyError if the mat file has no variable 'im'
        """
        # avoid the mutable-default-argument pitfall of config={}
        if config is None:
            config = {}

        dt = config.get('dt', 1e-4)
        time_steps = config.get('steps', 1000)
        still_image = config.get('still_image', True)
        # TODO: output_file is accepted but not used yet
        output_file = config.get('output_file', None)

        mat = loadmat(file)
        try:
            image = np.array(mat['im'])
        except KeyError:
            # previously this only printed a message and then crashed with
            # NameError on the unbound 'image'; fail explicitly instead
            raise KeyError('No variable "im" in given mat file; available '
                           'variables (and meta-data): {}'.format(mat.keys()))

        # photons were stored for 1ms
        image *= (dt / 1e-3)

        h_im, w_im = image.shape
        transform = ImageTransform(image)

        # screen positions
        # should be much more than the number of photoreceptors
        # shape (20, 3)*self._nrings
        screenlat, screenlong = np.meshgrid(
            np.linspace(0, PI / 2, 3 * self._nrings),
            np.linspace(-PI, PI, 20 * self._nrings))

        mapscreen = self.OMMATIDIUM_CLS.MAP_SCREEN
        # plane positions
        # shape (20, 3)*self._nrings
        mx, my = mapscreen.map(screenlat, screenlong)  # map from spherical
        # grid to plane

        # photoreceptor positions
        photorlat, photorlong = self.get_positions({
            'coord': 'spherical',
            'include': 'R1toR6',
            'add_dummy': False
        })

        if still_image:
            mx = mx - mx.min()  # start from 0
            my = my - my.min()
            mx *= h_im / mx.max()  # scale to image size
            my *= w_im / my.max()

            # shape (20, 3)*self._nrings
            transimage = transform.interpolate((mx, my))

            intensities = self._get_intensities(transimage, photorlat,
                                                photorlong, screenlat,
                                                screenlong)
            # a still image contributes identical intensities every step
            intensities = np.tile(intensities, (time_steps, 1))
        else:
            intensities = np.empty((time_steps, len(photorlat)),
                                   dtype='float32')
            mx = mx - mx.min()  # start from 0
            my = my - my.min()
            for i in range(time_steps):
                mx = mx + 2 * np.random.random() - 1  # move randomly
                my = my + 2 * np.random.random() - 1  # between -1 and 1
                # reflect the window back inside the image if it drifted out
                mxi_max = mx.max()
                if mxi_max > h_im:
                    mx = mx - 2 * (mxi_max - h_im)
                mxi_min = mx.min()
                if mxi_min < 0:
                    mx = mx - 2 * mxi_min

                myi_max = my.max()
                if myi_max > w_im:
                    my = my - 2 * (myi_max - w_im)
                myi_min = my.min()
                if myi_min < 0:
                    my = my - 2 * myi_min

                # get interpolated image values on these positions
                transimage = transform.interpolate((mx, my))
                intensities[i] = self._get_intensities(transimage, photorlat,
                                                       photorlong, screenlat,
                                                       screenlong)

        return intensities
示例#3
0
    def get_intensities(self, file, config=None):
        """ Compute per-photoreceptor input intensities from an image file.

            file: mat file is assumed but in case more file formats need to be
                  supported an additional field should be included in config
            config: {'still_image': True(default)/False,
                     'steps': <simulation steps>,
                     'dt': <time step>, 'output_file': <filename>}
            returns: numpy array with height the number of simulation
                     steps and width the number of neurons.
                     The order of neurons is the same as that returned
                     in get_positions, with 'R1toR6' option for 'include'
                     configuration parameter
            raises: KeyError if the mat file has no variable 'im'
        """
        # avoid the mutable-default-argument pitfall of config={}
        if config is None:
            config = {}

        dt = config.get("dt", 1e-4)
        time_steps = config.get("steps", 1000)
        still_image = config.get("still_image", True)
        # TODO: output_file is accepted but not used yet
        output_file = config.get("output_file", None)

        mat = loadmat(file)
        try:
            image = np.array(mat["im"])
        except KeyError:
            # previously this only printed a message and then crashed with
            # NameError on the unbound 'image'; fail explicitly instead
            raise KeyError(
                'No variable "im" in given mat file; available variables '
                "(and meta-data): {}".format(mat.keys())
            )

        # photons were stored for 1ms
        image *= dt / 1e-3

        h_im, w_im = image.shape
        transform = ImageTransform(image)

        # screen positions
        # should be much more than the number of photoreceptors
        # shape (20, 3)*self._nrings
        screenlat, screenlong = np.meshgrid(
            np.linspace(0, PI / 2, 3 * self._nrings), np.linspace(-PI, PI, 20 * self._nrings)
        )

        mapscreen = self.OMMATIDIUM_CLS.MAP_SCREEN
        # plane positions
        # shape (20, 3)*self._nrings
        mx, my = mapscreen.map(screenlat, screenlong)  # map from spherical
        # grid to plane

        # photoreceptor positions
        photorlat, photorlong = self.get_positions({"coord": "spherical", "include": "R1toR6", "add_dummy": False})

        if still_image:
            mx = mx - mx.min()  # start from 0
            my = my - my.min()
            mx *= h_im / mx.max()  # scale to image size
            my *= w_im / my.max()

            # shape (20, 3)*self._nrings
            transimage = transform.interpolate((mx, my))

            intensities = self._get_intensities(transimage, photorlat, photorlong, screenlat, screenlong)
            # a still image contributes identical intensities every step
            intensities = np.tile(intensities, (time_steps, 1))
        else:
            intensities = np.empty((time_steps, len(photorlat)), dtype="float32")
            mx = mx - mx.min()  # start from 0
            my = my - my.min()
            for i in range(time_steps):
                mx = mx + 2 * np.random.random() - 1  # move randomly
                my = my + 2 * np.random.random() - 1  # between -1 and 1
                # reflect the window back inside the image if it drifted out
                mxi_max = mx.max()
                if mxi_max > h_im:
                    mx = mx - 2 * (mxi_max - h_im)
                mxi_min = mx.min()
                if mxi_min < 0:
                    mx = mx - 2 * mxi_min

                myi_max = my.max()
                if myi_max > w_im:
                    my = my - 2 * (myi_max - w_im)
                myi_min = my.min()
                if myi_min < 0:
                    my = my - 2 * myi_min

                # get interpolated image values on these positions
                transimage = transform.interpolate((mx, my))
                intensities[i] = self._get_intensities(transimage, photorlat, photorlong, screenlat, screenlong)

        return intensities
示例#4
0
    def get_intensities(self, file, config=None):
        """ Compute per-photoreceptor input intensities from an image file.

            file: input image file, mat file is assumed with a variable im in it
            config: {'type': 'image'(default)/'video',
                     'steps': <simulation steps>,
                     'dt': <time step>,
                     'output_file': <filename>,
                     'factors': list of factors for scaled image }
            returns: numpy array with height the number of simulation
                     steps and width the number of neurons.
                     The order of neurons is the same as that returned
                     in get_positions, with 'include'
                     configuration parameter set to 'R1toR6'
            raises: KeyError if the mat file has no variable 'im',
                    ValueError if config['type'] is not 'image' or 'video'
        """
        # the order of ommatidia in output is the exact order of
        # _ommatidia parameter

        # avoid the mutable-default-argument pitfall of config={}
        if config is None:
            config = {}

        dt = config.get('dt', 1e-4)
        time_steps = config.get('steps', 1000)
        # local name changed from 'type' to avoid shadowing the builtin
        input_type = config.get('type', 'image')
        output_file = config.get('output_file', None)
        factors = config.get('factors', [1])

        mat = loadmat(file)
        try:
            image = np.array(mat['im'])
        except KeyError:
            # previously this only printed a message and then crashed with
            # NameError on the unbound 'image'; fail explicitly instead
            raise KeyError('No variable "im" in given mat file; available '
                           'variables (and meta-data): {}'.format(mat.keys()))

        # photons were stored for 1ms
        image *= (dt/1e-3)

        h_im, w_im = image.shape
        transform = ImageTransform(image)

        # screen positions
        # should be much more than the number of photoreceptors
        # shape (M, P)*self._nrings
        screenlat, screenlong = np.meshgrid(np.linspace((PI/2)/self.PARALLELS,
                                                        PI/2, self.PARALLELS),
                                            np.linspace(-PI, PI, self.MERIDIANS))

        mapscreen = self.OMMATIDIUM_CLS.MAP_SCREEN
        # plane positions
        # shape (M, P)*self._nrings
        mx, my = mapscreen.map(screenlat, screenlong)   # map from spherical
                                                        # grid to plane

        # photoreceptor positions
        photorlat, photorlong = self.get_positions({'coord': 'spherical',
                                                    'include': 'R1toR6',
                                                    'add_dummy': False})

        ind_weights = self._pre_get_intensities(mx.shape[0], mx.shape[1],
                                                photorlat, photorlong,
                                                screenlat, screenlong)
        if input_type == 'image':
            mx = mx - mx.min()  # start from 0
            my = my - my.min()
            mx *= h_im/mx.max()  # scale to image size
            my *= w_im/my.max()

            # shape (M, P)*self._nrings
            transimage = transform.interpolate((mx, my))
            intensities = self._get_intensities(transimage, ind_weights)
            intensities = np.tile(intensities, (time_steps, 1))
            # Equal or almost equal parts of the array equal with the
            # factors are multiplied by the respective factor
            ilen = len(intensities)
            flen = len(factors)
            for i, factor in enumerate(factors):
                intensities[(i*ilen)//flen:((i+1)*ilen)//flen] *= factor

            positions = None
        elif input_type == 'video':
            intensities = np.empty((time_steps, len(photorlat)),
                                   dtype='float32')
            # per-step bounding box of the sampling window (see loop below)
            positions = np.empty((time_steps, 4),
                                 dtype='float32')
            mx = mx - mx.min()  # start from 0
            my = my - my.min()
            for i in range(time_steps):
                scale = 0.1
                mx = mx + scale*(2*np.random.random() - 1)  # move randomly
                my = my + scale*(2*np.random.random() - 1)  # between -1 and 1
                # reflect the window back inside the image if it drifted out
                mxi_max = mx.max()
                if mxi_max > h_im:
                    mx = mx - 2*(mxi_max-h_im)
                mxi_min = mx.min()
                if mxi_min < 0:
                    mx = mx - 2*mxi_min

                myi_max = my.max()
                if myi_max > w_im:
                    my = my - 2*(myi_max-w_im)
                myi_min = my.min()
                if myi_min < 0:
                    my = my - 2*myi_min

                # get interpolated image values on these positions
                transimage = transform.interpolate((mx, my))

                intensities[i] = self._get_intensities(transimage, ind_weights)
                positions[i] = [mx.min(), mx.max(), my.min(), my.max()]
        else:
            # previously an unknown type fell through and crashed later
            # with NameError on the unbound 'intensities'
            raise ValueError("config['type'] must be 'image' or 'video', "
                             "got {!r}".format(input_type))

        if output_file:
            with h5py.File(output_file, 'w') as f:
                f.create_dataset('array', intensities.shape,
                                 dtype=np.float64,
                                 data=intensities)

        InputConfig = namedtuple('InputConfig', 'image positions factors intensities')
        self.input_config = InputConfig(image=image, positions=positions,
                                        factors=factors,
                                        intensities=intensities)
        return intensities
示例#5
0
class Screen(object):
    """ Defines the geometry of screen and generates screen images """

    # Python-2 style abstract-class declaration; on Python 3 this
    # attribute has no effect (ABCMeta would have to be passed as
    # metaclass=... in the class statement)
    __metaclass__ = ABCMeta

    def __init__(self, config):
        """ Read timing and input-type settings from config and build
            the screen interpolator via _setup_screen.

            config: nested mapping with at least config['General']['dt'],
                    config['Retina']['intype'] and
                    config['Retina']['screen_write_step']
            raises: ValueError if no input type is configured
        """
        self._dtype = np.double

        # config attributes
        self._dt = config['General']['dt']
        self._input_type = config['Retina']['intype']
        if self._input_type is None:
            raise ValueError('No input type specified')

        # grid
        self._setup_screen(config)

    @abstractproperty
    def screen_to_image_map(self):
        # subclass provides an object with a .map(x, y) method that
        # projects screen grid coordinates to image-plane coordinates
        # (used by _setup_screen)
        pass

    @abstractproperty
    def grid(self):
        """ :return: a meshgrid tuple e.g
                [[x1, x2, x3],
                 [x1, x2, x3]],
                [[y1, y1, y1],
                 [y2, y2, y2]]
        """
        pass

    def _setup_screen(self, config):
        """ Set up screen generation or retrieval.

        This function sets up the stage for the types of movies to be
        played and lets self.get_screen_intensity_step generate video.
        """

        self._image2d = image2Dfactory(self._input_type)(config)

        # find the mappings of screen points to image
        imagx, imagy = self.screen_to_image_map.map(self.grid[0],
                                                    self.grid[1])

        # given the values on points of a rectangular grid, interpolator
        # can find the values on any point of the plane by approximation
        xmax, ymax = imagx.max(), imagy.max()
        xmin, ymin = imagx.min(), imagy.min()
        self._interpolator = ImageTransform(
            self._image2d.get_grid(xmin, xmax, ymin, ymax), [imagx, imagy])
        self.screen_write_step = config['Retina']['screen_write_step']

    def setup_file(self, filename, read=False):
        """ tell self to use filename instead of generating video

            filename: HDF5 file to read inputs from (read=True) or to
                      store generated inputs to (read=False)
            read: selects read mode vs. store mode; the file itself is
                  opened lazily in get_screen_intensity_steps
        """
        self.file_open = False
        self.filename = filename
        if read:
            self.store_to_file = False
            self.read_from_file = True
            # next read position in the stored '/array' dataset
            self.file_position = 0
        else:
            self.store_to_file = True
            self.read_from_file = False
            # only every skip_step-th generated frame is written out;
            # step_count tracks progress within the current skip window
            self.skip_step = self.screen_write_step
            self.step_count = 0

    def get_screen_intensity_steps(self, num_steps):
        """ generate or read the next num_steps of inputs

            Returns the screens array; raises AttributeError (re-raised
            after a hint message) if setup_file was never called.
        """
        # open the HDF5 file lazily on first use
        if not self.file_open:
            if self.read_from_file:
                self.inputfile = h5py.File(self.filename, 'r')
                self.inputarray = self.inputfile['/array']
            else:
                # NOTE(review): _height/_width are not set anywhere in
                # this class — presumably defined by a subclass; confirm
                self.outputfile = h5py.File(self.filename, 'w')
                self.outputfile.create_dataset(
                    '/array', (0, self._height, self._width),
                    dtype = self._dtype,
                    maxshape=(None, self._height, self._width),
                    compression = 9)
            self.file_open = True
        try:
            if self.read_from_file:
                screens = self.inputarray[self.file_position:self.file_position+num_steps]
                self.file_position += num_steps
            else:
                # values on 2D
                images = self._image2d.generate_2dimage(num_steps)
                # flip both spatial axes of each frame
                images = images[:,::-1,::-1]
                # values on screen
                screens = self._interpolator.interpolate(images)

            if self.store_to_file:
                # persist only every skip_step-th frame; step_count carries
                # the offset within the skip window across calls
                if num_steps >= self.skip_step:
                    # enough new frames to take a strided slice directly
                    dataset_append(self.outputfile['/array'],
                        screens[(self.skip_step-self.step_count)%self.skip_step::self.skip_step])
                    self.step_count = (self.step_count+num_steps)%self.skip_step
                else:
                    # fewer frames than one skip window: write at most one
                    self.step_count %= self.skip_step
                    if self.step_count == 0:
                        dataset_append(self.outputfile['/array'], screens[[0]])

                    self.step_count += num_steps
                    if self.step_count > self.skip_step:
                        # the window boundary fell inside this batch
                        dataset_append(self.outputfile['/array'],
                            screens[[num_steps-(self.step_count-self.skip_step)]])
                        self.step_count -= self.skip_step

        except AttributeError:
            # most likely setup_file was not called before this method
            print('Function for file setup probably not called')
            raise

        return screens

    @abstractmethod
    def get_image2d_dim(self):
        """ screen is supposed to map values from
            (-x/2 to x/2) and (-y/2 to y/2)
            where x, y are the values returned
            from this function
        """
        pass

    @abstractmethod
    def generate_video(self, data, coordinates, rng, videofile):
        """
            data: values to be visualized
            coordinates: coordinates of values on screen
                         a tuple of arrays
            rng:         range of values
            videofile:   the file where output will be written
        """
        pass

    def close_files(self):
        """ Close any HDF5 files opened by get_screen_intensity_steps.

            Safe to call even if setup_file was never invoked: the
            AttributeError from the missing flags is deliberately ignored.
        """
        try:
            if self.store_to_file:
                self.outputfile.close()
        except AttributeError:
            pass

        try:
            if self.read_from_file:
                self.inputfile.close()
        except AttributeError:
            pass