Example No. 1
    def update(self, y0, y1):
        if y0.dtype == np.uint8:
            y0 = (y0 / 255.0).astype('float32')
            y1 = (y1 / 255.0).astype('float32')
        self.num_samples += 1

        # init structures if not already
        if not self.initialized():
            self.init_structures(y0.shape)

        # check shape didn't change
        if self.shape != y0.shape:
            msg = 'Shape changed from %s to %s.' % (self.shape, y0.shape)
            raise ValueError(msg)

        # remember last images
        self.last_y0 = y0
        self.last_y1 = y1

        # The _update_scalar function is typically faster than
        # _update_vectorial; set check_times to True to run both
        # and log a timing comparison.
        check_times = False
        ts = []
        ts.append(time.time())
        if check_times:
            self._update_vectorial(y0, y1)
        ts.append(time.time())
        self._update_scalar(y0, y1)
        ts.append(time.time())
        if check_times:
            delta = np.diff(ts)
            msg = 'Update times: vect %5.3f scal %5.3f seconds' % (delta[0],
                                                                   delta[1])
            logger.info(msg)
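
A minimal usage sketch for update(): assuming these methods belong to DiffeomorphismEstimatorFaster (as the merge() example below suggests) and that the estimator is constructed with a per-axis max_displ fraction, one feeds it consecutive image pairs. The constructor arguments and the frames() generator are assumptions for illustration only.

    # Hypothetical driver loop; constructor signature and frames() are assumed.
    estimator = DiffeomorphismEstimatorFaster(max_displ=(0.1, 0.1))
    previous = None
    for frame in frames():                     # yields 2D uint8 or float32 images
        if previous is not None:
            estimator.update(previous, frame)  # y0 = previous image, y1 = current
        previous = frame
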
Example No. 2
    def merge(self, other):
        """ Merges the values obtained by "other" with ours. """
        if not other.initialized():
            # nothing to do
            return
        if not self.initialized() and other.initialized():
            # Not initialized yet: copy the structures from the other.
            self.init_structures(other.shape)
            self.num_samples = other.num_samples
            self.neig_esimmin_score = other.neig_esimmin_score
            if self.inference_method == DiffeomorphismEstimatorFaster.Similarity:
                self.neig_esim_score = other.neig_esim_score
            if self.inference_method == DiffeomorphismEstimatorFaster.Order:
                self.neig_eord_score = other.neig_eord_score
            return

        if self.inference_method != other.inference_method:
            msg = ('Cannot merge estimators with different inference methods '
                   '(%s vs %s).' % (self.inference_method, other.inference_method))
            raise ValueError(msg)

        assert other.initialized(), "Can only merge initialized structures"
        logger.info('merging %s + %s samples' %
                    (self.num_samples, other.num_samples))

        self.num_samples += other.num_samples
        self.neig_esimmin_score += other.neig_esimmin_score

        if self.inference_method == DiffeomorphismEstimatorFaster.Order:
            self.neig_eord_score += other.neig_eord_score
        elif self.inference_method == DiffeomorphismEstimatorFaster.Similarity:
            self.neig_esim_score += other.neig_esim_score
        else:
            assert False, 'Unknown inference method %r' % self.inference_method
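
Because merge() simply accumulates sample counts and score arrays, estimators trained on disjoint chunks of a log can be combined afterwards. A sketch under that assumption, reusing the hypothetical construction from the previous note (chunks is an assumed list of frame sequences):

    # Hypothetical split-then-merge workflow; constructor arguments are assumed.
    estimators = [DiffeomorphismEstimatorFaster(max_displ=(0.1, 0.1)) for _ in range(4)]
    for est, chunk in zip(estimators, chunks):
        for y0, y1 in zip(chunk[:-1], chunk[1:]):  # consecutive image pairs
            est.update(y0, y1)

    combined = estimators[0]
    for est in estimators[1:]:
        combined.merge(est)                        # adds the other's samples and scores
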
Example No. 3
    def init_structures(self, shape):
        self.shape = shape
        self.nsensels = shape[0] * shape[1]

        self.ydd = np.zeros(shape, dtype='float32')

        # for each sensel, create an area
        self.lengths = np.ceil(self.max_displ *
                               np.array(self.shape)).astype('int32')
        # print(' Field Shape: %s' % str(self.shape))
        # print('    Fraction: %s' % str(self.max_displ))
        # print(' Search area: %s' % str(self.lengths))

        self.neighbor_coords = [None] * self.nsensels
        self.neighbor_indices = [None] * self.nsensels
        self.neighbor_indices_flat = [None] * self.nsensels
        self.neighbor_similarity_flat = [None] * self.nsensels
        self.neighbor_similarity_best = np.zeros(self.nsensels,
                                                 dtype='float32')
        self.neighbor_argsort_flat = [None] * self.nsensels
        self.neighbor_num_bestmatch_flat = [None] * self.nsensels

        self.flattening = Flattening.by_rows(shape)
        logger.info('Creating structure shape %s lengths %s' %
                    (self.shape, self.lengths))
        cmg = cmap(self.lengths)
        # For each sensel, precompute the (toroidally wrapped) coordinates
        # of its search area and the corresponding flat indices.
        for coord in coords_iterate(self.shape):
            k = self.flattening.cell2index[coord]
            cm = cmg.copy()
            cm[:, :, 0] += coord[0]
            cm[:, :, 1] += coord[1]
            cm[:, :, 0] = cm[:, :, 0] % self.shape[0]
            cm[:, :, 1] = cm[:, :, 1] % self.shape[1]
            self.neighbor_coords[k] = cm

            indices = np.zeros(self.lengths, 'int32')
            for a, b in coords_iterate(indices.shape):
                c = tuple(cm[a, b, :])
                indices[a, b] = self.flattening.cell2index[c]

            self.neighbor_indices[k] = indices
            self.neighbor_indices_flat[k] = np.array(indices.flat)
            self.neighbor_similarity_flat[k] = np.zeros(
                indices.size, 'float32')
            self.neighbor_argsort_flat[k] = np.zeros(indices.size, 'float32')
            self.neighbor_num_bestmatch_flat[k] = np.zeros(
                indices.size, 'uint')
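
The per-sensel search window set up above is just the ceiling of max_displ times the field shape. A self-contained numeric example, with an illustrative max_displ of (0.1, 0.2) and a 32x64 field (both values assumed):

    import numpy as np

    max_displ = np.array([0.1, 0.2])  # assumed per-axis fraction of the field
    shape = (32, 64)
    lengths = np.ceil(max_displ * np.array(shape)).astype('int32')
    print(lengths)                    # [ 4 13]: each sensel searches a 4x13 neighborhood
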
Example No. 4
    def get_value(self):
        '''
            Finds the maximum likelihood estimate of the diffeomorphism by
            looking at each pixel individually.

            Returns a Diffeomorphism2D.
        '''

        if not self.initialized():
            msg = 'No data seen yet'
            raise Diffeo2dEstimatorInterface.NotReady(msg)

        maximum_likelihood_index = np.zeros(self.shape, dtype='int32')
        variance = np.zeros(self.shape, dtype='float32')
        E2 = np.zeros(self.shape, dtype='float32')
        E3 = np.zeros(self.shape, dtype='float32')
        E4 = np.zeros(self.shape, dtype='float32')
        num_problems = 0

        i = 0
        # for each coordinate
        for c in coords_iterate(self.shape):
            # find index in flat array
            k = self.flattening.cell2index[c]
            # Look at the average similarities of the neighbors
            sim = self.neighbor_similarity_flat[k]
            sim_min = sim.min()
            sim_max = sim.max()
            if sim_max == sim_min:
                # if all the neighbors have the same similarity
                best_index = 0
                variance[c] = 0  # minimum information
                maximum_likelihood_index[c] = best_index
            else:
                best = np.argmin(sim)
                best_index = self.neighbor_indices_flat[k][best]
                # uncertainty ~= similarity of the best pixel
                variance[c] = sim[best]
                maximum_likelihood_index[c] = best_index

            E2[c] = self.neighbor_similarity_best[k] / self.num_samples
            # Best match error
            E3[c] = np.min(
                self.neighbor_num_bestmatch_flat[k]) / self.num_samples

            E4[c] = np.min(self.neighbor_argsort_flat[k]) / self.num_samples

            i += 1

        d = self.flattening.flat2coords(maximum_likelihood_index)

        if num_problems > 0:
            logger.info('Warning, %d were not informative.' % num_problems)

        # normalization for this variance measure
        vmin = variance.min()
        variance = variance - vmin
        vmax = variance.max()
        if vmax > 0:
            variance *= (1 / vmax)

        # return maximum likelihood plus uncertainty measure
        return Diffeomorphism2D(d, variance)  # , E2, E3, E4)
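
Reading out the estimate is then a single call to get_value(), guarded by the NotReady exception raised above (estimator is the hypothetical instance from the earlier notes):

    try:
        diffeo = estimator.get_value()  # Diffeomorphism2D: displacement field plus variance
    except Diffeo2dEstimatorInterface.NotReady:
        # Not enough data yet: keep calling update() with image pairs.
        pass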