def sparse_bool_mask(x, mask, axis=0):
    """Apply a boolean mask to a ``tf.SparseTensor`` along ``axis``.

    Mirrors ``tf.boolean_mask`` for dense tensors: stored elements whose
    coordinate along ``axis`` is selected by ``mask`` are kept, and that
    coordinate is renumbered so the output axis is contiguous.

    :param x: a ``tf.SparseTensor``.
    :param mask: rank-1 boolean tensor, one entry per slice of ``axis``.
    :param axis: int, the axis to mask (default 0).
    :return: a ``tf.SparseTensor`` containing only the selected slices.
    """
    # Positions along `axis` that survive the mask, in ascending order.
    kept = tf.boolean_mask(tf.range(tf.shape(x)[axis]), mask)
    kept_count = tf.cast(tf.size(kept), tf.int64)

    # Coordinate of every stored element along the masked axis.
    coords = x.indices[:, axis]
    # Pairwise comparison of each stored coordinate against each kept
    # position. TODO this has quadratic cost.
    hits = tf.equal(tf.expand_dims(coords, 1), tf.cast(kept, tf.int64))
    # True for stored elements whose coordinate was selected.
    keep_row = tf.reduce_any(hits, axis=1)

    # Renumber each surviving coordinate to its rank among the kept
    # positions (each row of `hits` has at most one True, so the sum
    # picks out that column's index).
    new_coord = tf.reduce_sum(tf.cast(hits, tf.int64) * tf.range(kept_count),
                              axis=1)
    new_coord = tf.boolean_mask(new_coord, keep_row, axis=0)

    # Rebuild the full indices tensor with the renumbered axis spliced in.
    surviving = tf.boolean_mask(x.indices, keep_row, axis=0)
    new_indices = tf.concat([
        surviving[:, :axis],
        tf.expand_dims(new_coord, 1),
        surviving[:, axis + 1:]
    ], axis=1)

    new_values = tf.boolean_mask(x.values, keep_row, axis=0)
    new_shape = tf.concat(
        [x.dense_shape[:axis], [kept_count], x.dense_shape[axis + 1:]],
        axis=0)
    return tf.SparseTensor(new_indices, new_values, new_shape)
def _upsampled_registration(target_image, src_image, upsample_factor):
    """Estimate the per-image (row, col) shift between two image batches.

    Cross-correlates ``src_image`` against ``target_image`` in the Fourier
    domain, takes the coarse (pixel-level) peak, then refines it to
    sub-pixel precision with an upsampled DFT evaluated only around the
    peak (matrix-multiply DFT refinement, in the style of
    skimage's ``register_translation`` — NOTE(review): presumed origin,
    confirm against the helpers' implementation).

    :param target_image: reference image batch; reshaped to its first
        three dimensions, so assumes shape (batch, rows, cols[, 1]) —
        TODO confirm with callers.
    :param src_image: image batch to register against the target; same
        shape assumption as ``target_image``.
    :param upsample_factor: Python number; sub-pixel refinement factor
        (shifts are resolved to 1/upsample_factor of a pixel).
    :return: float32 tensor of shifts, one row per batch element
        (as produced by ``find_maxima`` — axis order depends on that
        helper; verify before relying on (row, col) vs (col, row)).
    """
    upsample_factor = tf.constant(upsample_factor, tf.float32)
    # Drop any trailing singleton dimension so the FFT sees (batch, h, w).
    target_shape = tf.shape(target_image)
    target_image = tf.reshape(target_image, target_shape[:3])
    src_shape = tf.shape(src_image)
    src_image = tf.reshape(src_image, src_shape[:3])

    src_freq = fft2d(src_image)
    target_freq = fft2d(target_image)

    # Spatial (h, w) of the FFT, tiled to one row per batch element.
    shape = tf.reshape(tf.shape(src_freq)[1:3], (1, 2))
    shape = tf.cast(shape, tf.float32)
    shape = tf.tile(shape, (tf.shape(target_freq)[0], 1))

    # Cross-power spectrum; its inverse FFT is the cross-correlation.
    image_product = src_freq * tf.conj(target_freq)
    cross_correlation = tf.spectral.ifft2d(image_product)
    # Coarse, integer-pixel peak location per batch element.
    maxima = find_maxima(tf.abs(cross_correlation))
    midpoints = fix(tf.cast(shape, tf.float32) / 2.)
    shifts = maxima
    # Peaks past the midpoint correspond to negative shifts (FFT wraps).
    shifts = tf.where(shifts > midpoints, shifts - shape, shifts)
    # Snap the coarse estimate to the upsampled grid.
    shifts = tf.round(shifts * upsample_factor) / upsample_factor

    # Size of the refinement window in upsampled-grid units, and its
    # center offset.
    upsampled_region_size = tf.ceil(upsample_factor * 1.5)
    dftshift = fix(upsampled_region_size / 2.0)
    # DFT scaling: number of frequency samples times the square of the
    # upsampling factor.
    normalization = tf.cast(tf.size(src_freq[0]), tf.float32)
    normalization *= upsample_factor**2
    # Center the upsampled DFT window on the coarse peak.
    sample_region_offset = dftshift - shifts * upsample_factor
    data = tf.conj(image_product)
    upsampled_dft = _upsampled_dft(data, upsampled_region_size,
                                   upsample_factor, sample_region_offset)
    cross_correlation = tf.conj(upsampled_dft)
    cross_correlation /= tf.cast(normalization, tf.complex64)
    cross_correlation = tf.abs(cross_correlation)
    # Refined peak within the window, re-expressed as a sub-pixel
    # correction to the coarse shift.
    maxima = find_maxima(cross_correlation)
    maxima = maxima - dftshift
    shifts = shifts + maxima / upsample_factor
    return shifts
def ndims(x):
    """Return the rank of tensor ``x`` as a 0-d int32 tensor.

    Uses ``tf.rank`` directly instead of the equivalent
    ``tf.size(tf.shape(x))``, which needlessly materializes the shape
    vector only to count its elements.

    :param x: a Tensor (or anything convertible to one).
    :return: scalar int32 tensor holding the number of dimensions of ``x``.
    """
    return tf.rank(x)