def advanced_indexing(volume, *indices_list, **kwargs):
    """ Performs advanced indexing on `volume`.

    This function exists because in Theano<=0.9 advanced indexing is only
    supported along the first dimension.

    Notes
    -----
    Assumes `volume` is C contiguous.
    """
    strides = kwargs.get("strides")
    if strides is None:
        # C-contiguous element strides over the indexed dimensions.
        shapes = T.cast(volume.shape[:len(indices_list)], dtype=theano.config.floatX)
        strides = T.concatenate([T.ones((1,)), T.cumprod(shapes[::-1])[:-1]], axis=0)[::-1]

    shapes = T.cast(volume.shape, dtype=theano.config.floatX)

    # Clip each index to its valid range, then fold the multi-dimensional
    # indices into linear offsets using the strides.
    indices = T.maximum(0, T.minimum(indices_list[-1], shapes[len(indices_list) - 1] - 1))
    for i in range(len(indices_list) - 1):
        clipped_idx = T.maximum(0, T.minimum(indices_list[i], shapes[i] - 1))
        indices += clipped_idx * strides[i]

    # indices = T.sum(T.stack(indices_list, axis=1)*strides[:len(indices_list)], axis=1)
    indices = T.cast(indices, dtype="int32")
    return volume.reshape((-1, volume.shape[-1]))[indices]
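# A hedged usage sketch for advanced_indexing, assuming the theano / numpy
# imports this module already relies on. Variable names below are illustrative:
# the first two axes of a 3D volume are indexed with integer coordinate vectors,
# which advanced_indexing folds into linear offsets before gathering rows.
import numpy as np
import theano
import theano.tensor as T

vol = T.tensor3("vol")        # shape (X, Y, Z); the last axis is returned intact
idx_x = T.ivector("idx_x")    # coordinates along the first axis
idx_y = T.ivector("idx_y")    # coordinates along the second axis

gathered = advanced_indexing(vol, idx_x, idx_y)   # shape (len(idx_x), Z)
f = theano.function([vol, idx_x, idx_y], gathered)

data = np.arange(24, dtype=theano.config.floatX).reshape((2, 3, 4))
rows = np.array([0, 1], dtype="int32")
cols = np.array([2, 0], dtype="int32")
print(f(data, rows, cols))    # same as data[[0, 1], [2, 0]]; out-of-range indices are clipped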
def sym_pemb(P, order, roughness):
    # embed precision matrix (symbolic operation)
    k = -T.ones(order)
    d = T.arange(order)
    k = k**d                      # alternating signs (-1)**d
    d = d * 2                     # even orders 0, 2, 4, ...
    x = roughness * T.sqrt(2)

    r = T.zeros(order)
    # coefficient vector cumprod(1 - d) / x**d, interleaved with zeros so that
    # only even lags are non-zero
    r = T.join(0, [T.cumprod(1 - d) / (x**d), np.zeros(order)]).T
    r = r.flatten(1)[:-1]

    # sliding windows over r form the embedding matrix; k alternates the column signs
    R = T.join(0, [r[i:i + order] for i in range(order)])
    R = R * k.reshape((1, order))
    R = T.nlinalg.matrix_inverse(R)

    return T.cast(sym_kron(R, P), theano.config.floatX)
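# A numpy sketch of just the coefficient vector that sym_pemb builds symbolically
# above (the interleaving with zeros, the sliding-window matrix, the inverse, and
# the Kronecker product with P are omitted). The function name is hypothetical.
import numpy as np

def pemb_coeffs(order, roughness):
    d = 2 * np.arange(order)            # even orders 0, 2, 4, ...
    x = roughness * np.sqrt(2)
    return np.cumprod(1 - d) / (x**d)   # cumprod(1 - d) / x**d, as in sym_pemb

print(pemb_coeffs(3, 0.5))              # [ 1. -2. 12.]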
def test_trilinear_interpolation():
    trk = nib.streamlines.load(os.path.abspath(pjoin(__file__, '..', 'data', 'CA.trk')))
    trk.tractogram.apply_affine(np.linalg.inv(trk.affine))
    dwi = nib.load(os.path.abspath(pjoin(__file__, '..', 'data', 'dwi.nii.gz')))

    expected = eval_volume_at_3d_coordinates(dwi.get_data().astype('float32'),
                                             trk.streamlines._data)

    coords = T.matrix("coords")
    coords.tag.test_value = trk.streamlines._data
    volume = T.tensor3("image")
    volume.tag.test_value = dwi.get_data()[..., 0]

    fct = theano.function([volume, coords],
                          eval_volume_at_3d_coordinates_in_theano(volume, coords))
    # theano.printing.pydotprint(fct, 'interpolation_vol3d', with_ids=True)

    # Process multiple 3D volumes directly, then concatenate the results.
    values = []
    for i in range(dwi.shape[-1]):
        values_tmp = fct(dwi.get_data()[..., i], trk.streamlines._data)
        values.append(values_tmp)

    values = np.array(values).T
    assert_array_almost_equal(values, expected, decimal=4)

    # Process the 4D volume directly.
    volume = theano.shared(dwi.get_data())
    coords = theano.shared(trk.streamlines._data)

    # Precompute strides that will be used in the interpolation.
    shapes = T.cast(volume.shape[:-1], dtype=theano.config.floatX)
    strides = T.concatenate([T.ones((1,)), T.cumprod(shapes[::-1])[:-1]], axis=0)[::-1]
    volume_strides = strides.eval()

    values = eval_volume_at_3d_coordinates_in_theano(volume, coords,
                                                     strides=volume_strides).eval()
    assert_array_almost_equal(values, expected, decimal=4)
    # fct = theano.function([], eval_volume_at_3d_coordinates_in_theano(volume, coords, strides=volume_strides))
    # theano.printing.pydotprint(fct, 'interpolation_vol4d', with_ids=True)

    # Test that coordinates outside the volume are clipped.
    coords = coords * np.max(dwi.shape).astype('float32')
    expected = eval_volume_at_3d_coordinates(dwi.get_data().astype('float32'),
                                             coords.eval())
    values = eval_volume_at_3d_coordinates_in_theano(volume, coords,
                                                     strides=volume_strides).eval()
    assert_array_almost_equal(values, expected, decimal=4)

    coords = -coords
    expected = eval_volume_at_3d_coordinates(dwi.get_data().astype('float32'),
                                             coords.eval())
    values = eval_volume_at_3d_coordinates_in_theano(volume, coords,
                                                     strides=volume_strides).eval()
    assert_array_almost_equal(values, expected, decimal=4)
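# A quick numpy check of the stride expression precomputed in the test above:
# for a C-contiguous volume of spatial shape (X, Y, Z) (i.e. dwi.shape[:-1]),
# the reversed cumulative product of the reversed shape yields element strides
# (Y*Z, Z, 1). The shape below is made up for illustration.
import numpy as np

shape = np.array([5., 7., 11.])
strides = np.concatenate([np.ones(1), np.cumprod(shape[::-1])[:-1]])[::-1]
print(strides)   # [77. 11.  1.] == (7*11, 11, 1)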
def _initialize_reps(self):
    self._reps = {}

    if self.nonparametric:
        # nu_aux = np.array([2.]+[-1.]*(self.nlatfeats-1))
        nu_aux = np.array([0.]*self.nlatfeats)

        self._reps['nu'] = shared(nu_aux, name='nu')

        self._nu = T.nnet.sigmoid(self._reps['nu'])
        # Running product of sigmoid(nu): the weight on each successive latent
        # feature can only shrink with its index.
        self._mu = T.cumprod(self._nu)

    verbreps_aux = np.random.normal(0., 1e-2,
                                    size=[self.data.n('verb')-self.data.n('clausetype'),
                                          self.nlatfeats])
    projection_aux = np.random.normal(0., 1e-2,
                                      size=[self.nlatfeats, self.data.n('feature')])
    verbfeatprob_aux = np.zeros([self.data.n('verb')-self.data.n('clausetype'),
                                 self.nlatfeats]) - 4.

    if self.data.n('clausetype'):
        try:
            assert self.data.n('clausetype') <= self.nlatfeats
        except AssertionError:
            raise ValueError('nlatfeats must be greater than or equal to '
                             'the number of clausetypes')

        ctype_ident = (1.-1e-10)*np.eye(self.data.n('clausetype'))

        ct_aux_vr = np.log(ctype_ident) - np.log(1.-ctype_ident)
        ct_aux_vr = np.concatenate([ct_aux_vr,
                                    -np.inf*np.ones([self.data.n('clausetype'),
                                                     self.nlatfeats-self.data.n('clausetype')])],
                                   axis=1)
        ct_aux_vfp = np.inf*np.ones([self.data.n('clausetype'), self.nlatfeats])

        verbreps_aux = np.concatenate([ct_aux_vr, verbreps_aux])
        verbfeatprob_aux = np.concatenate([ct_aux_vfp, verbfeatprob_aux])

    self._reps['verbreps'] = shared(verbreps_aux, name='verbreps')
    self._reps['projection'] = shared(projection_aux, name='projection')
    self._reps['verbfeatprob'] = shared(verbfeatprob_aux, name='verbfeatprob')

    self._verbreps = T.nnet.sigmoid(self._reps['verbreps'])
    self._projection = T.nnet.sigmoid(self._reps['projection'])
    self._verbfeatprob = T.nnet.sigmoid(self._reps['verbfeatprob'])

    softand = self._verbfeatprob[:,:,None]*self._verbreps[:,:,None]*self._projection[None,:,:]
    self._featureprob = 1. - T.prod(1.-softand, axis=1)
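# A minimal standalone sketch of the nonparametric weighting used above,
# assuming only theano and numpy: mu is the running product of sigmoid(nu),
# so later latent features never receive more weight than earlier ones.
# nlatfeats is set to 5 here purely for illustration.
import numpy as np
import theano.tensor as T
from theano import shared

nu = shared(np.array([0.] * 5), name='nu')   # same zero initialization as nu_aux
mu = T.cumprod(T.nnet.sigmoid(nu))
print(mu.eval())                             # [0.5, 0.25, 0.125, 0.0625, 0.03125]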
def step(x_t, y_t, o_tm1, A, B, dodA, dodB, dodotm1):
    # the model itself
    o_t = T.dot(x_t, A) + T.dot(o_tm1, B)
    mse = T.mean(T.sum(T.square(y_t - o_t), axis=0))

    # gradient of o_t w.r.t. A is x_t, w.r.t. B is o_tm1, w.r.t. o_tm1 is B
    dodA_t = T.repeat(T.shape_padright(T.mean(x_t, axis=0)), repeats=n_out, axis=1)
    dodA_up = T.concatenate([T.shape_padleft(dodA_t), dodA[:-1]], axis=0)
    dodB_t = T.repeat(T.shape_padright(T.mean(o_tm1, axis=0)), repeats=n_out, axis=1)
    dodB_up = T.concatenate([T.shape_padleft(dodB_t), dodB[:-1]], axis=0)
    dodotm1_t = B
    dodotm1_up = T.concatenate([T.shape_padleft(dodotm1_t), dodotm1[:-1]], axis=0)

    # deltaE: update component from current error
    # take mean over the batch index and padleft so size is 1 x n_out
    deltaE = T.shape_padleft(T.mean(T.grad(mse, o_t), axis=0))

    # deltaR: update components over time from recurrence
    # cumulative product effectively does backprop
    # size is bptt_limit x n_out x n_out
    deltaR = T.cumprod(dodotm1_up, axis=0)

    # updates
    # dA = T.dot(deltaE, T.sum(T.batched_dot(deltaR, dodA_up), axis=0))
    dA = deltaE * T.sum(T.batched_dot(deltaR, dodA_up), axis=0)
    dB = deltaE * T.sum(T.batched_dot(deltaR, dodB_up), axis=0)

    updates = OrderedDict()
    updates[A] = A - learning_rate * dA
    updates[B] = B - learning_rate * dB
    updates[dodA] = dodA_up
    updates[dodB] = dodB_up
    updates[dodotm1] = dodotm1_up

    return [o_t, mse, dA], updates
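# Illustrative only: the entry-wise running product over the time axis that
# T.cumprod(..., axis=0) computes for deltaR above. Shapes and values below
# are made up for the example.
import numpy as np
import theano
import theano.tensor as T

J = T.tensor3("J")                 # (bptt_limit, n_out, n_out) stacked derivatives
deltaR = T.cumprod(J, axis=0)      # running product along the time axis, entry-wise
f = theano.function([J], deltaR)

stack = np.full((3, 2, 2), 0.5, dtype=theano.config.floatX)
print(f(stack)[:, 0, 0])           # [0.5, 0.25, 0.125]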