def _add_fold(s, kfold, fold_seed):
    typ = np.min_scalar_type(kfold * 2)
    if fold_seed is None:
        # If we don't have a specific seed,
        # just use a simple modulo-based mapping
        fold = cupy.arange(len(s), dtype=typ)
        cupy.mod(fold, kfold, out=fold)
        return fold
    else:
        cupy.random.seed(fold_seed)
        return cupy.random.choice(cupy.arange(kfold, dtype=typ), len(s))
def _add_fold(s, kfold, fold_seed=None):
    """Deterministically computes a '__fold__' column, given an optional random seed"""
    typ = np.min_scalar_type(kfold * 2)
    if fold_seed is None:
        # If we don't have a specific seed,
        # just use a simple modulo-based mapping
        fold = cupy.arange(len(s), dtype=typ)
        cupy.mod(fold, kfold, out=fold)
        return fold
    else:
        state = cupy.random.RandomState(fold_seed)
        return state.choice(cupy.arange(kfold, dtype=typ), len(s))
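A minimal usage sketch for the `_add_fold` variant directly above (the one with the optional seed); the input series is hypothetical, and only its length matters:

import numpy as np
import cupy

s = cupy.zeros(6)                            # any sequence with a length works
print(_add_fold(s, kfold=3))                 # modulo mapping: [0 1 2 0 1 2]
print(_add_fold(s, kfold=3, fold_seed=42))   # reproducible random fold labels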
def getMeWtW2(W, U0, Nnearest=None):
    # this function computes the correlation between any two pairs of templates
    # it relies on the fact that W and U0 are unit normalized, so that the product of a
    # template with itself is 1, as it should be if we're trying to calculate correlations
    # takes as input the temporal and spatial factors of the low-rank template, as
    # well as the number of most similar template pairs desired to be output in iList
    nt0, Nfilt, Nrank = W.shape

    WtW = cp.zeros((Nfilt, Nfilt), dtype=np.float32, order='F')

    # since the templates are factorized into orthonormal components, we can compute dot products
    # one dimension at a time
    for i in range(Nrank):
        for j in range(Nrank):
            # this computes the spatial dot product
            utu0 = cp.dot(U0[:, :, i].T, U0[:, :, j])
            # this computes the temporal dot product
            wtw0 = cp.dot(W[:, :, i].T, W[:, :, j])
            # the element-wise product of these is added to the matrix of correlations
            WtW = WtW + wtw0 * utu0

    # also return a list of most correlated template pairs
    isort = cp.argsort(WtW, axis=0)[::-1]

    if Nnearest:
        # if we don't have enough templates yet, just wrap the indices around the range 1:Nfilt
        iNear = cp.mod(cp.arange(Nnearest), Nfilt)
        iList = isort[iNear, :]  # return the list of pairs for each template
        return WtW, iList
    else:
        return WtW
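A hedged call sketch for `getMeWtW2` on synthetic factors; the shapes, channel count and the normalization step below are illustrative, not taken from the source:

import numpy as np
import cupy as cp

nt0, Nchan, Nfilt, Nrank = 61, 32, 8, 3
W = cp.random.randn(nt0, Nfilt, Nrank).astype(np.float32)
U0 = cp.random.randn(Nchan, Nfilt, Nrank).astype(np.float32)
# normalize each (template, rank) column so a template's product with itself is ~1
W /= cp.linalg.norm(W, axis=0, keepdims=True)
U0 /= cp.linalg.norm(U0, axis=0, keepdims=True)

WtW, iList = getMeWtW2(W, U0, Nnearest=5)
print(WtW.shape, iList.shape)  # (8, 8) (5, 8)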
def getMeWtW(W, U0, Nnearest=None):
    # this function computes correlation between templates at ALL timelags from each other
    # takes the max over timelags to obtain a similarity score
    # also returns lists of most similar templates to each template
    # takes as input the low-rank factorization of templates (W for time and U0 for space)

    # W is timesamples (default = 61), by number of templates, by rank (default = 3)
    nt0, Nfilt, Nrank = W.shape

    Params = [1, Nfilt, 0, 0, 0, 0, 0, 0, 0, nt0]

    # initialize correlation matrix for all timelags
    WtW = cp.zeros((Nfilt, Nfilt, 2 * nt0 - 1), dtype=np.float32, order='F')
    for i in range(Nrank):
        for j in range(Nrank):
            # the dot product factorizes into separable products for each spatio-temporal component
            utu0 = cp.dot(U0[:, :, i].T, U0[:, :, j])  # spatial products
            # temporal convolutions get multiplied with the spatial products
            wtw0 = mexWtW2(Params, W[:, :, i], W[:, :, j], utu0)
            # add it to the full correlation array
            WtW = WtW + wtw0

    # the maximum across timelags accounts for sample alignment mismatch
    cc = cp.max(WtW, axis=2)

    if Nnearest:
        isort = cp.argsort(cc, axis=0)[::-1]
        # if we don't have enough templates yet, just wrap the indices around the range 1:Nfilt
        iNear = cp.mod(cp.arange(Nnearest), Nfilt)
        iList = isort[iNear, :]  # return the list of pairs for each template
        return WtW, iList
    else:
        return WtW
def mod(numerator, denominator):
    """Compute the remainder of the division of a tensor by another

    Args:
        numerator (ndarray): numerator tensor.
        denominator (ndarray): denominator tensor.
    """
    return cp.mod(numerator, denominator)
def update(self, dt):
    phase_rep = cp.repeat(self.phase, self.size, axis=0)
    phase_diff = cp.sum(self.coupling * cp.sin(phase_rep.T - phase_rep), axis=0)
    dtheta = (self.internal_freq + phase_diff / self.size)
    self.phase = cp.mod(self.phase + dtheta * dt, 2 * np.pi)
    self.hist = cp.vstack((self.hist, self.phase))
    r, phi = self.order()
    self.rs.append(r)
    self.phis.append(phi)
def mod(numerator, denominator, dtype=None):
    """Compute the remainder of the division of a tensor by another

    Args:
        numerator (ndarray): numerator tensor.
        denominator (ndarray): denominator tensor.
        dtype (dtype): type of the returned tensor.
    """
    return cp.mod(numerator, denominator, dtype=dtype)
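A quick sanity check for the `mod` wrappers above, with illustrative values only:

import cupy as cp

a = cp.asarray([5.0, -5.0, 7.5])
print(mod(a, 3))                           # [2. 1. 1.5]  (result takes the sign of the divisor)
print(mod(a, 3, dtype=cp.float32).dtype)   # float32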
def getKernels(params):
    # this function makes upsampling kernels for the temporal components.
    # those are used for interpolating the biggest negative peak,
    # and aligning the template to that peak with sub-sample resolution
    # needs nup, the interpolation factor (default = 10)
    # also needs sig, the interpolation smoothness (default = 1)

    nup = params.nup
    sig = params.sig
    nt0min = params.nt0min
    nt0 = params.nt0

    xs = cp.arange(1, nt0 + 1)
    ys = cp.linspace(.5, nt0 + .5, nt0 * nup + 1)[:-1]

    # these kernels are just standard kriging interpolators

    # first compute distances between the sample coordinates
    # for some reason, this seems to be circular, although the waveforms are not circular
    # I think the reason had to do with some constant offsets in some channels?
    d = cp.mod(xs[:, np.newaxis] - xs[np.newaxis, :] + nt0, nt0)
    d = cp.minimum(d, nt0 - d)
    # the kernel covariance uses a squared exponential of spatial scale sig
    Kxx = cp.exp(-d ** 2 / sig ** 2)

    # do the same for the kernel similarities between upsampled "test" timepoints and
    # the original coordinates
    d = cp.mod(ys[:, np.newaxis] - xs[np.newaxis, :] + nt0, nt0)
    d = cp.minimum(d, nt0 - d)
    Kyx = cp.exp(-d ** 2 / sig ** 2)

    # the upsampling matrix is given by the following formula,
    # with some light diagonal regularization of the matrix inversion
    B = cp.dot(Kyx, cp.linalg.inv(Kxx + .01 * cp.eye(nt0)))
    B = B.reshape((nup, nt0, nt0), order='F')

    # A is just a slice through this upsampling matrix corresponding to the most negative point
    # this is used to compute the biggest negative deflection (after upsampling)
    A = cp.squeeze(B[:, nt0min - 1, :])
    B = cp.transpose(B, [1, 2, 0])

    return A.astype(np.float64), B.astype(np.float64)
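A hedged call sketch for `getKernels`; the `params` object here is a stand-in built with `SimpleNamespace`, and `nt0=61` / `nt0min=20` are illustrative values (only the `nup` and `sig` defaults are stated in the comments above):

from types import SimpleNamespace
import cupy as cp

params = SimpleNamespace(nup=10, sig=1, nt0=61, nt0min=20)  # nt0/nt0min are hypothetical here
A, B = getKernels(params)
print(A.shape, B.shape)  # (10, 61) (61, 61, 10)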
def unwrap(p, discont=None, axis=-1, *, period=2 * numpy.pi):
    r"""Unwrap by taking the complement of large deltas w.r.t. the period.

    This unwraps a signal `p` by changing elements which have an absolute
    difference from their predecessor of more than ``max(discont, period/2)``
    to their `period`-complementary values.

    For the default case where `period` is :math:`2\pi` and ``discont`` is
    :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences
    are never greater than :math:`\pi` by adding :math:`2k\pi` for some
    integer :math:`k`.

    Args:
        p (cupy.ndarray): Input array.
        discont (float): Maximum discontinuity between values, default is
            ``period/2``. Values below ``period/2`` are treated as if they were
            ``period/2``. To have an effect different from the default,
            ``discont`` should be larger than ``period/2``.
        axis (int): Axis along which unwrap will operate, default is the last
            axis.
        period (float): Size of the range over which the input wraps. By
            default, it is :math:`2\pi`.

    Returns:
        cupy.ndarray: The result array.

    .. seealso:: :func:`numpy.unwrap`
    """
    p = cupy.asarray(p)
    nd = p.ndim
    dd = sumprod.diff(p, axis=axis)
    if discont is None:
        discont = period / 2
    slice1 = [slice(None, None)] * nd  # full slices
    slice1[axis] = slice(1, None)
    slice1 = tuple(slice1)
    dtype = numpy.result_type(dd.dtype, period)
    if numpy.issubdtype(dtype, numpy.integer):
        interval_high, rem = divmod(period, 2)
        boundary_ambiguous = rem == 0
    else:
        interval_high = period / 2
        boundary_ambiguous = True
    interval_low = -interval_high
    ddmod = cupy.mod(dd - interval_low, period) + interval_low
    if boundary_ambiguous:
        cupy.copyto(ddmod, interval_high,
                    where=(ddmod == interval_low) & (dd > 0))
    ph_correct = ddmod - dd
    cupy.copyto(ph_correct, 0, where=abs(dd) < discont)
    up = cupy.array(p, copy=True, dtype=dtype)
    up[slice1] = p[slice1] + cupy.cumsum(ph_correct, axis=axis)
    return up
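A small worked example of phase unwrapping with this function; it assumes the module context of the snippet above (in particular the `sumprod.diff` helper it calls), and the printed values are approximate:

import cupy
import numpy

true_phase = cupy.asarray([0.0, 2.0, 4.0, 6.0, 8.0])
wrapped = cupy.mod(true_phase, 2 * numpy.pi)  # last sample jumps back below 2*pi
print(unwrap(wrapped))                        # ~[0. 2. 4. 6. 8.], the 2*pi jump is removed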
def corrszsz(dist, s, B, d):
    sz = cp.diag([Sz(conf, 0) for conf in range(0, d)])
    corr = cp.array(0., dtype=np.float32)

    if dist == 0:
        sz2 = cp.tensordot(sz, sz, axes=(1, 0))
        for i_bond in range(2):
            sB = cp.tensordot(cp.diag(s[np.mod(i_bond - 1, 2)]), B[i_bond], axes=(1, 1))
            C = cp.tensordot(sB, cp.conj(sB), axes=([0, 2], [0, 2]))
            corr += cp.real(
                cp.tensordot(C, sz2, axes=([0, 1], [0, 1])) -
                cp.tensordot(C, sz, axes=([0, 1], [0, 1])) *
                cp.tensordot(C, sz, axes=([0, 1], [0, 1])))
        return 0.5 * corr

    if dist != 0:
        dist = abs(dist)  # keep this as a host-side integer for range() and list indexing
        for i_bond in range(2):
            sB = cp.tensordot(cp.diag(s[np.mod(i_bond - 1, 2)]), B[i_bond], axes=(1, 1))
            C = cp.tensordot(sB, cp.conj(sB), axes=(0, 0))
            R = cp.tensordot(C, sz, axes=([0, 2], [0, 1]))
            mean1 = cp.trace(R)
            for i in range(dist - 1):
                T = cp.tensordot(R, B[np.mod(i_bond + 1 + i, 2)], axes=(0, 1))
                T = cp.tensordot(T, cp.conj(B[np.mod(i_bond + 1 + i, 2)]), axes=(0, 1))
                R = cp.trace(T, axis1=0, axis2=2)
            C = cp.tensordot(B[np.mod(i_bond + dist, 2)],
                             cp.conj(B[np.mod(i_bond + dist, 2)]), axes=(2, 2))
            L = cp.tensordot(R, C, axes=([0, 1], [1, 3]))
            corr += cp.real(
                cp.tensordot(L, sz, axes=([0, 1], [0, 1])) - mean1 * mean1)
        return 0.5 * corr
def get_Jz(self, Nout, pz, fb=1, overlap=False):
    self.Nout = Nout
    num = int(self.N * pz * Nout) - cp.mod(int(self.N * pz * Nout), Nout)
    sample_neurons = cp.random.choice(cp.arange(self.N), int(num), replace=overlap)
    neuro_per_read = int(self.N * pz)
    gz = cp.zeros((self.N, Nout))
    for j in range(Nout):
        for i in sample_neurons[j * neuro_per_read:(j + 1) * neuro_per_read]:
            gz[i, j] = 1
    self.read_connectivity = gz
    # self.Jz = cp.multiply(randn, gz)  # (N,Nout)
    self.Jz = cp.zeros((self.N, self.Nout))
    self.Jgz = cp.multiply(fb * (cp.random.rand(self.N, Nout) - 0.5), gz)
    self.read_neurons = sample_neurons.reshape(Nout, neuro_per_read)
    self.neuro_per_read = neuro_per_read
def unwrap(p, discont=numpy.pi, axis=-1):
    """Unwrap by changing deltas between values to 2*pi complement.

    Args:
        p (cupy.ndarray): Input array.
        discont (float): Maximum discontinuity between values, default is
            ``pi``.
        axis (int): Axis along which unwrap will operate, default is the last
            axis.

    Returns:
        cupy.ndarray: The result array.

    .. seealso:: :func:`numpy.unwrap`
    """
    p = cupy.asarray(p)
    nd = p.ndim
    dd = diff(p, axis=axis)
    slice1 = [slice(None, None)] * nd  # full slices
    slice1[axis] = slice(1, None)
    slice1 = tuple(slice1)
    ddmod = cupy.mod(dd + numpy.pi, 2 * numpy.pi) - numpy.pi
    cupy.copyto(ddmod, numpy.pi, where=(ddmod == -numpy.pi) & (dd > 0))
    ph_correct = ddmod - dd
    cupy.copyto(ph_correct, 0, where=cupy.abs(dd) < discont)
    up = cupy.array(p, copy=True, dtype='d')
    up[slice1] = p[slice1] + cupy.cumsum(ph_correct, axis=axis)
    return up
def internal_train(self, input_series, output_series, dt, alpha, nt, test_input=None):
    '''
    input:
        input time series of ndarray of dim (Nin,T)
        output time series of ndarray of dim (Nout,T)
        update the training every nt steps
    '''
    if input_series is None:
        input_series = cp.zeros(output_series.shape)
        self.add_input(output_series.shape[0], 0, 0)
    assert input_series.shape[1] == output_series.shape[1]
    L = input_series.shape[1]
    Dout = output_series.shape[0]
    # arrays to record output trajectories during training and testing
    train_out = cp.zeros((Dout, L))
    test_out = cp.zeros((Dout, L))
    x = self.x
    r = self.r
    Pz = repmat((1.0 / alpha) * cp.eye(self.N), self.Nout)
    P = repmat((1.0 / alpha) * cp.eye(self.N), self.Nout, n2=self.neuro_per_read)

    # _________________training__________________
    for i in range(L):
        print(i)
        t = dt * i
        x = (1.0 - dt) * x + cp.dot(self.M, r * dt) + cp.dot(
            self.Jgi, input_series[:, i] * dt).reshape(-1, 1)
        r = cp.tanh(x)  # (N,1)
        z = cp.dot(self.Jz.T, r)  # (Nout,1)
        if cp.mod(i, nt) == 0:
            for readi in range(self.Nout):
                w_readi = self.Jz[:, readi]
                synapse_ind = cp.where(w_readi != 0)
                flt = cp.zeros((self.N, self.N))
                flt[synapse_ind, synapse_ind] = 1
                Pzi = Pz[:, :, readi]
                kzi = cp.dot(Pzi, cp.dot(flt, r))  # (Nout,1)
                rPr_z = cp.dot(r.T, kzi)  # scalar
                c_z = 1.0 / (1.0 + rPr_z)  # scalar
                Pz[:, :, readi] = cp.dot(Pzi, flt) - cp.dot(kzi, (kzi.T * c_z))
                e_z = z[readi] - output_series[readi, i]
                dw = -e_z * kzi * c_z
                self.Jz[:, readi] += dw.reshape(-1, )

                neurons_read_i = self.read_neurons[readi, :]
                for idx, neuroni in enumerate(neurons_read_i):
                    w_ni = self.M[neuroni, :].reshape(1, -1)  # (1,N)
                    synapse_ind = cp.where(w_ni != 0)[1]  # find neurons pre-synaptic to neuron i
                    flt = cp.zeros((self.N, self.N))
                    flt[synapse_ind, synapse_ind] = 1
                    Pi = P[:, :, readi, idx]  # (N,N); the effective dim of Pi is the number of pre-synapses
                    ki = cp.dot(Pi, cp.dot(flt, r))
                    rPr = cp.dot(r.T, ki)
                    c = 1.0 / (1.0 + rPr)
                    Pz[:, :, readi] = cp.dot(Pi, flt) - cp.dot(ki, (ki.T * c))
                    dw = -e_z * ki * c
                    self.M[neuroni, :] += dw.reshape(-1, )
        train_out[:, i] = z

    # _________________testing_______________
    if test_input is None:
        test_input = input_series
    L = test_input.shape[1]
    for i in range(L):
        x = (1.0 - dt) * x + cp.dot(self.M, r * dt) + cp.dot(
            self.Jgi, input_series[:, i] * dt).reshape(-1, 1)
        r = cp.tanh(x)  # (N,1)
        z = cp.dot(self.Jz.T, r)  # (Nout,1)
        test_out[:, i] = z
    return train_out, test_out
def internal_train(self, input_series, output_series, dt, alpha, nt, nl=0, test_input=None):
    '''
    input:
        input time series of ndarray of dim (Nin,T)
        output time series of ndarray of dim (Nout,T)
        update the training every nt steps
    '''
    if input_series is None:
        input_series = cp.zeros(output_series.shape)
        self.add_input(output_series.shape[0], 0, 0)
    assert input_series.shape[1] == output_series.shape[1]
    L = input_series.shape[1]
    Dout = output_series.shape[0]
    # arrays to record output trajectories during training and testing
    train_out = cp.zeros((Dout, L))
    test_out = cp.zeros((Dout, L))
    weight_train = cp.zeros((self.Nout, L))
    noise = cp.random.randn(self.N, L) * nl
    x = self.x
    r = self.r
    z = cp.random.randn(self.Nout, 1)
    P_z = repmat((1.0 / alpha) * cp.eye(self.N), self.Nout)
    P_syn = repmat((1.0 / alpha) * cp.eye(self.N), self.neuro_per_read, n2=self.Nout)

    # _________initiate inverse correlation matrices and filters_____________
    for i in range(self.Nout):
        flt = self.read_connectivity[:, i]
        P_z[i] = np.multiply(P_z[i], flt * flt.T)
        for j, idx in enumerate(self.read_neurons[:, i]):
            flt = self.M[idx, :].T
            P_syn[i, j] = np.multiply(P_z[i], flt * flt.T)

    # _________________training__________________
    for i in tqdm(range(L)):
        # print(i)
        t = dt * i
        # x = (1.0 - dt)*x + cp.matmul(self.M, r*dt) + cp.matmul(self.Jgi, input_series[:, i]*dt).reshape(-1, 1)
        x = (1.0 - dt) * x + cp.matmul(self.M, r * dt) + cp.matmul(
            self.Jgz, z * dt) + noise[:, i].reshape(-1, 1) * dt + cp.matmul(
                self.Jgi, input_series[:, i].reshape(-1, 1) * dt)
        r = cp.tanh(x)  # (N,1)
        z = cp.matmul(self.Jz.T, r)  # (Nout,1)
        if cp.mod(i, nt) == 0:
            for readi in range(self.Nout):
                e_z = z[readi] - output_series[readi, i]
                [P_z[readi], self.Jz[:, readi]] = update_one(
                    P_z[readi], self.read_connectivity[:, readi], r, e_z,
                    self.Jz[:, readi])
                neurons_read_i = self.read_neurons[readi, :]
                for idx, neuroni in enumerate(neurons_read_i):
                    [P_syn[readi, neuroni], new_raw] = update_one(
                        P_syn[readi, neuroni],
                        self.inter_connectivity[neuroni, :].T, r, e_z,
                        self.M[neuroni].T)
                    self.M[neuroni] = new_raw.T
        train_out[:, i] = z.reshape(-1, )
        weight_train[:, i] = cp.diag(cp.sqrt(cp.matmul(self.Jz.T, self.Jz)))

    # _________________testing_______________
    if test_input is None:
        test_input = input_series
    L = test_input.shape[1]
    for i in range(L):
        x = (1.0 - dt) * x + cp.matmul(self.M, r * dt) + cp.matmul(
            self.Jgz, z * dt) + noise[:, i].reshape(-1, 1) * dt + cp.matmul(
                self.Jgi, test_input[:, i].reshape(-1, 1) * dt)
        r = cp.tanh(x)  # (N,1)
        z = cp.matmul(self.Jz.T, r)  # (Nout,1)
        # print(r.shape, x.shape, z.shape)
        test_out[:, i] = z.reshape(-1, )
    return train_out, test_out, weight_train
def fb_train(self, input_series, output_series, dt, alpha, nt, nl=0, test_input=None):
    if input_series is None:
        input_series = cp.zeros(output_series.shape)
        self.add_input(output_series.shape[0], 0, 0)
    if test_input is None:
        test_input = input_series
    assert input_series.shape[1] == output_series.shape[1]
    L = input_series.shape[1]
    Nout = output_series.shape[0]
    # arrays to record output trajectories during training and testing
    train_out = cp.zeros((Nout, L))
    weight_train = cp.zeros((Nout, L))
    x = self.x
    r = self.r
    z = cp.random.randn(Nout, 1)
    P_all = repmat(cp.eye(self.N) / alpha, Nout)
    flts = []
    noise = cp.random.randn(self.N, L) * nl
    noise2 = cp.random.randn(self.N, L) * nl
    for i in range(self.Nout):
        flt = self.read_connectivity[:, i:i + 1]
        P_all[i] = cp.multiply(P_all[i], cp.matmul(flt, flt.T))

    for i in tqdm(range(L)):
        # print(i)
        t = dt * i
        # x = (1.0 - dt)*x + cp.matmul(self.M, r*dt) + cp.matmul(self.Jgi, input_series[:, i]*dt).reshape(-1, 1) + self.Jgz * z * dt
        # coef * (x'-x)/dt = -x + Mx  -->  x' = x + dt/coef * (-x + Mx)
        # x = (1.0 - dt)*x + cp.matmul(self.M, r*dt) + cp.matmul(self.Jgz, z*dt) + noise[:, i].reshape(-1, 1)*dt + cp.matmul(self.Jgi, input_series[:, i].reshape(-1, 1)*dt)
        # when considering neurons with different time scales:
        x = x + (-x + cp.matmul(self.M, r) + cp.matmul(
            self.Jgz, z) + noise[:, i].reshape(-1, 1) + cp.matmul(
                self.Jgi, input_series[:, i:i + 1])) * dt / self.time_coef
        # x = (1.0 - dt)*x + cp.dot(self.M, r*dt) + cp.dot(self.Jgz, z*dt)
        r = cp.tanh(x)  # (N,1)
        z = cp.matmul(self.Jz.T, r)  # (Nout,1)
        if cp.mod(i, nt) == 0:
            for readi in range(self.Nout):
                e_z = z[readi] - output_series[readi, i]
                [P_all[readi], self.Jz[:, readi]] = update_one(
                    P_all[readi], self.read_connectivity[:, readi], r, e_z,
                    self.Jz[:, readi])
            # ____________UPDATE PARAMETERS (original)_________
            # [P_all, self.Jz] = update_weight(P_all, flts, r, z, self.Jz, output_series[:, i], delta=self.discount)
        train_out[:, i] = z.reshape(-1, )
        weight_train[:, i] = cp.diag(cp.sqrt(cp.matmul(self.Jz.T, self.Jz)))

    if test_input is None:
        test_input = input_series
    L = test_input.shape[1]
    test_out = cp.zeros((Nout, L))
    # print(P)
    # print(flts[1])
    for i in range(L):
        # x = (1.0 - dt)*x + cp.matmul(self.M, r*dt) + cp.matmul(self.Jgi, input_series[:, i]*dt).reshape(-1, 1) + self.Jgz * z * dt
        # x = (1.0 - dt)*x + cp.matmul(self.M, r*dt) + cp.matmul(self.Jgz, z*dt) + noise2[:, i].reshape(-1, 1)*dt + cp.matmul(self.Jgi, test_input[:, i].reshape(-1, 1)*dt)
        x = x + (-x + cp.matmul(self.M, r) + cp.matmul(self.Jgz, z) +
                 noise2[:, i].reshape(-1, 1) + cp.matmul(
                     self.Jgi, test_input[:, i].reshape(-1, 1))) * dt / self.time_coef
        r = cp.tanh(x)  # (N,1)
        z = cp.matmul(self.Jz.T, r)  # (Nout,1)
        test_out[:, i] = z.reshape(-1, )

    self.P_all = P_all
    return train_out, test_out, weight_train
def logisticmap_calc(x, p, mu):
    return cp.mod(cp.multiply(mu, cp.multiply(x, cp.add(x, 1))), p)
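A minimal iteration sketch for `logisticmap_calc`, which computes x_{n+1} = mu * x_n * (x_n + 1) mod p elementwise; the modulus `p`, coefficient `mu`, and starting values below are illustrative:

import cupy as cp

x = cp.asarray([3.0, 5.0, 7.0])
p, mu = 11, 2
for _ in range(3):
    x = logisticmap_calc(x, p, mu)  # apply the map elementwise, three times
print(x)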
def _unwrap_correct(dd, discont):
    ddmod = cupy.mod(dd + numpy.pi, 2 * numpy.pi) - numpy.pi
    cupy.copyto(ddmod, numpy.pi, where=(ddmod == -numpy.pi) & (dd > 0))
    ph_correct = ddmod - dd
    cupy.copyto(ph_correct, 0., where=cupy.abs(dd) < discont)
    return ph_correct
def ground_state(gort):
    # Hamiltonian
    htras = 0.5
    # gort = 0.10
    gpar = 3.0

    # diagonal part
    Ham = np.diag([
        -gort * 0.5 * SzSz(conf, 0, 1) - gort * 0.5 * SzSz(conf, 2, 3)
        for conf in range(hilbertsize)
    ])
    Ham += np.diag([
        -gpar * SzSz(conf, 0, 2) - gpar * SzSz(conf, 1, 3)
        for conf in range(hilbertsize)
    ])

    # off-diagonal part
    for conf in range(hilbertsize):
        value, newconf = Spinflip(conf, 0, 2)
        Ham[newconf, conf] -= value
        value, newconf = Spinflip(conf, 1, 3)
        Ham[newconf, conf] -= value

    # transverse external field
    for conf in range(hilbertsize):
        value, newconf = Sx(conf, 0)
        Ham[newconf, conf] -= htras * value
        value, newconf = Sx(conf, 1)
        Ham[newconf, conf] -= htras * value
        value, newconf = Sx(conf, 2)
        Ham[newconf, conf] -= htras * value
        value, newconf = Sx(conf, 3)
        Ham[newconf, conf] -= htras * value
    print(Ham)

    # First define the parameters of the model / simulation
    J = -1.
    chi = 100
    d = 4
    delta = 0.01
    N = 1000

    B = []
    s = []
    for i in range(2):
        # B.append(cp.zeros([2, 1, 1])); B[-1][0, 0, 0] = 1
        # s.append(cp.ones([1]))
        B.append(cp.random.rand(4, 10, 10))
        s.append(cp.random.rand(10))

    # Generate the two-site time evolution operator
    # H_bond = cp.array([[J, gx*0.5, gx*0.5, 0], [gx*0.5, -J, 0, gx*0.5],
    #                    [gx*0.5, 0, -J, gx*0.5], [0, gx*0.5, gx*0.5, J]])
    # H_bond += cp.diag([-gz, 0, 0, gz])
    H_bond = Ham
    U = cp.asarray(np.reshape(expm(-delta * H_bond), (4, 4, 4, 4)))

    # Perform the imaginary time evolution alternating on A and B bonds
    for step in range(0, N):
        s, B = evol(s, B, U, chi, d)

    # compute magnetization
    mag = magnetization(s, B, d)
    print("sigmazeta =", mag)

    # Get the bond energies
    E = []
    for i_bond in range(2):
        # list indices must be host-side integers, so use np.mod here
        BB = cp.tensordot(B[i_bond], B[np.mod(i_bond + 1, 2)], axes=(2, 1))
        sBB = cp.tensordot(cp.diag(s[np.mod(i_bond - 1, 2)]), BB, axes=(1, 1))
        C = cp.tensordot(sBB, cp.reshape(H_bond, [d, d, d, d]), axes=([1, 2], [2, 3]))
        sBB = cp.conj(sBB)
        E.append(
            cp.squeeze(cp.tensordot(sBB, C, axes=([0, 3, 1, 2], [0, 1, 2, 3]))).item())
    print("E_iTEBD =", np.mean(E))
    return s, B
def fb_train(self, input_series, output_series, dt, alpha, nt, test_input=None, fb=1.0):
    if input_series is None:
        input_series = cp.zeros(output_series.shape)
        self.add_input(output_series.shape[0], 0, 0)
    assert input_series.shape[1] == output_series.shape[1]
    L = input_series.shape[1]
    Nout = output_series.shape[0]
    # arrays to record output trajectories during training and testing
    train_out = cp.zeros((Nout, L))
    weight_train = cp.zeros((Nout, L))
    x = self.x
    r = self.r
    z = cp.random.randn(Nout, 1)
    P_all = repmat(cp.eye(self.N) / alpha, Nout)
    # TODO test proper form of Jgz
    # self.Jgz = fb * (cp.random.rand(self.N, Nout) - 0.5)
    for i in range(self.Nout):
        flt = self.read_connectivity[:, i:i + 1]
        P_all[i] = cp.multiply(P_all[i], cp.matmul(flt, flt.T))

    for i in range(L):
        # print(i)
        t = dt * i
        # x = (1.0 - dt)*x + cp.dot(self.M, r*dt) + cp.dot(self.Jgi, input_series[:, i]*dt).reshape(-1, 1) + self.Jgz * z * dt
        x = (1.0 - dt) * x + cp.dot(self.M, r * dt) + cp.dot(self.Jgz, z * dt)
        r = cp.tanh(x)  # (N,1)
        # print('r:', r.shape)
        z = cp.dot(self.Jz.T, r)  # (Nout,1)
        # print('z:', z.shape)
        if cp.mod(i, nt) == 0:
            # _____update with update_one(P, flt, r, e, J)
            for readi in range(Nout):
                e_z = float(z[readi, 0] - output_series[readi, i])
                [P_all[readi], self.Jz[:, readi]] = update_one(
                    P_all[readi], self.read_connectivity[:, readi], r, e_z,
                    self.Jz[:, readi])
                # r_p = cp.dot(flt, r)
                # k = cp.dot(P, r_p)  # (N,1)
                # rPr = cp.dot(r_p.T, k)  # scalar
                # c = 1.0 / (1.0 + rPr)  # scalar
                # P = P - cp.dot(k, (k.T * c))
                # e = z - output_series[0, i]
                # dw = -e*k*c
                # self.Jz += dw.reshape(-1, 1)
        train_out[:, i] = z[:, 0]
        weight_train[:, i] = cp.diag(cp.sqrt(cp.matmul(self.Jz.T, self.Jz)))

    if test_input is None:
        test_input = input_series
    L = test_input.shape[1]
    test_out = cp.zeros((Nout, L))
    for i in range(L):
        # x = (1.0 - dt)*x + cp.dot(self.M, r*dt) + cp.dot(self.Jgi, input_series[:, i]*dt).reshape(-1, 1) + self.Jgz * z * dt
        x = (1.0 - dt) * x + cp.dot(self.M, r * dt) + cp.dot(self.Jgz, z * dt)
        r = cp.tanh(x)  # (N,1)
        z = cp.dot(self.Jz.T, r)  # (Nout,1)
        test_out[:, i] = z[:, 0]
    return train_out, test_out, weight_train
def CE_cupy(period, data, xbins=10, ybins=5):
    """
    Returns the conditional entropy of *data* rephased with *period*.

    **Parameters**

    period : number
        The period to rephase *data* by.
    data : array-like, shape = [n_samples, 2] or [n_samples, 3]
        Array containing columns *time*, *mag*, and (optional) *error*.
    xbins : int, optional
        Number of phase bins (default 10).
    ybins : int, optional
        Number of magnitude bins (default 5).
    """
    import cupy as cp
    if period <= 0:
        return np.PINF

    shift = 0.0
    col = 0

    r = cp.array(data)
    r[:, 0] = cp.mod(r[:, col], period) / period

    # histogramming is done on the host, so move the phased columns back to NumPy
    bins, xedges, yedges = np.histogram2d(cp.asnumpy(r[:, 0]), cp.asnumpy(r[:, 1]),
                                          [xbins, ybins], [[0, 1], [0, 1]])
    size = r.shape[0]

    if size > 0:
        # bins[i,j] / size
        divided_bins = bins / size
        # indices where that is positive,
        # to avoid division by zero
        arg_positive = divided_bins > 0

        # array containing the sums of each column in the bins array
        column_sums = np.sum(divided_bins, axis=1)  # changed 0 by 1
        # array is repeated row-wise, so that it can be sliced by arg_positive
        column_sums = np.repeat(np.reshape(column_sums, (xbins, 1)), ybins, axis=1)
        # column_sums = np.repeat(np.reshape(column_sums, (1, -1)), xbins, axis=0)

        # select only the elements in both arrays which correspond to a
        # positive bin
        select_divided_bins = divided_bins[arg_positive]
        select_column_sums = column_sums[arg_positive]

        # initialize the result array
        A = np.empty((xbins, ybins), dtype=float)
        # store at every index [i,j] in A which corresponds to a positive bin:
        # bins[i,j]/size * log(bins[i,:] / size / (bins[i,j]/size))
        A[arg_positive] = select_divided_bins \
            * np.log(select_column_sums / select_divided_bins)
        # store 0 at every index in A which corresponds to a non-positive bin
        A[~arg_positive] = 0

        # return the summation
        return np.sum(A)
    else:
        return np.PINF
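A hedged usage sketch for `CE_cupy` on synthetic data; the times, the trial periods, and the normalization are made up, and magnitudes are assumed to be pre-scaled into [0, 1] as the histogram range expects:

import numpy as np

t = np.linspace(0, 10, 200)                    # observation times
mag = 0.5 + 0.4 * np.sin(2 * np.pi * t / 2.5)  # "magnitudes" already scaled into [0, 1]
data = np.column_stack([t, mag])
print(CE_cupy(2.5, data))  # near the true period the conditional entropy tends to be lower
print(CE_cupy(1.0, data))  # away from it, typically higher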
def square(t, duty=0.5):
    """
    Return a periodic square-wave waveform.

    The square wave has a period ``2*pi``, has value +1 from 0 to
    ``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
    the interval [0,1].

    Note that this is not band-limited. It produces an infinite number
    of harmonics, which are aliased back and forth across the frequency
    spectrum.

    Parameters
    ----------
    t : array_like
        The input time array.
    duty : array_like, optional
        Duty cycle. Default is 0.5 (50% duty cycle).
        If an array, causes wave shape to change over time, and must be the
        same length as t.

    Returns
    -------
    y : ndarray
        Output array containing the square waveform.

    Examples
    --------
    A 5 Hz waveform sampled at 500 Hz for 1 second:

    >>> import cusignal
    >>> import cupy as cp
    >>> import matplotlib.pyplot as plt
    >>> t = cp.linspace(0, 1, 500, endpoint=False)
    >>> plt.plot(t, cp.asnumpy(cusignal.square(2 * cp.pi * 5 * t)))
    >>> plt.ylim(-2, 2)

    A pulse-width modulated sine wave:

    >>> plt.figure()
    >>> sig = cp.sin(2 * cp.pi * t)
    >>> pwm = cusignal.square(2 * cp.pi * 30 * t, duty=(sig + 1)/2)
    >>> plt.subplot(2, 1, 1)
    >>> plt.plot(cp.asnumpy(t), cp.asnumpy(sig))
    >>> plt.subplot(2, 1, 2)
    >>> plt.plot(cp.asnumpy(t), cp.asnumpy(pwm))
    >>> plt.ylim(-1.5, 1.5)
    """
    t, w = asarray(t), asarray(duty)
    w = asarray(w + (t - t))
    t = asarray(t + (w - w))
    if t.dtype.char in ['fFdD']:
        ytype = t.dtype.char
    else:
        ytype = 'd'

    y = zeros(t.shape, ytype)

    # width must be between 0 and 1 inclusive
    mask1 = (w > 1) | (w < 0)
    place(y, mask1, nan)

    # on the interval 0 to duty*2*pi function is 1
    tmod = mod(t, 2 * pi)
    mask2 = (1 - mask1) & (tmod < w * 2 * pi)
    place(y, mask2, 1)

    # on the interval duty*2*pi to 2*pi function is
    #  (pi*(w+1)-tmod) / (pi*(1-w))
    mask3 = (1 - mask1) & (1 - mask2)
    place(y, mask3, -1)
    return y
def reconstruct_alt(imgs, discs, hres_size, row, n_iters=1, o_f_init=None,
                    del_1=1000, del_2=1, round_values=True,
                    plot_per_frame=False, show_interval=None,
                    subtract_bg=False, out_path=None):
    """The main reconstruction algorithm. Adapted from Tian et al."""
    # Put input images on GPU, estimate background noise
    imgs = [cp.array(img) for img in imgs]
    bgs = get_bg(imgs) if subtract_bg else cp.zeros(len(imgs))

    IMAGESIZE = imgs[0].shape[0]
    CUTOFF_FREQ_px = get_cutoff(row)
    FRAMES = len(imgs)

    orig = IMAGESIZE // 2 - 1  # Low-res origin
    lres_size = (IMAGESIZE, IMAGESIZE)
    m1, n1 = lres_size
    m, n = hres_size

    losses = []  # Reconstruction Loss
    convs = []   # Inverse Convergence index

    # Initial high-res guess
    if lres_size == hres_size:  # Initialize with ones
        # Use old algorithm
        F = lambda x: cp.fft.fftshift(cp.fft.fft2(x))
        Ft = lambda x: cp.fft.ifft2(cp.fft.ifftshift(x))
        o = cp.ones(hres_size)
        o_f = F(o)
    elif o_f_init is not None:  # Initialize with given initialization
        F = lambda x: cp.fft.fftshift(cp.fft.fft2(cp.fft.ifftshift(x)))
        Ft = lambda x: cp.fft.fftshift(cp.fft.ifft2(cp.fft.ifftshift(x)))
        o = cp.zeros_like(o_f_init)
        o_f = o_f_init
    else:  # Initialize with resized first frame from imgs
        F = lambda x: cp.fft.fftshift(cp.fft.fft2(cp.fft.ifftshift(x)))
        Ft = lambda x: cp.fft.fftshift(cp.fft.ifft2(cp.fft.ifftshift(x)))
        o = cp.sqrt(cp.array(cv2.resize(cp.asnumpy(imgs[0] - bgs[0]), hres_size)))
        o_f = Ft(o)

    # Pupil Function
    p = cp.zeros(lres_size)
    p = cp.array(cv2.circle(cp.asnumpy(p), (orig, orig), CUTOFF_FREQ_px, 1, -1))
    ctf = p.copy()  # Ideal Pupil, for filtering later on

    # Main Loop
    log = tqdm(
        total=n_iters,
        desc=f'Starting...',
        bar_format='{percentage:3.0f}% [{elapsed}<{remaining} ({rate_inv_fmt})]{bar}{desc}',
        leave=False,
        ascii=True)

    for j in range(n_iters):
        conv = []  # Convergence Index
        for i in range(FRAMES):
            if discs[i] == 0:  # Empty frame
                continue

            # Get k0x, k0y and hence, shifting values
            k0x, k0y = discs[i]

            # Construct auxiliary functions for the set of LEDs (= 1, here)
            if hres_size == lres_size:
                shift_x, shift_y = [-round(k0x - orig), -round(k0y - orig)] \
                    if round_values else [-(k0x - orig), -(k0y - orig)]

                if not round_values:
                    o_f_i = FourierShift2D(o_f, [shift_x, shift_y])  # O_i(k - k_m)
                else:
                    o_f_i = cp.roll(o_f, int(shift_y), axis=0)
                    o_f_i = cp.roll(o_f_i, int(shift_x), axis=1)

                yl, xl = 0, 0  # To reduce code later on

            else:  # Output size larger than individual frames
                _orig = hres_size[0] // 2 - 1

                del_x, del_y = k0x - orig, k0y - orig
                x, y = round(_orig - del_x), round(_orig - del_y)

                yl = int(y - m1 // 2)
                xl = int(x - n1 // 2)

                assert xl > 0 and yl > 0, 'Both should be > 0'
                o_f_i = o_f[yl:yl + n1, xl:xl + m1].copy()

            psi_k = o_f_i * p * ctf  # DEBUG: REPLACE * ctf with * p

            # Plot outputs after each frame, for debugging
            if plot_per_frame:
                o_i = Ft(o_f_i * p)
                plt.figure(figsize=(10, 2))
                plt.subplot(161)
                plt.imshow(cp.asnumpy(correct(abs(o_i))))
                plt.title(f'$I_{{l}}({i})$')
                opts()  # DEBUG
                plt.subplot(162)
                plt.imshow(cp.asnumpy(cv2.convertScaleAbs(
                    cp.asnumpy(20 * cp.log(1 + abs(o_f_i * p))))))
                plt.title(f'$S_{{l}}({i})$')
                opts()  # DEBUG

            # Impose intensity constraint and update auxiliary function
            psi_r = F(psi_k)  # DEBUG: CHANGE BACK TO F

            # Low-res estimate obtained from our reconstruction
            I_l = abs(psi_r) if lres_size != hres_size else abs(psi_r)

            # Subtract background noise and clip values to avoid NaN
            I_hat = cp.clip(imgs[i] - bgs[i], a_min=0)
            phi_r = cp.sqrt(I_hat / (cp.abs(psi_r) ** 2)) * psi_r

            phi_k = Ft(phi_r)  # DEBUG: CHANGE BACK TO Ft

            # Update object and pupil estimates
            if hres_size == lres_size:
                if not round_values:
                    p_i = FourierShift2D(p, [-shift_x, -shift_y])  # P_i(k+k_m)
                else:
                    p_i = cp.roll(p, int(-shift_y), axis=0)
                    p_i = cp.roll(p_i, int(-shift_x), axis=1)

                if not round_values:
                    phi_k_i = FourierShift2D(phi_k, [-shift_x, -shift_y])  # Phi_m_i(k+k_m)
                else:
                    phi_k_i = cp.roll(phi_k, int(-shift_y), axis=0)
                    phi_k_i = cp.roll(phi_k_i, int(-shift_x), axis=1)
            else:  # Output size larger than individual frames
                p_i = p.copy()
                phi_k_i = phi_k.copy()

            # O_{i+1}(k)
            temp = o_f[yl:yl + n1, xl:xl + m1].copy() + \
                (cp.abs(p_i) * cp.conj(p_i) *
                 (phi_k_i - o_f[yl:yl + n1, xl:xl + m1].copy() * p_i)) / \
                (cp.abs(p).max() * (cp.abs(p_i) ** 2 + del_1))

            # P_{i+1}(k)
            p = p + (cp.abs(o_f_i) * cp.conj(o_f_i) * (phi_k - o_f_i * p)) / \
                (cp.abs(o_f[yl:yl + n1, xl:xl + m1].copy()).max() *
                 (cp.abs(o_f_i) ** 2 + del_2))

            o_f[yl:yl + n1, xl:xl + m1] = temp.copy()

            ###### Using F here instead of Ft to get upright image
            o = F(o_f) if lres_size != hres_size else Ft(o_f)
            ######

            if plot_per_frame:
                plt.subplot(163)
                plt.imshow(cp.asnumpy(cp.mod(ctf * cp.angle(p), 2 * cp.pi)))
                plt.title(f'P({i})')
                opts()  # DEBUG
                plt.subplot(164)
                plt.imshow(cp.asnumpy(correct(abs(o))))
                plt.title(f'$I_{{h}}({i})$')
                opts()  # DEBUG
                plt.subplot(165)
                plt.imshow(cp.asnumpy(correct(cp.angle(o))))
                plt.title(f'$\\theta(I_{{h}}({i}))$')
                opts()  # DEBUG
                plt.subplot(166)
                plt.imshow(cp.asnumpy(show(cp.asnumpy(o_f))))
                plt.title(f'$S_{{h}}({i})$')
                opts()
                plt.show()  # DEBUG

            c = inv_conv_idx(I_l, imgs[i])
            conv.append(c)

        if not plot_per_frame and (show_interval is not None and j % show_interval == 0):
            o_i = Ft(o_f_i * p)  # DEBUG
            plt.figure(figsize=(10, 2))
            plt.subplot(161)
            plt.imshow(cp.asnumpy(correct(abs(o_i))))
            plt.title(f'$I_{{l}}({i})$')
            opts()  # DEBUG
            plt.subplot(162)
            plt.imshow(cp.asnumpy(cv2.convertScaleAbs(
                cp.asnumpy(20 * cp.log(1 + abs(o_f_i * p))))))
            plt.title(f'$S_{{l}}({i})$')
            opts()  # DEBUG
            plt.subplot(163)
            plt.imshow(cp.asnumpy(cp.mod(ctf * cp.angle(p), 2 * cp.pi)))
            plt.title(f'P({i})')
            opts()  # DEBUG
            plt.subplot(164)
            plt.imshow(cp.asnumpy(correct(abs(o))))
            plt.title(f'$I_{{h}}({i})$')
            opts()  # DEBUG
            plt.subplot(165)
            plt.imshow(cp.asnumpy(correct(cp.angle(o))))
            plt.title(f'$\\theta(I_{{h}}({i}))$')
            opts()  # DEBUG
            plt.subplot(166)
            plt.imshow(cp.asnumpy(cv2.convertScaleAbs(
                cp.asnumpy(20 * cp.log(1 + abs(o_f))))))
            plt.title(f'$S_{{h}}({i})$')
            opts()
            plt.show()  # DEBUG

        loss = metric_norm(imgs, o_f_i, p)
        losses.append(loss)
        conv = float(sum(conv) / len(conv))
        convs.append(conv)
        log.set_description_str(
            f'[Iteration {j + 1}] Convergence Loss: {cp.asnumpy(conv):e}')
        log.update(1)

    scale = 7
    plt.figure(figsize=(3 * scale, 4 * scale))

    plt.subplot(421)
    plt.plot(cp.asnumpy(cp.arange(len(losses))),
             cp.asnumpy(cp.clip(cp.array(losses), a_min=None, a_max=1e4)), 'b-')
    plt.title('Loss Curve')
    plt.ylabel('Loss Value')
    plt.xlabel('Iteration')

    plt.subplot(422)
    plt.plot(cp.asnumpy(cp.arange(len(convs))),
             cp.asnumpy(cp.clip(cp.array(convs), a_min=None, a_max=1e14)), 'b-')
    plt.title('Convergence Index Curve')
    plt.ylabel('Convergence Index')
    plt.xlabel('Iteration')

    amp = cp.array(cv2.resize(read_tiff(row.AMPLITUDE.values[0])[0], hres_size))
    phase = cp.array(cv2.resize(read_tiff(row.PHASE.values[0])[0], hres_size))

    plt.subplot(434)
    plt.title(f'amplitude (Scaled up from {lres_size})')
    plt.imshow(cp.asnumpy(to_uint8(amp)))
    opts()

    plt.subplot(435)
    plt.title(f'phase (Scaled up from {lres_size})')
    plt.imshow(cp.asnumpy(to_uint8(phase)))

    plt.subplot(436)
    rec = abs(cp.sqrt(amp) * cp.exp(1j * phase))
    plt.title(f'Ground Truth (Scaled up from {lres_size})')
    plt.imshow(cp.asnumpy(to_uint8(rec)))

    plt.subplot(437)
    plt.title('Reconstruction Amplitude')
    amp = abs(o)
    if lres_size == hres_size:
        amp = correct(amp)
    plt.imshow(cp.asnumpy(to_uint8((amp))))

    plt.subplot(438)
    plt.title('Reconstruction Phase')
    phase = cp.angle(o)
    if lres_size == hres_size:
        phase = correct(phase)
    plt.imshow(cp.asnumpy(to_uint8(phase)))

    plt.subplot(439)
    plt.title('Reconstructed Image')
    rec = abs(cp.sqrt(amp) * cp.exp(1j * phase))
    plt.imshow(cp.asnumpy(to_uint8(rec)))

    plt.subplot(427)
    plt.title(f'Recovered Pupil')
    p_show = cp.mod(ctf * cp.angle(p), 2 * cp.pi)
    p_show = (p_show / p_show.max() * 255).astype(np.uint8)
    plt.imshow(cp.asnumpy(p_show), cmap='nipy_spectral')

    plt.subplot(428)
    plt.title(f'Raw frames\' mean (Scaled up from {lres_size})')
    plt.imshow(cv2.resize(cp.asnumpy(cp.array(imgs).mean(axis=0)), hres_size))

    if out_path is None:
        plt.show()
    else:
        plt.savefig(out_path, bbox_inches='tight')
        plt.close('all')

    # Ignore early noise and print where the error is lowest
    if n_iters > 10:
        it = cp.argmin(cp.array(convs[10:])) + 11
        if out_path is not None:
            print(f'Convergence index lowest at {it}th iteration.')
    else:
        it = cp.argmin(cp.array(convs)) + 1
        if out_path is not None:
            print(f'Convergence index lowest at {it}th iteration.')

    if lres_size == hres_size:
        o = correct(o)
    return o, p, it