def GaussForm(AtomicData):
    # At some point we need to add the portion that will look up the
    # scattering factor based on atom type, e.g.:
    #   scattering_params = etbl.get(at_type)
    # but for now the amplitudes/widths below are hard-coded.
    OutputArray = cp.zeros((BoxS, BoxS, BoxS), dtype=np.complex64)
    scalefac = float(BoxS * apix)
    for atom in AtomicData:
        if atom[0][0] == 'H':
            scattering_params = cp.array([1.0, 3.15])
        elif atom[0] == 'OCbb':
            scattering_params = cp.array([1.35, 4.15])
        else:
            scattering_params = cp.array([1.2, 3.2])
        scattering_params = scattering_params / scalefac
        coords = atom[1:]
        center = cp.array([coords[0] / apix, coords[1] / apix, coords[2] / apix])
        s = float(1 / scattering_params[1])
        # Amplitude of a normalized 3D Gaussian of width s in Fourier space
        ampl = float((1 / cp.sqrt(cp.power(2 * pi, 3))) * (1 / cp.power(s, 3)))
        # Add this atom's Gaussian directly in Fourier space; the complex
        # exponential encodes the translation to the atom's center.
        OutputArray += float(scattering_params[0]) * cp.fft.ifftshift(
            ampl * cp.exp(
                -cp.power(pi, 2)
                * (cp.power(ii, 2) + cp.power(jj, 2) + cp.power(kk, 2))
                / (2 * cp.power(s, 2))
                - (2 * pi) * 1j
                * (ii * center[0] + jj * center[1] + kk * center[2])
            )
        )
    OutputArray = cp.asnumpy(OutputArray)
    return OutputArray
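# --- Usage sketch (illustrative, not from the source) ---
# GaussForm assumes module-level globals: BoxS (box size in voxels), apix
# (angstroms per voxel), pi, and precomputed Fourier-frequency grids ii, jj,
# kk. A minimal setup might look like this; the fftshifted-fftfreq grid
# convention is an assumption based on the ifftshift call inside GaussForm.
import numpy as np
import cupy as cp

BoxS = 64
apix = 1.0
pi = cp.pi
freq = cp.fft.fftshift(cp.fft.fftfreq(BoxS, 1))
ii, jj, kk = cp.meshgrid(freq, freq, freq, indexing='ij')

# Each atom is (atom_name, x, y, z). The returned array is the map in
# Fourier space, so an inverse FFT recovers the real-space density.
atoms = [('CA', 10.0, 12.0, 8.0), ('H', 11.0, 12.5, 8.2)]
fourier_map = GaussForm(atoms)
density = np.real(np.fft.ifftn(fourier_map))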
def GaussForm(AtomicData, Params):
    # Variant with a single set of scattering parameters for all atoms.
    OutputArray = cp.zeros((340, 340, 340), dtype=np.complex64)
    scattering_params = cp.array(Params)
    for atom in AtomicData:
        coords = atom[1:]
        center = cp.array([coords[0] / apix, coords[1] / apix, coords[2] / apix])
        s = float(1 / scattering_params[1])
        ampl = float((1 / cp.sqrt(cp.power(2 * pi, 3))) * (1 / cp.power(s, 3)))
        OutputArray += float(scattering_params[0]) * cp.fft.ifftshift(
            ampl * cp.exp(
                -cp.power(pi, 2)
                * (cp.power(ii, 2) + cp.power(jj, 2) + cp.power(kk, 2))
                / (2 * cp.power(s, 2))
                - (2 * pi) * 1j
                * (ii * center[0] + jj * center[1] + kk * center[2])
            )
        )
    OutputArray = cp.asnumpy(OutputArray)
    return OutputArray
def GaussForm(AtomicData, HParams):
    # Variant with separate scattering parameters for hydrogens only.
    OutputArray = cp.zeros((340, 340, 340), dtype=np.complex64)
    for atom in AtomicData:
        if atom[0][0] == 'H':
            scattering_params = cp.array(HParams)
        else:
            scattering_params = cp.array([1, 3])
        coords = atom[1:]
        center = cp.array([coords[0] / apix, coords[1] / apix, coords[2] / apix])
        s = float(1 / scattering_params[1])
        ampl = float((1 / cp.sqrt(cp.power(2 * pi, 3))) * (1 / cp.power(s, 3)))
        OutputArray += float(scattering_params[0]) * cp.fft.ifftshift(
            ampl * cp.exp(
                -cp.power(pi, 2)
                * (cp.power(ii, 2) + cp.power(jj, 2) + cp.power(kk, 2))
                / (2 * cp.power(s, 2))
                - (2 * pi) * 1j
                * (ii * center[0] + jj * center[1] + kk * center[2])
            )
        )
    OutputArray = cp.asnumpy(OutputArray)
    return OutputArray
def mse(image1: np.ndarray, image2: np.ndarray) -> float:
    """
    Mean squared error between two numpy ndarrays.
    If available, the GPU (cupy) is used; otherwise the CPU (numpy).

    Input:
        - image1: first numpy ndarray
        - image2: second numpy ndarray
    Output:
        - Mean squared error as a float
    """
    err = float(np.sum((np.asarray(image1) - np.asarray(image2)) ** 2))
    err /= float(image1.shape[0] * image1.shape[1])
    return err
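# --- Usage sketch (illustrative, not from the source) ---
# For 2D inputs the normalization by shape[0]*shape[1] makes this the
# standard per-pixel MSE.
import numpy as np

a = np.zeros((4, 4))
b = np.ones((4, 4))
assert mse(a, b) == 1.0  # every pixel differs by exactly 1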
def fit(self, epochs=1, batch_size=1, gamma=0.9, **args):
    X = args['X_train']
    y = args['y_train']
    verbose = args.get('verbose', None)
    epochs = int(epochs)
    loss_val = cp.zeros(epochs)
    par_gpu = deepcopy(self.start)
    momentum = {var: cp.zeros_like(self.start[var]) for var in self.start.keys()}
    for i in tqdm(range(epochs)):
        for X_batch, y_batch in self.iterate_minibatches(X, y, batch_size):
            n_batch = float(y_batch.shape[0])
            grad_p = self.model.grad(par_gpu, X_train=X_batch, y_train=y_batch)
            for var in par_gpu.keys():
                # Momentum update: v <- gamma*v - step_size*grad, theta <- theta + v
                momentum[var] = gamma * momentum[var] - self.step_size * grad_p[var]
                par_gpu[var] += momentum[var]
        loss_val[i] = self.model.log_likelihood(par_gpu, X_train=X_batch,
                                                y_train=y_batch)
        if verbose and (i % max(epochs // 10, 1) == 0):
            print('loss: {0:.8f}'.format(cp.asnumpy(loss_val[i])))
    return par_gpu, cp.asnumpy(loss_val)
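# --- Standalone sketch of the momentum rule used in fit() above ---
# (illustrative; fit() itself depends on self.model and self.start, which
# are defined elsewhere). The update is v <- gamma*v - step_size*grad,
# then theta <- theta + v.
import cupy as cp

def momentum_step(par, mom, grad, step_size, gamma=0.9):
    # One momentum-SGD update over a dict of cupy parameter arrays.
    for var in par:
        mom[var] = gamma * mom[var] - step_size * grad[var]
        par[var] += mom[var]
    return par, mom

# Toy check: minimize f(w) = |w|^2, whose gradient is 2w.
par = {'w': cp.array([1.0, -2.0])}
mom = {'w': cp.zeros_like(par['w'])}
for _ in range(100):
    par, mom = momentum_step(par, mom, {'w': 2 * par['w']}, step_size=0.05)
# par['w'] is now close to zero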
def GaussForm(AtomicData):
    OutputArray = cp.zeros((BoxS, BoxS, BoxS), dtype=np.complex64)
    scalefac = float(apix)
    # Scale the fitted STDEV values by a resolution-dependent factor.
    # Note: this mutates args in place, so repeated calls compound the scaling.
    if args.reso != 0:
        resofac = sigmoid(args.reso, a_, b_, c_, d_)
        args.Heavystd *= resofac
        args.OCstd *= resofac
        args.Hstd *= resofac
    for atom in AtomicData:
        if atom[0][0] == 'H':
            scattering_params = cp.array([args.HAmp, args.Hstd])
        elif atom[0] == 'OCbb':
            scattering_params = cp.array([args.OCAmp, args.OCstd])
        else:
            scattering_params = cp.array([args.HeavyAmp, args.Heavystd])
        scattering_params = scattering_params / scalefac
        if scattering_params[1] == 0.0:
            continue
        coords = atom[1:]
        center = cp.array([coords[0] / apix, coords[1] / apix, coords[2] / apix])
        s = float(1 / scattering_params[1])
        ampl = float((1 / cp.sqrt(cp.power(2 * pi, 3))) * (1 / cp.power(s, 3)))
        OutputArray += float(scattering_params[0]) * cp.fft.ifftshift(
            ampl * cp.exp(
                -cp.power(pi, 2)
                * (cp.power(ii, 2) + cp.power(jj, 2) + cp.power(kk, 2))
                / (2 * cp.power(s, 2))
                - (2 * pi) * 1j
                * (ii * center[0] + jj * center[1] + kk * center[2])
            )
        )
    OutputArray = cp.asnumpy(OutputArray)
    return OutputArray
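# --- Hypothetical helper (assumption; not defined in this excerpt) ---
# The GaussForm variant above calls sigmoid(reso, a_, b_, c_, d_) to scale
# the fitted STDEVs with resolution. The actual fit coefficients a_..d_ live
# elsewhere; a common 4-parameter logistic has this shape:
import numpy as np

def sigmoid(x, a, b, c, d):
    # a: baseline offset, b: amplitude, c: midpoint, d: steepness (all
    # hypothetical parameter roles, inferred from the call site only)
    return a + b / (1 + np.exp(-(x - c) / d))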
def sim_map_lite(pose, ii, jj, kk, boxsize, apix, etbl):
    # Fourier-space frequency grids (ii, jj, kk) are passed in precomputed.
    import cupy
    import numpy
    outmap = cupy.zeros((boxsize, boxsize, boxsize))
    ii = cupy.asarray(ii)
    jj = cupy.asarray(jj)
    kk = cupy.asarray(kk)
    pi = cupy.pi
    # PyRosetta residues and atoms are 1-indexed, so include the endpoints.
    for presi in range(1, pose.total_residue() + 1):
        for pres_atj in range(1, pose.residue(presi).natoms() + 1):
            # For each atom, simulate a Gaussian with the appropriate electron
            # scattering factor. Eventually:
            #   at_type = pose.residue(presi).atom_type(pres_atj)
            #   scattering_params = etbl.get(at_type)
            scattering_params = cupy.array([1, 1])
            coords = pose.residue(presi).xyz(pres_atj)
            center = cupy.array(
                [coords[0] / apix, coords[1] / apix, coords[2] / apix])
            s = float(1 / scattering_params[1])
            ampl = float((1 / cupy.sqrt(cupy.power(2 * pi, 3))) *
                         (1 / cupy.power(s, 3)))
            ii_ampl = float(scattering_params[0])
            outmap = outmap + ii_ampl * cupy.fft.ifftn(
                cupy.fft.ifftshift(ampl * cupy.exp(
                    -cupy.power(pi, 2)
                    * (cupy.power(ii, 2) + cupy.power(jj, 2) + cupy.power(kk, 2))
                    / (2 * cupy.power(s, 2))
                    - (2 * pi) * 1j
                    * (ii * center[0] + jj * center[1] + kk * center[2]))))
    outmap = numpy.real(cupy.asnumpy(cupy.transpose(outmap)))
    return outmap
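# --- Usage sketch (illustrative, not from the source) ---
# sim_map_lite expects precomputed fftshifted frequency grids; the boxsize
# and apix values here are placeholders, and pose/etbl would come from the
# PyRosetta setup shown in sim_map() below.
import cupy

boxsize, apix = 64, 1.0
freq = cupy.fft.fftshift(cupy.fft.fftfreq(boxsize, 1))
ii, jj, kk = cupy.meshgrid(freq, freq, freq, indexing='ij')
# outmap = sim_map_lite(pose, ii, jj, kk, boxsize, apix, etbl)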
def learn_mapping(self, seeds, loss_type, **kwargs):
    r"""
    Learn the mapping function. The objective is l2 loss or hinge loss.
    An orthogonality constraint is optional.

    -Input:
        seeds: A list of two lists. seeds[0][i] and seeds[1][i] specify a
            seeding pair.
        loss_type: 'l2' or 'hinge'
            'l2':    min_R \sum_i |R * x_i - y_i|^2
            'hinge': min_R \sum_i \sum_{j!=i}
                         max{0, th_i + |R * x_i - y_i|^2 - |R * x_i - y_j|^2}
            The hinge objective is optimized by SGD; each iteration samples
            some random negative examples.
        kwargs: misc parameters, mostly for the hinge loss minimizer
            orth: (False) whether to constrain the mapping to be orthogonal
            epochs: number of epochs in the SGD optimizer if loss_type='hinge'
            seed_per_batch: number of seeds per minibatch in SGD
            lr: learning rate
            dist: the distance (cosine or squared Euclidean) in the hinge
                loss. Cosine is suggested.
            samples: number of negative samples per seeding pair
            alpha: determines the threshold th_i as
                th_i = percentile(d(R * x_i, y_j) - d(R * x_i, y_i), alpha)
            constant_th: constant threshold for all pairs. If given, alpha
                is ignored.
            src_vocab, tgt_vocab: source and target vocabs. If given, P@1
                is reported during the minimization of the hinge loss.
            queries: for reporting test accuracy; src_vocab and tgt_vocab
                must be given.
    -Output:
        W: linear mapping
    """
    # prepare default params
    orth = kwargs.get('orth')
    epochs = kwargs.get('epochs')
    dist = kwargs.get('dist')
    lr = kwargs.get('lr')
    s_per_b = kwargs.get('seed_per_batch')
    sample_method = kwargs.get('sample_method')
    ns = kwargs.get('samples')
    alpha = kwargs.get('alpha')
    constant_th = kwargs.get('constant_th')
    src_vocab = kwargs.get('src_vocab', None)
    tgt_vocab = kwargs.get('tgt_vocab', None)
    queries = kwargs.get('queries', None)

    seed_dict = {}
    for i, j in zip(*seeds):
        if i not in seed_dict:
            seed_dict[i] = [j]
        else:
            seed_dict[i].append(j)

    if orth:
        # Orthogonal Procrustes: W = U V^T from the SVD of X^T Y
        C = self.src_space[seeds[0]].T.dot(self.tgt_space[seeds[1]])
        U, _, Vh = xp.linalg.svd(C)
        self.W = U.dot(Vh)
    else:
        if not gpu:
            self.W = xp.linalg.lstsq(self.src_space[seeds[0]],
                                     self.tgt_space[seeds[1]],
                                     rcond=None)[0]
        else:
            self.W = xp.linalg.pinv(self.src_space[seeds[0]]).dot(
                self.tgt_space[seeds[1]])

    if loss_type == 'l2':
        self.loss = xp.linalg.norm(
            self.src_space[seeds[0]].dot(self.W)
            - self.tgt_space[seeds[1]]) ** 2
    elif loss_type == 'hinge':
        # SGD optimizer; each iteration samples seeds and negative samples.
        # Initialized with the l2 solution.
        self.loss = []
        dim = self.src_space.shape[1]
        total_it = 0
        th = self.determine_th(seeds, alpha, dist, constant_th)
        for ep in range(epochs):
            S = [_ for _ in zip(*(seeds + [th]))]
            shuffle(S)
            for it in range(0, len(S), s_per_b):
                i_s, i_t, th_i = zip(*S[it:min(len(S), it + s_per_b)])
                i_s, i_t, th_i = list(i_s), list(i_t), xp.array(th_i)
                B = len(i_s)
                Wx = self.src_space[i_s].dot(self.W)
                D = distance_function(Wx, self.tgt_space, dist)
                if sample_method == 'random':
                    j = [xp.random.choice(
                        [_ for _ in range(self.tgt_size)
                         if _ not in seed_dict[i_s_]], ns).tolist()
                        for i_s_ in i_s]
                elif sample_method == 'top':
                    j = []
                    for ii, i_s_ in enumerate(i_s):
                        d_ii = xp.copy(D[ii])
                        d_ii[seed_dict[i_s_]] = float('inf')
                        j.append(top_k(d_ii, ns, biggest=False)[0].tolist())
                delta = D[xp.tile(range(B), (ns, 1)).T, j] \
                    - D[xp.arange(B), i_t][:, None]
                # print some diagnostics
                ell = xp.sum(xp.maximum(th_i[:, None] - delta, 0)) / B
                if gpu:
                    ell = ell.get()
                self.loss.append(ell)
                if total_it % 100 == 0:
                    if all([src_vocab, tgt_vocab, queries]):
                        P_at_1 = self.report_precision(
                            src_vocab, tgt_vocab, queries)
                        p_str = ', p@1 {}%'.format(P_at_1[0])
                    else:
                        p_str = ''
                    print("Epoch {}, Iter {}, loss {:.2f}".format(
                        ep, total_it, ell) + p_str, flush=True)
                incur_loss = delta < th_i[:, None]
                n_incur = [xp.sum(xp.array(_)) for _ in incur_loss]
                if dist == 'sqeuc':
                    delta_y = [xp.sum(self.tgt_space[j[_]][incur_loss[_]],
                                      axis=0)
                               - self.tgt_space[i_t[_]] * n_incur[_]
                               for _ in range(B)]
                    grad = self.src_space[i_s].T.dot(xp.vstack(delta_y))
                elif dist == 'cos':
                    Wx_norm = xp.linalg.norm(Wx, ord=2, axis=1, keepdims=1)
                    delta_y = [
                        xp.sum(self.tgt_space[j[_]][incur_loss[_]] / (eps +
                            xp.linalg.norm(self.tgt_space[j[_]][incur_loss[_]],
                                           ord=2, axis=1, keepdims=1)),
                               axis=0)
                        - self.tgt_space[i_t[_]]
                        / xp.linalg.norm(self.tgt_space[i_t[_]]) * n_incur[_]
                        for _ in range(B)
                    ]
                    delta_cos = [xp.sum(delta[_][incur_loss[_]])
                                 for _ in range(B)]
                    grad = (self.src_space[i_s] / Wx_norm).T.dot(
                        xp.vstack(delta_y)) \
                        + (self.src_space[i_s] * xp.vstack(delta_cos)).T \
                        .dot(Wx / Wx_norm)
                if orth:
                    # Use the Cayley transform to maintain orthogonality
                    A = grad.dot(self.W.T)
                    A = A - A.T
                    Q = xp.linalg.inv(xp.eye(dim) + lr / 2 * A).dot(
                        xp.eye(dim) - lr / 2 * A)
                    self.W = Q.dot(self.W)
                else:
                    self.W -= lr * grad / B
                total_it += 1
    return self.W
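# --- Standalone sketch of the orthogonal (Procrustes) initialization used in
# learn_mapping() above (illustrative). Given paired rows X, Y, the W
# minimizing |XW - Y|_F subject to W^T W = I is U V^T from the SVD of X^T Y.
import numpy as xp  # learn_mapping uses `xp`, which may be numpy or cupy

X = xp.random.randn(100, 8)
R_true, _ = xp.linalg.qr(xp.random.randn(8, 8))  # a random orthogonal map
Y = X.dot(R_true)
U, _, Vh = xp.linalg.svd(X.T.dot(Y))
W = U.dot(Vh)
assert xp.allclose(W, R_true)  # exactly recoverable in the noiseless case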
def sim_map(pose, mrc, apix, etbl):
    # Example setup (previously inline, kept for reference):
    #   mrc = mrcfile.open('examples/1ye3.mrc')
    #   cleanATOM('examples/1ye3.pdb')
    #   pose = pose_from_pdb('examples/1ye3.clean.pdb')
    #   etbl = read_etable('examples/etable.txt')
    #   apix = 1
    import cupy
    import numpy
    mrcsz = mrc.data.shape
    print(mrcsz)
    outmap = cupy.zeros(mrcsz)
    # Build the Fourier-space frequency grids (assumes a cubic map)
    ij = cupy.fft.fftfreq(mrcsz[1], 1)
    ii, jj, kk = cupy.meshgrid(ij, ij, ij, indexing='ij')
    ii = cupy.fft.fftshift(ii)
    jj = cupy.fft.fftshift(jj)
    kk = cupy.fft.fftshift(kk)
    pi = cupy.pi
    # PyRosetta residues and atoms are 1-indexed, so include the endpoints.
    for presi in range(1, pose.total_residue() + 1):
        for pres_atj in range(1, pose.residue(presi).natoms() + 1):
            # For each atom, simulate a Gaussian with the appropriate electron
            # scattering factor. Eventually:
            #   at_type = pose.residue(presi).atom_type(pres_atj)
            #   scattering_params = etbl.get(at_type)
            scattering_params = cupy.array([1, 1])
            coords = pose.residue(presi).xyz(pres_atj)
            center = cupy.array(
                [coords[0] / apix, coords[1] / apix, coords[2] / apix])
            s = float(1 / scattering_params[1])
            ampl = float((1 / cupy.sqrt(cupy.power(2 * pi, 3))) *
                         (1 / cupy.power(s, 3)))
            ii_ampl = float(scattering_params[0])
            outmap = outmap + ii_ampl * cupy.fft.ifftn(
                cupy.fft.ifftshift(ampl * cupy.exp(
                    -cupy.power(pi, 2)
                    * (cupy.power(ii, 2) + cupy.power(jj, 2) + cupy.power(kk, 2))
                    / (2 * cupy.power(s, 2))
                    - (2 * pi) * 1j
                    * (ii * center[0] + jj * center[1] + kk * center[2]))))
    outmap = numpy.real(cupy.asnumpy(cupy.transpose(outmap)))
    return outmap
def loglike(theta):
    """
    Compute the log-likelihood function.

    NOTE: Not called directly by user code; the function signature must
    correspond to the requirements of the numerical sampler used.

    Parameters
    ----------
    theta : Input parameter vector

    Returns
    -------
    loglike : float
    """
    global init_loglike, ndata_unflagged, per_bl_sig, weight_vector, data_vis, einschema

    if init_loglike == False:
        # Find the total number of visibilities (x2 for real & imaginary parts)
        ndata = data_vis.shape[0] * data_vis.shape[1] * data_vis.shape[2] * 2
        flag_ll = np.logical_not(data_flag[:, 0, 0])
        # x8 because each flagged row removes 4 correlations x 2 (real & imag)
        ndata_unflagged = ndata - np.where(flag_ll == False)[0].shape[0] * 8
        print('Percentage of unflagged visibilities: ', ndata_unflagged, '/',
              ndata, '=', (ndata_unflagged / ndata) * 100)

        # Set visibility weights; the weight_vector is the same for both the
        # real and imaginary parts of the visibilities.
        weight_vector = np.zeros(data_vis.shape, dtype='float')
        if not sigmaSim:
            per_bl_sig = np.zeros((data_nbl))
            bl_incr = 0
            for a1 in np.arange(data_nant):
                for a2 in np.arange(a1 + 1, data_nant):
                    # per_bl_sig[bl_incr] = np.sqrt((sefds[a1]*sefds[a2])/(data_chanwidth*data_inttime[bl_incr]))  # INI: Removed the sqrt(2) from the denom. It's for 2 pols.
                    per_bl_sig[bl_incr] = (1.0 / corr_eff) * np.sqrt(
                        (sefds[a1] * sefds[a2]) /
                        (2 * data_chanwidth * data_inttime[bl_incr]))  # INI: Added the sqrt(2) because MeqS uses this convention
                    weight_vector[baseline_dict[(a1, a2)]] = \
                        1.0 / np.power(per_bl_sig[bl_incr], 2)
                    bl_incr += 1
        else:
            weight_vector[:] = 1.0 / np.power(sigmaSim, 2)
        weight_vector *= np.logical_not(data_flag)
        weight_vector = cp.array(weight_vector.reshape(
            (data_vis.shape[0], data_vis.shape[1], 2, 2)))

        # Compute einsum schema
        einschema = einsum_schema(hypo)

        init_loglike = True  # loglike initialised; will not enter on subsequent iterations

    # Set up arrays necessary for forward modelling
    # Set up the phase delay matrix
    lm = cp.array([[theta[1], theta[2]]])
    phase = phase_delay(lm, data_uvw_cp, data_chan_freq_cp)

    if hypo == 1:
        # Set up the shape matrix for Gaussian sources
        gauss_shape = gaussian_shape(data_uvw, data_chan_freq,
                                     np.array([[theta[3], theta[4], theta[5]]]))
        gauss_shape = cp.array(gauss_shape)

    # Set up the brightness matrix
    stokes = cp.array([[theta[0], 0, 0, 0]])
    brightness = convert(stokes, ['I', 'Q', 'U', 'V'],
                         [['RR', 'RL'], ['LR', 'LL']])

    # Compute the source coherency matrix (the uncorrupted visibilities,
    # except for the phase delay)
    if hypo == 0:
        source_coh_matrix = cp.einsum(einschema, phase, brightness)
    elif hypo == 1:
        source_coh_matrix = cp.einsum(einschema, phase, gauss_shape, brightness)

    # Uncomment the following and assign sampled complex gains per
    # ant/chan/time to the Jones matrices:
    '''# Set up the G-Jones matrices
    die_jones = cp.zeros((data_ntime, data_nant, data_nchan, 2, 2), dtype=complex)
    if hypo == 0:
        for ant in np.arange(data_nant):
            for chan in np.arange(data_nchan):
                # delayterm in 'turns'; the 17th chan (index 16) freq is the reference frequency
                delayterm = theta[ant+12]*(chan-refchan_delay)*data_chanwidth
                # convert 'turns' to degrees; pherr = pec_ph + delay + rate; rates are zero
                pherr = theta[ant+3] + delayterm*360
                re, im = pol_to_rec(1, pherr)
                die_jones[:, ant, chan, 0, 0] = die_jones[:, ant, chan, 1, 1] = re + 1j*im
    elif hypo == 1:
        for ant in np.arange(data_nant):
            for chan in np.arange(data_nchan):
                delayterm = theta[ant+15]*(chan-refchan_delay)*data_chanwidth
                pherr = theta[ant+6] + delayterm*360
                re, im = pol_to_rec(1, pherr)
                die_jones[:, ant, chan, 0, 0] = die_jones[:, ant, chan, 1, 1] = re + 1j*im'''

    # Predict (forward model) visibilities. If the die_jones matrix has been
    # declared above, assign it to both die1_jones and die2_jones in predict_vis().
    model_vis = predict_vis(data_uniqtime_indices, data_ant1, data_ant2,
                            die1_jones=None, dde1_jones=None,
                            source_coh=source_coh_matrix,
                            dde2_jones=None, die2_jones=None, base_vis=None)

    # Compute chi-squared and the log-likelihood
    diff = model_vis - data_vis.reshape(
        (data_vis.shape[0], data_vis.shape[1], 2, 2))
    chi2 = cp.sum((diff.real * diff.real + diff.imag * diff.imag) * weight_vector)
    loglike = float(-chi2 / 2.0 - cp.log(
        2 * cp.pi * (1.0 / weight_vector.flatten()[
            cp.nonzero(weight_vector.flatten())])).sum())

    return loglike, []
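# --- Standalone sketch of the likelihood computation in loglike() above
# (illustrative). For complex residuals d with per-visibility weights
# w = 1/sigma^2, the code computes
#   -chi2/2 - sum(log(2*pi*sigma^2))  with  chi2 = sum(w * |d|^2),
# summing the normalization term over the nonzero (unflagged) weights only.
import cupy as cp

def gaussian_loglike(model_vis, data_vis, weight_vector):
    diff = model_vis - data_vis
    chi2 = cp.sum((diff.real * diff.real + diff.imag * diff.imag) * weight_vector)
    w = weight_vector.flatten()
    norm = cp.log(2 * cp.pi * (1.0 / w[cp.nonzero(w)])).sum()
    return float(-chi2 / 2.0 - norm)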
def REE_gen(para):
    p_r = float(para[0])
    sigma = float(para[1])
    phi_pi = float(para[2])
    phi_x = float(para[3])
    Beta = .99  # Beta is fixed; para[4] is unused
    nu = float(para[5])
    theta = float(para[6])
    g = float(para[7])
    sig_r = float(para[8])
    alpha = float(para[9])
    psi = float(para[10])
    h = float(para[11])
    gamma = float(para[12])
    rho = float(para[13])
    p_u = float(para[14])
    sig_e = float(para[15])
    sig_u = float(para[16])

    # Composite coefficients
    alpha_p = alpha / (1 - alpha)
    k = ((1 - Beta * theta) * (1 - theta)) / ((1 + alpha_p * psi) * theta *
                                              (1 + gamma * Beta * theta))
    c1 = sigma / ((1 - h) * (1 - h * Beta))
    c2 = nu / (alpha + nu)
    w1 = (1 + (h ** 2) * Beta + h * Beta) / (1 + h + (h ** 2) * Beta)
    w2 = ((1 - h) * (1 - h * Beta)) / (sigma * (1 + h + (h ** 2) * Beta))
    w3 = -h * Beta / (1 + h + (h ** 2) * Beta)
    w4 = h / (1 + h + (h ** 2) * Beta)
    n1 = k * c2 * c1 + k * (h ** 2) * Beta * c1 * c2 - k * alpha_p
    n2 = -k * c2 * c1 * h
    n3 = -k * h * Beta * c1 * c2
    n4 = Beta / (1 + gamma * Beta * theta)
    n5 = gamma / (1 + gamma * Beta * theta) - gamma * psi * alpha_p * k

    # Variable, expectation-error, and shock indices
    x_t, pi_t, i_t, r_t, u_t = 0, 1, 2, 3, 4
    Ex_t, Epi_t, Ei_t, Ex_t2, Epi_t2, Ei_t2 = 5, 6, 7, 8, 9, 10
    ex_sh, epi_sh, ei_sh, ex2_sh, epi2_sh, ei2_sh = 0, 1, 2, 3, 4, 5
    r_sh, pi_sh, i_sh = 0, 1, 2

    neq, neta, neps = 11, 6, 3
    GAM0 = np.zeros([neq, neq])
    GAM1 = np.zeros([neq, neq])
    C = np.zeros([neq, 1])
    PSI = np.zeros([neq, neps])
    PPI = np.zeros([neq, neta])
    (eq_1, eq_2, eq_3, eq_4, eq_5, eq_6,
     eq_7, eq_8, eq_9, eq_10, eq_11) = range(neq)

    # x_t
    GAM0[eq_1, x_t] = 1
    GAM0[eq_1, Ex_t] = -w1
    GAM0[eq_1, Epi_t] = -w2
    GAM0[eq_1, i_t] = w2
    GAM0[eq_1, r_t] = w2
    GAM0[eq_1, Ex_t2] = -w3
    GAM1[eq_1, x_t] = w4
    # pi_t
    GAM0[eq_2, pi_t] = 1
    GAM0[eq_2, x_t] = -n1
    GAM1[eq_2, x_t] = n2
    GAM0[eq_2, Ex_t] = -n3
    GAM0[eq_2, Epi_t] = -n4
    GAM1[eq_2, pi_t] = n5
    GAM1[eq_2, u_t] = 1
    # i_t
    GAM0[eq_3, x_t] = -(1 - rho) * phi_x
    GAM0[eq_3, pi_t] = -(1 - rho) * phi_pi
    GAM0[eq_3, i_t] = 1
    GAM1[eq_3, i_t] = rho
    PSI[eq_3, i_sh] = 1
    # r_t
    GAM0[eq_4, r_t] = 1
    GAM1[eq_4, r_t] = p_r
    PSI[eq_4, r_sh] = 1
    # u_t
    GAM0[eq_5, u_t] = 1
    GAM1[eq_5, u_t] = p_u
    PSI[eq_5, pi_sh] = 1
    # Epi_t
    GAM0[eq_6, pi_t] = 1
    GAM1[eq_6, Epi_t] = 1
    PPI[eq_6, epi_sh] = 1
    # Ex_t
    GAM0[eq_7, x_t] = 1
    GAM1[eq_7, Ex_t] = 1
    PPI[eq_7, ex_sh] = 1
    # Ex_t2
    GAM0[eq_8, Ex_t] = 1
    GAM1[eq_8, Ex_t2] = 1
    PPI[eq_8, ex2_sh] = 1
    # Ei_t
    GAM0[eq_9, i_t] = 1
    GAM1[eq_9, Ei_t] = 1
    PPI[eq_9, ei_sh] = 1
    # Ei_t2
    GAM0[eq_10, Ei_t] = 1
    GAM1[eq_10, Ei_t2] = 1
    PPI[eq_10, ei2_sh] = 1
    # Epi_t2
    GAM0[eq_11, Epi_t] = 1
    GAM1[eq_11, Epi_t2] = 1
    PPI[eq_11, epi2_sh] = 1

    G1, impact, RC = gensys(GAM0, GAM1, PSI, PPI, DIV=1 + 1e-8,
                            REALSMALL=1e-6, return_everything=False)

    # Reduced 3-variable system:
    # GAM0*x(t) = A*x(t-1) + B*Ex(t+1) + C*Ex(t+2) + E*u(t-1) + DD*eps(t)
    GAM0 = cp.array([[1, 0, w2],
                     [-n1, 1, 0],
                     [-(1 - rho) * phi_x, -(1 - rho) * phi_pi, 1]])
    GAM0inv = cp.linalg.inv(GAM0)
    A = cp.matmul(GAM0inv, cp.array([[w4, 0, 0], [n2, n5, 0], [0, 0, rho]]))
    B = cp.matmul(GAM0inv, cp.array([[w1, w2, 0], [n3, n4, 0], [0, 0, 0]]))
    C = cp.matmul(GAM0inv, cp.array([[w3, 0, 0], [0, 0, 0], [0, 0, 0]]))
    DD = cp.matmul(GAM0inv, cp.array([[0, 0, 0], [0, 0, 0], [0, 0, 1]]))
    E = cp.matmul(GAM0inv, cp.array([[-w2, 0], [0, 1], [0, 1]]))
    R = cp.array([[float(para[0]), 0], [0, float(para[14])]])
    # x(t) = A*x(t-1) + B*Ex(t+1) + C*Ex(t+2) + E*u(t-1) + DD*eps(t)
    V_s = cp.array([[float(para[8]) ** 2, 0, 0],
                    [0, float(para[16]) ** 2, 0],
                    [0, 0, float(para[15]) ** 2]])
    M1 = cp.array([[1 / 100, 0, 0, -1 / 100, 0],
                   [0, 1 / 400, 0, 0, 0],
                   [0, 0, 1 / 400, 0, 0]])
    M2 = cp.array([[-1 / 100, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0],
                   [0, 0, 0, 0, 0]])
    V_m = cp.identity(3)
    return G1, impact, RC, A, B, C, E, DD, R, V_s, M1, M2, V_m
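# --- Usage sketch (illustrative; values are placeholders, not calibrated) ---
# para is a 17-element vector read as documented at the top of REE_gen
# (para[4] is skipped since Beta is fixed at .99). gensys and the reduced
# state-space matrices feed the downstream filtering likelihood.
# para = np.full(17, 0.5)
# G1, impact, RC, A, B, C, E, DD, R, V_s, M1, M2, V_m = REE_gen(para)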
m = 1000
Thetasim = cp.zeros([Nsim, 17])
logpost = cp.zeros([Nsim])
LIK = cp.zeros([Nsim])
AA = cp.zeros([Nsim])
DROP = cp.zeros([Nsim])
Thetasim[i, :] = para
c = .02
P3 = cp.eye(17)
POST = cp.load(r"C:\Research\KF Results\no proj\PostDIST.npy")
para = cp.mean(POST[10000:90000, :], 0)
# P3 = cp.cov(POST[0:90000], rowvar=False)
Thetasim[i, :] = para
accept = 0

likij, dropoutj = PFLIKE(Thetasim[i, :], EPS, S, RR, randphi)
obj = float(likij) + float(NK_priors(cp.asnumpy(Thetasim[i, :])))
DROP[i] = dropoutj
LIK[i] = float(likij)
logpost[i] = obj
print('likelihood:', likij)
print('logposterior:', obj)

# In[44]:

# Continue MH
Nsim = 100500
m = 1000
i = cp.load(r'C:\Research\PF Results\Results\No Proj Fac\iter.npy')
Thetasim = cp.load(r'C:\Research\PF Results\Results\No Proj Fac\PostDIST.npy')
logpost = cp.load(r'C:\Research\PF Results\Results\No Proj Fac\logpost.npy')
AA = cp.load(r'C:\Research\PF Results\Results\No Proj Fac\Acceptance.npy')
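# --- Sketch of the random-walk Metropolis-Hastings step this script appears
# to continue (illustrative; PFLIKE and NK_priors are defined elsewhere, and
# the proposal uses the scale c and covariance P3 set above).
def mh_step(theta, obj, c, P3):
    # Propose theta' = theta + c * L z with L = chol(P3), z ~ N(0, I)
    prop = theta + c * cp.linalg.cholesky(P3).dot(cp.random.standard_normal(17))
    likp, dropp = PFLIKE(prop, EPS, S, RR, randphi)
    objp = float(likp) + float(NK_priors(cp.asnumpy(prop)))
    # Accept with probability min(1, exp(objp - obj))
    if float(cp.log(cp.random.uniform())) < objp - obj:
        return prop, objp, True
    return theta, obj, False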