def __init__(self, m=4, n=3, initial_path='initial_airfoil/naca0012.dat', config_fname='op_conditions.ini'):
    # Airfoil parameters
    self.m = m
    self.n = n
    # NACA 0012 as the initial airfoil; fall back to comma-delimited format
    try:
        self.airfoil0 = np.loadtxt(initial_path, skiprows=1)
    except ValueError:
        self.airfoil0 = np.loadtxt(initial_path, delimiter=',')
    x_min = np.min(self.airfoil0[:, 0])
    x_max = np.max(self.airfoil0[:, 0])
    z_min = np.min(self.airfoil0[:, 1])
    z_max = np.max(self.airfoil0[:, 1])
    # Rectangular m x n lattice of control points spanning the airfoil's bounding box
    Px = np.linspace(x_min, x_max, self.m, endpoint=True)
    Py = np.linspace(z_min, z_max, self.n, endpoint=True)
    x, y = np.meshgrid(Px, Py)
    P0 = np.stack((x, y), axis=-1)
    self.Px = P0[:, :, 0]
    self.alpha0 = P0[:, :, 1].flatten()
    self.dim = len(self.alpha0)
    # Each design variable may move +/- 0.2 from its initial vertical position
    self.bounds = np.zeros((self.dim, 2))
    perturb = 0.2
    self.bounds[:, 0] = self.alpha0 - perturb
    self.bounds[:, 1] = self.alpha0 + perturb
    self.y = None
    self.config_fname = config_fname
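# A minimal standalone sketch of the control-lattice construction above, using a
# synthetic four-point coordinate array in place of naca0012.dat; the function
# name demo_control_lattice and the sample coordinates are hypothetical.
import numpy as np

def demo_control_lattice(m=4, n=3, perturb=0.2):
    # synthetic stand-in for airfoil coordinates: columns are (x, z)
    airfoil = np.array([[0.0, 0.0], [0.5, 0.06], [1.0, 0.0], [0.5, -0.06]])
    # rectangular m x n lattice spanning the bounding box
    Px = np.linspace(airfoil[:, 0].min(), airfoil[:, 0].max(), m)
    Py = np.linspace(airfoil[:, 1].min(), airfoil[:, 1].max(), n)
    x, y = np.meshgrid(Px, Py)
    alpha0 = np.stack((x, y), axis=-1)[:, :, 1].flatten()  # initial z-coordinates
    # box bounds: each design variable may move +/- perturb
    bounds = np.column_stack((alpha0 - perturb, alpha0 + perturb))
    return alpha0, bounds

alpha0, bounds = demo_control_lattice()
print(alpha0.shape, bounds.shape)  # (12,) (12, 2)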
def import_ganZ_gen_params(gp_fold, iter_nm):
    n00 = np.loadtxt(gp_fold + iter_nm + '_00.csv', delimiter=',')
    n00 = n00.reshape(1, n00.shape[0])
    n01 = np.loadtxt(gp_fold + iter_nm + '_01.csv', delimiter=',')
    n10 = np.loadtxt(gp_fold + iter_nm + '_10.csv', delimiter=',')
    n10 = n10.reshape(n10.shape[0], 1)
    n11 = np.loadtxt(gp_fold + iter_nm + '_11.csv', delimiter=',')
    n11 = n11.reshape(1)
    genZ_params = [(n00, n01), (n10, n11)]
    return genZ_params
def run(self):
    plt.imshow(self.data)
    plt.show()
    plt.imshow(self.psf)
    plt.show()

    # set up initial parameters: begin with high uniform mass in each pixel
    tt0 = np.zeros((self.n_grid, self.n_grid)) + self.wlim[1]
    tt0 = self.xi * np.log(np.exp(tt0 / self.xi) - 1)  # inverse softplus
    tt0_k = fft.fft2(tt0)  # take FFT
    t_ini = self.complex_to_real(tt0_k.flatten())  # flatten to 1d for scipy and embed real/imag parts
    alpha_ini = 3.
    f_ini = self.f_true

    # m_1, mf = self.optimize_m(t_ini, self.xi, f_ini, alpha_ini)
    m_1 = np.loadtxt('mock00024.out')
    m_1 = self.xi * np.log(np.exp(m_1 / self.xi) - 1)
    m_1 = fft.fft2(m_1)
    m_1 = self.complex_to_real(m_1.flatten())
    a_1 = self.optimize_alpha(m_1, self.xi, f_ini, alpha_ini)
    print(a_1)
    mf, m_2 = self.optimize_m(tt0, self.xi, f_ini, a_1)
    print('new alpha')
    print(a_1)
    return m_2
def __init__(self, csvname):
    # grab input
    data = np.loadtxt(csvname, delimiter=',')
    self.x = data[:, :-1]
    self.y = data[:, -1]
    self.x.shape = (len(self.x), 1)
    self.y.shape = (len(self.y), 1)
def test_structure(self):
    nelx, nely = 60, 20
    left_wall = list(range(0, 2 * (nely + 1), 2))
    right_corner = [2 * (nelx + 1) * (nely + 1) - 1]
    fixdofs = np.asarray(left_wall + right_corner)
    alldofs = np.arange(2 * (nely + 1) * (nelx + 1))
    freedofs = np.array(list(set(alldofs) - set(fixdofs)))

    forces = np.zeros(2 * (nely + 1) * (nelx + 1))
    forces[1] = -1.0

    args = topo_physics.default_args()
    args.update({
        'nelx': nelx,
        'nely': nely,
        'freedofs': freedofs,
        'fixdofs': fixdofs,
        'forces': forces,
    })
    _, x, _ = topo_physics.run_toposim(args=args, loss_only=False, verbose=False)
    x = abs(x)  # remove negative zeros!

    path = os.path.join(os.path.dirname(__file__), 'truss_test.csv')
    # To regenerate, run the test binary directly, e.g., with
    # python ./neural_structural_optimization/topo_physics_test
    # after uncommenting this line:
    # np.savetxt(path, x, delimiter=",", fmt='%.0f')
    target_struct = np.loadtxt(path, delimiter=',')
    npo.testing.assert_array_equal(x.round(0), target_struct)
def shape_fit_1d(m, step_size, u_func, num_flows=8, num_samples=1000):
    # Parameters
    h = np.tanh
    q_0_mu = np.array([0, 0])
    q_0_sigma = 10
    D = q_0_mu.shape[0]

    # flow parameters (previously fitted)
    # lambda_flows = np.array([np.array([1., 1., 1., 1., 0.])])
    # lambda_flows = np.array([np.array([1., 1., 0.])] * num_flows)
    lambda_flows = np.loadtxt("./data_fit_1d/flow_params.txt")

    # 1D samples from the base distribution
    samples = np.random.randn(num_samples)[:, np.newaxis]
    # samples = np.random.uniform(-1, 1, num_samples)[:, np.newaxis]
    # target: mixture of two unit Gaussians centered at -2 and +2
    # target = lambda x: (sp.stats.norm.pdf(x - 2) + sp.stats.norm.pdf(x + 2)) / 2

    start = time.time()
    grad_energy_bound = autograd.grad(energy_bound)  # the joint probability serves as the new u_func
    flowed_samples = adam_solve(lambda_flows, grad_energy_bound, samples, u_func, h, m, step_size)

    # plot transformed samples
    ax = setup_plot(u_func)
    ax.hist(flowed_samples, bins=100, alpha=0.5, density=True, label="Transformed Samples")
    ax.legend(loc='best')
    plt.savefig("./data_fit_1d/adam_fit.png")
def __init__(self, csvname):
    # grab input
    data = np.loadtxt(csvname, delimiter=',')
    self.x = data[:-1, :]
    self.y = data[-1:, :]
    self.colors = ['salmon', 'cornflowerblue', 'lime', 'bisque', 'mediumaquamarine', 'b', 'm', 'g']
def load(name, suffix=()):
    path = path_to_test_resource("problems", *suffix)
    X = anp.loadtxt(os.path.join(path, "%s.x" % name))
    try:
        F = anp.loadtxt(os.path.join(path, "%s.f" % name))
        CV = None
        if os.path.exists(os.path.join(path, "%s.cv" % name)):
            CV = anp.loadtxt(os.path.join(path, "%s.cv" % name))
    except OSError:
        return X, None, None
    return X, F, CV
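# Why suffix no longer defaults to []: a mutable default is created once and
# then shared across calls. The buggy() helper below is purely illustrative.
def buggy(item, acc=[]):
    acc.append(item)
    return acc

print(buggy(1))  # [1]
print(buggy(2))  # [1, 2] -- state leaks between calls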
def __init__(self, csvname):
    # grab input
    data = np.loadtxt(csvname, delimiter=',').T
    self.x = data[:, :-1]
    self.y = data[:, -1]
    self.y.shape = (len(self.y), 1)
    self.colors = ['salmon', 'cornflowerblue', 'lime', 'bisque', 'mediumaquamarine', 'b', 'm', 'g']
def load(name, n_obj):
    path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "resources", "WFG", "%sobj" % n_obj)
    X = anp.loadtxt(os.path.join(path, "%s.x" % name))
    try:
        F = anp.loadtxt(os.path.join(path, "%s.f" % name))
        CV = None
        if os.path.exists(os.path.join(path, "%s.cv" % name)):
            CV = anp.loadtxt(os.path.join(path, "%s.cv" % name))
    except OSError:
        return X, None, None
    return X, F, CV
def load_data(tag, fname, w, dim=2):
    d = np.loadtxt(fname, ndmin=2)
    if np.shape(d)[0] != np.shape(w)[dim]:
        print("Number of rows=%d in file '%s' does not match weight cols=%d"
              % (np.shape(d)[0], fname, np.shape(w)[dim]))
        sys.exit()
    print("Loaded [%s] of %s data from '%s'"
          % ("x".join(map(str, np.shape(d))), tag, fname))
    return d
def readImages(imageDir, nImages):
    # n_grid is assumed to be defined at module level
    imA = np.zeros((n_grid, n_grid, nImages))
    for i in range(nImages):
        impath = imageDir + str(i) + '.dat'
        imA[:, :, i] = np.loadtxt(impath)
    return imA
def load_data(self, csvname):
    data = np.loadtxt(csvname, delimiter=',')
    self.data = data
    x = data[0:2, :]
    y = data[-1, :][np.newaxis, :]
    special_class = +1
    return x, y, special_class
def load_weights(tag, fname, nheads):
    w = np.loadtxt(fname, ndmin=2)
    if np.shape(w)[0] % nheads != 0:
        print("Number of rows=%d is not divisible by nheads=%d in file '%s'"
              % (np.shape(w)[0], nheads, fname))
        sys.exit()
    # integer division: a float dimension would make reshape fail under Python 3
    w = np.reshape(w, (nheads, np.shape(w)[0] // nheads, np.shape(w)[1]))
    print("Loaded [%s] of %s weights from '%s'"
          % ("x".join(map(str, np.shape(w))), tag, fname))
    return w
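# A small self-contained check of the head-wise reshape above, using synthetic
# weights instead of a file; the shapes are illustrative only.
import numpy as np

nheads, rows_per_head, cols = 2, 3, 4
w_flat = np.arange(nheads * rows_per_head * cols, dtype=float).reshape(nheads * rows_per_head, cols)
w = np.reshape(w_flat, (nheads, rows_per_head, cols))
# row blocks stay contiguous: head 0 gets rows 0..2, head 1 gets rows 3..5
assert np.array_equal(w[1], w_flat[rows_per_head:])
print(w.shape)  # (2, 3, 4)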
def __init__(self, csvname):
    # grab input
    data = np.loadtxt(csvname, delimiter=',')
    self.x = data[:-1, :]
    self.y = data[-1:, :]
    self.colors = [[1, 0.8, 0.5], [0, 0.7, 1]]

    # if 1-d regression data make sure points are sorted
    if np.shape(self.x)[1] == 1:
        ind = np.argsort(self.x.flatten())
        self.x = self.x[ind, :]
        self.y = self.y[ind, :]
def dump_state(self, xk):
    '''callback to save the state to disk during optimization'''
    filename = 'state.txt'
    if not os.path.exists(filename):
        past = np.zeros((0, xk.shape[0]))
    else:
        past = np.loadtxt(filename)
        if past.ndim < 2:
            past = past.reshape(1, -1)
    np.savetxt(filename, np.append(past, xk.reshape(1, -1), axis=0))
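# A hedged sketch of how such a callback is typically wired into
# scipy.optimize.minimize; the standalone dump_state below and the rosen
# objective are illustrative assumptions, not part of the original class.
import os
import numpy as np
from scipy.optimize import minimize, rosen

def dump_state(xk, filename='state.txt'):
    # append the current iterate as a new row of the state file
    if not os.path.exists(filename):
        past = np.zeros((0, xk.shape[0]))
    else:
        past = np.loadtxt(filename)
        if past.ndim < 2:
            past = past.reshape(1, -1)
    np.savetxt(filename, np.append(past, xk.reshape(1, -1), axis=0))

res = minimize(rosen, np.array([1.3, 0.7]), callback=dump_state)
print(np.loadtxt('state.txt').shape)  # one row per iteration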
def __init__(self, csvname):
    # grab input
    data = np.loadtxt(csvname, delimiter=',')
    self.x = data[:-1, :]
    self.y = data[-1:, :]

    # if 1-d regression data make sure points are sorted
    if np.shape(self.x)[1] == 1:
        ind = np.argsort(self.x.flatten())
        self.x = self.x[ind, :]
        self.y = self.y[ind, :]
def restore_checkpoint(output_folder, shared_file_object=True, optimizer=None):
    i_epoch, i_batch = [int(i) for i in np.loadtxt(os.path.join(output_folder, 'checkpoint.txt'))]
    if not shared_file_object:
        obj = np.load(os.path.join(output_folder, 'obj_checkpoint.npy'))
        obj_delta = np.take(obj, 0, axis=-1)
        obj_beta = np.take(obj, 1, axis=-1)
        optimizer.restore_param_arrays_from_checkpoint()
        return i_epoch, i_batch, obj_delta, obj_beta
    else:
        return i_epoch, i_batch
def load_data(self, csvname):
    data = np.loadtxt(csvname, delimiter=',').T
    self.x = data[:, :-1]
    self.y = data[:, -1:]

    # standardize the two input dimensions (center and scale)
    mean1 = np.mean(self.x[:, 0])
    mean2 = np.mean(self.x[:, 1])
    std1 = np.std(self.x[:, 0])
    std2 = np.std(self.x[:, 1])

    self.x[:, 0] -= mean1
    self.x[:, 0] /= std1
    self.x[:, 1] -= mean2
    self.x[:, 1] /= std2
def __init__(self, csvname):
    # grab input
    data = np.loadtxt(csvname, delimiter=',')
    data = data.T
    self.x = data[:, :-1]
    self.y = data[:, -1:]
    self.colors = ['salmon', 'cornflowerblue', 'lime', 'bisque', 'mediumaquamarine', 'b', 'm', 'g']

    # if 1-d regression data make sure points are sorted
    if np.shape(self.x)[1] == 1:
        ind = np.argsort(self.x.flatten())
        self.x = self.x[ind, :]
        self.y = self.y[ind, :]
def main():
    global args, x, y, logx
    args = parser.parse_args()
    datapath = 'datasets/'
    csvname = datapath + args.csvname
    data = np.loadtxt(csvname, delimiter=',')  # load in data
    x = data[:-1, :]
    y = data[-1:, :]
    # log-log transform
    logx = np.log(x)
    y = np.log(y)
    # prepend a row of ones for the bias term
    x = np.concatenate((np.ones((1, logx.shape[1]), dtype=float), logx))
    weights = np.random.rand(x.shape[0], 1)  # initial weights
    weight_his, cost_his = gd(square_cost, args.lr, args.iteration, weights)
    if args.draw:
        draw_pic(weight_his, cost_his)
def get_init(init_type, ndof):
    if init_type == 'rand':
        init = np.random.random(ndof)
    elif init_type == 'vac':
        init = np.zeros(ndof) + 1e-5 * np.random.random(ndof)
    elif init_type == 'one':
        init = np.ones(ndof)
    else:
        # treat init_type as a path to a file of initial values
        initfile = np.loadtxt(init_type)
        if len(initfile) == ndof:
            init = initfile
        elif len(initfile) < ndof:
            # zero-pad a shorter initialization
            init = np.zeros(ndof, dtype=float)
            init[:len(initfile)] = initfile
        else:
            raise ValueError("init file '%s' has more than ndof=%d entries" % (init_type, ndof))
    return init
def runSB(imageDir, saveDir, psf, psf_k, imageArray):
    nImages = np.shape(imageArray)[2]
    results = imageArray * 0
    for imageIdx in range(nImages):
        if imageIdx < start:  # 'start' is assumed to be defined at module level
            continue
        # ground truth gives the number of sources in this image
        grndpath = imageDir + str(imageIdx) + '.truth'
        grnd = np.loadtxt(grndpath)
        no_source = np.shape(grnd)[0]
        if len(np.shape(grnd)) < 2:
            no_source = 1
        img = imageArray[:, :, imageIdx]
        sb = SparseBayes(img, psf, psf_k, no_source)
        results[:, :, imageIdx] = sb.res
        s = saveDir + str(imageIdx) + '.out'
        np.savetxt(s, sb.res)
    return results
def main():
    global args, x, y
    args = parser.parse_args()
    datapath = 'datasets/'
    csvname = datapath + args.csvname
    data = np.loadtxt(csvname, delimiter=',')
    x = data[:-1, :]
    y = data[-1:, :]
    # prepend a row of ones for the bias term
    x = np.concatenate((np.ones((1, x.shape[1]), dtype=float), x))
    weights = np.random.rand(x.shape[0], 1)

    # run gradient descent at both learning rates
    weight_his1, cost_his1, wrong_num1 = gd2(cost_function, args.lr1, args.iteration, weights)
    weight_his2, cost_his2, wrong_num2 = gd2(cost_function, args.lr2, args.iteration, weights)
    update_weight1 = weight_his1[-1]
    update_weight2 = weight_his2[-1]
    res1 = np.sign(np.dot(x.T, update_weight1))
    res2 = np.sign(np.dot(x.T, update_weight2))
    acc1 = accuracy(res1, y)
    acc2 = accuracy(res2, y)
    print('the accuracy when lr={} after {} iterations is {:.3f}'.format(args.lr1, args.iteration, acc1))
    print('the accuracy when lr={} after {} iterations is {:.3f}'.format(args.lr2, args.iteration, acc2))

    if args.draw:
        plt.figure(figsize=(8, 6))
        # cost function history
        plt.subplot(211)
        plt.title('cost function history plot')
        plt.plot(range(args.iteration + 1), cost_his1, 'b')
        plt.plot(range(args.iteration + 1), cost_his2, 'r')
        plt.legend(['$lr={}$'.format(args.lr1), '$lr={}$'.format(args.lr2)])
        # misclassification history
        plt.subplot(212)
        plt.title('misclassification history plot')
        plt.scatter(range(args.iteration + 1), wrong_num1)
        plt.scatter(range(args.iteration + 1), wrong_num2)
        plt.legend(['$lr={}$'.format(args.lr1), '$lr={}$'.format(args.lr2)])
        plt.tight_layout(pad=5)
        plt.show()
def read_image(fframe, data_set='BTLS', background_subtract=False):
    '''read given image

    :param fframe: file name. Currently assumes that the file can be opened using PIL.Image
    :param data_set: 'BTLS' (PIL-readable image) or 'mock_alpha' (plain-text array)
    :param background_subtract: If True, sigma-clipped background subtraction (default: False)
    '''
    if data_set == 'BTLS':
        im = Image.open(fframe)
        imarr = np.array(im)
    elif data_set == 'mock_alpha':
        imarr = np.loadtxt(fframe)

    noise_level = 0.
    if background_subtract:
        # simple sigma-clipped background estimate
        noise_level = np.median(stats.sigmaclip(imarr.flatten(), high=3.)[0])
    return imarr - noise_level
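# A minimal illustration of the sigma-clipped background estimate used above,
# on a synthetic frame (flat Gaussian background plus two bright "sources").
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
frame = rng.normal(loc=10., scale=1., size=(64, 64))
frame[10, 10] = frame[40, 20] = 500.  # bright sources

# clipping at 3 sigma removes the bright pixels before taking the median
background = np.median(stats.sigmaclip(frame.flatten(), high=3.)[0])
print(round(background, 1))  # close to 10.0, unbiased by the sources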
def load_data(self, csvname):
    data = np.loadtxt(csvname, delimiter=',')
    self.data = data
    x = data[0:2, :]
    y = data[-1, :][np.newaxis, :]

    # remove points from one class for illustrative purposes
    ind0 = np.argwhere(y == -1)
    ind0 = [v[1] for v in ind0]
    ind1 = np.argwhere(y == +1)
    ind1 = [v[1] for v in ind1]
    ind0 = ind0[-5:]  # keep only the last five negative-class points
    inds = ind0 + ind1
    x = x[:, inds]
    y = y[:, inds]
    special_class = -1
    return x, y, special_class
def _load_boston():
    """
    Attribute Information:
    1.  CRIM: per capita crime rate by town
    2.  ZN: proportion of residential land zoned for lots over 25,000 sq.ft.
    3.  INDUS: proportion of non-retail business acres per town
    4.  CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)
    5.  NOX: nitric oxides concentration (parts per 10 million)
    6.  RM: average number of rooms per dwelling
    7.  AGE: proportion of owner-occupied units built prior to 1940
    8.  DIS: weighted distances to five Boston employment centres
    9.  RAD: index of accessibility to radial highways
    10. TAX: full-value property-tax rate per $10,000
    11. PTRATIO: pupil-teacher ratio by town
    12. B: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
    13. LSTAT: % lower status of the population
    14. MEDV: Median value of owner-occupied homes in $1000's
    """
    data = np.loadtxt(os.path.join(data_dir, 'boston-housing/boston_housing.txt'))
    X = data[:, :-1]
    y = data[:, -1]
    return X, y
# vector of time windows
T_vec = np.arange(Ts, T_bar, Ts)
# number of trials
M = 100
# matrix of data, one column per trial
rate_data = np.zeros((len(T_vec), M))
# input matrix
B = I
# output matrix
C = I

for pp in range(M):
    # load the C. elegans connectivity matrix each trial, since it is
    # shuffled in place below (insert full file path)
    A = np.loadtxt("insert_full_path/A_C_elegans.txt", usecols=range(n))
    # randomly swap entries across the diagonal with probability 0.5
    for tt in range(279):
        for qq in range(279):
            if tt > qq:
                if random() < 0.5:
                    A_tmp = A[tt, qq]
                    A[tt, qq] = A[qq, tt]
                    A[qq, tt] = A_tmp
    # shift the spectrum so that all eigenvalues have negative real part
    w, v = np.linalg.eig(A)
    A = A - (max(np.real(w)) + 0.1) * I
    # vector of transmission rates
    rate_vec = np.zeros(len(T_vec))
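# A self-contained check of the spectral shift used above: subtracting
# (max Re(eig) + 0.1) * I from A leaves every eigenvalue with real part
# <= -0.1, making the linear dynamics x' = Ax stable. The random 5x5
# matrix stands in for the connectivity matrix and is illustrative only.
import numpy as np

rng = np.random.default_rng(1)
A = rng.normal(size=(5, 5))
I = np.eye(5)

w = np.linalg.eigvals(A)
A_shifted = A - (max(w.real) + 0.1) * I
print(max(np.linalg.eigvals(A_shifted).real))  # <= -0.1 up to rounding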
with bl.Model() as m:
    X = bl.Placeholder('X', dimensions=agnp.array([100, 784]))
    encoder = bl.ml.neural_network.DenseNeuralNetwork(
        'Encoder', X, layer_dims=[784, 50, 20],
        nonlinearity=bl.math.utils.sigmoid)
    decoder = bl.ml.neural_network.DenseNeuralNetwork(
        'Decoder', encoder, layer_dims=[20, 50, 784],
        nonlinearity=bl.math.utils.sigmoid,
        last_layer_nonlinearity=bl.math.utils.sigmoid)
    y = bl.rvs.Bernoulli('obs', decoder, observed=X)

fit_params = agnp.loadtxt("pos.txt", delimiter=',')
m.set_param(fit_params)
out = m.evaluate(decoder, feed_dict={X: train_images})

# binarize the reconstructions
for i in range(out.shape[1]):
    out[0, i, :] = out[0, i, :] > 0.5

# plot a 10 x 10 grid of reconstructed digits
fig, ax = plt.subplots(10, 10)
for i in range(10):
    for j in range(10):
        ax[i, j].imshow(out[0, i * 10 + j, :].reshape(28, 28))
        ax[i, j].axis('off')
plt.show()
def __init__(self, csvname):
    # grab input
    data = np.loadtxt(csvname, delimiter=',')
    data = data.T
    self.x = data[:, :-1]
    self.y = data[:, -1]