Example #1
    def __init__(self,
                 kernel_type='rbf',
                 C=1,
                 gamma=1,
                 degree=3,
                 tolerance=0.1,
                 epsilon=0.1,
                 max_iter=100,
                 solver="smo"):
        self.__kernel = Kernel(kernel_type, gamma, degree)
        self.__C = C
        self.__tol = tolerance
        self.__error_cache = {}
        self.__eps = epsilon
        self.__max_iter = max_iter
        self.__solver = solver
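The constructor above only stores the hyperparameters and builds a `Kernel` object; the enclosing class name is not visible in the excerpt. A minimal usage sketch, assuming the class is called `SVM` and exposes the usual `fit`/`predict` pair (both assumptions, not shown above):

# Hypothetical usage; the class name `SVM` and the fit/predict methods
# are assumptions -- only the constructor appears in the excerpt.
clf = SVM(kernel_type='rbf', C=1.0, gamma=0.5, tolerance=1e-3, max_iter=200)
# clf.fit(X_train, y_train)
# y_pred = clf.predict(X_test)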
Example #2
def train_eval(config, args, X_train, Y_train, X_test=None, Y_test=None):

    seed = str(args['seed']) if not args['split_ready'] else ''
    model_path = "%s%s_%s.pkl" % (args['model_path'], args['dataset'], seed)
    ker = Kernel(config, args['kernel_type'])
    logging.info('Training on dataset %s...' % args['dataset'])
    logging.info('\tComputing %s kernel.' % args['kernel_type'])
    K_train = ker.fit_transform(X_train)

    lins = []
    nans = []
    for col in range(Y_train.shape[1]):
        Y_train_all = Y_train[:, col]
        K_train_notnan = K_train[~np.isnan(Y_train_all)][:, ~np.isnan(Y_train_all)]
        Y_train_notnan = Y_train_all[~np.isnan(Y_train_all)]
        nans.append(np.isnan(Y_train_all))

        if args['metric'] in ['ROC', 'PRC']:
            logging.info('\tTraining classifier on task %d.' % (col + 1))
            lin = svm.SVC(kernel='precomputed', C=10, probability=True)
            lin.fit(K_train_notnan, Y_train_notnan)
        else:
            logging.info('\tTraining regressor on task %d.' % (col + 1))
            lin = svm.SVR(kernel='precomputed', C=10)
            lin.fit(K_train_notnan, Y_train_notnan)
        lins.append(lin)

    model = {'kernel': ker, 'linear': lins, 'nans': nans}
    save_model(model, model_path)
    logging.info('\tTrained model saved to \"%s\".' %
                 (model_path.split('/')[-1]))

    if X_test is not None and Y_test is not None:
        score = evaluate(args, X_test, Y_test)
        logging.info('\tAll tasks averaged score (%s): %.6f.' %
                     (args['metric'], score))
        return score
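The per-task loop above drops samples whose label is NaN by masking both axes of the precomputed kernel matrix. A small self-contained numpy sketch of that masking step (toy data, independent of the project's `Kernel` class):

import numpy as np

K = np.arange(16, dtype=float).reshape(4, 4)  # toy 4x4 kernel matrix
y = np.array([0.1, np.nan, 0.7, np.nan])      # labels with missing entries

mask = ~np.isnan(y)
K_notnan = K[mask][:, mask]  # keep rows/columns of labelled samples only
y_notnan = y[mask]
print(K_notnan.shape, y_notnan)               # (2, 2) [0.1 0.7]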
Example #3
File: cp.py Project: ziyw/gp
def plot_results(time_points, values):

	axis_x = np.arange(0,5.1,0.1)
	fig = plt.figure(0)

	plt.axis([0,5,-2,2], facecolor = 'g')
	plt.grid(color='w', linestyle='-', linewidth=0.5)

	ax = fig.add_subplot(111)
	ax.spines['top'].set_visible(False)
	ax.spines['right'].set_visible(False)
	ax.patch.set_facecolor('#E8E8F1')

	# show mean 
	mu = np.zeros(axis_x.size)
	var = np.zeros(axis_x.size)

	ker = Kernel()
	ker.SE(1,1)

	gp = GP()

	for i in range(axis_x.size):
		mu[i], var[i], _ = gp.GPR(time_points=time_points, values=values, predict_point=axis_x[i], kernel=ker)

	# show covariance 

	print(mu)
	plt.fill_between(axis_x, mu + var, mu - var, color='#D1D9F0')

	# show mean
	plt.plot(axis_x, mu, linewidth=2, color="#5B8CEB")

	# show the points
	plt.scatter(time_points, values, color='#598BEB')
	
	plt.show()
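Note that `fill_between` above shades `mu ± var`, i.e. the band half-width is the posterior variance rather than the standard deviation. A conventional 95% band would use `mu ± 1.96 * sqrt(var)`; a minimal standalone sketch of that variant with synthetic arrays:

import numpy as np
import matplotlib.pyplot as plt

axis_x = np.arange(0, 5.1, 0.1)
mu = np.sin(axis_x)                # placeholder posterior mean
var = 0.05 * np.ones_like(axis_x)  # placeholder posterior variance

band = 1.96 * np.sqrt(var)         # ~95% confidence band
plt.fill_between(axis_x, mu + band, mu - band, color='#D1D9F0')
plt.plot(axis_x, mu, linewidth=2, color='#5B8CEB')
plt.show()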
Example #4
    def __init__(self, max_num, domain):
        self.max_num = max_num

        rho0 = 1000.  #rest density [ kg/m^3 ]
        VF = .0262144  #simulation volume [ m^3 ]
        VP = VF / max_num  #particle volume [ m^3 ]
        m = rho0 * VP  #particle mass [ kg ]
        re = (VP)**(1 / 3.)  #particle radius [ m ]
        #re = (VP)**(1/2.)           #particle radius [ m ]
        print "re, m, VP", re, m, VP
        rest_distance = .87 * re  #rest distance between particles [ m ]

        smoothing_radius = 2.0 * rest_distance  #smoothing radius for SPH Kernels
        boundary_distance = .5 * rest_distance  #for calculating collision with boundary

        #the ratio between the particle radius in simulation space and world space
        print "VF", VF
        print "domain.V: ", domain.V
        print "VF/domain.V", VF / domain.V
        print "scale calc", (VF / domain.V)**(1 / 3.)
        #print "scale calc", (VF/domain.V)**(1/2.)
        sim_scale = (VF / domain.V)**(1 / 3.)  #[m^3 / world m^3 ]
        #sim_scale = (VF / domain.V)**(1/2.)     #[m^2 / world m^2 ]

        self.rho0 = rho0
        self.VF = VF
        self.mass = m
        self.VP = VP
        self.re = re
        self.rest_distance = rest_distance
        self.smoothing_radius = smoothing_radius
        self.boundary_distance = boundary_distance
        self.sim_scale = sim_scale

        print "====================================================="
        print "particle mass:", self.mass
        print "Fluid Volume VF:", self.VF
        print "simulation scale:", self.sim_scale
        print "smoothing radius:", self.smoothing_radius
        print "rest distance:", self.rest_distance
        print "====================================================="

        #Other parameters
        self.K = 15.  #Gas constant
        self.boundary_stiffness = 20000.
        self.boundary_dampening = 256.
        #friction
        self.friction_coef = 0.
        self.restitution_coef = 0.
        #not used yet
        self.shear = 0.
        self.attraction = 0.
        self.spring = 0.

        self.velocity_limit = 600.
        self.xsph_factor = .05

        self.viscosity = .01
        self.gravity = -9.8

        self.EPSILON = 10E-6

        #Domain
        self.domain = domain
        self.domain.setup(self.smoothing_radius / self.sim_scale)

        #Kernels
        self.kernels = Kernel(self.smoothing_radius)
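The printed quantities above follow directly from `max_num` and the fixed fluid volume: VP = VF / max_num, m = rho0 * VP, re = VP**(1/3). A quick standalone check of that arithmetic (max_num = 8192 is an assumed value, not one given in the excerpt):

# Standalone check of the derived SPH parameters; max_num = 8192 is assumed.
max_num = 8192
rho0 = 1000.0                   # rest density [kg/m^3]
VF = 0.0262144                  # fluid volume [m^3]
VP = VF / max_num               # particle volume [m^3]
m = rho0 * VP                   # particle mass [kg]
re = VP ** (1.0 / 3.0)          # particle radius [m]
rest_distance = 0.87 * re
smoothing_radius = 2.0 * rest_distance
print(m, re, smoothing_radius)  # ~0.0032 kg, ~0.0147 m, ~0.0256 m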
Example #5
#Load Train and embedding
path_data = 'dataset/Xtr0.csv'
path_label = 'dataset/Ytr0.csv'

train_dataset = datahandler(path_data, path_label, features_generated=False)

train_dataset.Y[train_dataset.Y == 0] = -1
train_dataset.compute_vocabulary(6)
train_dataset.mismatch_embedding(6, 1, train_dataset.vocab)

X_train0, Y_train = train_dataset.X_embedded, train_dataset.Y

kernel = Kernel(Kernel.dot_product())
K0 = kernel.kernel_matrix(X_train0)

# Load Test and embedding
path_data = 'dataset/Xte0.csv'
path_label = 'dataset/Ytr0.csv'

test_dataset = datahandler(path_data, path_label, features_generated=False)

test_dataset.mismatch_embedding(6, 1, train_dataset.vocab)
X_test0 = test_dataset.X_embedded

##########################################
#KERNEL 2
##########################################
train_dataset.compute_vocabulary(7)
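For reference, a dot-product (linear) kernel over an embedded dataset is just the Gram matrix X @ X.T; a minimal numpy sketch of what `kernel.kernel_matrix(X_train0)` above presumably computes (an assumption, since the project's `Kernel` class is not shown):

import numpy as np

X = np.random.rand(5, 8)             # 5 embedded samples, 8 features (toy data)
K = X @ X.T                          # linear-kernel Gram matrix, shape (5, 5)
print(K.shape, np.allclose(K, K.T))  # (5, 5) True -- symmetric, as expected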
Example #6
          'metric' : 'rmse'} 

bounds = [(1.0e-5, 1.0e-1), # learning rate
          (0.5, 0.9999), # change of learning rate
          (2, 1000)] # number of leaves

n_random_trials = 3 # initiate Bayesian optimization with 3 random draws
n_searches = 10



# Use my Bayesian Optimization
mdl = Model(data_mat, lags, n_oos, n_val, prediction_range, 
            target_vars_inds, params)

kernel = Kernel("rbf", 1)

bo = BayesianOptimization(mdl.obj_fun, bounds, kernel, 
                          expected_improvement, n_random_trials)
ind, best_para_my, y = bo.search(n_searches, 2, 25)





# Use Ax Bayesian Optimization
n_random_trials = 5 # initiate Bayesian optimization with 5 random draws
n_searches = 20

mdl = Model(data_mat, lags, n_oos, n_val, prediction_range, 
            target_vars_inds, params)
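The `expected_improvement` acquisition passed to `BayesianOptimization` above is not shown in the excerpt. For context, here is a generic sketch of the textbook expected-improvement formula (assuming maximization; not necessarily the project's exact implementation):

import numpy as np
from scipy.stats import norm

def expected_improvement_sketch(mu, sigma, f_best, xi=0.01):
    """Textbook EI for maximization: mu, sigma are the surrogate's posterior
    mean/std at candidate points, f_best the best objective value so far."""
    sigma = np.maximum(sigma, 1e-12)  # guard against division by zero
    z = (mu - f_best - xi) / sigma
    return (mu - f_best - xi) * norm.cdf(z) + sigma * norm.pdf(z)

# Toy check: a candidate with a higher posterior mean scores higher.
print(expected_improvement_sketch(np.array([0.2, 1.0]),
                                  np.array([0.3, 0.3]), f_best=0.5))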
Example #7
File: gp.py Project: ziyw/gp
            plt.plot(axis_x, mu, linewidth=2, color=mean_color[i % k_color])

        # background color
        # plt.axis([0,5,-2,2], facecolor = 'g')
        # plt.grid(color='w', linestyle='-', linewidth=0.5)
        # ax.patch.set_facecolor('#E8E8F1')
        # show the points
        for i in range(len(GPs)):
            plt.scatter(GPs[i].time_points,
                        GPs[i].values,
                        color=mean_color[i % k_color],
                        marker='X')

        plt.show()


if __name__ == '__main__':

    X = np.matrix([1, 2, 3, 4]).reshape(-1, 1)
    Y = np.sin(X)  # np.random.randn(20,1)*0.05
    X = X.reshape(4, )
    Y = Y.reshape(4, )

    k1 = Kernel("SE", np.sqrt(2), 1)

    gp1 = GP(time_points=X.T, values=Y.T, kernel=k1)
    gp1.optimize()

    print(gp1.se_function([0.697774447341, 1.61119536129, 7.64794567566e-09]))

    # print(k1.cal_SE())
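The `Kernel("SE", np.sqrt(2), 1)` call above presumably parameterizes a squared-exponential covariance, k(x, x') = s**2 * exp(-(x - x')**2 / (2 * l**2)). A minimal standalone sketch with l = sqrt(2) and s = 1 mirroring those arguments (the project's argument order is an assumption):

import numpy as np

def se_kernel(x1, x2, length_scale=np.sqrt(2), signal_std=1.0):
    """Squared-exponential (RBF) kernel, evaluated elementwise."""
    return signal_std ** 2 * np.exp(-(x1 - x2) ** 2 / (2 * length_scale ** 2))

X = np.array([1.0, 2.0, 3.0, 4.0])
K = se_kernel(X[:, None], X[None, :])  # 4x4 covariance matrix
print(np.round(K, 3))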