Example #1
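# Restore a saved TF1 landmark-detection model from a checkpoint, run it over a list of test images, and draw the predicted points.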
import os

import cv2
import numpy as np
import tensorflow as tf

# gen_data is assumed to come from this repo's generate_data module,
# as in the other examples on this page.
from generate_data import gen_data


def main():
    meta_file = './models2/model0/model.meta'
    ckpt_file = './models2/model0/model.ckpt-0'
    # test_list = './data/300w_image_list.txt'

    image_size = 112

    image_files = 'data/test_data/list.txt'
    out_dir = 'result'
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)

    with tf.Graph().as_default():
        with tf.Session() as sess:
            print('Loading feature extraction model.')
            saver = tf.train.import_meta_graph(meta_file)
            saver.restore(tf.get_default_session(), ckpt_file)

            graph = tf.get_default_graph()
            images_placeholder = graph.get_tensor_by_name('image_batch:0')
            phase_train_placeholder = graph.get_tensor_by_name('phase_train:0')

            landmark_L1 = graph.get_tensor_by_name('landmark_L1:0')
            landmark_L2 = graph.get_tensor_by_name('landmark_L2:0')
            landmark_L3 = graph.get_tensor_by_name('landmark_L3:0')
            landmark_L4 = graph.get_tensor_by_name('landmark_L4:0')
            landmark_L5 = graph.get_tensor_by_name('landmark_L5:0')
            landmark_total = [
                landmark_L1, landmark_L2, landmark_L3, landmark_L4, landmark_L5
            ]

            file_list, train_landmarks, train_attributes = gen_data(
                image_files)
            print(file_list)
            for file in file_list:
                filename = os.path.split(file)[-1]
                image = cv2.imread(file)
                # image = cv2.resize(image, (image_size, image_size))
                input = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)
                input = cv2.resize(input, (image_size, image_size))
                input = input.astype(np.float32) / 256.0
                input = np.expand_dims(input, 0)
                print(input.shape)

                feed_dict = {
                    images_placeholder: input,
                    phase_train_placeholder: False
                }

                pre_landmarks = sess.run(landmark_total, feed_dict=feed_dict)
                print(pre_landmarks)
                pre_landmark = pre_landmarks[0]

                h, w, _ = image.shape
                # landmarks are normalized (x, y) pairs: scale x by width, y by height
                pre_landmark = pre_landmark.reshape(-1, 2) * [w, h]
                for (x, y) in pre_landmark.astype(np.int32):
                    cv2.circle(image, (x, y), 1, (0, 0, 255))
                cv2.imshow('0', image)
                cv2.waitKey(0)
                cv2.imwrite(os.path.join(out_dir, filename), image)
Example #2
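    # Data-generator __init__: builds SMPL parameter/pcs (presumably point-cloud) training pairs
    # via gen_data and snapshots a few fixed samples for callback visualization.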
    def __init__(self,
                 num_samples,
                 reset_period,
                 POSE_OFFSET,
                 PARAMS_TO_OFFSET,
                 batch_size=32,
                 smpl=None,
                 shuffle=True,
                 save_path="./cb_samples.npz"):
        self.num_samples = num_samples
        self.reset_period = reset_period
        if isinstance(POSE_OFFSET, (int, float)):
            k = {param: POSE_OFFSET for param in PARAMS_TO_OFFSET}
        else:
            # k must be a dict with an entry for each variable parameter
            k = POSE_OFFSET
        self.k = k
        self.PARAMS_TO_OFFSET = PARAMS_TO_OFFSET
        self.batch_size = batch_size
        if smpl is None:
            smpl = SMPLModel(
                './keras_rotationnet_v2_demo_for_hidde/basicModel_f_lbs_10_207_0_v1.0.0.pkl'
            )
        self.smpl = smpl
        self.shuffle = shuffle
        self.params, self.pcs = gen_data(POSE_OFFSET,
                                         PARAMS_TO_OFFSET,
                                         self.smpl,
                                         data_samples=num_samples,
                                         save_dir=None,
                                         render_silhouette=False)
        self.indices = np.arange(num_samples)
        self.save_path = save_path
        #print("generator params shape at init: " + str(self.params.shape))
        #print("generator pcs shape at init: " + str(self.pcs.shape))
        print("generator params first entry: " + str(self.params[0]))
        print("generator params final entry: " + str(self.params[-1]))

        # Hard-coded parameters
        self.epoch = 0
        self.num_examples = 5
        self.cb_samples = np.linspace(0,
                                      self.num_samples,
                                      num=self.num_examples,
                                      dtype=int)
        self.cb_samples[-1] -= 1
        print("samples: " + str(self.cb_samples))

        # Store initial data
        if self.save_path is not None:
            # np.savez requires a binary-mode file handle
            with open(self.save_path, 'wb') as f:
                np.savez(f,
                         indices=self.indices[self.cb_samples],
                         params=self.params[self.cb_samples],
                         pcs=self.pcs[self.cb_samples])
Example #3
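    # Same test loop as Example #1, but the tensors come from a frozen graph loaded by self.load_graph.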
    def test(self):

        image_size = 112

        image_files = 'data/test_original_data/list_sample.txt'
        out_dir = 'sample_test_result'
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)

        self.load_graph(self.model_filepath)
        # print operations
        # self.print_graph_operations(self.graph)
        landmark_total = self.graph.get_tensor_by_name(
            'import/pfld_inference/fc/BiasAdd:0')

        print('Loading feature extraction model.')

        file_list, train_landmarks, train_attributes, euler = gen_data(
            image_files)
        print(file_list)
        for file in file_list:
            filename = os.path.split(file)[-1]
            image = cv2.imread(file)
            # image = cv2.resize(image, (image_size, image_size))
            input = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)
            input = cv2.resize(input, (image_size, image_size))
            input = input.astype(np.float32) / 256.0
            input = np.expand_dims(input, 0)
            print(input.shape)

            feed_dict = {
                self.image_batch: input,
                self.phase_train_placeholder: False
            }

            pre_landmarks = self.sess.run(landmark_total, feed_dict=feed_dict)

            print(pre_landmarks)
            pre_landmark = pre_landmarks[0]

            h, w, _ = image.shape
            # landmarks are normalized (x, y) pairs: scale x by width, y by height
            pre_landmark = pre_landmark.reshape(-1, 2) * [w, h]
            for (x, y) in pre_landmark.astype(np.int32):
                cv2.circle(image, (x, y), 1, (0, 0, 255))
            cv2.imwrite(os.path.join(out_dir, filename), image)
Example #4
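# Train a Gaussian discriminant analysis (GDA) classifier on generated data and print test accuracy.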
import numpy as np
from generate_data import gen_data
from GDA import GDA
from time import time

print('Loading data...')
t = time()
X_train, y_train, X_test, y_test = gen_data()
print("Done! ", time() - t)
print("Training...")
t = time()
model = GDA(X_train, y_train)
model.fit()
print("Done! ", time() - t)
pre = model.predict(X_test)
result = pre == y_test
print(sum(result) / len(result))  # fraction of correct predictions (accuracy)
Example #5
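    # Grid search over alpha/rho for ElasticNet/Lasso/Ridge regression (fuller context in Example #9).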
        
    for a, alpha in enumerate(ALPHA_VALUES):
        for r, rho in enumerate(rhos):

            if regressionModel == 'ElasticNet':
                model = lm.ElasticNet(alpha=alpha, rho=rho)
            elif regressionModel == 'Lasso':
                model = lm.Lasso(alpha=alpha)
            elif regressionModel == 'Ridge' and alpha != 0:
                model = lm.Ridge(alpha=alpha)

            for k in range(file_num):

                dy, dx = generate_data.gen_data(samples, features, impFeat)
                #dy, dx = genRedundantData(100, 6, 2, 2)
                examples, features = dx.shape

                (weights_iter, rcors, max_position_iter, intersect_size_iter,
                 deltatime) = regBoost(dx, dy, model, bootstrap_num, impFeat)

                files_rcors[k, :] = rcors
                if np.mean(rcors) > best_mean_rcor:
                    best_mean_rcor = np.mean(rcors)
                    best_alpha = alpha
                    if regressionModel == 'ElasticNet' and rho != 0:
                        best_rho = rho
                    if best_mean_max_pos < np.mean(max_position_iter):
                        best_mean_max_pos = np.mean(max_position_iter)
                    if best_mean_max_intersect > np.mean(intersect_size_iter):
                        best_mean_max_intersect = np.mean(intersect_size_iter)
Example #6
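# Closed-form two-class LDA: w is proportional to Sw^{-1}(mu1 - mu2), checked against the true center difference c1 - c2.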
import numpy as np
from generate_data import gen_data
def LDA(data1, data2):
    '''
    Linear discriminant analysis.
    input:  data1, data2: training samples of the two classes
    output: w (projection direction), mu1 (class-1 mean), mu2 (class-2 mean)
    '''
    mu1 = np.mean(data1, 0)
    mu2 = np.mean(data2, 0)
    Sigma1 = (data1 - mu1).T.dot(data1 - mu1)
    Sigma2 = (data2 - mu2).T.dot(data2 - mu2)  # both factors centered on mu2
    Sw = Sigma1 + Sigma2
    # Sw = u @ diag(s) @ v, so Sw^{-1} = v.T @ diag(1/s) @ u.T
    u, s, v = np.linalg.svd(Sw, 0)
    return v.T.dot(np.diag(1 / s)).dot(u.T).dot(mu1 - mu2), mu1, mu2

if __name__ == '__main__':
    d1,d2,c1,c2 = gen_data(1000)
    w,_,_ = LDA(d1,d2)
    print(w/np.linalg.norm(w))
    w_ = c1-c2
    print(w_/np.linalg.norm(w_))
Example #7
# @Time    : 2018/9/18 7:32 PM
# @Author  : Charles
# @Contact : [email protected]
# @File    : simulation.py
# @Software: PyCharm

from queue import PriorityQueue
from generate_data import gen_data
from scheduler import VMScheduler
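# Event-driven simulation: VMs are ordered by start_time in a priority queue and dispatched slot by slot.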

if __name__ == "__main__":
    # Generate the input
    num_vms = 1000
    num_slots = 1000
    num_pms = num_vms
    vm_list = gen_data(num_vms, num_slots)

    pq = PriorityQueue()
    for vm in vm_list:
        pq.put(vm)

    vmm = VMScheduler(num_pms, num_slots)

    for t in range(num_slots + 1):
        print('The {}th slot.'.format(t))
        cur_vm_list = list()
        while not pq.empty():
            vm = pq.get()
            if vm.start_time == t:
                cur_vm_list.append(vm)
            else:
                # assumed completion: this VM starts in a later slot, so re-queue it
                # and stop scanning the current slot
                pq.put(vm)
                break
Example #8
    # Load the user and music tables produced by gen_data.
    def load_database(self, file, user_num):
        self.users, self.musics = gen_data(file, user_num)
Example #9
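    # Grid search over regression hyperparameters, tracking the best mean rank correlation across generated datasets.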
    best_mean_max_pos = features
    best_mean_max_intersect = 0

    for a, alpha in enumerate(ALPHA_VALUES):
        for r, rho in enumerate(rhos):

            if regressionModel == 'ElasticNet':
                model = lm.ElasticNet(alpha=alpha, rho=rho)
            elif regressionModel == 'Lasso':
                model = lm.Lasso(alpha=alpha)
            elif regressionModel == 'Ridge' and alpha != 0:
                model = lm.Ridge(alpha=alpha)

            for k in range(file_num):

                dy, dx = generate_data.gen_data(samples, features, impFeat)
                #dy, dx = genRedundantData(100, 6, 2, 2)
                examples, features = dx.shape

                (weights_iter, rcors, max_position_iter, intersect_size_iter,
                 deltatime) = regBoost(dx, dy, model, bootstrap_num, impFeat)

                files_rcors[k, :] = rcors
                if np.mean(rcors) > best_mean_rcor:
                    best_mean_rcor = np.mean(rcors)
                    best_alpha = alpha
                    if regressionModel == 'ElasticNet' and rho != 0:
                        best_rho = rho
                    if best_mean_max_pos < np.mean(max_position_iter):
                        best_mean_max_pos = np.mean(max_position_iter)
                    if best_mean_max_intersect > np.mean(intersect_size_iter):
                        best_mean_max_intersect = np.mean(intersect_size_iter)
Example #10
import numpy as np
import theano
import theano.tensor as T
from matplotlib import pyplot as plt

from generate_data import gen_data  # assumed import, as in the other examples
# MLP (and its TestVersion) are assumed to be defined elsewhere in the source file.


def test_mlp(learning_rate=0.01,
             n_epochs=100,
             batch_size=20,
             n_hidden=200):

    datasets = gen_data(noise=0.2)

    train_set_x, train_set_y = datasets[0]
    valid_set_x, valid_set_y = datasets[1]
    test_set_x, test_set_y = datasets[2]

    # compute number of minibatches for training, validation and testing (integer division)
    n_train_batches = train_set_x.get_value(borrow=True).shape[0] // batch_size
    n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] // batch_size
    n_test_batches = test_set_x.get_value(borrow=True).shape[0] // batch_size
        
    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print('... building the model')

    index = T.lscalar() 
    x = T.matrix('x')  
    y = T.matrix('y')  

    rng = np.random.RandomState(1234)

    # construct the MLP class
    classifier = MLP(
        rng=rng,
        input=x,
        input_dim = (batch_size,200),
        n_in= train_set_x.get_value().shape[1],
        n_hidden=n_hidden,
        n_out=2
    )

    classifier_test = classifier.TestVersion(
        rng=rng,
        input=x,
        input_dim = (batch_size,200),
        n_in= train_set_x.get_value().shape[1],
        n_hidden=n_hidden,
        n_out=2,
        classifier = classifier
    )

    cost = classifier.L2(y)

    # compiling Theano functions
    
    # Calculates the output of the test data
    test_model = theano.function(
        inputs=[index],
        outputs=classifier_test.last_layer.output,
        givens={
            x: test_set_x[index * batch_size:(index + 1) * batch_size]}
    )
    
    # Calculates the output of the hidden layer
    #hidden_model = theano.function(
    #    inputs=[index],
    #    outputs=classifier_test.hiddenLayer.output,
    #    givens={
    #        x: test_set_x[index * batch_size:(index + 1) * batch_size]}
    #)
    
    # Calculate the L2 error of the validation set
    validate_model = theano.function(
        inputs=[index],
        outputs=classifier_test.L2(y),
        givens={
            x: valid_set_x[index * batch_size:(index + 1) * batch_size],
            y: valid_set_y[index * batch_size:(index + 1) * batch_size]
        }
    )
    
    # Gradient and update function
    gparams = [T.grad(cost, param) for param in classifier.params]
    updates = [
        (param, param - learning_rate * gparam)
        for param, gparam in zip(classifier.params, gparams)
    ]
    
    # Init train model
    train_model = theano.function(
        inputs=[index],
        outputs=cost,
        updates=updates,
        givens={
            x: train_set_x[index * batch_size: (index + 1) * batch_size],
            y: train_set_y[index * batch_size: (index + 1) * batch_size]
        }
    )

    ###############
    # TRAIN MODEL #
    ###############
    print('... training')

    for n in range(n_epochs):
        costs = [train_model(i) for i in range(n_train_batches)]
        val_costs = [validate_model(i) for i in range(n_valid_batches)]
        print("Cost:", np.mean(costs), "Val error:", np.mean(val_costs))

    # Calculate output of the test set
    output = np.zeros((0, 2))
    for i in range(n_test_batches):
        output = np.vstack((output, test_model(i)))

    #hidden = np.zeros((0, 200))
    #for i in range(n_test_batches):
    #    hidden = np.vstack((hidden, hidden_model(i)))

    print("... finished training")

    print(" ")
    print("Mean value, a:", np.mean(output[:, 0]))
    print("Mean L2-error, a:", np.mean((output[:, 0] - test_set_y.get_value()[:, 0])**2))
    print("Mean value, b:", np.mean(output[:, 1]))
    print("Mean L2-error, b:", np.mean((output[:, 1] - test_set_y.get_value()[:, 1])**2))
    
    ########################
    ##   Olden's method   ##
    ########################
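    # Olden's connection-weight method: input i's contribution to output j is the sum over
    # hidden units h of W_in[i, h] * W_out[h, j], i.e. the matrix product W_in @ W_out below.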
    
    # Calculate the D matrix 
    D = np.dot(classifier.params[0].get_value(),classifier.params[2].get_value())
    
    # Calculate the absolute value of the D-matrix
    cw    = np.abs(D)
    
    # Normalize
    for n in range(cw.shape[1]):
        cw[:,n]    /= cw[:,n].sum()
    test_x = datasets[0][0].get_value()


    f, axarr = plt.subplots(4, sharex=True)
    f.tight_layout() 
    axarr[0].plot(test_x[0],label="input")
    axarr[0].set_title('Example time series, x (m)')
    axarr[1].plot(cw[:,0],color="red") 
    axarr[1].set_title('Relative contribution, oscillation term $a$ ')
    axarr[2].plot(cw[:,1],color="blue")
    axarr[2].set_title('Relative contribution, friction term $b$')
    axarr[3].plot(np.cumsum(cw[:,0]),color="red")
    axarr[3].plot(np.cumsum(cw[:,1]),color="blue")
    axarr[3].set_title('Cumulative contribution, $a$ (red) and $b$ (blue)')
    
    plt.show()
Example #11
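# Linear-regression demo: recover weights in 5-D and compare to the ground truth, then fit and plot the 1-D case.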
import numpy as np
from matplotlib import pyplot as plt
from generate_data import gen_data
from L_r import linear_regression

# High-dimensional linear regression: compare the recovered weights with the ground truth
x, y, w = gen_data(200, 5)
w_ = linear_regression(x, y)
print(w - w_)

# 1-D case, with a plot
x, y, w = gen_data(200, 1)
#x, y, w = gen_data(200, 1, 1)  # inject outlier points
w_ = linear_regression(x, y)
x1 = np.arange(0, 10, 0.1).reshape(1, -1)
y1 = w.dot(x1)
y2 = w_.dot(x1)
plt.figure(1)
plt.plot(x.reshape(-1), y, 'r+')
plt.plot(x1.reshape(-1), y1, 'b', x1.reshape(-1), y2, 'g')
plt.show()
Example #12
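    # Curriculum-style training: for each difficulty level, generate (state, value) data with A*,
    # train the value networks, then run a search test.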
    def start(self):
        start_time = datetime.datetime.now()
        T_state = gen_tar_state(self.env.columns, self.env.rows,
                                len(self.env.containers))
        generate_data = gen_data(self.net, self.env)
        test = sort(self.net, self.env)

        for difficulty in range(1, CFG.difficulty + 1):
            print('=' * 80)
            print('difficulty', difficulty, 'start')
            print('-' * 25)

            game_start_time = datetime.datetime.now()
            gen_data_start_time = datetime.datetime.now()

            # list for storing data
            value_data = []
            value_pi_data = []

            # keep generating data until there are at least CFG.maximum_data samples
            while len(value_data) < CFG.maximum_data:
                # generate random target state
                T_state.containers = deepcopy(self.env.containers)
                tar_state = T_state.make_tar_state()

                # generate data with A* algorithm
                tmp_value_data, tmp_value_pi_data = generate_data.generate_data(
                    tar_state, difficulty=difficulty)
                # add data
                value_data = value_data + tmp_value_data
                value_pi_data = value_pi_data + tmp_value_pi_data

            print(len(value_data), 'data generated')
            gen_data_end_time = datetime.datetime.now()
            print('data generating time: ',
                  gen_data_end_time - gen_data_start_time)
            print('generate data complete time: ', datetime.datetime.now())

            # train value
            train_start_time = datetime.datetime.now()
            self.net.train_value(value_data)
            self.net.train_value_pi(value_pi_data)
            train_end_time = datetime.datetime.now()
            print('training time: ', train_end_time - train_start_time)
            print('difficulty', difficulty,
                  ' generating data and training complete')
            print('-' * 25)

            print('start test')
            test.search(difficulty)

            # print time consumption
            print('-' * 25, 'summary', '-' * 25)
            print('difficulty', difficulty, ' complete')
            print('completed time: ', datetime.datetime.now())
            print('data generating time: ',
                  gen_data_end_time - gen_data_start_time)
            print('training time: ', train_end_time - train_start_time)
            print('this difficulty time: ',
                  datetime.datetime.now() - game_start_time)
            print('total time: ', datetime.datetime.now() - start_time)