Example #1
def active_shape_model(X, testimg, max_iter, Nr_incisor, search_length):
    """

    :param X:   Init Guess
    :param testimg:   target image
    :param max_iter:   total iteration limitation
    :param Nr_incisor:   which nr. of incisor is used to do the asm
    :return:     a model describe the target incisor on the image
    """
    img = median_filter(testimg)
    img = bilateral_filter(img)

    #img = top_hat_transform(img)
    #img = bottom_hat_transform(img)
    img = sobel(img)
    #img = canny(img)
    #img = sobel(img)
    X = Landmarks(X).show_points()
    # Initial value
    nb_iter = 0
    n_close = 0
    total_s = 1
    total_theta = 0
    # Begin to iterate.
    lm_objects = load_training_data(Nr_incisor)
    landmarks_pca = PCA.ASM(lm_objects)
    #search_length = 5
    glm_range = 3
    # n_close is only updated in the disabled __findfits block below, so max_iter bounds the loop.
    while n_close < 16 and nb_iter <= max_iter:

        # 1. Examine a region of the image around each point Xi to find the
        # best nearby match for the point
        """
        Y, n_close, quality = self.__findfits(X, img, gimg, glms, m)
        if quality < best:
            best = quality
            best_Y = Y
        Plotter.plot_landmarks_on_image([X, Y], testimg, wait=False,
                                           title="Fitting incisor nr. %d" % (self.incisor_nr,))

        # no good fit found => go back to best one
        if nb_iter == max_iter:
            Y = best_Y
        """
        # Training glm
        # cov, mean = grey_level_model(lm, glm_range)
        # Y = find_the_best_score(X, img, search_length, glm_range, cov, mean)

        Y = get_max_along_normal(X, search_length, img)
        #print Y
        # 2. Update the parameters (Xt, Yt, s, theta, b) to best fit the
        # new found points X

        b, t, s, theta = parameter_update(X, Y, Nr_incisor)
        """ 
        Apply constraints to the parameters, b, to ensure plausible shapes
        We clip each element b_i of b to b_max*sqrt(l_i) where l_i is the
        corresponding eigenvalue.
        """
        b = np.clip(b, -3, 3)  # simplified: fixed bounds instead of +/- 3*sqrt(l_i) per eigenvalue
        # t = np.clip(t, -5, 5)
        # limit scaling
        s = np.clip(s, 0.95, 1.05)
        if total_s * s > 1.20 or total_s * s < 0.8:
            s = 1
        total_s *= s
        # limit rotation
        theta = np.clip(theta, -math.pi / 8, math.pi / 8)
        if total_theta + theta > math.pi / 4 or total_theta + theta < -math.pi / 4:
            theta = 0
        total_theta += theta
        """Finish limitation"""

        # The positions of the model points in the image, X, are then given
        # by X = TXt,Yt,s,theta(X + Pb)
        """By updating X and apply equation 4, to map the dataset into image coordinates"""
        X = Landmarks(X).as_vector()
        X = Landmarks(X + np.dot(landmarks_pca.pc_modes, b)).T(t, s, theta)
        #Plotter.plot_landmarks_on_image([X_prev, X], testimg, wait=False,
        #                                    title="Fitting incisor nr. %d" % (Nr_incisor,))
        X = X.show_points()
        """Calibration"""
        img2 = img.copy()
        final_image = drawlines(img2, X)
        cv2.imshow('iteration results ', final_image)
        cv2.waitKey(10)
        # cv2.imwrite('Data\Configure\iteration-%d.tif' % nb_iter, final_image)
        nb_iter += 1
        # print('this is the %d iteration'% nb_iter)

    cv2.imwrite(r'Data\Configure\incisor-%d.tif' % Nr_incisor, final_image)
    return X
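The comment in the snippet describes clipping each element of b to plus or minus b_max * sqrt(lambda_i), while the code above uses a fixed range of plus or minus 3. A minimal sketch of the eigenvalue-based constraint, assuming the PCA object can expose its eigenvalues (the `eigenvalues` argument below is hypothetical):

import numpy as np

def constrain_shape_params(b, eigenvalues, b_max=3.0):
    # Clip each b_i to +/- b_max * sqrt(lambda_i) so the generated shape stays plausible.
    limits = b_max * np.sqrt(eigenvalues)
    return np.clip(b, -limits, limits)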
Example #2
        self.w1 = w_load
        self.b1 = b_load

    def predict(self, test_x):
        """
        get the answer of test_x by calculate logistic using the param learned before, and set the self.y_predict
        param: test_x(np.array)
        return: y_predict(np.array)
        """
        print("predict......")
        y_predict = self.sigmoid(np.dot(self.w1, test_x) + self.b1)
        print("finish.......")
        y_result = list()
        for y in y_predict[0]:
            if y > 0.5:
                y_result.append(1)
            else:
                y_result.append(0)
        self.y_predict = y_result
        return y_result


if __name__ == "__main__":
    x, y = load_training_data()
    print(x.shape)
    log_reg = logistic_regression(x, y)
    log_reg.load_param()
    test_x = load_test_data()
    y_predict = log_reg.predict(test_x)
    print(y_predict)
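For reference, the element-wise thresholding loop in predict can be written as a single vectorized operation; a minimal sketch, assuming y_predict has shape (1, n_samples):

import numpy as np

def threshold_predictions(y_predict, cutoff=0.5):
    # Turn sigmoid outputs in [0, 1] into hard 0/1 labels.
    return (np.asarray(y_predict)[0] > cutoff).astype(int).tolist()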
Example #3
def parameter_update(X, Y, Nr_incisor):
    """This parts strictly follow  Tim Cootes's paper as Protocol 1
    Y should be given pointset or initial guess
    X initial guess"""
    lm_objects = load_training_data(Nr_incisor)
    landmarks_pca = PCA.ASM(lm_objects)
    b = np.zeros(landmarks_pca.pc_modes.shape[1])
    b_prev = np.ones(landmarks_pca.pc_modes.shape[1])
    i = 0
    X = Landmarks(X).as_vector()
    Y = Landmarks(Y)
    while (np.mean(np.abs(b - b_prev)) >= 1e-14):
        i += 1
        # 2. Generate the model point positions using x = X + Pb
        x = Landmarks(X + np.dot(landmarks_pca.pc_modes, b))

        # 3. Find the pose parameters (Xt, Yt, s, theta) which best align the
        # model points x to the current found points Y
        t, s, theta = align_params(x, Y)
        #t, s, theta = align_params(x.get_crown(is_upper), Y.get_crown(is_upper))

        # 4. Project Y into the model co-ordinate frame by inverting the
        # transformation T
        y = Y.invT(t, s, theta)

        # 5. Project y into the tangent plane to X by scaling:
        # y' = y / (y*X).
        yacc = Landmarks(y.as_vector() / np.dot(y.as_vector(), X.T))

        # 6. Update the model parameters to match to y': b = PT(y' - X)
        b_prev = b

        b = np.dot(landmarks_pca.pc_modes.T, (yacc.as_vector() - X))

        # 7. If not converged, return to step 2
    """
    The discription is discussed in the paper.....
    This function describe the opearation of changing from given lm to the model with convergended b.
    We should iterate b first to make it convergence, then we get the final value of b, which is the only
    meaningful value in this section.  The eigen value b refers to a point in high dimension space which also 
    represents a model.
    
    In this case, if another given 'model', like initial guess or lm, we must find this b. The way to find this b,
    is to used this iteration method,
    
    After b is convergence, then we know b is static and hence the model is steady, we then abstract the 
    pose(xt, yt, theta, s) for active shape model. 
    
    
    Generally speaking, accoording to equation 2 and 3, If b maintains the same, then x = xbar, then two models alignes well.
    
    For some practical information. 
    Feed Xbar (PCA MEAN) and Y (Final goal state model).
    By iteration using eqaition 2, we have new y (which is also the x in equation).
    then we align the new obtained y to Final gaol state Y. and we have a tmp parameters.
    
    The reason why we cannot get the final parameters after the alignment should be supported by the appendix 6 in paper.
    
    In this case, we update b by using equation 3 and check if it is convergent.
    If not, it shows the the previous obtained value y is not like the given model Y.
    Then we need to continue iteration until b convergent.
    What we want to get from this function is the transmission pose parameters.
    """
    return b, t, s, theta
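The note above comes down to two equations from Cootes's paper: the shape model x = xbar + P b (equation 2) and the parameter update b = P^T (y' - xbar) (equation 3). A minimal standalone sketch of this pair, assuming the columns of P are the orthonormal principal modes:

import numpy as np

def generate_shape(x_bar, P, b):
    # Equation 2: synthesize a shape from the mean shape and the mode weights b.
    return x_bar + np.dot(P, b)

def update_shape_params(x_bar, P, y_tangent):
    # Equation 3: project the (tangent-plane) target shape onto the principal modes.
    return np.dot(P.T, y_tangent - x_bar)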
Example #4
    for i in range(8):
        Nr_incisor = i + 1
        source = r'Data\Landmarks\c_landmarks\landmarks1-%d.txt' % Nr_incisor
        lm = Landmarks(source).show_points()
        # print(lm)  # you can print it to check differences.
        """Initial position guess"""
        ini_pos = np.array([[570, 360, 390], [620, 470, 390], [640, 570, 370],
                            [570, 670, 370], [640, 400, 670], [630, 480, 670],
                            [620, 570, 630], [640, 650, 610]])

        # ini_pos = np.array([[570, 360, 390], [570, 470, 390], [570, 570, 370], [570, 670, 370], [570, 400, 670],
        #                    [470, 500, 680], [470, 590, 630], [470, 650, 610]])
        s = ini_pos[i, 0]
        t = [ini_pos[i, 1], ini_pos[i, 2]]
        Golden_lm = load_training_data(Nr_incisor)
        Golden_lm = rescale_withoutangle(gpa(Golden_lm)[2], t, s)  # Y
        img = cv2.imread('Data/Radiographs/01.tif', 0)
        init_guess_img = img.copy()
        crop_img_ori = crop(img, 1000, 500, 1000, 1000)
        crop_img_init_guess = crop(init_guess_img, 1000, 500, 1000, 1000)
        crop_correct_lm = crop(init_guess_img, 1000, 500, 1000, 1000)
        #=======without crop
        # crop_img_ori = img
        # crop_img_init_guess = init_guess_img
        # crop_correct_lm = init_guess_img
        """Drawing initial guess"""
        #cv2.imshow('first golden model', drawlines(crop_img_init_guess, Golden_lm))
        cv2.imwrite(r'Data\Configure\init_guess_incisor-%d.tif' % Nr_incisor,
                    drawlines(crop_img_init_guess, Golden_lm))
        cv2.imwrite(r'Data\Configure\correct_lm_incisor-%d.tif' % Nr_incisor,
                    drawlines(crop_correct_lm, lm))
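rescale_withoutangle appears to scale a normalized shape by s and translate it to position t without applying any rotation; a minimal sketch of that idea (an assumption about the helper, not its actual implementation), with landmark points given as an (N, 2) array:

import numpy as np

def place_shape(points, t, s):
    # Scale a normalized shape by s and translate it to image position t (no rotation).
    return np.asarray(points, dtype=float) * s + np.asarray(t, dtype=float)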
Example #5
###
import os

from gensim.models import word2vec
from util import load_training_data, load_testing_data
path_prefix = './'


def train_word2vec(x):
    # Train the word2vec word embedding
    model = word2vec.Word2Vec(x,
                              size=250,
                              window=5,
                              min_count=5,
                              workers=12,
                              iter=10,
                              sg=1)
    return model


if __name__ == "__main__":
    print("loading training data ...")
    train_x, y = load_training_data('training_label.txt')
    train_x_no_label = load_training_data('training_nolabel.txt')

    print("loading testing data ...")
    test_x = load_testing_data('testing_data.txt')

    model = train_word2vec(train_x + train_x_no_label + test_x)
    #model = train_word2vec(train_x + test_x)

    print("saving model ...")
    #model.save(os.path.join(path_prefix, 'model/w2v_all.model'))
    model.save(os.path.join(path_prefix, 'w2v_all.model'))
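Once saved, the embedding can be reloaded later for the downstream model; a minimal sketch using gensim's load API (the query token 'movie' is only an example and must exist in the training vocabulary):

from gensim.models import word2vec

# Reload the trained embedding and query it.
model = word2vec.Word2Vec.load('./w2v_all.model')
vector = model.wv['movie']                      # 250-dim vector (size=250 above)
neighbours = model.wv.most_similar('movie', topn=5)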
Example #6
        'BATCH_SIZE': 1000,
        'NUM_EPOCHS': 1,
        'LEARNING_RATE': 0.001,
        'HIDDEN_LAYER_SIZE': 150,
        'WORD_VECTOR_DIM': 200,
        'SAVE_LOCATION': os.path.join('./checkpoints/', str(time.time()))
        }

print("# Loading training data")
training_data_raw = open(config['TRAINING_DATA_LOCATION'],'r',encoding='latin-1').readlines()
random.shuffle(training_data_raw)
num_examples = config['NUM_EXAMPLES']
training_data_raw = training_data_raw[:num_examples]

print("# Processing training data")
x_train, y_train, vocab_processor = util.load_training_data(training_data_raw)

print(" Loading and Processing testing data")
testing_data_raw = open(config['TESTING_DATA_LOCATION'],'r',encoding='latin-1').readlines()
x_test, y_test = util.load_testing_data(testing_data_raw, vocab_processor)

print("# Creating RNN")
rnn = TextRNN(x_train.shape[1], y_train.shape[1], config['HIDDEN_LAYER_SIZE'], 
        len(vocab_processor.vocabulary_), config['WORD_VECTOR_DIM'], l2_reg=0.0)
optimizer = tf.train.AdamOptimizer(config['LEARNING_RATE'])
minimizer = optimizer.minimize(rnn.loss)

print("# Initializing Tensorflow")
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)
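With the graph and session initialized, training would proceed by feeding mini-batches to the minimizer; a hypothetical sketch in which rnn.input_x and rnn.input_y are assumed placeholder names on TextRNN (they may differ in the actual class):

# Hypothetical training loop; the placeholder names on TextRNN are assumptions.
batch_size = config['BATCH_SIZE']
for epoch in range(config['NUM_EPOCHS']):
    for start in range(0, len(x_train), batch_size):
        feed = {rnn.input_x: x_train[start:start + batch_size],
                rnn.input_y: y_train[start:start + batch_size]}
        _, batch_loss = sess.run([minimizer, rnn.loss], feed_dict=feed)
    print("epoch %d done, last batch loss %.4f" % (epoch, batch_loss))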
Example #7
    """
    mat = []
    for lm in landmarks:
        mat.append(lm.as_vector())
    mat = np.array(mat)
    return Landmarks(np.mean(mat, axis=0))


#def evaluation(X, Goldenlm)

if __name__ == '__main__':
    Nr_incisor = 1
    s = 500
    t = [1370, 890]
    Golden_lm = load_training_data(
        Nr_incisor
    )  # the util function only uses the files for radiographs 02-14.tif
    Golden_lm = rescale_withoutangle(gpa(Golden_lm)[2], t, s)
    lm_objects = load_training_data(Nr_incisor)
    landmarks_pca = PCA.ASM(lm_objects)
    #print(np.shape(landmarks_pca.mu_value))
    landmarks_pca_value = rescale_withoutangle(landmarks_pca.mu_value, t, s)

    img = cv2.imread('Data/Radiographs/01.tif', 0)
    img = img.copy()
    img_withlm = drawlines(img, landmarks_pca_value)
    crop_img = crop(img_withlm, 1000, 500, 1000, 1000)

    img = cv2.resize(crop_img, (0, 0), fx=0.5, fy=0.5)
    cv2.imshow('cropped_raw', crop_img)
    #sobel = sobel(crop_img)
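Example #1 (and the commented-out line above) relies on a sobel helper to produce a gradient image for the ASM search; a minimal sketch of what such a helper could look like with OpenCV (an assumption, not the project's actual implementation):

import cv2
import numpy as np

def sobel_gradient(img):
    # Combine horizontal and vertical Sobel responses into an 8-bit gradient magnitude image.
    gx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=3)
    gy = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=3)
    return cv2.convertScaleAbs(np.sqrt(gx ** 2 + gy ** 2))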