Example #1
    def get_topic_proportions_for_every_image():
        """ collect the landmarks url of every image and compute its topic proportions from the CTM output """

        import numpy as np
        from dir_processing import DirProcessing

        landmarks_urls_list = []

        person_ids = DirProcessing.get_all_person_ids()
        for person_id in person_ids:
            perform_ids = DirProcessing.get_all_perform_ids_from_person_id(person_id)
            for perform_id in perform_ids:
                landmarks_urls = DirProcessing.get_all_landmarks_urls_from_sequence(person_id, perform_id)
                landmarks_urls_list.extend(landmarks_urls)
       
        doc_num = len(landmarks_urls_list)

        # load the CTM variational output (final-lambda.dat) and reshape it to a document-by-topic matrix
        dt_file = '../ctm-dist/CTM46/final-lambda.dat'
        dt_vector = np.loadtxt(dt_file)
        topic_num = dt_vector.size // doc_num
        dt_matrix = np.reshape(dt_vector, (doc_num, topic_num))
        np.set_printoptions(suppress=True)

        # exponentiate and row-normalize so that each image gets a proper topic-proportion vector
        final_theta = np.exp(dt_matrix)
        final_theta = final_theta / np.sum(final_theta, axis=1)[:, np.newaxis]

        return landmarks_urls_list, final_theta
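
For clarity, here is a minimal standalone sketch of the same exp-and-row-normalize step on a made-up 2x3 lambda matrix; the values are illustrative only and do not come from the CTM output above.

    import numpy as np

    # made-up log-proportions for 2 documents and 3 topics (illustrative values only)
    dt_matrix = np.array([[0.1, 1.2, -0.5],
                          [2.0, 0.0, 0.3]])

    theta = np.exp(dt_matrix)
    theta = theta / np.sum(theta, axis=1)[:, np.newaxis]

    # every row now sums to 1.0, i.e. a proper topic-proportion vector per document
    print(theta.sum(axis=1))
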
    def generate_features_pool(self):
        """ generate train and test files for classification """

        from analysis import Analysis
        from dir_processing import DirProcessing
        from file_op import FileOp

        landmarks_urls_list, features = Analysis.get_topic_proportions_for_every_image()

        subsets_dict = self.divide_persons_into_subsets()
        
        for i in range(0, len(landmarks_urls_list)):
            landmarks_url = landmarks_urls_list[i]
            label_url = DirProcessing.get_label_url_from_landmarks_url(landmarks_url)
            loc = DirProcessing.get_location_from_sequence(landmarks_url, 3)

            # keep only frames that have a label file and are not in the middle of a sequence
            if label_url and loc != "MIDDLE":
                person_id, perform_id, index_id = DirProcessing.get_id_from_label_url(label_url)
                subset_id = subsets_dict[person_id]
                feature = features[i, :]

                # frames at the start of a sequence get label 0; the rest read their label from file
                if loc == "START":
                    label = 0
                else:
                    label = FileOp.read_label_file(label_url)

                self.features_pool[subset_id].append(feature)
                self.labels_pool[subset_id].append(label)
                self.urls_pool[subset_id].append(landmarks_url)

        print "Features pools have been generated. "
Example #3
    def test():
        import os
        import sys
        lib_path = os.path.abspath('../utilization/')
        sys.path.append(lib_path)

        from dir_processing import DirProcessing
        from file_op import FileOp

        LSF.build_dictionary()
        landmarks_urls = DirProcessing.get_all_landmarks_urls_from_sequence('87','4')

        test_one_sequence = True
        if test_one_sequence:
            video = LSF.lsf_from_sequence(landmarks_urls)
            print video.landmarks_urls

        test_one_landmark = False
        if test_one_landmark:
            landmarks = FileOp.read_landmarks_file(landmarks_urls[11])
            # the first frame of the sequence serves as the neutral-face reference
            landmarks_neural = FileOp.read_landmarks_file(landmarks_urls[0])
            lsf_document = LSF.lsf(landmarks, landmarks_neural)
            landmark = landmarks[35,:]
            landmark_neural = landmarks_neural[35, :]
            word = LSF.compute_word(35, landmark, landmark_neural)
            print landmark, landmark_neural, word
    def draw_landmarks_on_sequence(img_urls):
        import cv2
        from dir_processing import DirProcessing

        # map every image url to its corresponding landmarks url
        landmarks_urls = []
        for i in xrange(0, len(img_urls)):
            img_url = img_urls[i]
            landmarks_url = DirProcessing.generate_landmarks_url_from_img_url(img_url)
            landmarks_urls.append(landmarks_url)

        from file_op import FileOp
        for i in xrange(0, len(img_urls)):
            img_url = img_urls[i]
            landmarks_url = landmarks_urls[i]
            img = FileOp.read_img_file(img_url)
            landmarks = FileOp.read_landmarks_file(landmarks_url)

            for j in xrange(0, landmarks.shape[0]):   # for every landmark point
                loc = landmarks[j, :]
                x = int(round(loc[0]))
                y = int(round(loc[1]))
                cv2.circle(img, (x, y), 1, 255)
            
            print img_url
            cv2.imshow('image', img)

            k = cv2.waitKey(0)
            if k == 27:     # Esc key to stop
                break
        
        cv2.destroyAllWindows()
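
As a self-contained illustration of the drawing calls used above, the sketch below draws a few made-up landmark points on a blank image instead of reading them from the dataset.

    import cv2
    import numpy as np

    # blank grayscale image and a few made-up landmark coordinates
    img = np.zeros((100, 100), dtype=np.uint8)
    landmarks = np.array([[20.3, 30.7], [50.0, 50.0], [80.6, 25.2]])

    for j in range(landmarks.shape[0]):
        x = int(round(landmarks[j, 0]))
        y = int(round(landmarks[j, 1]))
        cv2.circle(img, (x, y), 1, 255)

    cv2.imshow('image', img)
    cv2.waitKey(0)        # wait for any key before closing the window
    cv2.destroyAllWindows()
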
Example #5
    def test():
        import os
        import sys
        lib_path = os.path.abspath('../utilization/')
        sys.path.append(lib_path)

        from dir_processing import DirProcessing
        from file_op import FileOp

        label_url = DirProcessing.generate_label_url('34','3','27')
        label = FileOp.read_label_file(label_url)
        print label
    def test():
        import os
        import sys
        lib_path = os.path.abspath('../utilization/')
        sys.path.append(lib_path)

        from dir_processing import DirProcessing

        img_urls = DirProcessing.get_all_img_urls_from_sequence('109','2')
        print img_urls
        Visualization.draw_landmarks_on_sequence(img_urls)
Example #7
    def generate_corpus_and_write_to_file():
        """ generate the corpus, write it to files and store the LSF corpus features """
        
        import os
        import sys
        lib_path = os.path.abspath('../utilization/')
        sys.path.append(lib_path)

        from dir_processing import DirProcessing

        LSF.build_dictionary()

        lsf_corpus = []

        person_ids = DirProcessing.get_all_person_ids()
        for person_id in person_ids:
            perform_ids = DirProcessing.get_all_perform_ids_from_person_id(person_id)
            for perform_id in perform_ids:
                landmarks_urls = DirProcessing.get_all_landmarks_urls_from_sequence(person_id, perform_id)
                expression_sequence = LSF.lsf_from_sequence(landmarks_urls)
                print 'The feature extraction of expression person S{} and perform time {} has ' \
                        'been done.'.format(person_id, perform_id)
                lsf_corpus.append(expression_sequence)

        import cPickle
        with open('../model/corpus.pk', 'wb') as f:
            cPickle.dump(lsf_corpus, f)
       
        # write each document on its own line as "N wid:count wid:count ..."
        with open('../model/corpus.txt', 'w') as f:
            for expression_sequence in lsf_corpus:
                lsf_sequence = expression_sequence.lsf_sequence
                for lsf_document in lsf_sequence:
                    f.write(str(len(lsf_document)))
                    for word, count in lsf_document.iteritems():
                        wid = LSF.word2id[word]
                        s = " %d:%d" %(wid, count)
                        f.write(s)
                    f.write("\n")
    def demonstrate_wrong_predictions(self):
        from dir_processing import DirProcessing
        from visualization import Visualization

        for i in range(len(self.predict_labels)):
            test_label = self.test_labels[i]
            predict_label = self.predict_labels[i]

            if test_label != predict_label:
                test_expression = Classifier.get_expression_from_label(test_label)
                predict_expression = Classifier.get_expression_from_label(predict_label)
                print "Wrong prediction: {}, Right prediction: {}.".format(predict_expression, test_expression)

                landmarks_url = self.test_urls[i]
                img_url = DirProcessing.generate_img_url_from_landmarks_url(landmarks_url)
                Visualization.draw_landmarks_on_img(img_url)
Example #9
    def random_show_cluster(landmarks_urls_list, label, cluster_index):
        
        import numpy as np
        from dir_processing import DirProcessing
        from file_op import FileOp
        from visualization import Visualization

        # indices of the samples assigned to the requested cluster, in a random viewing order
        sample_list = np.where(label == cluster_index)[0]
        sample_list = np.random.permutation(sample_list)

        print len(sample_list)
       
        landmarks_urls_sublist = []
        for i in sample_list:
            landmarks_urls_sublist.append(landmarks_urls_list[i])

        img_urls_sublist = DirProcessing.generate_img_urls_from_landmarks_urls(landmarks_urls_sublist)

        Visualization.draw_landmarks_on_sequence(img_urls_sublist)
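
A toy version of the cluster selection above, with a made-up label vector, shows how np.where plus a random permutation yields the shuffled member indices:

    import numpy as np

    label = np.array([0, 2, 1, 2, 0, 2])   # made-up cluster assignments
    cluster_index = 2

    sample_list = np.where(label == cluster_index)[0]   # indices assigned to the cluster
    sample_list = np.random.permutation(sample_list)    # random viewing order

    print(sample_list)
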
Example #10
    def divide_persons_into_subsets(self):
        import numpy as np
        from dir_processing import DirProcessing

        person_ids = DirProcessing.get_all_person_ids()
        permute_ids = np.random.permutation(person_ids)

        total_person_num = len(person_ids)

        # boundaries that split the shuffled ids into subset_num nearly equal groups
        step_arr = np.linspace(0, total_person_num, self.subset_num + 1)
        step_arr = step_arr.astype(int)

        subsets_dict = dict.fromkeys(person_ids)

        for i in range(0, self.subset_num):
            for j in range(step_arr[i], step_arr[i + 1]):
                subsets_dict[permute_ids[j]] = i

        return subsets_dict
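
To see how the np.linspace boundaries split the shuffled ids into roughly equal subsets, here is a toy run with made-up person ids and three subsets:

    import numpy as np

    person_ids = ['34', '55', '62', '71', '87', '109', '137']   # made-up person ids
    subset_num = 3

    permute_ids = np.random.permutation(person_ids)
    step_arr = np.linspace(0, len(person_ids), subset_num + 1).astype(int)   # e.g. [0 2 4 7]

    subsets_dict = {}
    for i in range(subset_num):
        for j in range(step_arr[i], step_arr[i + 1]):
            subsets_dict[permute_ids[j]] = i

    print(subsets_dict)
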
    def draw_landmarks_on_img(img_url, drawtext=False):
        import cv2
        from dir_processing import DirProcessing
        landmarks_url = DirProcessing.generate_landmarks_url_from_img_url(img_url)

        from file_op import FileOp
        img = FileOp.read_img_file(img_url)
        landmarks = FileOp.read_landmarks_file(landmarks_url)

        for i in xrange(0, landmarks.shape[0]):   # for every point
            loc = landmarks[i, :]
            x = int(round(loc[0]))
            y = int(round(loc[1]))
            cv2.circle(img, (x, y), 2, 255)

            if drawtext:
                cv2.putText(img, str(i), (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.25, 255)

        cv2.imshow('image',img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()