Example #1
class Frame:
    def __init__(self, img):
        self.feature_extractor = FeatureExtractor()
        self.display = Display()
        self.frames = []

        self.kpus = None
        self.des = None
        self.pts = None

        if img is not None:
            self.kpus, self.des = self.feature_extractor.extract(img)
            self.pts = [None] * len(self.kpus)
Example #2
from sklearn.metrics import accuracy_score, confusion_matrix


class ModelBase(object):
    def __init__(self):
        self.feature_extractor = FeatureExtractor()
        self.model = None

    def train(self, data_path):
        x, y = self.feature_extractor.fit_transform(data_path)
        self.model.fit(x, y)

    def validate(self, data_path):
        x, y = self.feature_extractor.transform(data_path)
        y_pred = self.model.predict(x)
        print(confusion_matrix(y, y_pred))
        print(accuracy_score(y, y_pred))
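
ModelBase is a template: a subclass is expected to assign a concrete estimator to self.model before train() is called. Below is a minimal sketch of what that might look like with scikit-learn; the SVC choice, the subclass name, and the data paths are illustrative assumptions, not part of the original project.

from sklearn.svm import SVC


class SVMModel(ModelBase):
    def __init__(self):
        super().__init__()
        # any estimator exposing fit()/predict() satisfies the template
        self.model = SVC(kernel='linear')


# hypothetical usage; the paths are placeholders
model = SVMModel()
model.train('data/train')
model.validate('data/test')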
Example #3
parser.add_argument('--batch_size', type=int, metavar='N', default=1)
parser.add_argument('--worker_num', type=int, default=16)
parser.add_argument('--width',
                    type=int,
                    default=1280,
                    help='width of the target image in pixels')
parser.add_argument('--height',
                    type=int,
                    default=720,
                    help='height of the target image in pixels')
parser.add_argument('--pose_dim', type=int, default=50)
parser.add_argument('--sample_rate', type=int, default=15360)
parser.add_argument('--fps', type=str, default='15')
args = parser.parse_args()

extractor = FeatureExtractor()

# keypoint counts per OpenPose output: BODY_25 body model, face, and both hands
pose_keypoints_num = 25
face_keypoints_num = 70
hand_left_keypoints_num = 21
hand_right_keypoints_num = 21


def visualize_json(fname, json_path, img_path):
    fname_prefix = fname.split('_')[0]
    json_file = os.path.join(json_path, fname)
    img = Image.fromarray(
        read_keypoints(json_file, (args.width, args.height),
                       remove_face_labels=False,
                       basic_point_only=False))
    img.save(os.path.join(img_path, f'{fname_prefix}.jpg'))
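
The read_keypoints helper is project code and is not reproduced on this page, but the counts above (25 body, 70 face, 21 per hand) match OpenPose's standard output. As orientation, a hedged sketch of pulling the raw arrays out of one OpenPose JSON file; the field names follow OpenPose's documented format, and everything else is an assumption.

import json

import numpy as np


def load_openpose_keypoints(json_file):
    # OpenPose writes one JSON per frame; each person carries
    # flat [x, y, confidence, ...] arrays
    with open(json_file) as f:
        data = json.load(f)
    if not data['people']:
        return None
    person = data['people'][0]
    pose = np.array(person['pose_keypoints_2d']).reshape(-1, 3)      # 25 x 3
    face = np.array(person['face_keypoints_2d']).reshape(-1, 3)      # 70 x 3
    hand_l = np.array(person['hand_left_keypoints_2d']).reshape(-1, 3)
    hand_r = np.array(person['hand_right_keypoints_2d']).reshape(-1, 3)
    return pose, face, hand_l, hand_r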
Example #4
import numpy as np
import cv2
import sdl2.ext
import sdl2
from display import Display
from extractor import FeatureExtractor

W = 1920 // 2
H = 1080 // 2

sdl2.ext.init()

dis = Display(W, H)
feature = FeatureExtractor()


def process_frame(img):
    img = cv2.resize(img, (W, H))
    kp, des = feature.extract(img)
    for p in kp:
        u, v = map(lambda x: int(round(x)), p.pt)
        cv2.circle(img, (u, v), color=(255, 0, 0), radius=3)
    dis.draw(img)
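
The extractor module itself is not shown on this page. A minimal sketch of the extract() interface this example calls, built on OpenCV's ORB descriptor; detecting corners with goodFeaturesToTrack and describing them with ORB mirrors common toy-SLAM code, but the real class may differ.

import cv2


class FeatureExtractor:
    def __init__(self):
        self.orb = cv2.ORB_create()

    def extract(self, img):
        # detect strong corners, then compute ORB descriptors for them
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        corners = cv2.goodFeaturesToTrack(gray, maxCorners=3000,
                                          qualityLevel=0.01, minDistance=3)
        kps = [cv2.KeyPoint(float(c[0][0]), float(c[0][1]), 20)
               for c in corners]
        kps, des = self.orb.compute(img, kps)
        return kps, des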
Example #5
from extractor import FeatureExtractor
import canny, glcm, histogram

extractor = FeatureExtractor()

extractor.dump_features_multiple(histogram.Histogram, 'tramas100/tramas_list.txt', 'salida/out_histogram_tramas.json')
extractor.dump_features_multiple(histogram.Histogram, 'irma100/irma_list.txt', 'salida/out_histogram_irma.json')

extractor.dump_features_multiple(canny.Canny, 'tramas100/tramas_list.txt', 'salida/out_canny_tramas1.json')
extractor.dump_features_multiple(canny.Canny, 'irma100/irma_list.txt', 'salida/out_canny_irma.json')

extractor.dump_features_multiple(glcm.Glcm, 'tramas100/tramas_list.txt', 'salida/out_glcm_tramas.json')
extractor.dump_features_multiple(glcm.Glcm, 'irma100/irma_list.txt', 'salida/out_glcm_irma.json')

extractor.dump_to_pex('salida/out_histogram_tramas.json', 'salida/pex/out_histogram_tramas')
extractor.dump_to_pex('salida/out_histogram_irma.json', 'salida/pex/out_histogram_irma')

extractor.dump_to_pex('salida/out_canny_tramas1.json', 'salida/pex/out_canny_tramas')
extractor.dump_to_pex('salida/out_canny_irma.json', 'salida/pex/out_canny_irma')

extractor.dump_to_pex('salida/out_glcm_tramas.json', 'salida/pex/out_glcm_tramas')
extractor.dump_to_pex('salida/out_glcm_irma.json', 'salida/pex/out_glcm_irma')
Example #6
File: slam.py  Project: ruitaiS/SLAM
    # Opens a file if one is passed as a command-line argument,
    # else tries to open the drone feed (assumed at index 1),
    # and falls back to the webcam on failure
    cap = None
    if len(sys.argv) > 1:
        cap = cv2.VideoCapture(str(sys.argv[1]))
    else:
        cap = cv2.VideoCapture(1)

    if cap is None or not cap.isOpened():
        print("Video Error; Defaulting to Webcam")
        cap = cv2.VideoCapture(0)

    # Use the first frame to initialize the feature extractor.
    # F is the focal length used in the camera intrinsic matrix.
    frame = cap.read()[1]
    while frame is None:
        print("Waiting for Video")
        frame = cap.read()[1]
    fe = FeatureExtractor(frame, F=1)

    #Main Detection Loop
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            process_frame(frame)
            cv2.imshow("image", frame)
            cv2.waitKey(1)
        else:
            cap.release()
            break
Example #7
    reviewsplit = int(len(reviews) * ratio)
    commentsplit = int(len(comments) * ratio)

    testset = reviews[0 : reviewsplit] + comments[0 : commentsplit]
    trainingset = reviews[reviewsplit:] + comments[commentsplit:]

    return trainingset, testset

if __name__ == '__main__':
    conn = sqlite3.connect('data.db')
    cursor = conn.cursor()

    
    keywords = get_keywords('keywords')
    extractor = FeatureExtractor(keywords=keywords, domains=['imgur'], mean_kw=36.165, mean_len=282.192)
    net = create_network()
    print(str(net))
    datasets = create_set(get_data())
    trainingset, testset = split_set(datasets)

    # keep track of the accuracies as the net is trained
    accuracies = []
    accuracies.append(float(net.test(testset)['accuracy']))

    decrease_counter = 0
    counter = 0
    max_accuracy = 0
    while decrease_counter < 128:
        random.shuffle(trainingset)
        for c, f in trainingset:
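
The excerpt ends inside the training loop. For context, a generic sketch of the early-stopping pattern that the surrounding variables (accuracies, max_accuracy, decrease_counter) suggest; the per-sample train call and the bookkeeping details are assumptions, not recovered from the original.

# hypothetical loop body, for illustration only
while decrease_counter < 128:
    random.shuffle(trainingset)
    for c, f in trainingset:
        net.train_single(f, c)  # assumed per-sample training call

    accuracy = float(net.test(testset)['accuracy'])
    accuracies.append(accuracy)
    counter += 1

    # stop once accuracy has not improved for 128 consecutive rounds
    if accuracy > max_accuracy:
        max_accuracy = accuracy
        decrease_counter = 0
    else:
        decrease_counter += 1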
Example #8
class Frame:
    def __init__(self, img):
        self.feature_extractor = FeatureExtractor()
        self.display = Display()
        self.frames = []

        self.kpus = None
        self.des = None
        self.pts = None

        if img is not None:
            self.kpus, self.des = self.feature_extractor.extract(img)
            self.pts = [None] * len(self.kpus)

    @staticmethod
    def match_frames(f1, f2):
        # Match the previous and current frames (used in triangulation)
        bf = cv2.BFMatcher(cv2.NORM_HAMMING)

        try:
            matches = bf.knnMatch(f1.des, f2.des, k=2)
        except cv2.error:
            # keep the return shape consistent so callers can unpack it
            return np.array([]), np.array([]), np.array([])

        good_matches = []
        idx_of_des_from_f1, idx_of_des_from_f2 = [], []
        idx_set1, idx_set2 = set(), set()

        # using Lowe's ratio to filter out bad matches
        for m, n in matches:
            if m.distance < 0.75 * n.distance:
                p1 = f1.kpus[m.queryIdx]
                p2 = f2.kpus[m.trainIdx]

                # ensure distance is within 32
                if m.distance < 32:
                    if m.queryIdx not in idx_set1 and m.trainIdx not in idx_set2:
                        idx_of_des_from_f1.append(m.queryIdx)
                        idx_of_des_from_f2.append(m.trainIdx)
                        idx_set1.add(m.queryIdx)
                        idx_set2.add(m.trainIdx)

                        good_matches.append((p1, p2))

        good_matches = np.array(good_matches)
        idx_of_des_from_f1 = np.array(idx_of_des_from_f1)
        idx_of_des_from_f2 = np.array(idx_of_des_from_f2)

        # # fit matrix
        # model, inliers = ransac((good_matches[:, 0], good_matches[:, 1]), EssentialMatrixTransform, min_samples=8,
        #                         residual_threshold=0.02, max_trials=100)
        #
        # print(model)

        # return idx_of_des_from_f1[inliers], idx_of_des_from_f2[inliers], fundamentalToRt(model.params)
        return good_matches, idx_of_des_from_f1, idx_of_des_from_f2

    def process_frame(self, img):
        img = cv2.resize(img, (self.display.W, self.display.H))

        # takes image from cap and turns it into a frame & gets kps/des
        frame = Frame(img)
        self.frames.append(frame)
        if len(self.frames) <= 1:
            return

        f1 = self.frames[-1]
        f2 = self.frames[-2]

        # match the two most recent frames
        good_matches, idx1, idx2 = self.match_frames(f1, f2)
        if len(good_matches) == 0:
            return

        proj_points_1 = np.array([
            np.array([kp.pt[0] for kp in good_matches[:, 0]]),
            np.array([kp.pt[1] for kp in good_matches[:, 0]])
        ])

        proj_points_2 = np.array([
            np.array([kp.pt[0] for kp in good_matches[:, 1]]),
            np.array([kp.pt[1] for kp in good_matches[:, 1]])
        ])

        # TODO: recover the second camera's pose (Rt); a sketch of doing this
        # from the essential matrix follows this example
        points_in_3d = cv2.triangulatePoints(IRt, Rt, proj_points_1,
                                             proj_points_2)
        print(points_in_3d)

        # for 2D display
        kps_frame = self.display.process_kps_to_frame(img, frame.kpus)
        self.display.show(img)
        self.display.show(kps_frame)
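
The TODO in process_frame is the standard two-view initialization problem. A hedged sketch of one common way to obtain the second camera's pose with OpenCV, via the essential matrix; the camera matrix K and the (N, 2) point layout are assumptions.

import cv2
import numpy as np


def recover_projection_matrices(pts1, pts2, K):
    # pts1, pts2: matched pixel coordinates as (N, 2) float arrays
    E, mask = cv2.findEssentialMat(pts1, pts2, K,
                                   method=cv2.RANSAC, threshold=1.0)
    # decompose E into a rotation and a unit-scale translation
    _, R, t, mask = cv2.recoverPose(E, pts1, pts2, K, mask=mask)
    P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])  # first camera at origin
    P2 = K @ np.hstack([R, t])                         # second camera pose
    return P1, P2

With P1 and P2 in hand, cv2.triangulatePoints(P1, P2, proj_points_1, proj_points_2) accepts the 2xN point arrays built in process_frame above.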
Example #9
import glob
import os
import pickle
from PIL import Image
from extractor import FeatureExtractor

fe = FeatureExtractor()

for img_path in sorted(glob.glob('static/animal/*.png')):
    print(img_path)
    img = Image.open(img_path)  # PIL image
    feature = fe.extract(img)
    feature_path = 'static/feature/' + os.path.splitext(
        os.path.basename(img_path))[0] + '.pkl'
    # save the feature array to the feature folder in .pkl format
    with open(feature_path, 'wb') as f:
        pickle.dump(feature, f)
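
This script is the offline half of a typical image-search setup: one cached feature vector per image. A hedged sketch of the query side, assuming fe.extract returns a 1-D numpy vector (the extractor module is not shown) and with 'query.png' as a placeholder path.

import glob
import pickle

import numpy as np
from PIL import Image

# load the cached features back into memory
features, paths = [], []
for pkl_path in sorted(glob.glob('static/feature/*.pkl')):
    with open(pkl_path, 'rb') as f:
        features.append(pickle.load(f))
    paths.append(pkl_path)
features = np.array(features)

# rank all images by L2 distance to the query image
query = fe.extract(Image.open('query.png'))
dists = np.linalg.norm(features - query, axis=1)
for i in np.argsort(dists)[:5]:
    print(paths[i], dists[i])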
Example #10
import glob
import os
import sys


def getAll_audio(source_dir, extension='wav'):
    """
    Return the list of all audio files.

    Searches one directory level below source_dir for audio
    files matching the given extension.
    """
    audioFiles = glob.glob(source_dir + "/*/*." + extension)
    return audioFiles


if __name__ == '__main__':
    source_dir = sys.argv[1]
    output_dir = "features"

    fe = FeatureExtractor("gen")

    audioFiles = getAll_audio(source_dir)

    print "started feature extraction..."
    for af in audioFiles:
        filename = os.path.basename(af).split(".")[0] + ".json"
        oFile = os.path.join(output_dir, filename)
        fe.extract(af, oFile)

    print "feature extraction complete"
    print "features stored in %s directory" % output_dir

    f = Feeder()

    dir_path = output_dir
Example #11
    def __init__(self):
        self.feature_extractor = FeatureExtractor()
        self.model = None
Example #12
import time
import cv2
from display import Display2D
from extractor import FeatureExtractor
import numpy as np

W = 1920 // 2
H = 1080 // 2

F = 270  # focal length in pixels
disp = Display2D(W, H)

# camera intrinsic matrix: focal length F, principal point at the image center
K = np.array([[F, 0, W // 2], [0, F, H // 2], [0, 0, 1]])
print(K)
fe = FeatureExtractor(K)


def process_frame(img):
    img = cv2.resize(img, (W, H))
    matches, pose = fe.extract(img)
    if pose is None:
        return

    print("%d matches" % (len(matches)))
    print(pose)

    for pt1, pt2 in matches:
        u1, v1 = fe.denormalize(pt1)
        u2, v2 = fe.denormalize(pt2)

        cv2.circle(img, (u1, v1), color=(0, 255, 0), radius=3)
        cv2.line(img, (u1, v1), (u2, v2), color=(255, 0, 0))
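
denormalize belongs to the unshown extractor module. In code of this style it usually maps normalized image coordinates back to pixel coordinates through the intrinsic matrix K defined above; a minimal sketch under that assumption, with the exact rounding behavior a guess.

def denormalize(K, pt):
    # lift the normalized [x, y] point and apply the intrinsic matrix
    ret = K @ np.array([pt[0], pt[1], 1.0])
    return int(round(ret[0])), int(round(ret[1]))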