Example #1
    def _es_bulk_load(self):
        print("sending batch of " + str(len(self.temp)))
        # http://www.elasticsearch.org/guide/reference/api/bulk.html
        # the bulk API takes newline-delimited JSON: an action line followed
        # by the source document, one pair per record
        data = ''
        batch = self.temp
        self.temp = []
        for r in batch:
            data += json.dumps({'index': {'_id': r['_id']}}) + '\n'
            data += json.dumps(r) + '\n'

        r = requests.post(Config.elasticsearch['uri_records'] + '_bulk', data=data)

        # if matching is enabled, try to match whatever was in the batch to the
        # rest of the index content (match against the saved batch: self.temp
        # has already been cleared for the next batch)
        if Config.importer['load']['pubmedcentral']['do_bulk_match']:
            print("matching")
            m = Matcher()
            m.citesandcitedby(batch)

        return r  # pass back the POST response in case it is useful
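For reference, a two-record batch produces a newline-delimited body like the following (hypothetical `_id` values and fields):

    {"index": {"_id": "1"}}
    {"_id": "1", "title": "first record"}
    {"index": {"_id": "2"}}
    {"_id": "2", "title": "second record"}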
Example #2
 def __init__(self, frames_list, Hs):
     self.images = frames_list
     self.count = len(self.images)
     self.left_list, self.right_list, self.center_im = [], [], None
     self.matcher_obj = Matcher()
     self.prepare_lists()
     self.Hs = Hs
Example #3
 def createNewJob(self, contentName, contentID, keyword, content):
     con = None
     try:
         con = psycopg2.connect(database='d1s3idai1l2u3d', user='******', password='******', host='ec2-54-197-241-24.compute-1.amazonaws.com', port='5432', sslmode='require')
         cur = con.cursor()
     except psycopg2.DatabaseError as e:
         print('Error %s' % e)
         sys.exit(1)
     finally:
         if con:
             cur.execute("SELECT * FROM resume");
             rows = cur.fetchall()
             numRows = (len(rows))
             newJob = JobDescNode(contentID, keyword, content)
             # the new job is stored either way; matching and scoring only
             # run once there is at least one resume to match against
             ResumeProcessor.construct(newJob)
             toPrint = encodeClassToJson(newJob)
             cur.execute("INSERT INTO job VALUES (%s,%s,%s,%s)", (toPrint, 'f', contentID, contentName))
             con.commit()
             if numRows != 0:
                 f = Facade()
                 matcher = Matcher(f)
                 scorer = Scorer(f)
                 matcher.matchAll(2)
                 scorer.calculateScore()
             con.close()
Example #4
    def loadBalance(self):

        #self.initVirtualApplication(mon, virtualMachines)
        #print self.virtualApplications
        sortedApp = self.sortByNoNodes(self.virtualApplications)
        #print sortedApp

        match = Matcher()
        match.next_fit(sortedApp)
        return sortedApp
Example #5
 def __buttonpress(self):  # rename drugs handler
     try:
         from Matcher import Matcher
         matcher = Matcher(self.mas[0], self.mas[1], self.mas[2], self.mas[3])
         try:
             matcher.rename_drugs()
             self.showComplete()
         except IOError:
             from Widgets import Widgets
             Widgets.showFNF()
     except Exception as e:
         print(e)
Example #6
def create_matcher():
    global ma, matchready, dbpath, querypath, labels, buttons, extractpick
    expick = extractpick.get()
    if (expick == "Extract from files"):
        ma = Matcher(dbpath, expick)
    else:
        pckpath = filedialog.askopenfilename(initialdir=" ", title="Select Pickle File...", filetypes = (("Pickle files", "*.pck"), ("All files", "*.*")))
        if(pckpath != ""):
            ma = Matcher(dbpath, expick, pckpath)
    matchready = True
    if (querypath != "" and querypath != "(No file selected)"):
        buttons["match_button"].config(state="normal", fg="purple")
    buttons["save_pickle"].config(state="normal")
Example #7
def init():
    bot_id = '1437569240:AAEd2sZ0faC1EwPvQGJPPW4xf7ohP1hTzV8'
    updater = Updater(bot_id)
    updater.setPhotoHandler(imageHandler)

    QualityChecker.init()
    ShoeDetector.init()
    FeatureExtractor.init()
    data_structure = Indexer.build_data_structure(config.DATASET_PATH)
    Matcher.init(data_structure)

    print("Bot is running...")
    updater.start()
Example #8
    def __init__(self, robot):
        self.h, self.w = 320, 240
        self.numLocations = 7
        # host = '134.173.24.116'
        # port = 5003
        # print('Waiting for Connection....')
        # self.ipad= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # self.ipad.connect((host,port))
        # print('Connected!')
        self.matcher = Matcher('BOW', None, self.h, self.w)
        self.frame = self.readImage()
        self.robot = robot

        # for tracking the image during runs
        self.imageIndex = 0
Example #9
 def __init__(self, params=None):
     """ Constructor
     """
     if params is None:
         params = dict(detector='orb', matcher='bf')
     self.matcher = Matcher(params)
     self.F = None
     self.mask = None
     self.H = None
     self.right_e = None
     self.left_e = None
     self.cam = Camera()
     self.E = None  # Essential matrix
     self.index = 0
     self.scene = Map()
Example #10
 def __init__(self, frames_list, Hs):
     self.images = frames_list
     self.count = len(self.images)
     self.left_list, self.right_list, self.center_im = [], [], None
     self.matcher_obj = Matcher()
     self.prepare_lists()
     self.Hs = Hs
Example #11
    def parseStatement(self, statement, lineNumber, indentation):
        '''
        Parses statement lines, e.g. `if matcher -> action`. Also tracks
        indentation to create nested statements.
        '''
        # filter out the 'if ' from the start
        tmp = statement[3:].split('->')
        if len(tmp) != 2:
            return handleParseError(lineNumber, statement, "Missing '->'.")

        condition = tmp[0].strip()
        result = tmp[1].strip()

        if not condition:
            return handleParseError(lineNumber, statement,
                                    "Missing Matcher before ->.")
        if condition in self.matchers:
            condition = self.matchers[condition]
        elif condition[0] == '{' and condition[-1] == '}':
            condition = Matcher(condition, statement, lineNumber)
        else:
            return handleParseError(
                lineNumber, statement,
                "Unknown Matcher or missing {} before ->.")

        # need to handle there not being a specified result
        if not result:
            filter = Filter(condition)
        else:
            if result in self.actions:
                result = self.actions[result]
            elif result[0] == '[' and result[-1] == ']':
                result = Action(result, statement, lineNumber)
            else:
                return handleParseError(
                    lineNumber, statement,
                    "Unknown Action or missing [] after ->.")
            filter = Filter(condition, result)

        if indentation == 0:
            self.filters.append(filter)
            return

        if not self.filters:
            return handleParseError(
                lineNumber, statement,
                "Invalid indentation, this line has no parent")

        parentFilter = self.filters[-1]
        indentation -= 1

        while indentation > 0:
            if not parentFilter.childStatements:
                return handleParseError(
                    lineNumber, statement,
                    "Invalid indentation, this line has no parent")
            parentFilter = parentFilter.childStatements[-1]
            indentation -= 1
        parentFilter.addChild(filter)
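For context, a sketch of the rule syntax this parser accepts (the matcher and action names here are hypothetical; named matchers/actions come from the companion parseVariable method in Example #24):

    isError = {level == "ERROR"}
    alert = [send_alert]
    if isError -> alert
    if {level == "WARN"} -> alert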
Example #12
class ControllerStub(object):
    def __init__(self):
        self.facade = FacadeStub()
        self.matcher = Matcher(self.facade)

    def process(self, resume, job, keyword):
        resumeNode = ResumeNode("name", "999", "*****@*****.**", "0", resume)
        jobNode = JobDescNode("0", job, keyword)
        ResumeProcessor.construct(resumeNode)
        ResumeProcessor.construct(jobNode)
        self.facade.storeJob(jobNode)
        self.facade.storeResume(resumeNode)
        self.matcher.matchAll(0)

    def getResults(self):
        scorer = Scorer(self.facade)
        scorer.calculateScore()
Example #13
def test():
    for line in sys.stdin:
        # comma separated: file_path,words...
        file_path, *words = line.strip().split(",")
        n = get_filename(file_path)
        annotations = get_annotations(n)

        if skip_annotation(annotations):
            continue  # Skip the files we don't care about

        matcher = Matcher(annotations, words)
        word_n = len(annotations)
        if annotations:
            perfect_matches = matcher.get_perfect_matches()
            ign_symbols = matcher.get_perfect_matches_ignoring_symbols()
            perf = len(perfect_matches) / word_n * 100
            ign = len(ign_symbols) / word_n * 100
            unmatched = matcher.get_number_unmatched()
Example #14
    def do_loadCaseLibrary(self, arg):
        """
        Loads the case library.
        Use:
            loadCaseLibrary <filename>      Filename must be a pickle file
        """
        filename = arg
        if os.path.isfile(filename):
            if filename.endswith('.pickle'):
                with open(filename, "rb") as fp:
                    self.cases = pickle.load(fp)

                self.matcher = Matcher(self.cases)
                print('%d loaded cases' % len(self.cases))
            else:
                print('Filename must be a pickle file')
        else:
            print('Filename does not exist')
Example #15
 def createIndex(self):
     ''' Create the feature or color index for each location. '''
     matcher = Matcher(self.method, width=self.w, height=self.h)
     if self.method != 'BOW':
         for i in range(self.numLocations):
             matcher.setDirectory('map/' + str(i))
             if self.method != 'Color':
                 self.indices[i] = matcher.createFeatureIndex()
             else:
                 self.indices[i] = matcher.createColorIndex()
     else:
         matcher.writeIndices()
Example #16
    def createNewResume(self, name, hpNumber, email, contentName, content):
        con = None
        try:
            con = psycopg2.connect(
                database='d1s3idai1l2u3d',
                user='******',
                password='******',
                host='ec2-54-197-241-24.compute-1.amazonaws.com',
                port='5432',
                sslmode='require')
            cur = con.cursor()
        except psycopg2.DatabaseError as e:
            print('Error %s' % e)
            sys.exit(1)
        finally:
            if con:
                cur.execute("SELECT * FROM job")
                rows = cur.fetchall()
                numRows = (len(rows))
                newResume = ResumeNode(name, hpNumber, email, contentName,
                                       content)

                # the new resume is stored either way; matching and scoring
                # only run once there is at least one job to match against
                ResumeProcessor.construct(newResume)
                toPrint = encodeClassToJson(newResume)
                cur.execute(
                    "INSERT INTO resume VALUES (%s,%s,%s,%s,%s,%s)",
                    (toPrint, 'f', contentName, name, hpNumber, email))
                con.commit()
                if numRows != 0:
                    f = Facade()
                    matcher = Matcher(f)
                    scorer = Scorer(f)
                    matcher.matchAll(1)
                    scorer.calculateScore()
                con.close()
Example #17
def audio_matcher():
    """Our main control flow."""

    parser = ArgumentParser(
        description="Compare two audio files to determine if one "
        "was derived from the other. Supports WAVE and MP3.",
        prog="audiomatch")
    parser.add_argument("-f",
                        action="append",
                        required=False,
                        dest="files",
                        default=list(),
                        help="A file to examine.")
    parser.add_argument("-d",
                        action="append",
                        required=False,
                        dest="dirs",
                        default=list(),
                        help="A directory of files to examine. "
                        "Directory must contain only audio files.")

    args = parser.parse_args()

    search_paths = args.dirs + args.files

    if len(search_paths) != 2:
        die("Must provide exactly two input files or directories.")

    code = 0
    # Use our matching system
    matcher = Matcher(search_paths[0], search_paths[1])
    results = matcher.match()

    for match in results:
        if not match.success:
            code = 1
            warn(match.message)
        else:
            print(match)

    return code
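For reference, a typical invocation of this entry point (hypothetical file names; assumes the module imports sys). The return value is meant to be used as the process exit code: 0 when every comparison succeeds, 1 otherwise.

    # e.g. invoked as: python audiomatch.py -f original.wav -f derived.mp3
    sys.exit(audio_matcher())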
Example #18
 def createIndex(self):
     """
     Create the color or feature indices, depending on the method.
     """
     matcher = Matcher(self.method, width=self.w, height=self.h)
     if self.method != 'BOW':
         for i in range(self.numLocations):
             matcher.setDirectory('map/' + str(i))
             if self.method != 'Color':
                 self.indices[i] = matcher.createFeatureIndex()
             else:
                 self.indices[i] = matcher.createColorIndex()
     else:
         matcher.writeIndices()
Example #19
def imageHandler(bot, message, chat_id, local_filename):
    bot.sendMessage(chat_id, "Hi, I'm processing your request")
    print("Processing request...")
    is_good_quality = QualityChecker.is_good_quality(
        Indexer.load_image(local_filename,
                           im_size=config.QUALITYCHECKER_IMSIZE))
    if not is_good_quality:
        bot.sendMessage(
            chat_id,
            "Your image is of a poor quality. Please, send me a better one")
        print("Message sent: image is of a poor quality.")
    else:
        is_shoe = ShoeDetector.classify_image(
            Indexer.load_image(local_filename,
                               im_size=config.CLASSIFIER_IM_SIZE))
        if not is_shoe:
            bot.sendMessage(
                chat_id,
                "Ops! Something went wrong... Make sure your image contains a shoe"
            )
            print("Message sent: the photo doesn't contain a shoe.")
        else:
            try:
                most_similar = Matcher.get_most_similar(
                    Indexer.load_image(local_filename))
                retrieved_images = Matcher.retrieve_items(most_similar)
                bot.sendMessage(chat_id,
                                "These are the most similar shoes I've found")
                for im in retrieved_images:
                    bot.sendImage(chat_id, config.DATASET_PATH + im, "")
                print("Most similar images sent.")
            except FeatureExtractionException:
                bot.sendMessage(
                    chat_id,
                    "I couldn't process your photo. Please, send me a better one"
                )
                print("Message sent: the photo can't be processed.")
    print("Request processed.")
Example #20
    def __init__(self, max_obj=10, history=10):
        Matcher.__init__(self)

        self.maxObjs = max_obj
        self.history = history
        self.tracked = []
        self.unmatched_tracked = []
        self.matchedPairs = []
        self.enterExit = []

        # enter/exit parameters
        self.entering = 25
        self.exiting = 135
        self.error = 5

        # exit/enter for current frame
        self.currentEnter = 0
        self.currentExit = 0
        self.change = False

        for i in range(self.maxObjs):
            self.tracked.append(deque([]))
            self.enterExit.append(deque([]))
Example #21
def audio_matcher():
    """Our main control flow."""

    parser = ArgumentParser(
        description="Compare two audio files to determine if one "
                    "was derived from the other. Supports WAVE and MP3.",
        prog="audiomatch")
    parser.add_argument("-f", action="append",
                        required=False, dest="files",
                        default=list(),
                        help="A file to examine.")
    parser.add_argument("-d", action="append",
                        required=False, dest="dirs",
                        default=list(),
                        help="A directory of files to examine. "
                             "Directory must contain only audio files.")

    args = parser.parse_args()

    search_paths = args.dirs + args.files

    if len(search_paths) != 2:
        die("Must provide exactly two input files or directories.")

    code = 0
    # Use our matching system
    matcher = Matcher(search_paths[0], search_paths[1])
    results = matcher.match()

    for match in results:
        if not match.success:
            code = 1
            warn(match.message)
        else:
            print(match)

    return code
Example #22
    def __init__(self, robot):
        self.h, self.w = 320, 240
        self.numLocations = 7
        # host = '134.173.24.116'
        # port = 5003
        # print('Waiting for Connection....')
        # self.ipad= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # self.ipad.connect((host,port))
        # print('Connected!')
        self.matcher = Matcher('BOW', None, self.h, self.w)
        self.frame = self.readImage()
        self.robot = robot

        # for tracking the image during runs
        self.imageIndex = 0
Example #23
def executeCommand(text):
    matcher = Matcher()
    command = matcher.getContext(text)
    allTaskString = ""

    if (command == Context.updateTask):
        print(1)
    elif (command == Context.getAllTask):
        allTask = getAllTask()
        # print(allTask)
        return stringAllTask(allTask)
    elif (command == Context.getRangeTimeTask):
        dates = matcher.extractDate(text)
    elif (command == Context.getSpesificTimeLeftTask):
        N = int(matcher.nDateExtractor(text))
        Endate = datetime.today() + timedelta(days=N)
        Endate = matcher.dateToString(Endate)
        allTask = getSpesificTimeLeftTask(Endate)
        return stringAllTask(allTask)
    elif (command == Context.deleteTask):
        namaMatkul = matcher.extractMatkul(text)
        jenis = matcher.extractJenis(text)
        deleteOneTask(jenis, namaMatkul)
        return ""
Example #24
 def parseVariable(self, keyword, value, line, lineNumber):
     '''Parses variable lines. eg x = [|{ content }|]'''
     if value[0] != '=':
         return handleParseError(lineNumber, line,
                                 "Assignment operator '=' not found.")
     value = value[1:].strip()
     if not value:
         return handleParseError(
             lineNumber, line, "Nothing found after assignment operator.")
     if value[0] == '{':
         if value[-1] == '}':
             self.matchers[keyword] = Matcher(value, line, lineNumber)
         else:
             return handleParseError(lineNumber, line,
                                     "failed to find closing }")
     elif value[0] == '[':
         if value[-1] == ']':
             self.actions[keyword] = Action(value, line, lineNumber)
         else:
             return handleParseError(lineNumber, line,
                                     "failed to find closing ]")
     else:
         return handleParseError(lineNumber, line,
                                 "failed to find opening brace: { or [")
Example #25
def get_results():
    """
    :return: a list of lists containing
    [num_students_in_str_1, ..., num_students_in_str_n, avg_satisfaction_of_str_1, ..., avg_satisfaction_of_str_n,
    overall_avg_satisfaction]
    """
    cakes = get_cakes_for_all_situations()
    cakes_counter = 0  # just to check progress

    for cake in cakes:
        # just to check progress :
        if PRINT_EVERYTHING and cakes_counter % 10000 == 0:
            print("   Num of lines in file: " + str(cakes_counter) + "/" +
                  str(NUM_OF_TEST_CASES) + ", progress rate: " +
                  str(100 *
                      round(float(cakes_counter / NUM_OF_TEST_CASES), 2)))

        strategy_dict = get_strategy_dict(cake)
        matcher = Matcher(ALL_COURSES, strategy_dict, CLASS_SIZE).match()
        cake += get_satisfactions(matcher)

        cakes_counter += 1  # just to check progress ..

    return cakes
Example #26
 def findInfoWithMethod(self):
     'Finds and collects all needed information'
     result = []
     articleDate = ""
     list_of_sentence = sent_tokenize(self.text)
     for sent in list_of_sentence:
         res = Matcher(sent.lower(), self.keyword)
         r = 0
         if self.method == 'optionBM':
             r = res.BMMatch()
         elif self.method == 'optionKMP':
             r = res.KMPMatch()
         elif self.method == 'optionRE':
             r = res.REMatch()
         if r > -1:
             result.append(sent)
         if articleDate == "" or articleDate == " ":
             resDate = Matcher(sent, self.keyword)
             # articleDate = resDate
     return result, articleDate
Example #27
def main():
    # maybe make it read from multiple places?
    # samples = ["CRAFT", "EAST", "USC", "GCP_lang_hints", "AWS", "GCP"]
    samples = ["GCP", "GCP_crops", "CRAFT_attn"]
    # samples = ["AWS"]
    filenames = ["{}_indo.txt".format(name) for name in samples]

    guessed_words = []
    annotations = {}
    data = {i: {} for i in range(len(filenames))}
    name_dict = {i: name for i, name in enumerate(samples)}

    for i, filename in enumerate(filenames):
        with open(filename, "r") as f:
            lines = f.readlines()
            for line in lines:
                file_path, *words = line.strip().split(",")
                words = [x for x in words if len(x) != 0]
                # Get the correct words
                n = get_filename(file_path)
                if n not in annotations:
                    img_annotations = get_annotations(n)
                    annotations[n] = img_annotations

                # i represents which of the sources it came from - CRAFT, EAST,
                # USC
                guessed_words.append((i, n, words))

    possible_true_words_dict = None
    for i, n, words in guessed_words:
        # i represents the source
        # get the real annotations
        annotation = annotations[n]
        # Skip the annotation
        if skip_annotation(annotation):
            continue

        matcher = Matcher(annotation,
                          words,
                          possible_true_words_dict=possible_true_words_dict)

        possible_true_words_dict = matcher.possible_true_words_dict

        word_n = len(annotation)
        if word_n == 0:
            # TODO remove this, if we end up testing with no text files
            continue

        if annotations:
            perfect_matches = matcher.get_perfect_matches()
            ign_symbols = matcher.get_perfect_matches_ignoring_symbols()
            perf = len(perfect_matches) / word_n * 100
            ign = len(ign_symbols) / word_n * 100
            # vocab_matched = matcher.get_vocab_matches()
            # vcb = len(vocab_matched) / word_n * 100
            # mismatched = matcher.get_imperfect_matches(1)
            # msm = len(mismatched)/word_n * 100
            # percent_matched = perf + ign  + vcb# maybe lets ignore the mismatched
            percent_matched = perf + ign
            # ones?
            # percent_matched = perf
            data[i][n] = percent_matched
            unmatched = matcher.get_number_unmatched()

            if unmatched > 4:  # ignore all but craft
                # if i == 3: # ignore all but craft
                print(name_dict[i], n)
                print("ANNOTATED", *matcher.get_unmatched_annotated())
                print("DETECTED", *matcher.get_unmatched_detected())
                print(">", matcher.char_level_accuracy)
                print("=================================")
            # print(n, perf, ign, unmatched)

    for k in data:
        avg = sum(data[k].values()) / len(data[k])
        print(name_dict[k], '\t', round(avg, 2))
Example #28
from Calibrator import Calibrator
from VideoStream import VideoStream
from Matcher import Matcher
from Stitcher import Stitcher
import time
import cv2


# obtain calibrate matrices
calibrator = Calibrator()
calibrator.calibrate()
matcher = Matcher()


# initialize video streams
no_of_streams = 2
vss = [VideoStream(calibrator, src=1), VideoStream(calibrator, src=0)]
calibrator.calculate_optimal_camera_matrix(vss[0].read().shape[1], vss[0].read().shape[0])

# initialize homographies
homographies = []
for i in range(no_of_streams - 1):
    homographies.append(matcher.match(vss[i+1].frame, vss[i].frame))

vss_frames_list = []
for i in range(no_of_streams):
    vss_frames_list.append(vss[i].read())

stitcher = Stitcher(vss_frames_list, homographies)

vss[0].start()
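A plausible main loop for this setup, sketched under the assumption that frames keep arriving and that set_images, stitch_all and result behave as in the Stitcher shown in Example #31:

    while True:
        frames = [vs.read() for vs in vss]
        stitcher.set_images(frames)
        stitcher.stitch_all()
        cv2.imshow('panorama', stitcher.result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break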
Example #29
    #cv2.imshow("pattern_gray", imutils.resize(pattern_gray,width=600))
    pattcnt = 0
    for patt in pattlst:
        pattcnt += 1
        pattname = os.path.split(patt)[-1].split('.')[0]
        print("Processing pattern ({}) {}/{}".format(pattname, pattcnt,
                                                     len(pattlst)))
        pattern = cv2.imread(patt, cv2.IMREAD_UNCHANGED)
        #    pattern2 = cv2.resize(pattern, (600,600))
        pattern2 = pattern.copy()
        pattern2 = convert_rgba2rgb(pattern2)
        if blur_images:
            pattern2 = cv2.GaussianBlur(pattern2, BLER_KERNEL_SIZE, 0)

        if useMatcher:
            matcher = Matcher()
            (matches, status, vis) = matcher.match([image2, pattern2],
                                                   ratio=LRatio,
                                                   showMatches=True,
                                                   useRootSIFT=useRootSIFT)
#      match_ctr = np.sum(status)
#      (kpsA, featuresA) = matcher.detectAndDescribe(image2, useRootSIFT=useRootSIFT)
#      (kpsB, featuresB) = matcher.detectAndDescribe(pattern2, useRootSIFT=useRootSIFT)
        else:
            matches, vis = matchImages(pattern2, image2, useFlann=useFlann)

        if matches is None:
            match_ctr = 0
        else:
            match_ctr = len(matches)
Example #30
from Matcher import Matcher
from json import dumps
from sys import argv

indexd_location = argv[1]
m = Matcher(indexd_location)
print(dumps(m.run()))
Example #31
class Stitcher:
    def __init__(self, frames_list, Hs):
        self.images = frames_list
        self.count = len(self.images)
        self.left_list, self.right_list, self.center_im = [], [], None
        self.matcher_obj = Matcher()
        self.prepare_lists()
        self.Hs = Hs

    def prepare_lists(self):
        self.centerIdx = self.count - 1
        self.center_im = self.images[self.centerIdx]
        for i in range(self.count):
            if (i <= self.centerIdx):
                self.left_list.append(self.images[i])
            else:
                self.right_list.append(self.images[i])

    def leftshift(self):
        a = self.left_list[0]
        tmp = None
        for i in range(1, len(self.left_list)):
            b = self.left_list[i]
            H = self.Hs[i-1]
            # since we are stitching backwards we need the inverse
            xh = np.linalg.inv(H)

            # to calculate the dimension
            f1 = np.dot(xh, np.array([0, 0, 1]))
            f1 = f1 / f1[-1]
            xh[0][-1] += abs(f1[0])
            xh[1][-1] += abs(f1[1])
            ds = np.dot(xh, np.array([a.shape[1], a.shape[0], 1]))
            offsety = abs(int(f1[1]))
            offsetx = abs(int(f1[0]))
            dsize = (int(ds[0]) + offsetx, int(ds[1]) + offsety)

            # now warp
            tmp = cv2.warpPerspective(a, xh, dsize)
            print(offsetx, offsety)

            tmp[offsety:b.shape[0] + offsety, offsetx:b.shape[1] + offsetx] = b
            a = tmp

        self.result = tmp

    def rightshift(self):
        for each in self.right_list:
            H = self.matcher_obj.match(each, self.leftImage)

            # # dimension
            # f1 = np.dot(H, np.array([0, 0, 1]))
            # f1 = f1 / f1[-1]
            # H[0][-1] += abs(f1[0])
            # H[1][-1] += abs(f1[1])
            # ds = np.dot(H, np.array([each.shape[1], each.shape[0], 1]))
            # offsety = abs(int(f1[1]))
            # offsetx = abs(int(f1[0]))
            # dsize = (int(ds[0]) + offsetx, int(ds[1]) + offsety)

            # to calculate dimensions of the warped image
            txyz = np.dot(H, np.array([each.shape[1], each.shape[0], 1]))
            txyz = txyz / txyz[-1]
            dsize = (int(txyz[0]) + self.leftImage.shape[1], int(txyz[1]) + self.leftImage.shape[0])

            # now warp
            tmp = cv2.warpPerspective(each, H, dsize)
            # cv2.imshow('X', tmp)
            # cv2.waitKey(0)

            # tmp[offsety:self.leftImage.shape[0] + offsety, offsetx:self.leftImage.shape[1] + offsetx] = self.leftImage

            # # to overlap
            # self.leftImage[self.leftImage.shape[0]:int(txyz[1]) + self.leftImage.shape[0],
            # self.leftImage.shape[1]:int(txyz[0]) + self.leftImage.shape[1]] = tmp

    def stitch_all(self):
        img1 = self.images[0]
        result = None
        for i in range(1,len(self.images)):
            img2 = self.images[i]
            result = cv2.warpPerspective(img1, self.Hs[i-1], (img1.shape[1] + img2.shape[1], img1.shape[0]))
            result[0:img2.shape[0], 0:img2.shape[1]] = img2
            img1 = result
        self.result = result

    def set_images(self, images):
        self.images = images
        self.left_list = images

#
# img1 = cv2.imread('./images/WC_1.jpg')
# img1 = imutils.resize(img1, width=400)
# img2 = cv2.imread('./images/WC_2.jpg')
# img2 = imutils.resize(img2, width=400)
# img3 = cv2.imread('./images/WC_3.jpg')
# img3 = imutils.resize(img3, width=400)
# img4 = cv2.imread('./images/WC_4.jpg')
# img4 = imutils.resize(img4, width=400)
#
# Hs = []
#
# stitcher  = Stitcher([img1, img2, img3, img4], None)
# stitcher.leftshift()
# cv2.imshow(' Result', stitcher.result)
# cv2.waitKey(0)
Example #32
 def matches_axiom(self, expression: Node):
     for schema in self.axiom_schemas:
         if Matcher().matches(expression, schema):
             return True
     return False
Example #33
            # the new resume is stored either way; matching and scoring only
            # run when there is already content to match against
            ResumeProcessor.construct(newResume)
            toPrint = encodeClassToJson(newResume)
            #print(type(newResume.getContent()))
            print(toPrint)
            cur.execute("INSERT INTO resume VALUES (%s,%s,%s,%s,%s,%s)", (toPrint, 'f', contentName, name, hpNumber, email))
            con.commit()
            if numRows != 0:
                f = Facade()
                matcher = Matcher(f)
                scorer = Scorer(f)
                cur.execute("SELECT isonce_resume FROM once")
                rows = cur.fetchall()
                for row in rows:
                    if row[0] == 't':
                        matcher.matchAll(0)
                        scorer.calculateScore()
                    else:
                        cur.execute("UPDATE once SET isonce_resume=%s", ('f',))
                        matcher.matchAll(1)
                        scorer.calculateScore()
            con.close()

Example #34
	def testSingleParenthesis(self):
		self.assertFalse(Matcher.isMatching("("))
Example #35
	def testOnePairOfBraces(self):
		self.assertTrue(Matcher.isMatching("{}"))
Example #36
	def testOnePairOfBrackets(self):
		self.assertTrue(Matcher.isMatching("[]"))
Example #37
class VisualOdometry(object):
    """ The **VisualOdometry** class contains all the required methods to
    recover the motion of the camera and the structure of the scene.

    This class has as an attribute a Dataset class instance and a Matcher
    class instance, in order to make its use easier. The algorithms implemented
    here (most of all) follow those explained in the excellent book *Multiple
    View Geometry*, written by R.Hartley and A.Zisserman (HZ_)

    **Attributes**:

        .. data:: F

           The estimated Fundamental_  matrix. Numpy ndarray (3x3).

        .. data:: E

           The estimated Essential_  matrix. Numpy ndarray (3x3).

        .. data:: H

           The estimated planar Homography_ matrix. Numpy ndarray(3x3)

        .. data:: right_e

           Right Epipole

        .. data:: left_e

           Left epipole

        .. data:: cam

           The Camera instance (**for the current frame**)

           .. seealso::

               Class :py:class:`Camera.Camera`

        .. data:: structure

           3D triangulated points (Numpy ndarray nx3)

        .. data:: mask

           Numpy array. Every element of this array which is zero is supposed
           to be an outlier. This attribute is used by the
           *FindFundamentalRansac* and *FindEssentialRansac* methods, and
           can be used to reject the KeyPoint outliers that remain after
           the filtering process.

        .. data:: index

           This parameter counts the number of iterations already done by the
           system. **Whenever we iterate over the dataset (i.e, read a new image
           and recover the structure, etc) we have to increase this parameter
           by two so that we can index the camera matrices correctly**. For
           example, at the beginning it will be 0, so the first camera will be
           stored in the first position of the list of cameras, and the second
           one in the second position (0 + 1). Next, we read a new image (so the
           new one will be in the *image_2* attribute of the Dataset instance,
           and the last one will be stored in the *image_1* attribute), and
           increase the index by two, so now the *previous frame* camera matrix
           will be stored in the third position (2) and the *current frame* in
           the fourth position (3), and so on.

        .. data:: kitti

           Instance of the Dataset class.

           .. seealso::

               :py:mod:`Dataset`

        .. data:: matcher

           Instance of the matcher class

           .. seealso::

               :py:mod:`Matcher`

        .. data:: scene

           Instance of the Map class. The scene as seen by the camera.

           .. seealso::

               :py:class:`Map.Map`

    **Constructor**:

        The constructor has two optional parameters:

            1. The path to the dataset. If no path is provided, the
               current path will be used.

            2. The Matcher parameters. If no parameters are provided, the
               system will use ORB as detector and a Brute-Force based matcher.

               .. seealso::

                   Class :py:mod:`Matcher`

    .. _HZ: http://www.robots.ox.ac.uk/~vgg/hzbook/
    .. _Fundamental: https://en.wikipedia.org/wiki/Fundamental_matrix_(computer_vision)
    .. _Essential: https://en.wikipedia.org/wiki/Essential_matrix
    .. _Homography: https://en.wikipedia.org/wiki/Homography_(computer_vision)
    .. _RANSAC: http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
    .. _findFundamentalMat: http://docs.opencv.org/3.0-beta/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#findfundamentalmat
    .. _Nister: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.86.8769&rep=rep1&type=pdf


    """
    def __init__(self, params=None, path=None):
        """ Constructor

        """
        if params is None:
            params = dict(detector='orb', matcher='bf')
        if path is None:
            path = os.getcwd()
        self.matcher = Matcher(params)
        self.kitti = Dataset(path)
        self.F = None
        self.mask = None
        self.H = None
        self.right_e = None
        self.left_e = None
        self.cam = Camera()
        self.E = None  # Essential matrix
        self.index = 0
        self.scene = Map()
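
    # A minimal usage sketch (hypothetical dataset path; the params mirror the
    # defaults above):
    #   vo = VisualOdometry(params=dict(detector='orb', matcher='bf'),
    #                       path='/path/to/kitti/sequence')
    #   vo.init_reconstruction(optimize=True)
    #   mask, lk_prev_points, lk_next_points = vo.track_local_map()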

    def init_reconstruction(self, optimize=True):
        """ Performs the first steps of the reconstruction.

        The first steps are:

            1. Read the two first images and match them.
            2. Get an initial estimate of the Fundamental matrix and reject
               outliers.
            3. Reestimate the Fundamental matrix without outliers.
            4. Triangulate the image points up to a projective transformation.
            5. Optimize the Fundamental matrix by minimizing the reprojection
               error.
            6. Triangulate the image points up to a scale factor.
            7. Filter out the 3D points behind the camera and too far from it.
            8. Init the map.

        :param optimize: If True performs nonlinear optimization of :math:`F`
        :type optimize: Boolean

        """
        # 1
        self.kitti.read_image()
        self.kitti.read_image()
        self.matcher.match(self.kitti.image_1, self.kitti.image_2)
        # 2
        kp1 = self.matcher.kp_list_to_np(self.matcher.good_kp1)
        kp2 = self.matcher.kp_list_to_np(self.matcher.good_kp2)
        self.FindFundamentalRansac(kp1, kp2, 'RANSAC')
        self.reject_outliers()
        kp1 = self.matcher.kp_list_to_np(self.matcher.good_kp1)
        kp2 = self.matcher.kp_list_to_np(self.matcher.good_kp2)
        # 3
        self.FindFundamentalRansac(kp1, kp2, 'RANSAC')
        if optimize:
            # 4
            self.structure = self.triangulate(kp1, kp2)
            # 5
            sol, F = self.optimize_F(kp1, kp2, self.F, self.structure)
            self.F = F
        # 6
        self.structure = self.triangulate(kp1, kp2, euclidean=True)
        # 7
        self.structure, mask = self.filter_z(self.structure)
        kp1 = kp1[mask]
        kp2 = kp2[mask]
        desc1 = np.asarray(self.matcher.good_desc1)[mask]
        desc2 = np.asarray(self.matcher.good_desc2)[mask]
        # 8
        cam1 = Camera()
        cam1.set_index(self.index)
        cam1.set_P(self.create_P1())
        # cam1.is_keyframe()
        cam1.set_points(kp1)
        cam1.set_descriptors(desc1)
        self.cam.set_index(self.index + 1)
        self.cam.set_points(kp2)
        self.cam.set_descriptors(desc2)
        # 9
        for i in range(len(self.structure)):
            descriptors = np.vstack((desc1[i], desc2[i]))
            points = np.vstack((kp1[i], kp2[i]))
            kp_properties = {
                'octave': self.matcher.good_kp2[i].octave,
                'angle': self.matcher.good_kp2[i].angle,
                'diameter': self.matcher.good_kp2[i].size
            }
            self.scene.add_mappoint(
                MapPoint(self.structure[i, :], [cam1, self.cam],
                         points,
                         descriptors,
                         properties=kp_properties))
        self.scene.add_camera(cam1)
        self.scene.add_camera(self.cam)
        self.cam.is_keyframe()
        self.index += 1

    def track_local_map(self):
        """ Tracks the local map.

        This method use the *index* attribute to retrieve the local map points
        and tries to track them in successive frames. The algorithm is as
        follows:

            1. Using the Lucas-Kanade algorithm track the local map points in
               the new frame.
            2. If the tracked map points are less than 50, then exit and
               perform again the first step of the main algorithm.
               (see :py:func:`VisualOdometry.VisualOdometry.init_reconstruction`)
            3. If we have been tracking the local map for more than 10 frames
               then exit this method and perform again the first step of the
               main algorithm.
            4. With the tracked map points estimate the Fundamental matrix, and
               from F the motion of the camera.
            5. Project non-tracked map points and look for a correspondence
               in the new frame, within a image patch centered in
               its coordinates.
            6. Using the map points tracked in 1 and 5 reestimate the
               Fundamental matrix.
            7. Perform bundle adjustment (motion only) using the tracked map
               points.

        """
        self.kitti.read_image()
        previous_image = self.kitti.image_1.copy()
        points = self.cam.points
        for i in range(4):
            # 1
            mask, lk_prev_points, lk_next_points = self.matcher.lktracker(
                previous_image, self.kitti.image_2, points)
            print("Tracked points: {}".format(len(lk_next_points)))
            # 2
            # 3
            # 4
            F = self.FindFundamentalRansac(lk_next_points, points[mask])
            E = self.E_from_F(F)
            pts1 = (np.reshape(points[mask], (len(points[mask]), 2))).T
            pts2 = (np.reshape(lk_next_points, (len(lk_next_points), 2))).T
            R, t = self.get_pose(pts1.T, pts2.T, self.cam.K, E)
            cam = Camera()
            cam.set_R(R)
            cam.set_t(t)
            cam.Rt2P(inplace=True)
            # 5
            self.scene.add_camera(cam)
            projected_map = self.scene.project_local_map(self.index + 1)
            mask = ((projected_map[:, 0] > 0) & (projected_map[:, 0] < 1230) &
                    (projected_map[:, 1] > 0) & (projected_map[:, 1] < 360))
            for point in projected_map[mask]:
                start = np.array([point[0], point[1]])
                size = np.array([100, 50])
                roi = self.kitti.crop_image(start,
                                            size,
                                            self.kitti.image_2,
                                            center=True)
            print("ROI: {}".format(roi))

            self.kitti.read_image()
        return mask, lk_prev_points, lk_next_points

    def FindFundamentalRansac(self, kpts1, kpts2, method='RANSAC', tol=1):
        """ Computes the Fundamental matrix from two set of KeyPoints, using
        a RANSAC_ scheme.

        This method calls the OpenCV findFundamentalMat_ function. Note that
        in order to compute the movement from the previous frame to the current
        one we have to invert the parameters *kpts1* (points in the previous
        frame) and *kpts2* (points in the current frame).


        :param kpts1: KeyPoints from the previous frame
        :param kpts2: KeyPoints from the current frame

        :param method: Method used by the OpenCV function to compute the
                       Fundamental matrix. It can take the following values:

                           * SEVEN_POINT, 7-Point algorithm
                           * EIGHT_POINT, 8-Point algorithm
                           * RANSAC, 8-Point or 7-Point (depending on the number
                             of points provided) algorithm within a RANSAC
                             scheme
                           * LMEDS, Least Median Squares algorithm

                     For more information about these algorithms see HZ_.

        :param tol: Pixel tolerance used by the RANSAC algorithm. By default 1.
        :type kpts1: Numpy ndarray nx2 (n is the number of KeyPoints)
        :type kpts2: Numpy ndarray nx2 (n is the number of KeyPoints)
        :type method: String
        :type tol: Integer

        :returns: The estimated Fundamental matrix (3x3) and an output array of
                  the same length as the input KeyPoints. Every element of this
                  array which is set to zero means that it is an **outlier**.
        :rtype: Numpy ndarray

        """
        algorithms = dict(SEVEN_POINT=cv2.FM_7POINT,
                          EIGHT_POINT=cv2.FM_8POINT,
                          RANSAC=cv2.FM_RANSAC,
                          LMEDS=cv2.FM_LMEDS)
        kpts1 = np.float32(kpts1)
        kpts2 = np.float32(kpts2)
        if method == 'RANSAC':
            try:
                self.F, self.mask = cv2.findFundamentalMat(
                    kpts2, kpts1, algorithms[method], tol)
                return self.F
            except Exception as e:
                print(type(e))  # The exception instance
                print(e)  # Exception string

        else:
            try:
                self.F, self.mask = cv2.findFundamentalMat(
                    kpts2, kpts1, algorithms[method])
                return self.F
            except Exception as e:
                print(type(e))  # The exception instance
                print(e)  # Exception string

    def reject_outliers(self):
        """ Rejects the KeyPoints outliers.

        This method removes those KeyPoints marked as outliers by the mask
        returned by the *FindEssentialRansac* and *FindFundamentalRansac*
        methods.

        """
        if self.mask is None:
            pass
        else:
            msk_lst = self.mask.tolist()
            self.matcher.good_kp1 = [
                d for d, s in zip(self.matcher.good_kp1, msk_lst) if s[0] == 1
            ]
            self.matcher.good_desc1 = [
                d for d, s in zip(self.matcher.good_desc1, msk_lst)
                if s[0] == 1
            ]
            self.matcher.good_kp2 = [
                d for d, s in zip(self.matcher.good_kp2, msk_lst) if s[0] == 1
            ]
            self.matcher.good_desc2 = [
                d for d, s in zip(self.matcher.good_desc2, msk_lst)
                if s[0] == 1
            ]
            self.matcher.good_matches = [
                d for d, s in zip(self.matcher.good_matches, msk_lst)
                if s[0] == 1
            ]

    def draw_epilines(self, img1, img2, lines, pts1, pts2):
        """ Draw epilines in img1 for the points in img2 and viceversa

        :param img1: First image
        :param img2: Second image
        :param lines: Corresponding epilines
        :param pts1: KeyPoints in the first image (Integer values)
        :param pts2: KeyPoints in the second image (Integer values)
        :type img1: Numpy ndarray
        :type img2: Numpy ndarray
        :type lines: Numpy ndarray
        :type pts1: Numpy ndarray
        :type pts2: Numpy ndarray

        :returns: Two new images
        :rtype: Numpy ndarray


        """
        r, c, p = img1.shape
        # The next two lines don't work because the Kitti images
        # don't have color, so we can't convert them to BGR
        # img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
        # img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
        for r, pt1, pt2 in zip(lines, pts1, pts2):
            color = tuple(np.random.randint(0, 255, 3).tolist())
            x0, y0 = map(int, [0, -r[2] / r[1]])
            x1, y1 = map(int, [c, -(r[2] + r[0] * c) / r[1]])
            img1 = cv2.line(img1, (x0, y0), (x1, y1), color, 1)
            img1 = cv2.circle(img1, tuple(pt1.astype(int)), 5, color, -1)
            img2 = cv2.circle(img2, tuple(pt2.astype(int)), 5, color, -1)
        return img1, img2

    def find_epilines(self, pts):
        """ Find epilines corresponding to points in an image (where we have
        extracted *pts*) ready to plot in the other image.

        :param pts: KeyPoints of the image for which we are drawing its
                    epilines in the other image.
        :type pts: Numpy ndarray
        :returns: The epilines
        :rtype: Numpy ndarray
        """
        lines = cv2.computeCorrespondEpilines(pts.reshape(-1, 1, 2), 2, self.F)
        lines = lines.reshape(-1, 3)
        return lines
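
    # Usage sketch (hypothetical point arrays): draw on img1 the epilines of
    # the points extracted from img2:
    #   lines = self.find_epilines(pts2)
    #   img1, img2 = self.draw_epilines(img1, img2, lines, pts1, pts2)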

    def FindEssentialRansac(self, kpts1, kpts2):
        # Compute Essential matrix from a set of corresponding points
        # @param kpts1: list of keypoints of the previous frame
        # @param kpts2: list of keypoints of the current frame

        kpts1 = np.float32(kpts1)
        kpts2 = np.float32(kpts2)

        # findEssentialMat takes as arguments, apart from the keypoints of both
        # images, the focal length and the principal point. Looking at the
        # source code of this function
        # (https://github.com/Itseez/opencv/blob/master/modules/calib3d/src/five-point.cpp)
        # I realized that these parameters are fed to the function because it
        # internally creates the camera matrix, so they must be in pixel
        # coordinates. Hence, we take them from the already known camera matrix:

        focal = 3.37
        pp = (2.85738, 0.8681)

        # pp = (self.K[0][2], self.K[1][2])

        self.E, self.maskE = cv2.findEssentialMat(kpts2, kpts1, focal, pp,
                                                  cv2.RANSAC, 0.999, 1.0,
                                                  self.maskE)

    def FindHomographyRansac(self, kpts1, kpts2):
        # Find the homography between two images given corresponding points
        kpts1 = np.float32(kpts1)
        kpts2 = np.float32(kpts2)

        self.H, self.maskH = cv2.findHomography(kpts1, kpts2, cv2.RANSAC, 1.0)

    def get_epipole(self, F=None):
        """ Computes the **right** epipole (:math:`\\mathbf{e}`). As it is the
        right null-vector of F, it satisfies

        .. math::

            F\\mathbf{e} = \\mathbf{0}

        If we want to compute the **left** epipole (:math:`\\mathbf{e'}`), then
        pass :math:`F^{t}`, because it is the left null-vector of F:

        .. math::

            F^{t}\\mathbf{e'} = \\mathbf{0}


        :param F: Fundamental matrix associated with the required epipoles.
                  If None, (by default) then it uses the class *F* attribute.
        :type F: Numpy 3x3 ndarray
        :returns: The right epipole associated with F
        :rtype: Numpy 1x3 ndarray

        """
        U, S, V = linalg.svd(F)
        e = V[-1]
        e = e / e[2]
        return e

    def skew(self, a):
        """ Return the matrix :math:`A` such that :math:`\\mathbf{a}` is its
        null-vector (right or left), i.e., it is a 3x3 *skew-symmetric matrix*:

        .. math::

            A\\mathbf{a} = \\mathbf{0}

        and

        .. math::

            A^{t}\\mathbf{a} = \\mathbf{0}

        Its form is:

            ::

                    [0  -a3  a2]
                A = [a3  0  -a1]
                    [-a2 a1   0]

        This matrix is usually denoted as :math:`[\\mathbf{a}]_x`.

        :param a: Vector

        .. math::

            \\left(\\begin{matrix} a_1 & a_2 & a_3 \\end{matrix}\\right)^t

        :type a: Numpy 1x3 ndarray
        :returns: The 3x3 skew-symmetric matrix associated with
                  :math:`\\mathbf{a}`.
        :rtype: Numpy 3x3 ndarray

        """
        return np.array([[0, -a[2], a[1]], [a[2], 0, -a[0]], [-a[1], a[0], 0]])
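
    # Quick sanity check (a sketch): the matrix built by skew annihilates its
    # own vector, e.g. for a = np.array([1., 2., 3.]) the expression
    # np.allclose(self.skew(a).dot(a), 0) holds.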

    def P_from_F(self, F=None):
        """ Computes the second camera matrix (*current frame*) from the
        Fundamental matrix. Assuming the canonical form of camera matrices, i.e,
        the first matrix is of the simple form :math:`[I|\\mathbf{0}]`, where
        :math:`I` is the 3x3 identity matrix and :math:`\\mathbf{0}` a null
        3-vector, the second camera matrix :math:`P'` can be computed as
        follows:

        .. math::

            P' = [[\\mathbf{e'}]_x F|\\mathbf{e'}]

        Recall that we can only recover the camera matrix :math:`P'` up to a
        projective transformation. This means that the mapping between the
        Fundamental matrix :math:`F` and the pair of camera matrices :math:`P`,
        :math:`P'` **is not injective (one-to-one)**. See HZ_ chapter 9 for more
        information about this.

        :param F: Fundamental matrix. If None, then use the internal F
                  parameter.
        :type F: Numpy 3x3 ndarray

        :returns: The computed second camera matrix :math:`P'`.
        :rtype: Numpy 3x4 ndarray.


        """
        if F is None:
            F = self.F
        e = self.get_epipole(F.T)  # Left epipole

        skew_e = self.skew(e)
        return (np.vstack((np.dot(skew_e, F.T).T, e)).T)

    def create_P1(self):
        """ Create a camera matrix of the form:

            ::

                    [1  0  0  0]
                P = [0  1  0  0]
                    [0  0  1  0]

        :returns: Camera matrix with no rotation and no translation components.
        :rtype: Numpy 3x4 ndarray
        """
        P1 = (np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]))
        return P1.astype(float)

    def optimal_triangulation(self, kpts1, kpts2, P1=None, P2=None, F=None):
        """This method computes the structure of the scene given the image
        coordinates of a 3D point :math:`\\mathbf{X}` in two views and the
        camera matrices of those views.

        As Hartley and Zisserman said in their book (HZ_), *naive triangulation
        by back-projecting rays from measured image points will fail, because
        the rays will not intersect in general, due to errors in the measured
        image coordinates*. In order to triangulate properly the image points
        it is necessary to estimate a best solution for the point in
        :math:`\\mathbb{R}^3`.

        The method proposed in HZ_, which is **projective-invariant**, consists
        in estimating a 3D point :math:`\\hat{\\mathbf{X}}` which exactly
        satisfies the supplied camera geometry (i.e, the given camera matrices),
        so it projects as

        .. math::

            \\hat{\\mathbf{x}} = P\\hat{\\mathbf{X}}

        .. math::

            \\hat{\\mathbf{x}}' = P'\\hat{\\mathbf{X}}

        and the aim is to estimate :math:`\\hat{\\mathbf{X}}` from the image
        measurements :math:`\\mathbf{x}` and :math:`\\mathbf{x}'`. The MLE,
        under the assumption of Gaussian noise is given by the point
        :math:`\\hat{\\mathbf{X}}` that minimizes the **reprojection error**

        .. math::

            \\epsilon(\\mathbf{x}, \\mathbf{x}') = d(\\mathbf{x},
                                          \\hat{\\mathbf{x}})^2 + d(\\mathbf{x}'
                                           ,\\hat{\\mathbf{x}}')^2

        subject to

        .. math::

            \\hat{\\mathbf{x}}'^TF\\hat{\\mathbf{x}} = 0

        where :math:`d(*,*)` is the Euclidean distance between the points.

        .. image:: ../Images/triangulation.png

        So, the proposed algorithm by Hartley and Zisserman in their book is
        first to find the corrected image points :math:`\\hat{\\mathbf{x}}` and
        :math:`\\hat{\\mathbf{x}}'` minimizing :math:`\\epsilon(\\mathbf{x},
        \\mathbf{x}')` and then compute :math:`\\hat{\\mathbf{X}}'` using the
        DLT triangulation method (see HZ_ chapter 12).

        :param kpts1: Measured image points in the first image,
                      :math:`\\mathbf{x}`.
        :param kpts2: Measured image points in the second image,
                      :math:`\\mathbf{x}'`.
        :param P1: First camera, :math:`P`.
        :param P2: Second camera, :math:`P'`.
        :param F: Fundamental matrix.
        :type kpts1: Numpy nx2 ndarray
        :type kpts2: Numpy nx2 ndarray
        :type P1: Numpy 3x4 ndarray
        :type P2: Numpy 3x4 ndarray
        :type F: Numpy 3x3 ndarray

        :returns: The two view scene structure :math:`\\hat{\\mathbf{X}}` and
                  the corrected image points :math:`\\hat{\\mathbf{x}}` and
                  :math:`\\hat{\\mathbf{x}}'`.
        :rtype: * :math:`\\hat{\\mathbf{X}}` :math:`\\rightarrow`  Numpy nx3 ndarray
                * :math:`\\hat{\\mathbf{x}}` and :math:`\\hat{\\mathbf{x}}'`
                  :math:`\\rightarrow` Numpy nx2 ndarray.

        """

        kpts1 = np.float32(kpts1)  # Points in the first camera
        kpts2 = np.float32(kpts2)  # Points in the second camera

        # 3D Matrix : [kpts1[0] kpts[1]... kpts[n]]

        pt1 = np.reshape(kpts1, (1, len(kpts1), 2))

        pt2 = np.reshape(kpts2, (1, len(kpts2), 2))

        new_points1, new_points2 = cv2.correctMatches(self.F, pt2, pt1)

        self.correctedkpts1 = new_points1
        self.correctedkpts2 = new_points2

        # Transform to a 2D Matrix: 2xn

        kpts1 = (np.reshape(new_points1, (len(kpts1), 2))).T
        kpts2 = (np.reshape(new_points2, (len(kpts2), 2))).T

        #print(np.shape(kpts1))

        points3D = cv2.triangulatePoints(self.cam1.P, self.cam2.P, kpts2,
                                         kpts1)

        # Normalize points: [x, y, z, 1]
        self.structure = points3D / points3D[3]

        # The structure is a 4 x n matrix; individual points are selected
        # as self.structure[:, i].
        return self.structure, self.correctedkpts1, self.correctedkpts2

    def opt_triangulation(self, x1, x2, P1, P2):
        # For each given point correspondence x1[i] <-> x2[i], and a
        # fundamental matrix F, computes the corrected correspondences
        # new_points1[i] <-> new_points2[i] that minimize the geometric error
        # d(x1[i], new_points1[i])^2 + d(x2[i], new_points2[i])^2,
        # subject to the epipolar constraint new_points2^T * F * new_points1 = 0.
        # Here we are using OpenCV's function correctMatches.

        # @param x1: points in the first camera, list of (x, y) vectors
        # @param x2: points in the second camera
        # @param P1: projection matrix of the first camera
        # @param P2: projection matrix of the second camera
        # @return points3d: structure of the scene, 4 x n homogeneous matrix
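        #
        # Sketch (hypothetical arrays; the result is in homogeneous form):
        #     X = vo.opt_triangulation(x1, x2, vo.cam1.P, vo.cam2.P)
        #     X_euclid = X[:3] / X[3]   # 3 x n inhomogeneous points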

        x1 = np.float32(x1)  # Inhomogeneous
        x2 = np.float32(x2)

        # 3D matrix: [x1[0] x1[1] ... x1[n]]

        x1 = np.reshape(x1, (1, len(x1), 2))

        x2 = np.reshape(x2, (1, len(x2), 2))

        self.correctedkpts1, self.correctedkpts2 = cv2.correctMatches(
            self.F, x1, x2)
        # Now, reshape to n x 2 shape
        self.correctedkpts1 = self.correctedkpts1[0]
        self.correctedkpts2 = self.correctedkpts2[0]
        # and make homogeneous
        x1 = self.make_homog(np.transpose(self.correctedkpts1))
        x2 = self.make_homog(np.transpose(self.correctedkpts2))

        # Triangulate
        # This function needs as arguments the coordinates of the keypoints
        # (form 3 x n) and the projection matrices

        points3d = self.triangulate_list(x1, x2, P1, P2)

        self.structure = points3d  # 4 x n homogeneous matrix

        return points3d

    def triangulate_point(self, x1, x2, P1, P2):
        # Point pair triangulation from a homogeneous least-squares solution.
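        # Build the 6x6 homogeneous system M [X; l1; l2] = 0, where the first
        # three rows encode P1 X = l1 x1 and the last three P2 X = l2 x2, then
        # take the right singular vector of the smallest singular value.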
        M = np.zeros((6, 6))
        M[:3, :4] = P1
        M[3:, :4] = P2
        M[:3, 4] = -x1
        M[3:, 5] = -x2

        U, S, V = linalg.svd(M)
        X = V[-1, :4]

        return X / X[3]

    def triangulate_list(self, x1, x2, P1, P2):
        # Two view triangulation of points in homogeneous coordinates (several)

        n = x1.shape[1]
        if x2.shape[1] != n:
            raise ValueError("Number of points don't match")

        X = [
            self.triangulate_point(x1[:, i], x2[:, i], P1, P2)
            for i in range(n)
        ]
        return np.array(X).T

    def make_homog(self, points):
        """ Convert points to homogeneus form.

        This method appends one row (fill of ones) to the passed matrix.

        :param points: Matrix of points (2D or 3D) in column form, i.e,
                       the shape of the matrix must be (2 or 3, n), where
                       n is the number of points.
        :type points: Numpy ndarray
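
        Example (a small sketch)::

            pts = np.zeros((2, 10))      # ten 2D points in column form
            pts_h = vo.make_homog(pts)   # shape (3, 10); last row is all ones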

        """
        return np.vstack((points, np.ones((1, points.shape[1]))))

    def triangulate(self, kpts1, kpts2, F=None, euclidean=False):
        """ Triangulate 3D points from image points in two views.

        This is the linear triangulation method, which is not an optimal method.
        See chapter 12 of HZ_ for more details.

        If the *euclidean* parameter is True, then the method reconstructs the
        scene up to a similarity transformation. In order to achieve this, it
        internally computes the Essential matrix from the Fundamental one,
        recovers the pose :math:`[R|\\mathbf{t}]` and forms the camera
        projection matrix :math:`P'` as

        .. math::

            P' = K[R|\\mathbf{t}]

        The first camera matrix is also multiplied by the camera
        calibration matrix:

        .. math::

            P = K[I|\\mathbf{0}]

        Otherwise, the camera matrices are computed as:

        .. math::

            P' = [[\\mathbf{e'}]_xF|\\mathbf{e}']

        .. math::

            P = [I|\\mathbf{0}]

        and the reconstruction is up to an arbitrary projective transformation.

        .. note::

            If we are performing a reconstruction up to a similarity
            transformation we can filter out those points that don't pass the
            cheirality check by removing the 3D points
            :math:`\\mathbf{X}_i` for which the :math:`Z` coordinate is negative
            (i.e., those points that are projected behind the camera).

            If the reconstruction is only up to a projective transformation,
            then it is possible that all the triangulated points lie behind
            the camera, so this check does not apply.

        .. note::

            The method sets the rotation matrix :math:`R` and translation
            vector :math:`\\mathbf{t}` of the internal camera object (**which is
            associated with the second frame**).

        The method normalizes the calculated 3D points :math:`\\mathbf{X}`
        internally.

        :param kpts1: Image points for the first frame, :math:`\\mathbf{x}`
        :param kpts2: Image points for the second frame, :math:`\\mathbf{x}'`
        :param F: Fundamental matrix
        :param euclidean: If True, reconstruct the structure up to a Euclidean
                          transformation (using the Essential matrix). Else,
                          reconstruct up to a projective transformation.
        :type kpts1: Numpy nx2 ndarray
        :type kpts2: Numpy nx2 ndarray
        :type F: Numpy 3x3 ndarray
        :type euclidean: Boolean

        :returns: Triangulated 3D points, :math:`\\mathbf{X}` (homogeneous)
        :rtype: Numpy nx4 ndarray
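
        Example (a sketch; assumes :math:`F` and the camera calibration
        have already been estimated and stored in the instance)::

            X = vo.triangulate(kpts1, kpts2, euclidean=True)  # n x 4 array
            # As a side effect, vo.cam now holds the recovered R and t of
            # the second frame.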

        """
        if np.shape(kpts1)[1] != 2:
            raise ValueError("The dimensions of the input image points must "
                             "be (n, 2), where n is the number of points")
        print("Shape needed for recoverpose: {}".format(np.shape(kpts1)))
        print("Type needed for recoverpose: {}".format(type(kpts1)))
        print("Type: {}".format(type(kpts1[0][0])))
        kpts1 = (np.reshape(kpts1, (len(kpts1), 2))).T
        kpts2 = (np.reshape(kpts2, (len(kpts2), 2))).T
        if F is None:
            F = self.F
        if euclidean:
            E = self.E_from_F(F)
            R, t = self.get_pose(kpts1.T, kpts2.T, self.cam.K, E)
            self.cam.set_R(R)
            self.cam.set_t(t)
            P2 = self.cam.Rt2P(R, t, self.cam.K)
            P1 = np.dot(self.cam.K, self.create_P1())
        else:
            P2 = self.P_from_F()
            P1 = self.create_P1()
        points3D = cv2.triangulatePoints(P1, P2, kpts1, kpts2)
        points3D = points3D / points3D[3]
        return points3D.T

    def filter_z(self, points):
        """ Filter out those 3D points whose Z coordinate is negative and is
        likely to be an outlier, based on the median absolute deviation (MAD).

        The mask returned by the method can be used to filter the image points
        in both images, :math:`\\mathbf{x}`  and :math:`\\mathbf{x}'`.

        :param points: 3D points :math:`\\mathbf{X}`
        :type points: Numpy nx4 ndarray

        :returns: 1. 3D points filtered
                  2. Filter mask (positive depth and no outliers)
        :rtype: 1. Numpy nx4 ndarray
                2. Numpy length-n boolean ndarray
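
        Example (sketch)::

            X_ok, mask = vo.filter_z(X)   # X is an n x 4 homogeneous array
            x1_ok = x1[mask]              # keep the corresponding image points
            x2_ok = x2[mask]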

        """
        if np.shape(points)[1] != 4:
            raise ValueError('Shape of input array must be (n, 4)')
        mask_pos = points[:, 2] >= 0
        thresh = 3.5
        Z = points[:, 2]
        Z = Z[:, None]
        median = np.median(Z, axis=0)
        diff = np.sum((Z - median)**2, axis=1)
        diff = np.sqrt(diff)
        med_abs_deviation = np.median(diff)
        modified_z_score = 0.6745 * diff / med_abs_deviation
        mask = modified_z_score < thresh
        return points[mask & mask_pos], mask & mask_pos

    def convert_from_homogeneous(self, kpts):
        # Convert homogeneous points to euclidean points
        # @param kpts: List of homogeneous points
        # @return pnh: list of euclidean points

        # Remember that every function in OpenCV needs us to specify the data
        # type. In addition, convertPointsFromHomogeneous needs the shape of
        # the arrays to be correct. The function takes a vector of points in
        # C++ (i.e., a list of several points), so in numpy we need a
        # multidimensional array of shape a x b x c, where a is the number of
        # points, b = 1, and c = 2 to represent 1x2 point data.

        if len(kpts[0]) == 3:

            for i in range(len(kpts)):
                kpts[i] = np.array(kpts[i], np.float32).reshape(-1, 1, 3)

            pnh = [cv2.convertPointsFromHomogeneous(x) for x in kpts]

            for i in range(len(pnh)):
                pnh[i] = np.array(pnh[i], np.float32).reshape(1, 2, 1)

        elif len(kpts[0]) == 4:

            for i in range(len(kpts)):
                kpts[i] = np.array(kpts[i], np.float32).reshape(-1, 1, 4)

            pnh = [cv2.convertPointsFromHomogeneous(x) for x in kpts]

            for i in range(len(pnh)):

                pnh[i] = np.array(pnh[i], np.float32).reshape(1, 3, 1)

        elif len(kpts) == 3:

            pnh = np.zeros((2, len(kpts[0])))

            for i in range(len(kpts[0])):

                pnh[:, i] = kpts[:2, i]

        return pnh

    def convert_array2d(self, kpts):

        #print(len(kpts[:, 0]))

        a = np.zeros((len(kpts[:, 0]), 2))

        for i in range(len(kpts[:, 0])):

            a[i, :] = kpts[i, :2]

        return a

    def func(self, params, x1, x2):
        """ Computes the residuals for the Fundamental matrix two view
        optimization problem.

        This is an m-dimensional function of n variables (n is the number of
        observations in the frames, the image points, and m in this case is
        :math:`2n`) that returns the residuals between the measured image points
        and the projections of  the reconstructed 3D points
        :math:`\\hat{\\mathbf{X}}`: :math:`\\hat{\\mathbf{x}}`
        and :math:`\\hat{\\mathbf{x}}'`.

        The method computes the projected points :math:`\\hat{\\mathbf{x}}` and
        :math:`\\hat{\\mathbf{x}}'` from the two camera projection matrices,
        :math:`P` (created using the
        :py:meth:`VisualOdometry.VisualOdometry.create_P1` method)
        and :math:`P'`, which is extracted from the parameters vector (the
        first twelve elements).


        :param params: Parameter vector :math:`\\mathbf{p}`, that contains the
                       second camera parameters and the 3D structure.
        :param x1: The first frame measured points :math:`\\mathbf{x}`
        :param x2: The second frame measured points :math:`\\mathbf{x}'`
        :type params: Numpy ndarray of shape :math:`k`, where :math:`k` is the
                      sum of the second camera parameters and the 3D parameters.
        :type x1: Numpy nx2 ndarray
        :type x2: Numpy nx2 ndarray
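
        Example of the expected parameter layout (a sketch, where ``P2`` is the
        3x4 second camera matrix and ``X3d`` the nx3 structure)::

            params = np.hstack((P2.ravel(), X3d.ravel()))  # 12 + 3n values
            residuals = vo.func(params, x1, x2)            # shape (4n,)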


        """
        P1 = self.create_P1()
        P2 = params[0:12].reshape(3, 4)
        p = params[12:]
        X = np.reshape(p, (-1, 3)).T  # 3 x n matrix
        # Make homogeneous
        X = self.make_homog(X)
        # Project the structure
        x1_est = np.dot(P1, X)
        x1_est = x1_est / x1_est[2]
        x1_est = x1_est[:2, :]
        x2_est = np.dot(P2, X)
        x2_est = x2_est / x2_est[2]
        x2_est = x2_est[:2, :]

        error_image1 = self.residual(x1, x1_est.T).ravel()
        error_image2 = self.residual(x2, x2_est.T).ravel()
        error = np.append(error_image1, error_image2)
        return error

    def residual(self, x1, x2):
        """Given two nx2 vectors :math:`\\mathbf{x}` and
        :math:`\\hat{\\mathbf{x}}`, compute the difference between their
        coordinates:

        .. math::

            residual_i(\\mathbf{x}_i, \\hat{\\mathbf{x}}_i) = (x_i-\\hat{x}_i,
            y_i-\\hat{y}_i)

        :param x1: :math:`\\mathbf{x}`
        :param x2: :math:`\\hat{\\mathbf{x}}`
        :type x1: Numpy nx2 ndarray
        :type x2: Numpy nx2 ndarray
        :returns: Residual vector :math:`\\mathbf{x} - \\hat{\\mathbf{x}}`
        :rtype: Numpy nx2 ndarray

        """
        return x1 - x2

    def optimize_F(self,
                   x1,
                   x2,
                   F=None,
                   structure=None,
                   method='lm',
                   robust_cost_f='linear'):
        """ Minimize the cost

        .. math::

            \\epsilon(\\mathbf{x}, \\mathbf{x}') = \\sum_i d(\\mathbf{x}_i,
            \\hat{\\mathbf{x}}_i)^2 +
            d(\\mathbf{x}_i', \\hat{\\mathbf{x}}_i')^2

        over an initial estimate of :math:`\\hat{F}` and
        :math:`\\hat{\\mathbf{X}}_i`, :math:`i=1,\\dots, n`

        The cost is minimized using a nonlinear minimization algorithm over
        :math:`3n+12` variables: :math:`3n` for the 3D points
        :math:`\\hat{\\mathbf{X}}_i` and 12 for the camera matrix
        :math:`P'=[M|\\mathbf{t}]`, with :math:`\\hat{F}=[\\mathbf{t}]_xM` and

        .. math::

            \\hat{\\mathbf{x}}_i = P\\hat{\\mathbf{X}}_i

            \\hat{\\mathbf{x}}_i' = P'\\hat{\\mathbf{X}}_i

        The available algorithms are:

            * **trf**: Trust Region Reflective algorithm, see :cite:`branch1999`
            * **dogbox**: Modified Powell's Dogleg algorithm, see
              :cite:`powell1970new` and :cite:`voglisrectangular`.
            * **lm**: Levenberg-Marquardt algorithm, see
              :cite:`more1978levenberg`.

        In order to reduce the influence of outliers on the solution we can
        modify the cost function :math:`\\epsilon(\\mathbf{x}, \\mathbf{x}')`
        using the robust_cost_f argument:

            * **linear**: Standard least-squares (no modification)
            * **soft_l1**: Pseudo-Huber cost function:

                .. math::

                    C(\\epsilon) = 2(\\sqrt{1+\\epsilon}-1)

            * **huber**: Huber cost function:

                .. math::

                    C(\\epsilon) = \\epsilon \\ \\mathbf{if} \\ \\epsilon\\leq 1

                    \\mathbf{else} \\ C(\\epsilon) = 2\\sqrt{\\epsilon}-1

            * **cauchy**: Cauchy cost function:

                .. math::

                    C(\\epsilon) = \\ln(1+\\epsilon)

        .. warning::

            If we are using the Levenberg-Marquardt algorithm the cost function
            must be the **linear** one. Otherwise the algorithm will raise an
            error.

        :param x1: The previous frame measured image points, :math:`\\mathbf{x}`
        :param x2: The current frame measured image points, :math:`\\mathbf{x}'`
        :param F: Fundamental matrix. If None, then the internal attribute will
                  be used.
        :param structure: 3D scene structure, :math:`\\hat{\\mathbf{X}}`
        :param method: Minimization algorithm to be used.
        :param robust_cost_f: Robust cost function to be used.
        :type x1: Numpy nx2 ndarray
        :type x2: Numpy nx2 ndarray
        :type F: Numpy 3x3 ndarray
        :type structure: Numpy nx4 Numpy ndarray
        :type method: String
        :type robust_cost_f: String

        :returns: 1. Instance of scipy.optimize.OptimizeResult (contains
                     all the information returned by the minimization algorithm)

                  2. Optimized Fundamental matrix.

        :rtype:

                1. :py:class:`scipy.optimize.OptimizeResult` instance.
                2. :math:`F`: Numpy 3x3 ndarray
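
        Example (a sketch; assumes the measured points, an initial :math:`F`
        and the triangulated structure are already stored in the instance)::

            sol, F_opt = vo.optimize_F(x1, x2, method='trf',
                                       robust_cost_f='soft_l1')
            print(sol.cost)   # final value of the cost function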


        """
        if F is None:
            F = self.F
        vec_P2 = self.P_from_F().ravel()  # 3x4 camera matrix as a 12-vector
        # Transform the structure to a 1d vector
        if structure is None:
            structure = self.structure
        vec_str = structure[:, :3]  # The ones aren't parameters
        vec_str = vec_str.reshape(-1)
        param = vec_P2
        param = np.append(param, vec_str)
        solution = optimize.least_squares(self.func,
                                          param,
                                          method=method,
                                          args=(x1, x2),
                                          loss=robust_cost_f)
        P = solution.x[:12].reshape((3, 4))
        M = P[:, :3]
        t = P[:, 3]
        F = np.dot(self.skew(t), M)
        return solution, F

    def E_from_F(self, F=None, K=None):
        """ This method computes the Essential matrix from the Fundamental
        matrix.

        The equation is the following:

        .. math::

            E = K^{T}FK

        where :math:`K` is the camera calibration matrix, a 3x3 matrix that
        contains the intrinsic parameters of the camera:

        ::

                    [f    px]
            K  =    [  f  py]
                    [     1 ]


        For a detailed discussion about these topics see HZ_ chapters 6 and 9.

        .. _HZ: http://www.robots.ox.ac.uk/~vgg/hzbook/

        :param F: Fundamental matrix. If None, use the internal attribute.
        :type F: Numpy 3x3 ndarray
        :param K: Camera calibration matrix
        :type K: Numpy 3x3 ndarray

        :returns: The estimated Essential matrix E
        :rtype: Numpy ndarray (3x3)
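
        Example (a sketch; the calibration values below are illustrative
        only)::

            K = np.array([[700.,   0., 320.],
                          [  0., 700., 240.],
                          [  0.,   0.,   1.]])
            E = vo.E_from_F(F, K)   # equivalent to K.T.dot(F).dot(K)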

        """
        if F is None:
            F = self.F
        if K is None:
            K = self.cam.K
        self.E = K.transpose().dot(F).dot(K)
        return self.E

    def get_pose(self, pts1, pts2, camera_matrix, E=None, inplace=True):
        """ Recover the rotation matrix :math:`R` and the translation
        vector :math:`\\mathbf{t}` from the Essential matrix.

        As Hartley and Zisserman state in their book, the camera pose can be
        recovered from the Essential matrix up to scale. For a given Essential
        matrix :math:`E`, and first camera matrix :math:`P=[I|\\mathbf{0}]`,
        there are four possible solutions for the second camera matrix
        :math:`P'` (see HZ_ section 9.6.2).

        A reconstructed point :math:`\\mathbf{X}` will be in front of both
        cameras in one of these four solutions only. Thus, testing with a single
        point to determine if it is in front of both cameras is sufficient to
        decide between the four different solutions for the camera :math:`P'`.

        OpenCV 3 has an implementation of the Nister_ five point algorithm to
        extract the pose from the Essential matrix and a set of corresponding
        image points (KeyPoints). The algorithm follows these steps:

            1. Extract the two possible solutions for the rotation matrix
               :math:`R` and also the two solutions for the translation vector
               :math:`\\mathbf{t}`, so we have the four possible solutions:

               .. math::

                   P_1 = [UWV^T|\\mathbf{t}]

               .. math::

                   P_2 = [UWV^T|-\\mathbf{t}]

               .. math::

                   P_3 = [UW^TV^T|\\mathbf{t}]

               .. math::

                   P_4 = [UW^TV^T|-\\mathbf{t}]


              with :math:`R=UWV^T` or :math:`R=UW^TV^T` and :math:`\\mathbf{t}`
              being the last column of :math:`U`.

            2. For all the four possible solutions do:

                2.1. Triangulate the set of corresponding
                     KeyPoints and normalize them, i.e., divide all the
                     vector elements by the fourth coordinate
                     (we are working with **homogeneous** coordinates here):

                     .. math::

                         \\mathbf{X}_i = \\frac{\\mathbf{X}_i}{X_i^{(4)}}
                         \\quad \\text{for every triangulated point}

                2.2. Next, Nister uses a threshold distance to filter out far
                     away points (i.e., points at infinity). Then, the
                     algorithm filters out those triangulated points whose
                     third coordinate (depth) is less than zero and counts the
                     points that meet these constraints (the valid points).

            3. The solution that has the most valid triangulated points is
               the true one.

        .. note::

                In order to compute the pose of the second frame with respect
                to the first one we invert the order of the parameters *pts1*
                and *pts2* when passing them to the OpenCV method recoverPose.

        :param E: Essential matrix; if None, the internal one is used.
        :param pts1: Points from the first image
        :param pts2: Points from the second image
        :param camera_matrix: Camera calibration matrix
        :param inplace: If True, then fill the :math:`R` and :math:`\\mathbf{t}`
                        vectors of the current camera. Also, compute the
                        camera projection matrix :math:`P` **up to scale**.
        :type E: Numpy 3x3 ndarray
        :type pts1: Numpy nx2 ndarray
        :type pts2: Numpy nx2 ndarray
        :type camera_matrix: Numpy 3x3 ndarray

        :returns: The rotation matrix :math:`R` and the translation vector
                  :math:`\\mathbf{t}`.
        :rtype: Numpy ndarrays
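
        Example (a sketch; ``E``, the matched points and the calibration
        matrix ``K`` are assumed to be available)::

            R, t = vo.get_pose(pts1, pts2, K, E)
            # With inplace=True (the default), vo.cam.R, vo.cam.t and
            # vo.cam.P are updated as a side effect.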

        """
        if E is None:
            E = self.E
        R = np.zeros([3, 3])
        t = np.zeros([3, 1])
        pp = tuple(camera_matrix[:2, 2])
        f = camera_matrix[0, 0]
        pts1 = pts1.astype(np.float64)
        pts2 = pts2.astype(np.float64)
        cv2.recoverPose(E, pts2, pts1, R, t, f, pp)
        if inplace:
            self.cam.set_R(R)
            self.cam.set_t(t)
            self.cam.set_P(self.cam.Rt2P(R, t, self.cam.K))
        return R, t

    def compute_scale(self, plane_model, scene):
        # Compute the scale of the scene based on a plane fitted to the 3D point
        # cloud represented by scene. The plane_model is fitted using a
        # least-squares approach inside a RANSAC scheme (see PlaneModel.py)
        # @param plane_model: the parameters of the plane (numpy array)
        # @param scene: 3D points marked as inliers by the RANSAC algorithm in
        # the process of estimating the plane. (4 x n) numpy array
        # @return scale: scale of the scene (float)

        # First compute the distance for every inlier and take the mean as the
        # final distance
        distance_sum = 0
        for i in range(np.shape(scene)[1]):
            distance = (np.dot(plane_model, scene[:, i])) / \
                        np.linalg.norm(plane_model)
            distance_sum += distance
        # Compute the mean distance d and the scale as H / d (H = self.height,
        # the known height of the camera above the ground plane)
        mean = distance_sum / np.shape(scene)[1]
        scale = self.height / mean

        return scale
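
    # A vectorized sketch of the same computation (plane_model is a length-4
    # array [a, b, c, d] and scene is a 4 x n array of homogeneous inliers):
    #
    #     distances = plane_model.dot(scene) / np.linalg.norm(plane_model)
    #     scale = self.height / distances.mean()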

    def compute_scale2(self, scene, pitch=0):
        # Compute the scale using a heuristic approach. For every triangulated
        # point compute its height and the height differences with respect to
        # the other points. Sum all these differences through a smoothing
        # kernel and pick the height with the most support.
        # @param pitch: the pitch angle of the camera (zero by default)
        # @param scene: 3D points of the hypothetical ground plane (4 x n)
        # @return scale: the selected height, i.e., the scale of the scene
        max_sum = 0
        for i in range(np.shape(scene)[1]):
            h = scene[1][i] * cos(pitch) - scene[2][i] * sin(pitch)
            height_sum = 0
            for j in range(np.shape(scene)[1]):
                h_j = scene[1][j] * cos(pitch) - scene[2][j] * sin(pitch)
                height_diff = h_j - h
                height_sum += exp(-50 * height_diff * height_diff)
            if height_sum > max_sum:
                max_sum = height_sum
                best_idx = i
        scale = scene[1][best_idx] * cos(pitch) - \
                scene[2][best_idx] * sin(pitch)
        return scale
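
    # In effect, compute_scale2 performs a kernel vote: each candidate height
    # h_i receives the score sum_j exp(-50 * (h_j - h_i)^2), and the height
    # with the densest support is returned as the scene scale.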
Ejemplo n.º 38
0
 rows = cur.fetchall()
 numRows = (len(rows))
 newJob = JobDescNode(contentID, contentFile, keyword)
 if (numRows == 0):
     ResumeProcessor.construct(newJob)
     toPrint = encodeClassToJson(newJob)
     cur.execute("INSERT INTO job VALUES (%s,%s,%s,%s)",(toPrint,'f', contentID ,contentName))
     con.commit()
     print('just store job')
 else:
     ResumeProcessor.construct(newJob)
     toPrint = encodeClassToJson(newJob)
     cur.execute("INSERT INTO job VALUES (%s,%s,%s,%s)",(toPrint,'f', contentID ,contentName))
     con.commit()
     f = Facade()
     matcher = Matcher(f)
     scorer = Scorer(f)
     cur.execute("SELECT isonce_resume FROM once")
     rows = cur.fetchall()
     for row in rows:
         if(row[0] is True):
             cur.execute("UPDATE once SET isonce_resume=%s",('f',))
             con.commit()
             print('calling match 0 --1 ')
             matcher.matchAll(0)
             scorer.calculateScore()
             print('calling match 0 --2')
         else:
             matcher.matchAll(2)
             scorer.calculateScore()
             print('matching and scoring complete')
Ejemplo n.º 39
0
class Localize(object):

    def __init__(self, robot):
        self.h, self.w = 320, 240
        self.numLocations = 7
        # host = '134.173.24.116'
        # port = 5003
        # print('Waiting for Connection....')
        # self.ipad= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # self.ipad.connect((host,port))
        # print('Connected!')
        self.matcher = Matcher('BOW', None, self.h, self.w)
        self.frame = self.readImage()
        self.robot = robot

        # for tracking the image during runs
        self.imageIndex = 0

    def BOWMatch(self, image):
        self.matcher.setImage(image)
        results = []
        for i in range(self.numLocations):
            self.matcher.setDirectory('map/' + str(i))
            # matcher.setFeatureIndex(self.featureIndices[i])
            totalMatches, probL = self.matcher.run()
            results.append([totalMatches, probL])
        return results

    def write(self, probL, filename):
        file = open(filename, 'w')
        for circle in probL:
            totalMatches = circle[0]
            probs = circle[1]
            file.write(str(totalMatches) + ', ')
            for prob in probs:
                file.write(str(prob) + ', ')
            file.write('\n')
        file.close()

    def readImage(self):
        stream = urlopen(url)
        stream.readline()

        sz = 0
        rdbuffer = None

        clen_re = re.compile(rb'Content-Length: (\d+)\r\n')
        stream.readline()                    # content type
        
        try:                                 # content length
            m = clen_re.match(stream.readline())
            clen = int(m.group(1))
        except (AttributeError, ValueError):
            print('could not parse the Content-Length header')
            return None
        
        stream.readline()                    # timestamp
        stream.readline()                    # empty line
        
        # Reallocate buffer if necessary
        if clen > sz:
            sz = clen*2
            rdbuffer = bytearray(sz)
            rdview = memoryview(rdbuffer)
        
        # Read frame into the preallocated buffer
        stream.readinto(rdview[:clen])
        
        stream.readline() # endline
        stream.readline() # boundary
            
        # This line will need to be different when using OpenCV 2.x
        img = cv2.imdecode(np.frombuffer(rdbuffer, count=clen, dtype=np.uint8), flags=cv2.IMREAD_COLOR)
        return img

    def ts(self, message):
        self.robot.send(str(message).encode())
        data = self.robot.recv(1024).decode()
        print(data)

    def localize(self):
        file = open('commands.txt', 'w')
        # self.host = '134.173.25.106'
        # self.port = 5000
        # self.robot = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # print("Connecting...")
        # self.robot.connect((self.host,self.port))
        # print("Connected!!")
        self.ts('RESET')
        imageName = 0
        while True:
            self.ts('r')
            time.sleep(0.2)
            self.ts('s')
            time.sleep(3)
            image = self.readImage()
            cv2.imwrite('cam1_img/' + str(imageName).zfill(4) + '.png', image)
            file.write(str(imageName).zfill(4) + ":" + 'r\n')
            imageName += 1
            if imageName == 10:
                self.ts('s')
                break

    def save(self, index):
        self.frame = self.readImage()
        image = self.frame
        cv2.imwrite('cam1_img/' + str(index).zfill(4) + '.png', image)

    def delete(self):
        for file in glob.glob('cam1_img/*.png'):
            os.remove(file)

    def analyze(self):
        # a = analyzer('BOW', 800, 600)
        a = analyzer('SIFT', 800, 600)
        a.createRawP()
        a.processRaw()
        self.delete()

        # bestGuess = readBestGuess('bestGuess.txt')
        # return bestGuess[-1]

    def run(self):
        # print('Analyzing...')
        # previousProbs = []
        # for i in range(self.numLocations):
        #     previousProbs.append([1, [1/75] * 25 ])

        # while True:
        #     # Reading Angles and Gyro
        #     previousAngle = (self.readGyro() * 180./math.pi) % 360
        #     self.frame = self.readImage()
        #     # cv2.imwrite('cam/' + str(counter).zfill(4) + '.png', self.frame)
        #     currentAngle = (self.readGyro() * 180./math.pi) % 360
        #     command = 's'
        #     cv2.imshow('captured', self.frame)
        #     cv2.waitKey(1)
        #     # Calculating Action
        #     diff = currentAngle - previousAngle
        #     if (diff > 1 and diff < 300) or diff < -300:
        #         command = 'l'
        #     elif (diff < -1 and diff > -300) or diff > 300:
        #         command = 'r'

        #     blurFactor = self.Laplacian(self.frame)
        #     probL = self.BOWMatch(self.frame)
        #     # print(probL)
        #     accountAction = self.accountCommand(command, previousProbs)
        #     adjusted = self.prevWeight(accountAction, probL)
        #     blurCorrect = self.blurCorrect(previousProbs, probL, blurFactor)
        #     previousProbs = blurCorrect
        #     self.write(blurCorrect, 'out.txt')
        #     # counter += 1
        self.localize()
        self.analyze()

    ############################
    ### Probability Updating ###
    ############################

    def blurCorrect(self, previousP, currentP, blurFactor):
        '''Weight the current and previous probability lists according to the blurriness factor.'''
        currentWeight = 0
        if blurFactor > 20:
            currentWeight = 0.85
        else:
            currentWeight = (blurFactor / 200) * 0.85
        previousWeight = 1 - currentWeight

        # Assigning the weight to each list
        truePosition = []
        for i in range(self.numLocations):
            truePosition.append([0, []])


        for circleIndex in range(len(truePosition)):
            currentCircle = currentP[circleIndex]
            previousCircle = previousP[circleIndex]

            # Number of matches 
            current_num_matches = currentCircle[0]
            previous_num_matches = previousCircle[0]
            
            # Each probability list
            current_probList = currentCircle[1]
            previous_probList = previousCircle[1]


            truePosition[circleIndex][0] = (currentWeight * current_num_matches + previousWeight * previous_num_matches)
            for probIndex in range(len(currentP[circleIndex][1])): 

                current_prob = current_probList[probIndex]
                previous_prob = previous_probList[probIndex]

                truePosition[circleIndex][1].append(currentWeight * current_prob + previousWeight * previous_prob)

        return truePosition

    def prevWeight(self, previousP, currentP):
        '''Weight the current and previous probability lists with a fixed 70/30 split.'''
        currentWeight = 0.7
        previousWeight = 1- currentWeight

        # Assigning the weight to each list
        truePosition = []
        for i in range(self.numLocations):
            truePosition.append([0, []])

        for circleIndex in range(len(truePosition)):
            currentCircle = currentP[circleIndex]
            previousCircle = previousP[circleIndex]

            # Number of matches 
            current_num_matches = currentCircle[0]
            previous_num_matches = previousCircle[0]
            
            # Each probability list
            current_probList = currentCircle[1]
            previous_probList = previousCircle[1]


            truePosition[circleIndex][0] = (currentWeight * current_num_matches + previousWeight * previous_num_matches)
            for probIndex in range(len(currentP[circleIndex][1])): 

                current_prob = current_probList[probIndex]
                previous_prob = previous_probList[probIndex]

                truePosition[circleIndex][1].append(currentWeight * current_prob + previousWeight * previous_prob)

        return truePosition

    def accountCommand(self, command, previousP):
        '''Account for the command the robot is given at the moment.'''
        # Copy two levels deep so the rotations below don't mutate previousP
        copy = [[circle[0], circle[1][:]] for circle in previousP]
        if command == 'l':
            for circles in copy:
                circles[1] = circles[1][1:] + circles[1][0:1]
        elif command == 'r':
            for circles in copy:
                circles[1] = circles[1][-1:] + circles[1][0:-1]
        elif command == 'f':
            bestCircleIndex = previousP.index(max(previousP))
            bestAngleIndex = previousP[bestCircleIndex][1].index(max(previousP[bestCircleIndex][1]))
            factor = 0.05 * abs(math.sin(bestAngleIndex * 15 * math.pi / 180))
            if bestCircleIndex < self.numLocations - 1 and bestAngleIndex*15 < 180 and bestAngleIndex > 0:
                copy[bestCircleIndex+1][0] *= (1 + factor)
            elif bestCircleIndex > 0 and bestAngleIndex*15 > 180 and bestAngleIndex*15 < 360: 
                copy[bestCircleIndex-1][0] *= (1 + factor)
        return copy

    def Laplacian(self, img):
        '''Calculate the blurriness factor as the variance of the Laplacian.'''
        # img = cv2.imread(imagePath, 0)
        var = cv2.Laplacian(img, cv2.CV_64F).var()
        return var
Ejemplo n.º 40
0
from CVImage import CVImage
from Matcher import Matcher
import cv2
import numpy as np
from VisualOdometry import VisualOdometry
import matplotlib as mplt
import matplotlib.pyplot as plt

match = Matcher()
img = CVImage('/home/cesar/Documentos/Computer_Vision/01/image_0')
img.read_image()
img.copy_image()
img.acquire()


cv2.namedWindow('Invariant', cv2.WINDOW_NORMAL)
cv2.imshow('Invariant', img.invariant_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

h = img.new_image.shape[0]
w = img.new_image.shape[1]
n = 2  # Number of roi's
size = np.array([[w / n], [h / n]], np.int32)
start = np.array([[0], [0]], np.int32)
# Create roi
roi = img.crop_image(start, size, img.new_image)
roi_prev = img.crop_image(start, size, img.prev_image)


match.match(roi, roi_prev)
Ejemplo n.º 41
0
    def optP(self):
        if self.method != 'BOW':
            print('Creating indices...')
            self.createIndex()
        blurP = []
        previousProbs = []
        bestAngleIndex = None
        bestCircleIndex = None
        for i in range(self.numLocations):
            previousProbs.append([1, [1/75] * 25])
        matcher = Matcher(self.method, width=self.w, height=self.h)
        start = time.time()
        print('Matching...')
        for imagePath in glob.glob('cam1_img' + '/*' + extension):
            p = []
            matcher.setQuery(imagePath)
            results = []
            if bestCircleIndex is None:
                for i in range(self.numLocations):
                    matcher.setDirectory('map/' + str(i))
                    if self.method != 'Color':
                        matcher.setIndex(self.indices[i])
                    else:
                        matcher.setColorIndex(self.indices[i])
                    totalMatches, probL = matcher.optRun(bestAngleIndex)
                    results.append([totalMatches, probL])
            else:
                lower = bestCircleIndex - 2
                upper = bestCircleIndex + 2
                for i in range(self.numLocations):
                    if i >= lower and i <= upper:
                        if self.method != 'BOW':
                            matcher.setDirectory('map/' + str(i))
                            if self.method != 'Color':
                                matcher.setIndex(self.indices[i])
                            else:
                                matcher.setColorIndex(self.indices[i])
                            totalMatches, probL = matcher.optRun(bestAngleIndex)
                            results.append([totalMatches, probL])
                        else:
                            totalMatches, probL = matcher.run()
                            results.append([totalMatches, probL])
                    else:
                        results.append([1, [1/75] * 25])


            p.extend(results)  
            print('\t' + imagePath)
            blurFactor = self.Laplacian(imagePath)

            # Reading Command
            command = self.commands[imagePath.replace('cam1_img/', '').replace(extension, '')]

            # Account for Command
            actionAccount = self.accountCommand(command, previousProbs)

            # Adjusting for Command
            adjusted = self.prevWeight(actionAccount, p)

            # Adjusting for Blur
            adjusted = self.probUpdate(actionAccount, adjusted, blurFactor)


            # Getting best guess
            # this will get the max of the first variable
            bestCircleIndex = adjusted.index(max(adjusted))
            bestAngleIndex = adjusted[bestCircleIndex][1].index(max(adjusted[bestCircleIndex][1]))
            self.bestGuess.extend([[bestCircleIndex, bestAngleIndex]])
            blurP.extend(adjusted)
            previousProbs = adjusted
            # print(imagePath)

        self.blurP = blurP
        self.writeProb(self.blurP, 'out.txt', 'w')
        self.writeProb(self.bestGuess, 'bestGuess.txt', 'w')
        end = time.time()
        print('Time elapsed: %0.1f' % (end-start))
Ejemplo n.º 42
0
	def testUnmatchedSinglePair(self):
		self.assertFalse(Matcher.isMatching("(]"))
Ejemplo n.º 43
0
    def createRawP(self):
        ''' This function generates a list of raw probabilities directly from image matching'''
        if self.method != 'BOW':
            print('Creating indices...')
            self.createIndex()
        start = time.time()
        p = []
        matcher = Matcher(self.method, width=self.w, height=self.h)
        print('Matching...')
        for imagePath in glob.glob('cam1_img' + '/*' + extension):
            matcher.setQuery(imagePath)
            results = []
            for i in range(self.numLocations):
                matcher.setDirectory('map/' + str(i))
                if self.method != 'Color':
                    matcher.setIndex(self.indices[i])
                else:
                    matcher.setColorIndex(self.indices[i])
                totalMatches, probL = matcher.run()
                results.append([totalMatches, probL])

            p.extend(results)  
            print('\t' + imagePath)
        self.rawP = p
        self.writeProb(p, 'rawP.txt', 'w')

        end = time.time()
        print('Time elapsed: %0.1f' % (end-start))
Ejemplo n.º 44
0
	def testComplexMatched(self):
		self.assertTrue(Matcher.isMatching("(){asdf[[asdf](alksjdf)]asdf}"))
Ejemplo n.º 45
0
 def matches_tenth_axiom(self, expression: Node):
     return Matcher().matches(expression, self.tenth_axiom_schema)
Ejemplo n.º 46
0
	def testComplexFailure(self):
		self.assertFalse(Matcher.isMatching("({[(])})"))
Ejemplo n.º 47
0
def run(MOVIE_DIR, HTML_OUTPUT_FLAG, LIMIT):

    movielookup = MovieLookup()                         #A class to help lookup movie titles
    movieDataUtil = MovieDataUtil()                     #A helper class for movie json data
    matcher = Matcher(movieMatchRegex, allowedFiletypes)#Match files in a given directory
    normaliser = Normaliser()                           #
    idFinder = IdFinder()                               #Used to find an imdb id from movie filename

    #First, let's match files which match the regex and have the required file extensions in the given directory
    matcher.findInDirectory(MOVIE_DIR)
    movieMatches = matcher.getMatches()
    unMatched = matcher.getIgnored()

    #normalise the matches (the filenames will be used as movie titles)
    normalisedMovieMatches = []
    for item in movieMatches:
        normalisedItem = item
        normalisedItem = normaliser.removeTrailingNumber(normalisedItem)
        normalisedItem = normaliser.normalise(normalisedItem)
        normalisedMovieMatches.append(normalisedItem)

    #Now we lookup successful matches
    movieData = {}      #successful lookup data will go here
    failedLookups = []  #we will do something with failed lookups later...

    count = 0   #used to limit the number of lookups we will do
    for title in normalisedMovieMatches:
        count += 1
        if count >= LIMIT:  # check that we don't go over the arbitrary limit
            break

        #look up each movie in the list
        lookupData = movielookup.lookupByTitle(title)

        #check if we found a movie
        if movieDataUtil.isValidLookupResult(lookupData):
            movieData[title] = lookupData
        else:
            failedLookups.append(title)

    #now we will try to correct the failed lookups by using google to find each imdb id
    idLookupDict = idFinder.findIdByTitleList(failedLookups)

    #reset the failed lookups
    failedLookups = []      #there should be a lot less now...
    titleCorrections = 0    #count how many corrections we actually found

    #Now lookup using the new ids which we found
    for title, foundId in idLookupDict.items():
        if foundId is not None:
            #we found an id, now let's look the movie up by its id
            lookupData = movielookup.lookupById(foundId)

            #theoretically this should always be true unless we got an invalid id somehow...
            if movieDataUtil.isValidLookupResult(lookupData):
                movieData[title] = lookupData
                titleCorrections += 1
            else:
                failedLookups.append(title)
        else:
            failedLookups.append(title)

    #sort the data by imdb id
    movieData = movieDataUtil.sortMovieData(movieData)

    #Output the data
    if HTML_OUTPUT_FLAG:
        templateEnvironment = Environment(loader=FileSystemLoader(templateDirectory),trim_blocks=True)
        print templateEnvironment.get_template('main.html').render(
            movieLookupData=movieData,
            failedLookups=failedLookups,
            unMatched=unMatched,
            titleCorrections=titleCorrections,
            dateTime = time.strftime("%c"),
            version = __version__,
        )
    else:
        simpleOutput(movieData, failedLookups, unMatched)
Ejemplo n.º 48
0
	def testComplexUnmatchedException(self):
		self.assertFalse(Matcher.isMatching("(asdf[dfdas{}(asdf])"))
Ejemplo n.º 49
0
class Interface(Console):
    def __init__(self):
        print 'Interface init'
        Console.__init__(self)

    def do_loadCaseLibrary(self, arg):
        """
        Loads the case library.
        Use:
            loadCaseLibrary <filename>      Filename must be a pickle file
        """
        filename = arg
        if os.path.isfile(filename):
            if filename.endswith('.pickle'):
                with open(filename, "rb") as fp:
                    self.cases = pickle.load(fp)

                self.matcher = Matcher(self.cases)
                print '%d loaded cases' % len(self.cases)
            else:
                print 'Filename must be a pickle file'
        else:
            print 'Filename does not exist'

    def do_loadCase(self, arg):
        """
        Load a case to classify.

        loadCase                            Show loaded case
        loadCase manual <SL> <SW> <PL> <PW>    Load a case manually specifying the 4 values of a case
        loadCase automatic <file>           Load a case from a file
        """

        if arg == '':
            print 'Current Load Case:'
            try:
                self.load_case
            except AttributeError:
                print 'No case was loaded'
            else:
                print self.load_case
        elif len(arg) > 1:
            if 'manual' in arg:
                params = arg.split(' ')[1:]
                if len(params) == 4:
                    item = {
                        'SepalLength': params[0],
                        'SepalWidth': params[1],
                        'PetalLength': params[2],
                        'PetalWidth': params[3],
                        'IrisClass': 'null'
                    }
                    self.load_case = Case(item)
                else:
                    print 'Unrecognized Parameters'
                    Console.do_help(self, 'loadCase')

        else:
            print 'Unrecognized Parameters'
            Console.do_help(self, 'loadCase')

    def do_printCases(self, arg):
        """
        Print the cases loaded to the Case Library
        """
        try:
            self.cases
        except AttributeError:
            print 'No cases on Case Library'
        else:
            for c in self.cases:
                print c
            print '%d printed cases' % len(self.cases)

    def do_classify(self, arg):
        """
        Classifies the current loaded case
        """
        try:
            self.load_case
        except AttributeError:
            print 'No case was loaded'
        else:
            print 'Current loaded case: ', self.load_case
            try:
                self.matcher
            except AttributeError:
                print 'No matcher was loaded'
            else:
                result = self.matcher.match(self.load_case, 1)
                print 'result: '
                for r in result:
                    print r
                    print '\n'
Ejemplo n.º 50
0
	def testEmptyString(self):
		self.assertTrue(Matcher.isMatching(""))
Ejemplo n.º 51
0
    def match(apps):

        match = Matcher()
        #match.simpleallocator(apps)
        match.allocate(apps)
        match.allocateWithMemoryMigration(apps, 80)
Ejemplo n.º 52
0
from CVImage import CVImage
from Matcher import Matcher
import cv2
import numpy as np
from VisualOdometry import VisualOdometry
import matplotlib as mplt
import matplotlib.pyplot as plt
import time

match = Matcher()
img = CVImage('/home/cesar/Documentos/Computer_Vision/01/image_0')
img.read_image()
img.copy_image()
img.acquire()
h = img.new_image.shape[0]
print "IMAGE HEIGHT", h
w = img.new_image.shape[1]
print "IMAGE WITH", w
n = 2  # Number of roi's
size = np.array([[w / n], [h / n]], np.int32)
start = np.array([[0], [0]], np.int32)
# Create roi
roi = img.crop_image(start, size, img.new_image)
roi_prev = img.crop_image(start, size, img.prev_image)

match.match(roi, roi_prev)
print "good_matches bf", len(match.good_matches)

print "good_kp1", len(match.good_kp1)

print "good_kp2", len(match.good_kp2)
Ejemplo n.º 53
0
def run():
    match = Matcher()
    img = CVImage('/home/cesar/Documentos/Computer_Vision/01/image_0')
    # img = CVImage('/home/cesar/Documentos/Computer_Vision/images_test')

    # Load images
    img.read_image()
    img.copy_image()
    img.acquire()
    # t = threading.Thread(target=plot_image, args=(img.new_image, ))
    # t.start()

    # Correlate

    p1, p2 = correlate_image(match, img, 2, 7)
    print ("Total number of keypoints in second image: \
           {}".format(len(match.global_kpts1)))

    print ("Total number of keypoints in first image: \
           {}".format(len(match.global_kpts2)))

    if not match.is_minmatches:
        print "There aren't matches after filtering. Iterate to next image..."
        return

    # Plot keypoints
    plot_matches(match, img)
    # t.__stop()

    # Now, estimate F
    vo = VisualOdometry()
    match.global_kpts1, match.global_kpts2 = \
        vo.EstimateF_multiprocessing(match.global_kpts2, match.global_kpts1)

    # Get structure of the scene, up to a projectivity
    scene = get_structure(match, img, vo)

    # Optimize F
    # param_opt, param_cov = vo.optimize_F(match.global_kpts1, match.global_kpts2)
    # vo.cam2.set_P(param_opt[:9].reshape((3, 3)))
    # scene = vo.recover_structure(param_opt)

    # Plot it
    plot_scene(scene)

    # Get the Essential matrix
    vo.E_from_F()
    print vo.F
    print vo.E

    # Recover pose
    R, t = vo.get_pose(match.global_kpts1, match.global_kpts2,
                       vo.cam1.focal, vo.cam1.pp)
    print R

    print t

    # Compute camera matrix 2
    print "CAM2", vo.cam2.P
    vo.cam2.compute_P(R, t)
    print "CAM2", vo.cam2.P

    # Get the scene
    scene = get_structure_normalized(match, img, vo)
    plot_scene(scene)

    # What have we stored?
    print ("Permanent Keypoints in the first image stored: \
           {}".format(type(match.curr_kp[0])))
    print ("Permanent descriptors in the first image stored: \
           {}".format(len(match.curr_dsc)))

    print ("Format of global keypoints: \
           {}".format(type(match.global_kpts1)))

    print ("Format of global keypoints: \
            {}".format(type(match.global_kpts1[0])))
    print ("Shape of global kpts1: {}".format(np.shape(match.global_kpts1)))

    # print ("global keypoint: \
    #       {}".format(match.global_kpts1[0]))
    # Acquire image
    img.copy_image()
    img.acquire()
    d, prev_points, points_tracked = match.lktracker(img.prev_image, \
                                                     img.new_image,
                                                     match.global_kpts2)
    print ("Points tracked: \ {}".format(len(points_tracked)))

    plot_two_points(np.reshape(match.global_kpts2,
                               (len(match.global_kpts2), 2)),
                    prev_points, img)
    test = []
    for (x, y), good_flag in zip(match.global_kpts2, d):
        if not good_flag:
            continue
        test.append((x, y))
    # plot_two_points(np.reshape(match.global_kpts2, (len(match.global_kpts2), 2)),
     #                np.asarray(points_tracked), img)
    plot_two_points(np.asarray(test), np.asarray(points_tracked), img)
    # points, st, err = cv2.calcOpticalFlowPyrLK(img.prev_grey, img.new_image,
    #                                            match.global_kpts2, None,
    #                                            **lk_params)
    # print len(points)
    print "Shape of p1: {}".format(np.shape(p1))
    plane = vo.opt_triangulation(p1, p2,
                                 vo.cam1.P, vo.cam2.P)
    plot_scene(plane)
    print "Shpe of plane: {}".format(np.shape(plane))
    print "Type of plane: {}".format(type(plane))
    print np.transpose(plane[:, :3])
    plane = np.transpose(plane)
    print "shape plane: {}".format(np.shape(plane))
    plane_inhomogeneous = np.delete(plane, 3, 1)
    print "shape plane: {}".format(np.shape(plane_inhomogeneous))
    print plane_inhomogeneous[:3, :]
    # Use ransac to fit a plane
    debug = False
    plane_model = RansacModel(debug)
    ransac_fit, ransac_data = ransac.ransac(plane_inhomogeneous,
                                            plane_model,
                                            4, 1000, 1e-4, 50,
                                            debug=debug, return_all=True)
    print "Ransac fit: {}".format(ransac_fit)
    # PLot the plane
    X, Y = np.meshgrid(np.arange(-0.3, 0.7, 0.1), np.arange(0, 0.5, 0.1))
    Z = -(ransac_fit[0] * X + ransac_fit[1] * Y + ransac_fit[3]) / ransac_fit[2]
    plot_plane(X, Y, Z, plane_inhomogeneous[ransac_data['inliers']])
Ejemplo n.º 54
0
from CVImage import CVImage
from Matcher import Matcher
import cv2
import numpy as np
from VisualOdometry import VisualOdometry
import matplotlib as mplt
import matplotlib.pyplot as plt

match = Matcher()
img = CVImage('/home/cesar/Documentos/Computer_Vision/01/image_0')
img.read_image()
img.copy_image()
img.acquire()
h = img.new_image.shape[0]
w = img.new_image.shape[1]
n = 2  # Number of roi's
size = np.array([[w / n], [h / n]], np.int32)
start = np.array([[0], [0]], np.int32)
# Create roi
roi = img.crop_image(start, size, img.new_image)
roi_prev = img.crop_image(start, size, img.prev_image)

match.match(roi, roi_prev)
print "good_matches bf", len(match.good_matches)

print "good_kp1", len(match.good_kp1)

print "good_kp2", len(match.good_kp2)

match.draw_matches(roi, match.good_matches)
cv2.namedWindow('roi', cv2.WINDOW_NORMAL)
Ejemplo n.º 55
0
	def testOnePairOfParenthesis(self):
		self.assertTrue(Matcher.isMatching("()"))
Ejemplo n.º 56
0
def run(movie_dir, html_output_flag, limit):
    """This is the real entry point for the program"""

    #A class to help lookup movie titles
    movielookup = MovieLookup()

    #Match files in a given directory
    matcher = Matcher(Config.movie_match_regex, Config.allowed_file_types)

    #Used to find an imdb id from movie filename
    id_finder = IdFinder()

    #Used for caching movie data
    movie_cache = Cache(Config.movie_cache_file)

    #First, let's match files which match the regex and have the
    #required file extensions in the given directory
    matcher.find_in_directory(movie_dir)
    movie_matches = matcher.get_matches()
    unmatched = matcher.get_ignored()

    #normalise the matches (the filenames will be used as movie titles)
    normalised_movie_matches = Normaliser\
        .normalise_list_and_remove_trailing_number(movie_matches)

    #Now we lookup successful matches, first in the cache, then online
    movie_data = {}      #successful lookup data will go here
    failed_lookups = []  #we will do something with failed lookups later...

    count = 0   #used to limit the number of lookups we will do
    for title in normalised_movie_matches:
        count += 1
        if count >= limit:  # check that we don't go over the arbitrary limit
            break

        #Check if the movie is in our cache
        cached_movie = movie_cache.get(title)
        if cached_movie:
            movie_data[title] = cached_movie
        #Otherwise, lookup using API
        else:
            #look up each movie in the list
            lookup_data = movielookup.lookup_by_title(title)

            #check if we found a movie
            if MovieDataUtil.is_valid_lookup_result(lookup_data):
                movie_data[title] = lookup_data
                #great, let's also add it to the cache
                movie_cache.add_to_cache(title, lookup_data)
            else:
                failed_lookups.append(title)

    #now we will try to correct the failed lookups
    #by using google to find each imdb id
    id_lookup_dict = id_finder.find_id_by_title_list(failed_lookups)

    #reset the failed lookups
    failed_lookups = []      #there should be a lot less now...
    title_corrections = 0    #count how many corrections we actually found

    #Now lookup using the new ids which we found
    for title, found_id in id_lookup_dict.items():
        if found_id is not None:
            #we found an id, now let's look the movie up by its id
            lookup_data = movielookup.lookup_by_id(found_id)

            #theoretically this should always be true
            #unless we got an invalid id somehow...
            if MovieDataUtil.is_valid_lookup_result(lookup_data):
                movie_data[title] = lookup_data
                title_corrections += 1
                #great, let's also add it to the cache
                movie_cache.add_to_cache(title, lookup_data)
            else:
                failed_lookups.append(title)
        else:
            failed_lookups.append(title)

    #Save the updated cache
    movie_cache.save_cache_to_disk()

    #sort the data by imdb id
    movie_data = MovieDataUtil.sort_movie_data(movie_data)

    #Output the data
    if html_output_flag:
        logging.debug('Loading template from: %s', Config.template_directory)
        template_environment = Environment( \
                        loader=FileSystemLoader( \
                        Config.template_directory), trim_blocks=True)
        print template_environment.get_template('main.html').render(
            movie_lookup_data=movie_data,
            failed_lookups=failed_lookups,
            unmatched=unmatched,
            title_corrections=title_corrections,
            datetime=time.strftime("%c"),
            version=__version__,
            author=__author__,
            cache_stats=movie_cache.cache_stats(),
        )
    else:
        simple_output(movie_data, failed_lookups, unmatched)