Example 1
def create_matcher():
    global ma, matchready, dbpath, querypath, labels, buttons, extractpick
    expick = extractpick.get()
    if expick == "Extract from files":
        ma = Matcher(dbpath, expick)
        matchready = True
    else:
        pckpath = filedialog.askopenfilename(initialdir=" ", title="Select Pickle File...", filetypes=(("Pickle files", "*.pck"), ("All files", "*.*")))
        if pckpath != "":
            ma = Matcher(dbpath, expick, pckpath)
            matchready = True
    # only enable the match/save buttons once a Matcher has actually been created
    if matchready:
        if querypath != "" and querypath != "(No file selected)":
            buttons["match_button"].config(state="normal", fg="purple")
        buttons["save_pickle"].config(state="normal")
Example 2
    def _es_bulk_load(self):
        print "sending batch of " + str(len(self.temp))
        # http://www.elasticsearch.org/guide/reference/api/bulk.html
        data = ''
        for r in self.temp:
            #print("\n\nRAW DATA")
            #print(r)
            #print("\n\n To CONVERT")
            #print(self.convert(r))
            #print("\nJSON")
            #print(json.dumps( r ))

            data += json.dumps( {'index':{'_id': r['_id']}} ) + '\n'
            data += json.dumps( r ) + '\n'
        # keep a reference to the batch before clearing self.temp, so it can still be matched below
        batch = self.temp
        self.temp = []

        r = requests.post(Config.elasticsearch['uri_records'] + '_bulk', data=data)

        # if matching is enabled, then try to match whatever was in the batch to the rest of the index content
        if Config.importer['load']['pubmedcentral']['do_bulk_match']:
            print "matching"
            m = Matcher()
            m.citesandcitedby(batch)

        return r # passing back the POST info in case it is useful
Example 3
    def createRawP(self):
        ''' This function generates a list of raw probabilities directly from image matching'''
        if self.method != 'BOW':
            print('Creating indices...')
            self.createIndex()
        start = time.time()
        p = []
        matcher = Matcher(self.method, width=self.w, height=self.h)
        print('Matching...')
        for imagePath in glob.glob('cam1_img' + '/*' + extension):
            matcher.setQuery(imagePath)
            results = []
            for i in range(self.numLocations):
                matcher.setDirectory('map/' + str(i))
                if self.method != 'Color':
                    matcher.setIndex(self.indices[i])
                else:
                    matcher.setColorIndex(self.indices[i])
                totalMatches, probL = matcher.run()
                results.append([totalMatches, probL])

            p.extend(results)  
            print('\t' + imagePath)
        self.rawP = p
        self.writeProb(p, 'rawP.txt', 'w')

        end = time.time()
        print('Time elapsed: %0.1f' % (end-start))
Example 4
 def __init__(self, frames_list, Hs):
     self.images = frames_list
     self.count = len(self.images)
     self.left_list, self.right_list, self.center_im = [], [], None
     self.matcher_obj = Matcher()
     self.prepare_lists()
     self.Hs = Hs
Example 5
    def parseStatement(self, statement, lineNumber, indentation):
        '''
        Parses statement lines, e.g. `if matcher -> action`. Also tracks indentation to create
        nested statements.
        '''
        # filter out the 'if' from the start
        tmp = statement[3:].split('->')
        if len(tmp) != 2:
            return handleParseError(lineNumber, statement, "Missing '->'.")

        condition = tmp[0].lstrip().rstrip()
        result = tmp[1].lstrip().rstrip()

        if not condition:
            return handleParseError(lineNumber, statement,
                                    "Missing Matcher before ->.")
        if condition in self.matchers:
            condition = self.matchers[condition]
        elif condition[0] == '{' and condition[-1] == '}':
            condition = Matcher(condition, statement, lineNumber)
        else:
            return handleParseError(
                lineNumber, statement,
                "Unknown Matcher or missing {} before ->.")

        # need to handle there not being a specified result
        if not result:
            filter = Filter(condition)
        else:
            if result in self.actions:
                result = self.actions[result]
            elif result[0] == '[' and result[-1] == ']':
                result = Action(result, statement, lineNumber)
            else:
                return handleParseError(
                    lineNumber, statement,
                    "Unknown Action or missing [] after ->.")
            filter = Filter(condition, result)

        if indentation == 0:
            self.filters.append(filter)
            return

        if not self.filters:
            return handleParseError(
                lineNumber, statement,
                "Invalid indentation, this line has no parent")

        parentFilter = self.filters[-1]
        indentation -= 1

        while (indentation > 0):
            if not parentFilter.childStatements:
                return handleParseError(
                    lineNumber, statement,
                    "Invalid indentation, this line has no parent")
            parentFilter = parentFilter.childStatements[-1]
            indentation -= 1
        parentFilter.addChild(filter)
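Example 6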
    def loadBalance(self):

        #self.initVirtualApplication(mon, virtualMachines)
        #print self.virtualApplications
        sortedApp = self.sortByNoNodes(self.virtualApplications)
        #print sortedApp

        match = Matcher()
        match.next_fit(sortedApp)
        return sortedApp
Example 7
 def findInfoWithMethod(self):
     'Finds and collects all needed information'
     result = []
     list_of_sentence = sent_tokenize(self.text)
     articleDate = ""  # initialised before the loop so the return value is defined even with no sentences
     for sent in list_of_sentence:
         res = Matcher(sent.lower(), self.keyword)
         r = 0
         if self.method == 'optionBM':
             r = res.BMMatch()
         elif self.method == 'optionKMP':
             r = res.KMPMatch()
         elif self.method == 'optionRE':
             r = res.REMatch()
         if r > -1:
             result.append(sent)
         if articleDate == "" or articleDate == " ":
             resDate = Matcher(sent, self.keyword)
             # articleDate = resDate
     return result, articleDate
Example 8
 def __buttonpress(self):  # rename drugs handler
     try:
         from Matcher import Matcher
         matcher = Matcher(self.mas[0], self.mas[1], self.mas[2], self.mas[3])
         try:
             matcher.rename_drugs()
             self.showComplete()
         except IOError:
             from Widgets import Widgets
             Widgets.showFNF()
     except Exception as e:
         print(e)
Example 9
 def createIndex(self):
     ''' This function creates indices of features '''
     matcher = Matcher(self.method, width=self.w, height=self.h)
     if self.method != 'BOW':
         for i in range(self.numLocations):
             matcher.setDirectory('map/' + str(i))
             if self.method != 'Color':
                 self.indices[i] = matcher.createFeatureIndex()
             else:
                 self.indices[i] = matcher.createColorIndex()
     else:
         matcher.writeIndices()
Example 10
 def createIndex(self):
     """
     Create the color or feature indices, depending on the method.
     """
     matcher = Matcher(self.method, width=self.w, height=self.h)
     if self.method != 'BOW':
         for i in range(self.numLocations):
             matcher.setDirectory('map/' + str(i))
             if self.method != 'Color':
                 self.indices[i] = matcher.createFeatureIndex()
             else:
                 self.indices[i] = matcher.createColorIndex()
     else:
         matcher.writeIndices()
Example 11
    def __init__(self, robot):
        self.h, self.w = 320, 240
        self.numLocations = 7
        # host = '134.173.24.116'
        # port = 5003
        # print('Waiting for Connection....')
        # self.ipad= socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # self.ipad.connect((host,port))
        # print('Connected!')
        self.matcher = Matcher('BOW', None, self.h, self.w)
        self.frame = self.readImage()
        self.robot = robot

        # for tracking the image during runs
        self.imageIndex = 0
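Example 12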
 def __init__(self, params=None):
     """ Constructor
     """
     if params is None:
         params = dict(detector='orb', matcher='bf')
     self.matcher = Matcher(params)
     self.F = None
     self.mask = None
     self.H = None
     self.right_e = None
     self.left_e = None
     self.cam = Camera()
     self.E = None  # Essential matrix
     self.index = 0
     self.scene = Map()
Example 13
    def do_loadCaseLibrary(self, arg):
        """
        Loads the case library.
        Use:
            loadCaseLibrary <filename>      Filename must be a pickle file
        """
        filename = arg
        if os.path.isfile(filename):
            if filename.endswith('.pickle'):
                with open(filename, "rb") as fp:
                    self.cases = pickle.load(fp)

                self.matcher = Matcher(self.cases)
                print '%d loaded cases' % len(self.cases)
            else:
                print 'Filename must be a pickle file'
        else:
            print 'Filename does not exist'
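Example 14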
def test():
    for line in sys.stdin:
        # comma separated: file_path,words...
        file_path, *words = line.strip().split(",")
        n = get_filename(file_path)
        annotations = get_annotations(n)

        if skip_annotation(annotations):
            continue  # Skip the files we don't care about

        matcher = Matcher(annotations, words)
        word_n = len(annotations)
        if annotations:
            perfect_matches = matcher.get_perfect_matches()
            ign_symbols = matcher.get_perfect_matches_ignoring_symbols()
            perf = len(perfect_matches) / word_n * 100
            ign = len(ign_symbols) / word_n * 100
            unmatched = matcher.get_number_unmatched()
Example 15
    def createNewResume(self, name, hpNumber, email, contentName, content):
        con = None
        try:
            con = psycopg2.connect(
                database='d1s3idai1l2u3d',
                user='******',
                password='******',
                host='ec2-54-197-241-24.compute-1.amazonaws.com',
                port='5432',
                sslmode='require')
            cur = con.cursor()
        except psycopg2.DatabaseError as e:
            print('Error %s' % e)
            sys.exit(1)
        finally:
            if con:
                cur.execute("SELECT * FROM job")
                rows = cur.fetchall()
                numRows = (len(rows))
                newResume = ResumeNode(name, hpNumber, email, contentName,
                                       content)

                if (numRows == 0):
                    ResumeProcessor.construct(newResume)
                    toPrint = encodeClassToJson(newResume)
                    cur.execute(
                        "INSERT INTO resume VALUES (%s,%s,%s,%s,%s,%s)",
                        (toPrint, 'f', contentName, name, hpNumber, email))
                    con.commit()
                else:
                    ResumeProcessor.construct(newResume)
                    toPrint = encodeClassToJson(newResume)
                    cur.execute(
                        "INSERT INTO resume VALUES (%s,%s,%s,%s,%s,%s)",
                        (toPrint, 'f', contentName, name, hpNumber, email))
                    con.commit()
                    f = Facade()
                    matcher = Matcher(f)
                    scorer = Scorer(f)
                    matcher.matchAll(1)
                    scorer.calculateScore()
                con.close()
Example 16
def audio_matcher():
    """Our main control flow."""

    parser = ArgumentParser(
        description="Compare two audio files to determine if one "
        "was derived from the other. Supports WAVE and MP3.",
        prog="audiomatch")
    parser.add_argument("-f",
                        action="append",
                        required=False,
                        dest="files",
                        default=list(),
                        help="A file to examine.")
    parser.add_argument("-d",
                        action="append",
                        required=False,
                        dest="dirs",
                        default=list(),
                        help="A directory of files to examine. "
                        "Directory must contain only audio files.")

    args = parser.parse_args()

    search_paths = args.dirs + args.files

    if len(search_paths) != 2:
        die("Must provide exactly two input files or directories.")

    code = 0
    # Use our matching system
    matcher = Matcher(search_paths[0], search_paths[1])
    results = matcher.match()

    for match in results:
        if not match.success:
            code = 1
            warn(match.message)
        else:
            print match

    return code
Example 17
def executeCommand(text):
    matcher = Matcher()
    command = matcher.getContext(text)
    allTaskString = ""

    if (command == Context.updateTask):
        print(1)
    elif (command == Context.getAllTask):
        allTask = getAllTask()
        # print(allTask)
        return stringAllTask(allTask)
    elif (command == Context.getRangeTimeTask):
        dates = matcher.extractDate(text)
    elif (command == Context.getSpesificTimeLeftTask):
        N = int(matcher.nDateExtractor(text))
        Endate = datetime.today() + timedelta(days=N)
        Endate = matcher.dateToString(Endate)
        allTask = getSpesificTimeLeftTask(Endate)
        return stringAllTask(allTask)
    elif (command == Context.deleteTask):
        namaMatkul = matcher.extractMatkul(text)
        jenis = matcher.extractJenis(text)
        deleteOneTask(jenis, namaMatkul)
        return ""
def get_results():
    """
    :return: a list of lists containing
    [num_students_in_str_1, ..., num_students_in_str_n, avg_satisfaction_of_str_1, ..., avg_satisfaction_of_str_n,
    overall_avg_satisfaction]
    """
    cakes = get_cakes_for_all_situations()
    cakes_counter = 0  # just to check progress

    for cake in cakes:
        # just to check progress :
        if PRINT_EVERYTHING and cakes_counter % 10000 == 0:
            print("   Num of lines in file: " + str(cakes_counter) + "/" +
                  str(NUM_OF_TEST_CASES) + ", progress rate: " +
                  str(100 *
                      round(float(cakes_counter / NUM_OF_TEST_CASES), 2)))

        strategy_dict = get_strategy_dict(cake)
        matcher = Matcher(ALL_COURSES, strategy_dict, CLASS_SIZE).match()
        cake += get_satisfactions(matcher)

        cakes_counter += 1  # just to check progress ..

    return cakes
Example 19
 def parseVariable(self, keyword, value, line, lineNumber):
     '''Parses variable lines. eg x = [|{ content }|]'''
     if value[0] != '=':
         return handleParseError(lineNumber, line,
                                 "Assignment operator '=' not found.")
     value = value[1:].lstrip().rstrip()
     if not value:
         return handleParseError(
             lineNumber, line, "Nothing found after assignment operator.")
     if value[0] == '{':
         if value[-1] == '}':
             self.matchers[keyword] = Matcher(value, line, lineNumber)
         else:
             return handleParseError(lineNumber, line,
                                     "failed to find closing }")
     elif value[0] == '[':
         if value[-1] == ']':
             self.actions[keyword] = Action(value, line, lineNumber)
         else:
             return handleParseError(lineNumber, line,
                                     "failed to find closing ]")
     else:
         return handleParseError(lineNumber, line,
                                 "failed to find opening brace: { or [")
def main():
    # maybe make it read from multiple places?
    # samples = ["CRAFT", "EAST", "USC", "GCP_lang_hints", "AWS", "GCP"]
    samples = ["GCP", "GCP_crops", "CRAFT_attn"]
    # samples = ["AWS"]
    filenames = ["{}_indo.txt".format(name) for name in samples]

    guessed_words = []
    annotations = {}
    data = {i: {} for i in range(len(filenames))}
    name_dict = {i: name for i, name in enumerate(samples)}

    for i, filename in enumerate(filenames):
        with open(filename, "r") as f:
            lines = f.readlines()
            for line in lines:
                file_path, *words = line.strip().split(",")
                words = [x for x in words if len(x) != 0]
                # Get the correct words
                n = get_filename(file_path)
                if n not in annotations:
                    img_annotations = get_annotations(n)
                    annotations[n] = img_annotations

                # i represents which of the sources it came from - CRAFT, EAST,
                # USC
                guessed_words.append((i, n, words))

    possible_true_words_dict = None
    for i, n, words in guessed_words:
        # i represents the source
        # get the real annotations
        annotation = annotations[n]
        # Skip the annotation
        if skip_annotation(annotation):
            continue

        matcher = Matcher(annotation,
                          words,
                          possible_true_words_dict=possible_true_words_dict)

        possible_true_words_dict = matcher.possible_true_words_dict

        word_n = len(annotation)
        if word_n == 0:
            # TODO remove this, if we end up testing with no text files
            continue

        if annotations:
            perfect_matches = matcher.get_perfect_matches()
            ign_symbols = matcher.get_perfect_matches_ignoring_symbols()
            perf = len(perfect_matches) / word_n * 100
            ign = len(ign_symbols) / word_n * 100
            # vocab_matched = matcher.get_vocab_matches()
            # vcb = len(vocab_matched) / word_n * 100
            # mismatched = matcher.get_imperfect_matches(1)
            # msm = len(mismatched)/word_n * 100
            # percent_matched = perf + ign  + vcb# maybe lets ignore the mismatched
            percent_matched = perf + ign
            # ones?
            # percent_matched = perf
            data[i][n] = percent_matched
            unmatched = matcher.get_number_unmatched()

            if unmatched > 4:  # ignore all but craft
                # if i == 3: # ignore all but craft
                print(name_dict[i], n)
                print("ANNOTATED", *matcher.get_unmatched_annotated())
                print("DETECTED", *matcher.get_unmatched_detected())
                print(">", matcher.char_level_accuracy)
                print("=================================")
            # print(n, perf, ign, unmatched)

    for k in data:
        avg = sum(data[k].values()) / (len(list(data[k].values())))
        print(name_dict[k], '\t', round(avg, 2))
Example 21
from Matcher import Matcher
from json import dumps
from sys import argv

indexd_location = argv[1]
m = Matcher(indexd_location)
print dumps(m.run())
Example 22
    def optP(self):
        """
        Dynamically Optimized Retrieval (DOR), which works by only considering the nearest particles
        to the current position and angle, and assigning small non-zero probabilities to the other
        particles
        """

        if self.method != 'BOW':
            print('Creating indices...')
            self.createIndex()

        blurP = []
        previousProbs = []
        bestAngleIndex = None
        bestCircleIndex = None

        # initialize probability list
        for i in range(self.numLocations):
            previousProbs.append([1, [1 / 75] * 25])

        matcher = Matcher(self.method, width=self.w, height=self.h)
        start = time.time()
        print('Matching...')

        for imagePath in glob.glob('cam1_img' + '/*' + extension):
            p = []
            matcher.setQuery(imagePath)
            results = []
            if bestCircleIndex is None:
                for i in range(self.numLocations):
                    matcher.setDirectory('map/' + str(i))
                    if self.method != 'Color':
                        matcher.setIndex(self.indices[i])
                    else:
                        matcher.setColorIndex(self.indices[i])

                    # Call the optimized image matching algorithm in Matcher
                    totalMatches, probL = matcher.optRun(bestAngleIndex)
                    results.append([totalMatches, probL])

            else:

                # Only consider the positions that are 2 locations away from current position
                lower = bestCircleIndex - 2
                upper = bestCircleIndex + 2

                for i in range(self.numLocations):
                    if i >= lower and i <= upper:
                        matcher.setDirectory('map/' + str(i))
                        if self.method != 'Color':
                            matcher.setIndex(self.indices[i])
                        else:
                            matcher.setColorIndex(self.indices[i])
                        totalMatches, probL = matcher.optRun(bestAngleIndex)
                        results.append([totalMatches, probL])
                    else:
                        results.append([1, [1 / 75] * 25])

            p.extend(results)
            print('\t' + imagePath)

            # Read and account for command
            command = self.commands[imagePath.replace('cam1_img/', '').replace(
                extension, '')]
            actionAccount = self.accountCommand(command, previousProbs)

            # Weight the previous generation of probabilities
            adjusted = self.prevWeight(actionAccount, p)

            # Adjusting for Blur
            blurFactor = self.Laplacian(imagePath)
            adjusted = self.probUpdate(actionAccount, adjusted, blurFactor)

            # Calculate position and angle
            bestCircleIndex = adjusted.index(max(adjusted))
            bestAngleIndex = adjusted[bestCircleIndex][1].index(
                max(adjusted[bestCircleIndex][1]))
            self.bestGuess.extend([[bestCircleIndex, bestAngleIndex]])
            blurP.extend(adjusted)
            previousProbs = adjusted

        self.blurP = blurP
        self.writeProb(self.blurP, 'out.txt', 'w')
        self.writeProb(self.bestGuess, 'bestGuess.txt', 'w')
        end = time.time()
        print('Time elapsed: %0.1f' % (end - start))
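Example 23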
    def match(apps):

        match = Matcher()
        #match.simpleallocator(apps)
        match.allocate(apps)
        match.allocateWithMemoryMigration(apps, 80)
Example 24
    #cv2.imshow("pattern_gray", imutils.resize(pattern_gray,width=600))
    pattcnt = 0
    for patt in pattlst:
        pattcnt += 1
        pattname = os.path.split(patt)[-1].split('.')[0]
        print("Processing pattern ({}) {}/{}".format(pattname, pattcnt,
                                                     len(pattlst)))
        pattern = cv2.imread(patt, cv2.IMREAD_UNCHANGED)
        #    pattern2 = cv2.resize(pattern, (600,600))
        pattern2 = pattern.copy()
        pattern2 = convert_rgba2rgb(pattern2)
        if blur_images:
            pattern2 = cv2.GaussianBlur(pattern2, BLER_KERNEL_SIZE, 0)

        if useMatcher:
            matcher = Matcher()
            (matches, status, vis) = matcher.match([image2, pattern2],
                                                   ratio=LRatio,
                                                   showMatches=True,
                                                   useRootSIFT=useRootSIFT)
#      match_ctr = np.sum(status)
#      (kpsA, featuresA) = matcher.detectAndDescribe(image2, useRootSIFT=useRootSIFT)
#      (kpsB, featuresB) = matcher.detectAndDescribe(pattern2, useRootSIFT=useRootSIFT)
        else:
            matches, vis = matchImages(pattern2, image2, useFlann=useFlann)

        if matches is None:
            match_ctr = 0
        else:
            match_ctr = len(matches)
Example 25
 def to_matcher(self):
     """->create a matching graph from this graph"""
     return Matcher(self.to_graph(), undirected=self.undirected)
Example 26
 def matches_tenth_axiom(self, expression: Node):
     return Matcher().matches(expression, self.tenth_axiom_schema)
Example 27
 def matches_axiom(self, expression: Node):
     for schema in self.axiom_schemas:
         if Matcher().matches(expression, schema):
             return True
     return False
Example 28
 rows = cur.fetchall()
 numRows = (len(rows))
 newJob = JobDescNode(contentID, contentFile, keyword)
 if (numRows == 0):
     ResumeProcessor.construct(newJob)
     toPrint = encodeClassToJson(newJob)
     cur.execute("INSERT INTO job VALUES (%s,%s,%s,%s)",(toPrint,'f', contentID ,contentName))
     con.commit()
     print('just store job')
 else:
     ResumeProcessor.construct(newJob)
     toPrint = encodeClassToJson(newJob)
     cur.execute("INSERT INTO job VALUES (%s,%s,%s,%s)",(toPrint,'f', contentID ,contentName))
     con.commit()
     f = Facade()
     matcher = Matcher(f)
     scorer = Scorer(f)
     cur.execute("SELECT isonce_resume FROM once")
     rows = cur.fetchall()
     for row in rows:
         if(row[0] is True):
             cur.execute("UPDATE once SET isonce_resume=%s",('f',))
             con.commit()
             print('calling match 0 --1 ')
             matcher.matchAll(0)
             scorer.calculateScore()
             print('calling match 0 --2')
         else:
             matcher.matchAll(2)
             scorer.calculateScore()
             print('fdsfds')
Example 29
    def optP(self):
        if self.method != 'BOW':
            print('Creating indices...')
            self.createIndex()
        blurP = []
        previousProbs = []
        bestAngleIndex = None
        bestCircleIndex = None
        for i in range(self.numLocations):
            previousProbs.append([1, [1/75] * 25])
        matcher = Matcher(self.method, width=self.w, height=self.h)
        start = time.time()
        print('Matching...')
        for imagePath in glob.glob('cam1_img' + '/*' + extension):
            p = []
            matcher.setQuery(imagePath)
            results = []
            if bestCircleIndex is None:
                for i in range(self.numLocations):
                    matcher.setDirectory('map/' + str(i))
                    if self.method != 'Color':
                        matcher.setIndex(self.indices[i])
                    else:
                        matcher.setColorIndex(self.indices[i])
                    totalMatches, probL = matcher.optRun(bestAngleIndex)
                    results.append([totalMatches, probL])
            else:
                lower = bestCircleIndex - 2
                upper = bestCircleIndex + 2
                for i in range(self.numLocations):
                    if i >= lower and i <= upper:
                        if self.method != 'BOW':
                            matcher.setDirectory('map/' + str(i))
                            if self.method != 'Color':
                                matcher.setIndex(self.indices[i])
                            else:
                                matcher.setColorIndex(self.indices[i])
                            totalMatches, probL = matcher.optRun(bestAngleIndex)
                            results.append([totalMatches, probL])
                        else:
                            totalMatches, probL = matcher.run()
                            results.append([totalMatches, probL])
                    else:
                        results.append([1, [1/75] * 25])


            p.extend(results)  
            print('\t' + imagePath)
            blurFactor = self.Laplacian(imagePath)

            # Reading Command
            command = self.commands[imagePath.replace('cam1_img/', '').replace(extension, '')]

            # Account for Command
            actionAccount = self.accountCommand(command, previousProbs)

            # Adjusting for Command
            adjusted = self.prevWeight(actionAccount, p)

            # Adjusting for Blur
            adjusted = self.probUpdate(actionAccount, adjusted, blurFactor)


            # Getting best guess
            # this will get the max of the first variable
            bestCircleIndex = adjusted.index(max(adjusted))
            bestAngleIndex = adjusted[bestCircleIndex][1].index(max(adjusted[bestCircleIndex][1]))
            self.bestGuess.extend([[bestCircleIndex, bestAngleIndex]])
            blurP.extend(adjusted)
            previousProbs = adjusted
            # print(imagePath)

        self.blurP = blurP
        self.writeProb(self.blurP, 'out.txt', 'w')
        self.writeProb(self.bestGuess, 'bestGuess.txt', 'w')
        end = time.time()
        print('Time elapsed: %0.1f' % (end-start))
Example 30
def run(movie_dir, html_output_flag, limit):
    """This is the real entry point for the program"""

    #A class to help lookup movie titles
    movielookup = MovieLookup()

    #Match files in a given directory
    matcher = Matcher(Config.movie_match_regex, Config.allowed_file_types)

    #Used to find an imdb id from movie filename
    id_finder = IdFinder()

    #Used for caching movie data
    movie_cache = Cache(Config.movie_cache_file)

    #First, let's match files which match the regex and have the
    #required file extensions in the given directory
    matcher.find_in_directory(movie_dir)
    movie_matches = matcher.get_matches()
    unmatched = matcher.get_ignored()

    #normalise the matches (the filenames will be used as movie titles)
    normalised_movie_matches = Normaliser\
        .normalise_list_and_remove_trailing_number(movie_matches)

    #Now we lookup successful matches, first in the cache, then online
    movie_data = {}  #successful lookup data will go here
    failed_lookups = []  #we will do something with failed lookups later...

    count = 0  #used to limit the number of lookups we will do
    for title in normalised_movie_matches:
        count += 1
        if count >= limit:  #check that we don't go over the arbitrary limit
            break

        #Check if the movie is in our cache
        cached_movie = movie_cache.get(title)
        if cached_movie:
            movie_data[title] = cached_movie
        #Otherwise, lookup using API
        else:
            #look up each movie in the list
            lookup_data = movielookup.lookup_by_title(title)

            #check if we found a movie
            if MovieDataUtil.is_valid_lookup_result(lookup_data):
                movie_data[title] = lookup_data
                #great, let's also add it to the cache
                movie_cache.add_to_cache(title, lookup_data)
            else:
                failed_lookups.append(title)

    #now we will try to correct the failed lookups
    #by using google to find each imdb id
    id_lookup_dict = id_finder.find_id_by_title_list(failed_lookups)

    #reset the failed lookups
    failed_lookups = []  #there should be a lot less now...
    title_corrections = 0  #count how many corrections we actually found

    #Now lookup using the new ids which we found
    for title, found_id in id_lookup_dict.items():
        if found_id is not None:
            #we found an id, now let's look the movie up by its id
            lookup_data = movielookup.lookup_by_id(found_id)

            #theoretically this should always be true
            #unless we got an invalid id somehow...
            if MovieDataUtil.is_valid_lookup_result(lookup_data):
                movie_data[title] = lookup_data
                title_corrections += 1
                #great, let's also add it to the cache
                movie_cache.add_to_cache(title, lookup_data)
            else:
                failed_lookups.append(title)
        else:
            failed_lookups.append(title)

    #Save the updated cache
    movie_cache.save_cache_to_disk()

    #sort the data by imdb id
    movie_data = MovieDataUtil.sort_movie_data(movie_data)

    #Output the data
    if html_output_flag:
        logging.debug('Loading template from: %s', Config.template_directory)
        template_environment = Environment( \
                        loader=FileSystemLoader( \
                        Config.template_directory), trim_blocks=True)
        print template_environment.get_template('main.html').render(
            movie_lookup_data=movie_data,
            failed_lookups=failed_lookups,
            unmatched=unmatched,
            title_corrections=title_corrections,
            datetime=time.strftime("%c"),
            version=__version__,
            author=__author__,
            cache_stats=movie_cache.cache_stats(),
        )
    else:
        simple_output(movie_data, failed_lookups, unmatched)