Example #1
import re
import urllib2  # the WeiboLogin and Matcher modules come from this project


def main():
    urlheader = 'http://s.weibo.com/weibo/'
    para = raw_input('请输入搜索内容:\n')  # prompt: enter the search term
    page = 1
    userlists = open('userlists').readlines()
    reg1 = re.compile(
        r'\\u4f60\\u7684\\u884c\\u4e3a\\u6709\\u4e9b\\u5f02\\u5e38\\uff0c\\u8bf7\\u8f93\\u5165\\u9a8c\\u8bc1\\u7801\\uff1a'
    )  # "Your behavior looks unusual, please enter the CAPTCHA"
    reg2 = re.compile(
        r'\\u62b1\\u6b49\\uff0c\\u672a\\u627e\\u5230')  # "Sorry, nothing found"
    for userlist in userlists:
        username = userlist.split()[0]
        password = userlist.split()[1]
        weibologin = WeiboLogin(username, password)
        user = False  # treat the account as unusable until login succeeds
        if weibologin.Login() == True:
            print '登录成功'  # login successful
            user = True  # account is usable
        while page <= 50 and user:
            url = urlheader + para + '&page=' + str(page)
            print '获取第%d页。。' % page  # fetching page N
            f = urllib2.urlopen(url)
            ### start matching the page content ###
            for line in f:
                if re.search(r'pid":"pl_weibo_direct"', line):  # the match has to be precise!
                    if reg2.search(line):
                        print '抱歉,未找到结果。。。'  # sorry, no results found
                        return
                    else:
                        Matcher.matcher(line)
                        page += 1
                        break
                if re.search(r'pid":"pl_common_sassfilter', line):
                    if reg1.search(line):
                        print '此帐号被锁,使用下一个帐号'  # account locked, switch to the next one
                        user = False  # account is unusable
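Note that the raw search term is concatenated straight into the URL; non-ASCII input should be percent-encoded first. A minimal sketch of that fix (urllib.quote is part of the Python 2 stdlib):

import urllib
url = urlheader + urllib.quote(para) + '&page=' + str(page)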
Example #2
import re
import urllib2  # the WeiboLogin and Matcher modules come from this project


def main():
    urlheader = 'http://s.weibo.com/weibo/'
    para = raw_input('请输入搜索内容:\n')  # prompt: enter the search term
    page = 1
    userlists = open('userlists').readlines()
    reg1 = re.compile(
        r'\\u4f60\\u7684\\u884c\\u4e3a\\u6709\\u4e9b\\u5f02\\u5e38\\uff0c\\u8bf7\\u8f93\\u5165\\u9a8c\\u8bc1\\u7801\\uff1a'
    )  # "Your behavior looks unusual, please enter the CAPTCHA"
    reg2 = re.compile(
        r'\\u62b1\\u6b49\\uff0c\\u672a\\u627e\\u5230')  # "Sorry, nothing found"
    for userlist in userlists:
        username = userlist.split()[0]
        password = userlist.split()[1]
        weibologin = WeiboLogin(username, password)
        user = False  # treat the account as unusable until login succeeds
        if weibologin.Login() == True:
            print '登录成功'  # login successful
            user = True  # account is usable
        while page <= 50 and user:
            url = urlheader + para + '&page=' + str(page)
            print '获取第%d页。。' % page  # fetching page N
            f = urllib2.urlopen(url)
            ### start matching the page content ###
            for line in f:
                if re.search(r'pid":"pl_weibo_direct"', line):  # the match has to be precise!
                    if reg2.search(line):
                        print '抱歉,未找到结果。。。'  # sorry, no results found
                        return
                    else:
                        Matcher.matcher(line)
                        page += 1
                        break
                if re.search(r'pid":"pl_common_sassfilter', line):
                    if reg1.search(line):
                        print '此帐号被锁,使用下一个帐号'  # account locked, switch to the next one
                        user = False  # account is unusable
Example #3
import os
import re
import pandas as pd  # "matcher" here is a project-specific module


def get_dataset(path):
    dataset = []
    for filename in os.listdir(path):
        t = open(path + filename, "r").read()
        # keep letters only, collapsing everything else to single spaces
        dataset.append([filename, re.sub('[^a-zA-Z]+', ' ', t)])
    dico = matcher.get_dico(dataset)  # dico is a column with the matching scores of the MDAs versus the Finance Dictionary
    df = pd.DataFrame(dataset)
    df[1] = pd.Series(dico)
    blob = matcher.get_blob(df)
    df[2] = pd.Series(blob)
    df.columns = ['Filename', 'MatchDico', 'TextBlob']
    return df
Example #4
def Alignfunction(Detector_name, Descriptor_name, Matcher_name,
                  ratio_test_threshold, imReference, imtoAlign):
    #TODO: wrap this in a try/except block so an unknown detector,
    #descriptor or matcher name raises a clear exception.

    #Convert images to grayscale
    im1Gray = cv2.cvtColor(imtoAlign, cv2.COLOR_BGR2GRAY)
    im2Gray = cv2.cvtColor(imReference, cv2.COLOR_BGR2GRAY)

    #Selecting the type of detector and descriptor
    detector, descriptor = DDI.Initialize_detector_descriptor(
        Detector_name, Descriptor_name)

    #Detecting keypoints and finding descriptors for the corresponding keypoints.
    keypoints1 = detector.detect(im1Gray, None)
    keypoints2 = detector.detect(im2Gray, None)
    keypoints1, descriptors1 = descriptor.compute(im1Gray, keypoints1)
    keypoints2, descriptors2 = descriptor.compute(im2Gray, keypoints2)

    #Selecting which matcher to use.  Bind it to a local name so the
    #Matcher module itself is not shadowed.
    matcher = Matcher.Initialize_Matcher(Matcher_name)
    Matches = matcher.knnMatch(descriptors1, descriptors2, k=2)

    #Applying the ratio test and getting Good_Matches from Matches
    Good_Matches = Ratio_Test(Matches, ratio_test_threshold)

    #Extract the locations of the good matches and estimate the homography
    src_pts = np.float32([keypoints1[m.queryIdx].pt
                          for m in Good_Matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([keypoints2[m.trainIdx].pt
                          for m in Good_Matches]).reshape(-1, 1, 2)
    h_parameters, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

    #Using the homography parameters to align imtoAlign
    height, width, channels = imReference.shape
    imRegistered = cv2.warpPerspective(imtoAlign, h_parameters,
                                       (width, height))

    return imRegistered, h_parameters
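A minimal usage sketch for the function above, assuming the project's DDI and Ratio_Test helpers accept these names; 'ORB', 'BF' and the file names are hypothetical:

import cv2

reference = cv2.imread('form_reference.jpg')  # hypothetical inputs
scanned = cv2.imread('form_scanned.jpg')
aligned, H = Alignfunction('ORB', 'ORB', 'BF', 0.75, reference, scanned)
cv2.imwrite('form_aligned.jpg', aligned)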
Example #5
 def joinCS(self, master, slave):
     """Joins csv CSTS variables based on user defined matching.
     master = str() existing CS variable to serve as the master.
     slave = str() field name from the data file for matching.
     """
     cs = self.data2Join
     slaveVals = cs.newVars[slave]
     masterVals = self.getVariable(master)[0]
     masterVals = [i.strip("\"") for i in masterVals]
     bridge = MATCH.createBridge(slaveVals, masterVals)
     print bridge
     fields = cs.names
     fields.remove(slave)
     for i in fields:
         new = MATCH.mapValues(bridge, cs.newVars[i])
         self.createVariable(i, "CS", [new])
Example #6
 def pruneJobNames(self):
     if self.data is None:
         return
     while True:
         pruned = False
         for ii, xx in enumerate(self.data):
             if 'name' not in xx:
                 continue
             if not Matcher.matchAny(xx['name'],
                                     self.matchAny) or not Matcher.matchAll(
                                         xx['name'], self.matchAll):
                 self.data.pop(ii)
                 pruned = True
                 break
         if not pruned:
             break
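The snippet assumes Matcher.matchAny and Matcher.matchAll take a name and a list of patterns; a minimal sketch of that contract (an assumed reimplementation, not the original module):

import re

def matchAny(name, patterns):
    # True when no patterns are given, or when at least one pattern matches
    return not patterns or any(re.search(p, name) for p in patterns)

def matchAll(name, patterns):
    # True only when every pattern matches (vacuously true for an empty list)
    return all(re.search(p, name) for p in patterns)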
Example #7
def main(argv):

    inputfile = ''

    try:
        opts, args = getopt.getopt(argv, "hi:", ["ifile="])
    except getopt.GetoptError:
        logging.error('PHPDetective.py -i <inputfile>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            logging.info('PHPDetective.py -i <inputfile>')
            sys.exit(0)
        elif opt in ("-i", "--ifile"):
            inputfile = arg
            logging.debug('Input file is %s' % inputfile)

    patParser = PatternParser.PatternParser('Patterns')

    patParser.parseAll()

    patterns = patParser.getKnownPatterns()

    entryPoints = utilities.getEntries(patterns)
    validation = utilities.getVals(patterns)
    sensitiveSinks = utilities.getSinks(patterns)

    slic = SliceParser.fileParser(inputfile, entryPoints, validation,
                                  sensitiveSinks)

    patt = Matcher.match(slic, patterns)

    utilities.printResults(slic, patt)
Example #8
 def joinCS(self, master, slave):
     """Joins csv CSTS variables based on user defined matching.
     master = str() existing CS variable to serve as the master.
     slave = str() field name from the data file for matching.
     """
     cs = self.data2Join
     slaveVals = cs.newVars[slave]
     masterVals = self.getVariable(master)[0]
     masterVals = [i.strip("\"") for i in masterVals]
     bridge = MATCH.createBridge(slaveVals, masterVals)
     print bridge
     fields = cs.names
     fields.remove(slave)
     for i in fields:
         new = MATCH.mapValues(bridge, cs.newVars[i])
         self.createVariable(i, "CS", [new])
Example #9
def match(frames, matcherMethod):
  if matcherMethod:
    # template matching works on the grayscale frame, other methods on the temp frame
    frame = frames['temp'] if matcherMethod != 'template' else frames['gray']
    try:
      res = Matcher.runMatcher(frame)
    except Exception, e:
      print '[X] Error calling matcher method: %s' % (e)
      print_exc()  # from traceback import print_exc
      res = (2, None, 0, None)
Example #10
 def joinCSTS(self,master,slave):
     """Joins csv CSTS variables based on user defined matching.
     master = str() existing CS variable to serve as the master.
     slave = str() field name from the data file for matching.
     """
     cs = self.data2Join
     slaveVals = cs.newVars[slave]
     masterVals = self.getVariable(master)[0]
     masterVals = [ i.strip("\"") for i in masterVals ]
     bridge = MATCH.createBridge(slaveVals,masterVals)
     
     fields = cs.names
     fields.remove(slave)
     batch = MATCH.batchSplit(fields)
     varNames = batch['strings']
     varNames.sort()
     timeInfo = map(int,batch['ints'])
     timeInfo.sort()
     timeInfo = map(str,timeInfo)
     ordered = {}
     for i in varNames:
         ts = map(str,timeInfo)
         names = [ i+ts[t] for t in range(len(ts)) ]
         ordered[i] = names
     for i in ordered.keys():
         newName = i
         allTime = ordered[i]
         newVar = []
         for t in allTime:
             column = cs.newVars[t]
             newVar.append(column)
         newVar = UTIL.TransposeList(newVar)
         reordered = []
         for ind in bridge:
             reordered.append(newVar[ind])
         reordered = UTIL.TransposeList(reordered)
         self.createVariable(i, "CSTS", reordered )
Example #11
    def joinCSTS(self, master, slave):
        """Joins csv CSTS variables based on user defined matching.
        master = str() existing CS variable to serve as the master.
        slave = str() field name from the data file for matching.
        """
        cs = self.data2Join
        slaveVals = cs.newVars[slave]
        masterVals = self.getVariable(master)[0]
        masterVals = [i.strip("\"") for i in masterVals]
        bridge = MATCH.createBridge(slaveVals, masterVals)

        fields = cs.names
        fields.remove(slave)
        batch = MATCH.batchSplit(fields)
        varNames = batch['strings']
        varNames.sort()
        timeInfo = map(int, batch['ints'])
        timeInfo.sort()
        timeInfo = map(str, timeInfo)
        ordered = {}
        for i in varNames:
            ts = map(str, timeInfo)
            names = [i + ts[t] for t in range(len(ts))]
            ordered[i] = names
        for i in ordered.keys():
            newName = i
            allTime = ordered[i]
            newVar = []
            for t in allTime:
                column = cs.newVars[t]
                newVar.append(column)
            newVar = UTIL.TransposeList(newVar)
            reordered = []
            for ind in bridge:
                reordered.append(newVar[ind])
            reordered = UTIL.TransposeList(reordered)
            self.createVariable(i, "CSTS", reordered)
Example #12
 def makeGalWeightsAgg(self, wtType=weight.WT_ROOK):
     galInfo = weight.spweight(self.shapeFileName, wtType=wtType)  # pass the argument, not the builtin 'type'
     galInfo.fixIslands()
     csids = range(self.n)
     sout = []
     sout.append("%d" % self.n)
     for i in csids:
         neighs = []
         shapeIds = self.id2Row[i]
         for shape in shapeIds:
             neighs = neighs + galInfo.neighbors[shape]
         neighs = [self.row2Id[neigh] for neigh in neighs]
         unique = MATCH.uniqueList(neighs)
         try:
             unique.remove(i)  # a unit is not its own neighbor
         except ValueError:
             pass
         unique = map(str, unique)
         sout.append("%d %d" % (i, len(unique)))
         sout.append(" ".join(unique))
     sout = "\n".join(sout)
     return sout
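For reference, the GAL-style text assembled above looks like this for a hypothetical two-unit map in which each unit neighbors the other:

2
0 1
1
1 1
0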
Example #13
 def makeGalWeightsAgg(self,wtType=weight.WT_ROOK):
     galInfo = weight.spweight(self.shapeFileName, wtType=wtType)  # pass the argument, not the builtin 'type'
     galInfo.fixIslands()
     csids = range(self.n)
     sout= []
     sout.append("%d"%self.n)
     for i in csids:
         neighs = []
         shapeIds = self.id2Row[i]
         for shape in shapeIds:
             neighs = neighs + galInfo.neighbors[shape]
         neighs = [ self.row2Id[neigh] for neigh in neighs ]
         unique = MATCH.uniqueList(neighs) 
         try:
             unique.remove(i)  # a unit is not its own neighbor
         except ValueError:
             pass
         unique = map(str,unique)
         sout.append("%d %d"%(i, len(unique)))
         sout.append(" ".join(unique))
     sout = "\n".join(sout)
     return sout    
Example #14
    def createNamesAndIDs(self, var1=[], var2=[], var3=[], delim="_"):
        """Create Names and IDs for Cross-Sections in the STARS Project.

        var1 = (list) = optional list of values for matching. default = shape integers
        var2 = (list) = optional list of values for joining before matching.
        var3 = (list) = optional list of values for CS labels AFTER matching.
        delim = (str) = delimiter for joining var1 and var2 for matching.
        """
        name = "csnames"
        if var1:
            var1 = var1
        else:
            var1 = self.initial["Shape IDs"]
        if var2:
            var = UTIL.joinListValues(var1, var2, delim=delim)
        else:
            var = var1
        m = MATCH.Matcher(name, var)
        self.matchedIDs = m.matched
        self.unique = m.unique
        self.csIDs = range(len(self.unique))
        self.id2Row = m.scheme2Master
        self.row2Id = m.master2Scheme
        if var3:
            self.unique = [var3[self.id2Row[i][0]] for i in self.csIDs]
        self.varDict[name] = self.unique
        self.createVariable(name, 'CS', [self.unique])
        self.createVariable('csids', 'CS', [self.csIDs])
        self.n = len(self.unique)
        self.csNameVar = name
        self.name2CSID = {}
        for i in range(len(self.unique)):
            self.name2CSID[self.unique[i]] = i
        if len(self.row2Id.keys()) != self.n:
            print """Aggregation Employed:  More shapes than cross-sectional units."""
            self.aggOn = 1
        else:
            self.aggOn = 0
Example #15
    def _createMatcher(self, featureFunc, descriptorFunc, matcherFunc, 
                       epipolarTol, ratioThres):

        """
        Creates the Matcher object and runs all computations
        
        If the functions aren't given as args, sift.detect, sift.compute and
        BFMatcher.knnMatch are used

        Args:
            Same as described in class description
        """

        #Create standard functions if not specified by class var

        self.matcher = Matcher.Matcher(self.imgLeft , self.imgRight, 
                                       featureFunc, descriptorFunc, matcherFunc, 
                                       epipolarTol=epipolarTol,
                                       ratioThres=ratioThres)
        self.matcher.computeAll()
        self.goodMatches = self.matcher.goodMatches
        self.keypointsLeft = self.matcher.keypointsLeft
        self.keypointsRight = self.matcher.keypointsRight
Example #16
def hexrays_events_callback_m(*args):
    global LEV
    global NAME
    ev = args[0]
    # print "Got {}:".format(EVENTS_HEXR[ev])
    if ev == idaapi.hxe_maturity:
        fcn = args[1]
        level = args[2]
        # print "Got level {}".format(CMAT_LEVEL[level])
        if level == idaapi.CMAT_FINAL:
            for i in used_pats:
                func_proc = FuncProcessor(fcn)
                matcher = Matcher(func_proc.fcn, None)
                matcher.set_pattern(i[0])
                matcher.chain = i[2]
                matcher.replacer = i[1]
                func_proc.pattern = matcher
                func_proc.DEBUG = DEBUG
                func_proc.traverse_function()
    return 0
Example #17
print "Determining coverage area"
p0 = raw_points[0]
x_min = p0[0]
x_max = p0[0]
y_min = p0[1]
y_max = p0[1]
for p in raw_points:
    if p[0] < x_min: x_min = p[0]
    if p[0] > x_max: x_max = p[0]
    if p[1] < y_min: y_min = p[1]
    if p[1] > y_max: y_max = p[1]
print "Area coverage = %.1f,%.1f to %.1f,%.1f (%.1f x %.1f meters)" % \
    (x_min, y_min, x_max, y_max, x_max-x_min, y_max-y_min)
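The bounding-box scan above can also be written with the builtin min/max; an equivalent sketch:

xs = [p[0] for p in raw_points]
ys = [p[1] for p in raw_points]
x_min, x_max = min(xs), max(xs)
y_min, y_max = min(ys), max(ys)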

# compute number of connections and cycle depth per image
Matcher.groupByConnections(proj.image_list, matches_sba)

# start with empty triangle lists
# format: [ [v[0], v[1], v[2], u, v], .... ]
for image in proj.image_list:
    image.tris = []

good_tris = 0
failed_tris = 0

# compute image.PROJ for each image
for image in proj.image_list:
    rvec, tvec = image.get_proj_sba()
    R, jac = cv2.Rodrigues(rvec)
    image.PROJ = np.concatenate((R, tvec), axis=1)
Example #18
    import urllib2
    import re
    path = 'https://raw.githubusercontent.com/vincentarelbundock/Rdatasets/master/csv/datasets/iris.csv'
    iris = pd.read_csv(urllib2.urlopen(path), index_col=0)
    # make binary
    iris['treatment'] = [i in ('setosa', 'virginica') for i in iris.Species]
    iris = iris.drop('Species', axis=1)
    iris.columns = [re.sub(r'\.', '', i) for i in iris.columns]
    test, control = iris[iris.treatment == True], iris[iris.treatment == False]
    return test, control


THRESHOLD = 0.01

test, control = get_sample_data()
m = Matcher(test, control)
m.match(threshold=THRESHOLD)


def test_indices():
    # control index should be shifted by test index
    assert max(m.test.index) + 1 == min(m.control.index)


def test_match_properties():
    for match_id in np.unique(m.matched_data.match_id):
        current = m.matched_data[m.matched_data.match_id == match_id]
        s1, s2 = current.scores
        t1, t2 = current.treatment

        # matched scores should be within THRESHOLD
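The listing cuts test_match_properties off after its comment; judging from THRESHOLD and the comment, the missing assertions are presumably along these lines (an assumed reconstruction, not the original source):

        assert abs(s1 - s2) <= THRESHOLD  # matched scores within THRESHOLD
        assert t1 != t2                   # a test row is paired with a control row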
Example #19
 def mainRun(self):
     self.result_image, self.mat = Matcher.run(self.input_image,
                                               self.train_folder, self.MAlg,
                                               self.MCount)
     self.result_idx = 0
     self.update_image()
Example #20
def init(names, virtual_name, is_master):
    """
    <function internal="yes">
      <summary>
        Default init() function provided by Zorp
      </summary>
      <description>
        This function is a default <function>init()</function> calling the init function
        identified by the <parameter>name</parameter> argument. This way several Zorp
        instances can use the same policy file.
      </description>
      <metainfo>
        <attributes>
          <attribute maturity="stable">
            <name>names</name>
            <type></type>
            <description>Names (instance name and also-as names) of this instance.</description>
          </attribute>
          <attribute maturity="stable">
            <name>virtual_name</name>
            <type>string</type>
            <description>
              Virtual instance name of this process. If a Zorp instance is backed by multiple
              Zorp processes using the same configuration each process has a unique virtual
              instance name that is used for SZIG communication, PID file creation, etc.
            </description>
          </attribute>
          <attribute>
            <name>is_master</name>
            <type>int</type>
            <description>
              TRUE if Zorp is running in master mode, FALSE for slave processes. Each Zorp instance
              should have exactly one master process and an arbitrary number of slaves.
            </description>
          </attribute>
        </attributes>
      </metainfo>
    </function>
    """
    import __main__
    import SockAddr, Matcher, Rule
    import errno

    Globals.virtual_instance_name = virtual_name

    # miscellaneous initialization
    if config.audit.encrypt_certificate_file:
        try:
            config.audit.encrypt_certificate = open(config.audit.encrypt_certificate_file, 'r').read()
        except IOError:
            log(None, CORE_ERROR, 1, "Error reading audit encryption certificate; file='%s'", (config.audit.encrypt_certificate_file))

    if config.audit.encrypt_certificate_list_file:
        try:
            config.audit.encrypt_certificate_list = [ ]
            for list in config.audit.encrypt_certificate_list_file:
                newlist = [ ]
                for file in list:
                    try:
                        newlist.append( open(file, 'r').read() )
                    except IOError:
                        log(None, CORE_ERROR, 1, "Error reading audit encryption certificate; file='%s'", (file))
                config.audit.encrypt_certificate_list.append( newlist )
        except TypeError:
            log(None, CORE_ERROR, 1, "Error iterating encryption certificate file list;")

    if config.audit.encrypt_certificate_list == None and config.audit.encrypt_certificate:
        config.audit.encrypt_certificate_list = [ [ config.audit.encrypt_certificate ] ]

    if config.audit.sign_private_key_file:
        try:
            config.audit.sign_private_key = open(config.audit.sign_private_key_file, 'r').read()
        except IOError:
            log(None, CORE_ERROR, 1, "Error reading audit signature's private key; file='%s'", (config.audit.sign_private_key_file))

    if config.audit.sign_certificate_file:
        try:
            config.audit.sign_certificate = open(config.audit.sign_certificate_file, 'r').read()
        except IOError:
            log(None, CORE_ERROR, 1, "Error reading audit signature's certificate; file='%s'", (config.audit.sign_certificate_file))

    Globals.rules = Rule.RuleSet()

    if config.options.kzorp_enabled:
        import kzorp.communication
        # ping kzorp to see if it's there
        try:
            h = kzorp.communication.Handle()
            Globals.kzorp_available = True
        except:
            Globals.kzorp_available = False
            log(None, CORE_ERROR, 0, "Error pinging KZorp, it is probably unavailable; exc_value='%s'" % (sys.exc_value))

    Globals.instance_name = names[0]
    for i in names:
        try:
            func = getattr(__main__, i)
        except AttributeError:
            ## LOG ##
            # This message indicates that the initialization function of
            # the given instance was not found in the policy file.
            ##
            log(None, CORE_ERROR, 0, "Instance definition not found in policy; instance='%s'", (names,))
            return FALSE
        func()

    Matcher.validateMatchers()

    if Globals.kzorp_available:
        import KZorp
        try:
            KZorp.downloadKZorpConfig(names[0], is_master)
        except:
            ## LOG ##
            # This message indicates that downloading the necessary information to the
            # kernel-level KZorp subsystem has failed.
            ##
            log(None, CORE_ERROR, 0, "Error downloading KZorp configuration, Python traceback follows; error='%s'" % (sys.exc_value))
            for s in traceback.format_tb(sys.exc_traceback):
                for l in s.split("\n"):
                    if l:
                        log(None, CORE_ERROR, 0, "Traceback: %s" % (l))

            # if kzorp did respond to the ping, the configuration is erroneous -- we die here so the user finds out
            return FALSE

    return TRUE
Example #21
    if len(matches_new) == len(matches_group):
        done = True
    else:
        matches_group = list(matches_new)  # shallow copy
print "unique features (after grouping):", len(matches_group)

# count the match groups that are longer than just pairs
group_count = 0
for m in matches_group:
    if len(m) > 3:
        group_count += 1

print "Number of groupings:", group_count

print "Original match connector"
Matcher.groupByConnections(proj.image_list)

groups = Matcher.simpleGrouping(proj.image_list, matches_group)

image_width = proj.image_list[0].width
camw, camh = proj.cam.get_image_params()
scale = float(image_width) / float(camw)
print 'scale:', scale

sba = SBA.SBA(args.project)
sba.prepair_data(proj.image_list, groups[0], matches_direct,
                 proj.cam.get_K(scale))
cameras, features, cam_index_map, feat_index_map, error_images = sba.run_live(
    mode='')

if len(error_images):
Example #22
import argparse
import pickle
import sys

from progress.bar import Bar
import scipy.spatial

sys.path.append('../lib')
import Matcher
import Pose
import ProjectMgr

# working on matching features ...

parser = argparse.ArgumentParser(description='Keypoint projection.')
parser.add_argument('--project', required=True, help='project directory')

args = parser.parse_args()

proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.load_match_pairs()

print "Loading original (direct) matches ..."
matches_direct = pickle.load( open( args.project + "/matches_direct", "rb" ) )

print "Total unique features =", len(matches_direct)

Matcher.groupByConnections(proj.image_list, matches_direct)

# save the results
for image in proj.image_list:
    image.save_meta()
Example #23
parser.add_argument('--index', type=int, help='show specific image by index')
parser.add_argument('--direct',
                    action='store_true',
                    help='show matches_direct')
parser.add_argument('--sba', action='store_true', help='show matches_sba')
args = parser.parse_args()

proj = ProjectMgr.ProjectMgr(args.project)
proj.load_image_info()
proj.load_features()
proj.load_match_pairs()

# setup SRTM ground interpolator
ref = proj.ned_reference_lla

m = Matcher.Matcher()

order = 'fewest-matches'

if args.image:
    i1 = proj.findImageByName(args.image)
    if i1 != None:
        for j, i2 in enumerate(proj.image_list):
            if len(i1.match_list[j]):
                print "Showing %s vs %s" % (i1.name, i2.name)
                status = m.showMatchOrient(i1,
                                           i2,
                                           i1.match_list[j],
                                           orient=args.orient)
    else:
        print "Cannot locate:", args.image
Example #24
            print((depthDiff > 400).sum() / totalPixels)

        return dispDiff


if __name__ == '__main__':

    #Define image paths
    imgLeftPath = "../Images/middelburyLeft.png"
    imgRightPath = "../Images/middelburyRight.png"
    picklePath = "../Pickle/middelburyLeft.p"
    imgLeft = cv2.imread('../Images/middelburyLeft.png')

    matcherFunc = Matcher.SGBFMatcher(imgLeft.shape,
                                      lowThres=100,
                                      highThres=300,
                                      epipolarTol=5,
                                      localSize=25,
                                      localNum=8)

    # Create the MLP data
    mlpDataProsessor = MLPDataProsessor.MLPDataProsessor(
        imgLeftPath,
        imgRightPath,
        picklePath=picklePath,
        f=3971.415,
        bx=195.187,
        doffs=146.53,
        matcherFunc=matcherFunc,
        shouldUseDisp=True,
        edgeRemoveRadius=4)
    mlpDataProsessor.normalizeMLPData()
Example #25
    def convertCSTSVariableBatch(self):
        d = sd.SDialogue('Convert Initial Fields to a STARS Panel Variables')
        varNames = self.proj.getDBFVariableNames()
        batch = MATCH.batchSplit(varNames)
        varNames = batch['strings']
        varNames.sort()
        timeInfo = batch['ints']
        timeInfo.sort()
        txt = """Select the fields to create panel variables via the batch method."""
        time = str(self.proj.t)
        add = ("Remember that the field must have " + time +
               " time periods associated with it.")
        txt = txt + "\n" + add
        title = "Choose fields for batch CSTS creation"
        sd.DualListBoxes(d, varNames, title=title, helpText=txt)

        txt = """Choose a variable associated with the first time period in
        your study, and an additional oone for the year time period.  You may
        also type this in manuallly."""
        timeStuff = ['Start Period for Batch', 'End Period for Batch']
        sd.MultiEntry(d,
                      timeInfo,
                      timeStuff,
                      title='Time Period Arguments',
                      helpText=txt)

        txt = """Provide the time period increment:
            I.e.    Annual:   1
                    BiAnnual: 2
                    Decadal:  10
            """
        sd.UserEntry(d,
                     label="Integer Value",
                     align="LEFT",
                     title="User Defined Time Increment",
                     helpText=txt)

        entries = ['Aggregation Method']
        txt = """If the same cross-sectional unit has more than one value
        associated with it, ProjectMaker will have to combine the values in
        some way.  You have the following options:
            Sum: will sum up any values associated with the same cross-section.
            Max: will take the maximum value of any values associated with the same cross-section.
            Min: will take the minimum value of any values associated with the same cross-section.
            Average: will average the values associated with the same cross-section.
            String: will essentially use the value of the last instance for
            each cross-section.  Furthermore the value is a string.  Use this
            for categorical data.
            
        ***The default method is "Average"."""
        types = ['Sum', 'Max', 'Min', 'Average', 'String']
        sd.MultiEntry(d,
                      types,
                      entries,
                      title='Optional Arguments',
                      helpText=txt)
        d.draw()
        if d.status:
            vars = MATCH.Matcher('vars', d.results[0])
            varList = vars.unique
            start = int(d.results[1]['Start Period for Batch'])
            end = int(d.results[1]['End Period for Batch'])
            step = int(d.results[2])
            cohesion = d.results[3]['Aggregation Method']
            if cohesion:
                pass
            else:
                cohesion = 'Average'
            for var in varList:
                try:
                    newVar = [
                        var + str(i) for i in range(start, end + step, step)
                    ]
                    createVar = self.proj.convertArcViewVariable(
                        cohesion, var, newVar)
                except:
                    beg = "Could not create new variable for " + var + "."
                    # use a fresh name so the 'end' period used by range() above is not clobbered
                    msg = "\nPerhaps the time series does not match."
                    self.report(beg + msg)
                    self.report(self.proj.variableSummary())
Example #26
    (result_list, mre, stddev) \
        = proj.compute_reprojection_errors(cam_dict, matches_direct)
    if start_mre < 0.0: start_mre = mre
    print "mre = %.4f stddev = %.4f features = %d" % (mre, stddev,
                                                      len(matches_direct))

    cull_outliers = False
    if cull_outliers:
        mark_outliers(result_list, mre + stddev * 4, matches_direct)
        mark_weak_images(matches_direct)
        delete_marked_matches(matches_direct)

        # after outlier deletion, re-evaluate matched pairs and connection
        # cycles.
        match_pairs = proj.generate_match_pairs(matches_direct)
        group_list = Matcher.groupByConnections(proj.image_list,
                                                matches_direct, match_pairs)
        mark_non_group(group_list[0], matches_direct)
        delete_marked_matches(matches_direct)
    else:
        # keep accounting structures happy
        mark_weak_images(matches_direct)

    # get the affine transformation required to bring the new camera
    # locations back into a best fit with the original camera
    # locations
    A = get_recenter_affine(cam_dict)

    # thought #1: if we are triangulating, this could be done once at the
    # end to fix up the solution, not every iteration?  But it doesn't
    # seem to harm the triangulation.
Example #27
 def convertCSTSVariableBatch(self):
     d = sd.SDialogue('Convert Initial Fields to a STARS Panel Variables')
     varNames = self.proj.getDBFVariableNames()
     batch = MATCH.batchSplit(varNames)
     varNames = batch['strings']
     varNames.sort()
     timeInfo = batch['ints']
     timeInfo.sort()
     txt="""Select the fields to create panel variables via the batch method."""
     time = str(self.proj.t)
     add = ("Remember that the field must have " + time +
            " time periods associated with it.")
     txt = txt + "\n" + add
     title = "Choose fields for batch CSTS creation"
     sd.DualListBoxes(d,varNames,title=title, helpText=txt)
     
     txt = """Choose a variable associated with the first time period in
     your study, and an additional oone for the year time period.  You may
     also type this in manuallly."""
     timeStuff = ['Start Period for Batch', 'End Period for Batch']
     sd.MultiEntry(d,timeInfo, timeStuff, title='Time Period Arguments',
                   helpText=txt)
     
     txt="""Provide the time period increment:
         I.e.    Annual:   1
                 BiAnnual: 2
                 Decadal:  10
         """        
     sd.UserEntry(d,label="Integer Value", align="LEFT", 
                  title="User Defined Time Increment",helpText=txt)        
     
     entries = ['Aggregation Method']
     txt = """If the same cross-sectional unit has more than one value
     associated with it, ProjectMaker will have to combine the values in
     some way.  You have the following options:
         Sum: will sum up any values associated with the same cross-section.
         Max: will take the maximum value of any values associated with the same cross-section.
         Min: will take the minimum value of any values associated with the same cross-section.
         Average: will average the values associated with the same cross-section.
         String: will essentially use the value of the last instance for
         each cross-section.  Furthermore the value is a string.  Use this
         for categorical data.
         
     ***The default method is "Average"."""
     types = ['Sum', 'Max', 'Min', 'Average', 'String']
     sd.MultiEntry(d,types, entries, title='Optional Arguments', helpText=txt)
     d.draw()
     if d.status:
         vars = MATCH.Matcher('vars',d.results[0])
         varList = vars.unique
         start = int( d.results[1]['Start Period for Batch'] )
         end = int( d.results[1]['End Period for Batch'] )
         step = int( d.results[2] )
         cohesion = d.results[3]['Aggregation Method']
         if cohesion:
             pass
         else:
             cohesion = 'Average'
         for var in varList:
             try:
                 newVar = [ var+str(i) for i in range(start,end+step,step) ]
                 createVar = self.proj.convertArcViewVariable(cohesion,var,newVar)
             except:
                 beg = "Could not create new variable for " + var + "."
                 # use a fresh name so the 'end' period used by range() above is not clobbered
                 msg = "\nPerhaps the time series does not match."
                 self.report(beg + msg)
                 self.report(self.proj.variableSummary())
Example #28
    """
    try:
        input_file = InputFile(sys.argv[1])
    except IOError, e:
        print("ERROR: {e}".format(e=e))
        return

    sample_rate_adjust_factor = int(NORMAL_SAMPLE_RATE /
                                    input_file.get_sample_rate())

    freq_chunks = FFT(input_file,
                      CHUNK_SIZE / sample_rate_adjust_factor).series()

    norm = LogNorm(0.000000001, numpy.amax(freq_chunks))

    winners = Matcher._bucket_winners(freq_chunks)

    # initialize an empty window
    master = Tk()
    master.wm_title(" ".join(sys.argv[1:]))
    chunks = len(freq_chunks)
    first_chunk = 0
    lines = UPPER_LIMIT
    blockSizeX = 2
    blockSizeY = 2
    w = Canvas(master, width=chunks * blockSizeX, height=lines * blockSizeY)
    w.pack()
    # for each chunk (which will be the X axis)
    for i in range(chunks):
        print i
        # for each line (the Y axis)
Example #29
    for i in range(int(sys.argv[1])):
        word1 = word1 + "a"
        regex = regex + "a?"
    regex = regex + word1
    print("- N: %s" % sys.argv[1])
    print("- Word: %s" % word1)
    reg = r.add_concatenation(regex)
    print("- Regex: " + reg)
    postfix = r.get_postfix(reg)
    #print(postfix)
    NFA = r.get_nfa(postfix)
    word2 = "abbc"
    #print("NFA:")
    #print (r.to_str(NFA.start))
    m = Matcher(NFA.start, word1)
    t1 = datetime.datetime.utcnow()
    print("Our implementation ")
    print("Match?: ")
    print(m.searchMatch(word1))
    t2 = datetime.datetime.utcnow()
    delta = t2 - t1
    print("Time:  %s.%s" % (delta.seconds, delta.microseconds))

    t3 = datetime.datetime.utcnow()
    print("Python implementation ")
    print("Match?: ")
    print(re.match(regex, word1) != None)

    t4 = datetime.datetime.utcnow()
    delta = t4 - t3
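This benchmark builds the classic pathological case for backtracking regex engines: for n = 3 the constructed pattern is a?a?a?aaa matched against aaa. A backtracking matcher like Python's re can explore exponentially many ways to distribute the optional a? groups, while the NFA-based Matcher built here stays roughly linear in n.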
Example #30
def init(names, virtual_name, is_master):
    """
    <function internal="yes">
      <summary>
        Default init() function provided by Zorp
      </summary>
      <description>
        This function is a default <function>init()</function> calling the init function
        identified by the <parameter>name</parameter> argument. This way several Zorp
        instances can use the same policy file.
      </description>
      <metainfo>
        <attributes>
          <attribute maturity="stable">
            <name>names</name>
            <type></type>
            <description>Names (instance name and also-as names) of this instance.</description>
          </attribute>
          <attribute maturity="stable">
            <name>virtual_name</name>
            <type>string</type>
            <description>
              Virtual instance name of this process. If a Zorp instance is backed by multiple
              Zorp processes using the same configuration each process has a unique virtual
              instance name that is used for SZIG communication, PID file creation, etc.
            </description>
          </attribute>
          <attribute>
            <name>is_master</name>
            <type>int</type>
            <description>
              TRUE if Zorp is running in master mode, FALSE for slave processes. Each Zorp instance
              should have exactly one master process and an arbitrary number of slaves.
            </description>
          </attribute>
        </attributes>
      </metainfo>
    </function>
    """
    import __main__
    import SockAddr, KZorp, Matcher, Rule
    import kzorp.netlink
    import kzorp.kzorp_netlink
    import errno

    # miscellaneous initialization
    if config.audit.encrypt_certificate_file:
        try:
            config.audit.encrypt_certificate = open(config.audit.encrypt_certificate_file, 'r').read()
        except IOError:
            log(None, CORE_ERROR, 1, "Error reading audit encryption certificate; file='%s'", (config.audit.encrypt_certificate_file))

    if config.audit.encrypt_certificate_list_file:
        try:
            config.audit.encrypt_certificate_list = [ ]
            for list in config.audit.encrypt_certificate_list_file:
                newlist = [ ]
                for file in list:
                    try:
                        newlist.append( open(file, 'r').read() )
                    except IOError:
                        log(None, CORE_ERROR, 1, "Error reading audit encryption certificate; file='%s'", (file))
                config.audit.encrypt_certificate_list.append( newlist )
        except TypeError:
            log(None, CORE_ERROR, 1, "Error iterating encryption certificate file list;")

    if config.audit.encrypt_certificate_list == None and config.audit.encrypt_certificate:
        config.audit.encrypt_certificate_list = [ [ config.audit.encrypt_certificate ] ]

    if config.audit.sign_private_key_file:
        try:
            config.audit.sign_private_key = open(config.audit.sign_private_key_file, 'r').read()
        except IOError:
            log(None, CORE_ERROR, 1, "Error reading audit signature's private key; file='%s'", (config.audit.sign_private_key_file))

    if config.audit.sign_certificate_file:
        try:
            config.audit.sign_certificate = open(config.audit.sign_certificate_file, 'r').read()
        except IOError:
            log(None, CORE_ERROR, 1, "Error reading audit signature's certificate; file='%s'", (config.audit.sign_certificate_file))

    Globals.rules = Rule.RuleSet()

    if config.options.kzorp_enabled:
        # ping kzorp to see if it's there
        try:
            h = kzorp.kzorp_netlink.Handle()
            Globals.kzorp_available = True
        except:
            Globals.kzorp_available = False
            log(None, CORE_ERROR, 0, "Error pinging KZorp, it is probably unavailable; exc_value='%s'" % (sys.exc_value))

    Globals.instance_name = names[0]
    for i in names:
        try:
            func = getattr(__main__, i)
        except AttributeError:
            ## LOG ##
            # This message indicates that the initialization function of
            # the given instance was not found in the policy file.
            ##
            log(None, CORE_ERROR, 0, "Instance definition not found in policy; instance='%s'", (names,))
            return FALSE
        func()

    Matcher.validateMatchers()

    if Globals.kzorp_available:
        try:
            KZorp.downloadKZorpConfig(names[0], is_master)
        except:
            ## LOG ##
            # This message indicates that downloading the necessary information to the
            # kernel-level KZorp subsystem has failed.
            ##
            log(None, CORE_ERROR, 0, "Error downloading KZorp configuration, Python traceback follows; error='%s'" % (sys.exc_value))
            for s in traceback.format_tb(sys.exc_traceback):
                for l in s.split("\n"):
                    if l:
                        log(None, CORE_ERROR, 0, "Traceback: %s" % (l))

            # if kzorp did respond to the ping, the configuration is erroneous -- we die here so the user finds out
            return FALSE

    return TRUE
Example #31
    # measure our current mean reprojection error and trim mre
    # outliers from the match set (any points with mre 4x stddev) as
    # well as any weak images with < 25 matches.
    (result_list, mre, stddev) \
        = proj.compute_reprojection_errors(cam_dict, matches_direct)
    if start_mre < 0.0: start_mre = mre
    print "mre = %.4f stddev = %.4f features = %d" % (mre, stddev, len(matches_direct))
    mark_outliers(result_list, mre + stddev*4, matches_direct)
    mark_weak_images(matches_direct)
    delete_marked_matches(matches_direct)

    # after outlier deletion, re-evaluate matched pairs and connection
    # cycles.
    match_pairs = proj.generate_match_pairs(matches_direct)
    group_list = Matcher.groupByConnections(proj.image_list, matches_direct, match_pairs)
    mark_non_group(group_list[0], matches_direct)
    delete_marked_matches(matches_direct)

    # get the affine transformation required to bring the new camera
    # locations back into a best fit with the original camera
    # locations
    A = get_recenter_affine(cam_dict)

    # thought #1: if we are triangulating, this could be done once at the
    # end to fix up the solution, not every iteration?  But it doesn't
    # seem to harm the triangulation.

    # thought #2: if we are projecting onto the dem surface, we
    # probably shouldn't transform the cams back to the original
    # because this could perpetually pull things out of convergence
Example #32
def upload():

    if request.method == 'POST':
        filedelet.create_dir()
        data = []
        csvFile = request.files['csv']
        csv_ext = str(csvFile).split("'")
        test_name = request.form['testname']
        if ".csv" in csv_ext[1]:
            if csvFile and allowed_file(csvFile.filename):
                csvname = secure_filename(csvFile.filename)
                csvFile.save(os.path.join(app.config['UPLOAD_FOLDER'],
                                          csvname))
                csv_path = mypath + '/storage/' + csvname

                Matcher.csv_handling(csv_path)
        else:
            fail = '''Please upload only CSV file.'''
            return render_template('upload.html', fail=fail)

        queFile = request.files['que']
        que_ext = str(queFile).split("'")  # fixed: was inspecting csvFile again

        if ".csv" in que_ext[1]:
            if queFile and allowed_file(queFile.filename):
                quename = secure_filename(queFile.filename)
                queFile.save(os.path.join(app.config['UPLOAD_FOLDER'],
                                          quename))
                que_path = mypath + '/storage/' + quename

                Data = []
                Data.append(Questions.question_extract(que_path, test_name))
                mongoDb.questions_store(Data)
        else:
            fail = '''Please upload only CSV file.'''
            return render_template('upload.html', fail=fail)

        for f in request.files.getlist('file[]'):
            pdf_ext = str(f).split("'")
            if ".pdf" in pdf_ext[1]:
                if f and allowed_file(f.filename):
                    filename = secure_filename(f.filename)
                    f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))

                    pdf_path = mypath + '/storage/' + filename
                    name, mail, result = Matcher.create_database(
                        pdf_path, filename)
                    data.append(
                        Matcher.header_Cal(name, mail, result, test_name))

            elif ".docx" in pdf_ext[1]:
                if f and allowed_file(f.filename):
                    filename = secure_filename(f.filename)
                    f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))

                    pdf_path = mypath + '/storage/' + filename
                    name, mail, result = Matcher.create_database(
                        pdf_path, filename)
                    data.append(
                        Matcher.header_Cal(name, mail, result, test_name))

            elif ".txt" in pdf_ext[1]:
                if f and allowed_file(f.filename):
                    filename = secure_filename(f.filename)
                    f.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))

                    pdf_path = mypath + '/storage/' + filename
                    name, mail, result = Matcher.create_database(
                        pdf_path, filename)
                    data.append(
                        Matcher.header_Cal(name, mail, result, test_name))

            else:
                fail = '''Please upload only PDF, DOCX and TXT files.'''
                return render_template('upload.html', fail=fail)

        # print("-----------")
        mongoDb.collection_store(data)
        Success = "Success"
        filedelet.Delete_files()
        return render_template('upload.html', suc=Success)
    # logo_path = os.path.join(app.config['IMAGES'], "CA-Logo.svg")
    # print(logo_path)
    return render_template('upload.html')
Example #33
        for i in range(int(sys.argv[1])):
            word1 = word1 + "a"
            regex = regex + "a?"
        regex = regex + word1
        print("- N: %s" % sys.argv[1])
        print("- Word: %s" % word1)
        reg = r.add_concatenation(regex)
        print("- Regex: " +reg)
        postfix = r.get_postfix(reg)
        #print(postfix)
        NFA = r.get_nfa(postfix)
        word2 = "abbc"
        #print("NFA:")
        #print (r.to_str(NFA.start))
        m = Matcher(NFA.start, word1)
        t1 = datetime.datetime.utcnow()
        print("Our implementation ")
        print("Match?: ")
        print(m.searchMatch(word1))
        t2 = datetime.datetime.utcnow()
        delta = t2-t1
        print("Time:  %s.%s" % (delta.seconds, delta.microseconds))

        t3 = datetime.datetime.utcnow()
        print ("Python implementation ")
        print("Match?: ")
        print(re.match(regex,word1) != None)

        t4 = datetime.datetime.utcnow()
        delta = t4-t3
Example #34
    def begin(self):
        #print("Starting processing")
        imageProcessor = ImageProcessing(self.meeting, self.arrayStudents,
                                         self.size)
        confidenceMatrix, baseAttendaceDict = imageProcessor.processImageAndGetConfidenceMatrix(
        )
        print("done confidence:")
        for i in range(len(confidenceMatrix)):
            print(confidenceMatrix[i])

        useDelete = True
        useSocial = True
        matcher = Matcher(self.meeting, self.arrayStudents)

        #attendance with social data
        attendance = matcher.matchStudents(confidenceMatrix, False, useSocial)
        f = open(self.meeting.getMeetingDirectory() + '\AttendanceSocial.txt',
                 "w+")
        count = 0
        for item in attendance:
            f.write('student: ' + str(item) +
                    ' is found to be cropped face: ' + str(count) + '\n')
            count = count + 1

        #attendance to log without social data
        attendanceNoSocial = matcher.matchStudents(confidenceMatrix, False,
                                                   False)
        f = open(
            self.meeting.getMeetingDirectory() + '\AttendanceNoSocial.txt',
            "w+")
        count = 0
        for item in attendanceNoSocial:
            f.write('student: ' + str(item) +
                    ' is found to be cropped face: ' + str(count) + '\n')
            count = count + 1

        output = Output(self.meeting, self.arrayStudents,
                        self.meeting.getCroppedFaces(), attendance)
        imageOrginalWithAttedance, imageAttendancePath = output.createAndWriteAttendacePiture(
            "MatchingWithSocial")

        #create attendace picture for social not used
        output.createAndWriteAttendancePictureTwo("MatchingNoSocial",
                                                  attendanceNoSocial)

        #create attendance picture for base recognition
        output.createAndWriteAttendancePictureTwo("BaseRecognition",
                                                  baseAttendaceDict)

        #write the attendance picture to the db
        self.database.writeImageWithAttendance(
            self.db, imageOrginalWithAttedance,
            self.meeting.getMeetingDirectory(), imageAttendancePath)

        #write the attendance to the db
        self.database.writeAttendance(self.db, self.arrayStudents)

        #write the average social matrix to the db (the first meeting seeds it)
        if (self.meeting.getFirstMeeting() == True):
            finalAverageSocialMatrix = output.findSocialMatrixFirstMeeting()
        else:
            finalAverageSocialMatrix = output.findAverageSocialMatrix()

        self.database.writeSocialMatrix(self.db, self.meeting,
                                        finalAverageSocialMatrix,
                                        self.arrayStudents)

        output.printAttendance()
        if (1 == 2):  # disabled: alternative matcher configurations kept for reference
            useDelete = False
            useSocial = True
            matcher = Matcher(self.meeting, self.arrayStudents)
            attendance = matcher.matchStudents(confidenceMatrix, useDelete,
                                               useSocial)
            output4 = Output(self.meeting, self.arrayStudents,
                             self.meeting.getCroppedFaces(), attendance)
            output4.createAndWriteAttendacePiture("SocialNoDelete")
            output4.printAttendance()

            useDelete = True
            useSocial = False
            matcher = Matcher(self.meeting, self.arrayStudents)
            attendance = matcher.matchStudents(confidenceMatrix, useDelete,
                                               useSocial)
            output3 = Output(self.meeting, self.arrayStudents,
                             self.meeting.getCroppedFaces(), attendance)
            output3.createAndWriteAttendacePiture("Social")
            output3.printAttendance()

            useDelete = False
            useSocial = False
            matcher = Matcher(self.meeting, self.arrayStudents)
            attendance = matcher.matchStudents(confidenceMatrix, useDelete,
                                               useSocial)
            output2 = Output(self.meeting, self.arrayStudents,
                             self.meeting.getCroppedFaces(), attendance)
            output2.createAndWriteAttendacePiture("MatchingNoDelete")
            output2.printAttendance()
Example #35
    def createMatcher(self, utype, passwd=0):

        datas = d_matchs.datas.get(utype)
        if datas is None:
            ERROR_MSG(
                "matchers::createMatcher get matchData error, d_matchs.datas has no key: %d"
                % utype)
            return None

        DEBUG_MSG(
            "%s::createMatcher: self.lastNewRoomKey=%i, self.matchers:%s" %
            (self.name, self.lastNewRoomKey, str(self.matchers.keys())))
        """
        if self.lastNewRoomKey  in self.matchers.keys():
            ERROR_MSG("matchers::createMatcher have in mathers ,self.lastNewRoomKey : %d" % self.lastNewRoomKey)
            return self.matchers[self.lastNewRoomKey]
        """
        self.lastNewRoomKey = KBEngine.genUUID64()
        matherData = Matcher(self.lastNewRoomKey)

        matchType = datas.get('matchRule', None)
        if matchType is None:
            ERROR_MSG(
                "matchers::createMatcher matchType:%d is None d_matchs.datas "
                % datas)
            return None
        else:
            ruleData = d_matchs.matchRuleDatas.get(matchType, None)
            if ruleData is None:
                ERROR_MSG(
                    "matchers::createMatcher d_matchs.matchRuleDatas.get(matchType,None) matchType: %d"
                    % matchType)
                return None
            if ruleData['utype'] == "player":
                matherData.matchRule = PlayerMatchRule(ruleData['id'],ruleData['name'],ruleData['teamACount'],ruleData['teamBCount'],\
                                                  ruleData['minPlayers'],ruleData['maxPlayers'])
            elif ruleData['utype'] == 'npc':
                matherData.matchRule = NPCMatchRule(ruleData['id'],ruleData['name'],ruleData['teamACount'],ruleData['teamBCount'],\
                                                  ruleData['minPlayers'],ruleData['maxPlayers'])
            elif ruleData['utype'] == 'customize':
                matherData.matchRule = CustomizeMatchRule(ruleData['id'],ruleData['name'],ruleData['teamACount'],ruleData['teamBCount'],\
                                                  ruleData['minPlayers'],ruleData['maxPlayers'])
            else:
                ERROR_MSG(
                    "matchers::createMatcher d_matchs.ruleData.get(utype,None) utype: %s"
                    % ruleData['utype'])
                return None

        roomType = datas.get('roomRule', None)
        if roomType is None:
            ERROR_MSG(
                "matchers::createMatcher roomType:%d is None d_matchs.datas " %
                datas)
            return None
        else:
            ruleData = d_matchs.roomRuleDatas.get(roomType, None)
            if ruleData is None:
                ERROR_MSG(
                    "matchers::createMatcher d_matchs.roomRuleDatas.get(roomType,None) roomType: %d"
                    % roomType)
                return None

            if ruleData['roomType'] == "SummonerCanyon":
                matcherData.roomRule = SummonerCanyonRule(
                    self.lastNewRoomKey, self.lastRoomNumber, ruleData, passwd)
            elif ruleData['roomType'] == 'TwistedJungle':
                matcherData.roomRule = TwistedJungleRule(
                    self.lastNewRoomKey, self.lastRoomNumber, ruleData, passwd)
            elif ruleData['roomType'] == 'PolarChaos':
                matcherData.roomRule = PolarChaosRule(
                    self.lastNewRoomKey, self.lastRoomNumber, ruleData, passwd)
            elif ruleData['roomType'] == 'CryingAbyss':
                matcherData.roomRule = CryingAbyssRule(
                    self.lastNewRoomKey, self.lastRoomNumber, ruleData, passwd)
            else:
                ERROR_MSG(
                    "matchers::createMatcher unknown roomRule roomType: %s"
                    % ruleData['roomType'])
                return None
            self.lastRoomNumber += 1

        self.matcherIDsMapUtype[matcherData.matchID] = utype
        self.matchers[matcherData.matchID] = matcherData
        DEBUG_MSG("%s::createMatcher: matchID=%i, utype:%i" %
                  (self.name, matcherData.matchID, utype))
        return matcherData
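
Since all three rule classes take the same constructor arguments, the utype dispatch above could also be table-driven. A minimal sketch under that assumption; the MATCH_RULE_CLASSES map and buildMatchRule helper below are illustrative, not part of the original module:

# Hypothetical table-driven equivalent of the matchRule dispatch above.
MATCH_RULE_CLASSES = {
    "player": PlayerMatchRule,
    "npc": NPCMatchRule,
    "customize": CustomizeMatchRule,
}

def buildMatchRule(ruleData):
    # Returns a rule instance, or None for an unknown utype.
    ruleClass = MATCH_RULE_CLASSES.get(ruleData['utype'])
    if ruleClass is None:
        return None
    return ruleClass(ruleData['id'], ruleData['name'],
                     ruleData['teamACount'], ruleData['teamBCount'],
                     ruleData['minPlayers'], ruleData['maxPlayers'])

This keeps the error path in one place: a None return maps directly onto the ERROR_MSG branch.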
Example #36
0
                        (0, 0, 255), 1, cv2.LINE_AA)

        return depthImg


if __name__ == '__main__':

    #Load images
    #imgLeft = cv2.imread('../Images/CityLeft1.png')
    #imgRight = cv2.imread('../Images/CityRight1.png')

    imgLeft = cv2.imread('../Images/middelburyLeft.png')
    imgRight = cv2.imread('../Images/middelburyRight.png')

    #Create the Matcher for the stereo pair and run the full matching pipeline
    matcher = Matcher.Matcher(imgLeft, imgRight)
    matcher.computeAll()

    #f and bx from the KITTI dataset calibration
    f = 7.215377e+02
    bx = 4.485728e+01 + 3.395242e+02

    sd = StereoDepth(matcher.goodMatches, matcher.keypointsLeft,
                     matcher.keypointsRight, f, bx)
    sd.computeAll()

    img = sd.getDepthImage(matcher.imgLeft)

    cv2.imshow("Original image", matcher.imgLeft)
    cv2.imshow("Depth image", img)
    cv2.imwrite("../Results/StereoDepth.png", img)
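
For context on the f and bx constants: with a rectified stereo pair, depth follows from disparity as Z = f * bx / d, where d is the horizontal pixel offset between matched keypoints. A minimal sketch of that relation, assuming this is what StereoDepth computes internally (not verified against its source):

# Depth from disparity for a rectified stereo pair (illustrative only).
def depth_from_disparity(xLeft, xRight, f, bx):
    disparity = xLeft - xRight       # horizontal pixel offset between matches
    if disparity <= 0:               # guard against degenerate matches
        return float('inf')
    return f * bx / disparity        # depth in the calibration's units

# e.g. depth_from_disparity(400.0, 390.0, 7.215377e+02, 4.485728e+01 + 3.395242e+02)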
Example #37
0
test1 = []
test1.append([2, 3, 4, 5, 1])
results1 = [0, 1, 2, 3, 4]

#two have same top match test
test2 = [] 
test2.append([10, 20, 30, 40, 50])
test2.append([11, 13, 30, 40, 50])
test2.append([20, 30, 10, 40, 50])
test2.append([20, 30, 40, 10, 50])
test2.append([20, 30, 40, 50, 10])
results2 = [0, 1, 2, 3, 4]

#someone absent test
test3 = [] 
test3.append([10, 20, 30, 40, 50, 55])
test3.append([11, 13, 30, 40, 50, 60])
test3.append([20, 30, 10, 40, 50, 65])
test3.append([20, 30, 40, 10, 50, 70])
test3.append([20, 30, 40, 50, 10, 74])
results3 = [0, 1, 2, 3, 4]


matcher = Matcher(1, 1)
results = matcher.matchStudents(test3)
print()
print("Results:")
print(results)
print("Expected Results:")
print(results3)
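
The print-and-compare checks above can be made self-verifying. A minimal sketch, assuming matchStudents takes a confidence matrix and returns a list of indices comparable to the expected lists:

# Hypothetical assertion wrapper around the manual checks above.
def check_case(matcher, confidences, expected, label):
    actual = matcher.matchStudents(confidences)
    assert actual == expected, "%s: got %s, expected %s" % (label, actual, expected)

# check_case(matcher, test1, results1, "basic test")
# check_case(matcher, test2, results2, "two have same top match")
# check_case(matcher, test3, results3, "someone absent")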

Example #38
0
parser = argparse.ArgumentParser(description='Set the initial camera poses.')
parser.add_argument('--project', required=True, help='project directory')
parser.add_argument(
    '--stddev',
    type=float,
    default=5,
    help='how many stddevs above the mean for auto discarding features')

args = parser.parse_args()

proj = ProjectMgr.ProjectMgr(args.project)
proj.load_images_info()
proj.load_features()
proj.undistort_keypoints()

matcher = Matcher.Matcher()

print("Loading match points (direct)...")
matches = pickle.load(open(os.path.join(args.project, "matches_direct"), "rb"))

print('num images:', len(proj.image_list))

# traverse the matches structure and create a pair-wise match
# structure.  (Start with an empty n x n list of empty pair lists,
# then fill in the structures.)
pairs = []
homography = []
averages = []
stddevs = []
status_flags = []
dsts = []
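
The comment above describes turning the global match list into an n x n pair-wise structure. A minimal sketch of that fill loop; the layout of each match entry (a coordinate followed by [image_index, feature_index] pairs) is an assumption about matches_direct, not confirmed by this snippet:

# Hypothetical pair-wise fill; the 'matches' layout is assumed, not verified.
n = len(proj.image_list)
pair_lists = [[[] for _ in range(n)] for _ in range(n)]
for match in matches:
    obs = match[1:]                          # assumed [image_index, feature_index] pairs
    for i, (img_a, feat_a) in enumerate(obs):
        for img_b, feat_b in obs[i+1:]:
            pair_lists[img_a][img_b].append((feat_a, feat_b))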
Example #39
0
    def begin(self):
        #print("Starting processing")
        imageProcessor = ImageProcessing(self.meeting, self.arrayStudents,
                                         self.size)
        confidenceMatrix = imageProcessor.processImageAndGetConfidenceMatrix()
        #print("done confidence:")
        #for i in range(len(confidenceMatrix)):
        #    print(confidenceMatrix[i])

        useDelete = True
        useSocial = True
        matcher = Matcher(self.meeting, self.arrayStudents)
        attendance = matcher.matchStudents(confidenceMatrix, useDelete,
                                           useSocial)
        output = Output(self.meeting, self.arrayStudents,
                        self.meeting.getCroppedFaces(), attendance)
        imageOriginalWithAttendance, imageAttendancePath = output.createAndWriteAttendacePiture(
            "Matching")

        #write the attendance picture to the db
        self.database.writeImageWithAttendance(
            self.db, imageOriginalWithAttendance,
            self.meeting.getMeetingDirectory(), imageAttendancePath)

        #write the attendance to the db
        self.database.writeAttendance(self.db, self.arrayStudents)

        #write the social matrix to the db; first meetings use the first-meeting variant
        if self.meeting.getFirstMeeting():
            finalAverageSocialMatrix = output.findSocialMatrixFirstMeeting()
        else:
            finalAverageSocialMatrix = output.findAverageSocialMatrix()

        self.database.writeSocialMatrix(self.db, self.meeting,
                                        finalAverageSocialMatrix,
                                        self.arrayStudents)

        output.printAttendance()
        if (1 == 2):  #always False: the alternate flag combinations below are disabled
            useDelete = False
            useSocial = True
            matcher = Matcher(self.meeting, self.arrayStudents)
            attendance = matcher.matchStudents(confidenceMatrix, useDelete,
                                               useSocial)
            output4 = Output(self.meeting, self.arrayStudents,
                             self.meeting.getCroppedFaces(), attendance)
            output4.createAndWriteAttendacePiture("SocialNoDelete")
            output4.printAttendance()

            useDelete = True
            useSocial = False
            matcher = Matcher(self.meeting, self.arrayStudents)
            attendance = matcher.matchStudents(confidenceMatrix, useDelete,
                                               useSocial)
            output3 = Output(self.meeting, self.arrayStudents,
                             self.meeting.getCroppedFaces(), attendance)
            output3.createAndWriteAttendacePiture("Social")
            output3.printAttendance()

            useDelete = False
            useSocial = False
            matcher = Matcher(self.meeting, self.arrayStudents)
            attendance = matcher.matchStudents(confidenceMatrix, useDelete,
                                               useSocial)
            output2 = Output(self.meeting, self.arrayStudents,
                             self.meeting.getCroppedFaces(), attendance)
            output2.createAndWriteAttendacePiture("MatchingNoDelete")
            output2.printAttendance()
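
The three disabled blocks differ only in the (useDelete, useSocial) flags and the output label, so they could collapse into one loop over configurations. A sketch that is behaviorally equivalent to the disabled code above:

# Equivalent loop over the disabled comparison configurations.
comparison_runs = [
    (False, True,  "SocialNoDelete"),
    (True,  False, "Social"),
    (False, False, "MatchingNoDelete"),
]
for useDelete, useSocial, label in comparison_runs:
    matcher = Matcher(self.meeting, self.arrayStudents)
    attendance = matcher.matchStudents(confidenceMatrix, useDelete, useSocial)
    out = Output(self.meeting, self.arrayStudents,
                 self.meeting.getCroppedFaces(), attendance)
    out.createAndWriteAttendacePiture(label)
    out.printAttendance()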
Example #40
0
import DiffParser
import Matcher
import GIXDSimAnneal
'''
main for indexing
'''
figname = 'sva.fig'
Cell_Init_Guess = [10., 10., 10., 90., 90., 90.]

DP = DiffParser.DiffParser(fig_name=figname)
DP.plt_foundpeaks()   # plot the peaks identified (comment out to skip)
Parsed_sgs_Matrix = DP.sgs_matrix
Smooth_Erode, Expt_Peaks = DP.detect_peaks()
M = Matcher.Matcher(Parsed_sgs_Matrix, Expt_Peaks, Cell_Init_Guess, sg=1)

tsp = GIXDSimAnneal.OptProblem(Cell_Init_Guess, M)
tsp.copy_strategy = "slice"

# # auto find anneal param
# print(tsp.auto(minutes=60))
# # 'tmax': 140.0, 'tmin': 0.67, 'steps': 150000, 'updates': 100 on dlxlogin3-1

tsp.Tmax = 140
tsp.Tmin = 0.5
tsp.steps = 150000
tsp.updates = 100
state, e = tsp.anneal()
with open('anneal.out','a') as f:
    f.write('#--------------------\n')
    f.write(' '.join([str(round(i, 4)) for i in state]) + ' ' + str(round(e, 4)) + '\n')
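
The copy_strategy, Tmax, Tmin, steps, and updates attributes match the conventions of the simanneal package; if OptProblem follows them, it subclasses Annealer and overrides move() and energy(). An illustrative sketch of that shape only (the real objective lives in GIXDSimAnneal; the matcher.score call is hypothetical):

# Illustrative Annealer shape, assuming OptProblem follows the simanneal API.
import random
from simanneal import Annealer

class CellAnnealer(Annealer):
    def __init__(self, state, matcher):
        self.matcher = matcher
        super(CellAnnealer, self).__init__(state)   # state = 6 cell parameters

    def move(self):
        # Perturb one lattice parameter slightly.
        i = random.randrange(len(self.state))
        self.state[i] += random.uniform(-0.1, 0.1)

    def energy(self):
        # Lower is better; delegates to a hypothetical Matcher scoring method.
        return self.matcher.score(self.state)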
Example #41
0
def get_dataset(path):
    dataset = []
    for filename in os.listdir(path):
        if filename.endswith("pos"):
            with open(path + filename, "r") as f:
                t = f.read()
            dataset.append([re.sub('[^a-zA-Z]+', ' ', t), re.sub(r"(?:_pos)$", '', filename), "pos"])
        elif filename.endswith("neg"):
            with open(path + filename, "r") as f:
                t = f.read()
            dataset.append([re.sub('[^a-zA-Z]+', ' ', t), re.sub(r"(?:_neg)$", '', filename), "neg"])
    return dataset

### Main function

# FEATURE 1 - Match with the McDonald Dictionary
dataset = get_dataset("../mdatest/")
dico = matcher.get_dico(dataset)        # dico is a column with the matching scores of the MDAs versus the Finance Dictionary 
df=pd.DataFrame(dataset)
df[3] = pd.Series(dico)
df.columns = ['MD&A_Text','Filename','Actual','MatchDico'] 

# FEATURE 2 and 3 - Match with the Compustat financial data to get the indices 'delta_sales' and 'delta_at'
compustat = pd.read_csv('compustat_filenames.csv', sep=',')
de = compustat['delta_sale']
dt = compustat['delta_at']
ds = pd.merge(df, compustat, left_on='Filename', right_on='Filename')


# We split the global matrix "result" into a training and a testing set
train, test = validator.split(ds,0.5)

# We fit a Random Forest model (n_estimators default=10, min_samples_leaf default=1)
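
The truncated tail fits the model on the assembled features. A minimal sketch of that step, assuming train and test are DataFrames carrying the MatchDico, delta_sale, and delta_at columns plus the Actual pos/neg label (column names taken from this snippet):

# Illustrative Random Forest fit on the features assembled above.
from sklearn.ensemble import RandomForestClassifier

features = ['MatchDico', 'delta_sale', 'delta_at']
clf = RandomForestClassifier(n_estimators=10, min_samples_leaf=1)
clf.fit(train[features], train['Actual'])
print(clf.score(test[features], test['Actual']))    # mean accuracy on the held-out half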
Example #42
0
      s.send(result)

    s.close()
    cv.destroyWindow('Client Frame')
    for a in range(10):
      cv.waitKey(100)
    
  return


# ======================================================================
# Main
# ======================================================================
def main():
  matcherMethod = 'bf'

  try:
    matcherMethod = argv[1]
  except IndexError:
    print '[!] One argument expected (matcherMethod [bf, flann, template, svm, knn]).'
    print '[!] Using default matcher method %s.' % (matcherMethod)

  Matcher.configureMatcher(matcherMethod)
  dispatch(matcherMethod)

  return


if(__name__ == '__main__'):
  main()
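
The bare argv handling above could also be written with argparse, as in the other scripts in this collection; a minimal sketch with the same method choices:

# Alternative CLI handling with argparse (illustrative).
import argparse

parser = argparse.ArgumentParser(description='Select the matcher method.')
parser.add_argument('method', nargs='?', default='bf',
                    choices=['bf', 'flann', 'template', 'svm', 'knn'],
                    help='matcher method (default: bf)')
args = parser.parse_args()
Matcher.configureMatcher(args.method)
dispatch(args.method)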
Example #43
0
print "Determining coverage area"
p0 = raw_points[0]
x_min = p0[0]
x_max = p0[0]
y_min = p0[1]
y_max = p0[1]
for p in raw_points:
    if p[0] < x_min: x_min = p[0]
    if p[0] > x_max: x_max = p[0]
    if p[1] < y_min: y_min = p[1]
    if p[1] > y_max: y_max = p[1]
print "Area coverage = %.1f,%.1f to %.1f,%.1f (%.1f x %.1f meters)" % \
    (x_min, y_min, x_max, y_max, x_max-x_min, y_max-y_min)

# compute number of connections and cycle depth per image
Matcher.groupByConnections(proj.image_list, matches_sba)

# start with empty triangle lists
# format: [ [v[0], v[1], v[2], u, v], .... ]
for image in proj.image_list:
    image.tris = []
    
good_tris = 0
failed_tris = 0

# compute image.PROJ for each image
for image in proj.image_list:
    rvec, tvec = image.get_proj_sba()
    R, jac = cv2.Rodrigues(rvec)
    image.PROJ = np.concatenate((R, tvec), axis=1)
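
With image.PROJ = [R|t] available for every image, any two overlapping views can triangulate their shared features. A minimal sketch using cv2.triangulatePoints, assuming the 2D points are undistorted and expressed in normalized camera coordinates (consistent with the undistort_keypoints step used by these scripts):

# Illustrative two-view triangulation with the PROJ matrices built above.
def triangulate_pair(image_a, image_b, pts_a, pts_b):
    # pts_a, pts_b: 2xN float arrays of normalized (undistorted) coordinates.
    homog = cv2.triangulatePoints(image_a.PROJ, image_b.PROJ, pts_a, pts_b)
    return (homog[:3] / homog[3]).T      # Nx3 Euclidean points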
Example #44
0
    """Display a graph that shows which frequencies we
    will use in our hashing algorithm.
    """
    try:
        input_file = InputFile(sys.argv[1])
    except IOError, e:
        print ("ERROR: {e}".format(e=e))
        return

    sample_rate_adjust_factor = int(NORMAL_SAMPLE_RATE / input_file.get_sample_rate())

    freq_chunks = FFT(input_file, CHUNK_SIZE/sample_rate_adjust_factor).series()

    norm = LogNorm(0.000000001, numpy.amax(freq_chunks))

    winners = Matcher._bucket_winners(freq_chunks)

    # initialize an empty window
    master = Tk()
    master.wm_title(" ".join(sys.argv[1:]))
    chunks = len(freq_chunks)
    first_chunk = 0
    lines = UPPER_LIMIT
    blockSizeX = 2
    blockSizeY = 2
    w = Canvas(master, width=chunks*blockSizeX, height=lines*blockSizeY)
    w.pack()
    # for each chunk (which will be the X axis)
    for i in range(chunks):
        print i
        # for each line (the Y axis)