Example No. 1
    def doHttpGet(self, extPath):
        headers = {'User-Agent': self.USER_AGENT, 'Username': self.get_user_name()}

        if self.username is not None:
            auth = 'Basic ' + string.strip(base64.encodestring(self.username + ':' + self.password))
            headers['Authorization'] = auth

        try:
            httpServ = httplib.HTTPConnection(self.hostname, self.port)
            httpServ.request('GET', self.context + extPath, None, headers)
            response = httpServ.getresponse()
            return response
        except httplib.HTTPException:
            print "ERROR! Looks like the server is not running on " + self.hostname
            exit()
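
All of these snippets are Python 2: the print statement and httplib are gone in Python 3, string.strip no longer exists as a module function, and base64.encodestring was removed in Python 3.9. For orientation, here is a minimal Python 3 sketch of the same GET-with-Basic-auth pattern; the function and parameter names are illustrative, not taken from the original class.

import base64
import http.client  # Python 3 successor to Python 2's httplib


def do_http_get(host, port, path, username=None, password=None,
                user_agent='example-client/1.0'):
    """GET `path` from host:port, optionally with HTTP Basic auth."""
    headers = {'User-Agent': user_agent}
    if username is not None:
        # b64encode works on bytes and, unlike the old encodestring,
        # appends no trailing newline, so no strip() is needed.
        token = base64.b64encode(
            ('%s:%s' % (username, password)).encode('utf-8')).decode('ascii')
        headers['Authorization'] = 'Basic ' + token
    conn = http.client.HTTPConnection(host, port)
    conn.request('GET', path, None, headers)
    return conn.getresponse()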
Example No. 2
    def doHttpGet(self, extPath):
        headers = {
            'User-Agent': self.USER_AGENT,
            'Username': self.get_user_name()
        }

        if self.username is not None:
            auth = 'Basic ' + string.strip(
                base64.encodestring(self.username + ':' + self.password))
            headers['Authorization'] = auth

        try:
            httpServ = httplib.HTTPConnection(self.hostname, self.port)
            httpServ.request('GET', self.context + extPath, None, headers)
            response = httpServ.getresponse()
            return response
        except httplib.HTTPException:
            print "ERROR! Looks like the server is not running on " + self.hostname
            exit()
Example No. 3
    def doHttpPost(self, extPath, postdata='', headers=None):
        if postdata and self.message:
            postdata += '&YRS_MESSAGE=' + str(self.message)
        if not headers:
            headers = {}
        headers['User-Agent'] = self.USER_AGENT
        headers['Username'] = self.get_user_name()

        if self.username is not None:
            auth = 'Basic ' + string.strip(base64.encodestring(self.username + ':' + self.password))
            headers['Authorization'] = auth
        try:
            httpServ = httplib.HTTPConnection(self.hostname, self.port)
            httpServ.connect()
            httpServ.request('POST', self.context + extPath, postdata, headers)
            response = httpServ.getresponse()
            return response
        except httplib.HTTPException:
            print "ERROR! Looks like the server is not running on " + self.hostname
            exit()
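
One caveat in this POST variant: postdata += '&YRS_MESSAGE=' + str(self.message) appends the value unescaped, so a message containing '&', '=' or spaces corrupts the form body. A safer sketch using proper form encoding (Python 3; only the YRS_MESSAGE field name comes from the snippet, the rest is illustrative):

from urllib.parse import urlencode  # urllib.urlencode in Python 2


def build_post_body(fields, message=None):
    """Form-encode POST fields, optionally appending a YRS_MESSAGE value."""
    if message is not None:
        fields = dict(fields, YRS_MESSAGE=str(message))
    # urlencode percent-escapes reserved characters such as '&' and '='.
    return urlencode(fields)


print(build_post_body({'q': 'a&b'}, message='hello world'))
# -> q=a%26b&YRS_MESSAGE=hello+world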
Example No. 4
    def __init__(self, AOT_DIR):
        """ Загрузка DLL. """
        
        print 'RML is', os.environ.get('RML')
#        os.environ['RML'] = AOT_DIR                 # the AOT binaries need this to work. They are dumb.

        # interface for accessing the library
        self.gra_dll = ctypes.cdll.LoadLibrary(os.path.join(AOT_DIR, "sentencer.dll"))

        r = self.gra_dll.init_lib()

        if r:
            print "init error", r
            exit()

        print "SENTENCER LOAD COMPLETE"

        self.gra_dll.next_sentence.restype = ctypes.c_char_p

        self.ready = True
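
The line that matters most in this loader is the restype assignment: ctypes assumes every foreign function returns a C int, so without it the char* returned by next_sentence would arrive as a truncated integer. A self-contained illustration of the same mechanism against the C runtime (this demo is mine and has nothing to do with the AOT DLL itself):

import ctypes
import ctypes.util

# Load the C runtime (POSIX; on Windows something like ctypes.CDLL('msvcrt')).
libc = ctypes.CDLL(ctypes.util.find_library('c'))

# By default ctypes treats the return value as a C int, which would
# mangle the char* that getenv() actually returns.
libc.getenv.restype = ctypes.c_char_p
libc.getenv.argtypes = [ctypes.c_char_p]

print(libc.getenv(b'HOME'))  # e.g. b'/home/user', not a meaningless integer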
Example No. 5
    def doHttpPost(self, extPath, postdata='', headers=None):
        if postdata and self.message:
            postdata += '&YRS_MESSAGE=' + str(self.message)
        if not headers:
            headers = {}
        headers['User-Agent'] = self.USER_AGENT
        headers['Username'] = self.get_user_name()

        if self.username is not None:
            auth = 'Basic ' + string.strip(
                base64.encodestring(self.username + ':' + self.password))
            headers['Authorization'] = auth
        try:
            httpServ = httplib.HTTPConnection(self.hostname, self.port)
            httpServ.connect()
            httpServ.request('POST', self.context + extPath, postdata, headers)
            response = httpServ.getresponse()
            return response
        except httplib.HTTPException:
            print "ERROR! Looks like the server is not running on " + self.hostname
            exit()
Example No. 6
def main():
    parser = optparse.OptionParser('usage: %prog -H <tgtHost> -u <user> -d <direction>')
    parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
    parser.add_option('-u', dest='user', type='string', help='specify user')
    parser.add_option('-d', dest='direction', type='string', help='directory containing the weak keys')

    (options, args) = parser.parse_args()
    tgtHost = options.tgtHost
    user = options.user
    direction = options.direction

    if tgtHost is None or user is None or direction is None:
        print parser.usage
        exit(0)

    for fileName in os.listdir(direction):
        if Stop:
            print('[*] Exiting: Key Found.')
            exit(0)
        if Fails > 5:
            print('[!] Exiting: Too Many Connections Closed By Remote Host.')
            print('[!] Adjust number of simultaneous threads.')
            exit(0)
        connection_lock.acquire()
        fullPath = os.path.join(direction,fileName)
        print('[-] Testing keyfile ' + str(fullPath))
        thread_temp = threading.Thread(target=connect, args=(user, tgtHost, fullPath, True))
        thread_temp.start()
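
This scanner loop leans on four names defined outside the snippet: Stop, Fails, connection_lock and the connect() worker. One plausible shape for the missing pieces, assuming paramiko as the SSH library and a BoundedSemaphore to throttle the threads (the original may well use a different library):

import threading

import paramiko  # assumption: the original worker may use another SSH library

Stop = False                                   # set once a key succeeds
Fails = 0                                      # connections closed by the host
connection_lock = threading.BoundedSemaphore(value=4)  # acquired in main()


def connect(user, host, key_file, release):
    """Try one private key against host; report results via the globals."""
    global Stop, Fails
    try:
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host, username=user, key_filename=key_file, timeout=5)
        print('[+] Key found: ' + key_file)
        Stop = True
    except paramiko.AuthenticationException:
        pass                    # wrong key; move on to the next file
    except Exception:
        Fails += 1              # count connections the host dropped on us
    finally:
        if release:
            connection_lock.release()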
Example No. 7
def main():
    
#    # Part 1: Load data
#    
#    DIR_categories=os.listdir('../input_data/training/');      # list and store all categories (classes)
#    allFeatures=[]                                             # this list will store all training examples 
#    imLabels=[]                                               # this list will store all labels
#    labelCount=0;                                                      # this integer will effectively be the class label in our code (i = 1 to 101)
#    labelNames = []                                           # we will need the label names in order to plot their cardinalities later on
#    labelCardinalities = []                                   # see above
#    for cat in DIR_categories:                                # loop through all categories
#        if os.path.isdir('../input_data/training/'+ cat):   
#            labelNames.append(cat)
#            labelCount=labelCount+1;                                             # i = current class label
#            count = 0
#        
#            DIR_image=os.listdir('../input_data/training/'+ cat +'/');      # store all images of category "cat" 
#            for im in DIR_image:                                           # loop through all images of the current category
#                if (not '._image_' in im):                                 # protect ourselves against those pesky Mac OS X - generated files
#                    F = np.genfromtxt('../input_data/training/'+cat+'/'+im, delimiter=' '); # F is now an 2-D numpy ndarray holding all features of an image
#                    F = np.reshape(F,21*28);                               # F is now a 588 - sized 1-D ndarray holding all features of the image
#                    F = F.tolist();                                        # listify the vector
#                    F.append(labelCount)                                   # we'd like to store the label alongside the example
#                    count = count + 1
#                    allFeatures.append(F);                                  # store the vector
#                    imLabels.append(labelCount);                                    # store the label
#                    labelCardinalities.append(count)
#    print "training data loaded!"
    
    # Store some data on disk so we don't have to 
    # re-read it every time.
#    print " We will now count the counts of all classes to see whether something's wrong with them"
#    exIndex = 1
#    for label in range(1, labelCount + 1):
#        examplesOfLabel = [examples for examples in allFeatures if examples[-1] == label]
#        print "There are: " + str(len(examplesOfLabel)) + " examples of class " + str(label)
#     
#    print "We will now exit"
#    exit()
#    try: 
#        fp = open("../proc_data/trainingData.pdat",'wb')
#        pk.dump(allFeatures, fp)
#        fp.close()
#    except Exception as e:
#        print 'Pickling failed for object allFeatures: Exception: ' + e.message
#        exit()
#        
#    print "Training data stored on disk"
#    
#    try: 
#        fp = open("../proc_data/labelCount.pdat",'wb')
#        pk.dump(labelCount, fp)
#        fp.close()
#    except Exception as e:
#        print 'Pickling failed for object labelCount: Exception: ' + e.message
#        exit()
#    
#    print "Label count stored on disk"
#    
#    # Part 2: Initialize OVA structure and classifiers in memory
#    
#    
#        # First of all we need to draw training and tuning
#        # data from our original Caltech data.
#        # Reminder: testing (development) data has already been made available
#        # to us, so we don't need to partition the original data any further.
     
#        allFeatures = util.load("../proc_data/trainingData.pdat")
#        labelCount = util.load("../proc_data/labelCount.pdat")    
#        numTrainExamples = int(np.floor(.8 * len(allFeatures)))                      # need to convert ndarray scalar to int
#        np.random.seed(1)
#        np.random.shuffle(allFeatures)                                               # this achieves a degree of randomness
#    
#        trainingData = allFeatures[:numTrainExamples]                                # pull training data
#        tuningData = allFeatures[numTrainExamples:]                                  # pull tuning data
#        
##        print "Now that we have the training data in our hands, we will count the cardinalities of class within it: "
##        for label in range(1, labelCount + 1):
##            examplesOfLabel = [examples for examples in trainingData if examples[-1] == label]
##            print "There are: " + str(len(examplesOfLabel)) + " examples of class " + str(label)
#     
#
#        # Once we're done with data, we need to define the 
#        # OVA object in memory and add 101 classifiers inside it.
#        
#        ovaStructure = OVA(trainingData, tuningData, labelCount)                                
#        for _ in range(labelCount):
#            ovaStructure.addClassifier(AveragedPerceptron(5))            # training those classifiers for maxiter = 5
#        
#        print "Created an " + str(ovaStructure)
#    
#        ovaStructure.dump('../proc_data/stored_classifiers/firstOVA_untuned.pdat')
#        print "OVA object dumped in disk."
        
    # Part 3: Tune all classifiers and store the OVA object in memory.
    
        #ovaStructure = util.load("../proc_data/stored_classifiers/firstOVA_untuned.pdat")
        #print "Resumed the following OVA object: " + str(ovaStructure) + "."
        # ovaStructure.printInfo()                                                    # a debugging method that prints some stuff
        #ovaStructure.tune()
        #ovaStructure.dump("../proc_data/stored_classifiers/firstOVA_tuned.pdat")
        #print "We tuned all classifiers of the OVA object and stored them in memory." 
    
    
    
    #Part 4: Test the trained classifiers on the Caltech 101 development data.
    
        # The first thing we need to do is read the development data in memory.
        # We will use the same logic we used to scan the training data.
    
#        validationClasses=os.listdir('../input_data/validation/');      # list and store all categories (classes)
#        validationData=[]                                             # this list will store all training examples
#        label = 0 
#        for cat in validationClasses:                                # loop through all categories
#            if os.path.isdir('../input_data/validation/'+ cat):   
#                DIR_image=os.listdir('../input_data/validation/'+ cat +'/');      # store all images of category "cat"
#                label = label + 1 
#                for im in DIR_image:                                           # loop through all images of the current category
#                    if (not '._image_' in im):                                 # protect ourselves against those pesky Mac OS X - generated files
#                        F = np.genfromtxt('../input_data/validation/'+cat+'/'+im, delimiter=' '); # F is now an 2-D numpy ndarray holding all features of an image
#                        F = np.reshape(F,21*28);                               # F is now a 588 - sized 1-D ndarray holding all features of the image
#                        F = F.tolist();                                        # listify the vector
#                        F.append(label)                                   # we'd like to store the label alongside the example
#                        validationData.append(F);                                  # store the vector
#        print "Validation data loaded!"
#    
#        # We would like to have this representation of data stored in our hard disk
#        # so that we don't have to read it each and every time
#        
#        fp = open("../proc_data/validationData.pdat",'wb')
#        fp2 = open("../proc_data/labelCount.pdat",'wb')
#        pk.dump(validationData, fp)
#        pk.dump(label, fp2)
#        fp.close()
#        fp2.close()
#
#        print "Validation data stored on disk."
        
        # In order to adhere to the project's specifications, we need to 
        # train the OVA scheme in 5, 10, 20, 30, 40, 50, 60 random images per category
        # and then test it against the validation data.
        
        # To do this, we simply need to train 7 different OVA objects, which means 7 * 101 Averaged Perceptrons,
        # and test the accuracy of each against the validation data. We will use the getRandomLabeledExamples()
        # method to retrieve the random examples required, and then we will train our OVA
        
    try:   
#        labelCount = util.load("../proc_data/labelCount.pdat")
#        trainingData = util.load("../proc_data/trainingData.pdat")
#        validationData = util.load("../proc_data/validationData.pdat")
#        accuracy = []
#        for exampleNums in [5, 10, 20, 30, 40, 50, 60]:
#            reducedTrainingData = getReducedDataset(range(1, labelCount + 1), exampleNums, trainingData)
#            ovaClassifier = OVA(reducedTrainingData, None, labelCount)                 # No tuning data provided because we don't need to (2nd argument is None)
#
#            for _label_ in range(labelCount):
#                ovaClassifier.addClassifier(AveragedPerceptron())               # default AveragedPerceptron class MaxIter hyper-parameter for training without having tuned first: 15
#            print "Training " + str(ovaClassifier)
#            ovaClassifier.train()
#            print "Testing " + str(ovaClassifier) + " on validation data."
#            accuracy.append(1.0 - ovaClassifier.test(validationData))           # OVA.test returns error rate, so we subtract that from 1 to retrieve accuracy
#            
#        
#        # We will use drawError() to draw the accuracy.
#        print "Drawing accuracy results"
#        util.drawError([5, 10, 20, 30, 40, 50, 60], accuracy, "Learning curve for multi-class Averaged Perceptron.")
#        
#        # We will store the accuracy for future reference and plotting
#        
#        print "Dumping accuracy results to disk."
#        acFP = open("../proc_data/learningCurve.pdat","wb")
#        pk.dump(accuracy, acFP)
#        acFP.close()
#        
#        print "We stored the accuracy on disk."
#        print "Exiting..."
            
        # Q 7 : Learn the Perceptron with a varying number of iterations
        #
        #
        
        labelCount = util.load("../proc_data/labelCount.pdat")
        trainingData = util.load("../proc_data/trainingData.pdat")
        validationData = util.load("../proc_data/validationData.pdat")
        accuracy = []
        for maxIterVal in [1, 10, 50, 100, 500]:                                                         # sweeping maxIter; the larger values take a long time to train
            reducedTrainingData = getReducedDataset(range(1, labelCount+1), 50, trainingData)                # get 50 examples per class
            ovaClassifier = OVA(reducedTrainingData, None, labelCount)
            for _label_ in range(labelCount):
                ovaClassifier.addClassifier(AveragedPerceptron())
            ovaClassifier.setAllHyperparams(maxIterVal)                                                      # brute-force the perceptrons in this case
            print "Training " + str(ovaClassifier)
            ovaClassifier.train()
            print "Testing " + str(ovaClassifier) + " on validation data."
            accuracy.append(1.0 - ovaClassifier.test(validationData))           # OVA.test returns error rate, so we subtract that from 1 to retrieve accuracy
            
        # We will use drawError() to draw the accuracy.
        print "Drawing accuracy results"
        util.drawSimplePlot([1, 10, 50, 100, 500], accuracy, "Accuracy per maxIter for the Averaged Perceptron", "maxIter value", "Accuracy")
        
        # We will store the accuracy for future reference and plotting
        
        print "Dumping accuracy results to disk."
        acFP = open("../proc_data/accPerMaxIter.pdat","wb")
        pk.dump(accuracy, acFP)
        acFP.close()
        
        print "We stored the accuracy on disk."
        print "Exiting..."
            
    except DatasetError as d:
        print "A dataset-related error occured: " + str(d) + "."
        exit() 
    except LogicalError as l:
        print "A  logical error occured: " + str(l) + "."
        exit()
    except Exception as exc:
        print "An exception occurred: " + str(exc) + "."
        exit()
    except:
        print "An unknown error occurred."
        exit()
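
The code above calls getReducedDataset() without defining it. From the call sites (a list of class labels, a per-class example count, the full training set) and the earlier comments about drawing random images per category, a plausible reconstruction is the following; treat it as a guess, not the author's helper:

import random


def getReducedDataset(labels, perClass, data):
    """Draw up to perClass random examples of each label; every example
    stores its label in the last position, as the commented-out loader does."""
    reduced = []
    for label in labels:
        ofLabel = [ex for ex in data if ex[-1] == label]
        reduced.extend(random.sample(ofLabel, min(perClass, len(ofLabel))))
    return reduced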
Example No. 8
logging.basicConfig(level=logging.INFO)

# url = 'http://webservice.webxml.com.cn/WebServices/MobileCodeWS.asmx?wsdl'
# url = 'http://webservice.webxml.com.cn/webservices/DomesticAirline.asmx?wsdl'
# url = 'http://webservice.webxml.com.cn/webservices/ChinaTVprogramWebService.asmx?wsdl'
url = 'http://webservice.webxml.com.cn/WebServices/IpAddressSearchWebService.asmx?wsdl'
client = suds.client.Client(url)
# print client

#result = client.service.getDatabaseInfo()
a = 1
while a:
    ip_name = raw_input("Enter your IP address (type exit to quit): ")
    if ip_name == 'exit':
        print 'Exiting normally'
        exit()

    result = client.service.getCountryCityByIp(ip_name)
    print result
    print type(result)
    
#     a=0
#     for k, v in enumerate(result):
#         print v[1][1][0]
# print result
# result = client.service.getMobileCodeInfo('170789456')
# logging.info(result)
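
suds itself is Python 2 only (note the raw_input and print statements above). For the same IP lookup under Python 3 one option is zeep, or the maintained suds-community fork; the following is a sketch, not a drop-in port of the original:

from zeep import Client

# Same WSDL URL and operation name as the suds example above.
client = Client('http://webservice.webxml.com.cn/WebServices/'
                'IpAddressSearchWebService.asmx?wsdl')

while True:
    ip_name = input('Enter your IP address (type exit to quit): ')
    if ip_name == 'exit':
        print('Exiting normally')
        break
    print(client.service.getCountryCityByIp(ip_name))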

