Example #1
    def testWriteObjectToFileAsJson(self):
        obj=[1,2,3]
        testFile="testWriteTofile.txt"
        Util.writeObjectToFileAsJson(obj,testFile)

        res=Util.readJsonFileIntoObject(testFile)
        self.assertEqual(obj,res)
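
The test round-trips a Python object through a JSON file. A minimal sketch of what the two Util helpers might look like (names taken from the test; the implementation is assumed, not the project's actual code):

    import json

    def writeObjectToFileAsJson(obj, path):
        # Serialize obj to JSON and write it to the given file.
        with open(path, "w") as f:
            json.dump(obj, f)

    def readJsonFileIntoObject(path):
        # Read the file back and parse the JSON into a Python object.
        with open(path) as f:
            return json.load(f)
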
 def setUp(self):
     '''
     Calls the parent setUp and installs the calculator
     '''
     super(TestesCalculadora,self).setUp()
     util = Util (self.device,self.vc)
     util.instala_calculadora()
Example #3
 def QuotePost(self, svc, post_id, xid, include_mode, index_mode, include_data):
     if (index_mode == 'junk' or index_mode == 'deleted'):
         raise NoPerm("invalid index_mode!")
     (post, _) = self.FindPost(post_id, xid, index_mode)
     if (post is None):
         raise NotFound("referred post not found")
     quote = Post.Post.DoQuote(include_mode, self.GetBoardPath(post.filename), True, include_data)
     orig_title = ''
     if (post.title[:3] == "Re:"):
         # Re: <title>
         orig_title = post.title[4:]
     elif (post.title[:3] == u"├ ".encode('gbk')):
         orig_title = post.title[3:]
     elif (post.title[:3] == u"└ ".encode('gbk')):
         orig_title = post.title[3:]
     else:
         orig_title = post.title
     if include_mode == 'C':
         quote_title = orig_title
     else:
         quote_title = "Re: " + orig_title
     quote_obj = {}
     quote_obj['title'] = Util.gbkDec(quote_title)
     quote_obj['content'] = Util.gbkDec(quote)
     svc.writedata(json.dumps(quote_obj))
 def testOK(self):
     msg="AAAAA"
     msgSeq=Util.encodeStringIntoByteList(msg)
     self.level.inputMsg=msgSeq
     resultLevel=self.levelEncryptor.encryptLevel()
     decMsg=Util.decodeByteListIntoString(resultLevel.inputMsg)
     self.assertEqual(msg,decMsg)
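
The test encodes a string into a byte list, runs it through the encryptor, and decodes it again. A hedged sketch of the two conversion helpers it assumes (names from the test, implementation assumed):

    def encodeStringIntoByteList(msg):
        # One integer (0-255) per character.
        return [ord(c) for c in msg]

    def decodeByteListIntoString(byteList):
        # Inverse of encodeStringIntoByteList.
        return "".join(chr(b) for b in byteList)
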
Example #5
    def GetInfo(self, mode = 'post'):
        post = {'title': Util.gbkDec(self.title)}
        post['attachflag'] = self.attachflag
        post['attachment'] = self.attachment
        post['owner'] = Util.gbkDec(self.owner)
        try:
            post['posttime'] = self.GetPostTime()
        except:
            post['posttime'] = 0
        flags = []
        if (self.IsMarked()):
            flags += ['marked']
        if (mode == 'post'):
            post['xid'] = self.id
            post['thread'] = self.groupid
            post['reply_to'] = self.reid
            post['size'] = self.eff_size
            if (self.CannotReply()):
                flags += ['noreply']
            if (self.InDigest()):
                flags += ['g']
        if (mode == 'mail'):
            if (self.IsReplied()):
                flags += ['replied']
            if (self.IsForwarded()):
                flags += ['forwarded']
            if (self.IsRead()):
                post['read'] = True
            else:
                post['read'] = False

        post['flags'] = flags

        return post
def main():
	cates_feature=collect_cates()
	all_features=loadFeatures()
	rocchioIndex=loadIndex()
	totalentroy=loadEntroy()
	myutil.makedirectory('cache/subkNNs/')
	while True:
		test_entry=(yield)
		vectorOfme=createVector(test_entry,all_features)
		entroy_of_me={k:v for k,v in totalentroy.items() if k in vectorOfme}
		if not isEmptyVector(vectorOfme):
			yield (test_entry.url,-1)
			continue
		first_candidate=choose_candidate_cate(vectorOfme,rocchioIndex,entroy_of_me)
		if not first_candidate:
			yield (test_entry.url,-1)
			continue
		candidate_cates,cates_tfidf=\
		further_choose_candidate_cate(test_entry,cates_feature,first_candidate,all_features)
		
		threads=[]
		for cate in candidate_cates:
			t=threading.Thread(target=exec_Maper,args=(cate,vectorOfme,entroy_of_me,))
			threads.append(t)
		for t in threads:t.start()
		for t in threads:t.join()
		
		result=Reducer.main(tfidf=cates_tfidf,cateweight=candidate_cates)
		test_entry.thinkbe=result
		
		yield (test_entry.url,test_entry.thinkbe)
Example #7
File: Post.py Project: net9/pybbs
    def PrepareHeader(user, in_mail, board, title, anony, mode, session):
        result = ""
        uid = Util.gbkDec(user.name[:20])
        uname = Util.gbkDec(user.userec.username[:40])
        if not in_mail:
            bname = board.name.decode('gbk')
        bbs_name = Config.Config.GetString('BBS_FULL_NAME', 'Python BBS')

        if (in_mail):
            result += u'寄信人: %s (%s)\n' % (uid, uname)
        else:
            if (anony):
                pid = (binascii.crc32(session.GetID()) % 0xffffffff) % (200000 - 1000) + 1000
                result += u'发信人: %s (%s%d), 信区: %s\n' % (bname, Config.Config.GetString('NAME_ANONYMOUS', 'Anonymous'), pid, bname)
            else:
                result += u'发信人: %s (%s), 信区: %s\n' % (uid, uname, bname)

        result += u'标  题: %s\n' % (title)

        if (in_mail):
            result += u'发信站: %s (%24.24s)\n' % (bbs_name, time.ctime())
            result += u'来  源: %s \n' % session._fromip
        elif (mode != 2):
            result += u'发信站: %s (%24.24s), 站内\n' % (bbs_name, time.ctime())
        else:
            result += u'发信站: %s (%24.24s), 转信\n' % (bbs_name, time.ctime())

        result += '\n'
        return result
Example #8
 def __transmit(self, args):
     """Thread method to transmit an apdu;"""
     
     if not self.__checkContext(): return
     if not self.__checkCardInfo(): return
     
     cmd = args[0]
     t0AutoGetResponse = args[1]
     handlerArgs = args[2]
     
     transtime = 0
     try:
         commandValue = Util.s2vs(cmd)
         
         self.__handler.handleAPDUCommand("".join("%02X " %(ord(vb)) for vb in commandValue), handlerArgs)
         timeStart = timeit.default_timer()
         rsp = gp.sendApdu(self.__context, self.__cardInfo, None, Util.s2vs(cmd))
         timeStop = timeit.default_timer()
         transtime = timeStop - timeStart
         self.__handler.handleAPDUResponse("".join("%02X " %(ord(vb)) for vb in rsp), transtime, handlerArgs)
         
         if t0AutoGetResponse and (rsp[0] == '\x61') and (len(handlerArgs) == 0):
             cmd = '\x00\xC0\x00\x00' + rsp[1]
             self.__handler.handleAPDUCommand("".join("%02X " %(ord(vb)) for vb in cmd))
             timeStart = timeit.default_timer()
             rsp = gp.sendApdu(self.__context, self.__cardInfo, None, cmd)
             timeStop = timeit.default_timer()
             transtime = timeStop - timeStart
             self.__handler.handleAPDUResponse("".join("%02X " %(ord(vb)) for vb in rsp), transtime)
     except Exception, e:
         self.__handler.handleAPDUResponse('', transtime, handlerArgs)
         self.__handler.handleException(e)
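
Util.s2vs is applied both to APDU text and to key fields typed into the GUI, so it presumably converts hex text into a raw byte string. A hypothetical sketch of that reading (the real helper may differ):

    import binascii

    def s2vs(hex_text):
        # Assumed behaviour: "00 A4 04 00" -> '\x00\xa4\x04\x00'.
        return binascii.unhexlify(hex_text.replace(" ", ""))
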
Example #9
 def force_learn(self, text):
     ## some checks
     assert (self.click_matrix.shape[0] == self.click_matrix.shape[1]), \
             "Something wrong with the dimensions of the click matrix!"
     assert (self.click_matrix.shape[0] == len(self.known_urls)), \
             "Something wrong with the number of known urls!"
     assert (len(self.spend_time) == len(self.known_urls)), \
             "Time/url mismatch: {}-{}".format(len(self.spend_time), 
                                               len(self.known_urls))
     
     info = Util.parse_log_line(text)
     if info is not None:
         if Guesser.use_derived_urls:
             all_urls = [info.url]
             all_urls.extend(Util.get_derived_urls(info.url))
             all_urls2 = [info.url2]
             all_urls2.extend(Util.get_derived_urls(info.url2))
             
             for idx, url in enumerate(reversed(all_urls)):
                 for idx2, url2 in enumerate(reversed(all_urls2)):
                     info.url = url
                     info.url2 = url2
                     self.force_learn_from_info(info, idx + idx2)
         else:
             self.force_learn_from_info(info)
Example #10
File: Driver.py Project: atulkum/ml
def main():
     
    algo = sys.argv[1]
    trainfile = sys.argv[2]
    weightfile = sys.argv[3]
    testfilename = sys.argv[4]
    isCr = sys.argv[5]
    
    if(algo == 'LR'):
        D = 2**20   
        lr = LR(D)
        lr.train(trainfile)
        lr.saveWeight(weightfile)
        #lr.readWeight(weightfile)
        if(isCr == 'cr'):
            Util.cross_validation(testfilename, lr.get_features, lr.get_prediction)
    elif(algo == 'LR_CONV'):
        D = 2**30   
        lr = LR(D)
        lr.train_conjecture(trainfile)
        lr.saveWeight(weightfile)
        if(isCr == 'cr'):
            Util.cross_validation(testfilename, lr.get_features_conjectured, lr.get_prediction)
        else:
            Util.test(testfilename, 'LR_CONV_TEST', lr.get_features_conjectured, lr.get_prediction)
    elif(algo == 'PROBIT'):
        D = 2**20   
        pr = PROBIT_R(D, 0.3, 0.05, 0.01)
        pr.train(trainfile)
        pr.saveWeight(weightfile)
        if(isCr == 'cr'):
            Util.cross_validation(testfilename, pr.get_features, pr.predict)
        else:
            Util.test(testfilename, 'PROBIT_TEST',pr.get_features, pr.predict)
 def setFromBytes(self, midiData):
     eventData = Util.stripLeadingVariableLength(midiData[2:])
     # True for major, False for minor
     self.majorKey = (Util.intFromBytes(eventData[1:2]) == 0)
     # true for sharps, false for flats
     self.sharpKey = (Util.intFromBytes(eventData[0:1], True) > 0)
     self.numberOfAccidentals = abs(Util.intFromBytes(eventData[0:1], True))
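
The setFromBytes examples rely on Util.intFromBytes, whose optional second argument the key-signature code treats as a signed flag (the MIDI sf field runs from -7 to +7). A sketch under that assumption, not the library's actual code:

    def intFromBytes(data, signed=False):
        # Big-endian integer from a byte string; with signed=True the value
        # is interpreted as two's complement.
        raw = bytearray(data)
        value = 0
        for b in raw:
            value = (value << 8) | b
        if signed and raw and raw[0] & 0x80:
            value -= 1 << (8 * len(raw))
        return value
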
Example #12
    def genIndex(desPath, fileInfo, param):
        defaultIndexTemplate = Util.readAll(param["defaultIndexPath"])

        category= GenIndex.getCateGory("/site", fileInfo, param)
        indexContent = re.sub(r"\{[\s]*\{[\s]*category[\s]*\}[\s]*\}", category, defaultIndexTemplate)
        #pureCategory = GenIndex.getCateGory("/site", fileInfo, param)
        #indexContent = re.sub(r"\{[\s]*\{[\s]*pureCategory[\s]*\}[\s]*\}", pureCategory, defaultIndexTemplate)
        Util.fwrite(desPath, indexContent)
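
The substitution targets a {{ category }} placeholder in the template, tolerating whitespace inside the braces. A small illustration with made-up values:

    import re

    template = "<nav>{ { category } }</nav>"
    category = "<ul><li>posts</li></ul>"
    print(re.sub(r"\{[\s]*\{[\s]*category[\s]*\}[\s]*\}", category, template))
    # -> <nav><ul><li>posts</li></ul></nav>
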
Example #13
    def checkDeprecatorOptions(options, parser):
        MarketplaceUtil.checkEndpointOption(options)

        if not P12Certificate.checkOptions(options):
            parser.error('Missing credentials. Please provide %s' % P12Certificate.optionString)

        if not options.email:
            parser.error('Missing email address. Please provide email of endorser')
 def setFromBytes(self, midiData):
     eventData = Util.stripLeadingVariableLength(midiData[2:])
     # default is 4
     self.numerator = Util.intFromBytes(eventData[0:1])
     # default is 4 (or encoded 2 since 2^2 is 4)
     self.denominator = math.pow(2, Util.intFromBytes(eventData[1:2]))
     # default is 1 (or 24 encoded since 24/24 = 1)
     self.beatsPerTick = Util.intFromBytes(eventData[2:3]) / 24
     # default is 8
     self.thirtySecondNotesPerBeat = Util.intFromBytes(eventData[3:])
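
Util.stripLeadingVariableLength appears to drop the length field of a MIDI meta event, which is encoded as a variable-length quantity (continuation bytes have the high bit set). A sketch under that assumption:

    def stripLeadingVariableLength(data):
        # Skip continuation bytes (high bit set) plus the final length byte,
        # returning only the event payload that follows.
        raw = bytearray(data)
        i = 0
        while i < len(raw) and raw[i] & 0x80:
            i += 1
        return raw[i + 1:]
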
Example #15
 def _mutualAuthButtonOnButtonClick( self, event ):
     scp = -1
     scpi = -1
     if self._scpinfoMethod.GetSelection() == 1:
         scp = self._scpChoice.GetSelection() + 1
         scpi = int(self._scpiTextCtrl.GetValue(), 0x10)
     key1 = Util.s2vs(self._key1TextCtrl.GetValue())
     key2 = Util.s2vs(self._key2TextCtrl.GetValue())
     key3 = Util.s2vs(self._key3TextCtrl.GetValue())
     self.__controller.doMutualAuth(scp, scpi, key1, key2, key3)
Example #16
 def initV6Db(self):
     db = self.db
     self.osLen = Util.byte2int(db[6])
     # print('offset: %s' % self.osLen)
     self.ipLen = Util.byte2int(db[7])
     self.dLen = self.osLen + self.ipLen
     # print('iplen: %s' % self.ipLen)
     self.size = Util.byte2int(db[8:0x10], False)
     # print('total: %s' % self.size)
     self.dbAddr = Util.byte2int(db[0x10:0x18], False)
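
Util.byte2int turns a slice of the database file into an integer; the second argument is presumably a byte-order switch (offsets in these IP databases are typically stored little-endian). A hypothetical sketch of that reading:

    def byte2int(data, big_endian=True):
        # Unsigned integer from a byte string; False is assumed to mean
        # little-endian, matching how the file offsets appear to be stored.
        raw = bytearray(data)
        if not big_endian:
            raw.reverse()
        value = 0
        for b in raw:
            value = (value << 8) | b
        return value
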
 def executar(dirBase, listN, listL, comConsultaRetirada, comQuebraLinha, comComentariosELiterais, comTermos1Ocorrencia):
     resultadosExperimentos = []
     
     for n in listN:
         preparador = Preparador(config.dirBasePreparada, config.extensaoPadrao, n, comQuebraLinha, comComentariosELiterais)
         # Configure the query preparer (the baseline uses comComentariosELiterais = False)
         preparadorConsulta = Preparador(config.dirBasePreparada, config.extensaoPadrao, n, comQuebraLinha, False)
         
         # Before preparing the files, empty the directory where the n-grams will be stored
         Util.esvaziarDiretorio(config.dirBasePreparada)
         # Extract and save the relevant features of the files for later indexing
         arquivosParaPreparar = glob.glob(os.path.join(dirBase, "*" + config.extensaoAceita))
         preparador.prepararArquivos(arquivosParaPreparar)
         
         for L in listL:
             indexador = Indexador(preparador, L, comTermos1Ocorrencia)
             # Configure the query indexer (the baseline uses comTermos1Ocorrencia = True)
             indexadorConsulta = Indexador(preparadorConsulta, L, True)
             buscador = Buscador(preparadorConsulta, indexadorConsulta)
             experimento = Experimento(indexador, buscador, comConsultaRetirada)
             
             if (comComentariosELiterais):
                 print "COM comentarios e literais"
             else: print "SEM comentarios e literais"
             
             if (comTermos1Ocorrencia):
                 print "COM termos com 1 ocorrencia"
             else: print "SEM termos com 1 ocorrencia"
             
             print "Para n = ", n
             print "Para L = ", L
             print "=> RESULTADO <="
             
             # Index the files in the directory according to the SPI algorithm rules
             arquivosParaIndexar = glob.glob(os.path.join(config.dirBasePreparada, "*" + config.extensaoPadrao))
             # dictPerfilAutores maps each author to their indexed vocabulary
             dictPerfilAutores = indexador.indexarArquivos(arquivosParaIndexar)
             
             # Before saving the indices for validation, empty the directory where they will be stored
             Util.esvaziarDiretorio(config.dirIndicesValidacao)
             indexador.salvarValidacaoIndices(config.dirIndicesValidacao, dictPerfilAutores, config.extensaoPadrao)
             
             # Compare each query file against every file in the base, one after another,
             # re-indexing that file's author profile before the comparison
             # in order to identify the author of the file
             arquivosConsulta = glob.glob(os.path.join(dirBase, "*" + config.extensaoAceita))
             # resultadoExperimento holds numExperimentos, numAcertos and acuracia
             resultadoExperimento = experimento.testar(arquivosConsulta, dictPerfilAutores, config.dirBasePreparada, config.extensaoPadrao)
             resultadosExperimentos.append(ExecucaoExperimento.guardarResultado(n, L, comComentariosELiterais, comTermos1Ocorrencia, resultadoExperimento))
         
         print "[FIM DO n]"
         print "******************************************************************************\n"
     print "[FIM DO TESTE]"
     print "******************************************************************************\n"
     return "".join(resultadosExperimentos)
Example #18
    def __atualizaPosicao(self, passaro):
        velocidade_atual = sum(passaro.velocidade)/TSPConstants.N_DIMENSION;

        # The current velocity determines how many changes will need to be made
        for j in range(int(velocidade_atual)):
            # 50/50 chance.
            if random.random() > 0.5:
                Util.dispor_aleatoriamente(passaro)

            # Push it closer to its best neighbor.
            Util.copiar_da_particula(passaro.g, passaro.posicao)
Example #19
 def initV4Db(self):
     db = self.db
     self.dbAddr = Util.byte2int(db[0:4], False)
     # print('dbaddr: %s' % self.dbAddr)
     endAddr = Util.byte2int(db[4:8], False)
     # print('endAddr: %s' % endAddr)
     self.osLen = 3
     # print('offset: %s' % self.osLen)
     self.ipLen = 4
     # print('iplen: %s' % self.ipLen)
     self.dLen = self.osLen + self.ipLen
     self.size = (endAddr - self.dbAddr) / self.dLen
Example #20
    def openWebpageForPosition(self, modelIndex):
        """
        @param QModelIndex modelIndex
        """

        exchange = self.positionsModel.getExchange(modelIndex)
        ticker = self.positionsModel.getTicker(modelIndex)
        exchangeTicker = "%s:%s" % (exchange, ticker)

        googleUrl = self.prop.getGoogleUrl()
        url = "http://%s/finance?q=%s" % (googleUrl, exchangeTicker)
        qDebug("Opening URL: %s" % (url))
        Util.openUrl(url)
Example #21
    def quote_mail(self, folder_name, mode, index):
        folder = self.get_folder(folder_name)
        if folder is None:
            raise NotFound("no such mailbox: %s" % folder_name)
        entry = folder.get_entry(index - 1)
        path = self.path_of(entry.filename)

        quote_content = Util.gbkDec(Post.Post.DoQuote(mode, path, False))
        if entry.title[:3] == "Re:":
            quote_title = Util.gbkDec(entry.title)
        else:
            quote_title = "Re: " + Util.gbkDec(entry.title)
        return (quote_title, quote_content)
Example #22
    def testar(self, arquivosConsulta, dictPerfilAutor, dirBasePreparada, extensaoPadrao):
        # Experiment information
        numExperimentos = 0
        numAcertos = 0
        autorAnterior = ""
        
        for arquivoConsulta in arquivosConsulta:
            autorVerdadeiro = Util.getNomeAutor(arquivoConsulta)
            
            # If comConsultaRetirada is True, remove the query file from the base so it is excluded from the comparison
            if (self.comConsultaRetirada):
                if (autorAnterior != "" and autorAnterior != autorVerdadeiro):
                    # Re-index all of the previous author's code files
                    arquivosParaIndexar = glob.glob(os.path.join(dirBasePreparada, autorAnterior + "*" + extensaoPadrao))
                    vocabularioAutorAnteriorIndexado = self.indexador.indexarArquivos(arquivosParaIndexar)
                    vocabularioAutorAnteriorIndexado = dict(vocabularioAutorAnteriorIndexado[autorAnterior])
                    dictPerfilAutor[autorAnterior] = vocabularioAutorAnteriorIndexado
                
                # Re-index this author's code files (without arquivoParaRetirar)
                arquivosParaIndexar = glob.glob(os.path.join(dirBasePreparada, autorVerdadeiro + "*" + extensaoPadrao))
                arquivoParaRetirar = Util.getNomeArquivo(arquivoConsulta)
                arquivoParaRetirar = dirBasePreparada[0:len(dirBasePreparada)-1] + "\\" + arquivoParaRetirar + extensaoPadrao

                vocabularioAutorIndexado = self.indexador.indexarArquivosSemArquivoEspecifico(arquivosParaIndexar, arquivoParaRetirar)
                vocabularioAutorIndexado = dict(vocabularioAutorIndexado[autorVerdadeiro])
                dictPerfilAutor[autorVerdadeiro] = vocabularioAutorIndexado
                
                # Check for single-occurrence terms in the indexed base
                #for autor, ngramsIndexado in dictPerfilAutor.iteritems():
                #    for ngrams, frequencia in ngramsIndexado.iteritems():
                #        if (frequencia == 1):
                #            print "igual a 1"
                #            print autor
                #            print ngrams, " : " , frequencia
            
            # Run the query/comparison and suggest the author of the file
            autorScap = self.buscador.compararComTodosDaBase(arquivoConsulta, dictPerfilAutor)
            autorAnterior = autorVerdadeiro
            
            numExperimentos += 1
            if (autorVerdadeiro == autorScap):
                numAcertos += 1
            acuracia = numAcertos/float(numExperimentos)
            
            #self.imprimirResultado(arquivoConsulta, autorVerdadeiro, autorScap, numExperimentos, numAcertos, acuracia)
            # if block to print only the last result, i.e. the algorithm's overall accuracy
            if (numExperimentos == len(arquivosConsulta)):
                self.imprimirResultado(numExperimentos, numAcertos, acuracia)
        
        return self.guardarResultado(numExperimentos, numAcertos, acuracia)
Example #23
	def solve (self, n):
		""" Solves the euler problem 026 """
		util = Util()
		denominator = 2
		maxCycle = 0
		d = 2
		bestD = 0
		while d<n:
			cycle = util.periodSize(d)
			if cycle > maxCycle:
				maxCycle=cycle
				bestD=d
			d+=1
		return bestD
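
periodSize(d) is the length of the repeating cycle in the decimal expansion of 1/d (Project Euler 26 asks for the d below n with the longest cycle). A standard way to compute it, sketched here as an assumption about what the Util method does, is long division while recording where each remainder first appeared:

    def periodSize(d):
        # Position at which each remainder was first seen during long division of 1/d.
        seen = {}
        remainder = 1
        position = 0
        while remainder != 0 and remainder not in seen:
            seen[remainder] = position
            remainder = (remainder * 10) % d
            position += 1
        if remainder == 0:
            return 0  # terminating decimal, no repeating cycle
        return position - seen[remainder]
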
Example #24
    def processPayload(self, payload):
        stripped = Util.removeNewLine(payload)

        jsonRegexResult = self.JSON_RE.search(stripped)
        jsonRegexMatched = jsonRegexResult.group(0)

        jsonRegexMatched = Util.evalJson(jsonRegexMatched)
        jsonObjects = Util.loadsJsonString(jsonRegexMatched, "utf-8")
        for jsonObj in jsonObjects:
            exchange = jsonObj['e']
            symbol = jsonObj['t']
            self.storeQuote(exchange, symbol, jsonObj)

        self.emit(SIGNAL("quotesCached"))
Example #25
 def getLoc(self, index):
     self.checkIndex(index)
     addr = self.dbAddr + index * self.dLen
     ip = Util.byte2int(self.db[addr : addr + self.ipLen], False)
     lAddr = Util.byte2int(self.db[addr + self.ipLen : addr + self.dLen], False)
     # print('ip_addr: %d ip: %d lAddr:%d' % (addr, ip, lAddr))
     if self.type == 4:
         lAddr += 4
     loc = self.readLoc(lAddr, True)
     if self.type == 4:
         loc = loc.decode("cp936").encode("gbk")
     if self.type == 6:
         loc = loc.decode("utf-8").encode("gbk")
     return loc
Example #26
    def main():

        print("******************************************")
        print("***          FileSet Report            ***")
        print("******************************************")
        print()

        fileORdir = Util.getCommandLineArgument(1)
        level = Util.getCommandLineArgument(2)
        files = FileSet(fileORdir, "hack")
        files.report()

        print()
        print("******************************************")
        print("***         Processing Report          ***")
        print("******************************************")
        print()

        while files.hasMoreFiles():
            inputFileSpec = files.nextFile()
            print("Processing: %s" % inputFileSpec)
            outputFileSpec = os.path.splitext(inputFileSpec)[0]+".dis"
            inputFile = open(inputFileSpec, "rU")
            outputFile = open(outputFileSpec, "w")
            parser = Parser(inputFile)
            while parser.hasMoreInstructions():
                parser.advance()
                if (parser.instructionType() == "A_TYPE"):
                    value = parser.value()
                    inst = Code.a_type(value)
                if (parser.instructionType() == "C_TYPE"):
                    dest = parser.dest()
                    comp = parser.comp()
                    jump = parser.jump()
                    destMnemonic = Code.destMnemonic(dest)
                    compMnemonic = Code.compMnemonic(comp)
                    jumpMnemonic = Code.jumpMnemonic(jump)
                    inst = Code.c_type(destMnemonic, compMnemonic, jumpMnemonic)
                if (parser.instructionType() == "INVALID"):
                    inst = Code.invalid_type()
                inst += Util.repeatedChar(" ", 20-len(inst))
                inst += "// %05i:" % parser.address()
                inst += " [%s]" % parser.hexInstruction()
                inst += " %s\n" % parser.parsedInstruction()
                outputFile.write(inst)
            outputFile.close()
            inputFile.close()

        print()
        print("Processing of file(s) complete.")
Example #27
File: Post.py Project: net9/pybbs
 def ReadPostText(path, start = 0, count = 0):
     try:
         postf = open(path, 'rb')
     except IOError:
         raise ServerError("fail to load post")
     try:
         ret = ''
         if (start == 0 and count == 0):
             while (True):
                 data = postf.read(512)
                 i = data.find('\0')
                 if (i != -1):
                     ret = ret + data[:i]
                     break
                 else:
                     ret = ret + data
                     if (len(data) < 512):
                         break
             return (Util.gbkDec(ret), len(ret), True)
         else:
             current = 0
             has_end = False
             while True:
                 data = postf.read(512)
                 nullpos = data.find('\0')
                 if nullpos != -1:
                     # this makes data shorter
                     # so len(data) must <512
                     data = data[:nullpos]
                     assert len(data) < 512
                 final = len(data) < 512
                 newline = data.find('\n')
                 while newline != -1:
                     if count != 0 and current >= start + count:
                         break
                     if current >= start:
                         ret += data[:newline + 1]
                     data = data[newline + 1:]
                     newline = data.find('\n')
                     current += 1
                 if count != 0 and current >= start + count:
                     break
                 if current >= start:
                     ret += data
                 if final:
                     has_end = True
                     break
             return (Util.gbkDec(ret), 0, has_end)
     finally:
         postf.close()
Example #28
 def getMostCommonHex(self, position):
     #Position should be either 22, 23, 24
     equalsGroupNum = [] #Dev_Data_3 sometimes equals the group number.
     elements = []
     for _, address in enumerate(self.aldb):
         elements.append(address[position])
         equalsGroupNum.append(address[position] == address[17] and position == Device.DEV_DATA_3)
         if position == Device.DEV_END and address[position] != 0x00:
             return 0xbad
         
     # Util.most_common returns True when True is the most frequent value in the list
     if Util.most_common(equalsGroupNum):
         return 0xbad
     return Util.most_common(elements)
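
Util.most_common returns the most frequent element of a list; the code above applies it to a list of booleans and to a list of raw byte values. A minimal sketch using collections.Counter (assumed implementation):

    from collections import Counter

    def most_common(items):
        # Most frequent element; ties are resolved arbitrarily.
        return Counter(items).most_common(1)[0][0]
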
Example #29
    def new_entry(self, owner, title, content):
        if not self.create():
            raise ServerError("fail to create mail dir for user '%s'" % self.name)
        entry = PostEntry.PostEntry()
        entry.owner = owner
        entry.title = Util.gbkEnc(title)
        # create file
        entry.filename = Post.Post.GetPostFilename(self.path_of(""), False)
        encoded_content = Util.gbkEnc(content)
        entry.eff_size = len(encoded_content)
        path = self.path_of(entry.filename)

        with open(path, "wb") as f:
            f.write(encoded_content)
        return entry
 def application(self,request):
     # Dispatcher is dictionary {<method_name>: callable}
     bmcUtil = Util()
     bmcListOfMethods = {}
     bmcListOfMethod = bmcUtil.readLocalAppVariableFile("BMC_JSONRPCCALL_METHODS")
     bmcListOfMethod = bmcUtil.removeDoubleQuotas(bmcListOfMethod)
     bmcListOfMethods = bmcListOfMethod.split(",")
     for methodName in bmcListOfMethods:
         dispatcher[methodName] = lambda methodName: methodName
         #dispatcher["event"] = lambda a: a
     response = JSONRPCResponseManager.handle(
     request.data, dispatcher)
     print response.json
     bmcUtil.writeRPCCallDataTOFile(response.json)
     return Response(response.json, mimetype='application/json')
Example #31
 def get_diagonal(self, case_name):
     mat_build, normalized_mat_build = \
         Util.read_array(case_name, self.m_lambda, self.N, "input")
     return np.diag(mat_build)
Example #32
 def WriteHeader(fp, user, in_mail, board, title, anony, mode, session):
     header = Post.PrepareHeader(user, in_mail, board, title, anony, mode,
                                 session)
     fp.write(Util.gbkEnc(header))
Example #33
 def GetOriginLine(self):
     for line in self.GetBody().split('\n'):
         if Post.IsOriginLine(Util.gbkEnc(line)):
             return line
     return ""
# This tries to find an algorithm that finds the longest suffix of any given word with length N while using only M
# comparisons
import time
from cmath import sqrt
from multiprocessing import Pool
from time import gmtime, strftime

from anytree import Node
from bitarray._bitarray import bitarray

from Util import Util

n = 5
m = 4
DEBUG = True
MY_UTIL = Util(n, m)
NR_WORKERS = 1
ALL_COMPS = MY_UTIL.comp_pairs
NR_COMPS = len(ALL_COMPS)

WD_TIME = 0
COPY_TIME = 0
NR_CALLS = 0
NR_COMP_EVALS = 0
# define how many comparisons are allowed that do not extend the underlying dependency graph
max_m = int((4 * n - 5) / 3)
max_non_endogeneous = max_m - n + 1


# Generates an initial decision tree for M comparisons with given root value
# Anytree helps navigating, manipulating and printing the tree (i.e. finding children, parents etc.)
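
MY_UTIL.comp_pairs presumably enumerates the candidate comparisons, i.e. the index pairs inside a word of length n that a single comparison could probe. A sketch of that assumption (the real attribute may be built differently):

    from itertools import combinations

    def comp_pairs(n):
        # All index pairs (i, j) with i < j.
        return list(combinations(range(n), 2))
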
Example #35
 def gen_gateway_ips(self):
     for subnet_ip in self.subnets:
         gw_ip = Util.get_gateway_ip(subnet_ip)
         self.gateway_ips[subnet_ip] = gw_ip
     return
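
Util.get_gateway_ip maps a subnet to its gateway address, conventionally the first usable host. A sketch of that assumption using the standard ipaddress module:

    import ipaddress

    def get_gateway_ip(subnet_ip):
        # "10.0.0.0/24" -> "10.0.0.1": the first host is assumed to be the gateway.
        network = ipaddress.ip_network(subnet_ip, strict=False)
        return str(next(network.hosts()))
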
Example #36
 def calculate_file_size_score(self, number_of_terms, max, min):
     return 1 / (1 + math.e**
                 (-1 * Util.normalization(number_of_terms, max, min)))
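
The score is a logistic function of a normalized term count, so it stays in (0, 1) and grows with the number of terms. Util.normalization is presumably a min-max rescaling; a sketch matching the (value, max, min) argument order used above:

    def normalization(value, max_value, min_value):
        # Min-max rescaling to [0, 1]; guard against a degenerate range.
        if max_value == min_value:
            return 0.0
        return float(value - min_value) / (max_value - min_value)
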
Example #37
sys.path.append(os.getcwd() + "/runtime")

from SessionManagerProvider import SessionManagerProvider
from ConfigurationProvider import ConfigurationProvider
from ApplicationManagerProvider import ApplicationManagerProvider
from Util import Util

config_provider = ConfigurationProvider()
appmgr_provider = ApplicationManagerProvider()
appmgr = appmgr_provider.get_app_mgr(config_provider.get_config('DM_APP_MGR'))

sessionmgr_provider = SessionManagerProvider()
sessionmgr = sessionmgr_provider.get_session_mgr(
    config_provider.get_config('DM_SESSION_MGR'))

util = Util()

status = False
action = os.environ['PAS_ACTION_DM_CUSTOM_ACTION_TYPE']
if action == "START_APP":
    status=appmgr.start_app(config_provider.get_config('PAS_EXECUTABLE'), config_provider.get_config('PAS_ACTION_DM_APP_ARGS'), \
   config_provider.get_config('PAS_ACTION_DM_APP_ENVS'),config_provider.get_config('PAS_ACTION_DM_APP_WDIR'), util.getValue("display"))
elif action == "GET_OTP":
    status = sessionmgr.gen_password()
elif action == "WAIT_ON_SESSION":
    sessionmgr.set_waitflag()
elif action == "STOP_SESSION":
    status = sessionmgr.stop_session()
elif action == "GET_SESSION":
    status = sessionmgr.get_session()
elif action == "SET_SESSION_EXPIRY":
Example #38
    def solve(self, filterVector=[0, 0, 0, 0, 0], nbThread=1):

        if nbThread > 1:
            return self.solveMultiThread(nbThread, filterVector)

        tStart = time.time()

        opt = False
        while not opt:
            self.nbIteration += 1
            opt = True
            t = time.time()
            self.objRelax = self.master.solve(self.verbose)
            if self.nbIteration == param.nbIterationMaxFoCG or (
                    time.time() - tStart) > (self.timeLimit * 0.80):
                break
            duals, constraintOnePath, constraintLinkCapacity, constraintNodeCapacity, constraintVnfUsed = self.master.getDuals(
            )
            self.timeMaster += time.time() - t

            t = time.time()
            for sub in self.subs:
                listPath = []
                for step in range(self.nbStepsReconf, 0, -1):
                    sub.updateObjective(duals,
                                        constraintOnePath[sub.slice.id][step],
                                        constraintLinkCapacity,
                                        constraintNodeCapacity,
                                        constraintVnfUsed, step)
                    reduceCost, path = sub.solve(step)
                    if reduceCost < 0:
                        listPath.append(path)
                        self.nbColumn += 1
                        opt = False
                        #print(path.alloc)
                for path in listPath:
                    self.master.addPath(path, sub.slice)
            self.timeSubs += time.time() - t

            if self.stableStop:
                stableCycle = param.stableCycle
                self.oldObj.append(self.objRelax)
                if len(self.oldObj) > stableCycle:
                    oldObj = self.oldObj.popleft()
                    if (oldObj - self.objRelax) / float(oldObj) * 100 < 0.1:
                        opt = True

        t = time.time()
        if (filterVector[0]):
            self.master.reduceNumberOfPath1()
            if (self.verbose):
                print("Filtre reduceNumberOfPath1")
        elif (filterVector[1]):
            self.master.reduceNumberOfPath2()
            if (self.verbose):
                print("Filtre reduceNumberOfPath2")
        elif (filterVector[2]):
            self.master.reduceNumberOfPath3()
            if (self.verbose):
                print("Filtre reduceNumberOfPath3")
        elif (filterVector[3]):
            self.master.reduceNumberOfPath4()
            if (self.verbose):
                print("Filtre reduceNumberOfPath4")
        elif (filterVector[4]):
            self.master.reduceNumberOfPath5()
            if (self.verbose):
                print("Filtre reduceNumberOfPath5")

        limit = max(self.timeLimit * 0.2, time.time() - tStart)
        limit = max(limit, 5)

        self.master.solveOpt(limit)
        self.timeOptimal = time.time() - t
        self.timeTotal += time.time() - tStart
        res_Reconf, NumPathUsed_Reconf, pathUsed_Reconf = self.master.getResult(
            checkSolution=self.checkSolution)
        self.obj, self.bwUsed, self.objVnf = Util.objective(
            self.nodes, self.listSlice, res_Reconf, self.beta)

        self.master.terminate()
        for sub in self.subs:
            sub.terminate()

        return res_Reconf, pathUsed_Reconf
Example #39
import socket
from Util import Util
util = Util()
import time

serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
serv.bind(('127.0.0.1',8082))
serv.listen(1)

trusted_initializer = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
trusted_initializer.connect(util.server_address_builder())

desired_input = input('Please input an integer: \n')
desired_operation = input('Secure Two-party Computation for addition or multiplication (a/m): \n')
y = int(desired_input)
prime = util.closest_large_prime_finder(y)
y_a, y_b = util.two_party_secret_share(y,prime)

alice_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
alice_client.connect(('127.0.0.1',8081))
alice_client.send(str('y_a:' + str(y_a)).encode())

if desired_operation == 'a':
    trusted_initializer.send('y_prime:-1'.encode())
    initialized_share = str(trusted_initializer.recv(1024).decode())
    x_b = -1
    alice_sub_addition = -1
    alice_addition = -1
    while True:
        alice_conn, addr = serv.accept()
        data = str(alice_conn.recv(4096).decode())
Example #40
 def punishDelta(self, delta, avg):
     if avg < .1:
         return
     diff = avg - .1
     gradient = Util.sigmoid_prime(diff)
     self.delta = self.delta + (delta * gradient * -.1)
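
punishDelta, rewardDelta and compute (Examples #52 and #60 below) lean on Util.sigmoid and Util.sigmoid_prime. Assuming the usual logistic function, they would look like:

    import numpy as np

    def sigmoid(x):
        # Logistic function; works element-wise on numpy arrays.
        return 1.0 / (1.0 + np.exp(-x))

    def sigmoid_prime(x):
        # Derivative of the logistic function.
        s = sigmoid(x)
        return s * (1.0 - s)
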
Example #41
from operate.base import BaseClass
import codecs
from Util import Util
from redis_operate import RecisClusterOperate
from redis_operate.RedisStruct import RedisHash
from DispatchControl import PaperJournalReptileDispatch, PaperReptileDispatch
from PaperReptile import PaperReptile
import time
import re
import Util
import sys

reload(sys)
sys.setdefaultencoding('utf-8')

year = Util.get_last_year()


def sql_get_paper_data():
    print "开始从数据库读入相关信息"
    sqlConn = base_operate.connection_sql("techpooldata")
    papertable = BaseClass(sqlConn, "paper")
    columns = ["PAPER_ID", "url"]
    fp_w = codecs.open(r"result\paper%s" % year, "wb", encoding="utf-8")
    for infos in papertable.select_term(
            *columns, where="year = '%s' and url is not NULL" % year):
        uuid, url = infos
        fp_w.write("%s\t%s\n" % (uuid, url))
    fp_w.close()
    print "读取信息成功"
Example #42
def setup_periodic_tasks(sender, **kwargs):
    Util.bootstrap()
Example #43
                                                                              'asize3', 'asize2', 'asize1',
                                                                              'bsize1', 'bsize2', 'bsize3']]  # 20160622, MK
            px_mid_this_day_this_stk['ask1'] = px_mid_this_day_this_stk['ask1']/Util.px_multiplier
            px_mid_this_day_this_stk['bid1'] = px_mid_this_day_this_stk['bid1']/Util.px_multiplier
            px_mid_this_day_this_stk['mid_prc'] = (px_mid_this_day_this_stk['bid1'] + px_mid_this_day_this_stk['ask1'])/2

            transaction_data_set_index = self.transaction_data_from_csv_filter_time
            volume_buy = transaction_data_set_index[transaction_data_set_index['bs_flag'] == 66][['trade_volume']]  # 66 is ascii code for 'B'
            volume_sell = transaction_data_set_index[transaction_data_set_index['bs_flag'] == 83][['trade_volume']]  # 83 is ascii code for 'S

            self.px_mid_this_day_this_stk = px_mid_this_day_this_stk.loc[px_mid_this_day_this_stk['mid_prc'] > 0]
            self.volume_buy = volume_buy
            self.volume_sell = volume_sell
            # px_mid_this_day_this_stk = (self.data_from_csv_filter_time['bid1'] +
            #                             self.data_from_csv_filter_time['ask1'])/2/10000
            # self.px_mid_this_day_this_stk = px_mid_this_day_this_stk[px_mid_this_day_this_stk>0]

        except Exception as e:
            print('data_error, {}, {}, {}'.format(self.date_str, self.stk_str, e))
            self.px_mid_this_day_this_stk = pd.DataFrame({"mid_prc": [], "ask1": [], "bid1": [],
                                                          "asize3": [], "asize2": [], "asize1": [],
                                                          "bsize1": [], "bsize2": [], "bsize3": [],
                                                          'volume_buy': [], 'volume_sell': []})
            self.volume_buy = pd.Series()
            self.volume_sell = pd.Series()


if __name__ == "__main__":
    test_data = DataFramework("20160401", "20160401", Util.get_path_tickdata())
    test_tickdata = TickdataOneDayOneStk("20160401", "000005", test_data)
from Tree import BuiltIn
from Tree import Environment
from Tree import Closure
from Special import Special
from Util import Util

if __name__ == "__main__":
    # Initialization file with Scheme definitions of built-in functions
    ini_file = "ini.scm"

    prompt = "> "

    # Create scanner that reads from standard input
    scanner = Scanner(sys.stdin)

    util = Util()
    Cons.setUtil(util)
    BuiltIn.setUtil(util)
    Closure.setUtil(util)
    Special.setUtil(util)

    if (len(sys.argv) > 2 or (len(sys.argv) == 2 and sys.argv[1] != "-d")):
        sys.stderr.write("Usage: python3 SPP.py [-d]\n")
        sys.stderr.flush()
        sys.exit(2)

    # If command line option -d is provided, debug the scanner.
    if len(sys.argv) == 2 and sys.argv[1] == "-d":
        tok = scanner.getNextToken()
        while tok != None:
            tt = tok.getType()
Example #45
 def cpu_put(self):
     mass_list_temp = Util.copy_mass_list(self.board.mass_list)
     x, y = self.players[self.turn].put(mass_list_temp)
     self.update(x, y)
Example #46
    def solveMultiThread(self, nbThreadSub, filterVector=[0, 0, 0, 0, 0]):
        def doYourJobYouUselessThread(listSub, duals, constraintOnePath,
                                      constraintLinkCapacity,
                                      constraintNodeCapacity,
                                      constraintVnfUsed, nbStepsReconf,
                                      dictPath):
            for sub in listSub:
                listPath = []
                for step in range(nbStepsReconf, 0, -1):
                    sub.updateObjective(duals,
                                        constraintOnePath[sub.slice.id][step],
                                        constraintLinkCapacity,
                                        constraintNodeCapacity,
                                        constraintVnfUsed, step)
                    reduceCost, path = sub.solve(step)
                    if reduceCost < 0:
                        listPath.append(path)
                dictPath[sub.slice.id] = listPath

        tStart = time.time()

        # Build the lists used to split the subs across the threads
        listSubThread = [[] for i in range(nbThreadSub)]
        listSubTmp = copy(self.subs)
        shuffle(listSubTmp)
        nbSubByHtread = len(listSubTmp) // nbThreadSub
        dictSubThread = Manager().dict()
        it = 0
        # Fill the lists
        for i in range(nbSubByHtread):
            for subThread in listSubThread:
                subThread.append(listSubTmp[it])
                dictSubThread[listSubTmp[it].slice.id] = []
                it += 1
        for i in range(len(listSubTmp) % len(listSubThread)):
            listSubThread[i].append(listSubTmp[it])
            dictSubThread[listSubTmp[it].slice.id] = []
            it += 1

        opt = False
        while not opt:
            self.nbIteration += 1
            opt = True
            t = time.time()
            self.objRelax = self.master.solve(self.verbose)
            if self.nbIteration == 150 or (time.time() -
                                           tStart) > (self.timeLimit * 0.80):
                break
            duals, constraintOnePath, constraintLinkCapacity, constraintNodeCapacity, constraintVnfUsed = self.master.getDuals(
            )
            self.timeMaster += time.time() - t

            t = time.time()

            #We launch all the threads
            listProcess = []
            for listSub in listSubThread:
                p = Process(target=doYourJobYouUselessThread,
                            args=(listSub, duals, constraintOnePath,
                                  constraintLinkCapacity,
                                  constraintNodeCapacity, constraintVnfUsed,
                                  self.nbStepsReconf, dictSubThread))
                p.start()
                listProcess.append(p)
            #We wait for the ends of the threads
            for p in listProcess:
                p.join()
            #We add the new paths
            for sub in self.subs:
                listPath = dictSubThread[sub.slice.id]
                if len(listPath) > 0:
                    opt = False
                    self.nbColumn += len(listPath)
                    for path in listPath:
                        self.master.addPath(path, sub.slice)
            self.timeSubs += time.time() - t

            for p in listProcess:
                p.terminate()

            if self.stableStop:
                stableCycle = param.stableCycle
                self.oldObj.append(self.objRelax)
                if len(self.oldObj) > stableCycle:
                    oldObj = self.oldObj.popleft()
                    if (oldObj - self.objRelax) / float(oldObj) * 100 < 0.1:
                        opt = True

        #print("Reconfiguration GC subs Ok")
        t = time.time()
        if (filterVector[0]):
            self.master.reduceNumberOfPath1()
            if (self.verbose):
                print("Filtre reduceNumberOfPath1")
        elif (filterVector[1]):
            self.master.reduceNumberOfPath2()
            if (self.verbose):
                print("Filtre reduceNumberOfPath2")
        elif (filterVector[2]):
            self.master.reduceNumberOfPath3()
            if (self.verbose):
                print("Filtre reduceNumberOfPath3")
        elif (filterVector[3]):
            self.master.reduceNumberOfPath4()
            if (self.verbose):
                print("Filtre reduceNumberOfPath4")
        elif (filterVector[4]):
            self.master.reduceNumberOfPath5()
            if (self.verbose):
                print("Filtre reduceNumberOfPath5")
        else:
            if (self.verbose):
                print("Pas de Filtre")
        #limit = max(self.timeLimit - time.time()-tStart + 5, 5)
        limit = min(self.timeLimit * 0.2, (time.time() - tStart) * 0.25)
        limit = max(limit, 5)
        self.master.solveOpt(limit)
        self.timeOptimal = time.time() - t
        self.timeTotal += time.time() - tStart
        res_Reconf, NumPathUsed_Reconf, pathUsed_Reconf = self.master.getResult(
            checkSolution=self.checkSolution)
        self.bwUsed, self.objVnf = Util.objective(self.topology.listAllDC,
                                                  self.listSlice,
                                                  self.functions, res_Reconf)

        self.obj = self.bwUsed + (self.objVnf * self.beta)

        self.master.terminate()
        for sub in self.subs:
            sub.terminate()

        return res_Reconf, pathUsed_Reconf
Example #47
    def DoQuote(include_mode, quote_file, for_post, include_data=None):
        """ Quote modes:
            R: full text
            C: full text, add comment
            N: empty
            S: short quote, limited lines
            A: full quote
            """

        if (quote_file == ""):
            return ""
        if (include_mode == 'N'):
            return "\n"
        quser = ""
        result = ""
        with open(quote_file, "rb") as inf:
            buf = Post.SkipAttachFgets(inf)
            match_user = re.match('[^:]*: *(.*\))[^)]*$', buf)
            if (match_user):
                quser = match_user.group(1)
            if include_mode != 'R' and include_mode != 'C':
                if (for_post):
                    result = result + (u"\n【 在 %s 的大作中提到: 】\n".encode('gbk') %
                                       quser)
                else:
                    result = result + (u"\n【 在 %s 的来信中提到: 】\n".encode('gbk') %
                                       quser)
            if (include_mode == 'A'):
                while (True):
                    buf = Post.SkipAttachFgets(inf)
                    if (buf == ""):
                        break
                    result += ": %s" % buf
            else:
                # skip header
                while (True):
                    buf = Post.SkipAttachFgets(inf)
                    if (buf == "" or buf[0] == '\n'):
                        break
                if include_mode == 'R':
                    while (True):
                        buf = Post.SkipAttachFgets(inf)
                        if (buf == ""):
                            break
                        if (not Post.IsOriginLine(buf)):
                            result += buf
                elif include_mode == 'C':
                    while True:
                        buf = Post.SkipAttachFgets(inf)
                        if buf == "" or buf == "--\n":
                            break
                        if not Post.IsOriginLine(buf):
                            result += buf
                else:
                    line_count = 0
                    while (True):
                        buf = Post.SkipAttachFgets(inf)
                        if (buf == "" or buf == "--\n"):
                            break
                        if (len(buf) > 250):
                            buf = Util.CutLine(buf, 250) + "\n"
                        if (not Post.IsGarbageLine(buf)):
                            result += ": %s" % buf
                            if (include_mode == 'S'):
                                line_count += 1
                                if (line_count >= Config.QUOTED_LINES):
                                    result += ": ..................."
                                    break
        if include_mode == 'C' and include_data is not None:
            result += Util.gbkEnc(include_data['comment'])
        result += "\n"
        return result
Example #48
    inputDataPath='../originalData/'
    outputDataPath='../cleaneddata/'
    # Drop the unneeded columns
    'A static method takes no self parameter; it can access class variables but not instance variables'
    @staticmethod
    def deleteUsenessColumns(data,delColumnList):
        print '删除属性个数:',len(delColumnList)
        return data.drop(delColumnList,axis=1)
    
    'Strip the surrounding info and other extra data from the raw data'
    def delExtraData(self,data,outputPath):      
        data=data.iloc[7:,10:]  # drop the leading rows and columns (keep rows from index 7, columns from index 10)
        data.index=[i for i in range(len(data))]  # re-index from 0
        print '总的属性个数:',len(data.columns)
        data=DataClean.deleteUsenessColumns(data,DataClean.usernessColumns)  # drop the unwanted columns
        print '剩余属性个数:',len(data.columns)
        data.to_excel(outputPath)
        
if __name__=='__main__':
    print '在此模块中运行'
    cleanData=DataClean()
    fileList=Util.readAllFileNameFromDir(cleanData.inputDataPath)  # first read every file name under the input directory
    # print len(fileList)
    for i in range(len(fileList)):
        if fileList[i]=='.DS_Store':
            continue;
        data=Util.readDataFromExcel(cleanData.inputDataPath+fileList[i])
        cleanData.delExtraData(data,cleanData.outputDataPath+fileList[i][:3]+'.xls')  # process each file in turn
        
        
 def setFromBytes(self, midiData):
     eventData = Util.stripLeadingVariableLength(midiData[2:])
     self.copyrightNotice = eventData.decode()
 def isConnectionEstablished(self):
     bmcUtil = Util()
     bmcPluginName = bmcUtil.readPluginName()
     bmcIsRpcORStdout = bmcUtil.readLocalAppVariableFile(
         "BMC_TYPE_OF_OUTPUT")
     bmcIsRpcORStdout = bmcUtil.removeDoubleQuotas(bmcIsRpcORStdout)
     if bmcIsRpcORStdout == "stdout":
         bmcfileData = bmcUtil.readVagrantStdoutFile(bmcPluginName)
         bmcMsg = bmcUtil.readLocalAppVariableFile("BMC_ERROR_MSG")
         bmcMsg = bmcUtil.removeDoubleQuotas(bmcMsg)
         bmcIsConnectionEstablished = bmcUtil.serchSubstring(
             bmcfileData, bmcMsg)
     else:
         bmcfileData = bmcUtil.readVagrantRPCLogFile(bmcPluginName)
         bmcMsg = bmcUtil.readLocalAppVariableFile("BMC_ERROR_MSG")
         bmcMsg = bmcUtil.removeDoubleQuotas(bmcMsg)
         bmcIsConnectionEstablished = bmcUtil.serchSubstring(
             bmcfileData, bmcMsg)
     return bmcIsConnectionEstablished
from statistics import median

from anytree import Node, LevelOrderIter
import matplotlib.pyplot as plt
from Util import Util

n = 8
m = 8

MY_UTIL = Util(n, m)

root = MY_UTIL.load_working_tree([0, 3])

for node in LevelOrderIter(root):
    new_children = []
    for i, child in enumerate(node.children):
        if child.is_leaf:
            new_children.append(
                Node(child.name, obj=child.obj, parent=node, nr_words=0))
        else:
            new_children.append(child)
    node.children = tuple(new_children)

for words in MY_UTIL.generate_all_words():
    for w in words:
        current_node = root
        while not current_node.is_leaf:
            i1, i2 = current_node.obj
            c1 = w[i1]
            c2 = w[i2]
            if c1 < c2:
Example #52
 def rewardDelta(self, delta, avg):
     if avg > .9:
         return
     diff = .9 - avg
     gradient = Util.sigmoid_prime(diff)
     self.delta = self.delta + (delta * gradient * .1)
Example #53
class Processor:
	dirs = Util()

	def __init__(self,usingCuda=False,verbose=False):
		self.imgDim = 96
		networkModel = os.path.join(Processor.dirs.openfaceModelDir,'nn4.small2.v1.t7')
		self.net = openface.TorchNeuralNet(networkModel, self.imgDim, cuda=usingCuda) # model used for formatting	
		self.dlibFacePredictor = os.path.join(Processor.dirs.dlibModelDir, "shape_predictor_68_face_landmarks.dat")
		self.align = openface.AlignDlib(self.dlibFacePredictor)
		self.verbose = verbose
		openface.helper.mkdirP(Processor.dirs.alignedImgsDir)
	

	def processImage(self,imgObject,isTrain=False):
		"""
		Get aligned reps and their bounding boxes
		"""
		reps = [] 
		print("=== {} ===".format(imgObject.path))

		# Preprocess image
		if isTrain: 
			outDir = os.path.join(Processor.dirs.alignedImgsDir, imgObject.cls)
			openface.helper.mkdirP(outDir)
			outputPrefix = os.path.join(outDir, imgObject.name)
			imgName = outputPrefix + ".png"
			# TODO check if file is already found. if so, then nothing needs to be done.
			# Otherwise, continue.
			if os.path.isfile(imgName):
				if self.verbose:
					print("  + Already found, skipping.")
				return []

		imgPath = imgObject.path
		rgbImg = imgObject.getRGB()
		if rgbImg is None:
			raise Exception("Unable to load image: {}".format(imgPath))

		bbs = self.align.getAllFaceBoundingBoxes(rgbImg)
		if bbs is None:
	 		raise Exception("Unable to find a face: {}".format(imgPath)) 
		
		
		
		for bb in bbs:
			start = time.time()
			alignedFace = self.align.align(self.imgDim, rgbImg, bb, 
				landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)

			if alignedFace is None:
				raise Exception("Unable to align image: {}".format(imgPath))
			if self.verbose:
				print("  + Face alignment took {} seconds.".format(time.time() - start))

			# Store the aligned face on disk when training
			if isTrain:
				cv.imwrite(imgName,alignedFace)

			# Pass these reps through NN to get vec representation
			start = time.time()
			rep = self.net.forward(alignedFace)
			if self.verbose:
				print("  + OpenFace forward pass took {} seconds.".format(time.time() - start))
				print("Representation:")
				# print(rep)
				print("-----\n")
			reps.append((rep,bb)) 

		# Return the representations and their bounding boxes: each entry is (vector, bounding box)
		return reps
	
	def rect_to_css(self,rect):
		"""
		Convert a dlib 'rect' object to a tuple(top,right,bottom,left)
		"""
		return rect.top(), rect.right(), rect.bottom(), rect.left()
	
	def getTextScale(fontFace, thickness, height):
		view_x = 0
		# TODO 
	def markFace(self,frame,faces):
		"""
		Adds a bounding box with a label to an image.
		
		Params:
		frame - cv numpy array?
		faces - list of (string, float, rectangle)
		"""
		# Draw rectangle around faces
		for face in faces:
			name = face[0]
			confidence = face[1]
			(top,right,bottom,left) = self.rect_to_css(face[2]) # or is it (top,right,bottom,left?
			text = "{0} ({1:.2f}%)".format(name,confidence*100)

			# Draw a box around the face
			cv.rectangle(frame, (left, top), (right,bottom), (0, 255, 0), 2)

			# Draw a label with a name below the face
			# cv.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), 2)
			font = cv.FONT_HERSHEY_DUPLEX
			cv.putText(frame,text, (left + 6, bottom - 6), font, 0.5, (128, 128, 0), 1)
		return frame
Example #54
 def build_kernel(self, case_name):
     self.build_kernel_mat(case_name)
     return Util.normalized_array(self.mat_build)
 def setFromBytes(self, midiData):
     eventData = Util.stripLeadingVariableLength(midiData[2:])
     self.instrumentName = eventData.decode()
Example #56
from selenium import webdriver
import time
from Util import Util
from MenuUtil import MenuUtil
commonUtil=Util();
menuUtil=MenuUtil();

driver = webdriver.Chrome()
# log in
url='http://localhost:3001/#/onlineFeedbackShopList'
driver.get(url)
driver.set_window_size(1410,900)
commonUtil.checkIsLoading(driver, time)
hashRunList = [];
for i in range(0,menuUtil.getCycleCount()):
    menuUtil.openSubMenuNode(driver);
    commonUtil.checkIsLoading(driver, time)
    menuUtil.runSubMenuNode(driver, hashRunList);
 def setFromBytes(self, midiData):
     eventData = Util.stripLeadingVariableLength(midiData[2:])
     self.cuePoint = eventData.decode()
Example #58
 def add_entry(self, entry):
     return (Util.AppendRecord(self.path, entry.pack()) == 0)
class DividendsBuyBacks(object):
    util = Util()

    def __init__(self):
        pass

        # questions:
        # how does cash buy back figure into retained earnings?

    def calcDividendBuyBacks(self, incomeStmtData, balanceSheetData,
                             cashFlowData):
        print "--------------- Dividend Buybacks --------------------------------------------"

        # Calculate Retained earnings per share
        retainedearningspershare = balanceSheetData.loc[:,
                                                        "retainedearnings"] / incomeStmtData.loc[:,
                                                                                                 "weightedavedilutedsharesos"]
        retainedearningspershare.name = "retainedearningspershare"

        # Calculate earnings per share
        # calculatedearningspershare = incomeStmtData.loc[:, "netincometocommon"] / incomeStmtData.loc[:,
        #                                                                        "weightedavedilutedsharesos"]
        # calculatedearningspershare.name = "calculated diluted earnings per share"
        # print calculatedearningspershare, incomeStmtData.loc[:, 'dilutedeps']

        retainedData = pd.concat([
            incomeStmtData.loc[:, 'netincometocommon'],
            balanceSheetData.loc[:, 'retainedearnings'],
            incomeStmtData.loc[:, 'dilutedeps'],
            incomeStmtData.loc[:, 'weightedavedilutedsharesos'],
            retainedearningspershare,
            incomeStmtData.loc[:, "cashdividendspershare"],
            cashFlowData.loc[:, "paymentofdividends"],
            cashFlowData.loc[:, 'repurchaseofcommonequity']
        ],
                                 axis=1)

        retainedData = self.util.dropNaInAllColumns(retainedData)

        print "\nReturn as percent of income"
        print "Year            Div     StockBuyBack    Total %"
        for index, row in retainedData.iterrows():
            print "{}         {:5,.2f}%     {:5,.2f}%          {:5,.2f}%     ".format(
                index, ((abs(row['paymentofdividends']) * 100) /
                        row['netincometocommon']),
                (abs(row['repurchaseofcommonequity'] * 100)) /
                row['netincometocommon'],
                (abs(row['paymentofdividends'] * 100)) /
                row['netincometocommon'] +
                (abs(row['repurchaseofcommonequity'] * 100)) /
                row['netincometocommon'])

        print "\nYear       Dividend per share"
        for index, row in retainedData.iterrows():
            print "{}         ${:0,.2f}".format(index,
                                                row['cashdividendspershare'])
        print "Over {} years:".format(
            len(retainedData.loc[:, 'cashdividendspershare'].index))
        print "Total Dividend payout of ${:0,.2f}".format(
            retainedData['cashdividendspershare'].sum())
        payoutRatio = retainedData.loc[:, 'cashdividendspershare'].sum(
        ) / retainedData.loc[:, 'dilutedeps'].sum()
        print "Total Dividend payout as percent of earnings is {:0,.2f}%".format(
            payoutRatio * 100)
        print "Total Retained ratio is {:0,.2f}%".format(
            float(1.0 - payoutRatio) * 100)

        print "------------------------------------------------------------------------------\n"
Example #60
 def compute(self, obs):
     self.personalPrediction = Util.sigmoid(
         np.dot(self.w + self.delta, obs.transpose()))
     self.prediction = self.personalPrediction
     return self.prediction