def test_read_whitespace_csv(self):
    f = FileUtils(filename="test_file_whitespace_delimited.csv",
                  skip_header=True, whitespace_delim=True)
    all_rows = f.get_arrays_from_csv()
    print("First Row: {0}".format(all_rows[0]))
    self.assertEqual(all_rows[0], [1, 0.1, 0.2, 0.3])
def label_predict(self, sentence):
    """
    Divide the sentence into chunks and predict a label for each chunk.
    The final score is the sum of the per-chunk scores divided by the
    number of chunks.
    :param sentence: String
    :return: label, score
    """
    index_words = FileUtils.index_sentence(sentence, self.word_to_index)
    chunks = FileUtils.divide_sentence(index_words, Settings.seq_size)
    result = np.zeros(Settings.class_num)
    if Settings.cuda:
        self.model.cuda()
    for chunk in chunks:
        with torch.no_grad():
            chunk = torch.from_numpy(np.asarray(chunk)).view(1, Settings.seq_size)
            if Settings.cuda:
                chunk = chunk.cuda()
            predict = self.model(chunk)
            if Settings.cuda:
                predict = predict.cpu()
            predict = predict.numpy()[0]
            result += predict
    result /= len(chunks)
    # Labels are 1-based, so shift the argmax index before the lookup.
    target_index = np.argmax(result) + 1
    label = self.index_to_label.get(str(target_index))
    score = np.max(result)
    return label, score
def label_predict(self, sentence):
    """
    Divide the sentence into chunks and predict a label for each chunk.
    The final score is the sum of the per-chunk scores divided by the
    number of chunks.
    :param sentence: String
    :return: list of {"label": ..., "score": ...} dicts, best first
    """
    index_words = FileUtils.index_sentence(sentence, self.word_to_index)
    chunks = FileUtils.divide_sentence(index_words, Settings.seq_size)
    result = np.zeros(Settings.class_num)
    for chunk in chunks:
        with torch.no_grad():
            chunk = torch.from_numpy(np.asarray(chunk)).view(1, Settings.seq_size)
            predict = self.model(chunk)
            predict = predict.numpy()[0]
            result += predict
    result /= len(chunks)
    # Pair each 1-based label index with its averaged score, best first.
    result = [(str(i + 1), float(score)) for i, score in enumerate(result)]
    top = sorted(result, key=lambda x: -x[1])
    res = []
    for label, score in top:
        res.append({"label": label, "score": score})
    return res
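# Minimal, self-contained sketch of the chunk-averaging idea used by the two
# label_predict variants above: fake per-chunk score vectors stand in for
# self.model(chunk). Purely illustrative; not part of the original code.
import numpy as np

def average_chunk_scores(chunk_scores):
    # Sum the per-chunk score vectors, then divide by the number of chunks.
    result = np.zeros(len(chunk_scores[0]))
    for scores in chunk_scores:
        result += scores
    return result / len(chunk_scores)

fake_scores = [np.array([0.1, 0.7, 0.2]), np.array([0.2, 0.6, 0.2])]
avg = average_chunk_scores(fake_scores)
print(np.argmax(avg) + 1, np.max(avg))  # 1-based best label index, its score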
def update_datproducts(self, curr_dir, project_id, imaging_id, db_model):
    """
    Used by the stage5 process_target functionality.
    :param curr_dir:
    :param project_id:
    :param imaging_id:
    :param db_model:
    :return:
    """
    fileutils = FileUtils()
    products_list = glob.glob(curr_dir + '/*')
    for each_product in products_list:
        current_time_in_sec = time.time()
        product_data = {
            'project_id': project_id,
            'imaging_id': imaging_id,
            "file_size": fileutils.calculalate_file_sizse_in_MB(each_product),
            "file_type": each_product.split('.')[-1],
            "file_name": each_product.split('/')[-1],
            "generated": datetime.datetime.fromtimestamp(current_time_in_sec).strftime('%Y-%m-%d %H:%M:%S'),
            "status": "processed"
        }
        print(db_model.insert_into_table("dataproducts", product_data, tableSchema.dataproductsId))
def rsa_generate(keypath, keyname, passphrase):
    """
    Generates an RSA key pair (default size: 2048 bits).
    If files with the same names already exist they are overwritten,
    so it is suggested to check before creating new keys.
    Public keys are written as <keyname>_pub.pem and private keys as
    <keyname>_priv.pem.
    :param keypath: directory to write the key files into
    :param keyname: base name for the key files
    :param passphrase: used to export the private key encrypted with 3DES
    :return:
    """
    rsa_key = RSA.generate(bits=2048)
    public_key_pem = rsa_key.publickey().exportKey("PEM")
    private_key_pem = rsa_key.exportKey(format="PEM", passphrase=passphrase)

    # write public key
    pubkey_file_name = os.path.join(keypath, (keyname + "_pub.pem"))
    FileUtils.write_data_to_file(filename=pubkey_file_name, data=public_key_pem)

    # write private key
    privkey_file_name = os.path.join(keypath, (keyname + "_priv.pem"))
    FileUtils.write_data_to_file(filename=privkey_file_name, data=private_key_pem)
    return
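# Minimal round-trip sketch for the key files rsa_generate writes above,
# assuming the same PyCrypto-style API. The path and passphrase below are
# illustrative assumptions, not values from the original code.
from Crypto.PublicKey import RSA

def load_private_key(privkey_file_name, passphrase):
    # Re-import the passphrase-protected private key written by rsa_generate.
    with open(privkey_file_name, "rb") as f:
        return RSA.importKey(f.read(), passphrase=passphrase)

# rsa_generate(keypath="keys", keyname="demo", passphrase="s3cret")
# key = load_private_key("keys/demo_priv.pem", passphrase="s3cret")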
def run_ltacomb(self, files_list, destination):
    print("##########run_ltacomb########")
    fileutils = FileUtils()
    lta_list = []
    self.status = "failed"
    print(files_list, destination)
    # Copy every input LTA file into the destination directory first.
    for each_file in files_list:
        print("Copying " + each_file + " " + destination)
        fileutils.copy_files(each_file, destination)
        lta_list.append(destination + '/' + os.path.basename(each_file))
    lta_list.sort()
    to_comb = ",".join(lta_list)
    print(".......... joining ", to_comb)
    os.chdir(destination)
    print("::::::::", lta_list)
    try:
        # Combine the copied LTA files with the external ltacomb tool.
        print("/home/gadpu/gadpu_pipeline/ltacomb -i " + to_comb)
        os.system("/home/gadpu/gadpu_pipeline/ltacomb -i " + to_comb)
        self.status = "success"
    except Exception as ex:
        print(ex)
        self.status = ex
    print("?????????????????????????????????????????????")
    # Remove the copies and rename the combined output to the first file name.
    print("rm " + to_comb.replace(',', ' '))
    os.system("rm " + to_comb.replace(',', ' '))
    print("mv ltacomb_out.lta " + os.path.basename(lta_list[0]))
    os.system("mv ltacomb_out.lta " + os.path.basename(lta_list[0]))
    return str(self.status)
def main(input_file_name, q_mle, e_mle):
    start = datetime.now()
    sentences = FileUtils.read_lines(input_file_name)
    dict_q, dict_e = create_dicts(sentences)
    FileUtils.write_events_count(q_mle, dict_q)
    FileUtils.write_events_count(e_mle, dict_e)
    end = datetime.now()
    print('Running Time: {0}'.format(end - start))
def encrypt_file(options):
    try:
        FileUtils.replace_file_content(
            options.file,
            AESCipher().encrypt(FileUtils.read_file(options.file)))
    except Exception:
        print("Error in " + options.file + ", possibly cannot encrypt file")
def main(corpus_file, features_file):
    start = datetime.now()
    lines = FileUtils.read_lines(corpus_file)
    dict_q, dict_e = MLETrain.create_dicts(lines)
    features = extract_features(lines, dict_e)
    FileUtils.write_features(features_file, features)
    end = datetime.now()
    print('Running Time: {0}'.format(end - start))
def encrypt(self):
    if not self._areParametersVerified:
        print("Verify the parameters first!")
        return

    # create 16 byte random nonce
    random_nonce = base64.encodestring(Random.get_random_bytes(16))

    # first create a 256 bit AES key
    key_size = 32  # bytes
    self._symmetrickey = Random.get_random_bytes(key_size)

    # encrypt the session key with public key
    encrypted_session_key = EncryptionUtils.rsa_encrypt(
        public_key_file_name=self._pubkeydirectory,
        data=self._symmetrickey)

    # prepare data for signature
    data_for_sign = self._operation_directory + encrypted_session_key \
        + random_nonce + self._keyname

    # get sha256
    data_for_sign_sha256 = EncryptionUtils.sha256(data=data_for_sign).hexdigest()

    # sign with private key
    signature = EncryptionUtils.rsa_sign(
        private_key_file_name=self._privkeydirectory,
        data=data_for_sign_sha256,
        passphrase=self._passphrase)

    # generate and write the json
    configuration_json = {
        "operation_directory": self._operation_directory,
        "encrypted_session_key": encrypted_session_key,
        "random_nonce": random_nonce,
        "key_name": self._keyname,
        "signature": signature
    }
    configuration_json_str = json.dumps(configuration_json)
    configuration_file_name = os.path.relpath(self._operation_directory, ".") + ".InfoVault"
    FileUtils.write_data_to_file(
        filename=(self._operation_directory + "/../" + configuration_file_name),
        data=configuration_json_str)

    # encryption is performed via provided callback function
    FileUtils.walk_in_directory(directory=self._operation_directory,
                                callback=self.encrypt_callback)
    return
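# Sketch of the hybrid-encryption envelope encrypt() above writes next to the
# operation directory: a random AES session key wrapped with RSA, plus a
# signature over the metadata. Field values here are illustrative only.
example_vault = {
    "operation_directory": "./documents",          # directory being encrypted
    "encrypted_session_key": "<base64 RSA blob>",  # AES key wrapped with RSA
    "random_nonce": "<base64 16-byte nonce>",
    "key_name": "demo",
    "signature": "<base64 signature over sha256 of the fields above>",
}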
def main(input_file_name, q_mle, e_mle, greedy_hmm_output, extra_file_name):
    start = datetime.now()
    sentences = FileUtils.read_lines(input_file_name)
    dict_q = DictUtils.convert_line_to_dict(FileUtils.read_lines(q_mle))
    dict_e = DictUtils.convert_line_to_dict(FileUtils.read_lines(e_mle))
    unk_tag_list = DictUtils.possible_tags(UNK, dict_e)
    tagged_text = greedy(sentences, dict_q, dict_e, unk_tag_list)
    FileUtils.write_tagged_text(greedy_hmm_output, tagged_text)
    end = datetime.now()
    print('Running Time: {0}'.format(end - start))
def simulacaoAleatoria(totalSimulacoes):
    util = FileUtils()
    pesosLidos, valoresLidos, pesoMaximo = util.readConfigurationFile("mochila2.txt")
    melhorCromossomo = Cromossomo(len(pesosLidos))
    melhorCromossomo.setFitness(0)
    for i in range(totalSimulacoes):
        cromo = Cromossomo(len(pesosLidos))
        calculaFitness(cromo, pesosLidos, valoresLidos, pesoMaximo)
        if cromo.getFitness() > melhorCromossomo.getFitness():
            melhorCromossomo = cromo
        print "S:-" + str(i) + getConfiguracaoMochila(melhorCromossomo, pesosLidos, valoresLidos)
def main(features_file, model_file, feature_map_file):
    start = datetime.now()
    all_features, labels = FileUtils.read_features(features_file)
    counters_dict, word_tag_dict, unk_tag_dict = DictUtils.extract_features(all_features, labels)
    transform_of_features, features_map, model = create_features_format(all_features, labels)
    FileUtils.write_feature_map(feature_map_file, features_map, counters_dict)
    FileUtils.write_logistic_regression_model(model_file, model)
    end = datetime.now()
    print('Running Time: {0}'.format(end - start))
def send_backups():
    lines = FileUtils.read_lines(config.BACKUP_FILE)
    lines = lines[-config.BACKUP_ENTRIES_TO_SEND:]
    for line in lines:
        # Each backup line holds six semicolon-separated sensor readings.
        sensors_data = line.split(";")
        smart_pot = SmartPotData(sensors_data[0], sensors_data[1],
                                 sensors_data[2], sensors_data[3],
                                 sensors_data[4], sensors_data[5])
        HttpUtils.post(config.API + "SaveMeasurement", smart_pot)
    FileUtils.remove_file(config.BACKUP_FILE)
    print("Backups sent")
def main(input_file_name, q_mle, e_mle, hmm_viterbi_predictions, extra_file_name):
    start = datetime.now()
    sentences = FileUtils.read_lines(input_file_name)
    dict_q = DictUtils.convert_line_to_dict(FileUtils.read_lines(q_mle))
    dict_e = DictUtils.convert_line_to_dict(FileUtils.read_lines(e_mle))
    unk_tag_list = DictUtils.possible_tags('*UNK*', dict_e)
    tagged_text = viterbi(sentences, dict_q, dict_e, unk_tag_list)
    FileUtils.write_tagged_text(hmm_viterbi_predictions, tagged_text)
    end = datetime.now()
    print('Running Time: {0}'.format(end - start))
def simulacaoAleatoria2(totalSimulacoes):
    util = FileUtils()
    pesosLidos, valoresLidos, pesoMaximo = util.readConfigurationFile("mochila2.txt")
    melhorCromossomo = Cromossomo(len(pesosLidos))
    melhorCromossomo.setFitness(0)
    for i in range(0, totalSimulacoes):
        cromo = Cromossomo(len(pesosLidos))
        calculaFitness(cromo, pesosLidos, valoresLidos, pesoMaximo)
        if cromo.getFitness() > melhorCromossomo.getFitness():
            melhorCromossomo = cromo
        if i % 100 == 0:
            sys.stdout.write("\r" + str(int((100 * i) / totalSimulacoes)) + " %")
            sys.stdout.flush()
    print getConfiguracaoMochila(melhorCromossomo, pesosLidos, valoresLidos)
def main(input_file_name, model_file_name, feature_map_file, output_file_name):
    start = datetime.now()
    clf, vec = FileUtils.read_logistic_regression_model(model_file_name)
    classes = clf.classes_.tolist()
    sentences = FileUtils.read_lines(input_file_name)
    feature_map_lines = FileUtils.read_lines(feature_map_file)
    features_map, counters_dict = DictUtils.create_features_dicts(feature_map_lines)
    tagged_text = viterbi(sentences, features_map, counters_dict, clf, classes)
    FileUtils.write_tagged_text(output_file_name, tagged_text)
    end = datetime.now()
    print('Running Time: {0}'.format(end - start))
def main(input_file_name, model_file_name, feature_map_file, output_file_name):
    start = datetime.now()
    clf, vec = FileUtils.read_logistic_regression_model(model_file_name)
    sentences, max_sentence_len = FileUtils.read_sentences(input_file_name)
    feature_map_lines = FileUtils.read_lines(feature_map_file)
    features_map, counters_dict = DictUtils.create_features_dicts(feature_map_lines)
    sentences_predictions = memm_greedy(sentences, max_sentence_len,
                                        features_map, counters_dict, clf)
    FileUtils.write_prediction(output_file_name, sentences, sentences_predictions)
    end = datetime.now()
    print('Running Time: {0}'.format(end - start))
def check0x00(web, dirpath, headers):
    try:
        for dirs in dirpath:
            web0x00 = web + dirs
            req = requests.get(web0x00, headers=headers, allow_redirects=False,
                               timeout=7, verify=False)
            try:
                if req.headers['content-length'] is not None:
                    size = int(req.headers['content-length'])
                else:
                    size = 0
            except (KeyError, ValueError, TypeError):
                size = len(req.content)
            finally:
                size = FileUtils.sizeHuman(size)
            resp = str(req.status_code)
            if resp == '200' or resp == '302' or resp == '304':
                print(G + ' [*] Found : ' + O + web0x00 + GR + ' - ' + size + G + ' (' + resp + ')')
                file_paths.append(web0x00)
            else:
                print(C + ' [*] Checking : ' + B + web0x00 + R + ' (' + resp + ')')
        return file_paths
    except Exception as e:
        print(R + ' [-] Unknown Exception Encountered!')
        print(R + ' [-] Exception : ' + str(e))
        return file_paths
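# Generic sketch of the human-readable size formatting FileUtils.sizeHuman is
# used for above. Illustrative only; the project's own helper may format
# values differently.
def size_human(num_bytes):
    # Walk up the binary units until the value fits below 1024.
    for unit in ("B", "KB", "MB", "GB", "TB"):
        if num_bytes < 1024.0:
            return "%.1f%s" % (num_bytes, unit)
        num_bytes /= 1024.0
    return "%.1fPB" % num_bytes

print(size_human(5 * 1024 * 1024))  # -> 5.0MB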
def decrypt(self):
    if not self._areParametersVerified:
        print("Verify the parameters first!")
        return

    # find the configuration file
    configuration_file_name = os.path.relpath(self._operation_directory, ".") + ".InfoVault"
    if not os.path.isfile(path=configuration_file_name):
        print("Configuration file " + configuration_file_name + " does not exist")
        return

    FileVault_configuration_json = FileUtils.read_data_from_file(
        filename=configuration_file_name)

    # decode the json
    FileVault_configuration = json.loads(FileVault_configuration_json)

    # parse the json
    try:
        operation_directory = FileVault_configuration["operation_directory"]
        encrypted_session_key = FileVault_configuration["encrypted_session_key"]
        random_nonce = FileVault_configuration["random_nonce"]
        key_name = FileVault_configuration["key_name"]
        signature = FileVault_configuration["signature"]
    except Exception, e:
        print("Error occurred while processing configuration file, aborting..." + str(e))
        return
def check_rsa_priv_passphase(priv_key_file_name, passphrase):
    private_key_data = FileUtils.read_data_from_file(filename=priv_key_file_name)
    try:
        # importKey raises if the passphrase does not match the key.
        RSA.importKey(externKey=private_key_data, passphrase=passphrase)
        return True
    except Exception, e:
        print(str(e))
        return False
def readBasePath(self, argvBasePath: str) -> str:
    """- Get the storage path (file or directory).
    - param
        - `argvBasePath` storage path passed on the command line
    - return the resolved storage path (file or directory)
    """
    return FileUtils.inputPath("Input jpg file or dir: ", argvBasePath)
def digest(self, filename):
    """
    Generate a cryptographic hash of the actual data combined with a
    shared secret key.
    """
    stream = FileUtils.get_stream(filename)
    digest = self.crypto_algorithm()
    digest.update(self.secret_shared_key)
    digest.update(stream)
    return digest.hexdigest()
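# Side note: Python's hmac module provides the standard keyed-hash (HMAC)
# construction for the same "hash data plus shared secret" goal as digest()
# above. A minimal sketch, illustrative only; the original class keys the
# raw hash directly instead.
import hashlib
import hmac

def hmac_digest(secret_shared_key, stream):
    # Keyed hash over the raw bytes with HMAC-SHA256.
    return hmac.new(secret_shared_key, stream, hashlib.sha256).hexdigest()

print(hmac_digest(b"shared-secret", b"file contents"))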
def buildImageFileList(self, basePath: str) -> List[str]:
    """- Build the list of all jpg files under the path.
    - param
        - `basePath` storage path
    - return all jpg files under the path
    """
    imageFileList: List[str] = FileUtils.listAllPaths(basePath, type=FileUtils.FILE,
                                                      extList=["jpg"])
    logging.info("Building image file list finished.")
    return imageFileList
def buildDbFileList(self, basePath: str) -> List[str]:
    """- Build the list of all db files under the path.
    - param
        - `basePath` storage path
    - return all db files under the path
    """
    dbFileList: List[str] = FileUtils.listAllPaths(basePath, type=FileUtils.FILE,
                                                   extList=["db"])
    logging.info("Building db file list finished.")
    return dbFileList
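# Generic, standard-library sketch of the "list all files with the given
# extensions" idea behind FileUtils.listAllPaths used by the two builders
# above. Illustrative only; the project's helper may behave differently.
from pathlib import Path
from typing import List

def list_files_by_ext(basePath: str, extList: List[str]) -> List[str]:
    # Walk basePath recursively and keep files whose suffix matches extList.
    exts = {"." + ext.lower() for ext in extList}
    return [str(p) for p in Path(basePath).rglob("*")
            if p.is_file() and p.suffix.lower() in exts]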
def stage1(self, gdata):
    print("Started Stage1: ")
    spamutils = SpamUtils()
    fileutils = FileUtils()
    data = gdata[0]
    path = gdata[1]
    obs_no = []
    co = 0
    for each_obs in data:
        co += 1
        file_path = data[each_obs]['file_path']
        dest_path = path + str(int(each_obs)) + '/' + file_path.split('/')[-2]
        print(file_path, dest_path)
        lta_file = ""
        lta_list = glob.glob(file_path + '/*.lta*')
        lta_list.sort()
        print(lta_list)
        status = "unprocessed"
        if lta_list:
            if len(lta_list) > 1:
                # Group LTA files that belong together and combine each group.
                checked = []
                for each_lta in lta_list:
                    if each_lta not in checked:
                        to_comb_lta = glob.glob(each_lta + '*')
                        for x in to_comb_lta:
                            checked.append(x)
                        if len(to_comb_lta) > 1:
                            to_comb_lta.sort()
                            status = spamutils.run_ltacomb(to_comb_lta, dest_path)
                            lta_file = to_comb_lta[0]
                        else:
                            lta_file = each_lta
                            fileutils.copy_files(each_lta, dest_path)
                            print(each_lta, dest_path)
            else:
                lta_file = lta_list[0]
                fileutils.copy_files(lta_file, dest_path)
        if co == 3000:
            break
        fileutils.insert_details([lta_file], dest_path, 'false',
                                 data[each_obs]['cycle_id'], status, each_obs)
def rsa_decrypt(private_key_file_name, cipher_base64, passphrase):
    """
    Decrypt with rsa private key
    :param private_key_file_name:
    :param cipher_base64: ciphertext to decrypt, provided in base64 form
    :param passphrase: in order to open encrypted private key
    :return: plain text will be returned
    """
    cipher_decoded = base64.decodestring(cipher_base64)
    private_key = FileUtils.read_data_from_file(filename=private_key_file_name)
    if private_key is not None:
        rsa_private_key = RSA.importKey(externKey=private_key, passphrase=passphrase)
        return rsa_private_key.decrypt(ciphertext=cipher_decoded)
    return None
def rsa_encrypt(public_key_file_name, data):
    """
    Encrypt with rsa public key
    :param public_key_file_name: file name to use to encrypt
    :param data: data that will be encrypted
    :return: byte array that is encrypted, in base64 form; in case of error None
    """
    # read pub key from file
    public_key = FileUtils.read_data_from_file(filename=public_key_file_name)
    if public_key is not None:
        rsa_public_key = RSA.importKey(externKey=public_key)
        # the K argument is ignored for RSA in PyCrypto
        return base64.encodestring(rsa_public_key.encrypt(plaintext=data, K=12)[0])
    return None
def rsa_verify(public_key_file_name, data, signature):
    """
    Verify a signature with the rsa public key
    :param public_key_file_name:
    :param data: to validate the signature of
    :param signature: should be provided in base64 format
    :return: bool, True if the signature is ok, otherwise False
    """
    public_key = FileUtils.read_data_from_file(filename=public_key_file_name)
    signature = long(base64.decodestring(signature))
    if public_key is not None:
        rsa_public_key = RSA.importKey(externKey=public_key)
        return rsa_public_key.verify(M=data, signature=(signature,))
    return False
def rsa_sign(private_key_file_name, data, passphrase):
    """
    Perform signing with rsa private key
    :param private_key_file_name:
    :param data:
    :param passphrase:
    :return: signature in base64 form, or None on error
    """
    private_key = FileUtils.read_data_from_file(filename=private_key_file_name)
    if private_key is not None:
        rsa_private_key = RSA.importKey(externKey=private_key, passphrase=passphrase)
        # K is a random value and does not affect the functionality
        signature = rsa_private_key.sign(M=data, K=12)[0]
        return base64.encodestring(str(signature))
    return None
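# Hypothetical round-trip for the RSA helpers above, assuming a key pair
# written by rsa_generate; the paths and passphrase are illustrative only.
rsa_generate(keypath="keys", keyname="demo", passphrase="s3cret")
cipher = rsa_encrypt(public_key_file_name="keys/demo_pub.pem", data="hello")
plain = rsa_decrypt(private_key_file_name="keys/demo_priv.pem",
                    cipher_base64=cipher, passphrase="s3cret")
signature = rsa_sign(private_key_file_name="keys/demo_priv.pem",
                     data="hello", passphrase="s3cret")
assert rsa_verify(public_key_file_name="keys/demo_pub.pem",
                  data="hello", signature=signature)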
class TestUserModel:
    user_text = ""
    model = ""
    model_name = ""

    def remove_pattern(self, input_txt, pattern):
        r = re.findall(pattern, input_txt)
        for i in r:
            input_txt = re.sub(i, '', input_txt)
        return input_txt

    def __init__(self, text, userModel):
        self.user_text = text
        self.model = userModel
        self.file_utils_obj = FileUtils()

    def download_user_model(self):
        self.model_name = self.file_utils_obj.download(self.model, False)
        return self.model_name

    def classify(self):
        # Loading vectorizer which was fitted during training
        vectorizer = pickle.load(open("./vectorizer/tfidf_final.pkl", 'rb'))

        # Loading model
        model_path = self.model_name
        user_model = pickle.load(open(model_path, 'rb'))

        # Preprocessing of data: strip @mentions, non-letters, and short words
        text_to_classify = self.remove_pattern(self.user_text, r"@[\w]*")
        text_to_classify = re.sub("[^a-zA-Z#]", " ", text_to_classify)
        text_to_classify = ' '.join(
            [w for w in text_to_classify.split() if len(w) > 3])

        test_input = [text_to_classify]
        tfidf_text = vectorizer.transform(test_input)
        prediction = user_model.predict(tfidf_text)
        if prediction[0] == 1:
            return POSITIVE
        else:
            return NEGATIVE
def encrypt_files_recurcive(options):
    files = FileUtils.recursiv_list_files_in(options.path)
    for file in files:
        if options.encrypt:
            try:
                FileUtils.replace_file_content(
                    file, AESCipher().encrypt(FileUtils.read_file(file)))
            except Exception:
                print("Error in " + file + ", possibly cannot encrypt file")
        else:
            try:
                FileUtils.replace_file_content(
                    file, AESCipher().decrypt(FileUtils.read_file(file)))
            except Exception:
                print("Error in " + file + ", possibly cannot decrypt file")
def run(self):
    futils = FileUtils(filename="./data/normalized_training_data_2.csv",
                       skip_header=True, whitespace_delim=True)
    comp = Comparator(reference_dict=futils.get_arrays_from_csv(),
                      start_comparison_col=2)
    predictions = []
    test_file = FileUtils(filename="./data/normalized_test_data_2.csv",
                          skip_header=True, whitespace_delim=True)
    for test_record in test_file.get_arrays_from_csv():
        match = comp.get_closes_match(test_record)
        print("Closest match for {0} is {1}".format(test_record, match))
        predictions.append("{0},{1}".format(int(test_record[0]), int(match[1])))
    for p in predictions:
        print(p)
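# Generic sketch of the nearest-match lookup the Comparator is used for above:
# Euclidean distance over the comparison columns. Illustrative only; the
# project's Comparator may use a different metric.
import math

def closest_match(reference_rows, record, start_col):
    # Return the reference row minimizing the distance to record.
    def dist(row):
        return math.sqrt(sum((a - b) ** 2
                             for a, b in zip(row[start_col:], record[start_col:])))
    return min(reference_rows, key=dist)

print(closest_match([[1, 1, 0.1, 0.2], [2, 2, 0.8, 0.9]],
                    [9, 9, 0.7, 0.8], start_col=2))  # -> [2, 2, 0.8, 0.9]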
def shouldArchive(self, directoryFilepath):
    fileCount = FileUtils.countFiles(directoryFilepath)
    self.logger.log("[ArchiveManager] Found " + str(fileCount) +
                    " files in " + directoryFilepath)
    return fileCount >= self.fileCount
def makeArchive(self, directoryName, name):
    return FileUtils.makeTar(name, self.baseDataDirectory, self.backupDirectory,
                             directoryName, self.useCompressed, self.logger)
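# Generic, standard-library sketch of the "tar a data directory into the
# backup directory" step behind FileUtils.makeTar above. Names and the
# archive layout are illustrative assumptions only.
import os
import tarfile

def make_tar(name, base_data_directory, backup_directory,
             directory_name, use_compressed):
    # Write <backup_directory>/<name>.tar[.gz] containing the directory.
    suffix = ".tar.gz" if use_compressed else ".tar"
    archive_path = os.path.join(backup_directory, name + suffix)
    with tarfile.open(archive_path, "w:gz" if use_compressed else "w") as tar:
        tar.add(os.path.join(base_data_directory, directory_name),
                arcname=directory_name)
    return archive_path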
def __init__(self):
    # list of knapsack configurations (holds the items with their weights and values)
    self.configuracoesMochila = []
    self.itensDisponiveis = []
    self.fileUtils = FileUtils()
class AGMain(object):

    def __init__(self):
        # list of knapsack configurations (holds the items with their weights and values)
        self.configuracoesMochila = []
        self.itensDisponiveis = []
        self.fileUtils = FileUtils()

    def errorMessage(self):
        print("\nErro: argumentos incorretos." +
              "\nModo correto: $ python main.py gerações pontos_corte peso_maximo" +
              "\n\n\t\tgeracoes: número de gerações" +
              "\n\t\tpontos_corte: número de pontos de corte do cruzamento" +
              "\n\t\tpeso_maximo: quantidade de peso que a mochila suporta" +
              "\n\nExemplo: $ python main.py 10000 2 30000")

    def successMessage(self, executionTime):
        print('Terminou com sucesso em %.2f segundos' % executionTime)

    def main(self):
        os.system("clear")
        # generate random items and write them to itens.csv
        if len(sys.argv) == 3:
            fileName = sys.argv[1]
            nrItens = int(sys.argv[2])
            self.itensDisponiveis = self.fileUtils.generateRandomItens(nrItens, 100, 100)
            self.fileUtils.writeItensFile(self.itensDisponiveis, fileName)
        # run the algorithm reading the itens.csv file
        elif len(sys.argv) == 4:
            startTime = timeit.default_timer()
            nrGeracoes = int(sys.argv[1])
            qtdePontosCruzamento = int(sys.argv[2])
            pesoMaximo = int(sys.argv[3])
            print "Configurado para %d gerações, %d pontos de cruzamento e mochila com peso máximo %d" % (nrGeracoes, qtdePontosCruzamento, pesoMaximo)
            print "Lendo arquivo de entrada de dados..."
            self.itensDisponiveis = self.fileUtils.readItensFromFile("itens.csv")
            print "Criando a populacao inicial..."
            ag = AlgoritmoGenetico(len(self.itensDisponiveis), 100)
            ag.pesos = [item.peso for item in self.itensDisponiveis]
            ag.valores = [item.valor for item in self.itensDisponiveis]
            ag.pesoMaximo = pesoMaximo
            ag.probabilidadeCruzamento = 95
            ag.probabilidadeMutacao = 5
            print "\n***** POPULAÇÃO INICIAL"
            ag.calculaFitness()
            for i in range(nrGeracoes):
                print "\n\n***** GERAÇÃO: " + str(i + 1)
                ag.cruza(qtdePontosCruzamento)
                ag.muta()
                ag.seleciona()
                tempPesos, tempValores = ag.getMelhoresPesosValores(ag.getMelhorIndividuo())
                self.configuracoesMochila.append(Mochila(tempPesos, tempValores))
                print ag.getConfiguracaoMochila(ag.getMelhorIndividuo())
            ultimaMochila = self.configuracoesMochila[nrGeracoes - 1]
            tempFile = open("ultimaMochila.csv", "w")
            tempFile.write("GERAÇÃO: " + str(nrGeracoes) +
                           "\nPESO TOTAL: " + str(ultimaMochila.getPesoOcupado()) +
                           "\nVALOR TOTAL: " + str(ultimaMochila.getValorTotal()) +
                           "\nQUANTIDADE DE ITENS: " + str(ultimaMochila.getNrItens()) +
                           "\nLISTA DE ITENS (NUMERO, PESO, VALOR):\n")
            for index, item in enumerate(ultimaMochila.itens):
                tempFile.write(str(index) + ", " + str(item.peso) + ", " + str(item.valor) + "\n")
            tempFile.close()
            elapsed = timeit.default_timer() - startTime
            self.successMessage(elapsed)
        else:
            self.errorMessage()
def datagramReceived(self, data, address):
    ipaddr, port = address
    dir = FileUtils.get_dir_to_write(os.path.join(self.logdir, ipaddr))
    print ">>> Writing (%r) into (%s).." % (data, dir)
from AlgoritmoGenetico import AlgoritmoGenetico
from FileUtils import FileUtils

if __name__ == '__main__':
    util = FileUtils()
    # util.writeConfigurationFile("mochila500.txt", 500, 100)
    print "Lendo arquivo de entrada de dados"
    pesos, valores, pesoMaximo = util.readConfigurationFile("mochila2.txt")
    print "Criando a populacao inicial..."
    ag = AlgoritmoGenetico(len(pesos), 10)
    ag.pesos = pesos
    ag.valores = valores
    ag.pesoMaximo = pesoMaximo
    ag.probabilidadeCruzamento = 95
    ag.probabilidadeMutacao = 1
    nrGeracoes = 100000
    print "*********** populacao inicial ***************"
    print "calculando o fitness..."
    ag.calculaFitness()
    print "selecionando..."
    ag.seleciona()
    for i in range(nrGeracoes):
        ag.cruza()
        ag.muta()
        ag.seleciona()
        print "G:-" + str(i) + ag.getConfiguracaoMochila(ag.getMelhorIndividuo())