import numpy as np
from keras.models import load_model

import verify_enc


def check_model_1(word):
    # Score a word with the 5-feature model: encoding match, uppercase count,
    # lowercase count, count of letters greater than 'f', and length.
    model = load_model("model2.h5")
    d = verify_enc.initializare_big()

    encoding = verify_enc.verify_encoding(word, d)
    litere_mari = verify_enc.count_litere_mari(word)
    litere_mici = verify_enc.count_litere_mici(word)
    litere_peste_f = verify_enc.count_litere_mai_mari_de_f(word)
    count_cifre = verify_enc.count_cifre(word)
    lungime = verify_enc.get_len(word)

    test = [
        float(encoding),
        float(litere_mari),
        float(litere_mici),
        float(litere_peste_f),
        float(lungime),
        # float(count_cifre),  # digit count is computed but not fed to this model
    ]

    X_test = np.array([test])
    # A StandardScaler step was sketched here but left disabled; note that
    # fitting a fresh scaler on a single inference sample would be meaningless
    # (see the persistence sketch below).
    out = model.predict(X_test)
    print(int(out[0][0]))
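# A minimal sketch of how the disabled StandardScaler idea would be wired in
# correctly: fit the scaler once on the training matrix, persist it, and only
# transform (never fit) at inference. The helper names, the "scaler.pkl" file
# name, and the use of joblib are assumptions, not part of the original code.
import joblib
from sklearn.preprocessing import StandardScaler


def fit_and_save_scaler(X_train, path="scaler.pkl"):
    # Fit on training data only; persist so inference reuses the same stats.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X_train)
    joblib.dump(scaler, path)
    return X_scaled


def scale_for_inference(X_test, path="scaler.pkl"):
    # Reload the fitted scaler and apply the stored mean/variance.
    scaler = joblib.load(path)
    return scaler.transform(X_test)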
def check_model_2(word):
    # Score a word with the single-feature model (the encoding match alone).
    model = load_model("model3.h5")
    d = {}
    d = verify_enc.initializare(d)

    encoding = verify_enc.verify_encoding(word, d)
    test = [float(encoding)]

    X_test = np.array([test])
    out = model.predict(X_test)
    print(int(out[0][0]))
    print(encoding)
def check_model_3(word):
    # Score a word with the extended model: encoding match plus character-class
    # counts, punctuation counts, entropy, and per-token lengths.
    model = load_model("./modules/MachineLearning/models/model4.5.h5")
    d = verify_enc.initializare_extins()

    encoding = verify_enc.verify_encoding(word, d)
    if encoding is None:
        encoding = 0

    test = [
        float(encoding),
        float(verify_enc.count_litere_mari(word)),
        float(verify_enc.count_litere_mici(word)),
        float(verify_enc.count_litere_mai_mari_de_f(word)),
        float(verify_enc.get_len(word)),
        float(verify_enc.count_cifre(word)),
        float(verify_enc.count_dots(word)),
        float(verify_enc.count_lines(word)),
        float(verify_enc.count_underscore(word)),
        float(verify_enc.count_slashes(word)),
        float(verify_enc.get_entropy(word)),
    ]
    for z in verify_enc.get_every_len(word):
        test.append(float(z))

    X_test = np.array([test])
    out = model.predict(X_test)
    return int(out[0][0])
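# Hypothetical usage, assuming the model files exist at the paths above. The
# sample string is an arbitrary base64-looking example; check_model_3 is the
# only variant that returns its prediction instead of printing it.
if __name__ == "__main__":
    label = check_model_3("aGVsbG8gd29ybGQ=")
    print("predicted class:", label)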
model2.add(layers.Dense(units=1, activation="sigmoid"))

X2_train_scaled, Y2_train = shuffle(X2_train_scaled, Y2_train)
model2.compile(optimizer='sgd', loss='binary_crossentropy',
               metrics=['accuracy'])
model2.fit(X2_train_scaled, Y2_train, validation_split=0.1, epochs=5)
model2.save("model3.h5")
print("Finished the second model")

'''
true_ads = verify_enc.read_file('true_extins.csv')
false_ads = verify_enc.read_file('false_extins.csv')

d = verify_enc.initializare_extins()

for i in true_ads:
    encoding = verify_enc.verify_encoding(i[0], d)
    litere_mari = verify_enc.count_litere_mari(i[0])
    litere_mici = verify_enc.count_litere_mici(i[0])
    litere_peste_f = verify_enc.count_litere_mai_mari_de_f(i[0])
    lungime = verify_enc.get_len(i[0])
    cifre = verify_enc.count_cifre(i[0])
    puncte = verify_enc.count_dots(i[0])
    minus = verify_enc.count_lines(i[0])
    underscore = verify_enc.count_underscore(i[0])
    slash = verify_enc.count_slashes(i[0])
    entropy = verify_enc.get_entropy(i[0])
    all_lens = verify_enc.get_every_len(i[0])

    array = []
    if encoding is None:
        encoding = 0
    array.append(float(encoding))
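# Sketch, not in the original file (and left inside the same commented-out
# region as the surrounding experiment): the extended feature set above is
# computed identically here and in check_model_3, so a shared helper would
# keep training and inference in sync. The name extract_features is an
# assumption.
def extract_features(word, d):
    # Build one feature row for a word, mirroring check_model_3's ordering.
    encoding = verify_enc.verify_encoding(word, d)
    if encoding is None:
        encoding = 0
    row = [
        float(encoding),
        float(verify_enc.count_litere_mari(word)),
        float(verify_enc.count_litere_mici(word)),
        float(verify_enc.count_litere_mai_mari_de_f(word)),
        float(verify_enc.get_len(word)),
        float(verify_enc.count_cifre(word)),
        float(verify_enc.count_dots(word)),
        float(verify_enc.count_lines(word)),
        float(verify_enc.count_underscore(word)),
        float(verify_enc.count_slashes(word)),
        float(verify_enc.get_entropy(word)),
    ]
    row.extend(float(z) for z in verify_enc.get_every_len(word))
    return row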