from django.http import JsonResponse


def checkText(request):
    """Score a submitted article and return a fake-news verdict as JSON."""
    print('getting title')
    article = request.GET.get('article')
    title = request.GET.get('title')
    text = lemmatization(title + " " + article)
    # Run the model once and reuse the score instead of re-predicting per branch.
    prediction = round(float(model.predict(text)), 2)
    if prediction < 0.50:
        msg = "This article seems REAL!"
    elif prediction > 0.75:
        msg = "This article is Fake!"
    else:
        msg = "This article is probably Fake!"
    response = {'percent': prediction, 'msg': msg}
    return JsonResponse(response)
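# --- Hypothetical helper (assumed, not the project's actual code): a minimal
# sketch of the lemmatization() preprocessing the views in this module rely on.
# Requires NLTK with the WordNet data downloaded.
import re
from nltk.stem import WordNetLemmatizer

_lemmatizer = WordNetLemmatizer()

def lemmatization(text):
    """Lowercase, strip non-letters, and lemmatize each token."""
    tokens = re.sub(r"[^a-z\s]", " ", text.lower()).split()
    return " ".join(_lemmatizer.lemmatize(token) for token in tokens)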
import cv2
import numpy as np


def test_predict():
    """Tests the predict function by predicting on a sample image."""
    img_path = "data/mendeley/kneeKL299/train/4/9039627L.png"
    # Read the X-ray as a single-channel (grayscale) float image.
    img = cv2.imread(img_path, 0).astype("float")
    processed_image = preprocess(img)
    logits, probabilities = predict(processed_image)
    # The model outputs one logit and one probability per KL grade (0-4).
    assert isinstance(logits, np.ndarray)
    assert logits.shape == (5,)
    assert isinstance(probabilities, np.ndarray)
    assert probabilities.shape == (5,)
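# --- Hypothetical helper (assumed, not the project's actual code): one
# plausible `preprocess` for the test above. The 299x299 target size is a
# guess from the "kneeKL299" path; scale to [0, 1] and add batch/channel axes.
import cv2
import numpy as np

def preprocess(img, target_size=(299, 299)):
    resized = cv2.resize(img, target_size)
    scaled = resized / 255.0
    # Shape (1, H, W, 1): batch and channel dims for a Keras-style model.
    return scaled[np.newaxis, ..., np.newaxis]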
from flask import jsonify


def predict():
    """Get the current sound data and predict its class."""
    y = sound.get_data()
    label = 0
    probas = [0, 0, 0, 0, 0, 0]
    if y:
        _y = list(y)
        label = model.predict(preprocessor(_y)).tolist()[0]
        probas = model.predict_proba(preprocessor(_y)).tolist()[0]
    # One predicted label followed by the six class probabilities.
    return jsonify(points=[label] + probas)
import requests
from django.http import JsonResponse
from lxml.html import fromstring


def checkUrl(request):
    """Fetch a page by URL, score its title, and return a verdict as JSON."""
    print('getting title')
    url = request.GET.get('url')
    r = requests.get(url)
    print('parsing page')
    tree = fromstring(r.content)
    title = tree.findtext('.//title')
    title = lemmatization(title)
    prediction = round(float(model.predict(title)), 2)
    if prediction < 0.50:
        msg = "This article seems REAL!"
    elif prediction > 0.75:
        msg = "This article is Fake!"
    else:
        msg = "This article is probably Fake!"
    response = {'percent': prediction, 'msg': msg}
    return JsonResponse(response)
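# --- Usage sketch (hypothetical): querying the view above from a client.
# Host, port, and route name are illustrative; they depend on the project's
# urls.py, which is not shown here.
import requests

resp = requests.get("http://localhost:8000/checkUrl",
                    params={"url": "https://example.com/some-article"})
print(resp.json())  # e.g. {'percent': 0.42, 'msg': 'This article seems REAL!'}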
import glob

# util and model are project-local helper modules.
n = 0
for sample_rate in sample_rate_list:
    # Resample the audio and pack it into a single h5 file.
    util.make_h5('./audio_test', sr=sample_rate)
    # Load the test data from the h5 file.
    X_test = util.load_test_data(sample_rate)
    # Collect the paths of the five trained models for this sample rate
    # (same architecture, 5-fold cross-validated).
    model_list = glob.glob('saved_models/%s/**/*.h5' % sample_rate)
    for model_name in model_list:
        # Load each model checkpoint.
        m = model.load_model(model_name)
        # Predict probabilities over the 41 classes.
        pred = model.predict(m, X_test, n_class=41)
        # Ensemble with a geometric mean: accumulate the product here,
        # then take the n-th root after the loops.
        if n == 0:
            total_pred = pred
        else:
            total_pred *= pred
        n += 1

result = total_pred ** (1 / float(n))

# Save the submission in the format expected for MAP@3 evaluation.
util.write_csv(result, './submission.csv')
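# --- Side note: the running product above, followed by the n-th root, is the
# geometric mean of all (sample_rate x fold) predictions. A minimal sketch
# verifying the equivalence against a direct computation (illustrative data):
import numpy as np

preds = [np.array([0.2, 0.8]), np.array([0.4, 0.6]), np.array([0.1, 0.9])]
running = preds[0] * preds[1] * preds[2]
assert np.allclose(running ** (1 / 3),
                   np.prod(preds, axis=0) ** (1 / len(preds)))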
import os

import cv2
import numpy as np

kl = -1
stop_after = -1  # Reduce the number of samples evaluated for quicker results; -1 means "all".
for filepath_kl in filepaths:
    count = 0
    kl += 1
    print("current KL", kl)
    for filename in os.listdir(filepath_kl):
        if stop_after != -1 and stop_after <= count:
            break
        img = cv2.imread(os.path.join(filepath_kl, filename), 0).astype("float")
        processed_image = preprocess(img)
        logits, probabilities = predict(processed_image)
        prediction = np.argmax(probabilities)

        def overrule(prediction, probabilities):
            """Overrule a KL=0 or KL=2 prediction to KL=1 if conditions are met.

            Args:
                prediction: int
                probabilities: np.array

            Returns:
                prediction
            """
            is_kl2 = prediction == 2
            kl1_falls_between = (probabilities[0] > probabilities[1]
                                 and probabilities[2] > probabilities[1])
import locale

import numpy as np
from keras.optimizers import Adam

# Create the CNN and compile the model.
# (Note: this rebinds the name `model` from the module to the network itself.)
model = model.create_cnn(64, 64, 3, regress=True)
opt = Adam(lr=1e-3, decay=1e-3 / 200)
model.compile(loss="mean_absolute_percentage_error", optimizer=opt)

# Train the model.
print("[INFO] training model...")
model.fit(trainImagesX, trainY,
          validation_data=(testImagesX, testY),
          epochs=200, batch_size=8)

# Make predictions on the testing data.
print("[INFO] predicting house prices...")
preds = model.predict(testImagesX)
diff = preds.flatten() - testY
percentDiff = (diff / testY) * 100
absPercentDiff = np.abs(percentDiff)

# Compute the mean and standard deviation of the absolute percentage difference.
mean = np.mean(absPercentDiff)
std = np.std(absPercentDiff)

# Show some statistics on the model.
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
print("[INFO] avg. house price: {}, std house price: {}".format(
    locale.currency(df["price"].mean(), grouping=True),
    locale.currency(df["price"].std(), grouping=True)))
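# --- Likely continuation (assumed, not shown in the original): report the
# error statistics computed above alongside the price statistics.
print("[INFO] mean absolute percentage error: {:.2f}%, std: {:.2f}%".format(
    mean, std))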
# Import libraries.
from sklearn.metrics import accuracy_score

from utils import read_dataset
from utils import model

# Read the data.
X_train, X_test, y_train, y_test = read_dataset.read_irisdata("../dataset/Iris.csv")

# Number of clusters K and number of training epochs.
k_value = 2
epoch = 5

# train(): takes the number of clusters, the features, the labels, and the
# number of epochs; returns the k centroids.
centroids = model.train(k_value, X_train, y_train, epoch)

# predict(): takes the number of clusters, the k centroids, and the test set;
# returns the predicted class labels.
class_label = model.predict(k_value, centroids, X_test)

# Accuracy score.
accuracy = accuracy_score(y_test, class_label)
print('Test Accuracy: {}\n\n'.format(accuracy))
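# --- Hypothetical sketch (assumed, not the project's actual utils.model): one
# plausible implementation of the predict() interface above, assigning each
# test sample the index of its nearest centroid.
import numpy as np

def predict(k, centroids, X):
    """Label each row of X with the index (0..k-1) of its nearest centroid."""
    X = np.asarray(X, dtype=float)
    centroids = np.asarray(centroids, dtype=float)
    assert centroids.shape[0] == k
    # Pairwise Euclidean distances: shape (n_samples, k).
    dists = np.linalg.norm(X[:, None, :] - centroids[None, :, :], axis=2)
    return np.argmin(dists, axis=1)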