def main(arguments):
    # load the features of the dataset
    features = datasets.load_breast_cancer().data

    # standardize the features
    features = StandardScaler().fit_transform(features)

    # get the number of features
    num_features = features.shape[1]

    # load the corresponding labels for the features
    labels = datasets.load_breast_cancer().target

    # transform the labels to {-1, +1}
    labels[labels == 0] = -1

    # split the dataset to 70/30 partition: 70% train, 30% test
    train_features, test_features, train_labels, test_labels = train_test_split(
        features, labels, test_size=0.3, stratify=labels
    )

    train_size = train_features.shape[0]
    test_size = test_features.shape[0]

    # slice the dataset as per the batch size
    train_features = train_features[:train_size - (train_size % BATCH_SIZE)]
    train_labels = train_labels[:train_size - (train_size % BATCH_SIZE)]
    test_features = test_features[:test_size - (test_size % BATCH_SIZE)]
    test_labels = test_labels[:test_size - (test_size % BATCH_SIZE)]

    # instantiate the SVM class
    model = SVM(
        alpha=LEARNING_RATE,
        batch_size=BATCH_SIZE,
        svm_c=arguments.svm_c,
        num_classes=NUM_CLASSES,
        num_features=num_features,
    )

    # train the instantiated model
    model.train(
        epochs=arguments.num_epochs,
        log_path=arguments.log_path,
        train_data=[train_features, train_labels],
        train_size=train_features.shape[0],
        validation_data=[test_features, test_labels],
        validation_size=test_features.shape[0],
        result_path=arguments.result_path,
    )

    test_conf, test_accuracy = utils.plot_confusion_matrix(
        phase="testing",
        path=arguments.result_path,
        class_names=["benign", "malignant"],
    )

    print("True negatives : {}".format(test_conf[0][0]))
    print("False negatives : {}".format(test_conf[1][0]))
    print("True positives : {}".format(test_conf[1][1]))
    print("False positives : {}".format(test_conf[0][1]))
    print("Testing accuracy : {}".format(test_accuracy))
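# The snippet above assumes module-level imports and hyperparameter constants
# that are not shown here. The scikit-learn imports below are the ones it
# relies on; the constant values are illustrative placeholders (not the
# original project's settings), and SVM / utils are the project's own modules.
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

BATCH_SIZE = 128      # assumed mini-batch size used for slicing and training
LEARNING_RATE = 1e-3  # assumed step size passed to SVM as alpha
NUM_CLASSES = 2       # breast cancer task: benign vs. malignant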
def test_svm_predict(self):
    fv = FeatureVector()
    fv.append(TestSVM.BogusFeature())
    fv.append(TestSVM.BogusFeature())
    fv.append(TestSVM.BogusFeature())

    svm = SVM(feature_vector=fv)
    svm.train(reviews=TestSVM.sample_reviews, labels=TestSVM.sample_labels)

    # a trained binary SVM should only ever predict one of the two classes
    assert svm.predict(['HI']) in (0, 1)
    assert svm.predict(['earth']) in (0, 1)
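# The test above references class-level fixtures (BogusFeature, sample_reviews,
# sample_labels) that are defined elsewhere in the suite. The sketch below is
# a hypothetical stand-in for those fixtures, not the project's actual code:
# a do-nothing feature stub that always reports the same value, plus a tiny
# labelled corpus, just to show the shape the test relies on.
class TestSVM:
    class BogusFeature:
        def train(self, reviews, labels):
            pass  # a stub feature has nothing to learn

        def score(self, review):
            return 1.0  # constant feature value for every review (method name is an assumption)

    sample_reviews = [['great', 'food'], ['terrible', 'service']]
    sample_labels = [1, 0]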
def main():
    reviews = retrieve_reviews(5000)

    # Split reviews into a training and testing portion
    train_reviews = reviews[:4500]
    test_reviews = reviews[4500:]

    # Separate text and label to use during the training process
    text, labels = zip(*train_reviews)

    vector = FeatureVector()

    # Add features into feature vector
    vector.append(sentiment.SentimentAnalysis())
    vector.append(tfidf.TfIdf())
    vector.append(readability.Readability())
    vector.append(food_sophistication.FoodSophistication())
    vector.append(average_word_length.AverageWordLength())
    vector.append(rarity.Rarity())
    vector.append(spelling.Spelling())
    vector.append(sentence_topic.SentenceTopic())

    # Train all of the features individually
    vector.train(text, labels)

    model = SVM(vector)
    model.train(text, labels)

    # Separate text and label to use during the testing process
    text, labels = zip(*test_reviews)

    matches = 0
    distances = {}
    for i in range(len(labels)):
        predicted_score = model.predict(text[i])
        actual_score = labels[i]

        # count how many predicted scores match with the actual ones
        if predicted_score == actual_score:
            matches += 1

        # get a histogram of how far predicted scores differ from the actual
        dist = abs(predicted_score - actual_score)
        distances[dist] = distances.get(dist, 0) + 1

    print('Matches = {:.2%}'.format(matches / len(labels)))
    for dist, count in distances.items():
        print("{} : {}".format(dist, count))
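# Possible follow-up (not part of the original script): the {distance: count}
# histogram collected above also yields the mean absolute error in one pass,
# which is often a more informative summary than exact-match accuracy for
# star-style review scores. A minimal sketch:
def mean_absolute_error(distances, total):
    """Compute MAE from a {distance: count} histogram over `total` reviews."""
    return sum(d * c for d, c in distances.items()) / total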
def main():
    reviews = retrieve_reviews(5000)

    # Split reviews into a training and testing portion
    train_reviews = reviews[:4000]
    test_reviews = reviews[4000:]

    # Separate text and label to use during the training process
    text, labels = zip(*train_reviews)

    vector = FeatureVector()

    # Add features into feature vector
    vector.append(average_word_length.AverageWordLength())
    vector.append(sentiment_analysis.SentimentAnalysis())
    vector.append(rarity_analysis.Rarity())
    vector.append(tfidf.TfIdf())
    vector.append(readability.Readability())
    vector.append(spelling.Spelling())

    # Train all of the features individually
    vector.train(text, labels)

    model = SVM(vector)
    model.train(text, labels)

    # Separate text and label to use during the testing process
    text, labels = zip(*test_reviews)

    matches = 0
    distances = {}
    for i in range(len(labels)):
        predicted_score = model.predict(text[i])
        actual_score = labels[i]

        # count how many predicted scores match with the actual ones
        if predicted_score == actual_score:
            matches += 1

        # get a histogram of how far predicted scores differ from the actual
        dist = abs(predicted_score - actual_score)
        distances[dist] = distances.get(dist, 0) + 1

    print('Matches = {0:.2f}%'.format((matches / len(labels)) * 100))
    for dist, count in distances.items():
        print("{} : {}".format(dist, count))
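# If this script is meant to be run directly, the usual entry-point guard
# keeps the evaluation from executing on import. A minimal sketch, assuming
# main() takes no arguments as defined above:
if __name__ == "__main__":
    main()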