import numpy as np
from sklearn import cross_validation                    # pre-0.18 scikit-learn API used throughout
from sklearn.neighbors import KNeighborsClassifier
from nltk.metrics import edit_distance as edit_dist     # assumed origin of edit_dist
from DatasetReader import DatasetReader                 # project-local module
from FreemanEncoder import FreemanEncoder               # project-local module


class KNN_strings(object):
    '''
    k-NN over Freeman-code strings. scikit-learn expects numeric features,
    so the classifier is fit on row indices and the custom metric resolves
    each index back to its string before computing edit distance.
    '''

    def __init__(self, n_neighbors=1):
        '''
        Constructor
        '''
        self.dsr = DatasetReader()
        self.fenc = FreemanEncoder()
        self.data = []
        self.knn = KNeighborsClassifier(n_neighbors=n_neighbors,
                                        algorithm='auto',
                                        metric=self.lev_metric)

    def lev_metric(self, x, y):
        i, j = int(x[0]), int(y[0])  # extract indices
        # if self.data[i] == self.data[j]:
        #     print self.data[i], self.data[j], edit_dist(self.data[i], self.data[j])
        return edit_dist(self.data[i], self.data[j])

    def knn_train(self, dataset, cv=1, datasplit=0.7):
        images_dataset = self.dsr.read_dataset_images(dataset)
        freeman_code_dict = self.fenc.encode_freeman_dataset(images_dataset)
        _, codes, labels = self.dsr.gen_labelled_arrays(freeman_code_dict)
        self.data = codes
        X = np.arange(len(self.data)).reshape(-1, 1)
        if cv <= 1:
            self.knn.fit(X, labels)
        else:
            cv_result = cross_validation.cross_val_score(self.knn, X, labels, cv=cv)
            print cv_result
        print 'Training Done!'

    def knn_predict(self, test_data, score=False):
        images_dataset = self.dsr.read_dataset_images(test_data)
        freeman_code_dict = self.fenc.encode_freeman_dataset(images_dataset)
        _, codes, labels = self.dsr.gen_labelled_arrays(freeman_code_dict)
        # Caution: lev_metric always indexes into self.data, which still holds
        # the training codes here, so the test strings themselves are not the
        # ones being compared.
        X_pred = np.arange(len(codes)).reshape(-1, 1)
        predictions = self.knn.predict(X_pred)
        if score:
            accuracy = self.knn.score(X_pred, labels)
            print "Test Accuracy: ", accuracy
        return predictions

    def knn_predict_one(self, test_image):
        image_code = self.fenc.encode_freeman(test_image)
        print image_code
        data = [image_code]
        X_pred = np.arange(len(data)).reshape(-1, 1)
        prediction = self.knn.predict(X_pred)
        return prediction
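# Hypothetical usage of KNN_strings (the paths are placeholders, not from the
# original source): fit on row indices, then classify a single image.
# knn = KNN_strings(n_neighbors=3)
# knn.knn_train('./digits_dataset', cv=5)        # 5-fold cross-validation score
# print knn.knn_predict_one('./test_digit.png')  # single-image prediction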
def build_stacked_ae(path):
    """
    Build the stacked auto-encoder neural network and evaluate its performance.

    :param path: Path to the genetic dataset
    :return: Accuracy of classification of cell cycle phase.
    """
    ############### Stacked Auto-Encoders ##############
    dr = DatasetReader(path)
    train = dr.load_data()
    ae = StackedAutoencoder(train[0], train[1], train[2], 3)
    ae.create_autoencoder()
    result = ae.evaluate_autoencoder()
    print("Accuracy: %.2f%%" % (result[1] * 100))
    return result[1] * 100
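# Hypothetical call with a placeholder dataset path:
# accuracy = build_stacked_ae('./genetic_dataset')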
def evaluationMode(settings, read, detailEva):
    print("Evaluation mode!")
    clinicalNotes = DatasetReader.readClinicalNotes(settings["dataset"]["directory"],
                                                    settings["dataset"]["name"])
    if read:
        nejiAnnotations = Annotator.readNejiAnnotations(settings["dataset"]["neji_annotations"])
    else:
        nejiAnnotations = Annotator.annotate(clinicalNotes)
    Evaluator.evaluateNeji(clinicalNotes, nejiAnnotations, detailEva)
    annotations = Annotator.postProcessing(clinicalNotes, nejiAnnotations,
                                           settings["post_vocabularies"])
    Evaluator.evaluateAnnotations(clinicalNotes, annotations, detailEva)
    print("Done!")
def annotationMode(settings, read):
    print("Annotation mode!")
    clinicalNotes = DatasetReader.readClinicalNotes(settings["dataset"]["directory"],
                                                    settings["dataset"]["name"])
    if read:
        nejiAnnotations = Annotator.readNejiAnnotations(settings["dataset"]["neji_annotations"])
    else:
        nejiAnnotations = Annotator.annotate(clinicalNotes)
        Writer.writeAnnotations(nejiAnnotations, settings["dataset"]["neji_annotations"])
    annotations = Annotator.postProcessing(clinicalNotes, nejiAnnotations,
                                           settings["post_vocabularies"])
    matrix = Writer.writeMatrix(annotations, settings["dataset"]["matrix_location"])
    print("Done!")
    return matrix, clinicalNotes
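# Both modes consume the same settings structure. A minimal skeleton inferred
# from the keys accessed above; every value here is a placeholder, not from
# the original source.
settings = {
    "dataset": {
        "directory": "./clinical_notes/",
        "name": "example_corpus",
        "neji_annotations": "./neji_annotations.tsv",
        "matrix_location": "./annotation_matrix.csv",
    },
    "post_vocabularies": "./vocabularies/",
}
# matrix, clinicalNotes = annotationMode(settings, read=False)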
def build_binary_classifiers(path_g1_sg2m, path_g1s_g2m):
    """
    Build the stacked neural network with a single output neuron for binary
    classification of G1 vs. S+G2M and G1+S vs. G2M phases, and evaluate its
    performance.

    :param path_g1_sg2m: Path to the dataset labeled with two classes: G1 and SG2M
    :param path_g1s_g2m: Path to the dataset labeled with two classes: G1S and G2M
    :return: Accuracy of classification of each model.
    """
    ############### Ordinal Classifier #################
    dr1 = DatasetReader(path_g1_sg2m)
    dr2 = DatasetReader(path_g1s_g2m)
    binary_train1 = dr1.load_data()
    binary_train2 = dr2.load_data()
    oc1 = OrdinalClassifier(binary_train1[0], binary_train1[1])
    oc2 = OrdinalClassifier(binary_train2[0], binary_train2[1])
    r1 = oc1.classify()
    r2 = oc2.classify()
    return r1, r2
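# Hypothetical usage with placeholder dataset paths:
# acc_g1_vs_sg2m, acc_g1s_vs_g2m = build_binary_classifiers('./data_g1_sg2m',
#                                                           './data_g1s_g2m')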
import os

import numpy
from nltk.metrics import ConfusionMatrix
from nltk.tag.hmm import HiddenMarkovModelTrainer
from sklearn import cross_validation                 # pre-0.18 scikit-learn API

from DatasetReader import DatasetReader              # project-local module
from FreemanEncoder import FreemanEncoder            # project-local module


class HMM(object):
    '''
    HMM classifier over Freeman chain codes: every symbol of a training
    sequence is tagged with its character label, and prediction reads the
    label back off the tagged test sequence.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self.dsr = DatasetReader()
        self.fenc = FreemanEncoder()
        states = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
        symbols = ['0', '1', '2', '3', '4', '5', '6', '7']
        self.learning_model = HiddenMarkovModelTrainer(states=states, symbols=symbols)
        self.model = None

    def generate_labelled_sequences(self, freeman_codes_dict):
        labeled_sequences = []
        labeled_symbols = []
        codes_list = freeman_codes_dict.items()
        for tup in codes_list:
            for code in tup[1]:
                temp = []
                for symbol in code:
                    temp.append((symbol, tup[0]))
                labeled_symbols.append(temp)
        for tup in codes_list:
            for code in tup[1]:
                labeled_sequences.append((code, tup[0]))
        codes = numpy.array([x[0] for x in labeled_sequences])
        labels = numpy.array([y[1] for y in labeled_sequences])
        return labeled_symbols, labeled_sequences, codes, labels

    def learning_curve(self, dataset, n_iter,
                       train_sizes=numpy.linspace(0.1, 1.0, 5)):
        cv_scores = []
        train_scores = []
        for i in train_sizes:
            # Train on only the first fraction i of the data
            # (expects dataset in the tuple form returned by get_data).
            n = int(len(dataset[0]) * i)
            data = tuple(part[:n] for part in dataset)
            cv_score = []
            t_score = []
            for j in range(n_iter):
                cv_score.extend(self.training(data, cv=10, n_iter=1))
                train_score, test_score = self.training(data, n_iter=1)
                t_score.extend(train_score)
            cv_scores.append(cv_score)
            train_scores.append(t_score)
        cv_scores = numpy.array(cv_scores)
        train_scores = numpy.array(train_scores)
        print cv_scores.shape
        print train_scores.shape
        return train_scores, cv_scores

    def get_data(self, dataset_path):
        dataset = self.dsr.read_dataset_images(dataset_path)
        freeman_codes_dict = self.fenc.encode_freeman_dataset(dataset)
        labeled_symbols, labeled_sequence, codes, labels = \
            self.generate_labelled_sequences(freeman_codes_dict)
        return labeled_symbols, labeled_sequence, codes, labels

    def training(self, dataset, cv=1, n_iter=1):
        if isinstance(dataset, basestring):
            labeled_symbols, labeled_sequence, codes, labels = self.get_data(dataset)
        else:
            labeled_symbols, labeled_sequence, codes, labels = dataset
        self.model = self.learning_model.train(labeled_symbols)
        if cv > 1:
            cv_scores = []
            for i in range(n_iter):
                skf = cross_validation.KFold(len(labels), n_folds=10, shuffle=True)
                iter_score = []
                for train_index, test_index in skf:
                    train_data = list(numpy.array(labeled_symbols)[train_index])
                    test_data = list(numpy.array(labeled_symbols)[test_index])
                    self.model = self.learning_model.train(train_data)
                    fold_score = self.model.evaluate(test_data)
                    iter_score.append(fold_score)
                cv_scores.append(numpy.mean(iter_score))
            return cv_scores
        else:
            skf = cross_validation.ShuffleSplit(len(labels), n_iter=n_iter,
                                                test_size=0.2, random_state=0)
            training_score = []
            test_score = []
            for train_index, test_index in skf:
                train_data = list(numpy.array(labeled_symbols)[train_index])
                test_data = list(numpy.array(labeled_symbols)[test_index])
                self.model = self.learning_model.train(train_data)
                training_score.append(self.model.evaluate(train_data))
                test_score.append(self.model.evaluate(test_data))
                if n_iter == 1:
                    predict_labels = []
                    for i in range(len(list(codes[test_index]))):
                        predicted_states = self.model.tag(list(codes[test_index])[i])
                        predict_labels.append(predicted_states[0][1])
                    self.ConfusionMatrix = ConfusionMatrix(list(labels[test_index]),
                                                           predict_labels)
            return training_score, test_score

    def predict(self, image_path):
        if os.path.isfile(image_path):
            image_array = self.dsr.read_img_bw(image_path)
            freeman_code = self.fenc.encode_freeman(image_array)
        else:
            freeman_code = image_path
        predicted_states = self.model.tag(freeman_code)
        predicted_states = [x[1] for x in predicted_states]
        if len(set(predicted_states)) == 1:
            predicted_class = list(set(predicted_states))[0]
        else:
            # Fall back to a majority vote when the tagger does not assign a
            # single state to the whole sequence.
            predicted_class = max(set(predicted_states), key=predicted_states.count)
        return predicted_class

## TESTING CODE (WILL BE REMOVED) ##
# from HMM import HMM
# hmm = HMM()
# cv_scores = hmm.training('I:\\eclipse_workspace\\CharacterRecognition\\teams_dataset', cv=10, n_iter=50)
# train_score, test_score = hmm.training('I:\\eclipse_workspace\\CharacterRecognition\\teams_dataset', n_iter=1)
# with open('hmm_confusion_matrix.txt', 'w') as fp:
#     fp.write(hmm.ConfusionMatrix.__str__())
#
# with open("./Results/hmm.txt", 'w') as fp:
#     for i in range(len(cv_scores)):
#         text = str(cv_scores[i]) + ',' + str(train_score[i]) + ',' + str(test_score[i]) + '\n'
#         print text
#         print '--------------------------------'
#         fp.write(text)
if (dropAll):
    m20Connector.dropAll()
    m20Connector.close()
    sys.exit(0)

if (initDB):
    """
    Create tables, insert first-level tables = movies, genome_tags
    """
    m20Connector.initDB()
    # Init to read
    moviesDS = DatasetReader.initWithFraction(dataset_path + '/movies.csv', 1.0, ',', init=True)
    gtagsDS = DatasetReader.initWithFraction(dataset_path + '/genome-tags.csv', 1.0, ',', init=True)
    linksDS = DatasetReader.initWithFraction(dataset_path + '/links.csv', 1.0, ',', init=True)
    # Just init
    ratingsDS = DatasetReader(dataset_path + "/ratings.csv", init=True)
    tagsDS = DatasetReader(dataset_path + "/tags.csv", init=True)
    gscoresDS = DatasetReader(dataset_path + "/genome-scores.csv", init=True)
import logging
import os

from datasets.caltechpedestrian import CaltechPedestrian
from datasets.bdd100k import BDD100K
from datasets.citypersons import CityPersons
from DatasetReader import DatasetReader

logging.basicConfig(filename='example.log', level=logging.DEBUG)

base_dir_bdd100k = '/data/stars/share/STARSDATASETS/bdd100k'
for subset in ['train', 'val']:
    db = BDD100K(name='bdd100k-{}'.format(subset),
                 base_dir=base_dir_bdd100k,
                 save_dir='./bdd100k-{}'.format(subset),
                 subset=subset)
    db.writedataframe()
    reader = DatasetReader('./bdd100k-{}'.format(subset))
    df = reader.get_annotations(query='category == "person"')
    reader.plot_annotations(df=df, plot_cols=['xmin', 'ymin', 'xmax', 'ymax', 'category'])

base_dir_caltech = '/data/stars/user/uujjwal/datasets/pedestrian/caltech/caltechall-train'
db = CaltechPedestrian(name='caltechall-train',
                       base_dir=base_dir_caltech,
                       save_dir='./caltechall-train')
db.writedataframe()
reader = DatasetReader('./caltechall-train')
df = reader.get_annotations(query='object == "person"')
reader.plot_annotations(df=df, plot_cols=['xmin_full', 'ymin_full', 'xmax_full', 'ymax_full', 'object'])

base_dir_caltech = '/data/stars/user/uujjwal/datasets/pedestrian/caltech/caltechall-test'
db = CaltechPedestrian(name='caltechall-test',
                       base_dir=base_dir_caltech,
                       save_dir='./caltechall-test')
db.writedataframe()
reader = DatasetReader('./caltechall-test')
if (dropAll):
    m20Connector.dropAll()
    m20Connector.close()
    sys.exit(0)

if (initDB):
    """
    Create tables, insert first-level tables = movies, genome_tags
    """
    m20Connector.initDB()
    # Init to read
    moviesDS = DatasetReader.initWithFraction('datasets/data/movies.csv', 1.0, ',', init=True)
    gtagsDS = DatasetReader.initWithFraction('datasets/data/genome-tags.csv', 1.0, ',', init=True)
    linksDS = DatasetReader.initWithFraction('datasets/data/links.csv', 1.0, ',', init=True)
    # Just init
    ratingsDS = DatasetReader("datasets/data/ratings.csv", init=True)
    tagsDS = DatasetReader("datasets/data/tags.csv", init=True)
    gscoresDS = DatasetReader("datasets/data/genome-scores.csv", init=True)

if (init_clear == False):
    for movie in moviesDS.readPercentage():
from DatasetReader import DatasetReader

if __name__ == '__main__':
    moviesDS = DatasetReader.initWithFraction('datasets/movies.csv', 1.0, ',')
    for m in moviesDS.readPercentage()[:10]:
        print m
def __init__(self):
    self.reader = DatasetReader()
import operator
import os
import random

from nltk.metrics import edit_distance as edit_dist    # assumed origin of edit_dist
from DatasetReader import DatasetReader                # project-local module
from FreemanEncoder import FreemanEncoder              # project-local module


class KNN(object):
    '''
    From-scratch k-NN over Freeman chain codes, using string edit distance.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self.dsr = DatasetReader()
        self.fenc = FreemanEncoder()
        self.training_data = []

    def generate_labelled_sequences(self, freeman_codes_dict):
        labelled_sequences = []
        codes_list = freeman_codes_dict.items()
        for tup in codes_list:
            for code in tup[1]:
                labelled_sequences.append((tup[0], code))
        return labelled_sequences

    def prepare_data(self, datas, training=None, test=None, split=0.80):
        # Randomly separate the data into a training and a test set;
        # split is the probability of a sample landing in training.
        training = [] if training is None else training
        test = [] if test is None else test
        for data in range(len(datas)):
            if random.random() < split:
                training.append(datas[data])
            else:
                test.append(datas[data])

    def get_neighbors(self, training, test_instance, k):
        # Get the list of the k nearest neighbors to a test instance
        distances = []
        for i in range(len(training)):
            dist = edit_dist(test_instance, training[i][1])
            distances.append((training[i], dist))
        distances.sort(key=operator.itemgetter(1))
        neighbors = []
        for x in range(0, k):
            neighbors.append(distances[x][0])
        return neighbors

    def get_label(self, neighbors):
        # Determine the label of a test instance by majority vote of its neighbors
        labels = {}
        for neighbor in neighbors:
            if neighbor[0] not in labels:
                labels[neighbor[0]] = 1
            else:
                labels[neighbor[0]] += 1
        sorted_labels = sorted(labels.items(), key=operator.itemgetter(1), reverse=True)
        return sorted_labels[0][0]

    def evaluation(self, training, test):
        # Evaluate the accuracy of knn
        correct_count = 0
        # k = int(math.ceil(len(training)/10))
        k = 1
        for test_data in test:
            neighbors = self.get_neighbors(training, test_data[1], k)
            label = self.get_label(neighbors)
            if int(label) == int(test_data[0]):
                correct_count += 1
        accuracy = (float(correct_count) / len(test)) * 100
        print accuracy

    def knn_train(self, dataset_path, train_test_split=0.8):
        dataset = self.dsr.read_dataset_images(dataset_path)
        freeman_codes_dict = self.fenc.encode_freeman_dataset(dataset)
        labelled_sequences = self.generate_labelled_sequences(freeman_codes_dict)
        training = []
        test = []
        # print labelled_sequences
        self.prepare_data(labelled_sequences, training, test, split=train_test_split)
        self.training_data = training
        if train_test_split != 1.0:
            print "Training:" + len(training).__str__()
            print "Test:" + len(test).__str__()
            self.evaluation(training, test)

    def knn_predict_one(self, image, k=1):
        if os.path.isfile(image):
            image_array = self.dsr.read_img_bw(image)
            test = self.fenc.encode_freeman(image_array)
        else:
            test = image
        # Find the nearest neighbors of the test sequence in the training data
        neighbors = self.get_neighbors(self.training_data, test, k)
        label = self.get_label(neighbors)
        return label
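# Hypothetical usage (the paths are placeholders, not from the original source):
# knn = KNN()
# knn.knn_train('./digits_dataset', train_test_split=0.8)
# print knn.knn_predict_one('./test_digit.png', k=3)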
import pickle

import numpy
from sklearn import cross_validation, naive_bayes     # pre-0.18 scikit-learn API

from DatasetReader import DatasetReader               # project-local module
from ml_alg_base import ml_alg_base                   # assumed location of the base class


class NaiveBayes(ml_alg_base):
    '''
    Gaussian Naive Bayes over raw (flattened) character images.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        ml_alg_base.__init__(self)
        self.dsr = DatasetReader()
        self.learning_model = naive_bayes.GaussianNB()

    def get_data(self, dataset_path="./teams_dataset"):
        data_dict = self.dsr.read_dataset_images(dataset_path)
        _, data_set_x, data_set_y = self.dsr.gen_labelled_arrays(data_dict)
        data_set_x = data_set_x.reshape(len(data_set_x), -1)
        return data_set_x, data_set_y

    def training(self, dataset_path, cv=1):
        dataset = self.dsr.read_dataset_images(dataset_path)
        _, images, labels = self.dsr.gen_labelled_arrays(dataset)
        images = numpy.array(images)
        # reshape images for input
        data = images.reshape(len(images), -1)
        if cv <= 1:
            self.learning_model.fit(data, labels)
        else:
            cv_result = cross_validation.cross_val_score(self.learning_model,
                                                         data, labels, cv=cv)
            return cv_result
        pickle.dump(self.learning_model, open("./Models/naivebayes_model.p", "wb"))

    def predict(self, image_path):
        try:
            self.learning_model = pickle.load(open("./Models/naivebayes_model.p", "rb"))
        except IOError:
            print "Please train the Naive Bayes model first"
        if isinstance(image_path, basestring):
            image = self.dsr.read_img_bw(image_path)
        else:
            image = image_path
        image = image.reshape(-1, image.shape[0] * image.shape[1])
        result = self.learning_model.predict(image)
        return result

# from NaiveBayes import NaiveBayes
# NB = NaiveBayes()
# # NB.training('I:\\eclipse_workspace\\CharacterRecognition\\digits_dataset_clean', cv=5)
# # print NB.predict('I:\\eclipse_workspace\\CharacterRecognition\\test1.jpg')
# data_x, data_y = NB.get_data()
# print data_x.shape, data_y.shape
# NB.first_exp(data_x, data_y, NB.learning_model, algorithm_name='NaiveBayes', num_iter=50)
import operator

from nltk.metrics import edit_distance as edit_dist           # assumed origin of edit_dist
from sklearn.cross_validation import KFold, train_test_split  # pre-0.18 scikit-learn API
from sklearn.utils import shuffle

from DatasetReader import DatasetReader                       # project-local module
from FreemanEncoder import FreemanEncoder                     # project-local module


class KNN_statistic(object):
    '''
    k-NN over Freeman chain codes with edit distance, plus helpers for
    collecting cross-validation, training, and test accuracies.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self.dsr = DatasetReader()
        self.fenc = FreemanEncoder()
        self.training_data = []

    def generate_labelled_sequences(self, freeman_codes_dict):
        labelled_sequences = []
        codes_list = freeman_codes_dict.items()
        for tup in codes_list:
            for code in tup[1]:
                labelled_sequences.append((tup[0], code))
        return labelled_sequences

    def prepare_data(self, arrays_data, arrays_labels, split=0.2):
        # Separate the data into a training and a test set;
        # split is the test-set ratio (the default is 0.20).
        ad_train, ad_test, al_train, al_test = train_test_split(
            arrays_data, arrays_labels, test_size=split, random_state=42)
        return ad_train, ad_test, al_train, al_test

    def get_neighbors(self, data, data_label, test_instance, k):
        # Get the list of the k nearest neighbors to a test instance
        distances = []
        for i in range(len(data)):
            dist = edit_dist(test_instance, data[i])
            distances.append((data[i], data_label[i], dist))
        distances.sort(key=operator.itemgetter(2))
        neighbors = []
        for x in range(0, k):
            neighbors.append([distances[x][0], distances[x][1]])
        return neighbors

    def get_label(self, neighbors):
        # Determine the label of a test instance by majority vote of its neighbors
        labels = {}
        for neighbor in neighbors:
            if neighbor[1] not in labels:
                labels[neighbor[1]] = 1
            else:
                labels[neighbor[1]] += 1
        sorted_labels = sorted(labels.items(), key=operator.itemgetter(1), reverse=True)
        return sorted_labels[0][0]

    def evaluation(self, data, data_for_distance_calculation, data_label,
                   data_for_distance_calculation_label, k=3):
        # Evaluate the accuracy of knn
        correct_count = 0
        for instance in range(len(data)):
            neighbors = self.get_neighbors(data_for_distance_calculation,
                                           data_for_distance_calculation_label,
                                           data[instance], k)
            label = self.get_label(neighbors)
            if int(label) == int(data_label[instance]):
                correct_count += 1
        return float(correct_count) / len(data)

    def knn_train(self, dataset_path, train_test_split=0.2):
        dataset = self.dsr.read_dataset_images(dataset_path)
        freeman_codes_dict = self.fenc.encode_freeman_dataset(dataset)
        _, arrays_data, arrays_label = self.dsr.gen_labelled_arrays(freeman_codes_dict)
        arrays_data, arrays_label = shuffle(arrays_data, arrays_label)
        ad_train, ad_test, al_train, al_test = self.prepare_data(
            arrays_data, arrays_label, split=train_test_split)
        # Cross validation with 5 folds
        kf = KFold(len(ad_train), 5)
        result = 0
        for train_index, test_index in kf:
            ad_train_kfold, ad_test_kfold = ad_train[train_index], ad_train[test_index]
            al_train_kfold, al_test_kfold = al_train[train_index], al_train[test_index]
            result += self.evaluation(ad_test_kfold, ad_train_kfold,
                                      al_test_kfold, al_train_kfold, k=2)
        result_average = result / 5
        # Result with the training set
        result_training = self.evaluation(ad_train, ad_train, al_train, al_train, k=2)
        # Result with the test set
        result_test = self.evaluation(ad_test, ad_train, al_test, al_train, k=2)
        return result_average, result_training, result_test

# knn = KNN_strings(n_neighbors=1)
# knn = KNN_statistic()
# results = []
# for x in range(50):
#     result_average, result_training, result_test = knn.knn_train("/home/thovo/PycharmProjects/CharacterRecognition/digits_dataset", 0.2)
#     text = result_average.__str__() + " , " + result_training.__str__() + " , " + result_test.__str__() + "\n"
#     results.append(text)
#
# f = open("Results/knn.txt", "w")
# for item in results:
#     f.write(item)
# f.close()
def __init__(self, dataset_path, args):
    self._dataset_path = dataset_path
    self._documents = DatasetReader(dataset_path, args).read_dataset()
from copy import deepcopy

import numpy as np
import torch                                   # needed for torch.utils.data.DataLoader below
import torch.nn as nn                          # needed for nn.BCELoss below
import torch.optim as optim
from matplotlib import pyplot as plt

from Args import DIM, ROOT, EPOCHS, BATCH_SIZE, NUM_WORKERS, LEARNING_RATE
from DatasetReader import DatasetReader
from Evaluation import MeanDiceCoefficient
from model import UNet

if (__name__ == "__main__"):
    model = UNet().cuda()
    loss_fn = nn.BCELoss()
    optimiser = optim.Adam(model.parameters(), lr=LEARNING_RATE)

    trainset = DatasetReader(ROOT + "train/")
    testset = deepcopy(trainset)
    testset.setTrainMode(False)

    trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                              shuffle=True, num_workers=NUM_WORKERS)
    testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
                                             shuffle=True, num_workers=NUM_WORKERS)

    for epoch in range(EPOCHS):
        # Training phase
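        # The original snippet ends at the training phase; below is a minimal
        # sketch of a conventional BCE training step, assuming the loader
        # yields (image, mask) pairs and the UNet emits sigmoid probabilities.
        model.train()
        for images, masks in trainloader:
            images, masks = images.cuda(), masks.cuda()
            optimiser.zero_grad()
            outputs = model(images)        # forward pass
            loss = loss_fn(outputs, masks)
            loss.backward()
            optimiser.step()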