import random

import torch

from preprocessing_utils import Preprocessing
from image_importer import Image_Importer  # module path assumed

dtype = torch.float
device = torch.device('cpu')


def get_num_and_data(args):
    if not args.num:
        print('use test data')
        pre = Preprocessing('digits')
        pre.load_data(filename='test.csv', name='test')
        X_test = torch.tensor(pre.get('test').drop(columns=['0']).values,
                              device=device, dtype=dtype)
        y_test = torch.tensor(pre.get('test')['0'].values,
                              device=device, dtype=dtype)
        # randint is inclusive on both ends, so cap at len(y_test) - 1
        index = random.randint(0, len(y_test) - 1)
        print('index {}'.format(index))
        num = int(y_test[index].item())
        print('real_number {}'.format(num))
        data_to_predict = X_test[index:index + 1, :]
        return num, data_to_predict
    else:
        print('use written images')
        num = int(args.num)
        im_imp = Image_Importer('digits')
        im_imp.load_image_as_grey(num)
        print('real_number {}'.format(num))
        data_to_predict = im_imp.get_image_as_256px_array(num)
        return num, data_to_predict
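# A minimal sketch of how get_num_and_data might be wired into a prediction
# entry point. The '--num' flag is read by the function above; the LogReg
# signature and 'logreg_digits' name come from the training script below.
# Loading trained weights (e.g. via torch.load / load_state_dict) is assumed
# and omitted here.
if __name__ == '__main__':
    import argparse

    from models import LogReg

    parser = argparse.ArgumentParser(description='Digits prediction')
    parser.add_argument('--num', default=None,
                        help='digit with a prepared written image; '
                             'omit to sample from test.csv')
    args = parser.parse_args()

    num, data_to_predict = get_num_and_data(args)
    model = LogReg('logreg_digits', 256, 10)  # 256 pixels, 10 classes
    model.eval()
    with torch.no_grad():
        prediction = torch.argmax(model(data_to_predict), dim=1).item()
    print('predicted {}'.format(prediction))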
import argparse

import torch
import matplotlib.pyplot as plt

from models import LogReg
from preprocessing_utils import Preprocessing
from train_utils import TrainClassifier  # module path assumed

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Digits')
    parser.add_argument('--s_model', default=True, help='save trained model')
    args = parser.parse_args()

    n_classes = 10
    n_epochs = 200

    pre = Preprocessing('digits')
    pre.load_data(filename='train.csv', name='train')
    X_df = pre.get(name='train').drop(columns=['0'])
    y_df = pre.get(name='train')['0']

    dtype = torch.float
    device = torch.device('cpu')

    model_name = 'logreg_digits'
    model = LogReg(model_name, 256, n_classes)
    learning_rate = 0.0001
    batch_size = 32
    train_classifier = TrainClassifier(model, X_df, y_df)
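    # The training call and its five-value return mirror the IMDB script
    # below; the plotting step is a sketch that uses the otherwise-unused
    # matplotlib import to inspect convergence.
    trained_model, optimizer, criterion, loss_hist, loss_validate_hist = \
        train_classifier.run_train(n_epochs=n_epochs)

    plt.plot(loss_hist, label='train loss')
    plt.plot(loss_validate_hist, label='validation loss')
    plt.legend()
    plt.show()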
import argparse

from models import LogReg
from preprocessing_utils import Preprocessing
from train_utils import TrainClassifier  # module path assumed

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='IMDBData')
    parser.add_argument('--n_feat', default=1000, help='number of features')
    parser.add_argument('--s_model', default=False, help='save trained model')
    args = parser.parse_args()

    pre = Preprocessing('IMDB')
    n_classes = 2
    n_features = int(args.n_feat)
    n_epochs = 100

    pre.load_data(filename=f'training_data_{n_features}.csv',
                  name='training_data')
    X_df = pre.get(name='training_data').drop(columns=['target'])
    y_df = pre.get(name='training_data')['target']

    model = LogReg('log_reg', n_features, n_classes)
    train_classifier = TrainClassifier(model, X_df, y_df)
    trained_model, optimizer, criterion, loss_hist, loss_validate_hist = \
        train_classifier.run_train(n_epochs=n_epochs)
    # name the results after the actual feature count rather than a
    # hard-coded 100
    pre.save_results(loss_hist, loss_validate_hist, f'log_reg_{n_features}')
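    # The '--s_model' flag is parsed but never used above; a sketch of the
    # intended save step, assuming a standard torch.save of the state dict
    # (the file name is illustrative).
    if args.s_model:
        import torch
        torch.save(trained_model.state_dict(), f'log_reg_{n_features}.pt')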
"max_df": 1.0, "min_df": 0, "stop_words": [], # keep defaults "analyzer": "word", "tokenizer": None, "lowcase": False } lda_args = { "n_topics": 2, "max_iter": 1, "n_jobs": -1, # Note: thold is more prior than n_topic_words # But works slower "n_topic_words": 4, "word_thold": None } preprocessing_pipeline = Preprocessing() doc2topic = Docs2Topics(feature_extraction_args, lda_args) preprocessed_text = preprocessing_pipeline.preprocess_batch([texts]) topics, words_per_topic = doc2topic.fit_get_topics(preprocessed_text) import pprint pprint.pprint(topics) print(words_per_topic)
import argparse
import time

import numpy as np
import pandas as pd

from preprocessing_utils import Preprocessing
from text_transformer import TextTransformer  # module path assumed

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='IMDBData')
    parser.add_argument('--n_words', default=100, help='num words')
    parser.add_argument('--s_model', default=False,
                        help='save transform model')
    args = parser.parse_args()

    n_words = int(args.n_words)  # CLI values arrive as strings

    pre = Preprocessing('imdb')
    TRAIN_PATH_POS = 'train/pos/'
    TRAIN_PATH_NEG = 'train/neg/'
    pre.load_all_texts_from_directory(path=TRAIN_PATH_POS, name='raw_pos')
    pre.load_all_texts_from_directory(path=TRAIN_PATH_NEG, name='raw_neg')
    texts_list = pd.concat([pre.data['raw_pos'], pre.data['raw_neg']])

    print('fit & transform training data')
    text_transformer = TextTransformer(num_words=n_words)
    train_features = text_transformer.fit_and_transform(texts_list)
    # label positive reviews 1 and negative reviews 0
    train_features['target'] = np.append(np.ones(len(pre.data['raw_pos'])),
                                         np.zeros(len(pre.data['raw_neg'])))
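    # The training script above loads 'training_data_{n_features}.csv', so a
    # plausible final step is persisting the features under that name;
    # whether this goes through pandas directly or a Preprocessing helper is
    # an assumption.
    train_features.to_csv(f'training_data_{n_words}.csv', index=False)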
from preprocessing_utils import Preprocessing

if __name__ == '__main__':
    pre_train = Preprocessing('digits')
    kwarg = {'header': None, 'sep': ' '}
    pre_train.load_data(filename='zip.train', name='raw', **kwarg)
    pre_train.cleanup(name='raw',
                      drop_duplicates=True,
                      dropna={'axis': 1, 'thresh': 2})
    print(pre_train.get('clean').head())

    #classes = ['0_0.0', '0_1.0', '0_2.0', '0_3.0', '0_4.0', '0_5.0', '0_6.0', '0_7.0', '0_8.0', '0_9.0']
    X = pre_train.get('clean').drop(columns=[0])
    y = pre_train.get('clean')[0]

    pre_train.set(name='train', value=pre_train.get('clean'))
    pre_train.save(name='train')

    pre_test = Preprocessing('digits')
    kwarg = {'header': None, 'sep': ' '}
    pre_test.load_data(filename='zip.test', name='raw', **kwarg)
    pre_test.cleanup(name='raw',
                     drop_duplicates=True,
                     dropna={'axis': 1, 'thresh': 2})
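    # The prediction helper above loads 'test.csv', so the test split is
    # presumably persisted the same way as the training split.
    pre_test.set(name='test', value=pre_test.get('clean'))
    pre_test.save(name='test')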