# Train a logistic-regression classifier on the 'digits' dataset and
# (optionally, via the --s_model flag) export the trained model.
args = parser.parse_args()

n_classes = 10
n_epochs = 200

# Load the training data; column '0' holds the label, all others are features.
pre = Preprocessing('digits')
pre.load_data(filename='train.csv', name='train')
X_df = pre.get(name='train').drop(columns=['0'])
y_df = pre.get(name='train')['0']

dtype = torch.float
device = torch.device("cpu")

# Derive the feature count from the data instead of hard-coding 256, so the
# script keeps working if the input dimensionality changes.
n_features = X_df.shape[1]

model_name = 'logreg_digits'
model = LogReg(model_name, n_features, n_classes)

learning_rate = 0.0001
batch_size = 32

train_classifier = TrainClassifier(model, X_df, y_df)
trained_model, optimizer, criterion, loss_hist, loss_val_hist, best_param = \
    train_classifier.run_train(n_epochs=n_epochs, lr=learning_rate, batch_size=batch_size)
pre.save_results(loss_hist, loss_val_hist, f'{model_name}')

# Restore the best parameters seen during training before evaluation/export.
trained_model.load_state_dict(state_dict=best_param)
trained_model.eval()

if args.s_model:
    m_exporter = ModelExporter('digits')
    # BUG FIX: previously exported 0 as the feature count; record the real one.
    m_exporter.save_nn_model(trained_model, optimizer, n_features, n_classes,
                             n_epochs, trained_model.get_args())
# Train a logistic-regression classifier on pre-extracted IMDB features,
# export it, then load the held-out test split into CPU tensors.
pre = Preprocessing('IMDB')

n_classes = 2
n_features = int(args.n_feat)  # feature count selects which CSVs are used
n_epochs = 100

pre.load_data(filename=f'training_data_{n_features}.csv', name='training_data')
X_df = pre.get(name='training_data').drop(columns=['target'])
y_df = pre.get(name='training_data')['target']

model = LogReg('log_reg', n_features, n_classes)
train_classifier = TrainClassifier(model, X_df, y_df)
trained_model, optimizer, criterion, loss_hist, loss_validate_hist = \
    train_classifier.run_train(n_epochs=n_epochs)

# BUG FIX: the results name interpolated the literal 100 (f'log_reg_{100}'),
# so runs with different --n_feat values would collide; use the actual
# feature count, matching the training_data_{n_features}.csv pattern.
pre.save_results(loss_hist, loss_validate_hist, f'log_reg_{n_features}')

m_exporter = ModelExporter('IMDB')
m_exporter.save_nn_model(trained_model, optimizer, n_features, n_classes, n_epochs)

# --- test split: load features/labels as tensors for evaluation ---
pre.load_data(filename=f'test_data_{n_features}.csv', name='test_data')
X_test_df = pre.get(name='test_data').drop(columns=['target'])
y_test_df = pre.get(name='test_data')['target']

dtype = torch.float
device = torch.device("cpu")
X_test = torch.tensor(X_test_df.values, device=device, dtype=dtype)
y_test = torch.tensor(y_test_df.values, device=device, dtype=torch.long)