# Write the predicted class name to a text file
class_filename = filename_root + "_class.txt"
with open(os.path.join(class_path, class_filename), 'w') as f:
    f.write(result_class_name)

# Load model
model = tf.keras.models.load_model('bacteria_model.h5', compile=False)
losses = {"class_output": "categorical_crossentropy", "segm_output": wbce}
lossWeights = {"class_output": 1.0, "segm_output": 1.0}
model.compile(optimizer='adam', loss=losses, loss_weights=lossWeights,
              metrics=['accuracy'])

# Get training data
train_data = prepare_train_data(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS, False)
X_train = train_data['X_train']

# Get testing data
test_data = prepare_test_data(IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)
X_test = test_data['X_test']

# Get results (the model has two heads: classification and segmentation)
class_predictions_train, segm_predictions_train = model.predict(X_train, verbose=1)
class_predictions_test, segm_predictions_test = model.predict(X_test, verbose=1)

save_results_to_files(class_predictions_train, segm_predictions_train, 'train')
save_results_to_files(class_predictions_test, segm_predictions_test, 'test')
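# Note: `wbce` above is referenced but never defined in this snippet. The sketch
# below is one plausible stand-in, a weighted binary cross-entropy that up-weights
# foreground pixels, not the original author's implementation; the `pos_weight`
# parameter and its default are assumptions.
import tensorflow as tf

def wbce(y_true, y_pred, pos_weight=5.0):
    """Weighted binary cross-entropy for class-imbalanced segmentation masks."""
    y_true = tf.cast(y_true, tf.float32)
    # Per-pixel BCE, with positive (foreground) terms scaled by pos_weight.
    bce = tf.keras.backend.binary_crossentropy(y_true, y_pred)
    weights = y_true * pos_weight + (1.0 - y_true)
    return tf.reduce_mean(weights * bce)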
# Snippet truncated above: the argparse setup and earlier arguments are missing.
# The first call is reconstructed from its surviving help string; the flag name
# follows from `args.lr` below, and the original default value is unknown.
parser.add_argument('--lr', type=float,
                    help='initial learning rate')
parser.add_argument('--epochs', type=int, default=10, help='training epochs')
parser.add_argument('--seed', type=int, default=19, help='random seed')
# for debug
parser.add_argument('--datanum', type=int, default=-1,
                    help='train on only the first N samples (for debugging)')
args = parser.parse_args()

## Example of training from files and writing out the model files
from prepare_data import get_sentences_from_files, prepare_train_data

sentences = get_sentences_from_files(args.datafiles)
train_data, train_target, word_to_idx, target_to_idx = prepare_train_data(sentences)
np.save(args.word_to_idx, word_to_idx)
np.save(args.target_to_idx, target_to_idx)

if args.datanum > 0:
    train_data = train_data[:args.datanum]
    train_target = train_target[:args.datanum]

trained_model = train_model(train_data, train_target, word_to_idx, target_to_idx,
                            model_type=args.type,
                            embedding_dim=args.emdim,
                            hidden_dim=args.hiddim,
                            epochs=args.epochs,
                            learning_rate=args.lr,
                            seed=args.seed)
torch.save(trained_model, args.model)
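# Follow-up (not in the original): reloading the artifacts this script saves.
# np.save pickles each dict as a 0-d object array, so loading it back requires
# allow_pickle=True and .item(); the file names here are hypothetical.
import numpy as np
import torch

word_to_idx = np.load('word_to_idx.npy', allow_pickle=True).item()
target_to_idx = np.load('target_to_idx.npy', allow_pickle=True).item()
# torch.save stored the full module (not a state_dict), so recent PyTorch
# needs weights_only=False to unpickle it.
model = torch.load('model.pt', weights_only=False)
model.eval()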
parser.add_argument('--model_path', type=str, default='./logs/model_zoo/',
                    help='model path')
parser.add_argument('--model_name_train', type=str, default='guo.h5',
                    help='trained model name')
parser.add_argument('--model_name_predict', type=str, default='guo.h5',
                    help='model used to predict')
parser.add_argument('--result_path', type=str, default='./result',
                    help='result path')
parser.add_argument('--result_stats_path', type=str, default='./logs/statistic/',
                    help='result statistics path')
parser.add_argument('-t', '--train_mode', type=lambda x: (str(x).lower() == 'true'),
                    default=True, help='train the model or not')
parser.add_argument('-i', '--nEpochs', type=int, default=2,
                    help='number of epochs to train for')
parser.add_argument('-u', '--upscale_factor', type=int, default=2,
                    help='super resolution upscale factor')
opt = parser.parse_args()

if opt.train_mode:
    print('===> Loading datasets')
    train_data, train_label = prepare_train_data(opt.train_data_path, opt.upscale_factor)
    print(train_data.shape)
    print(train_label.shape)
    test_data, test_label = prepare_test_data(opt.test_data_path, opt.upscale_factor)
    print(test_data.shape)
    print(test_label.shape)
    data_all = [train_data, train_label, test_data, test_label]

    print('===> Building model')
    train(data_all, os.path.join(opt.model_path, opt.model_name_train), opt.nEpochs)
    model_name_predict = opt.model_name_train

    print('===> Testing')
    stats = predict(os.path.join(opt.model_path, model_name_predict),
                    opt.test_data_path, opt.result_path)
else:
    print('===> Testing')
    stats = predict(os.path.join(opt.model_path, opt.model_name_predict),
                    opt.test_data_path, opt.result_path)

result_stats_save(stats, opt.result_stats_path)
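# Aside (not from the original): the `type=lambda x: ...` idiom on --train_mode
# exists because argparse's type=bool returns True for any non-empty string,
# including "false". A minimal demonstration with a hypothetical parser:
import argparse

def str2bool(x):
    # Parse the text explicitly rather than relying on bool('false') == True.
    return str(x).lower() == 'true'

demo_parser = argparse.ArgumentParser()
demo_parser.add_argument('-t', '--train_mode', type=str2bool, default=True)
print(demo_parser.parse_args(['--train_mode', 'false']))  # Namespace(train_mode=False)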
import numpy as np
import scipy.io as sio
import gradient as gr
import prepare_data as pre
import sigmoid
import random
import math
import time
import csv
import datetime
import multilayer

# Load the MATLAB data sets
train_small = sio.loadmat('train_small.mat')
train = sio.loadmat('train.mat')
test = sio.loadmat('test.mat')

train_small_data = train_small['train']
train_data = train['train']
test_data = test['test']

features, labels = pre.prepare_train_data(train_small_data)
x_test, y_test = pre.prepare_test_data(test_data)

if __name__ == '__main__':
    ann = multilayer.NeuralNet()
    # Index 6 selects one prepared subset; 0.01 and 500 are presumably the
    # learning rate and iteration count (the signature is not shown here).
    weights_ret, bias_ret = ann.train_multilayer_SGD(labels[6], features[6],
                                                     0.01, 500, y_test, x_test)
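# Aside (not from the original): scipy.io.loadmat returns a plain dict keyed by
# the MATLAB variable names, plus metadata entries such as '__header__',
# '__version__', and '__globals__', which is why the code above indexes
# train['train'] and test['test']. A quick way to inspect an unfamiliar .mat file:
import scipy.io as sio

mat = sio.loadmat('train.mat')
print([k for k in mat if not k.startswith('__')])  # the real variables, e.g. ['train']
train_array = mat['train']  # a NumPy array; MATLAB vectors come back as 2-D
print(train_array.shape, train_array.dtype)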