def main():
    """Evaluate the saved CIFAR-10 CNN classifier on the held-out test batch."""
    trained_model = './trained_model.pth'
    test_batch_dir = './cifar-10/test_batch'

    # Rebuild the architecture, then restore the trained weights onto the GPU.
    classifier = CNNModel()
    classifier.load_state_dict(torch.load(trained_model))
    classifier.cuda()
    classifier.eval()

    test_x, test_y = unpickle(test_batch_dir)
    # CIFAR-10 images are stored flat; reshape to NCHW float tensors on the GPU.
    test_x = torch.tensor(
        np.reshape(test_x, (len(test_x), 3, 32, 32))).to('cuda', dtype=torch.float)
    test_y = torch.tensor(test_y).cuda()

    classes = [
        'Airplane', 'Automobile', 'Bird', 'Cat', 'Deer',
        'Dog', 'Frog', 'Horse', 'Ship', 'Truck'
    ]

    # calculating the accuracy of our classifier;
    print("Calculating accuracy...")
    total = len(test_x)
    with torch.no_grad():
        logits = classifier(test_x)
        _, pred_labels = torch.max(logits, 1)
        # Number of test images whose predicted class matches the label.
        correct = (pred_labels == test_y).sum().item()
    print('Accuracy: %5d %%' % (correct / total * 100))
def restore_model(model_dir, model_name):
    """
    Restore model from disk.

    :param model_dir: directory where the model checkpoint file is stored
    :param model_name: file name of the stored checkpoint
    :return: CNNModel with the checkpoint's ``state_dict`` loaded
    """
    restore_file = os.path.join(model_dir, model_name)
    # Bug fix: os.path.exists() returns a bool and never raises, so the
    # original `try: os.path.exists(...) except FileNotFoundError` could
    # never detect a missing file. Check the return value explicitly.
    if not os.path.exists(restore_file):
        print("Model checkpoint file does NOT exist.")
        sys.exit(-1)
    try:
        checkpoint = torch.load(restore_file)
    except IOError:
        print("Could not load the checkpoint file.")
        sys.exit(-1)
    model = CNNModel()
    model.load_state_dict(checkpoint['state_dict'])
    return model
def run(args):
    """Train and validate a CNNModel over ``args.epochs`` epochs.

    Expects ``train``, ``validation``, ``data_transforms``,
    ``validation_data_transforms`` and a module-level ``writer`` to be
    defined elsewhere in this file.

    :param args: namespace with data, batch_size, device, checkpoint,
                 lr, step_size, epochs and log_interval attributes
    """
    train_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(args.data + '/train', transform=data_transforms),
        batch_size=args.batch_size, shuffle=True, num_workers=16)
    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(args.data + '/val',
                             transform=validation_data_transforms),
        batch_size=args.batch_size, shuffle=False, num_workers=16)

    model = CNNModel()
    model = nn.DataParallel(model)
    model = model.to(args.device)
    # Optionally resume from a previous checkpoint.
    if args.checkpoint is not None:
        model.load_state_dict(torch.load(args.checkpoint))

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=1e-3)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.step_size)

    for epoch in range(1, args.epochs + 1):
        train(epoch, model, optimizer, train_loader, args.log_interval)
        validation(epoch, model, val_loader)
        # Bug fix: since PyTorch 1.1 the scheduler must step AFTER the
        # optimizer steps of the epoch; stepping first skips the initial
        # learning-rate value of the schedule.
        scheduler.step()
        model_file = 'model_' + str(epoch) + '.pth'
        torch.save(model.state_dict(), model_file)
    writer.close()
def on_message(client, userdata, msg):
    """MQTT callback: collect a trainer's serialized weights; once all
    ``NUM_TRAINERS`` have reported, aggregate and broadcast them.

    :param client: MQTT client instance (unused here)
    :param userdata: user data set on the client (unused here)
    :param msg: MQTT message whose payload is a torch-serialized state_dict
    """
    try:
        print("Model from trainer received!")
        print('Topic: ', msg.topic)
        #print('Message: ', msg.payload)
        model_str = msg.payload
        buff = io.BytesIO(bytes(model_str))
        # Create a dummy model to read weights
        model = CNNModel()
        model.load_state_dict(torch.load(buff))
        global trainer_weights
        trainer_weights.append(copy.deepcopy(model.state_dict()))
        # Wait until we get trained weights from all trainers
        if len(trainer_weights) == NUM_TRAINERS:
            update_global_weights_and_send(trainer_weights)
            trainer_weights.clear()
    except Exception:
        # Bug fix: a bare `except:` would also swallow SystemExit and
        # KeyboardInterrupt; keep the best-effort logging but narrow it.
        print("Unexpected error:", sys.exc_info())
# Evaluate a trained DQN policy on Breakout and render the result.
import argparse
import sys

import torch

from model import CNNModel
from dqn_agent import ObsPreproc, TestAgent

sys.path.append('..')
from common import make_env  # noqa

parser = argparse.ArgumentParser()
parser.add_argument('--model_path', type=str, default='./ckpt.pth',
                    help='The model path')
opt = parser.parse_args()

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
env = make_env('BreakoutNoFrameskip-v4', 0, 1, clip_reward=False)
in_ch = env.observation_space.shape[-1]
n_action = env.action_space.n

model = CNNModel(in_ch, n_action)
# Bug fix: unconditional model.cuda() crashed on CPU-only hosts even though
# `device` already falls back to 'cpu'; move the model to `device` instead.
model.to(device)
model.load_state_dict(torch.load(opt.model_path, map_location=device))

obs_preproc = ObsPreproc(device=device)
test_agent = TestAgent(model, env, obs_preproc, device, 30)
info = test_agent.evaluate()
#print(info['average_return'])
#writer.add_scalar("reward",info['average_return'],i)
test_agent.display()
data_dict['y_pdb'].extend(pdb) data_dict['y_true'].extend(y_true.data) data_dict['y_pred'].extend(output.data) return data_dict if __name__ == '__main__': logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) args = get_arguments() cli_args = {key: value for key, value in vars(args).items() if value} # Commandline arguments get higher priority over default configuration values test_config = ChainMap(cli_args, DEFAULT_TEST_CONFIG) restore_file = os.path.join(test_config.model_dir, 'model.pth.tar') checkpoint = torch.load(restore_file) model = CNNModel() model.load_state_dict(checkpoint['state_dict']) model = utils.restore_model(test_config.model_dir, 'model.pth.tar') result_dict = test(model, test_loader) saved_csv = os.path.join(test_config.out_csv_dir, 'predictions.csv') utils.save_dict_to_csv(saved_csv, result_dict)
# Decode phone sequences from MFCC test features with a trained CNN and
# write them to a CSV: usage `script <test_data> <output_csv>`.
from model import CNNModel
import sys
import pickle

if __name__ == '__main__':
    # Fix: use `with` so the pickle file handle is closed deterministically
    # (the original `pickle.load(open(...))` leaked it).
    with open('norm.pkl', 'rb') as norm_file:
        norm = pickle.load(norm_file)

    x_test, test_id = load_test_data('mfcc', sys.argv[1])
    x_test = x_test / norm

    test_loader = DataLoader(
        dataset=TensorDataset(torch.FloatTensor(x_test),
                              torch.FloatTensor(x_test)),
        batch_size=64, shuffle=False, num_workers=4)

    model = CNNModel(41, 40).cuda()
    model.load_state_dict(torch.load('model/model_cnn.pt'))
    model.eval()

    output = []
    for x, _ in test_loader:
        # Fix: the deprecated Variable() wrapper is a no-op in modern
        # PyTorch; feed the tensor directly.
        y = model(x.cuda())
        y = torch.max(y, -1)[1].data.cpu().numpy()
        y = encode(y)
        output += y

    # Fix: `with` guarantees the output CSV is flushed and closed.
    with open(sys.argv[2], 'w') as output_file:
        print('id,phone_sequence', file=output_file)
        for sample_id, seq in zip(test_id, output):
            print('{},{}'.format(sample_id, seq), file=output_file)
try: print("Model received from coordinator!") print(msg.topic + ' ' + str(msg.payload)) except: print("Unexpected error:", sys.exc_info()[0]) local_mqttclient = mqtt.Client() local_mqttclient.connect(LOCAL_MQTT_HOST, LOCAL_MQTT_PORT, 60) local_mqttclient.on_connect = on_connect_local local_mqttclient.on_message = on_message # Read test model model = CNNModel() model.load_state_dict(torch.load('models/mnist_cnn.pt')) buff = io.BytesIO() torch.save(model.state_dict(), buff) buff.seek(0) # Convert model to string for transmission model_str = buff.getvalue() local_mqttclient.publish(LOCAL_MQTT_TOPIC, payload=model_str, qos=0, retain=False) #local_mqttclient.publish(LOCAL_MQTT_TOPIC, payload="test message", qos=0, retain=False) local_mqttclient.loop_forever()
# Evaluate the best saved relation-classification CNN on the test set and
# plot its precision-recall curve.
import torch
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from sklearn.metrics import auc

from model import CNNModel
from evaluate import evaluate
from local_config import *
from data_helper import testset, word_vectors, word2id, rel2id

if __name__ == "__main__":
    test_loader = DataLoader(testset, shuffle=False, batch_size=BATCH_SIZE)
    model = CNNModel(torch.tensor(word_vectors, dtype=torch.float32), rel2id)
    # Fix: map_location keeps this runnable on CPU-only hosts even when the
    # checkpoint was saved from a GPU; evaluation below runs on CPU anyway.
    model.load_state_dict(torch.load(BEST_MODEL_SAVE_PATH, map_location='cpu'))
    # Disable dropout / batch-norm updates for evaluation (idempotent if
    # evaluate() also sets it).
    model.eval()

    acc, precision, recall, f1_micro, f1_macro, prs = evaluate(
        model, test_loader, torch.device('cpu'), prc=True, report=True)
    ps, rs, ths = prs

    print('TEST >>> ACC: %.4f, Precision: %.4f, Recall: %.4f, F1-micro: %.4f, F1-macro: %.4f\n' \
        % (acc, precision, recall, f1_micro, f1_macro))
    print('TEST >>> AUC: %.4f' % auc(rs, ps))

    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(rs, ps, lw=1.5)
    ax.set_title('Precision vs. Recall')
    ax.set_xlabel('Recall')
    ax.set_ylabel('Precision')
    ax.grid(True)
mode="test", transform=transforms.ToTensor()) test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False) model = CNNModel() # Specify model to load. # Weights specified in 'Weights' folder model_no = 22 iterr = 5800 # model.load_state_dict(torch.load('Weights/model{}_iterr={}_state_dict.pt'.format(model_no, iterr), # map_location=torch.device('cpu'))) model.load_state_dict( torch.load("weights.pt", map_location=torch.device('cpu'))) # Initialize final output arrays. file_paths_list = np.array([]) preds_list = np.array([]) for i, (images, file_paths) in enumerate(test_loader): images = Variable(images) # Evaluate the test data. model.eval() outputs = model(images).detach() # Map predictions to meotions. preds = emotion_labels[np.argmax(outputs, axis=1)]