def train_test():
    args = define_parse()

    # Build image paths and integer labels by walking the dataset directory.
    # label starts at -1 because the first os.walk() entry is the root
    # directory itself, which contains no image files; each class directory
    # then gets labels 0, 1, 2, ...
    data_path = []
    labels = []
    label = -1
    for dir_path, dir_names, file_names in os.walk(directory):
        for file_name in file_names:
            data_path.append(os.path.join(dir_path, file_name))
            labels.append(label)
        label += 1

    # Transform integer labels into one-hot vectors.
    labels_onehot = to_categorical(labels)
    print(labels_onehot.shape)

    # Number of classes: here this is a 101-class problem, so label ends up as 101.
    num_classes = label

    # Split data into training and test sets.
    X_train, X_test, y_train, y_test = train_test_split(
        data_path, labels_onehot, train_size=0.8)
    # Split the training set again to create a validation set.
    X_train, X_val, y_train, y_val = train_test_split(
        X_train, y_train, train_size=0.8)

    # Batch generators for fit_generator.
    train_batch_generator = Generator.BatchGenerator(
        X_train, y_train, batch_size, height, width)
    test_batch_generator = Generator.BatchGenerator(
        X_val, y_val, batch_size, height, width)

    if args.mode == 'train':
        VGG = VGG16.vgg16(height, width, ch, num_classes)
        model = VGG.build_model()

        # training
        fit_history = model.fit_generator(
            train_batch_generator,
            epochs=epoch,
            verbose=1,
            steps_per_epoch=train_batch_generator.batches_per_epoch,
            validation_data=test_batch_generator,
            validation_steps=test_batch_generator.batches_per_epoch,
            shuffle=True)
        model.save(save_model_name)

        # evaluate
        '''
        score = model.evaluate_generator(
            test_batch_generator,
            steps=test_batch_generator.batches_per_epoch,
            verbose=1)
        '''

    elif args.mode == 'test':
        model = load_model(load_model_name)

        # Class names used to map predictions back to labels.
        class_name = []
        with open(class_path, "r") as file:
            for i in file:
                class_name.append(i.replace('\n', ''))
        class_name = np.asarray(class_name)

        # Predict a single test image.
        img = load_img(X_test[0], target_size=(height, width))
        img_array = img_to_array(img) / 255  # normalization
        img_array = np.expand_dims(img_array, axis=0)  # add batch dimension
        pred = model.predict(img_array, verbose=0)
        print('prediction result : {}'.format(class_name[np.argmax(pred[0, :])]))
        print('correct answer : {}'.format(class_name[np.argmax(y_test[0, :])]))

    else:
        print('illegal input.')
        print('please select train or test')
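# train_test() relies on a define_parse() helper that is not part of this
# excerpt; only args.mode is read above. A minimal sketch under that
# assumption (the flag name and defaults here are illustrative, not the
# project's actual parser):
import argparse

def define_parse():
    parser = argparse.ArgumentParser(
        description='Train or test the VGG16 classifier')
    parser.add_argument('--mode', type=str, default='train',
                        help="'train' to fit and save the model, "
                             "'test' to load it and predict on one image")
    return parser.parse_args()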
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch VGG16 Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
                        help='learning rate (default: 0.001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()
    print(args)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    PATH_TO_IMAGES = ""
    TRAIN_DATASET_CSV = ""
    TEST_DATASET_CSV = ""

    # Grayscale images resized to 224x224 for VGG16 (interpolation=2 is bilinear).
    transform = transforms.Compose([
        transforms.Grayscale(),
        transforms.Resize((224, 224), interpolation=2),
        transforms.ToTensor()
    ])

    train_dataset = BoneDataset224(PATH_TO_IMAGES, TRAIN_DATASET_CSV, transform)
    test_dataset = BoneDataset224(PATH_TO_IMAGES, TEST_DATASET_CSV, transform)

    # Note: batch sizes are hard-coded here rather than taken from
    # args.batch_size / args.test_batch_size.
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=32,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                              batch_size=64,
                                              shuffle=True)

    # Path used to save the trained weights. The second argument to
    # VGG16.vgg16 appears to be an optional checkpoint path; pass None to
    # start from freshly initialised weights.
    PATH_TO_WEIGHTS = "./weights/" + "ws_vgg16_sdg.pt"
    model = VGG16.vgg16(False, None).to(device)

    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)

    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)

    torch.save(model.state_dict(), PATH_TO_WEIGHTS)
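# main() calls train() and test() helpers that are not shown in this excerpt.
# A minimal sketch of what they might look like, matching the call sites
# above and following the standard PyTorch example loop. The cross-entropy
# loss is an assumption (a classification objective); BoneDataset224 may in
# fact require a different loss, e.g. a regression loss.
import torch
import torch.nn.functional as F

def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)  # assumption: classification objective
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{}]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset), loss.item()))

def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            correct += output.argmax(dim=1).eq(target).sum().item()
    test_loss /= len(test_loader.dataset)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{}'.format(
        test_loss, correct, len(test_loader.dataset)))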