Example #1
0
def main():
    """Parse CLI options, train the MNIST model, and serialize its weights.

    Side effects: creates PathManager.MODEL_DIR if needed and writes
    ``mnist_cnn.pth`` into it.
    """
    # Command-line configuration for the training run.
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    args = parser.parse_args()

    # Use the GPU only when it is available and not explicitly disabled.
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    train_loader, test_loader = get_data_loader(args.batch_size,
                                                args.test_batch_size, use_cuda)
    model = load_model(device=device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr,
                          momentum=args.momentum)

    # One train pass plus one evaluation pass per epoch.
    for epoch in range(1, args.epochs + 1):
        train(model, device, train_loader, optimizer, epoch, args.log_interval)
        test(model, device, test_loader)

    # Persist the trained weights alongside the project's other artifacts.
    model_dir = PathManager.MODEL_DIR
    model_dir.mkdir(parents=True, exist_ok=True)
    torch.save(model.state_dict(), str(model_dir / "mnist_cnn.pth"))
Example #2
0
    def test_load_model(self):
        """load_model must yield a torch.nn.Module on every available device."""
        device_names = ["cpu"] + (["cuda"] if torch.cuda.is_available() else [])

        for name in device_names:
            device = torch.device(name)
            with self.subTest(f"Device {device}"):
                model = load_model(device)
                self.assertTrue(isinstance(model, torch.nn.Module))
    def test_estimate(self):
        """estimate must map the bundled sample image to a digit in [0, 9]."""
        # Fixture image lives next to this test module.
        image = Image.open(Path(__file__).resolve().parent / 'datas' / 'sample.jpg')

        device_names = ["cpu"] + (["cuda"] if torch.cuda.is_available() else [])

        for name in device_names:
            device = torch.device(name)
            model = load_model(device)
            with self.subTest(f"Device {device}"):
                label = estimate(data=image, model=model, device=device)
                self.assertTrue(0 <= label <= 9)
Example #4
0
def main():
    """Classify a single MNIST image given on the command line.

    Prints the estimated digit label to stdout.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_path', action='store', required=True,
                        help='path to mnist image')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    args = parser.parse_args()

    # Use the GPU only when it is available and not explicitly disabled.
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    model = load_model(device, trained=True)  # pretrained weights
    image = Image.open(args.image_path)       # the sample to classify

    label = estimate(image, model, device)
    print(f"Estimated label: {int(label)}")
Example #5
0
    estimator = EstimateAPI()
    return estimator.estimate()


@app.route('/hc', methods=['GET'])
def hc():
    """Health-check endpoint: always reports the service as up."""
    return 'alive'


# global variables for ML
from app.ml.model import load_model

# GPU inference is opted into via the USE_GPU env var; unset or '0' means CPU.
env_USE_GPU = os.getenv('USE_GPU')
USE_GPU = env_USE_GPU is not None and env_USE_GPU != '0'
DEVICE = 'cuda' if USE_GPU else 'cpu'
# Load the trained model once at import time so every request reuses it.
MODEL = load_model(device=DEVICE, trained=True)

if __name__ == '__main__':
    # Load the local API configuration (resolved relative to the package root).
    config_file = Path(
        __file__).resolve().parents[1] / 'config' / 'api_config_local.yml'
    with config_file.open('r') as fp:
        # Fix: yaml.load without an explicit Loader is deprecated (PyYAML >= 5.1)
        # and can construct arbitrary Python objects; safe_load restricts the
        # document to plain tags, which is all a config file needs.
        config = yaml.safe_load(fp)

    from logging import getLogger, StreamHandler, DEBUG

    # Route DEBUG-and-above records from the root logger to a stream handler.
    logger = getLogger()
    handler = StreamHandler()
    handler.setLevel(DEBUG)
    logger.setLevel(DEBUG)
    logger.addHandler(handler)