Example #1
# Imports assumed by the snippets in this section; helpers such as
# Simple_CNN, Simple_CNN_e2, create_dataloaders, train, run_t, and
# initialize_model come from the surrounding project.
import copy
import timeit
from pathlib import Path

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from skimage.color import rgb2gray  # assumed source of rgb2gray
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from torch.jit import trace


def custom_classifier(
    model=Simple_CNN(img_size=128),
    batch_size=256,
    num_epochs=42,
    img_size=48,
    lr=1e-3,
    use_gpu=True,
    random_background=False,
    name="",
):
    """

    :param model:
    :param batch_size:  (int) batch size for training
    :param num_epochs: (int) number of training epochs
    :param img_size: size of input image
    :param lr: learning rate typically ~1e-3 - 1e-5
    :param use_gpu: (bool) use a GPU if available
    :param random_background: (bool) randomize uniform background
    :param name: (str) optionally name the saved state dicts
    """
    # Training settings
    use_cuda = torch.cuda.is_available()

    device = torch.device("cuda" if use_cuda and use_gpu else "cpu")
    train_loader, test_loader = create_dataloaders(
        batchsize=batch_size,
        img_size=img_size,
        random_background=random_background)

    model = model.to(device)

    loss = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=lr)

    for epoch in range(1, num_epochs + 1):
        train(model, device, train_loader, optimizer, epoch, loss)
        accuracy = run_t(model, device, test_loader, loss)

    background_extension = "_rand_backgr" if random_background else ""

    if name == "":
        save_name = Path(f"./models/custom_cnn_e{num_epochs}"
                         f"_size_{img_size}{background_extension}.pt")
    else:
        save_name = Path(name + ".pt")

    torch.save(model.state_dict(), save_name)

    return accuracy
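
The snippet relies on the helpers train and run_t, which are not shown. A
minimal sketch of what they might look like, assuming standard PyTorch
training and evaluation loops (names and signatures inferred from the calls
above; run_t is assumed to return the test accuracy):

def train(model, device, train_loader, optimizer, epoch, loss_fn):
    # One pass over the training set.
    model.train()
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        batch_loss = loss_fn(model(data), target)
        batch_loss.backward()
        optimizer.step()


def run_t(model, device, test_loader, loss_fn):
    # Evaluate on the test set and return the accuracy.
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            pred = model(data).argmax(dim=1)
            correct += (pred == target).sum().item()
            total += target.size(0)
    return correct / total
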
Example #2
def finetune_model(
    model_name="squeezenet",
    num_classes=2,
    batch_size=64,
    num_epochs=42,
    feature_extract=True,
    lr=1e-3,
    use_gpu=True,
    name="",
):
    """

    :param str model_name: model to be loaded
    :param int num_classes: number of classes
    :param int batch_size: size of batch used for SGD
    :param int num_epochs: Number of times the network is updated on the data
    :param bool feature_extract: deactivate gradients
    :param float lr: learning rate
    :param bool use_gpu: Allow or prohibit use of GPU
    :param str name: Optional path to save the network parameters to
    """

    model_ft, input_size = initialize_model(model_name,
                                            num_classes,
                                            feature_extract,
                                            use_pretrained=True)
    loss = nn.CrossEntropyLoss()
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda and use_gpu else "cpu")

    train_loader, test_loader = create_dataloaders(batchsize=batch_size,
                                                   img_size=input_size)

    model = model_ft.to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)

    for epoch in range(1, num_epochs + 1):
        train(model, device, train_loader, optimizer, epoch, loss)
        accuracy = run_t(model, device, test_loader, loss)

    if name == "":
        save_name = Path("./models/" + model_name + "_e" + str(num_epochs) +
                         ".pt")
    else:
        save_name = Path(name + ".pt")

    torch.save(model.state_dict(), save_name)
    return accuracy
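
initialize_model is not shown here; in the usual torchvision finetuning
recipe it loads a pretrained network, optionally freezes its weights, and
replaces the classification head. A minimal sketch covering only the
squeezenet case, assuming torchvision is available (the 224 input size and
the classifier layout come from torchvision's SqueezeNet):

from torchvision import models

def initialize_model(model_name, num_classes, feature_extract,
                     use_pretrained=True):
    # Sketch for the "squeezenet" case only.
    if model_name != "squeezenet":
        raise NotImplementedError(model_name)
    model = models.squeezenet1_0(pretrained=use_pretrained)
    if feature_extract:
        # Freeze the pretrained weights; only the new head is trained.
        for param in model.parameters():
            param.requires_grad = False
    # Replace the final 1x1 conv so the output matches num_classes.
    model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=1)
    input_size = 224  # SqueezeNet expects 224x224 inputs
    return model, input_size
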
Example #3
def create_histogram(path, randomize_background):
    """
    Create a normalized grayscale histogram of the training images.

    :param str path: path of the CSV file the histogram is written to
    :param bool randomize_background: randomize uniform background
    """
    num_episodes = 1
    batch_size = 1
    img_size = 32
    bins = int(256 / 2)
    torch.manual_seed(42)

    train_loader, test_loader, validation_loader = create_dataloaders(
        batch_size, img_size=img_size, random_background=randomize_background
    )
    del validation_loader
    del test_loader

    num_run = 0
    bin_counts_cum = None
    for episode in range(num_episodes):
        print("\nRun \t %.0f" % episode)
        for data, target in train_loader:
            del target

            # Reshape data for grayscale conversion (assumes the loader
            # yields channels-last data; a channels-first tensor would
            # need permute instead of view)
            data = data.view((-1, img_size, img_size, 3))
            data_train = data.detach().cpu().numpy()
            data_gray_train = rgb2gray(data_train)

            # Fix the bin range so counts from different batches share the
            # same binning (rgb2gray output is assumed to lie in [0, 1])
            bin_counts = np.histogram(data_gray_train, bins, range=(0, 1))

            if bin_counts_cum is None:
                bin_counts_cum = bin_counts[0]
            else:
                bin_counts_cum += bin_counts[0]

            num_run += 1
            if num_run % 1000 == 0:
                print(num_run)

    # Per-pixel frequency: batch_size is 1, so num_run counts images
    normalized_hist = bin_counts_cum / num_run / (img_size * img_size)
    np.savetxt(Path(path), normalized_hist, delimiter=",")
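
With the bin range fixed to [0, 1], the saved histogram is a per-pixel
frequency distribution that sums to roughly 1. A hypothetical invocation
(the output path is illustrative):

create_histogram("logs/train_histogram.csv", randomize_background=False)
hist = np.loadtxt("logs/train_histogram.csv", delimiter=",")
print(hist.shape, hist.sum())  # 128 bins, summing to ~1.0
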
Example #4

def main():
    """
    Train a logistic-regression baseline on grayscale training images and
    report its per-batch test accuracy.
    """
    num_episodes = 5
    batch_size = 2000
    img_size = 64

    train_loader, test_loader, validation_loader = create_dataloaders(
        batch_size, img_size=img_size
    )
    del validation_loader

    # NOTE: warm_start has no effect with the liblinear solver, so each
    # fit() call below retrains on the current batch only
    classifier = LogisticRegression(
        random_state=42, warm_start=True, solver="liblinear"
    )

    for episode in range(num_episodes):
        print("\nRun \t %.0f" % episode)
        for data, target in train_loader:
            # Reshape data for grayscale conversion
            data = data.view((-1, img_size, img_size, 3))
            data_train = data.detach().cpu().numpy()
            data_gray_train = rgb2gray(data_train)
            data_gray_train = data_gray_train.reshape((-1, img_size * img_size))
            target_train = target.detach().cpu().numpy()

            # Train classifier
            classifier.fit(data_gray_train, target_train)

        # Eval accuracy
        results = []
        for batch_id, (data, target) in enumerate(test_loader):
            data = data.view((-1, img_size, img_size, 3))

            data_test = data.detach().cpu().numpy()
            data_gray_test = rgb2gray(data_test)
            data_gray_test = data_gray_test.reshape((-1, img_size * img_size))
            target_test = target.detach().cpu().numpy()

            predictions = classifier.predict(data_gray_test)
            score = accuracy_score(target_test, predictions)
            results.append(score)
            print("Batch %.0f, Accuracy: %.2f" % (batch_id, score))

    print("\n\nMean accuracy: %.2f" % np.mean(results))
Example #5

def benchmark_inference_squeezenet():
    """
    Benchmark SqueezeNet inference duration with and without JIT tracing
    and GPU execution.
    """
    num_runs = 10

    batch_size = 32

    random_background = 0

    path = Path("../state_dicts/squeezenet_e3.pt")
    results = []

    # load model
    original_model, img_size = initialize_model("squeezenet",
                                                2,
                                                True,
                                                use_pretrained=True)
    original_model.load_state_dict(torch.load(path))

    # get dataloader
    train_loader, test_loader, val_loader = create_dataloaders(
        batchsize=batch_size,
        img_size=img_size,
        random_background=random_background)

    for use_gpu in [1, 0]:
        for use_tracing in [1, 0]:

            print(use_gpu, use_tracing)

            # Specify execution device
            if use_gpu:
                device = torch.device("cuda")
            else:
                device = torch.device("cpu")

            # Clone original model
            execution_model = copy.deepcopy(original_model)
            execution_model.train()

            if use_tracing:
                for (data, target) in val_loader:
                    example_input = data
                    break
                execution_model = trace(execution_model,
                                        example_inputs=example_input,
                                        check_trace=False)

                if use_gpu:
                    execution_model.cuda()
            else:
                # Move model to GPU
                execution_model = execution_model.to(device)

            for ii in range(num_runs):
                start = timeit.default_timer()
                for (data, target) in val_loader:
                    data = data.to(device)
                    prediction = execution_model(data)
                end = timeit.default_timer()
                # each timed span is one full pass over val_loader
                results.append([use_gpu, use_tracing, end - start])
                np.savetxt(
                    Path("../logs/inference_speedup_squeezenet.csv"),
                    results,
                    delimiter=",",
                    header="use_gpu,use_tracing,duration",
                )
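
Two details matter for timing loops like this: the model is left in train
mode with gradient tracking on, and on the GPU timeit.default_timer() can
stop before queued CUDA kernels have finished. A more careful variant of
the inner loop might look like this (a sketch, not the original benchmark):

execution_model.eval()
with torch.no_grad():
    for ii in range(num_runs):
        if use_gpu:
            torch.cuda.synchronize()  # flush pending kernels before timing
        start = timeit.default_timer()
        for data, _ in val_loader:
            prediction = execution_model(data.to(device))
        if use_gpu:
            torch.cuda.synchronize()  # wait until all kernels finish
        end = timeit.default_timer()
        results.append([use_gpu, use_tracing, end - start])
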
Example #6

def benchmark_inference_custom_model():
    """
    Benchmark the custom CNN's inference duration with and without JIT
    tracing and GPU execution.
    """
    num_runs = 25

    batch_size = 1024
    img_size = 128
    random_background = 0

    path = Path("../state_dicts/custom_cnn_e4_0.pt")
    results = []

    # load model
    original_model = Simple_CNN_e2(img_size=img_size)
    original_model.load_state_dict(torch.load(path))

    # get dataloader
    train_loader, test_loader, val_loader = create_dataloaders(
        batchsize=batch_size,
        img_size=img_size,
        random_background=random_background)

    for use_gpu in [0, 1]:
        for use_tracing in [0, 1]:

            print(use_gpu, use_tracing)

            # Specify execution device
            if use_gpu:
                device = torch.device("cuda")
            else:
                device = torch.device("cpu")

            # Clone the original model and set the copy's mode
            execution_model = copy.deepcopy(original_model)
            execution_model.train()

            if use_tracing:
                for (data, target) in val_loader:
                    example_input = data
                    break
                execution_model = trace(execution_model,
                                        example_inputs=example_input)

                if use_gpu:
                    execution_model.cuda()
            else:
                # Move model to GPU
                execution_model = execution_model.to(device)

            for ii in range(num_runs):
                start = timeit.default_timer()
                for (data, target) in val_loader:
                    data = data.to(device)
                    prediction = execution_model(data)
                end = timeit.default_timer()
                # each timed span is one full pass over val_loader
                results.append([use_gpu, use_tracing, end - start])
                np.savetxt(
                    Path("../logs/inference_speedup_custom_e2.csv"),
                    results,
                    delimiter=",",
                    header="use_gpu,use_tracing,duration",
                )
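
Unlike the SqueezeNet benchmark, this trace call leaves check_trace at its
default of True, which re-runs the traced model and compares outputs; with
the network in train mode, stochastic layers such as dropout can make that
check fail or warn. A small sketch of tracing with the model in eval mode,
which sidesteps the issue:

execution_model.eval()  # deterministic forward pass for the trace check
example_input, _ = next(iter(val_loader))
traced_model = trace(execution_model, example_inputs=example_input)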