Example 1
def test():
    args = parse_args()
    print(args)

    if args.use_gpu and cuda_exist:
        place = fluid.CUDAPlace(0)
        print('GPU is used...')
    else:
        place = fluid.CPUPlace()
        print('CPU is used...')

    with fluid.dygraph.guard(place):
        print('start testing ... ')

        # prepare method
        model = prepare_model(args)

        # load checkpoint
        # model_dict, _ = fluid.dygraph.load_persistables("log/")
        params_dict, opt_dict = fluid.load_dygraph(args.log_dir +
                                                   'checkpoint/' +
                                                   args.dataset + '/' +
                                                   args.method + '_' +
                                                   args.backbone + '_' +
                                                   str(args.k_shot) + 'shot_' +
                                                   str(args.n_way) + 'way')
        model.load_dict(params_dict)
        print("checkpoint loaded")

        # prepare optimizer
        opt = prepare_optimizer(args, model)

        # prepare dataloader
        test_data_batches = prepare_dataloader(args)

        model.eval()
        accuracies = []
        for batch_id, batch in enumerate(test_data_batches):
            samples, label = batch
            samples = fluid.dygraph.to_variable(samples)
            labels = fluid.dygraph.to_variable(label)
            loss, acc = model.loss(samples, labels)
            accuracies.append(acc.numpy())

        mean = np.mean(accuracies)
        stds = np.std(accuracies)
        ci95 = 1.96 * stds / np.sqrt(args.test_episodes)
        print("meta-testing accuracy: {}, 95% confidence interval: {}".format(
            mean, ci95))
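The interval above uses args.test_episodes as the sample count; a minimal alternative sketch (not the repository's code) derives the count from the recorded accuracies themselves, so the two cannot drift out of sync:

import numpy as np

def confidence_interval_95(accuracies):
    # Hypothetical helper: 1.96 * std / sqrt(n), with n taken from the list
    # of per-episode accuracies instead of a separate argument.
    accs = np.asarray(accuracies)
    return 1.96 * accs.std() / np.sqrt(len(accs))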
Example 2
def main():

    # Processing command line arguments
    data_dir, save_dir, arch, learning_rate, hidden_units, epochs, device = processing_arguments()
    # Loading image data
    dataloaders, image_data = loading_data(data_dir)
    # Defining model, classifier, criterion and optimizer
    model, classifier, criterion, optimizer = prepare_model(arch, hidden_units, learning_rate)
    # Training model
    do_deep_training(model, dataloaders['train'], epochs, criterion, optimizer, device)
    # Obtaining test loss and accuracy
    validation(model, dataloaders['test'], criterion, device)
    # Saving checkpoint
    saving_model(arch, model, save_dir, image_data['train'], classifier, optimizer, epochs)
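processing_arguments is project code that is not shown; a minimal argparse sketch with the return order main() unpacks (every flag name and default below is an assumption, not the project's actual CLI):

import argparse

def processing_arguments():
    # Hypothetical parser; flag names and defaults are illustrative only.
    parser = argparse.ArgumentParser(description='Train an image classifier')
    parser.add_argument('data_dir')
    parser.add_argument('--save_dir', default='checkpoints/')
    parser.add_argument('--arch', default='vgg16')
    parser.add_argument('--learning_rate', type=float, default=0.001)
    parser.add_argument('--hidden_units', type=int, default=512)
    parser.add_argument('--epochs', type=int, default=5)
    parser.add_argument('--gpu', action='store_true')
    args = parser.parse_args()
    device = 'cuda' if args.gpu else 'cpu'
    return (args.data_dir, args.save_dir, args.arch, args.learning_rate,
            args.hidden_units, args.epochs, device)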
Example 3
    def __init__(self, pretrained, cuda=True, visualise=False):
        super(Classifier, self).__init__()

        if isinstance(pretrained, str):
            pretrained = torch.load(pretrained)
        # otherwise `pretrained` is assumed to already be a checkpoint dict
        self.model, _, _ = prepare_model(pretrained['arch'],
                                         pretrained['num_classes'])
        self.model.load_state_dict(pretrained['model_state_dict'], strict=True)

        self.is_cuda = cuda
        if self.is_cuda:
            self.model = self.model.cuda()
        self.model.eval()

        self.class_names = np.array(pretrained['class_names'])
        self.visualise = visualise

        self.preprocess = transforms.Compose([
            transforms.ToTensor(),
            # ImageNet channel means and standard deviations
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])
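A hedged usage sketch for this Classifier; the file names are assumptions, but the required checkpoint keys ('arch', 'num_classes', 'model_state_dict', 'class_names') follow directly from __init__ above:

import torch
from PIL import Image

# Hypothetical usage; 'checkpoint.pth' and 'example.jpg' are illustrative.
clf = Classifier('checkpoint.pth', cuda=torch.cuda.is_available())
img = Image.open('example.jpg').convert('RGB')
x = clf.preprocess(img).unsqueeze(0)  # add a batch dimension
if clf.is_cuda:
    x = x.cuda()
with torch.no_grad():
    probs = torch.softmax(clf.model(x), dim=1)
print(clf.class_names[probs.argmax(dim=1).item()])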
Example 4
def main(algorithm,
         optimizer,
         dataset,
         num_classes=10,
         optim_params=None):

    # avoid a mutable default argument; these values match the original defaults
    if optim_params is None:
        optim_params = {'lr': 0.05, 'weight_decay': 5e-4, 'momentum': 0.9}

    filename = algorithm + '_' + optimizer + '_' + dataset

    # prepare dataset
    logger.info("====== Evaluation ======")
    logger.info("Preparing dataset...{}".format(dataset))
    db = utils.Datasets(dataset)
    train, valid, test = db.split_image_data(train_data=db.train,
                                             test_data=db.test)

    # prepare model
    model, optimizer = utils.prepare_model(algorithm, optimizer, filename,
                                           optim_params, device, num_classes)

    # get model's output
    data_size = test.dataset.data.shape[0]
    targets = test.dataset.targets if dataset == "CIFAR10" else test.dataset.labels
    predictions = torch.zeros(data_size, num_classes)
    labels = torch.zeros(data_size, 1)
    logger.info("data: {} - targets {}.".format(data_size, len(targets)))
    cum_loss = 0.0
    correct = 0.0
    n_samples = 0.0
    model.eval()
    with torch.no_grad():
        offset = 0
        for data, target in test:
            data, target = data.to(device), target.to(device)
            output = model(data)
            output = F.log_softmax(output, dim=1)
            # sum up batch loss
            cum_loss += F.nll_loss(output, target, reduction='sum').item()
            # get the index of the max log-probability
            _, predicted_labels = output.max(dim=1, keepdim=True)
            correct += (predicted_labels.view(-1) == target).sum().item()
            n_samples += len(output)
            # a running offset keeps the slice correct for a smaller final batch
            batch_size = output.size(0)
            predictions[offset:offset + batch_size] = output.cpu()
            labels[offset:offset + batch_size] = predicted_labels.cpu()
            offset += batch_size
    predictions = predictions.cpu().numpy()
    labels = labels.view(-1).cpu().numpy()
    epoch_loss = cum_loss / n_samples  # avg. over all mini-batches
    epoch_acc = correct / n_samples
    logger.info("Test set: loss = {}, accuracy = {}".format(
        epoch_loss, epoch_acc))
    logger.info("Computing entropy on... test")
    stats.entropy(predictions, targets, filename + '_ENTROPY')
    # save the model's outputs and targets (the valid split was used in training)
    logger.info("Computing calibration on... test")
    # compute and save reliability stats
    calibration = stats.calibration_curve(filename + '_ENTROPY')
    utils.save_nparray(filename + '_CALIBRATION', **calibration)
    logger.info("====== Evaluation End ======\n\n")
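stats.entropy is project code that is not shown; assuming predictions holds per-sample log-probabilities (as produced by F.log_softmax above), a predictive-entropy sketch could look like this:

import numpy as np

def predictive_entropy(log_probs):
    # Hypothetical stand-in for stats.entropy: H = -sum_c p_c * log(p_c),
    # computed per sample from an (N, C) array of log-probabilities.
    probs = np.exp(log_probs)
    return -np.sum(probs * log_probs, axis=1)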
Example 5
from utils import prepare_data, prepare_model, plot_losses, evaluate_model
from keras.callbacks import ModelCheckpoint, TensorBoard
import random

# Creates our training/val/test splits
random.seed(9001)
X_train, X_val, X_test, y_train, y_val, y_test, input_output = prepare_data('./recordings/')

# Prepares a CNN with our desired architecture
CNN_best_model = prepare_model(input_output, modeltype='CNN', dropout=False, maxpooling=True, batch_n=False)

# Creates callbacks to save the best model during training
callbacks = [
    ModelCheckpoint(filepath='models/cnn_best_model.h5', monitor='val_loss',
                    save_best_only=True),
    TensorBoard(log_dir='./Graph', histogram_freq=1,
                write_graph=False, write_images=False)
]

# Fits the model (validation_data expects a tuple)
history = CNN_best_model.fit(X_train, y_train, batch_size=32, epochs=50,
                             verbose=2, validation_data=(X_val, y_val),
                             callbacks=callbacks)

# Plots the loss curve
plot_losses(history)

# Evaluates the model on the test set
evaluate_model('models/cnn_best_model.h5', X_test, y_test)
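evaluate_model comes from utils and is not shown; a minimal sketch under the assumption that it reloads the saved best checkpoint and reports test metrics:

from keras.models import load_model

def evaluate_model(model_path, X_test, y_test):
    # Hypothetical re-implementation for illustration; assumes the model was
    # compiled with metrics=['accuracy'] so evaluate() returns (loss, acc).
    model = load_model(model_path)
    loss, acc = model.evaluate(X_test, y_test, verbose=0)
    print('Test loss: {:.4f} - test accuracy: {:.4f}'.format(loss, acc))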
Example 6
def train():
    args = parse_args()
    print(args)

    if args.use_gpu and cuda_exist:
        place = fluid.CUDAPlace(0)
        print('GPU is used...')
    else:
        place = fluid.CPUPlace()
        print('CPU is used...')

    with fluid.dygraph.guard(place):
        print('start training ... ')

        # prepare method
        model = prepare_model(args)
        model.train()

        # prepare optimizer
        opt = prepare_optimizer(args, model)

        # prepare dataloader
        train_data_batches, val_data_batches = prepare_dataloader(args)

        save_name = args.method + '_' + args.backbone + '_' + str(
            args.k_shot) + 'shot_' + str(args.n_way) + 'way'
        best_val_acc = 0
        with LogWriter(logdir=args.log_dir + 'logs/' + args.dataset + '/',
                       filename_suffix='_' + save_name) as writer:
            for epoch in range(args.epochs):
                train_loss, train_acc = [], []
                for batch_id, batch in enumerate(train_data_batches):
                    samples, label = batch
                    samples = fluid.dygraph.to_variable(samples)
                    labels = fluid.dygraph.to_variable(label)
                    loss, acc = model.loss(samples, labels)
                    avg_loss = fluid.layers.mean(loss)
                    train_loss.append(avg_loss.numpy())
                    train_acc.append(acc.numpy())

                    if batch_id % 100 == 0:
                        print(
                            "epoch: {}, batch_id: {}, loss is: {}, acc is: {}".
                            format(epoch, batch_id, avg_loss.numpy(),
                                   acc.numpy()))
                    avg_loss.backward()
                    opt.minimize(avg_loss)
                    model.clear_gradients()

                writer.add_scalar(tag="train_loss",
                                  step=epoch,
                                  value=np.mean(train_loss))
                writer.add_scalar(tag="train_acc",
                                  step=epoch,
                                  value=np.mean(train_acc))

                model.eval()
                accuracies = []
                losses = []
                for batch_id, batch in enumerate(val_data_batches):
                    samples, label = batch
                    samples = fluid.dygraph.to_variable(samples)
                    labels = fluid.dygraph.to_variable(label)
                    loss, acc = model.loss(samples, labels)
                    avg_loss = fluid.layers.mean(loss)
                    accuracies.append(acc.numpy())
                    losses.append(avg_loss.numpy())
                acc_avg_val = np.mean(accuracies)
                print("[validation] accuracy/loss: {}/{}".format(
                    acc_avg_val, np.mean(losses)))
                model.train()

                writer.add_scalar(tag="val_loss",
                                  step=epoch,
                                  value=np.mean(losses))
                writer.add_scalar(tag="val_acc", step=epoch, value=acc_avg_val)

                if acc_avg_val > best_val_acc:
                    # save params of model
                    fluid.save_dygraph(
                        model.state_dict(), args.log_dir + 'checkpoint/' +
                        args.dataset + '/' + save_name)
                    best_val_acc = acc_avg_val
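The save path here must match the one rebuilt by string concatenation in the test script (Example 1); a small hypothetical helper could keep the two in sync:

def checkpoint_path(args):
    # Hypothetical helper; mirrors the concatenation used by both train()
    # and test() so the save and load paths cannot drift apart.
    save_name = '{}_{}_{}shot_{}way'.format(args.method, args.backbone,
                                            args.k_shot, args.n_way)
    return args.log_dir + 'checkpoint/' + args.dataset + '/' + save_name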
Example 7
def main(arch="resnet18",
         data_path="dataset/",
         resume="",
         epochs=25,
         batch_size=4,
         img_size=224,
         use_scheduler=False,
         **kwargs):

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    now = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')

    ## dataset
    dataloaders = prepare_dataloaders(data_path, img_size, batch_size)
    class_names = dataloaders['train'].dataset.classes
    n_class = len(class_names)

    print("preparing '{}' model with {} classes: {}".format(
        arch, n_class, class_names))

    ## models
    model, criterion, optimizer = prepare_model(arch, n_class)

    start_epoch = 0
    if resume != '':
        checkpoint = torch.load(resume)
        if checkpoint["arch"] != arch:
            raise ValueError("checkpoint arch '{}' does not match '{}'".format(
                checkpoint["arch"], arch))
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['model_state_dict'], strict=True)
        optimizer.load_state_dict(checkpoint['optim_state_dict'])
    else:
        tb_path = os.path.join("result", arch, "tb")
        if os.path.exists(tb_path) and len(os.listdir(tb_path)) > 0:
            import shutil
            # clear stale tensorboard logs from previous runs
            for f in os.listdir(tb_path):
                p = os.path.join(tb_path, f)
                if os.path.isdir(p):
                    shutil.rmtree(p)
                else:
                    os.remove(p)

    scheduler = None
    if use_scheduler:
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=10,
                                              gamma=0.1)

    print("Training {} on {}".format(arch, device))
    if is_notebook():
        print(
            "you can also check progress on tensorboard, execute in terminal:")
        print("  > tensorboard --logdir result/<model_name>/tb/")

    train_model(model,
                arch,
                dataloaders,
                criterion,
                optimizer,
                scheduler=scheduler,
                num_epochs=epochs,
                output_path=os.path.join("result", arch),
                start_epoch=start_epoch)
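The resume branch above reads four keys from the checkpoint, so train_model presumably writes them; a hedged sketch of the matching save call inside the training loop (file name and location are assumptions):

import os
import torch

# Hypothetical checkpoint write; the key names mirror the ones read on resume.
torch.save({
    'arch': arch,
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optim_state_dict': optimizer.state_dict(),
}, os.path.join("result", arch, "checkpoint.pth"))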
Example 8
import io
import base64
import json

from torchvision import models
import torchvision.transforms as transforms
from PIL import Image
from flask import Flask, jsonify, request
from flask_cors import CORS

from utils import get_prediction, prepare_model

app = Flask(__name__)
cors = CORS(app)
model = prepare_model()
model.eval()


@app.route('/predict', methods=['POST'])
def predict():
    if request.method == 'POST':
        file = request.files['file']
        img_bytes = file.read()
        class_name = get_prediction(model, image_bytes=img_bytes)
        return jsonify(
            {
                "results": [{
                    'model': 'breed',
                    'prediction': class_name
                }],
                "image": base64.b64encode(img_bytes).decode("utf-8"),
            })