Example #1
import io

import torch
from PIL import Image
from torchvision import transforms

from main import Net  # Net is assumed to live in the project's main module, as in the later examples


class SeldonModel:
    def __init__(self):
        self._model = Net()
        self._model.load_state_dict(
            torch.load("/storage/model.pkl", map_location=torch.device("cpu"))
        )
        self._model.eval()

    def predict(self, X, features_names):
        """
        Return a prediction.

        Parameters
        ----------
        X : array-like
        features_names : array of feature names (optional)
        """
        data = transforms.ToTensor()(Image.open(io.BytesIO(X)))
        return self._model(data[None, ...]).detach().numpy()

    def send_feedback(self, features, feature_names, reward, truth):
        """
        Handle feedback

        Parameters
        ----------
        features : array - the features sent in the original predict request
        feature_names : array of feature names. May be None if not available.
        reward : float - the reward
        truth : array with correct value (optional)
        """
        print("Send feedback called")
        return []
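A minimal local smoke test for the wrapper above (not part of the original snippet): it feeds raw image bytes to predict, which is the form Seldon forwards request payloads in. The file name digit.png is only illustrative, and the class assumes /storage/model.pkl exists.

if __name__ == "__main__":
    # Read raw image bytes exactly as they would arrive in a prediction request
    with open("digit.png", "rb") as f:
        raw_bytes = f.read()

    model = SeldonModel()
    scores = model.predict(raw_bytes, None)
    print("predicted digit:", scores.argmax())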
Example #2
def main():
    # Training settings
    # Use the command line to modify the default settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--step', type=int, default=1, metavar='N',
                        help='number of epochs between learning rate reductions (default: 1)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--evaluate', action='store_true', default=False,
                        help='evaluate your model on the official test set')
    parser.add_argument('--load-model', type=str,
                        help='model file path')

    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()

    torch.manual_seed(args.seed)

    device = 'cpu'

    kwargs = {}

    # Evaluate on the official test set
    if args.evaluate:
        assert os.path.exists(args.load_model)

        # Set the test model
        model = Net().to(device)
        model.load_state_dict(torch.load(args.load_model))

        test_dataset = datasets.MNIST('data', train=False,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,), (0.3081,))
                    ]))

        test_loader = torch.utils.data.DataLoader(
            test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)
        test2(model, device, test_loader, errors=False, kernels=False,
              matrix=False, feature=False)

        return
Example #3
def main():
    use_cuda = torch.cuda.is_available()

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    tfms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    img = tfms(Image.open(sys.argv[1]))

    model = Net().to(device)

    model.load_state_dict(torch.load("mnist_cnn.pt"))

    test(model, device, img)
Example #4
def run():
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Set the test model
    model = Net().to(device)
    model.load_state_dict(torch.load('./mnist_model_epoch=14.pt'))

    test_dataset = datasets.MNIST('../data', train=False,
                                  transform=transforms.Compose([
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.1307,), (0.3081,))
                                  ]))

    test_loader = torch.utils.data.DataLoader(
        test_dataset, shuffle=False, **kwargs)

    path = './model_mistakes.pkl'
    if os.path.exists(path):
        with open(path, 'rb') as f:
            mistake_inds, pe_tuples, cm = pkl.load(f)
    else:
        with open(path, 'wb') as f:
            mistake_inds, pe_tuples, cm = mark_mistakes(model, device, test_loader)
            pkl.dump((mistake_inds, pe_tuples, cm), f)

    # visualize_mistakes(mistake_inds, pe_tuples, test_dataset)
    # visualize_kernels(model)
    # confusion_matrix(cm)

    path2 = './final_layer_outputs.pkl'
    if os.path.exists(path2):
        with open(path2, 'rb') as f:
            fl_outputs = pkl.load(f)
    else:
        with open(path2, 'wb') as f:
            fl_outputs = get_layer_outputs(model, device, test_loader)
            pkl.dump(fl_outputs, f)
    # t_sne_visualization(fl_outputs, test_dataset.targets)
    find_nearby_images(fl_outputs, test_dataset)
Example #5
def load_model():
    model = Net()
    model.load_state_dict(torch.load('model.pth'))
    model.eval()
    return model
Example #6
import os

import torch
import torch.nn as nn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
import numpy.random as r
import sklearn.metrics as m
from main import Net
import glob

model_dir = 'checkpoint_lite.pth.tar'
model = Net()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(model_dir)
model.load_state_dict(checkpoint['state_dict'])
criterion = nn.CrossEntropyLoss().cuda()

model.eval()

#print(model.state_dict())
#print([i[0] for i in model.named_modules()])
#exit(0)

batch_num = 100
batch_size = 64
print(batch_size)
valdir = os.path.join('/train/trainset/1/DMS/data/', 'val_phone')

normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
Example #7
from main import Net, test
import argparse
import torch

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Pytorch MNIST Load Test')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    parser.add_argument('--model', default='./mnist.dat')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')

    args = parser.parse_args()

    use_cuda = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Current Device:", device)

    model = Net()
    model.load_state_dict(torch.load(args.model, map_location=device))
    dataloader_kwargs = {'pin_memory': True} if use_cuda else {}

    test(args, model, device, dataloader_kwargs)
Example #8
import pickle
import numpy as np
import json
import torch
import collections
from main import Net


# load the pickled object into a variable
dict_pickle = pickle.load(open("audio_model.pkl", "rb"))

if __name__ == '__main__':
    #print dict_pickle
    the_model = Net(39, 100, 10)
    the_model.load_state_dict(torch.load("demo_model.pkl"))


    x = the_model.state_dict().get('fc1.bias').numpy().reshape(1,100)
    print(x.dtype)
    #temp = np.zeros((3,4))
    #newFile = open('output.txt', 'w')
    #pickle.dump(the_model.state_dict().get('fc1.weight').numpy()[0], newFile)
    np.savetxt('bias.txt', x, delimiter='\n')

    #print the_model.state_dict().get('fc4.weight')
    '''text_file = open("output.txt", "w")
    text_file.write(dict_pickle['f2.weight'])
    text_file.close()'''
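    # Sanity-check sketch (not in the original snippet): read the dump back with
    # np.loadtxt and compare it against the state dict; savetxt with
    # delimiter='\n' wrote the 100 bias entries one per line.
    loaded = np.loadtxt('bias.txt').reshape(1, 100)
    assert np.allclose(loaded, x)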
Example #9
    return h

if __name__ == '__main__':
    model_name = 'model.pt'
    model_path = os.path.join(MODEL_PATH, model_name)
    assert os.path.exists(model_path)

    torch.manual_seed(1)

    # Set the test model
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    model = Net().to(device)
    model.load_state_dict(torch.load(model_path))

    test_dataset = datasets.MNIST('./mnist_data', train=False,
                transform=augmentation_scheme['augment1'])

    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=32, shuffle=False, **kwargs)

    img_idx = 7
    os.makedirs(f'./close_feature_vectors/{img_idx}', exist_ok=True)

    feature_vector_arr = get_feature_vectors(model, device, test_loader)
    feature_vector = feature_vector_arr[img_idx]
    closest_h = choose_closest_vector(feature_vector_arr, feature_vector)
    
Example #10
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./mnist_data', train=False,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ])),
    batch_size=1,
    shuffle=False)

# Define what device we are using
print("CUDA Available: ", torch.cuda.is_available())
device = torch.device("cuda" if (
    use_cuda and torch.cuda.is_available()) else "cpu")

# Initialize the network
model = Net().to(device)

# Load the pretrained model
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))

# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()

accuracies = []
examples = []

acc, ex = test(model, device, test_loader, epsilon, runs, first_k)
accuracies.append(acc)
examples.append(ex)

# Plot several examples of adversarial samples at each epsilon
cnt = 0
plt.figure(figsize=(12, 8))
indices = [0]
Example #11
# Some standard imports
import io
import numpy as np

from torch import nn
import torch.utils.model_zoo as model_zoo
import torch.onnx
import onnx
# import caffe2.python.onnx.backend as onnx_caffe2_backend

batch_size = 1  # just a random number
from main import Net

torch_model = Net()
torch_model.load_state_dict(
    torch.load('/home/odedf/lw_model.pth', map_location='cpu'))
torch_model.eval()

x = torch.randn(batch_size, 3, 43, 43, requires_grad=True)

torch_out = torch.onnx._export(
    torch_model,  # model being run
    x,  # model input (or a tuple for multiple inputs)
    "/home/odedf/lw_model.onnx",  # where to save the model (can be a file or file-like object)
    export_params=True
)  # store the trained parameter weights inside the model file

# Load the ONNX ModelProto object. model is a standard Python protobuf object
model = onnx.load("/home/odedf/lw_model.onnx")

# prepare the caffe2 backend for executing the model this converts the ONNX model into a
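The snippet breaks off before a backend is prepared. As a minimal sketch continuing the example above, the exported file can be validated and executed with onnxruntime (used here as an assumption in place of the caffe2 backend the comment mentions):

import onnxruntime

# Check that the exported ProtoBuf is well formed
onnx.checker.check_model(onnx.load("/home/odedf/lw_model.onnx"))

# Run the exported model and compare it against the PyTorch output
session = onnxruntime.InferenceSession("/home/odedf/lw_model.onnx",
                                       providers=["CPUExecutionProvider"])
input_name = session.get_inputs()[0].name
ort_out = session.run(None, {input_name: x.detach().numpy()})[0]

with torch.no_grad():
    expected = torch_model(x)
np.testing.assert_allclose(expected.numpy(), ort_out, rtol=1e-3, atol=1e-5)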
Example #12
from flask import Flask, request, jsonify
import torch
import numpy as np
from torchvision.transforms import transforms
from main import Net
app = Flask(__name__)
PATH = "../MNIST-model/mnist_cnn.pt"

model = Net().double()
model.load_state_dict(torch.load(PATH))

trans = transforms.Compose([
    transforms.ToTensor(),
])


@app.route('/predict', methods=['POST'])
def predict():
    body = request.get_json()
    img_array = np.asarray(body['indices'])
    img_array = trans(img_array)
    img_array = img_array.view(1, 1, 28, 28)
    output = torch.exp(model(img_array.double()))
    return jsonify({"output": int(np.argmax(output.detach().numpy()))})


if __name__ == '__main__':
    app.run()
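With the server above running, the endpoint can be exercised by posting a 28x28 pixel array under the 'indices' key that predict reads from the JSON body; a minimal client sketch (the all-zeros image is only a placeholder):

import numpy as np
import requests

pixels = np.zeros((28, 28)).tolist()
resp = requests.post("http://127.0.0.1:5000/predict", json={"indices": pixels})
print(resp.json())  # {"output": <predicted digit>}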
Example #13
    for key, val in classes.items():
        if val == pred:
            return key
    return 'None'


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Test')
    parser.add_argument('--image',
                        type=str,
                        default='YL (18).jpg',
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--use-cuda', action='store_true', default=False)

    # parse argument
    args = parser.parse_args()
    device = torch.device('cuda' if args.use_cuda else 'cpu')
    model = Net().to(device)
    model.load_state_dict(torch.load(model_name))

    image = cv2.imread(args.image)

    pred_type = evaluate(image, model, device)
    print(pred_type)
    cv2.putText(image, pred_type, (10, 30), cv2.FONT_HERSHEY_COMPLEX, 1,
                (255, 255, 255), 2)
    cv2.namedWindow('Predict', cv2.WINDOW_NORMAL)
    cv2.imshow('Predict', image)
    cv2.waitKey(0)
Example #14
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
print("hello0")
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)


test_model = Net()
print("hello1")
#modules = [model]
#load_ckpt(modules, "net-epoch-20_baseline", load_to_cpu=False)

test_model.load_state_dict(torch.load("net-epoch-20_baseline.pth"))
test_model.train(False)

print("hello2")
if args.cuda:
    test_model.cuda()
print("hello3")
test_model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
    for data, target in test_loader:
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        output = test_model(data)
        test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
Example #15
transform_test = transforms.Compose([transforms.ToTensor()])
test_set = ScatteredCIFAR10('./data',
                            train=False,
                            download=True,
                            transform=transforms.Compose([transform_test]))
test_gen = torch.utils.data.DataLoader(test_set,
                                       pin_memory=True,
                                       batch_size=128,
                                       shuffle=False,
                                       num_workers=4)
batch, _ = next(iter(test_gen))
inputs = batch[:16]

# Load models
stn = Net()
stn.load_state_dict(torch.load('standard1.49.pt')[torchbearer.MODEL])

multi = MultiScaleNet()
multi.load_state_dict(torch.load('multi1.49.pt')[torchbearer.MODEL])

fovea = FoveaNet(pool=True)
fovea.load_state_dict(torch.load('foveated1.49.pt')[torchbearer.MODEL])

stn_fa = Net(full_affine=True)
stn_fa.load_state_dict(torch.load('standardFA1.49.pt')[torchbearer.MODEL])

fovea_fa = FoveaNet(full_affine=True, pool=True)
fovea_fa.load_state_dict(torch.load('foveatedFA1.49.pt')[torchbearer.MODEL])

# Obtain transformed results
state = {}