Example 1
import io

import torch
from PIL import Image
from torchvision import transforms

from main import Net  # assumption: Net is the model architecture defined alongside this wrapper


class SeldonModel:
    def __init__(self):
        self._model = Net()
        self._model.load_state_dict(
            torch.load("/storage/model.pkl", map_location=torch.device("cpu"))
        )
        self._model.eval()

    def predict(self, X, features_names):
        """
        Return a prediction.

        Parameters
        ----------
        X : bytes - the encoded image sent in the predict request
        features_names : array of feature names (optional)
        """
        data = transforms.ToTensor()(Image.open(io.BytesIO(X)))
        return self._model(data[None, ...]).detach().numpy()

    def send_feedback(self, features, feature_names, reward, truth):
        """
        Handle feedback

        Parameters
        ----------
        features : array - the features sent in the original predict request
        feature_names : array of feature names. May be None if not available.
        reward : float - the reward
        truth : array with correct value (optional)
        """
        print("Send feedback called")
        return []
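
The predict method above treats X as the raw, encoded image bytes of the request. A minimal local smoke test for the wrapper, sketched under the assumption that a small grayscale digit image exists at the hypothetical path digit.png and that the imports the class relies on are available:

if __name__ == "__main__":
    # Hypothetical input file; any small grayscale digit image will do.
    with open("digit.png", "rb") as f:
        payload = f.read()

    wrapper = SeldonModel()
    scores = wrapper.predict(payload, features_names=None)
    print(scores)  # numpy array of per-class scores for the single image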
Example 2
def main():
    # Training settings
    # Use the command line to modify the default settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--step', type=int, default=1, metavar='N',
                        help='number of epochs between learning rate reductions (default: 1)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--evaluate', action='store_true', default=False,
                        help='evaluate your model on the official test set')
    parser.add_argument('--load-model', type=str,
                        help='model file path')

    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    args = parser.parse_args()

    torch.manual_seed(args.seed)

    device = 'cpu'

    kwargs = {}

    # Evaluate on the official test set
    if args.evaluate:
        assert os.path.exists(args.load_model)

        # Set the test model
        model = Net().to(device)
        model.load_state_dict(torch.load(args.load_model))

        test_dataset = datasets.MNIST('data', train=False,
                    transform=transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Normalize((0.1307,), (0.3081,))
                    ]))

        test_loader = torch.utils.data.DataLoader(
            test_dataset, batch_size=args.test_batch_size, shuffle=True, **kwargs)
        test2(model, device, test_loader, errors=False, kernels=False,
              matrix=False, feature=False)

        return
Example 3
def main():
    use_cuda = torch.cuda.is_available()

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    tfms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    img = tfms(Image.open(sys.argv[1]))

    model = Net().to(device)

    model.load_state_dict(torch.load("mnist_cnn.pt"))

    test(model, device, img)
Example 4
def run():
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Set the test model
    model = Net().to(device)
    model.load_state_dict(torch.load('./mnist_model_epoch=14.pt'))

    test_dataset = datasets.MNIST('../data', train=False,
                                  transform=transforms.Compose([
                                      transforms.ToTensor(),
                                      transforms.Normalize((0.1307,), (0.3081,))
                                  ]))

    test_loader = torch.utils.data.DataLoader(
        test_dataset, shuffle=False, **kwargs)

    path = './model_mistakes.pkl'
    if os.path.exists(path):
        with open(path, 'rb') as f:
            mistake_inds, pe_tuples, cm = pkl.load(f)
    else:
        # compute first, then write, so a failed run cannot leave a truncated pickle behind
        mistake_inds, pe_tuples, cm = mark_mistakes(model, device, test_loader)
        with open(path, 'wb') as f:
            pkl.dump((mistake_inds, pe_tuples, cm), f)

    # visualize_mistakes(mistake_inds, pe_tuples, test_dataset)
    # visualize_kernels(model)
    # confusion_matrix(cm)

    path2 = './final_layer_outputs.pkl'
    if os.path.exists(path2):
        with open(path2, 'rb') as f:
            fl_outputs = pkl.load(f)
    else:
        fl_outputs = get_layer_outputs(model, device, test_loader)
        with open(path2, 'wb') as f:
            pkl.dump(fl_outputs, f)
    # t_sne_visualization(fl_outputs, test_dataset.targets)
    find_nearby_images(fl_outputs, test_dataset)
Example 5
def test(args):
    env_id, want_reward, generations, layers = args
    print(f'START({env_id})')

    with gym.make(env_id) as env:
        layers = (*env.observation_space.shape, *layers)
        if isinstance(env.action_space, Box):
            layers = (*layers, *env.action_space.shape)
        else:
            layers = (*layers, env.action_space.n)

        net = Net.random(layers)
        net.train(env, generations, render=False, print_stats=False)
        n = 10  # times to run evaluation before taking average
        reward = sum(net.evaluate(env) for _ in range(n)) / n

        if reward < want_reward:
            print(f'FAILED({env_id}) reward: {reward} < {want_reward}')
        else:
            print(f'SOLVED({env_id}) reward: {reward} >= {want_reward}')
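
Because test() unpacks a single packed tuple, it can be mapped over a process pool to check several environments in parallel. A sketch of that usage with made-up environment ids, reward thresholds, generation counts, and hidden-layer sizes (illustrative values only):

from multiprocessing import Pool

if __name__ == '__main__':
    cases = [
        # (env_id, want_reward, generations, hidden layers)
        ('CartPole-v1', 195.0, 50, (16,)),
        ('MountainCar-v0', -110.0, 80, (24,)),
    ]
    with Pool() as pool:
        pool.map(test, cases)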
Example 6
from main import Net


def heading(msg):
    print(('=' * 20) + ' ' + msg + ' ' + ('=' * 20))


# Simple handcrafted test of net.set_from_params
heading('Test handcrafted')
net = Net.random((3, 2, 4, 6))
p = net.params()
p[-1] = 100
p[0] = 200

print([w.shape for w in net.weights])
net = Net.from_params(p, net.layers)
print(f'Biases {[b.shape for b in net.biases]}\n', net.biases)
print(f'Weights {[w.shape for w in net.weights]}\n', net.weights)
assert net.biases[0][0] == 200
got = net.weights[-1][-1][-1]
assert got == 100, f'want 100 got {got}'

# ===========================
# global test, add 20 to all (does not test structure!)

heading('Test add 20')
net = Net.random((3, 2, 4, 5))
p = net.params()
p = p + 20

net = Net.from_params(p, net.layers)
Example 7
import pickle
import numpy as np
import json
import torch
import collections
from main import Net


# load the pickled object into a variable
with open("audio_model.pkl", "rb") as f:
    dict_pickle = pickle.load(f)

if __name__ == '__main__':
    #print dict_pickle
    the_model = Net(39, 100, 10)
    the_model.load_state_dict(torch.load("demo_model.pkl"))


    x = the_model.state_dict().get('fc1.bias').numpy().reshape(1,100)
    print(x.dtype)
    #temp = np.zeros((3,4))
    #newFile = open('output.txt', 'w')
    #pickle.dump(the_model.state_dict().get('fc1.weight').numpy()[0], newFile)
    np.savetxt('bias.txt', x, delimiter='\n')

    #print the_model.state_dict().get('fc4.weight')
    '''text_file = open("output.txt", "w")
    text_file.write(dict_pickle['f2.weight'])
    text_file.close()'''
Example 8
            heapq.heappop(h)
    return h

if __name__ == '__main__':
    model_name = 'model.pt'
    model_path = os.path.join(MODEL_PATH, model_name)
    assert os.path.exists(model_path)

    torch.manual_seed(1)

    # Set the test model
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    model = Net().to(device)
    model.load_state_dict(torch.load(model_path))

    test_dataset = datasets.MNIST('./mnist_data', train=False,
                transform=augmentation_scheme['augment1'])

    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=32, shuffle=False, **kwargs)

    img_idx = 7
    os.makedirs(f'./close_feature_vectors/{img_idx}', exist_ok=True)

    feature_vector_arr = get_feature_vectors(model, device, test_loader)
    feature_vector = feature_vector_arr[img_idx]
    closest_h = choose_closest_vector(feature_vector_arr, feature_vector)
    
Example 10
from flask import Flask, request, jsonify
import torch
import numpy as np
from torchvision.transforms import transforms
from main import Net
app = Flask(__name__)
PATH = "../MNIST-model/mnist_cnn.pt"

model = Net().double()
model.load_state_dict(torch.load(PATH))
model.eval()  # turn off dropout/batch-norm updates for serving

trans = transforms.Compose([
    transforms.ToTensor(),
])


@app.route('/predict', methods=['POST'])
def predict():
    body = request.get_json()
    img_array = np.asarray(body['indices'])
    img_array = trans(img_array)
    img_array = img_array.view(1, 1, 28, 28)
    output = torch.exp(model(img_array.double()))
    return jsonify({"output": int(np.argmax(output.detach().numpy()))})


if __name__ == '__main__':
    app.run()
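
A hypothetical client for the /predict route above: it posts a 28x28 grayscale image as a nested list under the "indices" key and prints the predicted digit (assumes the requests package and a locally running server on Flask's default port):

import numpy as np
import requests

img = np.random.randint(0, 256, size=(28, 28), dtype=np.uint8)  # stand-in for a real digit image
resp = requests.post("http://127.0.0.1:5000/predict", json={"indices": img.tolist()})
print(resp.json())  # e.g. {"output": 3}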
Example 11
    # Resample and rescale
    length = len(data) / rate
    new_length = int(length * SAMPLE_FREQ)

    data = signal.resample(data, new_length)
    data = data.astype(np.float32)
    data /= np.max(np.abs(data))

    # Create spectrogram
    spec = get_spectrogram(data)
    spec_orig = spec.copy()
    spectrogram_size = spec.shape[0]

    # Load model
    device = torch.device("cpu")
    model = Net(num_tags, spectrogram_size)
    model.load_state_dict(torch.load(args.model, map_location=device))
    model.eval()

    # Run model on audio
    spec = torch.from_numpy(spec)
    spec = spec.permute(1, 0)
    spec = spec.unsqueeze(0)
    y_pred = model(spec)
    y_pred_l = np.exp(y_pred[0].tolist())

    # Convert prediction into string
    # TODO: proper beam search
    m = torch.argmax(y_pred[0], 1)
    print(prediction_to_str(m))
Example 12
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from main import Net
from utils import get_flops
#from torch.autograd import Variable

model_dir = 'weights/checkpoint_quantize.pth.tar'
model = Net()
model = torch.nn.DataParallel(model).cuda(0)
#checkpoint = torch.load(model_dir)
#model.load_state_dict(checkpoint['state_dict'])
#model.eval()
print(get_flops(model))
Example 13
#torch.manual_seed(args.seed)
#if args.cuda:
    #torch.cuda.manual_seed(args.seed)

print("here")
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
print("hello0")
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=args.test_batch_size, shuffle=True, **kwargs)


test_model = Net()
print("hello1")
#modules = [model]
#load_ckpt(modules, "net-epoch-20_baseline", load_to_cpu=False)

test_model.load_state_dict(torch.load("net-epoch-20_baseline.pth"))
test_model.train(False)

print("hello2")
if args.cuda:
    test_model.cuda()
print("hello3")
test_model.eval()
test_loss = 0
correct = 0
for data, target in test_loader:
Example 14
from main import Net, test
import argparse
import torch

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Pytorch MNIST Load Test')
    parser.add_argument('--cuda', action='store_true', default=False,
                        help='enables CUDA training')
    parser.add_argument('--model', default='./mnist.dat')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')

    args = parser.parse_args()

    use_cuda = args.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    print("Current Device:", device)

    model = Net()
    model.load_state_dict(torch.load(args.model, map_location=device))
    dataloader_kwargs = {'pin_memory': True} if use_cuda else {}

    test(args, model, device, dataloader_kwargs)
Example 15
# Some standard imports
import io
import numpy as np

from torch import nn
import torch.utils.model_zoo as model_zoo
import torch.onnx
import onnx
# import caffe2.python.onnx.backend as onnx_caffe2_backend

batch_size = 1  # just a random number
from main import Net

torch_model = Net()
torch_model.load_state_dict(
    torch.load('/home/odedf/lw_model.pth', map_location='cpu'))
torch_model.eval()

x = torch.randn(batch_size, 3, 43, 43, requires_grad=True)

torch_out = torch.onnx._export(
    torch_model,  # model being run
    x,  # model input (or a tuple for multiple inputs)
    "/home/odedf/lw_model.onnx",  # where to save the model (can be a file or file-like object)
    export_params=True
)  # store the trained parameter weights inside the model file

# Load the ONNX ModelProto object. model is a standard Python protobuf object
model = onnx.load("/home/odedf/lw_model.onnx")

# prepare the caffe2 backend for executing the model: this converts the ONNX model into a Caffe2 NetDef that can execute it
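
As an alternative to the caffe2 backend mentioned above (an addition, not part of the original script), the exported file can be validated and executed with onnxruntime, reusing model, x and torch_out from above:

import onnxruntime as ort

onnx.checker.check_model(model)  # structural validity check on the loaded ModelProto

sess = ort.InferenceSession("/home/odedf/lw_model.onnx")
input_name = sess.get_inputs()[0].name
ort_out = sess.run(None, {input_name: x.detach().numpy()})[0]

# The exported graph should reproduce the PyTorch output within a small tolerance.
np.testing.assert_allclose(torch_out.detach().numpy(), ort_out, rtol=1e-3, atol=1e-5)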
Example 16
import os

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
import numpy.random as r
import sklearn.metrics as m
from main import Net
import glob

model_dir = 'checkpoint_lite.pth.tar'
model = Net()
model = torch.nn.DataParallel(model).cuda()
checkpoint = torch.load(model_dir)
model.load_state_dict(checkpoint['state_dict'])
criterion = nn.CrossEntropyLoss().cuda()

model.eval()

#print(model.state_dict())
#print([i[0] for i in model.named_modules()])
#exit(0)

batch_num = 100
batch_size = 64
print(batch_size)
valdir = os.path.join('/train/trainset/1/DMS/data/', 'val_phone')
Example 17
test_loader = torch.utils.data.DataLoader(datasets.MNIST(
    'data',
    train=False,
    download=True,
    transform=transforms.Compose([
        transforms.ToTensor(),
    ])),
                                          batch_size=1,
                                          shuffle=False)

# Define what device we are using
print("CUDA Available: ", torch.cuda.is_available())
device = torch.device("cuda" if (
    use_cuda and torch.cuda.is_available()) else "cpu")

# Initialize the network
model = Net().to(device)

# Load the pretrained model
model.load_state_dict(torch.load(pretrained_model, map_location='cpu'))

# Set the model in evaluation mode. In this case this is for the Dropout layers
model.eval()

accuracies = []
examples = []

acc, ex = test(model, device, test_loader, epsilon, runs, first_k)
accuracies.append(acc)
examples.append(ex)

# Plot several examples of adversarial samples at each epsilon
Example 18
def load_model():
    model = Net()
    model.load_state_dict(torch.load('model.pth'))
    model.eval()
    return model
Example 19
# Load batch
transform_test = transforms.Compose([transforms.ToTensor()])
test_set = ScatteredCIFAR10('./data',
                            train=False,
                            download=True,
                            transform=transform_test)
test_gen = torch.utils.data.DataLoader(test_set,
                                       pin_memory=True,
                                       batch_size=128,
                                       shuffle=False,
                                       num_workers=4)
batch, _ = next(iter(test_gen))
inputs = batch[:16]

# Load models
stn = Net()
stn.load_state_dict(torch.load('standard1.49.pt')[torchbearer.MODEL])

multi = MultiScaleNet()
multi.load_state_dict(torch.load('multi1.49.pt')[torchbearer.MODEL])

fovea = FoveaNet(pool=True)
fovea.load_state_dict(torch.load('foveated1.49.pt')[torchbearer.MODEL])

stn_fa = Net(full_affine=True)
stn_fa.load_state_dict(torch.load('standardFA1.49.pt')[torchbearer.MODEL])

fovea_fa = FoveaNet(full_affine=True, pool=True)
fovea_fa.load_state_dict(torch.load('foveatedFA1.49.pt')[torchbearer.MODEL])

# Obtain transformed results