Example no. 1
def check():
    model = torch.load('first.pth')
    images, labels = next(iter(testloader))
    img = images[0].view(1, 784)
    logits = model.forward(img)
    ps = F.softmax(logits, dim=1)
    helper.view_classify(img.view(1, 28, 28), ps)

# A second block from the same example: the same check written as a test-class method.
def testnetwork(self):
    images, labels = next(iter(self.trainloader))

    img = images[0].view(1, 784)
    # Turn off gradients to speed up this part
    with torch.no_grad():
        logps = self.model(img)

    # The network outputs log-probabilities; take the exponential to get probabilities
    ps = torch.exp(logps)
    helper.view_classify(img.view(1, 28, 28), ps)
def predict(model):
    if model is None:
        model = helper.load_checkpoint()

    device = helper.get_device()

    image_tensor, image = helper.get_image_tensor(
        '\nPlease enter the path of the image you want to analyse\n')
    image_tensor = image_tensor.to(device)
    topk = helper.get_int(
        '\nPlease enter how many of the top predictions you want to see (topk)\n'
    )

    model = model.to(device)

    print('\nPredicting\n')

    with torch.no_grad():
        output = model.forward(image_tensor)

    ps = torch.exp(output)

    topK_ps = torch.topk(ps, topk)

    probs = topK_ps[0].cpu().numpy().squeeze()
    sorted_ps_label_keys = topK_ps[1].cpu().numpy().squeeze()
    classes = []

    print('Sorted label keys {}'.format(sorted_ps_label_keys))

    try:
        get_label = lambda x: idx_to_class[str(x)]
        for i in sorted_ps_label_keys[0:topk]:
            classes.append(get_label(i))

    except NameError:
        print(
            '\nCaught NameError: idx_to_class is not defined\nUsing raw index keys\n'
        )
        for i in sorted_ps_label_keys[0:topk]:
            classes.append(i)

    print('\nFinished predicting\n')

    helper.view_classify(image, probs, classes)
    return
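
The predict function above relies on a global idx_to_class mapping that may not exist (hence the NameError handler). A minimal sketch of how such a mapping is typically built, assuming a torchvision-style dataset with a class_to_idx attribute (train_dataset is hypothetical, not shown above):

# Invert a torchvision dataset's class_to_idx to map prediction indices to labels
idx_to_class = {str(v): k for k, v in train_dataset.class_to_idx.items()}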
Example no. 4
def test(use_test_data=False):
	model = load_model('mnist_fashion')
	if use_test_data:
		_, test_data = load_data()
		dataiter = iter(test_data)
		images, labels = next(dataiter)
		img = images[11].view(1, 784).cuda()
		labels = labels.cpu()
		prediction = torch.exp(model(img.float()))
		print(labels[11])
		helper.view_classify(img.view(1, 28, 28).cpu(), prediction.cpu(), version='Fashion')
	else:
		image = torch.tensor(io.imread(fname='./red_shirt.png', as_gray=True))
		img = image.view(1, 784).cuda()
		prediction = torch.exp(model(img.float()))
		helper.view_classify(img.view(1, 28, 28).cpu(), prediction.cpu(), version='Fashion')
Example no. 5
def buildNetworkWithPyTorch(flatImages, trainloader):
	model = Network()
	print(model.fc1.weight)
	print(model.fc1.bias)
	# Set biases to all zeros
	model.fc1.bias.data.fill_(0)
	# sample from random normal with standard dev = 0.01
	model.fc1.weight.data.normal_(std=0.01)
	# Grab some data
	dataiter = iter(trainloader)
	images, labels = next(dataiter)

	# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
	images.resize_(64, 1, 784)
	# or images.resize_(images.shape[0], 1, 784) to automatically get batch size

	# Forward pass through the network
	img_idx = 0
	ps = model.forward(images[img_idx, :])

	img = images[img_idx]
	helper.view_classify(img.view(1, 28, 28), ps)
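
The manual fill_ and normal_ calls above can also be written with the torch.nn.init helpers; a small equivalent sketch:

# Equivalent initialization via torch.nn.init (same effect as the in-place calls above)
import torch.nn as nn
nn.init.zeros_(model.fc1.bias)
nn.init.normal_(model.fc1.weight, std=0.01)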
Example no. 6
        x = F.relu(self.fc3(x))
        x = F.log_softmax(self.fc4(x), dim=1)

        return x


model = Classifier()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
epochs = 5

for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:

        log_ps = model(images)
        loss = criterion(log_ps, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")

dataiter = iter(testloader)
images, labels = next(dataiter)
img = images[1]

ps = torch.exp(model(img))
helper.view_classify(img, ps, version="Fashion")

# A second block from the same example: the Classifier's forward method and a training pass.
    def forward(self, x):
        # make sure input tensor is flattened
        x = x.view(x.shape[0], -1)

        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.log_softmax(self.fc4(x), dim=1)

        return x


model = Classifier()
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.parameters(), lr=0.003)
# Training pass
epochs = 5

for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        optimizer.zero_grad()

        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        
        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")
Example no. 8
# Pass data forward through the network and display output
images, labels = next(iter(train_loader))

# Get batch size from tensor, which in this case is 64
# 784 is the 28*28 correspondent to img width and height
# and 1 layer since images are grayscale
batch_size_from_tensor = images.shape[0]
print(batch_size_from_tensor)
images.resize_(batch_size_from_tensor, 1, 784)

# probability distribution
ps = model.forward(images[0])

# Calling view here converts the image back to its original size;
# it is similar to resize_, but returns a new view of the tensor instead
helper.view_classify(images[0].view(1, 28, 28), ps)

# HyperParameters for network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10

# Same as Network but with Sequential (requires: from collections import OrderedDict)
model = nn.Sequential(
    OrderedDict([('fc1', nn.Linear(input_size, hidden_sizes[0])),
                 ('relu1', nn.ReLU()),
                 ('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
                 ('relu2', nn.ReLU()),
                 ('output', nn.Linear(hidden_sizes[1], output_size)),
                 ('softmax', nn.Softmax(dim=1))]))
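
This model ends in nn.Softmax, which is fine for displaying probabilities; for training with nn.NLLLoss, as other examples on this page do, the head would typically be nn.LogSoftmax instead. A minimal sketch of that variant, reusing the hyperparameters above:

# Variant with a log-probability head, suitable for nn.NLLLoss training
log_model = nn.Sequential(
    OrderedDict([('fc1', nn.Linear(input_size, hidden_sizes[0])),
                 ('relu1', nn.ReLU()),
                 ('fc2', nn.Linear(hidden_sizes[0], hidden_sizes[1])),
                 ('relu2', nn.ReLU()),
                 ('output', nn.Linear(hidden_sizes[1], output_size)),
                 ('log_softmax', nn.LogSoftmax(dim=1))]))
criterion = nn.NLLLoss()  # expects log-probabilities as input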
Example no. 9
    running_loss = 0
    for images, labels in dataLoaders[i]:
        # Flatten the image batch into vectors
        images = images.view(images.shape[0], -1)

        # TODO: training pass
        optimizer.zero_grad()  # reset the accumulated gradients

        output = model(images)  # forward pass on the batch
        loss = criterion(output, labels)  # compute the loss
        loss.backward()
        optimizer.step()  # update the weights

        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(dataLoaders[i])}"
              )  # print the average loss for the epoch

import helper  # import the helper library

images, labels = next(iter(dataLoaders[i]))  # load a batch of images

img = images[0].view(1, 784)  # flatten one image
# Turn off gradients to speed up this part
with torch.no_grad():
    logps = model(img)

# The network outputs log-probabilities; take the exponential to get probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)  # display the image and its class probabilities
Example no. 10
    def load_checkpoint(self, name_file="checkpoint.pt"):
        checkpoint = torch.load(name_file)
        self.load_state_dict(checkpoint)



import helper
model = MyNetwork()
model.load_checkpoint("checkpoint_last.pt")
#checkpoint = torch.load("checkpoint_last.pt")
#model.load_state_dict(checkpoint)

images, labels = next(iter(validLoader))
img = images[1, :]
print(img.shape)
ps = torch.exp(model(images[1, :].unsqueeze(0)))
# Plot the image and probabilities
#helper.imshow(img)
t = [
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P',
    'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y'
]  # 24 letter classes (no J or Z)
#t =list(reversed(t))
_, top = ps.topk(1, dim=1)
print(" expected = ", t[labels[1].item()], " result=", t[top.item()])
helper.view_classify(img, ps, version='ALPHABET', title=t[labels[1].item()])

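
The load_checkpoint method above restores a raw state_dict from disk; a matching save method, assuming the same file convention, might look like:

    def save_checkpoint(self, name_file="checkpoint.pt"):
        # Persist only the weights, mirroring what load_checkpoint expects
        torch.save(self.state_dict(), name_file)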
Example no. 11
print2(model2.hidden1.weight, model2.hidden1.bias)

########################################################################
########################################################################
########################################################################
# Using model
img, labels = next(iter(iterloader))

img = img.view(img.shape[0], 1, -1)

# compute the forward pass
img0 = 0
output = model2.forward(img[img0, :])

img = img[img0]
helper.view_classify(img.view(1, 28, 28), output)
plt.show()

########################################################################
########################################################################
########################################################################
# Using sequential model
# create the feed-forward network
model = nn.Sequential(
    nn.Linear(784, 128),
    nn.ReLU(),
    nn.Linear(128, 86),
    nn.ReLU(),
    nn.Linear(86, 64),
    nn.ReLU(),
    nn.Linear(64, 10),
)
Example no. 12
        train_loss += loss.item()
    else:
        with torch.no_grad():
            model.eval()  # put in test mode
            for images, labels in testloader:
                ps = model(images)
                loss = criterion(ps, labels)
                equals = (ps.max(dim=1).indices == labels)
                accuracy += torch.mean(equals.float())
                test_loss += loss.item()
            print('Epoch: {}'.format(e))
            print('Test loss: {}'.format(test_loss / len(testloader)))
            print('Running Loss: {}'.format(train_loss / len(trainloader)))
            print(f'Accuracy: {accuracy.item()/len(testloader)*100}%')
            # Note: train_loss and test_loss are scalars here; to draw real
            # curves, collect the per-epoch values into lists first.
            plt.plot(train_loss, label='training loss')
            plt.plot(test_loss, label='test loss')

            model.train()  # Go back to training mode

plt.show()

model.eval()
dataiter = iter(testloader)
images, labels = next(dataiter)

# TODO: Calculate the class probabilities (softmax) for img
ps = torch.exp(model(images[1]))

# Plot the image and probabilities
helper.view_classify(images[1], ps, version='Fashion')
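
For reference, a small sketch of the list-based bookkeeping that would make the plt.plot calls above draw actual curves (train_losses and test_losses are hypothetical names):

# Hypothetical: accumulate per-epoch averages during training, plot once at the end
train_losses, test_losses = [], []
# inside the epoch loop:
#     train_losses.append(train_loss / len(trainloader))
#     test_losses.append(test_loss / len(testloader))
plt.plot(train_losses, label='training loss')
plt.plot(test_losses, label='test loss')
plt.legend()
plt.show()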
Example no. 13
        if step % 32 == 0:
            # Turn off gradients for validation, saves memory and computations
            with torch.no_grad():
                test_loss, accuracy = validate(model, testloader, criterion)

            print("Epoch: {}/{}.. ".format(epoch + 1, EPOCHS),
                  "Training Loss: {:.3f}.. ".format(running_loss / 32),
                  "Test Loss: {:.3f}.. ".format(test_loss / len(testloader)),
                  "Test Accuracy: {:.3f}".format(accuracy / len(testloader)))

            running_loss = 0

# test network
dataiter = iter(testloader)
images, _ = next(dataiter)
img = images[0]
# Convert 2D image to 1D vector
img = img.view(1, 784)

# Calculate the class probabilities (softmax) for img
with torch.no_grad():
    output = model.forward(img)

class_probabilities = torch.exp(output)

# Plot the image and probabilities
helper.view_classify(img.view(1, 28, 28),
                     class_probabilities,
                     version='Fashion')
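
The validate helper called in the loop above is not shown on this page. A minimal sketch consistent with how it is used (returning loss and accuracy summed over the loader, later divided by len(testloader)):

def validate(model, testloader, criterion):
    # Hypothetical reconstruction: accumulate loss and accuracy over the test set
    test_loss = 0
    accuracy = 0
    for images, labels in testloader:
        images = images.view(images.shape[0], -1)
        log_ps = model(images)
        test_loss += criterion(log_ps, labels).item()
        ps = torch.exp(log_ps)
        top_p, top_class = ps.topk(1, dim=1)
        equals = top_class == labels.view(*top_class.shape)
        accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
    return test_loss, accuracy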
Example no. 14
            _, top_class = ps.topk(1, dim=1)
            eq = top_class == l.view(*top_class.shape)
            accuracy += torch.mean(eq.type(torch.FloatTensor))

    # calculating average losses
    train_loss = train_loss / len(train_loader)
    valid_loss = valid_loss / len(valid_loader)

    print(
        'Epoch {}: Training loss:{:.4f} Validation loss:{:.4f} Accuracy:{:.4f}'
        .format(e, train_loss, valid_loss, accuracy / len(valid_loader)))

    if valid_loss < valid_loss_min:
        print(
            'Validation loss decreased   {:.4f}------>{:.4f}   Saving model...'
            .format(valid_loss_min, valid_loss))
        torch.save(model.state_dict(), 'model_MNIST')
        valid_loss_min = valid_loss

print("Total time on CPU : " + str(time.time() - start))

# =============================================================================
# testing
# =============================================================================
images, labels = next(iter(test_loader))
model.cpu()
img = images[0].view(1, 28 * 28)
ps = model(img)
score = torch.exp(ps)
helper.view_classify(img.view(1, 28, 28), score)

# A second block from the same example, inside its training loop:
        # Forward and backward passes
        output = model.forward(images.to(device))
        loss = criterion(output, labels.to(device))
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        if steps % print_every == 0:
            print("Epoch: {}/{}... ".format(e + 1, epochs),
                  "Loss: {:.4f}".format(running_loss / print_every))

            running_loss = 0

images, labels = next(iter(train_loader))

images = images.to(device)
labels = labels.to(device)

# check out trained network predictions.
img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
    logits = model.forward(img)

# Output of the network are logits, need to take softmax for probabilities
ps = F.softmax(logits, dim=1)

helper.view_classify(img.view(1, 28, 28).cpu(), ps.cpu())
Example no. 16
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")

# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

import helper

# Test out your network!

dataiter = iter(testloader)
images, labels = next(dataiter)
img = images[0]
# Convert 2D image to 1D vector
img = img.resize_(1, 784)

# TODO: Calculate the class probabilities (softmax) for img
# turn off gradient b/c we don't need it
with torch.no_grad():
    logps = f_nn(img)

# probability = exp(log-probability)
ps = torch.exp(logps)

# Plot the image and probabilities
helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion')
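
A note on the resize_ calls used here: resize_ mutates the tensor in place and can silently change the number of elements, while view returns a reshaped view and raises an error on a size mismatch, which makes mistakes easier to catch. A tiny sketch of the safer equivalent:

img = images[0]          # shape [1, 28, 28]
flat = img.view(1, 784)  # same storage, new shape; raises if the counts differ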
Example no. 17
        # Backward propagation
        loss.backward()

        optimizer.step()

        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss / len(trainloader)}")

# Test model with predictions
while True:
    # Get image from dataset
    images, labels = next(iter(trainloader))
    # Flat the image of 28x28 to a vector of 784 values
    img = images[0].view(1, 784)

    # Pass image to model to predict number
    with torch.no_grad():
        logps = model(img)

    ps = torch.exp(logps)

    # Visualize results
    hp.view_classify(img.view(1, 28, 28), ps)

    # Ask the user whether to predict another number
    ans = input("Would you like to predict again? (Yes/No): ")
    if ans.lower().startswith("n"):
        break
Example no. 18
                      nn.LogSoftmax(dim=1))

criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)

epochs = 5

for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:

        images = images.view(images.shape[0], -1)

        optimizer.zero_grad()
        output = model.forward(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        #print('Gradient -', model[0].weight.grad)
        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")

images, labels = next(iter(trainloader))
images = images[0].view(1, 784)
with torch.no_grad():
    log_ps = model.forward(images)

# The model ends in LogSoftmax, so exponentiate its output to recover probabilities
probs = torch.exp(log_ps)
helper.view_classify(images.view(1, 28, 28), probs)
Example no. 19
for epoch in range(EPOCH):
    for step, (x, y) in enumerate(train_loader):
        b_x = x.view(-1, 28, 28)  # reshape x to (batch, time_step, input_size)
        b_y = y

        output = rnn_model(b_x)
        loss = loss_func(output, b_y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if step % 50 == 0:
            test_output = rnn_model(test_x)
            pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
            accuracy = sum(pred_y == test_y) / test_y.size
            print('step: %d | test accuracy: %.2f' % (step, accuracy))

test_output = rnn_model(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()

import helper
import matplotlib.pyplot as plt

for index in range(10):

    ps = torch.zeros([1, 10], dtype=torch.float64)
    print(pred_y[index])
    ps[0, pred_y[index]] = 1
    print(ps)
    helper.view_classify(test_x[index], ps)
    plt.show()
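
The loop above fakes a one-hot probability vector so view_classify has something to draw. A sketch that displays the RNN's actual softmax probabilities instead, assuming the same rnn_model, test_x, and test_output as above:

import torch.nn.functional as F

for index in range(10):
    # Real class probabilities from the RNN's output scores
    ps = F.softmax(test_output[index:index + 1], dim=1)
    helper.view_classify(test_x[index], ps)
    plt.show()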
Example no. 20
    checkpoint = torch.load(filepath)
    model = Classifier(checkpoint['input_size'], checkpoint['filters'],
                       checkpoint['output_size'], checkpoint['dropout'])
    model.load_state_dict(checkpoint['state_dict'])
    return model


model = load_checkpoint(model_save_name)
model.to(device)

# perform inference
model.eval()
dataiter = iter(testloader)
images, labels = next(dataiter)
images, labels = images.to(device), labels.to(device)

img = images[0]
img = img.view(1, 784)

output = model.forward(img)
probs = F.softmax(output, dim=1)
top_prob, top_class = probs.topk(1, dim=1)
print("Result:\t", "digit:", top_class.item(),
      "\tprob: {:.4f}".format(top_prob.item()))

# Plot the image and probabilities
import helper
img = img.cpu()
probs = probs.cpu()
helper.view_classify(img.view(1, 28, 28), probs, version='MNIST')
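
For completeness, a hypothetical counterpart that writes the checkpoint load_checkpoint expects; the architecture values here are assumptions, not taken from this page:

# Save a checkpoint with the keys load_checkpoint reads (values are illustrative)
checkpoint = {'input_size': 784,
              'filters': [256, 128],   # assumed hidden-layer sizes
              'output_size': 10,
              'dropout': 0.2,          # assumed dropout probability
              'state_dict': model.state_dict()}
torch.save(checkpoint, model_save_name)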

# A second block from the same example, initializing and running an untrained network:
# Set biases to all zeros
model.hidden1.bias.data.fill_(0)
# sample from random normal with standard dev = 0.01
model.hidden1.weight.data.normal_(std=0.01)

###  FORWARD PASS
# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size

# Forward pass through the network
img_idx = 0  # image ID
ps = model.forward(images[img_idx, :])  # output tensor with class probabilities

img = images[img_idx]  # this selects one image from the batch
helper.view_classify(img.view(1, 28, 28),
                     ps)  # command for a nice display and comparison
''' As you can see above, our network has basically no idea what this digit is. 
It's because we haven't trained it yet, all the weights are random!'''

### NN.SEQUENTIAL
'''nn.Sequential is a convenient way to build networks like this, where a tensor
is passed sequentially through a series of operations (see the documentation).'''

# Hyperparameters for our network
input_size = 784
hidden_sizes = [128, 64]
output_size = 10

# Build a feed-forward network
model = nn.Sequential(nn.Linear(input_size, hidden_sizes[0]), nn.ReLU(),
                      nn.Linear(hidden_sizes[0], hidden_sizes[1]), nn.ReLU(),
                      nn.Linear(hidden_sizes[1], output_size),
                      nn.Softmax(dim=1))
Example no. 22
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        log_ps = model(images)
        loss = criterion(log_ps, labels)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")

# %%
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

# Test out your network!

dataiter = iter(testloader)
images, labels = next(dataiter)
img = images[1]

# TODO: Calculate the class probabilities (softmax) for img
ps = torch.exp(model(img))

# Plot the image and probabilities
helper.view_classify(img, ps, version='Fashion')

# A second block from the same example, loading a checkpoint for inference:
    checkpoint = torch.load(filepath)
    model = Classifier(checkpoint['input_size'], checkpoint['filters'],
                       checkpoint['output_size'], checkpoint['dropout'])
    model.load_state_dict(checkpoint['state_dict'])
    return model


model = load_checkpoint(model_save_name)
model.to(device)

# perform inference
model.eval()
dataiter = iter(testloader)
images, labels = next(dataiter)
images, labels = images.to(device), labels.to(device)

img = images[0]
img = img.view(1, 784)

output = model.forward(img)
probs = F.softmax(output, dim=1)
top_prob, top_class = probs.topk(1, dim=1)
print("Result:\t", "class:", top_class.item(),
      "\tprob: {:.4f}".format(top_prob.item()))

# Plot the image and probabilities
import helper
img = img.cpu()
probs = probs.cpu()
helper.view_classify(img.view(1, 28, 28), probs, version='Fashion')
Example no. 24
        x = F.relu(x)
        x = self.fc2(x)
        x = F.relu(x)
        x = self.fc3(x)
        x = F.softmax(x, dim=1)

        return x


model = Network()

print(model.fc1.weight)
print(model.fc1.bias)
model.fc1.bias.data.fill_(0)
print(model.fc1.bias)

# Grab some data
dataiter = iter(trainloader)
images, labels = next(dataiter)

# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(64, 1, 784)
# or images.resize_(images.shape[0], 1, 784) to automatically get batch size

# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx, :])

img = images[img_idx]
helper.view_classify(img.view(1, 28, 28), ps)
Example no. 25
    running_loss = 0
    for images, labels in trainloader:
        images = images.view(images.shape[0], -1)

        optimizer.zero_grad()
        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")

#---
# USE MODEL
#---
# Grab some data
dataiter = iter(trainloader)
images, labels = next(dataiter)

# Resize images into a 1D vector, new shape is (batch size, color channels, image pixels)
images.resize_(32, 1, 2700)
# or images.resize_(images.shape[0], 1, 2700) to automatically get batch size

# Forward pass through the network
img_idx = 0
ps = model.forward(images[img_idx, :])

img = images[img_idx]
helper.view_classify(img.view(3, 30, 30), ps)  # 1x2700 values reshape to 3x30x30
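
Every example on this page ultimately calls helper.view_classify, whose source is not shown. A sketch of a compatible implementation, reconstructed from how these snippets call it (an image tensor plus a probability vector, with an optional version label); the ALPHABET variant used in Example no. 10 would need its own label list:

import numpy as np
import matplotlib.pyplot as plt

def view_classify(img, ps, version="MNIST"):
    """Show an image next to a bar chart of its predicted class probabilities."""
    ps = ps.detach().cpu().numpy().squeeze()

    fig, (ax1, ax2) = plt.subplots(figsize=(6, 9), ncols=2)
    ax1.imshow(img.detach().cpu().numpy().squeeze())
    ax1.axis('off')
    ax2.barh(np.arange(len(ps)), ps)
    ax2.set_yticks(np.arange(len(ps)))
    if version == "MNIST":
        ax2.set_yticklabels(np.arange(10))
    elif version == "Fashion":
        ax2.set_yticklabels(['T-shirt/top', 'Trouser', 'Pullover', 'Dress',
                             'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag',
                             'Ankle boot'], size='small')
    ax2.set_title('Class Probability')
    ax2.set_xlim(0, 1.1)
    plt.tight_layout()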