Example 1
import torch
import torch.nn as nn
import torch.optim as optim

# Net and load_data are assumed to be defined elsewhere in the project;
# a possible sketch of both follows this example.


def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    net = Net().to(device)

    criterion = nn.CrossEntropyLoss()
    # optimizer = optim.SGD(net.parameters(), lr=.001, momentum=.9)
    optimizer = optim.Adam(net.parameters())
    trainloader, _ = load_data()

    # Train
    for epoch in range(20):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data[0].to(device), data[1].to(device)

            optimizer.zero_grad()

            outputs = net(inputs)  # forward
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # accumulate and report the average loss once per epoch
            # (938 batches per epoch assumes MNIST with batch size 64)
            running_loss += loss.item()
            if i % 938 == 937:
                print(f'Epoch: {epoch}\t Loss: {running_loss / 938:.4f}')
                running_loss = 0.0
    print('Finished Training\n')
    filename = input('Enter file name for model: ')
    # saves the entire module via pickle; see the state_dict note after Example 3
    torch.save(net, filename)
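
Examples 1 and 3 call a Net module and a load_data helper that are not shown. The sketch below is one plausible MNIST implementation, assuming a small CNN and a batch size of 64 (which matches the 938 batches per epoch above); every name and hyperparameter here is an assumption, not the original author's code.

import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms


class Net(nn.Module):
    """Hypothetical small CNN for 28x28 grayscale MNIST images."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), 2)   # 28x28 -> 14x14
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)   # 14x14 -> 7x7
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        return self.fc2(x)                           # raw logits for CrossEntropyLoss


def load_data(batch_size=64):
    """Hypothetical helper returning (trainloader, testloader) for MNIST."""
    transform = transforms.ToTensor()
    trainset = torchvision.datasets.MNIST('./data', train=True,
                                          download=True, transform=transform)
    testset = torchvision.datasets.MNIST('./data', train=False,
                                         download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
                                              shuffle=True)
    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
                                             shuffle=False)
    return trainloader, testloader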
Example 2
import numpy as np
import tensorflow as tf

import mnist_data  # assumed: project-local data module

# LogisticRegression is assumed to be defined elsewhere in the project;
# a possible sketch follows this example.


def train(learning_rate=0.1):
    # load the training data
    train_set, valid_set, test_set = mnist_data.load_data()

    train_X = train_set[0]
    train_y = train_set[1]
    # one-hot encode the integer labels into an (N, 10) matrix
    y_ = np.zeros((train_y.shape[0], 10))
    y_[np.arange(train_y.shape[0]), train_y] = 1

    # computation graph (TensorFlow 1.x style)
    x = tf.placeholder('float', [None, 784])
    y = tf.placeholder('float', [None, 10])
    logistic = LogisticRegression(x, 784, 10)
    # y = logistic.output
    init = tf.global_variables_initializer()  # initialize_all_variables() is deprecated

    # loss function
    loss = logistic.cross_entropy(y)
    # optimization; train_op avoids shadowing the enclosing train() function
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    train_op = optimizer.minimize(loss)

    with tf.Session() as sess:
        sess.run(init)
        for i in range(1000):
            r = sess.run([train_op, loss], feed_dict={x: train_X, y: y_})
            print(r[1])  # loss after this full-batch step
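
Example 2 relies on a LogisticRegression class with an output attribute and a cross_entropy method. A minimal TF1-style sketch consistent with those calls (the class body is an assumption, not the original code):

import tensorflow as tf


class LogisticRegression:
    """Hypothetical softmax regression layer: inputs of width n_in,
    class probabilities of width n_out."""

    def __init__(self, inputs, n_in, n_out):
        self.W = tf.Variable(tf.zeros([n_in, n_out]), name='W')
        self.b = tf.Variable(tf.zeros([n_out]), name='b')
        self.output = tf.nn.softmax(tf.matmul(inputs, self.W) + self.b)

    def cross_entropy(self, y):
        # mean negative log-likelihood of the one-hot targets y;
        # the small epsilon guards against log(0)
        return -tf.reduce_mean(
            tf.reduce_sum(y * tf.log(self.output + 1e-10), axis=1))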
Example 3
import torch

# load_data is assumed to be the same helper sketched after Example 1.


def main():
    filename = input('Enter file name for model: ')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # map_location keeps a GPU-saved model loadable on a CPU-only machine
    net = torch.load(filename, map_location=device)
    net.eval()  # disable dropout/batch-norm training behaviour for evaluation
    _, testloader = load_data()
    # Test
    print('Starting Testing \n')
    correct = 0
    total = 0
    with torch.no_grad():
        for data in testloader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = net(images)
            # index of the highest logit is the predicted class
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print(f'Accuracy: {100 * correct / total:.2f}%')
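
Example 1 saves the whole module with torch.save(net, filename), which pickles the class itself and ties the checkpoint to the exact source layout. The usual, more portable idiom is to save only the parameters; a minimal sketch assuming the same Net class:

# save only the learned parameters
torch.save(net.state_dict(), filename)

# later: rebuild the architecture, then restore the weights
net = Net().to(device)
net.load_state_dict(torch.load(filename, map_location=device))
net.eval()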
Example 4
import numpy as np
import tensorflow as tf

import mnist_data  # assumed: Keras-style loader returning ((x, y), (x, y)) tuples

LABEL_DIMENSIONS = 10  # assumed: ten digit classes for MNIST


def get_input():
    (train_images, train_labels), (test_images,
                                   test_labels) = mnist_data.load_data()
    TRAINING_SIZE = len(train_images)
    TEST_SIZE = len(test_images)

    # scale pixel values to [0, 1]
    train_images = np.asarray(train_images, dtype=np.float32) / 255

    # Convert the train images and add channels
    train_images = train_images.reshape((TRAINING_SIZE, 28, 28, 1))

    test_images = np.asarray(test_images, dtype=np.float32) / 255
    # Convert the test images and add channels
    test_images = test_images.reshape((TEST_SIZE, 28, 28, 1))

    train_labels = tf.keras.utils.to_categorical(train_labels,
                                                 LABEL_DIMENSIONS)
    test_labels = tf.keras.utils.to_categorical(test_labels, LABEL_DIMENSIONS)

    # Cast the labels to floats, needed later
    train_labels = train_labels.astype(np.float32)
    test_labels = test_labels.astype(np.float32)

    return train_images, train_labels, test_images, test_labels
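
A possible way to consume get_input with a minimal tf.keras model; the architecture below is illustrative only, not taken from the source:

train_images, train_labels, test_images, test_labels = get_input()

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(32, 3, activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(LABEL_DIMENSIONS, activation='softmax'),
])
# categorical_crossentropy matches the one-hot float labels built above
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=1,
          validation_data=(test_images, test_labels))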
Example 5
#!/usr/bin/env python
"""MNIST with TFLearn."""

from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
# from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from double_cnn import conv_2d_double

# Data loading and preprocessing
# import tflearn.datasets.mnist as mnist
from mnist_data import load_data
X, Y, testX, testY = load_data(one_hot=True)
X = X.reshape([-1, 28, 28, 1])
testX = testX.reshape([-1, 28, 28, 1])

# Building convolutional network
network = input_data(shape=[None, 28, 28, 1], name='input')
network = conv_2d_double(network, 32, 3, activation='relu')
# network = conv_2d(network, 32, 3, activation='relu')
network = max_pool_2d(network, 2)
# network = conv_2d(network, 32, 3, activation='relu')
# network = max_pool_2d(network, 2)
# network = conv_2d(network, 32, 3, activation='relu')
# network = max_pool_2d(network, 2)
# network = conv_2d(network, 50, 3, activation='relu')
# network = tflearn.layers.conv.global_avg_pool(network, name='gap')
network = fully_connected(network, 256, activation='tanh')
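
The conv_2d_double layer above comes from the project-local double_cnn module, whose source is not shown. One plausible reading, sketched here as two stacked conv_2d layers with identical settings (an assumption suggested by the name, not the actual module):

from tflearn.layers.conv import conv_2d


def conv_2d_double(incoming, nb_filter, filter_size, activation='relu'):
    """Hypothetical helper: apply two consecutive conv_2d layers with the
    same filter count, filter size, and activation."""
    net = conv_2d(incoming, nb_filter, filter_size, activation=activation)
    return conv_2d(net, nb_filter, filter_size, activation=activation)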