Example #1
0
def train(train_data_iter, test_data_iter, net, loss, batch_size, learn_rate,
          num_epochs):
    """Train `net` with plain SGD and print test accuracy after every epoch.

    Args:
        train_data_iter: iterable of (features, labels) training mini-batches.
        test_data_iter: iterable of (features, labels) evaluation mini-batches.
        net: the model being optimized.
        loss: loss callable mapping (predictions, labels) to a tensor.
        batch_size: kept for interface compatibility; not used in this body.
        learn_rate: SGD learning rate.
        num_epochs: number of full passes over the training data.
    """
    sgd = optim.SGD(net.parameters(), learn_rate)
    for epoch in range(num_epochs):
        for features, labels in train_data_iter:
            batch_loss = loss(net(features), labels).sum()
            # Standard PyTorch update: clear grads, backprop, step.
            sgd.zero_grad()
            batch_loss.backward()
            sgd.step()
        # evaluate_accuracy_v2 is provided elsewhere in the project.
        acc = evaluate_accuracy_v2(test_data_iter, net)
        print('epoch', epoch, 'acc', acc)


if '__main__' == __name__:
    # Hyper-parameters for softmax regression on Fashion-MNIST.
    batch_size = 256
    num_inputs = 784    # 28 * 28 grey-scale image, flattened
    num_outputs = 10    # ten clothing categories
    num_epochs = 5
    learning_rate = 0.1

    mnist_train, mnist_test = get_fashion_MNST()
    train_iter = data_loader(mnist_train, 5, batch_size)
    test_iter = data_loader(mnist_test, 5, batch_size)

    # Flatten each image, then apply one linear (softmax-regression) layer.
    net = nn.Sequential(
        OrderedDict([('FlattenLayer', FlattenLayer()),
                     ('linear', nn.Linear(num_inputs, num_outputs))]))
    # Small Gaussian init for the weights, zeros for the bias.
    init.normal_(net.linear.weight, mean=0, std=0.01)
    init.constant_(net.linear.bias, val=0)

    train(train_iter, test_iter, net, nn.CrossEntropyLoss(), batch_size,
          learning_rate, num_epochs)
if '__main__' == __name__:
    import torch
    from torch import nn
    from utility.model_train import train_device
    from utility.load_fashion_MNIST import get_fashion_MNST
    from utility.data_loader import data_loader
    from utility.flatten_layer import FlattenLayer

    # Hyper-parameters for this run.
    batch_size = 256
    lr = 0.001
    num_epochs = 5

    # Wrap the Fashion-MNIST splits in data loaders.
    train_MNST, test_MNST = get_fashion_MNST()
    train_data_iter = data_loader(train_MNST, 5, batch_size)
    test_data_iter = data_loader(test_MNST, 5, batch_size)

    # Convolutional network with batch normalization after every
    # conv/linear layer (LeNet-style layout — definition continues below).
    net = nn.Sequential(
        nn.Conv2d(1, 6, 5),  # in_channels, out_channels, kernel_size
        nn.BatchNorm2d(6),
        nn.Sigmoid(),
        nn.MaxPool2d(2, 2),  # kernel_size, stride
        nn.Conv2d(6, 16, 5),
        nn.BatchNorm2d(16),
        nn.Sigmoid(),
        nn.MaxPool2d(2, 2),
        FlattenLayer(),
        nn.Linear(16 * 4 * 4, 120),  # 16 channels of 4x4 after two pools
        nn.BatchNorm1d(120),
        nn.Sigmoid(),
        nn.Linear(120, 84),
        nn.BatchNorm1d(84),
Example #3
0
    # Four dense blocks, four conv layers in each (DenseNet-style stack).
    num_cov_in_block = [4, 4, 4, 4]
    temp_input_channel = number_channel
    for index, cov_num in enumerate(num_cov_in_block):
        one_dense_block = DenseBlock(cov_num, temp_input_channel, growth_rate)
        # Each dense block grows the channel count; track it for the next stage.
        temp_input_channel = one_dense_block.output_channel
        net.add_module('dense_block_' + str(index), one_dense_block)
        if index != len(num_cov_in_block) - 1:
            # Between dense blocks, a transition block halves the channels.
            one_transition_block = TransitionBlock(temp_input_channel,
                                                   temp_input_channel // 2)
            net.add_module('transition_block_' + str(index),
                           one_transition_block)
            temp_input_channel = temp_input_channel // 2
    # Classification head: BN -> ReLU -> global average pool -> flatten -> FC.
    # (The stray trailing line-continuation backslash after the final
    # add_module call has been removed.)
    net.add_module('BN', nn.BatchNorm2d(temp_input_channel))
    net.add_module('Relu', nn.ReLU())
    net.add_module('GlobalAvgPool', GlobalAvgPool())
    net.add_module('FlattenLayer', FlattenLayer())
    net.add_module('linear', nn.Linear(temp_input_channel, 10))

    print('net', net)

    # Hyper-parameters for this run.
    batch_size = 128
    lr = 0.001
    num_epochs = 5

    # 112x112 inputs keep the DenseNet memory footprint manageable.
    train_mnist, test_mnist = get_fashion_MNST(resize=112)
    train_iter = data_loader(train_mnist, 5, batch_size)
    test_iter = data_loader(test_mnist, 5, batch_size)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    train_device(train_iter, test_iter, net, nn.CrossEntropyLoss(), lr,
                 num_epochs, device, 'Adam')
Example #4
0
                                   padding=1)
        net.add_module('final_nin_block', block)
        net.add_module('global_avg_pool', GlobalAvgPool())
        net.add_module('flatten_layer', FlattenLayer())
        return net


if '__main__' == __name__:
    import torch
    from torch import nn
    from utility.load_fashion_MNIST import get_fashion_MNST
    from utility.data_loader import data_loader
    from utility.model_train import train_device

    # Hyper-parameters for training NIN on Fashion-MNIST.
    batch_size = 64
    num_epoch = 5
    lr = 0.002

    # Prefer the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # NIN expects AlexNet-sized inputs, so upscale Fashion-MNIST to 224x224.
    train_MNST, test_MNST = get_fashion_MNST(resize=224)
    train_data_iter = data_loader(train_MNST, 5, batch_size)
    test_data_iter = data_loader(test_MNST, 5, batch_size)

    # (in_channels, out_channels, kernel_size, stride, padding) per NIN block.
    nin_blocks = [(1, 96, 11, 4, 0), (96, 256, 5, 1, 2), (256, 384, 3, 1, 1)]
    net = NIN(nin_blocks, 10)
    print(net)

    train_device(train_data_iter, test_data_iter, net, nn.CrossEntropyLoss(),
                 lr, num_epoch, device, 'Adam')
import torch
from torch.utils.data import DataLoader
import random
import sys
def data_iter(batch_size, feature, label):
    """Yield shuffled mini-batches of (feature, label).

    Args:
        batch_size: samples per batch; the final batch may be smaller.
        feature: tensor of samples, indexed along dim 0.
        label: tensor of labels aligned with `feature` along dim 0.

    Yields:
        (feature_batch, label_batch) tensor pairs covering every sample
        exactly once per pass, in random order.
    """
    num_samples = len(feature)
    indices = list(range(num_samples))
    random.shuffle(indices)  # in-place shuffle gives a random epoch order
    for start in range(0, num_samples, batch_size):
        # Slicing already clamps at num_samples, so no explicit min() needed;
        # torch.tensor(..., dtype=torch.long) replaces the legacy LongTensor
        # constructor.
        batch_idx = torch.tensor(indices[start:start + batch_size],
                                 dtype=torch.long)
        yield (feature.index_select(0, batch_idx),
               label.index_select(0, batch_idx))

def data_iter_upgrade(batch_size, data_set):
    """Wrap `data_set` in a shuffling DataLoader.

    Args:
        batch_size: number of samples per batch.
        data_set: a torch Dataset (anything DataLoader accepts).

    Returns:
        A DataLoader that reshuffles each epoch. On Windows it uses no
        worker subprocesses (multiprocessing data loading is fragile
        there); on other platforms it uses 4 workers.
    """
    if sys.platform.startswith('win'):
        num_workers = 0  # 0 means: load in the main process, no extra workers
    else:
        num_workers = 4
    # Renamed the local (was `data_iter`), which shadowed the sibling
    # function of the same name in this module.
    loader = DataLoader(data_set, batch_size, shuffle=True,
                        num_workers=num_workers)
    return loader

if '__main__' == __name__:
    from utility.load_fashion_MNIST import get_fashion_MNST

    # Smoke test: load Fashion-MNIST, wrap both splits in DataLoaders,
    # and dump the label batches of the test split.
    train_split, test_split = get_fashion_MNST()
    train_iter = data_iter_upgrade(256, train_split)
    test_iter = data_iter_upgrade(256, test_split)
    for _, batch_labels in test_iter:
        print(batch_labels)