Code example #1
0
def train(batch_size=10, learning_rate=1e-2, epochs=10, log_interval=100):
    """Train NNet on the CSV dataset with SGD and negative log-likelihood loss.

    Args:
        batch_size: samples per mini-batch.
        learning_rate: SGD learning rate.
        epochs: number of full passes over the dataset.
        log_interval: print loss every `log_interval` batches.

    Side effects: reads ./data/dataset.csv and prints training progress.
    """
    transformations = transforms.Compose([transforms.ToTensor()])
    custom_dataset = MyCustomDataset('./data/dataset.csv', transformations)

    # Define data loader.
    # BUG FIX: honor the batch_size argument (it was hard-coded to 10).
    train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                               batch_size=batch_size,
                                               shuffle=False)

    nnet = NNet()
    # Stochastic gradient descent optimizer with momentum.
    optimizer = optim.SGD(nnet.parameters(), lr=learning_rate, momentum=0.9)
    # NLLLoss expects log-probabilities from the model's forward pass.
    criterion = nn.NLLLoss()
    for epoch in range(epochs):
        # NOTE: the deprecated Variable wrapper was dropped — plain tensors
        # carry autograd state since PyTorch 0.4.
        for batch_idx, (data, target) in enumerate(train_loader):
            optimizer.zero_grad()
            net_out = nnet(data)
            loss = criterion(net_out, target)
            loss.backward()
            optimizer.step()
            if batch_idx % log_interval == 0:
                # BUG FIX: loss.data[0] was removed in PyTorch 0.4; use loss.item().
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))
Code example #2
0
File: plnn.py  Project: htma/nn-trainer
def main():
    """Train PLNN on the CSV dataset while recording each distinct
    activation state the network passes through.

    Side effects: reads ./data/dataset.csv and prints the state dictionary.
    """
    D_in, D_out = 2, 2
    H1, H2, H3 = 4, 16, 2
    model = PLNN(D_in, H1, H2, H3, D_out)
    criterion = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)

    print(model)

    train_loader = torch.utils.data.DataLoader(
        MyCustomDataset('./data/dataset.csv',
                        transform=transforms.Compose([
                            transforms.ToTensor()])),
        batch_size=10, shuffle=False)

    # state_dict maps a running id to an activation state, where each state is
    # the integer value of one sample's binary activation pattern.
    state_dict = dict()
    state_id = 0

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        states, predictions = model(data)
        for i in range(states.shape[0]):
            # BUG FIX: index row i (was states[0,:], which re-read the first
            # sample's state for every i).
            # NOTE(review): assumes each element of a row stringifies to a
            # '0'/'1' digit — confirm the type model() returns for `states`.
            state = int(''.join(map(str, states[i, :])), 2)
            if state not in state_dict.values():
                # BUG FIX: was `id += 1` / `state_dict[id] = state` — `id` is
                # the builtin and was never initialized, so the first new state
                # raised. Use the state_id counter declared above.
                state_id += 1
                state_dict[state_id] = state

        print('state dict is:  ', state_dict)
        loss = criterion(predictions, target)
        loss.backward()
        optimizer.step()
Code example #3
0
File: main.py  Project: htma/nn-trainer
def main():
    """Train FullyConnectedNet on the CSV dataset with SGD and NLL loss.

    Reads hyperparameters from the module-level `args` namespace
    (args.learning_rate, args.max_epoch) and prints progress every 10 batches.
    """
    train_loader = torch.utils.data.DataLoader(
        MyCustomDataset('./data/dataset.csv',
                        transform=transforms.Compose([
                            transforms.ToTensor()])),
        batch_size=200,
        shuffle=False)

    # 2-D inputs and outputs; hidden layers of widths 4, 16 and 2.
    D_in, D_out = 2, 2
    H1, H2, H3 = 4, 16, 2
    model = FullyConnectedNet(D_in, H1, H2, H3, D_out)

    optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9)
    # NLLLoss expects log-probabilities from the model's forward pass.
    criterion = nn.NLLLoss()

    # Run the main training loop.
    for epoch in range(args.max_epoch):
        # NOTE: the deprecated Variable wrapper was dropped — plain tensors
        # carry autograd state since PyTorch 0.4.
        for batch_idx, (data, labels) in enumerate(train_loader):
            optimizer.zero_grad()
            predictions = model(data)
            loss = criterion(predictions, labels)
            loss.backward()
            optimizer.step()

            if batch_idx % 10 == 0:
                # BUG FIX: loss.data[0] raises on PyTorch >= 0.4; use loss.item().
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data),
                    len(train_loader.dataset),
                    100. * batch_idx / len(train_loader),
                    loss.item()))
Code example #4
0
        plot_line(a, b, c)


def plot_line(a, b, c):
    """Draw the line a*x + b*y + c = 0 over x in [-1.5, 1.5].

    Solves for y = -(a*x + c) / b, so b must be non-zero.
    """
    sample_x = np.linspace(-1.5, 1.5, 1000)
    sample_y = (a * sample_x + c) / -b
    plt.plot(sample_x, sample_y)


if __name__ == '__main__':
    # Rebuild the network and restore trained weights saved by training.
    model = FourLayerFNN()
    model.load_state_dict(torch.load('syn_model.pkl'))

    # Load test data (same CSV as the training data), one sample per batch.
    test_loader = torch.utils.data.DataLoader(MyCustomDataset(
        './data/dataset.csv',
        transform=transforms.Compose([transforms.ToTensor()])),
                                              batch_size=1,
                                              shuffle=True)
    #    test(model, test_loader)
    images = test_loader.dataset.images
    #    print(image.shape)
    #  _, outputs = model(image)
    #   _, prediction = torch.max(outputs.data, 1)
    #     print(prediction)
    #    check_states(model, image)

    # For each sample: flatten to a (-1, 2) row tensor and export the
    # inequality coefficients induced by the model for that input.
    # NOTE(review): `calculate_ineuqality_coefficients` is misspelled
    # ("ineuqality") but is presumably defined elsewhere under exactly that
    # name — renaming only this call site would break it; fix both together.
    for image in images:
        image = image.view(-1, 2)
        coefficients_file_name = calculate_ineuqality_coefficients(
            model, image)
Code example #5
0
            axs[i][j].set_ylim((-1.5, 1.5))
            axs[i][j].set_title('model'+str(iter))
            axs[i][j].set(xlabel='x', ylabel='y')
            axs[i][j].label_outer()
            plot_unit_circle(axs[i][j])
            iter += 1
    plt.show()

if __name__ == '__main__':
    # Recreate the network and restore its trained weights from disk.
    model = FourLayerFNN()
    model.load_state_dict(torch.load('syn_model.pkl'))

    # Evaluation data is the same CSV the model was trained on.
    dataset = MyCustomDataset(
        './data/dataset.csv',
        transform=transforms.Compose([transforms.ToTensor()]))
    test_loader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                              shuffle=True)

    # Plot several saved models over the first 700 samples.
    images = test_loader.dataset.images[:700]
    plot_multiple_models(images)








    
Code example #6
0
def main():
    """Parse CLI arguments, build data loaders, and train/evaluate the model.

    Side effects: reads ./data/dataset.csv for both loaders, prints the model,
    and optionally saves the trained weights to mnist_cnn.pt.
    """
    # Training settings.
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=1000,
                        metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs',
                        type=int,
                        default=2,
                        metavar='N',
                        # BUG FIX: help text claimed default 10; default is 2.
                        help='number of epochs to train (default: 2)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        metavar='LR',
                        # BUG FIX: help text was garbled ("default(: 0.01)").
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    # NOTE(review): default=True means CUDA is disabled unless the flag
    # semantics are changed — preserved as-is since callers may rely on it.
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=True,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=50,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='For saving the current model')
    args = parser.parse_args()
    # BUG FIX: torch.cuda.is_avaiable() was a typo and would raise
    # AttributeError whenever --no-cuda is False.
    use_cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)
    device = torch.device('cuda' if use_cuda else 'cpu')
    # Pinned memory and worker processes only help when copying to a GPU.
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # Both loaders read the same CSV dataset; only the batch size differs.
    train_loader = torch.utils.data.DataLoader(MyCustomDataset(
        './data/dataset.csv',
        transform=transforms.Compose([transforms.ToTensor()])),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)

    test_loader = torch.utils.data.DataLoader(MyCustomDataset(
        './data/dataset.csv',
        transform=transforms.Compose([transforms.ToTensor()])),
                                              batch_size=args.test_batch_size,
                                              shuffle=True,
                                              **kwargs)

    # Train the model, evaluating after every epoch.
    model = Net().to(device)
    print(model)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)

    if args.save_model:
        torch.save(model.state_dict(), 'mnist_cnn.pt')