Example #1
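    #hold out the remaining images and one-hot labels as the test set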
    testImgs = images[trainSize:]
    testOneHotVecs = oneHotVecs[trainSize:]

    trainingAccuracy = []
    trainingLoss = []

    testingAccuracy = []
    testingLoss = []

    epochs = list(range(0, cfg.epochs))
    #training
    for epoch in range(0, cfg.epochs):

        #records weights to see if they change after training
        weights1 = []
        for param in cnn.parameters():
            weights1.append(param.clone())

        #training
        print("Training Epoch", str(epoch + 1), "of", str(cfg.epochs) + ":")
        trainAcc, trainLoss = cnn.train(trainImgs, trainOneHotVecs,
                                        cfg.trainBatchSize)
        trainingAccuracy.append(trainAcc)
        trainingLoss.append(trainLoss)

        #prints if weights are unchanged
        weights2 = []
        unchangedWeights = 0
        for param in cnn.parameters():
            weights2.append(param.clone())
        for w1, w2 in zip(weights1, weights2):
            if torch.equal(w1, w2):
                unchangedWeights += 1
        if unchangedWeights > 0:
            print(unchangedWeights, "parameter tensors unchanged after this epoch")
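
    #a sketch, not part of the original example: the epochs list and the
    #training history lists above are presumably collected so the accuracy
    #can be plotted after the loop, e.g. with matplotlib
    import matplotlib.pyplot as plt
    plt.plot(epochs, trainingAccuracy, label="training accuracy")
    plt.xlabel("epoch")
    plt.ylabel("accuracy")
    plt.legend()
    plt.show()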
Example #2
cnn.to(device)
print(cnn)
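# export the network to ONNX; the dummy input mirrors the model's expected
# 1x1x48x48 input (a single-channel 48x48 image)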
dummy_input = torch.randn(1, 1, 48, 48, device=device)
torch.onnx.export(cnn,
                  dummy_input,
                  "convnet.onnx",
                  verbose=True,
                  input_names=['input'],
                  output_names=['output'])
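
# a quick sanity check of the exported file (a sketch, not part of the
# original example; it assumes the onnx and onnxruntime packages are installed)
import numpy as np
import onnx
import onnxruntime as ort

onnx.checker.check_model(onnx.load("convnet.onnx"))  # structural validation
sess = ort.InferenceSession("convnet.onnx")
sample = np.random.randn(1, 1, 48, 48).astype(np.float32)
(onnx_out,) = sess.run(None, {"input": sample})  # 'input' matches input_names above
print(onnx_out.shape)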

# %%

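# training setup: cross-entropy loss and an Adam optimizer, with the custom
# Loader yielding mini-batches of 64 labelled images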
learn_rate = 3e-4
los = nn.CrossEntropyLoss()
loader = Loader(rtrain_x, 64, label=rtrain_y)
optim = torch.optim.Adam(cnn.parameters(), lr=learn_rate)
for ep in range(24):
    print('epoch: %d' % ep)
    for i, (x, y) in enumerate(loader):
        optim.zero_grad()
        x = x.to(device, dtype=torch.float)
        # print(x.shape)
        y = y.to(device, dtype=torch.long)
        # print(y)
        outt = cnn(x)
        loss = los(outt, y)
        # print(loss)
        loss.backward()
        optim.step()
        # report progress at a fixed batch interval; max() avoids a
        # modulo-by-zero when the computed interval rounds down to 0
        if (i + 1) % max(1, int(len(loader) / loader.bsz / 100)) == 0:
            print('\r',