Example #1
def LeNet_test():
    # initializing the network
    network = LeNet(BATCH_SIZE)
    network.getTheParas(MODEL_FILE)

    # load the test data
    _, _, test_imgs, _, _, test_label = util.load_data(MNIST_PATH, False)

    log_string('------------start test-----------')

    num_batch = test_imgs.shape[0] // BATCH_SIZE
    start = 0
    end = start + BATCH_SIZE
    loss = 0.0
    total_correct = 0.0
    total_seen = 0
    for n in range(num_batch):
        log_string('--------{}/{} (batches) completed!'.format(n + 1, num_batch))
        current_img = test_imgs[start:end, ...]
        current_label = test_label[start:end, ...]
        start = end
        end += BATCH_SIZE
        predict_val, loss_val = network.forward(current_img, current_label)
        correct = np.sum(predict_val == current_label)
        total_correct += correct
        loss += loss_val
        total_seen += BATCH_SIZE
    log_string('eval mean loss: {}'.format(loss / num_batch))
    log_string('eval accuracy: {}'.format(total_correct / total_seen))
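Example #1 assumes a log_string helper plus MODEL_FILE and MNIST_PATH constants defined elsewhere in the script. A rough sketch of the usual pattern behind such a helper (the log file name is an assumption, not from the source):

# Hypothetical log_string helper: echo to stdout and append to a log file.
LOG_FOUT = open('log_evaluate.txt', 'a')

def log_string(msg):
    LOG_FOUT.write(msg + '\n')   # persist the message
    LOG_FOUT.flush()
    print(msg)                   # and echo it to the console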
Example #2
def forward_pytorch(weightfile, image):
    # net = resnet.resnet18()
    net = LeNet(1, 2)
    checkpoint = torch.load(weightfile)
    net.load_state_dict(checkpoint['weight'])
    net.double()                # cast parameters to float64
    if args.cuda:
        net.cuda()
    print(net)
    net.eval()
    image = torch.from_numpy(image.astype(np.float64))  # match the network's float64 dtype

    if args.cuda:
        image = Variable(image.cuda())
    else:
        image = Variable(image)
    t0 = time.time()
    blobs = net(image)
    t1 = time.time()            # stop timing before the print below
    print(blobs.data.cpu().numpy().flatten())  # .cpu() so this also works when args.cuda is set
    return t1 - t0, blobs, net, torch.from_numpy(blobs.data.cpu().numpy())
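A hedged usage sketch for forward_pytorch (the weight-file name and the 1x1x28x28 input shape are assumptions; LeNet(1, 2) above suggests one input channel and two classes):

import numpy as np

image = np.random.rand(1, 1, 28, 28)  # NCHW: one grayscale image (shape is an assumption)
elapsed, blobs, net, outputs = forward_pytorch('lenet.pth', image)
print('forward pass took %.4f s' % elapsed)
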
Example #3

def train_and_evaluate(model, trainloader, testloader, epochs, cuda, args):
    # NOTE: the snippet was cut off above this line in the original; this
    # function signature and the epoch loop are reconstructed assumptions.
    for epoch in range(epochs):
        model.train()

        for i, (x_batch, y_batch) in enumerate(trainloader):
            if cuda:
                x_batch, y_batch = x_batch.cuda(), y_batch.cuda()

            model.optimizer.zero_grad()

            # forward + backward + optimize
            output = model(x_batch)
            loss = model.criterion(output, y_batch)
            loss.backward()
            model.optimizer.step()

            if i % args.log_interval == 0:
                print('Epoch: {:3d}, Batch {:3d}/{}, Loss: {:.5f}'.format(
                    epoch, i, len(trainloader), loss.item()))

        model.eval()
        correct, total = 0, 0
        for i, (x_batch, y_batch_true) in enumerate(testloader):
            if cuda:
                x_batch, y_batch_true = x_batch.cuda(), y_batch_true.cuda()

            y_batch_pred = torch.argmax(model(x_batch), dim=1)

            correct += torch.sum(torch.eq(y_batch_pred, y_batch_true)).item()
            total += x_batch.shape[0]

        print('Accuracy: {}\n'.format(correct / total))
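The loop in Example #3 expects the model to carry its own criterion and optimizer as attributes, which is not standard nn.Module behavior. A minimal sketch of such a wrapper, assuming MNIST-sized 1x28x28 inputs (all layer sizes and hyperparameters are assumptions):

import torch.nn as nn
import torch.optim as optim

class LeNetWithOptim(nn.Module):
    def __init__(self, lr=0.01):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 6, 5), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(6, 16, 5), nn.ReLU(), nn.MaxPool2d(2))
        self.classifier = nn.Linear(16 * 4 * 4, 10)
        self.criterion = nn.CrossEntropyLoss()                 # attached as attributes,
        self.optimizer = optim.SGD(self.parameters(), lr=lr)   # as the loop above expects

    def forward(self, x):
        x = self.features(x)
        return self.classifier(x.flatten(1))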
Example #4
numb_param({
    "learning_rate": learning_rate,
    "epoch": epoch,
    "batch_size": batch_size
})

# Register queue
numb_queue(globals())  # overrides variables in the global scope


def make_batch(in_data, targets, batch_size):
    for start in range(0, in_data.size(0), batch_size):
        real_size = min(batch_size, in_data.size(0) - start)
        yield in_data[start:start + real_size], targets[start:start + real_size]


criterion = nn.MSELoss()
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
for epk in range(epoch):
    batches = make_batch(dummy_inputs, dummy_targets, batch_size)
    for train_exp, target in batches:
        model.zero_grad()
        output = model(train_exp)        # call the module directly instead of .forward()
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()

# print(model.state_dict())
print("Done Training!")
Example #5
def train(data_path,batch_size,learning_rate,max_iteration):
    '''
    @description: train the LeNet network model and save the trained model
    @params:
    - data_path: path to the dataset files
    - batch_size: number of samples per batch
    - learning_rate: learning rate for the network parameters
    - max_iteration: maximum number of iterations
    @return:
    '''
    lenet = LeNet(input_params=(28,28,1),
                  conv_params={'ksize':(4,4),'stride':3,'pad':0,'num':20},
                  pool_params={'ksize':2,'stride':2,'pad':0},
                  full_params=(320,100),
                  output_params=(100,10),
                  optimizer='Momentum')
    
    train_dataset,valid_dataset,test_dataset = LoadDataSet(data_path)
    
    train_accuracy = []
    train_loss = []
    valid_accuracy = []
    valid_loss = []
    max_accuracy = 0.0
    for step in range(max_iteration):
        
        image,labels = train_dataset.next_batch(batch_size)
        # forward pass
        predicts = lenet.forward(image)
        
        # compute loss and accuracy
        train_loss.append(lenet.calc_loss(predicts,labels))
        train_accuracy.append(lenet.calc_accuracy(predicts,labels))
        
        # exponential learning-rate decay (decay from the initial rate; the
        # original re-multiplied the already-decayed rate, compounding it)
        if step % 1000 == 0:
            current_lr = learning_rate * (0.9 ** (step // 1000))

        # backward pass and update the network parameters
        lenet.optimizer(labels, current_lr)
        
        
        if step % 100 == 0:
            predicts = lenet.forward(valid_dataset.image)
            loss = lenet.calc_loss(predicts,valid_dataset.label)
            accuracy = lenet.calc_accuracy(predicts,valid_dataset.label)
            valid_loss.append(loss)
            valid_accuracy.append(accuracy)
            print('%d-->train loss: %.4f, valid_loss: %.4f, valid accuracy: %.2f%%'%\
                  (step,train_loss[-1],loss,(accuracy*100)))
            
        if ((step + 1) % 1000 == 0) or (step + 1 == max_iteration):
            predicts = lenet.forward(test_dataset.image)
            accuracy = lenet.calc_accuracy(predicts,test_dataset.label)
            if accuracy > max_accuracy:
                max_accuracy = accuracy
                file_name = './model/model-' + str(step) + '.pkl'
                with open(file_name,'wb') as file:
                    pickle.dump(lenet,file)
            print('%d-->test accuracy: %.2f%%'%(step,(accuracy*100)))
            with open('./log/test_log.txt','at') as file:
                file.write('%d-->test accuracy: %.2f%%\n'%(step,(accuracy*100)))
    
    # save train loss, train accuracy, valid loss and valid accuracy
    np.save('./log/train_loss.npy',np.array(train_loss))
    np.save('./log/train_accuracy.npy',np.array(train_accuracy))
    np.save('./log/valid_loss.npy',np.array(valid_loss))
    np.save('./log/valid_accuracy.npy',np.array(valid_accuracy))
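Because the best model is serialized with pickle.dump, it can be restored the same way for inference; a minimal sketch (the checkpoint file name and the test_images array are assumptions):

import pickle

with open('./model/model-999.pkl', 'rb') as file:  # hypothetical checkpoint name
    lenet = pickle.load(file)
predicts = lenet.forward(test_images)  # test_images: an array shaped like the training input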
Example #6
File: train.py  Project: tyommik/LeNet-5
test_loss_history = []
test_accuracy_history = []  # was missing in the original but is appended to below

X_test = X_test.to(device)
y_test = y_test.to(device)

for epoch in range(1000):
    order = np.random.permutation(len(X_train))

    for start_index in range(0, len(X_train), batch_size):
        optimizer.zero_grad()

        batch_indexes = order[start_index:start_index + batch_size]

        X_batch = X_train[batch_indexes].to(device)
        y_batch = y_train[batch_indexes].to(device)

        preds = model(X_batch)

        loss_value = loss(preds, y_batch)
        loss_value.backward()

        optimizer.step()

    test_preds = model(X_test)
    test_loss_history.append(loss(test_preds, y_test).data.cpu())

    accuracy = (test_preds.argmax(dim=1) == y_test).float().mean().data.cpu()
    test_accuracy_history.append(accuracy)
    print(accuracy)

print(test_accuracy_history)
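One caveat in Example #6: the test-set forward pass runs with autograd enabled, building a graph that is never used. A variant of the per-epoch evaluation step wrapped in torch.no_grad():

with torch.no_grad():  # skip graph construction during evaluation
    test_preds = model(X_test)
    test_loss_history.append(loss(test_preds, y_test).item())
    accuracy = (test_preds.argmax(dim=1) == y_test).float().mean().item()
    test_accuracy_history.append(accuracy)
    print(accuracy)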