Example #1
0
        # One training step: forward pass, loss, accuracy bookkeeping,
        # backward pass, parameter update, and periodic logging.
        output = lenet_model(batch_img)  # forward pass
        #  print output.data.size()  # output is batch_size*10
        #  print batch_label.data.size()
        loss = loss_func(output,
                         batch_label)  # compute loss; already averaged over the batch
        #print loss.data.size()  # loss is a scalar
        train_loss += loss.item()
        # NOTE(review): `max` shadows the builtin max() for the rest of this scope;
        # consider renaming (e.g. max_val). Returns max values and argmax along dim 1.
        max, max_index = torch.max(output, 1)  # max value / index over the class dim
        #print max.size()
        train_correct = torch.sum(
            (max_index.data == batch_label.data))  # count of correct predictions in this batch
        #  print "train_correct:",train_correct
        train_acc += train_correct
        #  print "train_acc:",train_acc
        # backward pass
        # NOTE(review): zeroing grads on the model is equivalent to
        # optimizer.zero_grad() only if the optimizer holds exactly the model's
        # parameters — presumably true here, but confirm against setup code.
        lenet_model.zero_grad()  # gradients accumulate, so zero them first
        loss.backward()  # autograd computes gradients for all learnable parameters
        optimizer.step()  # optimizer updates the parameters

        # periodic display / recording
        if (num_iter % iter_display == 0):
            print "iter_num:", num_iter
            # NOTE(review): train_loss/train_acc are overwritten with their
            # averaged values here but never reset to 0 — unless they are reset
            # outside this visible chunk, every window after the first averages
            # a corrupted running sum. TODO confirm a reset exists after recording.
            if num_iter == 0:
                train_loss = train_loss  # NOTE(review): no-op self-assignment
                train_acc = float(train_acc) / batch_size
            else:
                train_loss = train_loss / iter_display
                train_acc = float(train_acc) / (iter_display * batch_size)
            recoder["num_iter"].append(num_iter)
            recoder["train_loss"].append(train_loss)
            recoder["train_acc"].append(train_acc)