def Testing(pred, y, name):
    # Note: `epoch`, `Args` and `score_py3` are module-level globals here.
    accuracy_test = score_py3.accuracy(pred, y.data)
    f1_test = score_py3.score_f1(pred, y.data)
    if Args.show_log:
        print('Epoch: %s | %s Test Accuracy: %.5f | Test F1: %.5f'
              % (epoch, name, accuracy_test, f1_test))
    return f1_test
def training(output, loss, optimizer, y, name, Args):
    optimizer.zero_grad()   # clear gradients for backward
    loss.backward()         # backpropagation, compute gradients
    optimizer.step()        # apply gradients
    if Args.show_log:
        if step % 1 == 0:   # log every batch; `step` and `epoch` come from the enclosing loop
            if Args.cuda:
                pred = torch.max(output, 1)[1].cuda().data.squeeze()
            else:
                pred = torch.max(output, 1)[1].data.squeeze()
            accuracy_train = score_py3.accuracy(pred, y.data)   # accuracy
            f1_train = score_py3.score_f1(pred, y.data)         # F1
            print('Epoch: %s | %s Train Accuracy: %.5f | Train F1: %.5f | Loss: %.2f'
                  % (epoch, name, accuracy_train, f1_train, loss.data))
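# Illustrative usage of `training` (a sketch, not part of the original script):
# the caller runs the forward pass and computes the loss, then hands both to
# `training` for the backward/optimizer step. `loader_train` is a hypothetical
# DataLoader name; `epoch` and `step` are read as globals inside the helper.
#
# for step, (x, y) in enumerate(loader_train):
#     if Args.cuda:
#         x, y = x.cuda(), y.cuda()
#     output = vgg16(x)
#     loss = loss_func(output, y)
#     training(output, loss, optimizer, y, 'VGG16', Args)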
output = vgg16(x)
loss = loss_func(output, y)   # get loss
optimizer.zero_grad()         # clear gradients for backward
loss.backward()               # backpropagation, compute gradients
optimizer.step()              # apply gradients

##### train evaluate #####
if Args.show_log:
    if step % 1 == 0:
        if Args.cuda:
            pred = torch.max(output, 1)[1].cuda().data.squeeze()
        else:
            pred = torch.max(output, 1)[1].data.squeeze()
        accuracy_train = score_py3.accuracy(pred, y.data)   # accuracy
        f1_train = score_py3.score_f1(pred, y.data)         # F1
        print('Epoch: %s | Train Accuracy: %.5f | Train F1: %.5f | Loss: %.2f'
              % (epoch, accuracy_train, f1_train, loss.data))

##### Test #####
all_y = []
all_pred = []
for step, (x, y) in enumerate(loader_test):
    if Args.cuda:
        x = x.cuda()
        y = y.cuda()
    vgg16.eval()
    output = vgg16(x)
    if Args.cuda:
        pred = torch.max(output, 1)[1].cuda().data.squeeze()