Code example #1
        with torch.no_grad():
            scores = model(imgs)  # predicted 10-bin score distribution
            emd_loss = criterion(scores, labels)
            emd_losses.update(emd_loss.item(), imgs.shape[0])

        scores_hist.append(scores.detach().cpu().numpy())
        labels_hist.append(labels.detach().cpu().numpy())
    print()
    logging.info("Test emd_loss@r=1: {:.4f}".format(emd_losses.avg))

    scores_hist = np.concatenate(scores_hist)
    labels_hist = np.concatenate(labels_hist)
    # Collapse each 10-bin distribution to its mean rating (ratings 1..10)
    scores_mean = np.dot(scores_hist, np.arange(1, 11))
    labels_mean = np.dot(labels_hist, np.arange(1, 11))
    SRCC_mean, _ = spearmanr(scores_mean, labels_mean)
    logging.info("Test SRCC_mean: {:.4f}".format(SRCC_mean))

    return emd_losses.avg, SRCC_mean


if __name__ == "__main__":
    for epoch in range(1, epochs + 1):
        # Train Phase
        train(model, train_loader, scheduler, optimizer)
        # Valid Phase
        emd_loss, SRCC_mean = eval(model, test_loader)
        # Save model
        fname = "model_{}{}.pth".format(__file__.split(".")[0], comment)
        saver.save(SRCC_mean, model.state_dict(), fname)
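
This snippet leaves `emd_losses` and `criterion` undefined. From the usage, `emd_losses` looks like an `AverageMeter`-style running-average tracker (matching the `.update(val, n)` / `.avg` calls), and `criterion` an Earth Mover's Distance loss over the 10-bin score distributions, as in NIMA. Both definitions below are a minimal sketch under those assumptions, not the original code:

import torch
import torch.nn as nn

class AverageMeter:
    # Running average; matches the emd_losses.update(val, n) / emd_losses.avg
    # usage in the snippet above.
    def __init__(self):
        self.sum = 0.0
        self.count = 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    @property
    def avg(self):
        return self.sum / max(self.count, 1)

class EMDLoss(nn.Module):
    # Earth Mover's Distance between normalized score distributions (NIMA-style).
    # With r=1 this is the mean absolute difference of the two CDFs, which would
    # match the "emd_loss@r=1" log line above.
    def __init__(self, r=1):
        super().__init__()
        self.r = r

    def forward(self, scores, labels):
        cdf_diff = torch.cumsum(scores, dim=-1) - torch.cumsum(labels, dim=-1)
        emd = torch.mean(cdf_diff.abs() ** self.r, dim=-1) ** (1.0 / self.r)
        return emd.mean()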
Code example #2
        # print(winner)

        # Flip the sign of the reward when 'BD' is not the winner
        if winner == 'BD':
            points_ac.append(points)
        else:
            points_ac.append(-points)
            points = -points

        # Add this game's loss contribution for our trainable player
        for p in game.players:
            if isinstance(p, MyPlayer):
                loss += learn(p.probabilities, p.actions, points)

    # Record the total signed points collected across this batch of rollouts
    rollout_points.append(sum(points_ac))

    # Average the accumulated loss over the rollouts, then take one gradient step
    loss /= rollouts
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if e % print_epochs == 0:
        print(
            f'Epoch {e}: mean total AC points over {rollouts} rollouts '
            f'in the last {print_epochs} episodes: {np.mean(rollout_points[-print_epochs:])}'
        )
        # plt.plot(points_ac)
        plt.plot(rollout_points)
        plt.plot(running_average(rollout_points))
        # plt.plot(points_bd)
        # plt.plot(running_average(points_bd))
        plt.show()
        saver.save()
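
Neither `learn` nor `running_average` is shown in this snippet. A plausible reading of `learn(p.probabilities, p.actions, points)` is a REINFORCE-style surrogate loss whose gradient pushes the policy toward rewarded actions; both definitions below are sketches under that assumption, not the original code:

import numpy as np
import torch

def learn(probabilities, actions, points):
    # REINFORCE-style surrogate: negative log-probability of the actions
    # actually taken, weighted by the signed episode return, so that
    # loss.backward() increases the probability of rewarded actions.
    log_probs = torch.stack(
        [torch.log(dist[a]) for dist, a in zip(probabilities, actions)]
    )
    return -points * log_probs.sum()

def running_average(values, window=100):
    # Moving average for the smoothed training curve plotted above.
    if len(values) < window:
        return values
    return np.convolve(values, np.ones(window) / window, mode='valid')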