Example 1
import os
import numpy as np
import paddle.fluid as fluid

def do_eval(data_path, model_name='mymodel', use_gpu=False):
    place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        model = MyNet()
        model_state_dict, _ = fluid.load_dygraph(
            os.path.join(MODEL_PATH, model_name))
        model.load_dict(model_state_dict)

        model.eval()
        eval_loader = load_data(data_path, mode='eval')

        avg_acc_set = []
        avg_loss_set = []
        for data in eval_loader():
            x_data, y_data = data
            img = fluid.dygraph.to_variable(x_data)
            label = fluid.dygraph.to_variable(y_data)
            predict, avg_acc = model(img, label)
            loss = fluid.layers.cross_entropy(input=predict, label=label)
            avg_loss = fluid.layers.mean(loss)
            avg_acc_set.append(float(avg_acc.numpy()))
            avg_loss_set.append(float(avg_loss.numpy()))

        # Compute the average loss and accuracy across multiple batches
        avg_acc_val_mean = np.array(avg_acc_set).mean()
        avg_loss_val_mean = np.array(avg_loss_set).mean()

        print('loss={}, acc={}'.format(avg_loss_val_mean, avg_acc_val_mean))
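The snippet above assumes MyNet, load_data, and MODEL_PATH are defined elsewhere. As a minimal sketch only, a MyNet compatible with the predict, avg_acc = model(img, label) call might look like the following; the single Linear layer and the 784/10 sizes are assumptions, not part of the original:

import paddle.fluid as fluid

class MyNet(fluid.dygraph.Layer):
    def __init__(self):
        super(MyNet, self).__init__()
        # Hypothetical single-layer classifier; layer sizes are placeholders.
        self.fc = fluid.dygraph.Linear(784, 10, act='softmax')

    def forward(self, inputs, label=None):
        predict = self.fc(inputs)
        if label is not None:
            # Return predictions plus batch accuracy, matching the
            # `predict, avg_acc = model(img, label)` call in do_eval.
            acc = fluid.layers.accuracy(input=predict, label=label)
            return predict, acc
        return predict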
Example 2
import torch

def start_test(input):
    '''
    Run a forward pass for testing.
    '''
    # Initialize the network and load the pretrained model
    myNet = MyNet().to("cuda:0")
    myNet.eval()
    with torch.no_grad():
        input = input.to("cuda:0")

        # For faster inference, if the accuracy loss is acceptable, manually
        # cast the input and the model to FP16 with half(); inference must
        # then run on the GPU only.
        # input = input.half()
        # myNet = myNet.half()
        feature = myNet(input)
        return feature
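The commented-out half() lines describe the FP16 inference path. A minimal sketch of that path, assuming MyNet exists and using a random tensor as a stand-in input (the shape is a placeholder):

import torch

net = MyNet().to("cuda:0").half()   # cast all weights to FP16 (GPU only)
net.eval()
with torch.no_grad():
    # Placeholder input; the shape is an assumption for illustration.
    x = torch.randn(1, 3, 224, 224, device="cuda:0").half()
    feature = net(x)                # forward pass runs in half precision
    feature = feature.float()       # cast back to FP32 if callers expect it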
Example 3
import torch

class Detector:
    def __init__(self, net_path, board_size, n, n_playout=1000):
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.net = MyNet().to(self.device)
        # map_location lets a GPU-trained checkpoint load on a CPU-only machine
        self.net.load_state_dict(
            torch.load(net_path, map_location=self.device))
        self.number_playout = n_playout
        self.env = GameState(board_size, n)
        self.net.eval()
        self.mcts_player = Player(policy=self.policy,
                                  number_playout=self.number_playout,
                                  is_self_play=False,
                                  print_detail=True)

    def policy(self, env):
        # Get the available actions (15*15 = 225 board positions)
        action_avail = env.action_avail
        # Get the current state
        state = torch.from_numpy(env.get_state).unsqueeze(0).to(self.device)

        # Feed the state through the network to get the predicted log action
        # probabilities and the value (win rate) of the current state
        log_action_probs, value = self.net(state)

        # Convert log action probabilities to probabilities and filter out
        # unavailable actions
        act_probs = torch.exp(
            log_action_probs).detach().cpu().numpy().flatten()
        act_probs = zip(action_avail, act_probs[action_avail])
        value = value.item()

        # Return the (action, probability) pairs and the value of the position
        return act_probs, value

    def detect(self):
        while True:
            action = None
            # When it is the AI player's turn and the game is not paused,
            # use MCTS to find the best action
            if self.env.current_player == 1 and not self.env.pause:
                action = self.mcts_player.get_action(self.env.game)
            self.env.step(action)
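As a usage sketch only; the checkpoint file name, board size, and win-in-a-row count below are assumptions, not from the original:

# Hypothetical entry point; the parameters are placeholders.
if __name__ == "__main__":
    detector = Detector(net_path="best_policy.pt", board_size=15, n=5)
    detector.detect()  # loops, letting the MCTS player move on the AI's turn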