# Example #1
    # --- one actor-critic rollout step over a single image sample ---
    # NOTE(review): this is the interior of an enclosing loop/function whose
    # header and tail (the actual loss computation) lie outside this fragment.

    # Fetch one cropped sample and add batch and channel dims before moving it
    # to the compute device — presumably (H, W) -> (1, 1, H, W); TODO confirm
    # against Assistant.get_cropped_sample().
    image = Assistant.get_cropped_sample()[0].unsqueeze(0).unsqueeze(0).to(
        device)

    # Run the image through the first FasteNet stage to obtain a feature map.
    F_map_1 = FasteNet.module_one(image)

    # Take the first 32 columns of the feature map (last dim) and pool them to
    # a fixed 32x32 spatial size so the actor-critic always sees a constant
    # input shape regardless of the feature map's width.
    AC_input = F.adaptive_max_pool2d(F_map_1[..., :32], 32)

    # The actor-critic yields crop actions and the critic's value estimate.
    # NOTE(review): calling .forward() directly bypasses nn.Module hooks —
    # consider ActorCritic(AC_input) if ActorCritic is an nn.Module.
    actions, estimated_reward = ActorCritic.forward(AC_input)

    # Crop the feature map using the two scalar actions (crop coordinates,
    # presumably — verify against Assistant.crop_feature_map's signature).
    cropped_F_map = Assistant.crop_feature_map(actions[0].squeeze().item(),
                                               actions[1].squeeze().item(),
                                               F_map_1).to(device)

    # Pass the cropped feature map through the second FasteNet stage, then
    # move the saliency map to CPU as a squeezed numpy array for parsing.
    saliency_map = FasteNet.module_two(cropped_F_map).to(
        'cpu').squeeze().numpy()

    # Derive the step's reward from the saliency map.
    # NOTE(review): the helper is named calculate_loss but its result is used
    # as a reward here — confirm the sign convention with Assistant.
    Assistant.parse_saliency_map(saliency_map)
    reward = Assistant.calculate_loss()

    # Clear accumulated gradients on both optimizers before the backward pass
    # that follows this fragment.
    Actor_optimizer.zero_grad()
    Critic_optimizer.zero_grad()

    # calculate loss