Example #1
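# Note: `model` and the `viz` hook registered below come from the surrounding
# project and are not shown here. Purely as an illustration (not the project's
# actual implementation), a hypothetical forward hook like this one would plot
# the attention map produced by `model.sa2.sigmoid`:
import matplotlib.pyplot as plt


def viz(module, inputs, output):
    # forward hooks are called as hook(module, inputs, output); `output` is
    # assumed here to be the sigmoid attention map with shape (N, 1, H, W)
    att = output[0, 0].detach().cpu().numpy()
    plt.imshow(att, cmap='jet')
    plt.title('spatial attention: model.sa2.sigmoid')
    plt.show()
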
import time

import cv2
import torch
from torchvision import transforms
from tqdm import tqdm


def main():
    t = transforms.Compose([transforms.ToPILImage(),
                            transforms.Resize((224, 224)),
                            transforms.ToTensor(),
                            transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                                 std=[0.5, 0.5, 0.5])
                            ])

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model.eval()
    for name, m in model.named_modules():
        print(name)
        if name == 'model.sa2.sigmoid':
            m.register_forward_hook(viz)

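    # cv2.imread returns the image in BGR channel order; depending on how the
    # model was trained, a cv2.cvtColor(img, cv2.COLOR_BGR2RGB) conversion may
    # be needed before the PIL-based transforms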
    img = cv2.imread('1_face_img.jpg')
    img = t(img).unsqueeze(0).to(device)
    with torch.no_grad():
        model(img)
        # count the hard samples in this batch
        num_hard += len(anc_hard_embedding)
        # accumulate this epoch's total triplet/attention loss and the number
        # of hard samples used to compute it
        triplet_loss_sum += triplet_loss.item()
        attention_loss_sum += attention_loss.item()

    # average losses for this epoch
    avg_triplet_loss = 0 if (num_hard == 0) else triplet_loss_sum / num_hard
    avg_attention_loss = 0 if (num_hard == 0) else attention_loss_sum / num_hard
    avg_loss = avg_triplet_loss + avg_attention_loss
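    # e.g. (hypothetical numbers) with triplet_loss_sum = 12.0,
    # attention_loss_sum = 3.0 and num_hard = 30 hard triplets, this gives
    # avg_triplet_loss = 0.4, avg_attention_loss = 0.1 and avg_loss = 0.5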
    epoch_time_end = time.time()

    # report accuracy on the test set
    print("Validating on TestDataset! ...")
    model.eval()  # evaluation mode
    with torch.no_grad():  # no gradient computation needed
        distances, labels = [], []

        progress_bar = enumerate(tqdm(test_dataloader))
        for batch_index, (data_a, data_b, label) in progress_bar:
            # data_a, data_b, label are the batched tensors (img1, img2, issame)
            data_a = data_a.cuda()
            data_b = data_b.cuda()
            label = label.cuda()

            output_a, output_b = model(data_a), model(data_b)
            distance = l2_distance(output_a, output_b)
            # a list of per-batch numpy arrays
            labels.append(label.cpu().detach().numpy())
from torch.nn.modules.distance import PairwiseDistance
from Losses.Triplet_loss import TripletLoss
from validate_on_LFW import evaluate_lfw
from tqdm import tqdm
from config import config
from Data_loader.Data_loader_facenet import train_dataloader, test_dataloader
from Models.Model_for_facenet import model, optimizer_model, start_epoch, flag_train_multi_gpu

# total_time_start = time.time()
# start_epoch = start_epoch
end_epoch = start_epoch + config['epochs']
l2_distance = PairwiseDistance(2).cuda()
# best_roc_auc = 0
# best_accuracy = 0

model.eval()
with torch.no_grad():
    distances, labels = [], []

    print("Validating on LFW! ...")
    progress_bar = enumerate(tqdm(test_dataloader))

    for batch_index, (data_a, data_b, label) in progress_bar:

        data_a, data_b, label = data_a.cuda(), data_b.cuda(), label.cuda()

        output_a, output_b = model(data_a), model(data_b)
        distance = l2_distance(output_a, output_b)  # Euclidean (L2) distance

        distances.append(distance.cpu().detach().numpy())
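        # labels are collected the same way as in the loop further above
        labels.append(label.cpu().detach().numpy())

    # --- hypothetical continuation, not part of the original snippet: the
    # per-batch arrays would typically be flattened and passed to the imported
    # `evaluate_lfw` (see validate_on_LFW.py); a plain threshold accuracy is
    # shown here only as an illustration of what the arrays contain ---
    import numpy as np

    flat_distances = np.concatenate(distances)
    flat_labels = np.concatenate(labels).astype(bool)
    threshold = 1.0  # hypothetical decision threshold on the L2 distance
    accuracy = np.mean((flat_distances < threshold) == flat_labels)
    print(f"verification accuracy @ distance < {threshold}: {accuracy:.4f}")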