Example #1
        # Move the embeddings to the CPU for triplet mining in NumPy
        embedding_cpu = embedding_cuda.cpu().detach().numpy()
        # Mine (anchor, positive, negative) index triplets with a 0.2 margin
        anchor, pos, neg = select_triplet(embedding_cpu, y_train, 0.2)

        if len(anchor) > 0:
            # Pad each index list with index 0 up to length 8, then run the
            # model again on the selected samples
            anchor_train_cuda = x_train_cuda[anchor + [0] * (8 - len(anchor))]
            anchor_embedding = model(anchor_train_cuda)
            pos_train_cuda = x_train_cuda[pos + [0] * (8 - len(pos))]
            pos_embedding = model(pos_train_cuda)
            neg_train_cuda = x_train_cuda[neg + [0] * (8 - len(neg))]
            neg_embedding = model(neg_train_cuda)

            # Triplet loss with a 0.2 margin, followed by backprop and a
            # parameter update
            optimizer.zero_grad()
            loss = cost.forward(anchor_embedding, pos_embedding, neg_embedding,
                                0.2)
            loss = loss.cpu()

            loss.backward()
            optimizer.step()
            running_loss += loss.item()
Example #2
            neg_img = batch_sample['neg_img']
            
            # Model computation
            # Forward pass: run the model on each of the three images to get
            # its embedding (during training the embeddings feed the loss;
            # at validation time a single image is passed in and only its
            # embedding is returned)
            anc_embedding = model(anc_img.cuda())
            pos_embedding = model(pos_img.cuda())
            neg_embedding = model(neg_img.cuda())
            
            # L2-normalize each embedding along the feature dimension
            anc_embedding = torch.div(anc_embedding, torch.norm(anc_embedding, dim=1, keepdim=True))
            pos_embedding = torch.div(pos_embedding, torch.norm(pos_embedding, dim=1, keepdim=True))
            neg_embedding = torch.div(neg_embedding, torch.norm(neg_embedding, dim=1, keepdim=True))
        
            # Loss computation
            # Compute the triplet loss for this batch's hard samples via the
            # loss_fun.forward call below
            triplet_loss = loss_fun.forward(anc_embedding.cpu(), pos_embedding.cpu(), neg_embedding.cpu())
            
            loss = triplet_loss

            # Backward pass
            optimizer_model.zero_grad()
            loss.backward()
            optimizer_model.step()

            # update the optimizer learning rate
            adjust_learning_rate(optimizer_model, epoch)

            # Accumulate this epoch's total triplet loss and the number of samples used to compute it
            triplet_loss_sum += triplet_loss.item()
            sample_num += anc_embedding.shape[0]
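
Neither example shows the loss object itself (`cost` in Example #1, `loss_fun` in Example #2). The sketch below assumes the standard margin-based triplet loss, max(d(a,p) - d(a,n) + margin, 0) averaged over the batch; the actual classes used by the examples may differ.

import torch
import torch.nn as nn

class TripletLoss(nn.Module):
    # Illustrative margin-based triplet loss, not the examples' own implementation.
    def __init__(self, margin=0.2):
        super().__init__()
        self.margin = margin

    def forward(self, anchor, positive, negative, margin=None):
        # Example #1 passes the margin at call time, so allow an override here.
        m = self.margin if margin is None else margin
        d_ap = (anchor - positive).pow(2).sum(dim=1)  # squared anchor-positive distance
        d_an = (anchor - negative).pow(2).sum(dim=1)  # squared anchor-negative distance
        # Hinge: only triplets where the negative is not at least `m` farther away contribute
        return torch.clamp(d_ap - d_an + m, min=0).mean()

With this sketch, Example #1's four-argument call with an explicit 0.2 margin and Example #2's three-argument `loss_fun.forward(...)` call both work unchanged.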