Example #1
    def test__get_distance_zero(self):
        """
        Case with a single item in each array, where both items are identical
        """
        # Arrange
        input_x = torch.tensor([[0, 0, 1]], dtype=torch.float)
        input_y = torch.tensor([[0, 0, 1]], dtype=torch.float)

        expected = torch.tensor([0])
        sut = TripletLoss(.5)

        # Act
        actual = sut._get_distance(input_x, input_y)

        # Assert
        self.assertSequenceEqual(expected.cpu().numpy().tolist(),
                                 actual.cpu().numpy().tolist())
Example #2
    def test__get_distance_single(self):
        """
        Case with a single item in each array
        """
        # Arrange
        input_x = torch.tensor([[1, 8, 7]], dtype=torch.float)
        input_y = torch.tensor([[2, 3, 4]], dtype=torch.float)

        expected = torch.tensor([35])
        sut = TripletLoss(.5)

        # Act
        actual = sut._get_distance(input_x, input_y)

        # Assert
        self.assertSequenceEqual(expected.cpu().numpy().round(2).tolist(),
                                 actual.cpu().numpy().round(2).tolist())
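Taken together, Examples #1 and #2 pin down _get_distance as a row-wise squared Euclidean distance: for [1, 8, 7] and [2, 3, 4] it is (1 - 2)**2 + (8 - 3)**2 + (7 - 4)**2 = 1 + 25 + 9 = 35. A minimal sketch consistent with both tests (inferred from the expected values; the repository's actual implementation is not shown here):

import torch

def _get_distance(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
    # Row-wise squared Euclidean distance between corresponding rows
    return ((x - y) ** 2).sum(dim=1)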
Example #3
    def test_forward_max_zero(self):
        """
        Case where the negative pair distance exceeds the positive pair
        distance by more than the margin, so the loss clamps to zero
        """
        # Arrange
        margin = .1
        sut = TripletLoss(margin)
        p = torch.tensor([[0, 0, 1]], dtype=torch.float)
        q = torch.tensor([[0, 0, 1]], dtype=torch.float)
        n = torch.tensor([[1, 1, 2]], dtype=torch.float)

        target = torch.tensor([1])

        # max(0, d(p, q) - d(q, n) + margin) = max(0, 0 - 3 + 0.1) = 0
        expected = 0

        # Act
        actual = sut.forward(p, q, n, target)

        # Assert
        self.assertEqual(round(expected, 2), round(actual.item(), 2))
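With the same squared distances, d(p, q) = 0 and d(q, n) = 1 + 1 + 1 = 3, so a standard hinge-style triplet loss yields max(0, 0 - 3 + 0.1) = 0, matching the expectation. A hypothetical forward pass consistent with this test (the target argument and the topk parameter seen in Example #4 are not modelled here):

import torch

def triplet_forward(p, q, n, margin):
    # Hinge triplet loss: clamp(d(p, q) - d(q, n) + margin, min=0)
    d_pos = ((p - q) ** 2).sum(dim=1)
    d_neg = ((q - n) ** 2).sum(dim=1)
    return torch.clamp(d_pos - d_neg + margin, min=0).mean()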
Example #4
    def get(self, train_dataset):

        evaluator_factory = EvalutorFactoryServiceLocator().get_factory(
            "EvaluationFactory")
        evaluator = evaluator_factory.get_evaluator()

        trainer = Train(evaluator,
                        patience_epochs=self.patience_epochs,
                        early_stopping=self.early_stopping,
                        epochs=self.epochs)
        model = ModelResnet()

        # Define optimiser
        learning_rate = float(
            self._get_value(self.additional_args, "learning_rate", ".0001"))
        weight_decay = float(
            self._get_value(self.additional_args, "weight_decay", "5e-5"))
        momentum = float(
            self._get_value(self.additional_args, "momentum", ".9"))
        optimiser = SGD(lr=learning_rate,
                        params=model.parameters(),
                        momentum=momentum,
                        weight_decay=weight_decay)
        # optimiser = Adam(lr=self.learning_rate, params=model.parameters())

        self.logger.info("Using optimiser {}".format(type(optimiser)))

        # Define loss function
        tripletloss_margin = float(
            self._get_value(self.additional_args, "tripletloss_margin", "2.5"))
        tripletloss_topk = int(
            self._get_value(self.additional_args, "tripletloss_topk", "25"))
        loss = TripletLoss(margin=tripletloss_margin, topk=tripletloss_topk)
        # loss = nn.CrossEntropyLoss()

        train_pipeline = TrainPipeline(batch_size=self.batch_size,
                                       optimiser=optimiser,
                                       trainer=trainer,
                                       num_workers=self.num_workers,
                                       loss_func=loss,
                                       model=model)

        return train_pipeline
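_get_value is not shown in this example, but the call sites suggest a dictionary lookup with a string default that the caller then casts. A likely shape for such a helper (an assumption; the real method is not included here):

def _get_value(self, args, key, default):
    # Return args[key] if present, otherwise the string default;
    # the callers above cast the result to float/int themselves
    return args.get(key, default) if args else default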
Example #5
def train(epoch):
    current_lr = adjust_learning_rate(optimizer, epoch)
    train_loss = AverageMeter()
    data_time = AverageMeter()
    batch_time = AverageMeter()
    correct = 0
    total = 0
    # New
    tripletloss_global = TripletLoss(32, 4, cross_modal=False)
    tripletloss_cross = TripletLoss(32, 4, cross_modal=True)
    # klloss = nn.KLDivLoss(size_average=False)
    # centerloss = CenterLoss(395, 2048)
    # logsoftmax = nn.LogSoftmax(dim=1)
    softmax = nn.Softmax(dim=1)

    # switch to train mode
    net.train()
    end = time.time()
    for batch_idx, (input1, input2, label1, label2) in enumerate(trainloader):
        # change_flag = False
        input1 = Variable(input1.cuda())
        input2 = Variable(input2.cuda())
        label1 = Variable(label1.cuda())
        label2 = Variable(label2.cuda())
        labels = torch.cat((label1, label2), 0)

        data_time.update(time.time() - end)
        outputs, feat, _ = net(input1, input2)
        loss = 0  # identity loss is only computed for the 'id' method
        if args.method == 'id':
            loss = criterion(outputs, labels)
            # KL loss
            # outputs_rgb = outputs[0:32, :]
            # outputs_ir = outputs[32:, :]
            # outputs_ir2 = logsoftmax(outputs_ir)
            # outputs_rgb2 = softmax(outputs_rgb)
            # KLLoss2 = klloss(outputs_ir2, outputs_rgb2)
            score = softmax(outputs)
            _, predicted = torch.max(score.data, 1)
            correct += predicted.eq(labels).sum().item()
        # feat = 1. * feat / (torch.norm(feat, 2, 1, keepdim=True).expand_as(feat) + 1e-10)
        triloss = tripletloss_cross(feat, labels) + tripletloss_global(feat, labels)
        loss = triloss + loss

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.update(loss.item(), 2*input1.size(0))

        total += labels.size(0)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if batch_idx % 10 == 0:
            print('Epoch: [{}][{}/{}] '
                  'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                  'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'lr: {} '
                  'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '
                  'Accu: {:.2f}'.format(
                      epoch, batch_idx, len(trainloader), current_lr,
                      100. * correct / total, batch_time=batch_time,
                      data_time=data_time, train_loss=train_loss))
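TripletLoss(32, 4, cross_modal=...) is not defined in this snippet; the arguments plausibly describe identity-balanced PK batches (32 samples, 4 instances per identity), with cross_modal restricting mining to pairs across the RGB (input1) and IR (input2) modalities. For reference, a generic batch-hard triplet loss over such batches looks like the sketch below (the standard formulation, not necessarily this repository's):

import torch

def batch_hard_triplet_loss(feat, labels, margin=0.3):
    # For each anchor, mine the hardest positive and hardest negative in the batch
    dist = torch.cdist(feat, feat)
    same = labels.unsqueeze(0) == labels.unsqueeze(1)
    hardest_pos = dist.masked_fill(~same, float('-inf')).max(dim=1).values
    hardest_neg = dist.masked_fill(same, float('inf')).min(dim=1).values
    return torch.clamp(hardest_pos - hardest_neg + margin, min=0).mean()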
Example #6
def train(epoch):
    current_lr = adjust_learning_rate(optimizer, epoch)
    train_loss = AverageMeter()
    data_time = AverageMeter()
    batch_time = AverageMeter()
    correct = 0
    total = 0
    # New
    tripletloss_global = TripletLoss(args.batch_size, 4)
    Covloss = CovLoss(batchsize=args.batch_size, num_instance=4)
    softmax = nn.Softmax(dim=1)
    l2norm = Normalize()

    # switch to train mode
    net.train()
    end = time.time()
    for batch_idx, (input1, input2, label1, label2) in enumerate(trainloader):
        input1 = Variable(input1.cuda())
        input2 = Variable(input2.cuda())
        label1 = Variable(label1.cuda())
        label2 = Variable(label2.cuda())
        labels = torch.cat((label1, label2), 0)
        inputs = torch.cat((input1, input2), 0)

        data_time.update(time.time() - end)
        outputs, feat, _, outputs3, feat3, _ = net(inputs)
        loss = 0  # identity loss is only computed for the 'id' method
        if args.method == 'id':
            loss = criterion(outputs, labels)
            loss = loss + criterion(outputs3, labels)
            score = softmax(outputs)

            score3 = softmax(outputs3)

            _, predicted = torch.max(score.data, 1)
            correct += predicted.eq(labels).sum().item()
            _, predicted3 = torch.max(score3.data, 1)
            correct3 = predicted3.eq(labels).sum().item()
            correct = correct + correct3
        feat = l2norm(feat)
        feat3 = l2norm(feat3)
        closs = Covloss(feat)
        closs3 = Covloss(feat3)
        triloss = tripletloss_global(feat, labels)
        triloss3 = tripletloss_global(feat3, labels)
        loss = triloss + loss + triloss3 + closs + closs3

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss.update(loss.item(), 2 * input1.size(0))

        total += labels.size(0)

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if batch_idx % 10 == 0:
            print('Epoch: [{}][{}/{}] '
                  'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
                  'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '
                  'lr:{} '
                  'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '
                  'Accu: {:.2f}'.format(epoch,
                                        batch_idx,
                                        len(trainloader),
                                        current_lr,
                                        50. * correct / total,  # 'correct' sums two classifier heads, hence 50 not 100
                                        batch_time=batch_time,
                                        data_time=data_time,
                                        train_loss=train_loss))
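Normalize() is presumably a row-wise L2-normalization module applied before the triplet and covariance losses. A common definition (the repository's version may differ, e.g. in the norm power):

import torch.nn as nn

class Normalize(nn.Module):
    def forward(self, x):
        # L2-normalize each feature vector; the epsilon guards against zero norms
        return x / (x.norm(p=2, dim=1, keepdim=True) + 1e-12)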
Example #7
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
market = Market1501(root='./')
train_transform = Train_Transform(True)
val_transform = Val_Transform()
train_sampler = RandomIdentitySampler(market.train, 16, 4)
train_dataset = ImageDataset(dataset=market.train, transform=train_transform)
val_dataset = ImageDataset(dataset=market.test + market.query,
                           transform=val_transform)
train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=False,
                              sampler=train_sampler)
val_dataloader = DataLoader(val_dataset, batch_size=128, shuffle=False)
Model = Train_Model().to(device)
IDloss = CrossEntropySmooth(market.num_train_id)
optimizer = Make_Optimizer(Model, 3.5e-5)
tripletloss = TripletLoss(0.3)
warmup = Warmup(optimizer)
EPOCH = 120
for epoch in range(EPOCH):
    print('Epoch {}/{}'.format(epoch, EPOCH - 1))
    print('-' * 10)
    warmup.step()
    Model.train()
    for phase in ['Train', 'Val']:
        if phase == 'Train':
            start = time.time()
            running_loss = 0.0
            running_corrects = 0.0
            running_times = 0.0
            for index, data in enumerate(train_dataloader):
                running_times += 1
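Note how the sampler and loader settings line up: RandomIdentitySampler(market.train, 16, 4) presumably draws 16 identities with 4 images each, i.e. 16 * 4 = 64 images per batch, matching the DataLoader batch size of 64 and giving TripletLoss(0.3) identity-balanced batches to mine from.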