import time

import torch
import torch.optim as optim

import dsntnn


def train_new_data_with_model():
    # CoordRegression is the project's coordinate-regression network, defined elsewhere.
    model = CoordRegression(n_locations=8)
    optimizer = optim.RMSprop(model.parameters(), lr=2.5e-4, alpha=0.9)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = torch.nn.DataParallel(model).to(device)

    # Training set dataloader
    from data_process_landmarks_hw import dataloader

    for epoch in range(10):
        epoch_start = time.time()
        print("Epoch: {}/{}".format(epoch + 1, 10))
        train_loss = 0.0
        train_loss_cord = []

        # Put the model in training mode.
        model.train()

        # Training loop
        for i_batch, data in enumerate(dataloader):
            img, landmarks = data
            img = torch.as_tensor(img, dtype=torch.float32).to(device)
            # Landmark coordinates are scaled down by a factor of 64 before the loss.
            landmarks = torch.as_tensor(landmarks / 64.0, dtype=torch.float32).to(device)
            optimizer.zero_grad()
            # forward pass
            coords, heatmaps = model(img)
            # per-location euclidean losses
            euc_losses = dsntnn.euclidean_losses(coords, landmarks)
            # print("predict coords", coords, landmarks)
            # per-location regulation losses
            reg_losses = dsntnn.js_reg_losses(heatmaps, landmarks, sigma_t=1.0)
            # combine losses into an overall loss
            loss = dsntnn.average_loss(euc_losses + reg_losses)

            # Backpropagate to compute gradients (they were already zeroed above).
            loss.backward()

            # update model parameters with RMSprop
            optimizer.step()

            # Store the scalar loss value (detached from the graph) for logging.
            train_loss_cord.append(loss.item())

            # Log every 20 batches.
            if i_batch % 20 == 19:
                print(loss.item(), euc_losses.mean().item(), reg_losses.mean().item())
        # Save a checkpoint of the full (DataParallel-wrapped) model after each epoch.
        torch.save(model, 'models/landmarks_model_new_data_8{}.pt'.format(epoch))
        print(train_loss_cord)
Example no. 2
    def forward_2d_losses(self, out_var, target_var):
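        # Accumulate, over all heatmap stages, a pixelwise loss on the xy heatmap plus a
        # Euclidean loss between the predicted and target xy coordinates.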
        target_xy = target_var.narrow(-1, 0, 2)
        losses = 0

        for xy_hm, zy_hm, xz_hm in zip(self.xy_heatmaps, self.zy_heatmaps, self.xz_heatmaps):
            # Pixelwise heatmap loss.
            losses += self._calculate_pixelwise_loss(xy_hm, target_xy)
            # Calculated coordinate loss.
            actual_xy = self.heatmaps_to_coords(xy_hm, zy_hm, xz_hm)[..., :2]
            losses += euclidean_losses(actual_xy, target_xy)

        return losses
Example no. 3
    def forward_3d_losses(self, out_var, target_var):
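        # As in the 2D case, but the xyz target is projected onto the xy, zy and xz planes
        # so each heatmap view gets its own pixelwise loss; the coordinate loss is then
        # computed on the full xyz prediction.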
        target_xyz = target_var.narrow(-1, 0, 3)
        losses = 0

        target_xy = target_xyz.narrow(-1, 0, 2)
        target_zy = torch.cat([target_xyz.narrow(-1, 2, 1), target_xyz.narrow(-1, 1, 1)], -1)
        target_xz = torch.cat([target_xyz.narrow(-1, 0, 1), target_xyz.narrow(-1, 2, 1)], -1)
        for xy_hm, zy_hm, xz_hm in zip(self.xy_heatmaps, self.zy_heatmaps, self.xz_heatmaps):
            # Pixelwise heatmap loss.
            losses += self._calculate_pixelwise_loss(xy_hm, target_xy)
            losses += self._calculate_pixelwise_loss(zy_hm, target_zy)
            losses += self._calculate_pixelwise_loss(xz_hm, target_xz)
            # Calculated coordinate loss.
            actual_xyz = self.heatmaps_to_coords(xy_hm, zy_hm, xz_hm)
            losses += euclidean_losses(actual_xyz, target_xyz)

        return losses
Example no. 4
def test_euclidean_mask():
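    # Every location with a nonzero prediction error is masked out, so the masked
    # average of the Euclidean losses should be exactly zero.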
    output = torch.tensor([
        [[0.0, 0.0], [1.0, 1.0], [0.0, 0.0]],
        [[1.0, 1.0], [0.0, 0.0], [0.0, 0.0]],
    ])

    target = torch.tensor([
        [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
        [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],
    ])

    mask = torch.tensor([
        [1.0, 0.0, 1.0],
        [0.0, 1.0, 1.0],
    ])

    expected = 0.0
    actual = average_loss(euclidean_losses(output, target), mask)
    assert expected == actual.item()
Example no. 5
def calc_coord_loss(coords, heatmaps, target_var, masks):
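    # Combine the per-location Euclidean losses with a JS regularization loss computed
    # separately for each depth slice of the heatmaps, then average using the mask.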
    # Per-location euclidean losses
    euc_losses = dsntnn.euclidean_losses(
        coords,
        target_var)  # shape:[B, D, L, 2] batch, depth, locations, feature
    # Per-location regularization losses

    reg_losses = []
    for i in range(heatmaps.shape[1]):
        hms = heatmaps[:, i]
        target = target_var[:, i]
        reg_loss = dsntnn.js_reg_losses(hms, target, sigma_t=1.0)
        reg_losses.append(reg_loss)
    reg_losses = torch.stack(reg_losses, 1)
    # reg_losses = dsntnn.js_reg_losses(heatmaps, target_var, sigma_t=1.0) # shape: [B, D, L, 7, 7]
    # Combine losses into an overall loss
    coord_loss = dsntnn.average_loss((euc_losses + reg_losses).squeeze(),
                                     mask=masks)
    return coord_loss
Example no. 6
    def forward(self, heatmaps, coords, targets, device):
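        # If needed, broadcast the targets across the prediction stages, rescale them into
        # the normalized coordinate range, then average the Euclidean + JS regularization
        # loss over the batch.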
        out = torch.Tensor([31, 31]).to(device)
        batch_size = coords.shape[0]
        n_stages = coords.shape[1]

        if len(targets.shape) != len(coords.shape):
            targets = torch.unsqueeze(targets, 1)
            targets = targets.repeat(1, n_stages, 1, 1)

        targets = (targets.div(255) * 2 + 1) / out - 1

        losses = []
        for i in range(batch_size):
            euc_loss = dsntnn.euclidean_losses(coords[i, :, :, :],
                                               targets[i, :, :, :])
            reg_loss = dsntnn.js_reg_losses(heatmaps[i, :, :, :, :],
                                            targets[i, :, :, :],
                                            sigma_t=1.0)
            losses.append(dsntnn.average_loss(euc_loss + reg_loss))
        return sum(losses) / batch_size
Example no. 7
    def test_mask(self):
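        # Same masking behaviour as the test above, written against the legacy
        # torch.autograd.Variable API: every masked-in location has zero error.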
        output = torch.Tensor([
            [[0, 0], [1, 1], [0, 0]],
            [[1, 1], [0, 0], [0, 0]],
        ])

        target = torch.Tensor([
            [[0, 0], [0, 0], [0, 0]],
            [[0, 0], [0, 0], [0, 0]],
        ])

        mask = torch.Tensor([
            [1, 0, 1],
            [0, 1, 1],
        ])

        expected = torch.Tensor([0])
        actual = average_loss(
            euclidean_losses(Variable(output), Variable(target)),
            Variable(mask))

        self.assertEqual(expected, actual.data)
Example no. 8
    def test_forward_and_backward(self):
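        # Each location predicts (3, 4) against a target of (0, 0), so every Euclidean
        # error is 5 and the averaged loss is 5. The gradient of that average w.r.t.
        # each input coordinate is (3/5)/4 = 0.15 and (4/5)/4 = 0.20.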
        input_tensor = torch.Tensor([
            [[3, 4], [3, 4]],
            [[3, 4], [3, 4]],
        ])

        target = torch.Tensor([
            [[0, 0], [0, 0]],
            [[0, 0], [0, 0]],
        ])

        in_var = Variable(input_tensor, requires_grad=True)

        expected_loss = torch.Tensor([5])
        actual_loss = average_loss(euclidean_losses(in_var, Variable(target)))
        expected_grad = torch.Tensor([
            [[0.15, 0.20], [0.15, 0.20]],
            [[0.15, 0.20], [0.15, 0.20]],
        ])
        actual_loss.backward()
        actual_grad = in_var.grad.data

        self.assertEqual(expected_loss, actual_loss.data)
        self.assertEqual(expected_grad, actual_grad)