Example #1
    def loss(self,
             out,
             vt_1,
             virtual_inputs,
             real_inputs,
             flo,
             flo_back,
             real_projections_t,
             real_projections_t_1,
             real_postion_anchor,
             follow=True,
             undefine=True,
             optical=True,
             stay=False):
        unit_size = self.net.unit_size
        mid = real_inputs.size()[1] // (2 * unit_size)

        # Real camera pose (quaternion) at the center of the 21-pose window.
        Rt = real_inputs[:, unit_size * mid:unit_size * mid + 4]
        # Apply the predicted update to the latest virtual pose, then compose
        # the result with the real-pose anchor.
        v_pos = torch_QuaternionProduct(out, virtual_inputs[:, -4:])
        r_pos = torch_QuaternionProduct(v_pos, real_postion_anchor)

        loss = torch.zeros(7).cuda()
        if self.loss_follow_w > 0 and follow:
            for i in range(-2, 3):
                loss[0] += self.loss_follow_w * self.loss_follow(
                    v_pos, real_inputs[:, unit_size * (i + mid):unit_size *
                                       (i + mid) + 4], None)
        if self.loss_angle_w > 0 and follow:
            threshold = 6 / 180 * 3.1415926  # 6 degrees in radians
            loss_angle, theta = self.loss_angle(v_pos, Rt, threshold=threshold)
            loss[1] = self.loss_angle_w * loss_angle
        if self.loss_smooth_w > 0:
            loss_smooth = self.loss_smooth(out)
            loss[2] = self.loss_smooth_w * loss_smooth
        if self.loss_c2_smooth_w > 0:
            loss[3] = self.loss_c2_smooth_w * self.loss_c2_smooth(
                out, virtual_inputs[:, -4:], virtual_inputs[:, -8:-4])
        if self.loss_undefine_w > 0 and undefine:
            Vt_undefine = v_pos.clone()
            for i in range(0, 10, 2):
                Rt_undefine = real_inputs[:, unit_size * (mid + i):unit_size *
                                          (mid + i) + 4]
                loss_undefine_w = self.loss_undefine_w * self.gaussian_weight[i]
                loss[4] += loss_undefine_w * self.loss_undefine(
                    Vt_undefine, Rt_undefine)
                # Advance the virtual pose by the predicted update twice per
                # iteration, matching the loop's stride of 2 frames.
                Vt_undefine = torch_QuaternionProduct(out, Vt_undefine)
                Vt_undefine = torch_QuaternionProduct(out, Vt_undefine)
        if self.loss_opt_w > 0 and optical:
            loss[5] = self.loss_opt_w * self.loss_optical(
                r_pos, vt_1, flo, flo_back, real_projections_t,
                real_projections_t_1)
        if self.loss_stay_w > 0 and stay:
            loss[6] = self.loss_stay_w * self.loss_stay(out)
        return loss
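
Example #1 slices real_inputs in units of 4 (one quaternion per pose). Below is a small self-contained check of that index arithmetic, assuming the [b, 84 = 21*4] layout noted in the later examples; the tensor is a dummy, not project data.

import torch

# Index bookkeeping for the slices above: with unit_size = 4 and 21 stacked
# poses per sample, the center pose sits at columns 40:44.
unit_size = 4
real_inputs = torch.zeros(2, 84)                   # dummy batch of 2 samples
mid = real_inputs.size()[1] // (2 * unit_size)     # 84 // 8 = 10
print(mid, unit_size * mid, unit_size * mid + 4)   # 10 40 44
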
Example #2
    def forward(self, Q1, Q2, threshold=0.5236, logistic_beta1=100):
        batch_size = Q1.shape[0]
        # Relative rotation from Q1 to Q2: Q3 = Q2 * Q1^{-1}, normalized to a unit quaternion.
        Q3 = torch_norm_quat(torch_QuaternionProduct(Q2, torch_QuaternionReciprocal(Q1)))
        theta = torch.zeros(batch_size).cuda()
        # Rotation angle of a unit quaternion with scalar part w is theta = 2 * acos(w).
        index = (Q3[:, 3] < 1).nonzero()
        theta[index] = torch.acos(Q3[index, 3]) * 2
        # Logistic gate: angles below the threshold (default 0.5236 rad ~= 30 deg) contribute little.
        loss = torch.mean(theta * (1 / (1 + torch.exp(-logistic_beta1 * (theta - threshold)))))
        return loss, theta
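
Example #2 leans on three helpers that are not shown: torch_QuaternionProduct, torch_QuaternionReciprocal, and torch_norm_quat. The sketch below is a reconstruction, not the repo's code, assuming the [x, y, z, w] component order implied by Q3[:, 3] being treated as the scalar part; the names quat_product, quat_reciprocal, and quat_normalize are placeholders.

import torch

def quat_product(q1, q2):
    # Hamilton product of batched quaternions stored as [x, y, z, w].
    x1, y1, z1, w1 = q1[:, 0], q1[:, 1], q1[:, 2], q1[:, 3]
    x2, y2, z2, w2 = q2[:, 0], q2[:, 1], q2[:, 2], q2[:, 3]
    x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
    y = w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2
    z = w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2
    w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
    return torch.stack([x, y, z, w], dim=1)

def quat_reciprocal(q):
    # For unit quaternions the reciprocal is the conjugate: negate the vector part.
    return torch.cat([-q[:, :3], q[:, 3:4]], dim=1)

def quat_normalize(q, eps=1e-8):
    # Project back onto the unit sphere, guarding against a zero norm.
    return q / (q.norm(dim=1, keepdim=True) + eps)
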
Example #3
def run(loader, cf, USE_CUDA=True):
    number_virtual, number_real = cf['data']["number_virtual"], cf['data'][
        "number_real"]
    for i, data in enumerate(loader, 0):
        # get the inputs; data is a list of [inputs, labels]
        real_inputs, times, flo, flo_back, real_projections, real_postion, ois, real_queue_idx = data
        print("Fininsh Load data")

        real_inputs = real_inputs.type(torch.float)  #[b,60,84=21*4]
        real_projections = real_projections.type(torch.float)

        batch_size, step, dim = real_inputs.size()
        times = times.numpy()
        real_queue_idx = real_queue_idx.numpy()
        virtual_queue = [None] * batch_size

        for j in range(step):
            virtual_inputs, vt_1 = loader.dataset.get_virtual_data(
                virtual_queue, real_queue_idx, times[:, j], times[:, j + 1],
                times[:, 0], batch_size, number_virtual, real_postion[:, j])
            real_inputs_step = real_inputs[:, j, :]
            if USE_CUDA:
                real_inputs_step = real_inputs_step.cuda()
                virtual_inputs = virtual_inputs.cuda()
                real_postion_anchor = real_postion[:, j].cuda()

            # Center real quaternion of the 21-pose window (4 * 10 = 40; see Example #1).
            out = real_inputs_step[:, 40:44]

            virtual_position = virtual_inputs[:, -4:]
            pos = torch_QuaternionProduct(virtual_position,
                                          real_postion_anchor)

            out = torch_QuaternionProduct(out, pos)

            if USE_CUDA:
                out = out.cpu().detach().numpy()

            virtual_queue = loader.dataset.update_virtual_queue(
                batch_size, virtual_queue, out, times[:, j + 1])
    return np.squeeze(virtual_queue, axis=0)
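
This run() only reads two keys from the config before looping over the loader. A minimal sketch of the dict shape it expects; the numeric values are placeholders, not the project's defaults.

# Hypothetical config shape for run(loader, cf); values are placeholders.
cf = {
    "data": {
        "number_virtual": 2,   # forwarded to loader.dataset.get_virtual_data()
        "number_real": 10,     # unpacked here but unused in the snippet above
    }
}
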
Example #4
def run(model, loader, cf, USE_CUDA=True):
    no_flo = False
    number_virtual, number_real = cf['data']["number_virtual"], cf['data'][
        "number_real"]
    model.net.eval()
    model.unet.eval()
    activation = nn.Softshrink(0.0006)  # 0.0036
    for i, data in enumerate(loader, 0):
        # get the inputs; data is a list of [inputs, labels]
        real_inputs, times, flo, flo_back, real_projections, real_postion, ois, real_queue_idx = data
        print("Fininsh Load data")

        real_inputs = real_inputs.type(torch.float)  #[b,60,84=21*4]
        real_projections = real_projections.type(torch.float)
        flo = flo.type(torch.float)
        flo_back = flo_back.type(torch.float)
        ois = ois.type(torch.float)

        batch_size, step, dim = real_inputs.size()
        times = times.numpy()
        real_queue_idx = real_queue_idx.numpy()
        virtual_queue = [None] * batch_size

        run_loss = 0
        model.net.init_hidden(batch_size)
        count = 0
        for j in range(step):
            if (j + 1) % 100 == 0:
                print("Step: " + str(j + 1) + "/" + str(step))
            virtual_inputs, vt_1 = loader.dataset.get_virtual_data(
                virtual_queue, real_queue_idx, times[:, j], times[:, j + 1],
                times[:, 0], batch_size, number_virtual, real_postion[:, j])
            real_inputs_step = real_inputs[:, j, :]
            inputs = torch.cat((real_inputs_step, virtual_inputs), dim=1)

            # inputs = Variable(real_inputs_step)
            if USE_CUDA:
                real_inputs_step = real_inputs_step.cuda()
                virtual_inputs = virtual_inputs.cuda()
                inputs = inputs.cuda()
                if no_flo is False:
                    flo_step = flo[:, j].cuda()
                    flo_back_step = flo_back[:, j].cuda()
                else:
                    flo_step = None
                    flo_back_step = None
                vt_1 = vt_1.cuda()
                real_projections_t = real_projections[:, j + 1].cuda()
                real_projections_t_1 = real_projections[:, j].cuda()
                real_postion_anchor = real_postion[:, j].cuda()
                ois_step = ois[:, j].cuda()

            if no_flo is False:
                b, h, w, _ = flo_step.size()
                flo_step = norm_flow(flo_step, h, w)
                flo_back_step = norm_flow(flo_back_step, h, w)

            with torch.no_grad():
                if no_flo is False:
                    flo_out = model.unet(flo_step, flo_back_step)
                else:
                    flo_out = None
                if j < 1:
                    # Run the net twice on the first step; use `_` to avoid
                    # shadowing the batch index `i` from the outer loop.
                    for _ in range(2):
                        out = model.net(inputs, flo_out, ois_step)
                else:
                    out = model.net(inputs, flo_out, ois_step)

            real_position = real_inputs_step[:, 40:44]
            virtual_position = virtual_inputs[:, -4:]

            # Shrink tiny components of the vector part toward zero, then
            # renormalize so `out` stays a unit quaternion.
            out[:, :3] = activation(out[:, :3])
            out = torch_norm_quat(out)

            pos = torch_QuaternionProduct(virtual_position,
                                          real_postion_anchor)
            loss_step = model.loss(
                out, vt_1, virtual_inputs, real_inputs_step, flo_step,
                flo_back_step, real_projections_t, real_projections_t_1,
                real_postion_anchor, follow=True, optical=True, undefine=True)
            run_loss += loss_step

            out = torch_QuaternionProduct(out, pos)

            if USE_CUDA:
                out = out.cpu().detach().numpy()

            virtual_queue = loader.dataset.update_virtual_queue(
                batch_size, virtual_queue, out, times[:, j + 1])

    run_loss /= step
    print("\nLoss: follow, angle, smooth, c2_smooth, undefine, optical")
    print(run_loss.cpu().numpy()[:-1], "\n")
    return np.squeeze(virtual_queue, axis=0)
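
The Softshrink-then-renormalize step above (out[:, :3] = activation(out[:, :3]) followed by torch_norm_quat) snaps near-identity updates to the exact identity quaternion. A self-contained demo of that effect, with plain normalization standing in for torch_norm_quat:

import torch
import torch.nn as nn

# Components of the vector part with |x| <= 0.0006 are zeroed by Softshrink, so
# a nearly-still predicted rotation becomes exactly the identity [0, 0, 0, 1].
activation = nn.Softshrink(0.0006)
out = torch.tensor([[3e-4, -2e-4, 1e-4, 0.9999999]])
out[:, :3] = activation(out[:, :3])
out = out / out.norm(dim=1, keepdim=True)   # stand-in for torch_norm_quat
print(out)   # tensor([[0., 0., 0., 1.]])
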
Example #5
    def forward(self, virtual_quat, real_quat, real_postion=None):
        # Optionally compose the real pose with the anchor before comparing.
        if real_postion is not None:
            real_quat = torch_QuaternionProduct(real_quat, real_postion)
        return self.MSE(virtual_quat, real_quat)
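
The only state this forward() uses is self.MSE; assuming it is a plain nn.MSELoss() (an assumption, consistent with how Example #1 weights the result), the real_postion=None path reduces to an element-wise MSE between the two quaternions:

import torch
import torch.nn as nn

# Element-wise MSE between unit quaternions, as in the real_postion=None branch.
mse = nn.MSELoss()
virtual_quat = torch.tensor([[0.0, 0.0, 0.0, 1.0]])           # identity pose
real_quat = torch.tensor([[0.0, 0.0, 0.0998334, 0.9950042]])  # ~0.2 rad about z
print(mse(virtual_quat, real_quat))   # grows as the two poses drift apart
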
Example #6
    def forward(self, Qt, Qt_1, Qt_2):
        # Previous frame-to-frame rotation Qt_1 * Qt_2^{-1}; penalize how far the
        # new update Qt deviates from it (second-order / C2 smoothness).
        detaQt_1 = torch_QuaternionProduct(Qt_1, torch_QuaternionReciprocal(Qt_2))
        return self.MSE(Qt, detaQt_1)
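
To see what Example #6 penalizes: with Qt_2 chosen as the identity, the previous frame-to-frame delta Qt_1 * Qt_2^{-1} is simply Qt_1, so the check below needs no quaternion helpers. self.MSE is assumed to be nn.MSELoss().

import math
import torch
import torch.nn as nn

def rot_z(angle):
    # Unit quaternion [x, y, z, w] for a rotation of `angle` radians about z.
    return torch.tensor([[0.0, 0.0, math.sin(angle / 2), math.cos(angle / 2)]])

mse = nn.MSELoss()
Qt_1 = rot_z(0.02)       # previous per-frame rotation (delta from identity Qt_2)
Qt_same = rot_z(0.02)    # new rotation equal to the previous one
Qt_jerky = rot_z(0.10)   # new rotation that changes the angular velocity
print(mse(Qt_same, Qt_1))    # ~0: constant angular velocity is not penalized
print(mse(Qt_jerky, Qt_1))   # > 0: changes in angular velocity are penalized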