# Example #1
def loss_update(temp_loss_list, temp_data, temp_weights, temp_zs):
    """Accumulate the three running losses for one batch.

    Args:
        temp_loss_list: running accumulators ``[loss_1, loss_2, loss_3]``.
        temp_data: batch tensor; first dimension indexed up to
            ``batchsize_2`` (module-level constant — confirm against caller).
        temp_weights: per-sample weights forwarded to ``model.h``.
        temp_zs: per-sample targets for the distance terms.

    Returns:
        The updated ``[loss_1, loss_2, loss_3]`` list.
    """
    acc_1, acc_2, acc_3 = temp_loss_list

    # Pair every sample with a randomly permuted partner from the same batch.
    shuffled = temp_data[torch.randperm(batchsize_2)]
    acc_1 = acc_1 + tools.calculate_dist(temp_data, shuffled)

    for idx in range(batchsize_2):
        # Distance of model.h output to the target z, for the original
        # sample first, then for its shuffled partner.
        acc_2 = acc_2 + tools.calculate_dist(
            model.h(temp_data[idx], temp_weights[idx]), temp_zs[idx])
        acc_3 = acc_3 + tools.calculate_dist(
            model.h(shuffled[idx], temp_weights[idx]), temp_zs[idx])

    return [acc_1, acc_2, acc_3]
def calculate_loss(w, y, std, mean, batchsize, tlapmat, tlaptar, vnum,
                   weight_lap):
    """Combine a vertex-distance loss with a Laplacian-smoothness loss.

    Args:
        w: predicted batch, normalized; de-normalized via ``w * std + mean``.
        y: ground-truth batch, normalized the same way.
        std, mean: de-normalization statistics.
        batchsize: number of samples in the batch.
        tlapmat: Laplacian matrix applied to each (vnum, 3) vertex block.
        tlaptar: UNUSED — the original code overwrote this argument inside
            the loop, so its incoming value was never read. Kept in the
            signature for backward compatibility; a local is used instead.
        vnum: vertex count; each sample is viewed as (vnum, 3).
        weight_lap: weight applied to the Laplacian term.

    Returns:
        Tuple ``(loss_1 + loss_2, loss_1, loss_2)``.
    """
    loss_1 = tools.calculate_dist(w * std, y * std)
    w_ = w * std + mean
    y_ = y * std + mean

    loss_2 = 0
    for bid in range(batchsize):
        # Laplacian of the ground-truth sample, recomputed per sample.
        # Bug fix (readability): do not clobber the `tlaptar` parameter.
        lap_target = torch.mm(tlapmat, y_[bid].view(vnum, 3))
        loss_2 = loss_2 + tools.calculate_dist(
            torch.mm(tlapmat, w_[bid].view(vnum, 3)), lap_target)

    # Average over the batch, then apply the Laplacian weight.
    loss_2 = loss_2 / float(batchsize) * weight_lap
    return loss_1 + loss_2, loss_1, loss_2
# Example #3
    # NOTE(review): this span is the body of a loop whose header (providing
    # case_index, frid, idx, x, etc.) lies outside this chunk — review in
    # the full file before changing anything here.

    # Per-frame rigid transform: first 9 values of the motion row form a
    # 3x3 rotation, the remaining values a 1x3 translation.
    myr = np.asarray(md[case_index][frid, :9]).reshape((3, 3))
    myt = np.asarray(md[case_index][frid, 9:]).reshape((1, 3))

    # Undo the rigid transform: x_ = (x - t) @ R^-1 over vnum vertices.
    x_ = np.dot(x - np.tile(myt, [vnum, 1]), np.linalg.inv(myr))

    # Normalize and flatten to a (1, vnum*3) CUDA tensor.
    t = np.divide((x_ - data_mean), data_std)
    y = Variable((torch.FloatTensor(t).view(1, vnum * 3)).cuda())

    # model.g / model.h look like an encode/decode pair — TODO confirm.
    z = model.g(y)
    zs[idx, :] = z.cpu().data.numpy().reshape(1, tardim[-1])
    w = model.h(z)
    # De-normalize and re-apply the rigid transform back to world space.
    u_ = np.multiply(w.cpu().data.numpy().reshape(vnum, 3),
                     data_std) + data_mean
    u = np.dot(u_, myr) + np.tile(myt, [vnum, 1])

    # loss_1: distance in normalized space; loss_2: distance in local
    # (un-normalized, pre-rigid-transform) space.
    loss_1 = tools.calculate_dist(w, y)
    loss_2 = tools.calculate_dist(torch.FloatTensor(u_), torch.FloatTensor(x_))

    # Running sums, averaged and printed after the loop.
    alll_1 = alll_1 + float(loss_1.cpu())
    alll_2 = alll_2 + float(loss_2.cpu())

# Report the mean of each accumulated loss over all samples.
print(alll_1 / samplenum, alll_2 / samplenum)

# Per-dimension mean/std of the collected latent codes `zs`; saved to the
# configured basis mean/std paths (presumably used later to normalize the
# basis network's output — verify against the loading code).
m = np.mean(zs, axis=0)
s = np.std(zs, axis=0)

np.save(SET.BASIS_MEAN_PATH, m)
np.save(SET.BASIS_STD_PATH, s)

# Calculating motion mean and std
for fid in range(uid):
        # NOTE(review): this fragment is structurally broken as shown — the
        # body is indented 8 spaces under `for fid`, yet the block below
        # dedents to 4 spaces (matching no outer level), and names such as
        # y, w, bid, fr, x, myr, myt come from code outside this chunk.
        # An inner loop header appears to be missing; fix only with the
        # full file in view.

        # Normalize the basis-network code with precomputed mean/std.
        z = torch.div(basis_net.g(y) - basis_mean, basis_std)

        # Replace NaNs with zero: (z != z) is true only for NaN entries,
        # which arise where basis_std is zero.
        res = z.clone()
        res[z != z] = 0.0
        z = res

        temp_data = model.g(z, w)
        data[bid] = temp_data
        zs[bid] = z
        weights[bid] = w

    # Spread of the decoded batch plus its mean, used below.
    loss_1 = loss_1 + torch.sum(data.std(dim=0))
    dmean = data.mean(dim=0)

    for bid in range(batchsize_2):

        # Map the batch mean back through model.h, then through the basis
        # network, and de-normalize to world space (as in the eval snippet
        # above — TODO confirm myr/myt are still the right rigid transform).
        q = model.h(dmean, weights[bid])
        w = basis_net.h(torch.mul(q, basis_std) + basis_mean)
        u_ = np.multiply(w.cpu().data.numpy().reshape(vnum, 3),
                         data_std) + data_mean
        u = np.dot(u_, (myr)) + np.tile(myt, [vnum, 1])
        loss_2 = tools.calculate_dist(q, zs[bid])

        # Export ground-truth and reconstructed meshes; the reconstruction
        # filename embeds the per-sample loss_2 value.
        file_name_grt = 'eval_%s_%s_%s_%s_grt.obj' % (str(bid).zfill(3), fr[0],
                                                      fr[1], fr[2])
        file_name_rec = 'eval_%s_%s_%s_%s_%s_rec.obj' % (
            str(bid).zfill(3), fr[0], fr[1], fr[2], str(float(loss_2.cpu())))

        obj.objexport(x, tmp_f, os.path.join(SET.EVAL_MIE, file_name_grt))
        obj.objexport(u, tmp_f, os.path.join(SET.EVAL_MIE, file_name_rec))