return Variable(torch.from_numpy(mm.astype('float32'))).cuda()


def cat(m1, m2):
    """Concatenate two tensors along dimension 1 (the feature axis)."""
    return torch.cat((m1, m2), dim=1)


# z_fixed = Variable(torch.randn(20, Z_dim), volatile=False).cuda()
# c_fixed = np.array(range(0,4))
# c_fixed = Variable(torch.from_numpy(mutil.label_num2vec(np.repeat(c_fixed,5)).astype("float32")),volatile=False).cuda()
# zc_fixed = torch.cat([z_fixed, c_fixed], 1)

# Fixed latent noise and class codes, built once so generator samples are
# comparable across training iterations.
# NOTE(review): mb_size, Z_dim, mode_num, grid_num, mutil and Variable are
# module globals/imports defined elsewhere in this file — confirm before reuse.
z_fixed = Variable(torch.randn(mb_size, Z_dim)).cuda()
c_fixed = np.array(range(0, mode_num))
# Repeat each class id mb_size // mode_num times, then convert the id vector
# to a one-hot float32 matrix via mutil.label_num2vec.
# volatile=False is the legacy (pre-0.4) autograd flag: gradients may flow.
c_fixed = Variable(torch.from_numpy(
    mutil.label_num2vec(np.repeat(c_fixed,
                                  mb_size // (mode_num))).astype("float32")),
                   volatile=False).cuda()
# zc_fixed = torch.cat([z_fixed, c_fixed],1)
# zc_fixed = Variable(zc_fixed, volatile=False).cuda()

# Fixed evaluation mesh over [-x_limit, x_limit) x [-y_limit, y_limit),
# flattened to shape (grid_num * grid_num, 2).
x_limit = 10
y_limit = 10
# Step chosen so each mgrid axis yields exactly grid_num samples:
# unit == (2 * x_limit) / grid_num.
unit = x_limit / (float(grid_num)) * 2

y_fixed, x_fixed = np.mgrid[-x_limit:x_limit:unit, -y_limit:y_limit:unit]

# Flatten both coordinate planes to column vectors and stack into (N, 2).
x_fixed, y_fixed = x_fixed.reshape(grid_num * grid_num,
                                   1), y_fixed.reshape(grid_num * grid_num, 1)
mesh_fixed_cpu = np.concatenate([x_fixed, y_fixed], 1)
mesh_fixed = Variable(
    torch.from_numpy(mesh_fixed_cpu.astype("float32")).cuda())
# Ejemplo n.º 2
# 0
def save_grad(name):
    """Create a backward hook that stashes gradients under *name*.

    Each time autograd invokes the returned hook, the incoming gradient
    tensor is recorded in the module-level ``grads`` dictionary.
    """
    def _record(grad):
        grads[name] = grad
    return _record


# z_fixed = Variable(torch.randn(20, Z_dim), volatile=False).cuda()
# c_fixed = np.array(range(0,4))
# c_fixed = Variable(torch.from_numpy(mutil.label_num2vec(np.repeat(c_fixed,5)).astype("float32")),volatile=False).cuda()
# zc_fixed = torch.cat([z_fixed, c_fixed], 1)

# Fixed latent noise and class codes for visualization.
# NOTE(review): here z_fixed is a plain CPU tensor (no Variable/.cuda()),
# unlike the other variants in this file — confirm this is intentional.
z_fixed = torch.randn(20, Z_dim)
# One class id per cell of a mode_num x mode_num grid, each repeated so the
# total row count equals mb_size; mutil.label_num2vec converts the id vector
# to a one-hot float32 matrix. volatile=False is the legacy (pre-0.4)
# autograd flag meaning gradients may flow.
c_fixed = np.array(range(0, mode_num * mode_num))
c_fixed = Variable(
    torch.from_numpy(mutil.label_num2vec(np.repeat(c_fixed, mb_size // (mode_num * mode_num))).astype("float32")),
    volatile=False).cuda()
# zc_fixed = torch.cat([z_fixed, c_fixed],1)
# zc_fixed = Variable(zc_fixed, volatile=False).cuda()

# Fixed evaluation mesh: a grid_num x grid_num lattice flattened to (10000, 2).
grid_num = 100
# Hard-coded mgrid steps are sized so each axis yields exactly grid_num
# points: (12 - 0) / 0.12 == 100 and (13 - (-10)) / 0.23 == 100.
# Note the second axis runs in descending order (13 down toward -10).
y_fixed, x_fixed = np.mgrid[0:12:0.12, 13:-10:-0.23]
# Flatten both coordinate planes to column vectors and stack into (N, 2).
x_fixed, y_fixed = x_fixed.reshape(grid_num * grid_num, 1), y_fixed.reshape(grid_num * grid_num, 1)
mesh_fixed_cpu = np.concatenate([x_fixed, y_fixed], 1)
mesh_fixed = Variable(torch.from_numpy(mesh_fixed_cpu.astype("float32")).cuda())


# mesh_fixed.register_hook(save_grad('Mesh'))

def get_grad(input, label, name, c=False, is_z=True, need_sample=False):
    D.zero_grad()
# Ejemplo n.º 3
# 0
def save_grad(name):
    """Return a backward hook that records gradients in ``grads``.

    The hook, when called by autograd, stores the gradient tensor it
    receives into the module-level ``grads`` dict under key *name*.
    """
    def _store(grad):
        grads[name] = grad
    return _store


# z_fixed = Variable(torch.randn(20, Z_dim), volatile=False).cuda()
# c_fixed = np.array(range(0,4))
# c_fixed = Variable(torch.from_numpy(mutil.label_num2vec(np.repeat(c_fixed,5)).astype("float32")),volatile=False).cuda()
# zc_fixed = torch.cat([z_fixed, c_fixed], 1)

# Fixed latent noise and class codes, built once so generator samples are
# comparable across training iterations.
# NOTE(review): mb_size, Z_dim, mode_num, mutil and Variable are module
# globals/imports defined elsewhere in this file — confirm before reuse.
z_fixed = Variable(torch.randn(mb_size, Z_dim)).cuda()
# One class id per cell of a mode_num x mode_num grid, each repeated so the
# total row count equals mb_size; mutil.label_num2vec converts the id vector
# to a one-hot float32 matrix. volatile=False is the legacy (pre-0.4)
# autograd flag meaning gradients may flow.
c_fixed = np.array(range(0, mode_num * mode_num))
c_fixed = Variable(torch.from_numpy(
    mutil.label_num2vec(np.repeat(c_fixed, mb_size //
                                  (mode_num * mode_num))).astype("float32")),
                   volatile=False).cuda()
# zc_fixed = torch.cat([z_fixed, c_fixed],1)
# zc_fixed = Variable(zc_fixed, volatile=False).cuda()

# Fixed evaluation mesh: a grid_num x grid_num lattice flattened to (10000, 2).
grid_num = 100
# Hard-coded mgrid steps are sized so each axis yields exactly grid_num
# points: (12 - 0) / 0.12 == 100 and (13 - (-10)) / 0.23 == 100.
y_fixed, x_fixed = np.mgrid[0:12:0.12, 13:-10:-0.23]
# Flatten both coordinate planes to column vectors and stack into (N, 2).
x_fixed, y_fixed = x_fixed.reshape(grid_num * grid_num,
                                   1), y_fixed.reshape(grid_num * grid_num, 1)
mesh_fixed_cpu = np.concatenate([x_fixed, y_fixed], 1)
mesh_fixed = Variable(
    torch.from_numpy(mesh_fixed_cpu.astype("float32")).cuda())

# mesh_fixed.register_hook(save_grad('Mesh'))

# Ejemplo n.º 4
# 0
def save_grad(name):
    """Build a backward hook saving gradients to the global ``grads`` dict.

    Autograd passes the gradient tensor to the returned hook, which files
    it away under key *name*.
    """
    def _capture(grad):
        grads[name] = grad
    return _capture


# z_fixed = Variable(torch.randn(20, Z_dim), volatile=False).cuda()
# c_fixed = np.array(range(0,4))
# c_fixed = Variable(torch.from_numpy(mutil.label_num2vec(np.repeat(c_fixed,5)).astype("float32")),volatile=False).cuda()
# zc_fixed = torch.cat([z_fixed, c_fixed], 1)

# Fixed latent noise and class codes for visualization.
# NOTE(review): here z_fixed is a plain CPU tensor (no Variable/.cuda()),
# unlike the other variants in this file — confirm this is intentional.
z_fixed = torch.randn(20, Z_dim)
# One class id per cell of a mode_num x mode_num grid, each repeated so the
# total row count equals mb_size; mutil.label_num2vec converts the id vector
# to a one-hot float32 matrix. volatile=False is the legacy (pre-0.4)
# autograd flag meaning gradients may flow.
c_fixed = np.array(range(0, mode_num * mode_num))
c_fixed = Variable(
    torch.from_numpy(mutil.label_num2vec(np.repeat(c_fixed, mb_size // (mode_num * mode_num))).astype("float32")),
    volatile=False).cuda()
# zc_fixed = torch.cat([z_fixed, c_fixed],1)
# zc_fixed = Variable(zc_fixed, volatile=False).cuda()

# Fixed evaluation mesh: a grid_num x grid_num lattice on [-20, 20) x
# [-20, 20), flattened to shape (10000, 2).
grid_num = 100
# Step 40 / grid_num gives exactly grid_num samples per mgrid axis.
y_fixed, x_fixed = np.mgrid[-20:20:float(40)/grid_num, -20:20:float(40)/grid_num]
# Flatten both coordinate planes to column vectors and stack into (N, 2).
x_fixed, y_fixed = x_fixed.reshape(grid_num * grid_num, 1), y_fixed.reshape(grid_num * grid_num, 1)
mesh_fixed_cpu = np.concatenate([x_fixed, y_fixed], 1)
mesh_fixed = Variable(torch.from_numpy(mesh_fixed_cpu.astype("float32")).cuda())


# mesh_fixed.register_hook(save_grad('Mesh'))

def get_grad(input, label, name, c=False, is_z=True, need_sample = False):