Example #1
    def __init__(self, filename_obj, filename_ref=None):
        super(Model, self).__init__()
        # load .obj
        vertices, faces = nr.load_obj(filename_obj)
        self.vertices = vertices.unsqueeze(0).float32().stop_grad()
        self.faces = faces.unsqueeze(0).float32().stop_grad()

        # create textures
        texture_size = 2
        self.textures = jt.ones((1, self.faces.shape[1], texture_size, texture_size, texture_size, 3)).float32().stop_grad()

        # load reference image
        self.image_ref = jt.array((imread(filename_ref).max(-1) != 0).astype(np.float32)).stop_grad()

        # camera parameters
        self.camera_position = jt.array([6,10,-14]).float32()

        # setup renderer
        renderer = nr.Renderer(camera_mode='look_at')
        renderer.eye = self.camera_position
        self.renderer = renderer
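
Note: every tensor except `camera_position` is created with `stop_grad()`, so optimizing this model moves only the camera; `image_ref` is loaded as a binary silhouette mask (any nonzero channel maps to 1) for the render to match.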
Example #2
 def test_data(self):
     a = jt.array([1, 2, 3])
     assert (a.data == [1, 2, 3]).all()
     d = a.data
     a.data[1] = -2
     assert (a.data == [1, -2, 3]).all()
     assert (a.fetch_sync() == [1, -2, 3]).all()
     li = jt.liveness_info()
     del a
     assert li == jt.liveness_info()
     del d
     assert li != jt.liveness_info()
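
Note: the test shows that `a.data` exposes the Var's underlying buffer: writing through it (`a.data[1] = -2`) mutates `a`, and because `d` still references that buffer, deleting `a` alone leaves `jt.liveness_info()` unchanged; only deleting `d` as well frees it.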
Example #3
 def __init__(self, requires_grad=False):
     super(PerceptualLoss, self).__init__()
     self.mean_rgb = jt.array([0.485, 0.456, 0.406])
     self.std_rgb = jt.array([0.229, 0.224, 0.225])
     vgg_pretrained_features = models.vgg.vgg16().features
     vgg_pretrained_features.load('init_models/vgg_pretrained_features.pkl')
     self.slice1 = nn.Sequential()
     self.slice2 = nn.Sequential()
     self.slice3 = nn.Sequential()
     self.slice4 = nn.Sequential()
     for x in range(4):
         self.slice1.append(vgg_pretrained_features[x])
     for x in range(4, 9):
         self.slice2.append(vgg_pretrained_features[x])
     for x in range(9, 16):
         self.slice3.append(vgg_pretrained_features[x])
     for x in range(16, 23):
         self.slice4.append(vgg_pretrained_features[x])
     if not requires_grad:
         for param in self.parameters():
             param.stop_grad()  # stop_grad() marks the Var in place; no reassignment needed
Example #4
    def get_latent(self, input_image):

        input_image = (input_image - 127.5) / 127.5
        input_image = np.expand_dims(input_image, axis=2)
        input_image = input_image.transpose(2, 0, 1)
        input_image = np.expand_dims(input_image, axis=0)
        input_image = input_image.astype('float32')
        input_image = transform.to_tensor(jt.array(input_image))
        # print(input_image.shape)
        mus_mouth = self.net_encoder(input_image)

        return mus_mouth
Example #5
    def test_avg_pool2d(self):
        from torch.nn.functional import avg_pool2d as t_avg_pool2d
        arr = np.random.random((2, 16, 33, 33))
        jt_model = avg_pool2d(jt.array(arr), 3, 1, 1, ceil_mode=True)
        torch_model = t_avg_pool2d(torch.Tensor(arr), 3, 1, 1, ceil_mode=True)
        assert np.allclose(jt_model.numpy(), torch_model.numpy())

        jt_model = avg_pool2d(jt.array(arr),
                              3,
                              1,
                              1,
                              ceil_mode=True,
                              count_include_pad=False)
        torch_model = t_avg_pool2d(torch.Tensor(arr),
                                   3,
                                   1,
                                   1,
                                   ceil_mode=True,
                                   count_include_pad=False)
        assert np.allclose(jt_model.numpy(), torch_model.numpy())
        print('finish')
Example #6
    def test_forward(self):
        @contextlib.contextmanager
        def check(bop_num):
            jt.clean()
            yield
            graph = jt.dump_all_graphs()
            bop = [
                node for node in graph.nodes_info
                if node.startswith("Op") and "broadcast_to" in node
            ]
            assert len(bop) == bop_num, (len(bop), bop_num)

        with check(1):
            a = jt.array([1, 2, 3])
            b = a + 1
        assert (b.data == [2, 3, 4]).all()
        del a, b

        with check(0):
            a = jt.array([1, 2, 3])
            b = a + a
        assert (b.data == [2, 4, 6]).all()
        del a, b

        def test_shape(shape1, shape2, bop_num):
            with check(bop_num):
                a = jt.random(shape1)
                b = jt.random(shape2)
                c = a + b

        test_shape([3, 3, 3], [3, 3, 3], 0)
        test_shape([3, 3, 3], [3, 3, 1], 1)
        test_shape([3, 3, 3], [3, 1, 1], 1)
        test_shape([3, 3, 3], [1, 1, 1], 1)
        test_shape([3, 3, 3], [1, 1, 3], 1)
        test_shape([3, 3, 3], [1, 3, 3], 1)
        test_shape([3, 3, 1], [1, 3, 3], 2)
        test_shape([3, 1, 3], [1, 3, 3], 2)
        test_shape([3, 3], [1, 3, 3], 1)
        test_shape([3, 3], [1, 3, 1], 2)
Example #7
 def test_unpool_diff_kernel_stride(self):
     from jittor import nn
     pool = nn.MaxPool2d(3, stride=2, return_indices=True)
     unpool = nn.MaxUnpool2d(3, stride=2)
     input = jt.array([[[[1., 2, 3, 4, 0], [5, 6, 7, 8, 0],
                         [9, 10, 11, 12, 0], [13, 14, 16, 15, 0],
                         [0, 0, 0, 0, 0]]]])
     output, indices = pool(input)
     out = unpool(output, indices, output_size=input.shape)
     assert (out == jt.array([[[[0., 0., 0., 0., 0.],
                                [0., 0., 0., 0., 0.],
                                [0., 0., 11., 12., 0.],
                                [0., 0., 32., 0., 0.],
                                [0., 0., 0., 0., 0.]]]])).all()
Example #8
def farthest_point_sample(points, num_point):
    """
    Input:
        points: pointcloud data, [B, N, C]
        num_point: number of samples
    Return:
        centroids: sampled pointcloud index, [B, num_point]
    """
    B, N, C = points.shape
    centroids = jt.zeros((B, num_point))
    distance = jt.ones((B, N)) * 1e10

    farthest = np.random.randint(0, N, B, dtype='l')
    batch_indices = np.arange(B, dtype='l')
    farthest = jt.array(farthest)
    batch_indices = jt.array(batch_indices)
    # jt.sync_all(True)
    for i in range(num_point):
        centroids[:, i] = farthest
        centroid = points[batch_indices, farthest, :]
        centroid = centroid.view(B, 1, C)  # use C instead of a hard-coded 3

        dist = jt.sum((points - centroid.repeat(1, N, 1))**2, 2)
        mask = dist < distance

        if mask.sum().data[0] > 0:
            distance[mask] = dist[mask]  # masked setitem fails when mask is all-false, hence the guard

        farthest = jt.argmax(distance, 1)[0]
    return centroids
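
A minimal usage sketch for the sampler above (the shapes are illustrative assumptions):

    points = jt.random((2, 1024, 3))          # B=2 point clouds, N=1024 points, C=3 coords
    idx = farthest_point_sample(points, 128)  # -> [2, 128] indices of the sampled points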
Example #9
def detect(original_image, min_score, max_overlap, top_k, suppress=None):
    """ Detect objects in an image with a trained SSD300, and visualize the results.
    Args:
        original_image: image, a PIL Image
        min_score: minimum threshold for a detected box to be considered a match for a certain class
        max_overlap: maximum overlap two boxes can have so that the one with the lower score is not suppressed via Non-Maximum Suppression (NMS)
        top_k: if there are a lot of resulting detection across all classes, keep only the top 'k'
        suppress: classes that you know for sure cannot be in the image or you do not want in the image, a list
    Return: annotated image, a PIL Image
    """
    image = np.array(original_image).astype('float32')
    H, W, C = image.shape
    image = transform(image)
    image = jt.array(image[np.newaxis, :]).float32()
    predicted_locs, predicted_scores = model(image)
    det_boxes, det_labels, det_scores = model.detect_objects(
        predicted_locs,
        predicted_scores,
        min_score=min_score,
        max_overlap=max_overlap,
        top_k=top_k)
    det_boxes = det_boxes[0]
    original_dims = np.array([[W, H, W, H]])
    det_boxes = det_boxes * original_dims
    det_labels = [rev_label_map[l] for l in det_labels[0]]
    if det_labels == ['background']:
        return original_image
    annotated_image = original_image
    draw = ImageDraw.Draw(annotated_image)
    font = ImageFont.truetype("ahronbd.ttf", 15)
    for i in range(det_boxes.shape[0]):
        if suppress is not None:
            if det_labels[i] in suppress:
                continue
        box_location = det_boxes[i].tolist()
        draw.rectangle(xy=box_location, outline=label_color_map[det_labels[i]])
        draw.rectangle(xy=[l + 1. for l in box_location],
                       outline=label_color_map[det_labels[i]])
        text_size = font.getsize(det_labels[i].upper())
        text_location = [box_location[0] + 2., box_location[1] - text_size[1]]
        textbox_location = [
            box_location[0], box_location[1] - text_size[1],
            box_location[0] + text_size[0] + 4., box_location[1]
        ]
        draw.rectangle(xy=textbox_location,
                       fill=label_color_map[det_labels[i]])
        draw.text(xy=text_location,
                  text=det_labels[i].upper(),
                  fill='white',
                  font=font)
    del draw
    return annotated_image
Example #10
def test_ring_buffer():
    buffer = jt.RingBuffer(1000)

    def test_send_recv(data):
        print("test send recv", type(data))
        buffer.push(data)
        recv = buffer.pop()
        if isinstance(data, (np.ndarray, jt.Var)):
            assert (recv == data).all()
        else:
            assert data == recv

    n_byte = 0
    test_send_recv(1)
    n_byte += 1 + 8
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()
    test_send_recv(100000000000)
    n_byte += 1 + 8
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()

    test_send_recv(1e-5)
    n_byte += 1 + 8
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()
    test_send_recv(100000000000.0)
    n_byte += 1 + 8
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()

    test_send_recv("float32")
    n_byte += 1 + 8 + 7
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()
    test_send_recv("")
    n_byte += 1 + 8 + 0
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()
    test_send_recv("xxxxxxxxxx")
    n_byte += 1 + 8 + 10
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()

    test_send_recv([1, 0.2])
    n_byte += 1 + 8 + 1 + 8 + 1 + 8
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()
    test_send_recv({'asd': 1})
    n_byte += 1 + 8 + 1 + 8 + 3 + 1 + 8
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()

    test_send_recv(np.random.rand(10, 10))
    n_byte += 1 + 16 + 2 + 10 * 10 * 8
    assert n_byte == buffer.total_pop() and n_byte == buffer.total_push()
    test_send_recv(test_ring_buffer)

    test_send_recv(jt.array(np.random.rand(10, 10)))

    expect_error(lambda: test_send_recv(np.random.rand(10, 1000)))
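
Note: the `n_byte` bookkeeping above implies the buffer's wire format: each item starts with a 1-byte type tag, scalars carry an 8-byte payload, strings add an 8-byte length plus their bytes, and lists/dicts serialize each element in turn. The final `expect_error` case pushes a 10x1000 float64 array (~80 KB) into a 1000-byte buffer, which must fail.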
Example #11
def sample_image(n_row, batches_done):
    """Saves a grid of generated digits ranging from 0 to n_classes"""
    # Static sample
    z = jt.array(np.random.normal(0, 1, (n_row**2, opt.latent_dim))).float32()
    static_sample = generator(z, static_label, static_code)
    save_image(static_sample.numpy(),
               "images/static/%d.png" % batches_done,
               nrow=n_row)

    # Get varied c1 and c2
    zeros = np.zeros((n_row**2, 1))
    c_varied = np.repeat(np.linspace(-1, 1, n_row)[:, np.newaxis], n_row, 0)
    c1 = jt.array(np.concatenate((c_varied, zeros), -1)).float32()
    c2 = jt.array(np.concatenate((zeros, c_varied), -1)).float32()
    sample1 = generator(static_z, static_label, c1)
    sample2 = generator(static_z, static_label, c2)
    save_image(sample1.numpy(),
               "images/varying_c1/%d.png" % batches_done,
               nrow=n_row)
    save_image(sample2.numpy(),
               "images/varying_c2/%d.png" % batches_done,
               nrow=n_row)
Example #12
def compute_gradient_penalty(D, real_samples, fake_samples):
    """Calculates the gradient penalty loss for WGAN GP"""
    # Random weight term for interpolation between real and fake samples
    alpha = jt.array(
        np.random.random((real_samples.size(0), 1, 1, 1)).astype(np.float32))
    # Get random interpolation between real and fake samples
    interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)
    d_interpolates, _ = D(interpolates)
    # Get gradient w.r.t. interpolates
    gradients = jt.grad(d_interpolates, interpolates)
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
    return gradient_penalty
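
A toy invocation of the penalty above (ToyD is a stand-in discriminator, not part of the original code; it returns a (score, features) pair because D's output is unpacked into two values):

    from jittor import nn

    class ToyD(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(3 * 8 * 8, 1)
        def execute(self, x):
            # flatten the image batch and score it; the second value is unused
            return self.fc(x.reshape(x.shape[0], -1)), None

    real = jt.random((4, 3, 8, 8))
    fake = jt.random((4, 3, 8, 8))
    gp = compute_gradient_penalty(ToyD(), real, fake)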
Example #13
 def test_wrong_fuse2(self):
     a = jt.array([1])
     b = jt.random([10])
     c = jt.random([100])
     bb = a * b
     cc = a * c
     jt.sync([bb, cc])
     np.testing.assert_allclose(b.data, bb.data)
     np.testing.assert_allclose(c.data, cc.data)
Example #14
 def run_models(self):
     def to_cuda(x):
         if jt.has_cuda:
             return x.cuda()
         return x
     threshold = 1e-2
     # Define numpy input image
     bs = 1
     test_img = np.random.random((bs,3,224,224)).astype('float32')
     # Define pytorch & jittor input image
     pytorch_test_img = to_cuda(torch.Tensor(test_img))
     jittor_test_img = jt.array(test_img)
     for test_model in self.models:
         print("test model", test_model)
         if test_model == "inception_v3":
             test_img = np.random.random((bs,3,300,300)).astype('float32')
             pytorch_test_img = to_cuda(torch.Tensor(test_img))
             jittor_test_img = jt.array(test_img)
         # Define pytorch & jittor model
         pytorch_model = to_cuda(tcmodels.__dict__[test_model]())
         jittor_model = jtmodels.__dict__[test_model]()
         # Set eval to avoid dropout layer
         pytorch_model.eval()
         jittor_model.eval()
         # Jittor loads pytorch parameters to ensure forward alignment
         jittor_model.load_parameters(pytorch_model.state_dict())
         # Compare the pytorch & jittor forward results; if the relative error is below the threshold, the test passes.
         pytorch_result = pytorch_model(pytorch_test_img)
         jittor_result = jittor_model(jittor_test_img)
         # shift by +1 so the relative error stays stable when activations are near zero
         x = pytorch_result.detach().cpu().numpy() + 1
         y = jittor_result.data + 1
         relative_error = abs(x - y) / abs(y)
         diff = relative_error.mean()
         assert diff < threshold, f"[*] {test_model} forward fails..., Relative Error: {diff}"
         print(f"[*] {test_model} forword passes with Relative Error {diff}")
         jt.clean()
         jt.gc()
         torch.cuda.empty_cache()
     print('all models pass test.')
Example #15
    def test_batchnorm_backward(self):
        mpi = jt.compile_extern.mpi
        data = np.random.rand(30, 3, 10, 10).astype("float32")
        global_x = jt.array(data)
        x = jt.array(data[mpi.world_rank() * 10:(mpi.world_rank() + 1) * 10,
                          ...])

        bn1 = nn.BatchNorm(3, sync=True)
        bn2 = FakeMpiBatchNorm(3)
        y1 = bn1(x)
        y2 = bn2(x, global_x)
        gs1 = jt.grad(y1, bn1.parameters())
        gs2 = jt.grad(y2, bn2.parameters())

        assert np.allclose(y1.data, y2.data,
                           atol=1e-5), (mpi.world_rank(), y1.data, y2.data,
                                        y1.data - y2.data)
        for i in range(len(gs1)):
            assert np.allclose(gs1[i].data, gs2[i].data,
                               rtol=1e-3), (mpi.world_rank(), gs1[i].data,
                                            gs2[i].data,
                                            gs1[i].data - gs2[i].data)
Example #16
 def test_nll_loss(self):
     tc_loss = tnn.functional.nll_loss
     jt_loss = jnn.nll_loss
     output = np.random.randn(10, 10).astype(np.float32)
     target = np.random.randint(10, size=(10))
     jt_y = jt_loss(jt.array(output), jt.array(target), reduction='mean')
     tc_y = tc_loss(torch.from_numpy(output),
                    torch.from_numpy(target),
                    reduction='mean')
     assert np.allclose(jt_y.numpy(), tc_y.numpy())
     output = np.random.randn(10, 10).astype(np.float32)
     target = np.random.randint(10, size=(10))
     weight = np.random.randn(10, ).astype(np.float32)
     jt_y = jt_loss(jt.array(output),
                    jt.array(target),
                    jt.array(weight),
                    reduction='mean')
     tc_y = tc_loss(torch.from_numpy(output),
                    torch.from_numpy(target),
                    torch.from_numpy(weight),
                    reduction='mean')
     assert np.allclose(jt_y.numpy(), tc_y.numpy())
Example #17
    def test_forward(self):
        a = np.random.rand(1,3,224,224).astype(np.float32)
        b = np.random.rand(64,3,7,7).astype(np.float32)
        c = jt.mkl_ops.mkl_conv(a,b,2,3).data

        a_jt = jt.array(a)
        b_jt = jt.array(b)
        with jt.flag_scope(enable_tuner=0,compile_options={"test_mkl_conv":1}):
            c_jt = conv(a_jt, b_jt, 3, 2).data
        with jt.log_capture_scope(
            enable_tuner=1,
            compile_options={"test_mkl_conv":2},
            log_v=0, log_vprefix="tuner_manager=100,conv_tuner=1000",
        ) as raw_logs:
            c_jt_tune = conv(a_jt, b_jt, 3, 2).data

        assert np.max(c_jt-c)<1e-4 and np.max(c_jt_tune-c)<1e-4
        logs = find_log_with_re(raw_logs, 
            "Run tuner conv: confidence\\((.*)\\) candidates\\((.*)\\)$")
        assert len(logs)==1
        assert logs[0][0] == '20'
        assert simple_parser(logs[0][1]) == {'relay0':[1,0]}
Example #18
    def __getitem__(self, index):
        A_path = self.files_A[index % len(self.files_A)]
        B_path = self.files_B[random.randint(0, len(self.files_B) - 1)]
        basenA = os.path.basename(A_path)
        basenB = os.path.basename(B_path)

        image_A = Image.open(A_path).convert('RGB')
        image_B = Image.open(B_path).convert('RGB')
        mask_A_ln = Image.open(os.path.join(self.auxdir_A+'_nose',basenA))
        mask_A_le = Image.open(os.path.join(self.auxdir_A+'_eyes',basenA))
        mask_A_ll = Image.open(os.path.join(self.auxdir_A+'_lips',basenA))
        mask_B_ln = Image.open(os.path.join(self.auxdir_B+'_nose',basenB))
        mask_B_le = Image.open(os.path.join(self.auxdir_B+'_eyes',basenB))
        mask_B_ll = Image.open(os.path.join(self.auxdir_B+'_lips',basenB))
        
        # Image transformations
        params_A = get_params(self.load_h, self.load_w, self.crop_h, self.crop_w)
        params_B = get_params(self.load_h, self.load_w, self.crop_h, self.crop_w)
        
        transform_A = get_transform(params_A)
        transform_A_mask = get_transform(params_A, gray=True, mask=True)
        transform_B = get_transform(params_B, gray=True)
        transform_B_mask = get_transform(params_B, gray=True, mask=True)

        item_A = transform_A(image_A)
        item_A_mask_ln = transform_A_mask(mask_A_ln)
        item_A_mask_le = transform_A_mask(mask_A_le)
        item_A_mask_ll = transform_A_mask(mask_A_ll)
        item_B = transform_B(image_B)
        item_B_mask_ln = transform_B_mask(mask_B_ln)
        item_B_mask_le = transform_B_mask(mask_B_le)
        item_B_mask_ll = transform_B_mask(mask_B_ll)

        B_feat = np.load(os.path.join(self.auxdir_B+'_feat',basenB[:-4]+'.npy'))
        item_B_label = jt.array(np.argmax(B_feat))
        item_B_style = jt.array(B_feat).view(3, 1, 1).repeat(1, 128, 128)
        item_B_style0 = jt.array(B_feat)

        return item_A, item_B, item_A_mask_ln, item_A_mask_le, item_A_mask_ll, item_B_mask_ln, item_B_mask_le, item_B_mask_ll, item_B_label, item_B_style, item_B_style0
Example #19
    def test_default_var(self):
        a = jt.array((2, 3, 3), np.float32)
        b = a * 2.0
        assert str(b.dtype) == "float32"
        b = a * 2
        assert str(b.dtype) == "float32"
        a = jt.array((2, 3, 3), np.int32)
        b = a * 2.0
        assert str(b.dtype) == "float32"
        b = a * 2
        assert str(b.dtype) == "int32"

        a = jt.array((2, 3, 3), np.float64)
        b = a * 2.0
        assert str(b.dtype) == "float64"
        b = a * 2
        assert str(b.dtype) == "float64"
        a = jt.array((2, 3, 3), np.int64)
        b = a * 2.0
        assert str(b.dtype) == "float64"
        b = a * 2
        assert str(b.dtype) == "int64"
Example #20
 def __getitem__(self, index):
     index = (index % self.dataset_size)
     data_path = self.train_paths[index]
     try:
         data = sio.loadmat(data_path,
                            verify_compressed_data_integrity=False)
     except Exception as e:
         print(data_path, e)
         return None
     sample = data['surfaceSamples']
     voxel = data['Volume']
     cp = data['closestPoints']
     voxel = jt.transform.to_tensor(jt.array(voxel)).float().unsqueeze(0)
     sample = jt.transform.to_tensor(jt.array(sample)).float().t()
     cp = jt.transform.to_tensor(jt.array(cp)).float().view(-1, 3)
     input_dict = {
         'voxel': voxel,
         'sample': sample,
         'cp': cp,
         'path': data_path
     }
     return input_dict
Example #21
 def test_cumprod_cpu(self):
     for i in range(1,6):
         for j in range(i):
             x = np.random.rand(*((10,)*i))
             x_jt = jt.array(x)
             y_jt = jt.cumprod(x_jt, j).sqr()
             g_jt = jt.grad(y_jt.sum(), x_jt)
             x_tc = Variable(torch.from_numpy(x), requires_grad=True)
             y_tc = torch.cumprod(x_tc, j)**2
             y_tc.sum().backward()
             g_tc = x_tc.grad
             assert np.allclose(y_jt.numpy(), y_tc.data)
             assert np.allclose(g_jt.numpy(), g_tc.data)
Example #22
def look_at(vertices, eye, at=[0, 0, 0], up=[0, 1, 0]):
    """"Look at" transformation of vertices. The z axis is changed to (at - eye). Original vertices are transformed to the new axis.
    """
    if len(vertices.shape) != 3:
        raise ValueError('vertices Tensor should have 3 dimensions')

    at = jt.array(at).float32()
    up = jt.array(up).float32()
    if isinstance(eye, tuple):
        eye = jt.array(list(eye)).float32()
    else:
        eye = jt.array(eye).float32()

    batch_size = vertices.shape[0]
    if len(eye.shape) == 1:
        eye = eye.broadcast([batch_size] + eye.shape)
    if len(at.shape) == 1:
        at = at.broadcast([batch_size] + at.shape)
    if len(up.shape) == 1:
        up = up.broadcast([batch_size] + up.shape)

    # create new axes
    # eps is chosen as 0.5 to match the chainer version
    z_axis = jt.normalize(at - eye, eps=1e-5)
    x_axis = jt.normalize(jt.cross(up, z_axis), eps=1e-5)
    y_axis = jt.normalize(jt.cross(z_axis, x_axis), eps=1e-5)

    # create rotation matrix: [bs, 3, 3]
    r = jt.contrib.concat(
        (x_axis.unsqueeze(1), y_axis.unsqueeze(1), z_axis.unsqueeze(1)), dim=1)
    # apply
    # [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
    if vertices.shape != eye.shape:
        eye = eye.unsqueeze(1)
    vertices = vertices - eye

    vertices = jt.matmul(vertices, r.transpose(0, 2, 1)[0])
    return vertices
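
A minimal usage sketch for `look_at` (the batch size and eye position are illustrative assumptions):

    vertices = jt.random((1, 100, 3))                # [bs, nv, 3]
    transformed = look_at(vertices, eye=[2., 2., -2.])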
Example #23
    def load_weights_from_keras(self, weights):
        assert self.use_viewdirs, "Not implemented if use_viewdirs=False"

        # Load pts_linears
        for i in range(self.D):
            idx_pts_linears = 2 * i
            self.pts_linears[i].weight.data = jt.array(
                np.transpose(weights[idx_pts_linears]))
            self.pts_linears[i].bias.data = jt.array(
                np.transpose(weights[idx_pts_linears + 1]))

        # Load feature_linear
        idx_feature_linear = 2 * self.D
        self.feature_linear.weight.data = jt.array(
            np.transpose(weights[idx_feature_linear]))
        self.feature_linear.bias.data = jt.array(
            np.transpose(weights[idx_feature_linear + 1]))

        # Load views_linears
        idx_views_linears = 2 * self.D + 2
        self.views_linears[0].weight.data = jt.array(
            np.transpose(weights[idx_views_linears]))
        self.views_linears[0].bias.data = jt.array(
            np.transpose(weights[idx_views_linears + 1]))

        # Load rgb_linear
        idx_rgb_linear = 2 * self.D + 4
        self.rgb_linear.weight.data = jt.array(
            np.transpose(weights[idx_rgb_linear]))
        self.rgb_linear.bias.data = jt.array(
            np.transpose(weights[idx_rgb_linear + 1]))

        # Load alpha_linear
        idx_alpha_linear = 2 * self.D + 6
        self.alpha_linear.weight.data = jt.array(
            np.transpose(weights[idx_alpha_linear]))
        self.alpha_linear.bias.data = jt.array(
            np.transpose(weights[idx_alpha_linear + 1]))
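
Note: the indexing assumes the Keras weight list interleaves kernels and biases, so weights[2*i] is a layer's kernel and weights[2*i + 1] its bias, with the extra linears appended after the D pts_linears pairs.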
Example #24
def look(vertices, eye, direction=[0, 1, 0], up=None):
    """
    "Look" transformation of vertices.
    """
    if len(vertices.shape) != 3:
        raise ValueError('vertices Tensor should have 3 dimensions')

    direction = jt.array(direction).float32()
    if isinstance(eye, tuple):
        eye = jt.array(list(eye)).float32()
    else:
        eye = jt.array(eye).float32()

    if up is None:
        up = jt.array([0, 1, 0]).float32()
    if len(eye.shape) == 1:
        eye = eye.unsqueeze(0)
    if len(direction.shape) == 1:
        direction = direction.unsqueeze(0)
    if len(up.shape) == 1:
        up = up.unsqueeze(0)

    # create new axes
    z_axis = jt.normalize(direction, eps=1e-5)
    x_axis = jt.normalize(jt.cross(up, z_axis), eps=1e-5)
    y_axis = jt.normalize(jt.cross(z_axis, x_axis), eps=1e-5)

    # create rotation matrix: [bs, 3, 3]
    r = jt.contrib.concat(
        (x_axis.unsqueeze(1), y_axis.unsqueeze(1), z_axis.unsqueeze(1)), dim=1)

    # apply
    # [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
    if vertices.shape != eye.shape:
        eye = eye.unsqueeze(1)
    vertices = vertices - eye
    vertices = jt.matmul(vertices, r.transpose(0, 2, 1))
    return vertices
Example #25
    def R1Penalty(self, real_img, height, alpha):

        # TODO: use_loss_scaling, for fp16
        # apply_loss_scaling = lambda x: x * torch.exp(x * torch.Tensor([np.float32(np.log(2.0))]).to(real_img.device))
        apply_loss_scaling = lambda x: x * jt.exp(x * jt.array(
            [np.float32(np.log(2.0))]))
        # undo_loss_scaling = lambda x: x * torch.exp(-x * torch.Tensor([np.float32(np.log(2.0))]).to(real_img.device))
        undo_loss_scaling = lambda x: x * jt.exp(-x * jt.array(
            [np.float32(np.log(2.0))]))

        # real_img = torch.autograd.Variable(real_img, requires_grad=True)
        real_img = init.constant(real_img.shape, 'float32', real_img)
        assert not real_img.is_stop_grad()
        real_logit = self.dis(real_img, height, alpha)
        # real_logit = apply_loss_scaling(torch.sum(real_logit))
        # real_grads = torch.autograd.grad(outputs=real_logit, inputs=real_img,
        #                                  grad_outputs=torch.ones(real_logit.size()).to(real_img.device),
        #                                  create_graph=True, retain_graph=True)[0].view(real_img.size(0), -1)
        real_grads = jt.grad(real_logit, real_img).view(real_img.size(0), -1)
        # real_grads = undo_loss_scaling(real_grads)
        # r1_penalty = torch.sum(torch.mul(real_grads, real_grads))
        r1_penalty = jt.sum(jt.multiply(real_grads, real_grads))
        return r1_penalty
Example #26
 def test_lived(self):
     jt.clean()
     check(0,0,0)
     a = jt.array(1.0).stop_fuse()
     a.name('a')
     b = jt.array(1.0).stop_fuse()
     b.name('b')
     check(2,2,2)
     c = a * b
     c.name('c')
     check(3,3,3)
     vc = c.numpy()
     check(3,3,1)
     da, db = jt.grad(c, [a, b])
     da.name('da')
     db.name('db')
     check(5,6,4) # dc, 3, da, 1, db, 1
     del a, b, c
     check(2,5,3)
     da.sync(), db.sync()
     check(2,2,0)
     del da, db
     check(0,0,0)
Example #27
def read_image(input_path, *, sidelength=256, channels=1):
    image = Image.open(input_path).resize((sidelength, sidelength))
    if channels == 1:
        image = image.convert('L')
    elif channels == 3:
        image = image.convert('RGB')
    else:
        raise ValueError()
    image_arr = jittor.array(np.array(image)).float32()  # cast so the scaling below stays in float
    if channels == 1:
        image_arr = image_arr.unsqueeze(-1)
    image_arr /= 255
    image_arr = 2 * image_arr - 1
    return image_arr
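
A hypothetical call to the helper above (the filename is a placeholder):

    img = read_image('photo.png', sidelength=128, channels=3)  # shape (128, 128, 3), values in [-1, 1]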
Example #28
 def test_cross(self):
     arr1 = np.random.randn(16, 3, 224, 224, 3)
     arr2 = np.random.randn(16, 3, 224, 224, 3)
     check_equal(
         torch.Tensor(arr1).cross(torch.Tensor(arr2), dim=1),
         jt.array(arr1).cross(jt.array(arr2), dim=1), 1e-1)
     check_equal(
         torch.Tensor(arr1).cross(torch.Tensor(arr2), dim=-4),
         jt.array(arr1).cross(jt.array(arr2), dim=-4), 1e-1)
     check_equal(
         torch.Tensor(arr1).cross(torch.Tensor(arr2), dim=-1),
         jt.array(arr1).cross(jt.array(arr2), dim=-1), 1e-1)
     check_equal(
         torch.Tensor(arr1).cross(torch.Tensor(arr2), dim=4),
         jt.array(arr1).cross(jt.array(arr2), dim=4), 1e-1)
     print('pass cross test ...')
Example #29
 def test(self):
     a = jt.array([1,2,3])
     a.sync()
     assert a.compile_options=={}
     a.compile_options = {"compile_shapes":1}
     assert a.compile_options=={"compile_shapes":1}
     b = a+a
     assert b.compile_options=={}
     with jt.flag_scope(compile_options={"compile_shapes":1}):
         c = a+b
     assert c.compile_options=={"compile_shapes":1}
     with jt.profile_scope() as report:
         c.sync()
     assert len(report)==2 and "compile_shapes:1" in report[1][0]
Example #30
    def _calcLoss(self, netOutput):
        mask_loss_func = CrossEntropyLoss(ignore_index=255)

        gts = []
        for masks, Matrixs in zip(self.batchmasks, self.maskAlignMatrixs):
            for mask, matrix in zip(masks, Matrixs):
                gts.append(
                    cv2.warpAffine(mask, matrix[0:2],
                                   (self.size_output, self.size_output)))
        gts = jt.array(np.array(gts)).int32()

        loss = mask_loss_func(netOutput, gts)
        return loss
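
Note: each ground-truth mask is warped by its 2x3 alignment matrix into the (size_output, size_output) grid before the cross-entropy is computed, and label 255 is reserved as the ignore index.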