Example #1
 def __init__(self):
     self.ps = U.Params(params).init_comps()
     self.pre = None
     self.post = None
     # zero-initialized trainable biases with shape (batch, length)
     self.src_b = nn.Parameter(torch.zeros((4, 10)))
     self.mem_b = nn.Parameter(torch.zeros((4, 10)))
Example #2
def test_with_owner():
    a = L.Attend(Owner())
    a.build([(4, 10, 16), (), (4, 18, 16), ()])
    src = torch.zeros((4, 10, 16))
    bias = torch.zeros((4, 10))
    bias = bias.unsqueeze(1).unsqueeze(3)
    mem = torch.zeros((4, 15, 16))
    ctx = torch.zeros((4, 15, 16))
    a.call([src, bias, mem, ctx])
Example #3
def test_owner_none():
    a = L.Attend(Owner())
    a.build([(4, 10, 16)])
    src = torch.zeros((4, 10, 16))
    a.call([src])
    bias = torch.zeros((4, 10))
    bias = bias.unsqueeze(1).unsqueeze(3)
    a.call([src, bias])
    ctx = torch.zeros((4, 15, 16))
    a.call([src, bias, None, ctx])
Example #4
def loss_info_dropout(model, images_v, labels_v, train, criterion):
    """
    Criterion is Softmax-CE loss
    We add to cost function regularization on the noise (alphas) via the KL terms

    In all experiments we divide the KLdivergence term by the number of training
    samples, so that for beta = 1 the scaling of the KL-divergence term in similar to
    the one used by Variational Dropout
    """
    beta = 3.0
    x_output_v, kl_terms = model(images_v, train)

    loss_v = None
    if train:
        kl_terms = [kl.sum(dim=1).mean()
                    for kl in kl_terms]  # each kl has dims (batch_sz, 4096)
        if not kl_terms:
            kl_terms = [torch.tensor(0.)]
        N = images_v.size(0)
        Lz = sum(kl_terms) / N  # sum the list, scaled by the number of samples

        # size_average = True : By default, the losses are averaged over observations for each minibatch.
        Lx = criterion(x_output_v, labels_v)

        if np.random.randint(0, 100) < 1:  # print 1% of the time
            print('     [KL loss term: {}]'.format(beta * Lz.item()))
            print('     [CE loss term: {}]'.format(Lx.item()))

        loss_v = Lx + beta * Lz  # weight_decay * L2 is applied by the optimizer, not added here
    return loss_v, x_output_v
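The scaling described in the docstring is easy to reproduce in isolation. The sketch below builds the same objective from dummy tensors; the `beta` value and the per-sample KL scaling mirror loss_info_dropout, but the random inputs and their sizes are purely illustrative.

import torch
import torch.nn as nn

batch_sz, num_classes = 8, 10
logits = torch.randn(batch_sz, num_classes)
labels = torch.randint(0, num_classes, (batch_sz,))
kl_terms = [torch.rand(batch_sz, 4096), torch.rand(batch_sz, 4096)]

criterion = nn.CrossEntropyLoss()  # softmax cross-entropy, averaged over the batch
Lx = criterion(logits, labels)
Lz = sum(kl.sum(dim=1).mean() for kl in kl_terms) / batch_sz  # KL scaled by sample count
loss = Lx + 3.0 * Lz  # beta = 3.0, as in loss_info_dropout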
Example #5
def test_tokembed():
    e = TokEmbed(ps)
    e.build((1, 5))
    src = torch.tensor([[1, 2, 0, 3, 0]])
    e.call(src)
    ps.one_hot = True
    e = TokEmbed(ps)
    e.build((1, 5))
    e.call(src)
Example #6
 def top_logp(self, ctx, bias, i):
     cfg = self.cfg
     y = torch.zeros((
         cfg.batch_size,
         cfg.beam_size,
         cfg.num_toks,
     ))
     y += self.logp.unsqueeze(2)
     b = torch.arange(cfg.batch_size)
     ii = torch.full((cfg.batch_size,), i)
     for j in range(cfg.beam_size):
         jj = torch.full((cfg.batch_size,), j)
         yj = self.to_logp(self.tgt[:, j, :], ctx, bias, i)[1]
         # scatter-add the step-i log-probs into beam row j
         y = y.index_put((b, jj, ii), yj, accumulate=True)
     y = torch.reshape(y, (-1, cfg.beam_size * cfg.num_toks))
     logp, idx = torch.topk(y, k=2 * cfg.beam_size)
     return logp, idx
Example #7
def conj_kspace(image_in, name="kspace_conj"):
    """Conjugate k-space data."""
    image_out = torch.flip(image_in, dims=[1])
    image_out = torch.flip(image_out, dims=[2])
    # Keep the real channels and negate the imaginary ones
    # (channels are interleaved as [re, im, re, im, ...]).
    mod = np.ones((1, 1, 1, image_in.shape[-1]))
    mod[:, :, :, 1::2] = -1
    mod = torch.tensor(mod, dtype=torch.float32)
    image_out = image_out * mod

    return image_out
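A quick hedged check of the convention above, assuming interleaved real/imaginary channels; the input tensor is arbitrary test data, not real k-space.

import torch

x = torch.arange(2 * 4 * 4 * 2, dtype=torch.float32).reshape(2, 4, 4, 2)
y = conj_kspace(x)
flipped = torch.flip(torch.flip(x, dims=[1]), dims=[2])
assert torch.equal(y[..., 0::2], flipped[..., 0::2])   # real parts unchanged
assert torch.equal(y[..., 1::2], -flipped[..., 1::2])  # imaginary parts negated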
Example #8
 def build(self, input_shape):
     cfg = self.cfg
     tgt = input_shape[0]
     assert tgt[0] == cfg.batch_size
     # the first beam starts at log-prob 0, the rest at -inf
     y = torch.tensor([[0.0] + [-float("inf")] * (cfg.beam_size - 1)])
     self._logp = torch.tile(y, (cfg.batch_size, 1))
     sh = (cfg.batch_size, cfg.beam_size)
     self._score = torch.ones(sh) * utils.big_neg
     self._flag = torch.zeros(sh, dtype=torch.bool)
     return super().build(input_shape)
Example #9
def test_w_grad():
    e = TokEmbed(ps)
    e.build((None, 3))
    ins = torch.tensor([[0, 1, 0]], dtype=torch.long)
    out = e(ins)
    print("===", out, e.weights)
    # reduce to a scalar and backprop to populate .grad on the weights
    out.sum().backward()
    gs = [w.grad for w in e.weights]
    opt = torch.optim.Adagrad(e.weights, lr=0.1)
    opt.step()
    print("###", len(gs), 1)
Example #10
 def append_tok(self, idx, i, **kw):
     cfg = self.cfg
     k = 2 * cfg.beam_size
     b = torch.arange(cfg.batch_size * k) // k
     b = torch.reshape(b, (cfg.batch_size, k))
     beam = idx // cfg.num_toks
     # gather the selected beams, then write token idx % num_toks at step i
     y = self.tgt[b, beam]
     ii = torch.full((cfg.batch_size, k), i)
     tgt = y.clone()
     tgt[b, beam, ii] = idx % cfg.num_toks
     return tgt
Example #11
def pose2mat(quat, translation):

    # [R t] [R1 t1] = [R*R1 R*t1+t]
    # [0 1] [ 0  1] = [   0    1  ]

    x, y, z, w = quat[:, 0], quat[:, 1], quat[:, 2], quat[:, 3]

    B = quat.size(0)

    w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
    wx, wy, wz = w * x, w * y, w * z
    xy, xz, yz = x * y, x * z, y * z

    t1, t2, t3 = translation[:, 0], translation[:, 1], translation[:, 2]
    # per-batch constants so they stack with the (B,) rows below
    l0 = torch.zeros_like(t1)
    l1 = torch.ones_like(t1)

    mat = torch.stack([
        w2 + x2 - y2 - z2, 2 * xy - 2 * wz, 2 * wy + 2 * xz, t1,
        2 * wz + 2 * xy, w2 - x2 + y2 - z2, 2 * yz - 2 * wx, t2,
        2 * xz - 2 * wy, 2 * wx + 2 * yz, w2 - x2 - y2 + z2, t3, l0, l0, l0, l1
    ],
                      dim=1).reshape(B, 4, 4)
    return mat
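As a sanity check on the quaternion layout (x, y, z, w, following the unpacking above), the identity quaternion should produce a pure translation matrix; the translation values here are illustrative.

import torch

quat = torch.tensor([[0.0, 0.0, 0.0, 1.0]])   # identity rotation (x, y, z, w)
trans = torch.tensor([[1.0, 2.0, 3.0]])
mat = pose2mat(quat, trans)
expected = torch.tensor([[[1.0, 0.0, 0.0, 1.0],
                          [0.0, 1.0, 0.0, 2.0],
                          [0.0, 0.0, 1.0, 3.0],
                          [0.0, 0.0, 0.0, 1.0]]])
assert torch.allclose(mat, expected)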
Example #12
 def __init__(self, minval=0.0001, maxval=0.0001):
     self.maxvalval = maxval
     self.maxval = torch.tensor(maxval)
     self.minvalval = minval
     self.minval = torch.tensor(minval)
Example #13
 def __init__(self, maxval=4.0):
     self.maxvalval = maxval
     self.maxval = torch.tensor(maxval)
Example #14
    def build(self, input_shape, dtype=torch.float64):
        print("building")

        self.input_shapes = input_shape
        self.len_input = len(self.input_shapes)
        self.connections = self.input_shapes[-1]
        if self.dendrite_mode == self.modes[1]:  # sparse
            self.connections -= self.dendrite_shift
        elif self.dendrite_mode == self.modes[2]:  # overlap
            self.connections += self.dendrite_shift
        if self.dendrites is None:
            self.segmenter()  # list of dendrites per neuron
        if self.version == 4:
            self.dendrites = torch.tensor(self.dendrites)
        self.pre_dendrites = self.connections * self.units  # neurons*previous_layer_neurons
        if self.version != 1:
            dwshape = [self.units, self.seql]
        else:
            dwshape = [self.seql, self.units]
        # dwshape=[self.units,self.seql,*[1 for _ in range(self.len_input-1)]]
        # self.num_dendrites=self.pre_dendrites/self.dendrite_size
        # if self.bigger_dendrite:
        #    self.num_dendrites=math.floor(self.num_dendrites)
        # else:
        #    self.num_dendrites=math.ceil(self.num_dendrites)

        # input_shape = tensor_shape.TensorShape(input_shape)
        if self.version == 2:
            if len(self.input_shapes) > 2:
                part_inshape = (*self.input_shapes[1:-1], -1)
            else:
                part_inshape = (-1, )
            self.debuildshape = (self.units * self.connections, *part_inshape)
            self.deseqshape = (self.units * self.connections, )
            self.rebuildshape = (self.units, self.seql, *part_inshape)
        if self.weight_twice:
            """if self.uniqueW==2:#useless since all input are there once, could also work with sparse
                print([self.dendrite_size,self.seql, self.units])
                self.kernel=self.add_variable('Weight',shape=[*[1 for _ in range(self.len_input-1)],self.dendrite_size,self.seql, self.units],
                                    initializer=self.Weight_initializer,regularizer=self.Weight_regularizer,
                                    constraint=self.Weight_constraint,dtype=self.dtype,
                                    trainable=True)"""
            if self.uniqueW:
                kernel = torch.empty(*[1 for _ in range(self.len_input - 1)],
                                     self.input_shapes[-1],
                                     self.units,
                                     dtype=dtype)

            else:
                kernel = torch.empty(1, self.units, dtype=dtype)
            finit.kaiming_normal_(kernel)
            self.kernel = nn.Parameter(kernel)
            self.register_parameter('kernel', self.kernel)
            self.params.append(self.kernel)
        dw = torch.empty(*dwshape, dtype=dtype)
        finit.kaiming_normal_(dw)
        self.dendriticW = nn.Parameter(dw)
        self.params.append(self.dendriticW)
        if self.use_bias:
            if self.weight_twice:
                if self.uniqueW:
                    b = torch.empty(self.input_shapes[-1],
                                    self.units,
                                    dtype=dtype)
                else:
                    b = torch.empty(1, self.units, dtype=dtype)
                try:
                    finit.kaiming_normal_(b)
                except ValueError:
                    finit.xavier_normal_(b)
                self.bias = nn.Parameter(b)
                self.register_parameter('Bias', self.bias)
                self.params.append(self.bias)
            if self.uniqueW:
                db = torch.empty(self.seql, self.units, dtype=dtype)
            else:
                db = torch.empty(1, self.units, dtype=dtype)
            finit.kaiming_normal_(db)
            self.dendriticB = nn.Parameter(db)
            self.params.append(self.dendriticB)
            self.register_parameter('dendritic_B', self.dendriticB)
        print("supered")
        #self.register_parameter('dentritic_W', self.dendriticW)
        self.built = True
        print('built')
Example #15
    def initalize_data(self,
                       a,
                       phi,
                       b,
                       prior_W,
                       prior_H,
                       Beta,
                       K0,
                       use_val_set,
                       dtype=torch.float32):

        # When called in a loop (as in run_parameter_sweep), self.V may have been
        # converted to a torch tensor in a previous iteration, which breaks some
        # numpy functions, so coerce it back to an ndarray.
        self.V = np.array(self.V)

        if K0 is None:
            self.K0 = self.M
            self.number_of_active_components = self.M
        else:
            self.K0 = K0
            self.number_of_active_components = self.K0

        if self.objective.lower() == 'poisson':
            self.phi = torch.tensor(phi, dtype=dtype, requires_grad=False)
        else:
            self.phi = torch.tensor(np.var(self.V) * phi,
                                    dtype=dtype,
                                    requires_grad=False)

        if use_val_set:
            torch.manual_seed(0)  # get the same mask each time
            # Randomly mask ~20% of the data in the shape of V; only used when passed.
            self.mask = (torch.rand(self.V.shape) > 0.2).type(self.dtype)
        else:
            self.mask = torch.ones(self.V.shape, dtype=self.dtype)

        self.a = a
        self.prior_W = prior_W
        self.prior_H = prior_H
        self.C = []
        self.b = b

        W0 = np.multiply(
            np.random.uniform(size=[self.M, self.K0]) + self.eps_.numpy(),
            np.sqrt(self.V_max))
        H0 = np.multiply(
            np.random.uniform(size=[self.K0, self.N]) + self.eps_.numpy(),
            np.sqrt(self.V_max))
        L0 = np.sum(W0, axis=0) + np.sum(H0, axis=1)

        self.W = torch.tensor(W0, dtype=self.dtype, requires_grad=False)
        self.H = torch.tensor(H0, dtype=self.dtype, requires_grad=False)
        self.Lambda = torch.tensor(L0,
                                   dtype=torch.float32,
                                   requires_grad=False)

        # calculate default b as described in Tan and Fevotte (2012)
        if self.b is None or self.b == 'None':
            # L1 ARD
            if self.prior_H == 'L1' and self.prior_W == 'L1':

                self.bcpu = np.sqrt(
                    np.true_divide(
                        (self.a - 1) * (self.a - 2) * np.mean(self.V),
                        self.K0))
                self.b = torch.tensor(self.bcpu,
                                      dtype=self.dtype,
                                      requires_grad=False)

                self.C = torch.tensor(self.N + self.M + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)
            # L2 ARD
            elif self.prior_H == 'L2' and self.prior_W == 'L2':

                self.bcpu = np.true_divide(
                    np.pi * (self.a - 1) * np.mean(self.V), 2 * self.K0)
                self.b = torch.tensor(self.bcpu,
                                      dtype=self.dtype,
                                      requires_grad=False)

                self.C = torch.tensor((self.N + self.M) * 0.5 + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)

            # L1 - L2 ARD
            elif self.prior_H == 'L1' and self.prior_W == 'L2':
                self.bcpu = np.true_divide(
                    np.mean(self.V) * np.sqrt(2) * gamma(self.a - 3 / 2),
                    self.K0 * np.sqrt(np.pi) * gamma(self.a))
                self.b = torch.tensor(self.bcpu,
                                      dtype=self.dtype,
                                      requires_grad=False)
                self.C = torch.tensor(self.N + self.M / 2 + self.a + 1,
                                      dtype=self.dtype)
            elif self.prior_H == 'L2' and self.prior_W == 'L1':
                self.bcpu = np.true_divide(
                    np.mean(self.V) * np.sqrt(2) * gamma(self.a - 3 / 2),
                    self.K0 * np.sqrt(np.pi) * gamma(self.a))
                self.b = torch.tensor(self.bcpu,
                                      dtype=self.dtype,
                                      requires_grad=False)
                self.C = torch.tensor(self.N / 2 + self.M + self.a + 1,
                                      dtype=self.dtype)
        else:
            self.bcpu = self.b
            self.b = torch.tensor(self.b,
                                  dtype=self.dtype,
                                  requires_grad=False)
            if self.prior_H == 'L1' and self.prior_W == 'L1':
                self.C = torch.tensor(self.N + self.M + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)
            # L2 ARD
            elif self.prior_H == 'L2' and self.prior_W == 'L2':
                self.C = torch.tensor((self.N + self.M) * 0.5 + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)
            # L1 - L2 ARD
            elif self.prior_H == 'L1' and self.prior_W == 'L2':
                self.C = torch.tensor(self.N + self.M / 2 + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)
            elif self.prior_H == 'L2' and self.prior_W == 'L1':
                self.C = torch.tensor(self.N / 2 + self.M + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)

        self.V = torch.tensor(self.V, dtype=self.dtype, requires_grad=False)
        print('NMF data and parameters set.')
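The default-b branches above implement the closed forms from Tan and Fevotte (2012). As a hedged illustration, each prior combination can be evaluated directly; the numbers a = 5, mean(V) = 2.0, K0 = 10 are made up, and `gamma` is assumed to be scipy.special.gamma, matching the bare `gamma(...)` calls in the snippet.

import numpy as np
from scipy.special import gamma

a, meanV, K0 = 5.0, 2.0, 10  # illustrative values only
b_l1_l1 = np.sqrt((a - 1) * (a - 2) * meanV / K0)   # L1 ARD
b_l2_l2 = np.pi * (a - 1) * meanV / (2 * K0)        # L2 ARD
b_l1_l2 = meanV * np.sqrt(2) * gamma(a - 3 / 2) / (K0 * np.sqrt(np.pi) * gamma(a))  # mixed
print(b_l1_l1, b_l2_l2, b_l1_l2)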
Example #16
def test_shift():
    a = L.Attend(Owner())
    x = torch.tensor([1, 2, 3, 4, 5, 6]).reshape(1, 1, 2, 3)
    print(x)
    x = a.shift(x)
    print(x)
Example #17
    def initalize_data(self,
                       a,
                       phi,
                       b,
                       prior_W,
                       prior_H,
                       Beta,
                       K0,
                       dtype=torch.float32):

        if K0 is None:
            self.K0 = self.M
            self.number_of_active_components = self.M
        else:
            self.K0 = K0
            self.number_of_active_components = self.K0

        if self.objective.lower() == 'poisson':

            self.phi = torch.tensor(phi, dtype=dtype, requires_grad=False)

        else:
            self.phi = torch.tensor(np.var(self.V) * phi,
                                    dtype=dtype,
                                    requires_grad=False)

        self.a = a
        self.prior_W = prior_W
        self.prior_H = prior_H
        self.C = []
        self.b = b

        W0 = np.multiply(
            np.random.uniform(size=[self.M, self.K0]) + self.eps_.numpy(),
            np.sqrt(self.V_max))
        H0 = np.multiply(
            np.random.uniform(size=[self.K0, self.N]) + self.eps_.numpy(),
            np.sqrt(self.V_max))
        L0 = np.sum(W0, axis=0) + np.sum(H0, axis=1)

        self.W = torch.tensor(W0, dtype=self.dtype, requires_grad=False)
        self.H = torch.tensor(H0, dtype=self.dtype, requires_grad=False)
        self.Lambda = torch.tensor(L0,
                                   dtype=torch.float32,
                                   requires_grad=False)

        # calculate default b as described in Tan and Fevotte (2012)
        if self.b is None or self.b == 'None':
            # L1 ARD
            if self.prior_H == 'L1' and self.prior_W == 'L1':

                self.bcpu = np.sqrt(
                    np.true_divide(
                        (self.a - 1) * (self.a - 2) * np.mean(self.V),
                        self.K0))
                self.b = torch.tensor(self.bcpu,
                                      dtype=self.dtype,
                                      requires_grad=False)

                self.C = torch.tensor(self.N + self.M + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)
            # L2 ARD
            elif self.prior_H == 'L2' and self.prior_W == 'L2':

                self.bcpu = np.true_divide(
                    np.pi * (self.a - 1) * np.mean(self.V), 2 * self.K0)
                self.b = torch.tensor(self.bcpu,
                                      dtype=self.dtype,
                                      requires_grad=False)

                self.C = torch.tensor((self.N + self.M) * 0.5 + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)

            # L1 - L2 ARD
            elif self.prior_H == 'L1' and self.prior_W == 'L2':
                self.bcpu = np.true_divide(
                    np.mean(self.V) * np.sqrt(2) * gamma(self.a - 3 / 2),
                    self.K0 * np.sqrt(np.pi) * gamma(self.a))
                self.b = torch.tensor(self.bcpu,
                                      dtype=self.dtype,
                                      requires_grad=False)
                self.C = torch.tensor(self.N + self.M / 2 + self.a + 1,
                                      dtype=self.dtype)
            elif self.prior_H == 'L2' and self.prior_W == 'L1':
                self.bcpu = np.true_divide(
                    np.mean(self.V) * np.sqrt(2) * gamma(self.a - 3 / 2),
                    self.K0 * np.sqrt(np.pi) * gamma(self.a))
                self.b = torch.tensor(self.bcpu,
                                      dtype=self.dtype,
                                      requires_grad=False)
                self.C = torch.tensor(self.N / 2 + self.M + self.a + 1,
                                      dtype=self.dtype)
        else:
            self.bcpu = self.b
            self.b = torch.tensor(self.b,
                                  dtype=self.dtype,
                                  requires_grad=False)
            if self.prior_H == 'L1' and self.prior_W == 'L1':
                self.C = torch.tensor(self.N + self.M + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)
            # L2 ARD
            elif self.prior_H == 'L2' and self.prior_W == 'L2':
                self.C = torch.tensor((self.N + self.M) * 0.5 + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)
            # L1 - L2 ARD
            elif self.prior_H == 'L1' and self.prior_W == 'L2':
                self.C = torch.tensor(self.N + self.M / 2 + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)
            elif self.prior_H == 'L2' and self.prior_W == 'L1':
                self.C = torch.tensor(self.N / 2 + self.M + self.a + 1,
                                      dtype=self.dtype,
                                      requires_grad=False)

        self.V = torch.tensor(self.V, dtype=self.dtype, requires_grad=False)
        print('NMF data and parameters set.')
Example #18
    def testComplexShading(self):
        """Test specular highlights, colors, and multiple lights per image."""

        model_transforms = camera_utils.euler_matrices(
            [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]

        vertices_world_space = torch.matmul(
            torch.stack([self.cube_vertices, self.cube_vertices]),
            model_transforms.transpose(1, 2))

        normals_world_space = torch.matmul(
            torch.stack([self.cube_normals, self.cube_normals]),
            model_transforms.transpose(1, 2))

        # camera position:
        eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.2, 18.0]], dtype=torch.float32)
        center = torch.tensor([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]], dtype=torch.float32)
        world_up = torch.tensor([[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]], dtype=torch.float32)
        fov_y = torch.tensor([40.0, 13.3], dtype=torch.float32)
        near_clip = 0.1
        far_clip = 25.0
        image_width = 640
        image_height = 480
        light_positions = torch.tensor([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],
                                        [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])
        light_intensities = torch.tensor(
            [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
             [[2.0, 0.0, 1.0], [0.0, 2.0, 1.0]]],
            dtype=torch.float32)
        vertex_diffuse_colors = torch.tensor(2*[[[1.0, 0.0, 0.0],
                                                 [0.0, 1.0, 0.0],
                                                 [0.0, 0.0, 1.0],
                                                 [1.0, 1.0, 1.0],
                                                 [1.0, 1.0, 0.0],
                                                 [1.0, 0.0, 1.0],
                                                 [0.0, 1.0, 1.0],
                                                 [0.5, 0.5, 0.5]]],
                                             dtype=torch.float32)
        vertex_specular_colors = torch.tensor(2*[[[0.0, 1.0, 0.0],
                                                  [0.0, 0.0, 1.0],
                                                  [1.0, 1.0, 1.0],
                                                  [1.0, 1.0, 0.0],
                                                  [1.0, 0.0, 1.0],
                                                  [0.0, 1.0, 1.0],
                                                  [0.5, 0.5, 0.5],
                                                  [1.0, 0.0, 0.0]]],
                                              dtype=torch.float32)
        shininess_coefficients = 6.0 * torch.ones([2, 8], dtype=torch.float32)
        ambient_color = torch.tensor([[0.0, 0.0, 0.0], [0.1, 0.1, 0.2]], dtype=torch.float32)
        renders = mesh_renderer.mesh_renderer(
            vertices_world_space,
            self.cube_triangles,
            normals_world_space,
            vertex_diffuse_colors,
            eye,
            center,
            world_up,
            light_positions,
            light_intensities,
            image_width,
            image_height,
            vertex_specular_colors,
            shininess_coefficients,
            ambient_color,
            fov_y,
            near_clip,
            far_clip)
        tonemapped_renders = torch.cat([
                mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),
                renders[:, :, :, 3:4]
            ],
            dim=3)

        # Check that shininess coefficient broadcasting works by also rendering
        # with a scalar shininess coefficient, and ensuring the result is identical:
        broadcasted_renders = mesh_renderer.mesh_renderer(
            vertices_world_space,
            self.cube_triangles,
            normals_world_space,
            vertex_diffuse_colors,
            eye,
            center,
            world_up,
            light_positions,
            light_intensities,
            image_width,
            image_height,
            vertex_specular_colors,
            6.0,
            ambient_color,
            fov_y,
            near_clip,
            far_clip)
        tonemapped_broadcasted_renders = torch.cat([
                mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),
                broadcasted_renders[:, :, :, 3:4]
            ],
            dim=3)

        for image_id in range(renders.shape[0]):
            target_image_name = "Colored_Cube_%i.png" % image_id
            baseline_image_path = os.path.join(self.test_data_directory,
                                               target_image_name)
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, tonemapped_renders[image_id, :, :, :])
            test_utils.expect_image_file_and_render_are_near(
                self, baseline_image_path, tonemapped_broadcasted_renders[image_id, :, :, :])
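The scalar-shininess path works because the renderer broadcasts a scalar coefficient against the per-vertex tensor. A minimal hedged sketch of that broadcasting, independent of mesh_renderer:

import torch

per_vertex = 6.0 * torch.ones([2, 8], dtype=torch.float32)  # one coefficient per vertex
scalar = torch.tensor(6.0)                                   # scalar alternative
# broadcasting the scalar reproduces the per-vertex tensor exactly
assert torch.equal(per_vertex, torch.broadcast_to(scalar, (2, 8)))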