Example #1
def __call__(self, x):
    # infer the spatial dims of x up front; they are needed both for the
    # upsampling branch and for computing the original output size below
    _, _, width, height = cgt.infer_shape(x)
    if self.original_stride != 1:
        unstrided_width = width * self.original_stride[0]
        unstrided_height = height * self.original_stride[1]
        # the two inc_subtensor calls below work around doing this in one call:
        # cgt.inc_subtensor(upsampled, (slice(None), slice(None),
        #                               slice(None, None, self.original_stride[0]),
        #                               slice(None, None, self.original_stride[1])), x)
        placeholder = cgt.zeros((x.shape[0], x.shape[1], width,
                                 unstrided_height))  # (None, 64, 4, 8)
        placeholder = cgt.inc_subtensor(
            placeholder,
            (slice(None), slice(None), slice(None),
             slice(None, None, self.original_stride[1])), x)
        upsampled = cgt.zeros((x.shape[0], x.shape[1], unstrided_width,
                               unstrided_height))  # (None, 64, 8, 8)
        upsampled = cgt.inc_subtensor(
            upsampled,
            (slice(None), slice(None),
             slice(None, None, self.original_stride[0]), slice(None)),
            placeholder)
    else:
        upsampled = x
    # then we conv to deconv
    deconv = super(SpatialDeconvolution, self).__call__(upsampled)
    # lastly we cut off the original padding
    pad = self.original_pad
    original_width = (
        (width - 1) * self.original_stride[0]
    ) - 2 * self.original_pad[0] + self.original_kernelshape[0]
    original_height = (
        (height - 1) * self.original_stride[1]
    ) - 2 * self.original_pad[1] + self.original_kernelshape[1]
    return deconv[:, :, pad[0]:(pad[0] + original_width),
                  pad[1]:(pad[1] + original_height)]
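The two inc_subtensor calls above implement the "unstriding" (zero-interleaved upsampling) step one axis at a time. A minimal NumPy sketch of the same idea, with made-up shapes and stride, looks like this:

import numpy as np

# Illustrative only: scatter x into zero tensors with strided slices, one axis
# at a time, mirroring the placeholder/upsampled steps above.
x = np.arange(8, dtype=float).reshape(1, 1, 2, 4)   # (batch, channels, width, height)
stride = (2, 2)
placeholder = np.zeros((1, 1, 2, 8))                # upsample the last axis first
placeholder[:, :, :, ::stride[1]] = x
upsampled = np.zeros((1, 1, 4, 8))                  # then the width axis
upsampled[:, :, ::stride[0], :] = placeholder
print(upsampled[0, 0])                              # x spread over a grid of zeros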
Example #2
def inc_row(M, x, Y):
    if isinstance(M, np.ndarray):
        M1 = M.copy()
        M1[0:1] += x
    else:
        M1 = cgt.inc_subtensor(M, slice(0, 1), x)
    return (M1 * Y).sum()
Example #3
def inc_vec(M, x, Y):
    if isinstance(M, np.ndarray):
        M1 = M.copy()
        M1[0] += x
    else:
        M1 = cgt.inc_subtensor(M, 0, x)
    return (M1 * Y).sum()
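inc_row and inc_vec are small helpers that run both on NumPy arrays (in-place +=) and on symbolic cgt nodes (via inc_subtensor), which makes it easy to compare a numeric result against the compiled graph. A rough sketch of such a check, with assumed shapes and using inc_row as defined above:

import cgt
import numpy as np

# Assumed setup (not taken from the test suite): evaluate inc_row numerically
# and symbolically and check that the two paths agree.
Mval = np.random.randn(4, 3)
xval = np.random.randn(1, 3)
Yval = np.random.randn(4, 3)

M, x, Y = cgt.matrix(), cgt.matrix(), cgt.matrix()
f = cgt.function([M, x, Y], inc_row(M, x, Y))   # symbolic path via inc_subtensor
assert np.allclose(f(Mval, xval, Yval), inc_row(Mval, xval, Yval))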
Example #4
File: api.py Project: Quantza/cgt
def to_one_hot(y, nb_class, dtype=None):
    """
    Return a matrix where each row corresponds to the one hot
    encoding of each element in y.
    Parameters
    ----------
    y
        A vector of integer value between 0 and nb_class - 1.
    nb_class : int
        The number of classes in y.
    dtype : data-type
        The dtype of the returned matrix. Default floatX.
    Returns
    -------
    object
        A matrix of shape (y.shape[0], nb_class), where each row ``i`` is
        the one hot encoding of the corresponding ``y[i]`` value.
    """
    
    fill_vals = cgt.ones((y.shape[0],))
    ret = cgt.zeros((y.shape[0], nb_class), dtype)
    
    d1 = cgt.arange(y.shape[0])
    d2 = cgt.cast(y, 'i1')
    
    ret = cgt.inc_subtensor(ret, [d1, d2], fill_vals)
    
    return ret
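A possible way to exercise to_one_hot (labels and class count below are made up): compile the expression once with cgt.function and feed integer labels at call time.

import cgt
import numpy as np

labels = cgt.vector(dtype='i8')
onehot = cgt.function([labels], to_one_hot(labels, 4))
print(onehot(np.array([0, 2, 3])))
# expected rows: [1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1] (in the default float dtype)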
Example #5
File: api.py Project: xyuan/cgt
def to_one_hot(y, nb_class, dtype=None):
    """
    Return a matrix where each row corresponds to the one hot
    encoding of each element in y.
    Parameters
    ----------
    y
        A vector of integer value between 0 and nb_class - 1.
    nb_class : int
        The number of classes in y.
    dtype : data-type
        The dtype of the returned matrix. Default floatX.
    Returns
    -------
    object
        A matrix of shape (y.shape[0], nb_class), where each row ``i`` is
        the one hot encoding of the corresponding ``y[i]`` value.
    """

    fill_vals = cgt.ones((y.shape[0], ))
    ret = cgt.zeros((y.shape[0], nb_class), dtype)

    d1 = cgt.arange(y.shape[0])
    d2 = cgt.cast(y, 'i1')

    ret = cgt.inc_subtensor(ret, [d1, d2], fill_vals)

    return ret
Example #6
def test_incsubtensor0():
    # First let's test fancy slice along zeroth dimension

    W = cgt.shared(np.zeros((5, 3)), name="W")
    inc = cgt.matrix()  # we'll increment W by this matrix
    incval = np.arange(9).reshape(3, 3)

    inds = cgt.vector(dtype='i8')
    updates = {W: cgt.inc_subtensor(W, inds, inc)}
    f = cgt.function([inds, inc], [], updates=updates)
    f([1, 2, 4], incval)

    assert np.allclose(
        W.op.get_value(),
        np.array([[0., 0., 0.], [0., 1., 2.], [3., 4., 5.], [0., 0., 0.],
                  [6., 7., 8.]]))
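For reference, a rough NumPy analogue of the update being tested (not part of the test file): np.add.at accumulates into the indexed rows, which matches inc_subtensor's semantics.

import numpy as np

W = np.zeros((5, 3))
np.add.at(W, np.array([1, 2, 4]), np.arange(9).reshape(3, 3))
# W now holds the same values the assertion above checks for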
Example #7
def test_incsubtensor2():
    W = cgt.shared(np.zeros((5, 3)), name="W")
    i0 = cgt.vector(dtype='i8')
    i1 = cgt.vector(dtype='i8')
    inc = cgt.vector()

    updates2 = {W: cgt.inc_subtensor(W, (i0, i1), inc)}
    f2 = cgt.function([i0, i1, inc], [], updates=updates2)
    f2([0, 1, 2, 2], [0, 1, 2, 2], [1, 2, 3, 4])
    assert np.allclose(
        W.op.get_value(),
        np.array([
            [1., 0., 0.],
            [0., 2., 0.],
            [0., 0., 7.],
            [0., 0., 0.],
            [0., 0., 0.],
        ]))
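A NumPy analogue of this test (illustrative, not from the test file): with paired row/column index vectors, repeated positions accumulate, which is why W[2, 2] ends up as 3 + 4 = 7.

import numpy as np

W = np.zeros((5, 3))
np.add.at(W, (np.array([0, 1, 2, 2]), np.array([0, 1, 2, 2])), [1., 2., 3., 4.])
# W[2, 2] == 7.0, matching the expected array in the assertion above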
Example #8
def test_incsubtensor1():
    W = cgt.shared(np.zeros((5, 3)), name="W")
    inc = cgt.matrix()  # we'll increment W by this matrix
    incval = np.arange(9).reshape(3, 3)

    start = cgt.scalar(dtype='i8')
    stop = cgt.scalar(dtype='i8')
    updates = {W: cgt.inc_subtensor(W, slice(start, stop), inc)}
    f = cgt.function([start, stop, inc], [], updates=updates)
    f(0, 3, incval)
    assert np.allclose(
        W.op.get_value(),
        np.array([
            [0., 1., 2.],
            [3., 4., 5.],
            [6., 7., 8.],
            [0., 0., 0.],
            [0., 0., 0.],
        ]))
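Because start and stop are ordinary graph inputs, the same compiled function can update a different row range on every call. A small variation on the test above (assumed, not part of the test file):

import cgt
import numpy as np

W = cgt.shared(np.zeros((5, 3)), name="W")
inc = cgt.matrix()
start = cgt.scalar(dtype='i8')
stop = cgt.scalar(dtype='i8')
f = cgt.function([start, stop, inc], [],
                 updates={W: cgt.inc_subtensor(W, slice(start, stop), inc)})
f(2, 5, np.ones((3, 3)))       # rows 2..4 get +1
f(0, 2, 2 * np.ones((2, 3)))   # rows 0..1 get +2
print(W.op.get_value())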