Example #1
def __call__(self, x):
    # Shape inference is needed in both branches below.
    _, _, width, height = cgt.infer_shape(x)
    if self.original_stride != (1, 1):
        unstrided_width = width * self.original_stride[0]
        unstrided_height = height * self.original_stride[1]
        # Workaround: inc_subtensor cannot scatter along two strided axes
        # at once, i.e. we cannot write
        # cgt.inc_subtensor(upsampled, (slice(None), slice(None),
        #     slice(None, None, self.original_stride[0]),
        #     slice(None, None, self.original_stride[1])), x)
        # so we zero-stuff one axis at a time. inc_subtensor is functional
        # and returns a new tensor, so its result must be kept.
        placeholder = cgt.zeros((x.shape[0], x.shape[1], width,
                                 unstrided_height))  # (None, 64, 4, 8)
        placeholder = cgt.inc_subtensor(
            placeholder,
            (slice(None), slice(None), slice(None),
             slice(None, None, self.original_stride[1])), x)
        upsampled = cgt.zeros((x.shape[0], x.shape[1], unstrided_width,
                               unstrided_height))  # (None, 64, 8, 8)
        upsampled = cgt.inc_subtensor(
            upsampled,
            (slice(None), slice(None),
             slice(None, None, self.original_stride[0]), slice(None)),
            placeholder)
    else:
        upsampled = x
    # The deconvolution is a convolution of the zero-stuffed input.
    deconv = super(SpatialDeconvolution, self).__call__(upsampled)
    # Lastly, crop away the original padding.
    pad = self.original_pad
    original_width = (
        (width - 1) * self.original_stride[0]
    ) - 2 * self.original_pad[0] + self.original_kernelshape[0]
    original_height = (
        (height - 1) * self.original_stride[1]
    ) - 2 * self.original_pad[1] + self.original_kernelshape[1]
    return deconv[:, :, pad[0]:(pad[0] + original_width),
                  pad[1]:(pad[1] + original_height)]
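The two inc_subtensor calls implement fractional striding by zero-stuffing: the input is scattered onto a larger grid of zeros, one axis at a time. A minimal numpy sketch of the same scatter (the shapes here are made up for illustration):

import numpy as np

x = np.arange(6, dtype='float64').reshape(1, 1, 2, 3)  # (B, C, H, W)
stride = (2, 2)
up = np.zeros((1, 1, 2 * stride[0], 3 * stride[1]))
up[:, :, ::stride[0], ::stride[1]] = x
# Every input value now sits at a stride-aligned position and the gaps
# are zero, which is exactly what the pair of inc_subtensor calls builds.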
Example #2
File: api.py Project: Quantza/cgt
def to_one_hot(y, nb_class, dtype=None):
    """
    Return a matrix where each row corresponds to the one-hot
    encoding of each element in y.

    Parameters
    ----------
    y
        A vector of integer values between 0 and nb_class - 1.
    nb_class : int
        The number of classes in y.
    dtype : data-type
        The dtype of the returned matrix. Default floatX.

    Returns
    -------
    object
        A matrix of shape (y.shape[0], nb_class), where each row ``i`` is
        the one-hot encoding of the corresponding ``y[i]`` value.
    """
    fill_vals = cgt.ones((y.shape[0],))
    ret = cgt.zeros((y.shape[0], nb_class), dtype)

    # Row indices 0..n-1 paired with the column given by each label.
    d1 = cgt.arange(y.shape[0])
    # Note: casting the labels to int8 limits nb_class to at most 128.
    d2 = cgt.cast(y, 'i1')

    # inc_subtensor is functional: it returns a copy of `ret` with the
    # ones added at the (row, label) positions.
    ret = cgt.inc_subtensor(ret, [d1, d2], fill_vals)
    return ret
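A hedged usage sketch, assuming cgt.vector and cgt.function mirror their Theano counterparts (which the rest of the library suggests):

import numpy as np
import cgt

y = cgt.vector("y", dtype='i8')
f = cgt.function([y], to_one_hot(y, 4))
print(f(np.array([0, 2, 3], dtype='i8')))
# [[ 1.  0.  0.  0.]
#  [ 0.  0.  1.  0.]
#  [ 0.  0.  0.  1.]]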
Example #3
def diag(v, k=0):
    """
    see numpy.diag
    """
    assert isinstance(k, int)
    assert v.ndim == 1
    m = size(v, 0)
    n = m + abs(k)
    out = cgt.zeros((n, n), v.dtype)
    # Scatter the len(v) values onto the k-th diagonal: it starts at
    # column k when k >= 0 and at row -k when k < 0. (The index arrays
    # must have length len(v), not n, or they run past the last column.)
    out = inc_subtensor(out, (cgt.arange(m) + max(-k, 0),
                              cgt.arange(m) + max(k, 0)), v)
    return out
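For k = 1 and a length-2 vector the target layout, per numpy.diag, is:

import numpy as np

np.diag(np.array([1., 2.]), k=1)
# array([[ 0.,  1.,  0.],
#        [ 0.,  0.,  2.],
#        [ 0.,  0.,  0.]])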
Example #4
File: nn.py Project: EdsterG/cgt
def conv2d(x_BKRC, f_LKrc, kernelshape, pad=(0, 0), stride=(1, 1)):
    devtype = cgt.get_config()["default_device"].devtype
    L, K, r, c = f_LKrc.shape
    if devtype == "gpu":
        # cuDNN wants an explicit bias term; pass zeros of shape (1, L, 1, 1).
        b_1K11 = cgt.zeros((1, L, 1, 1), cgt.floatX)
        return core.Result(
            cudnn_ops.CudnnConvForward(pad[0], pad[1], stride[0], stride[1]),
            [x_BKRC, f_LKrc, b_1K11])
    else:
        assert devtype == "cpu"
        # CPU path: im2col turns each receptive field into a row, so the
        # convolution becomes a single matrix multiply against the
        # flattened filters.
        col_BmnZ = im2col(x_BKRC, kernelshape, pad, stride)
        f_LZ = f_LKrc.reshape([L, K * r * c])
        B, m, n, Z = col_BmnZ.shape
        col_Bmn_Z = col_BmnZ.reshape([B * m * n, Z])
        col_Bmn_L = core.Result(core.Mul22(False, True), [col_Bmn_Z, f_LZ])
        return col_Bmn_L.reshape([B, m, n, L]).transpose([0, 3, 1, 2])
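The suffix convention encodes axes: x_BKRC is (batch, in-channels, rows, cols) and f_LKrc is (out-channels, in-channels, kernel-rows, kernel-cols). A hedged shape check of the CPU path, assuming cgt.tensor4 and cgt.function follow the Theano-style API used elsewhere in these examples:

import numpy as np
import cgt
from cgt import nn

x = cgt.tensor4("x")
f = cgt.tensor4("f")
out = nn.conv2d(x, f, kernelshape=(3, 3), pad=(1, 1), stride=(1, 1))
fn = cgt.function([x, f], out)
y = fn(np.zeros((2, 8, 16, 16), cgt.floatX),
       np.zeros((4, 8, 3, 3), cgt.floatX))
print(y.shape)  # pad 1, stride 1 preserves spatial size: (2, 4, 16, 16)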
Example #5
    def get_train_objective(self, max_label_length, ground_labels_basis_btc):
        # Learned initial context and decoder state, one row per batch element.
        context_i_bf = parameter(
            init_array(IIDUniform(-0.1, 0.1),
                       (self.batch_size, self.feature_size)), name=None)
        state_i_bf = parameter(
            init_array(IIDUniform(-0.1, 0.1),
                       (self.batch_size, self.decoder_size)), name=None)
        prev_out_bc = cgt.zeros(
            (self.batch_size, self.true_number_classes),
            dtype='i8')  # + self.start_token_index
        log_probs = None
        for iter_step in range(max_label_length):
            state_i_bf = self.get_decoder_state(context_i_bf, prev_out_bc,
                                                state_i_bf)
            context_i_bf = self.get_context(state_i_bf)
            this_character_dist_bc = self.get_character_distribution(
                state_i_bf, context_i_bf)
            # Teacher forcing: the ground-truth character at step t becomes
            # the previous output for step t + 1.
            prev_out_bc = ground_labels_basis_btc[:, iter_step, :]
            # The one-hot row times the distribution picks out the
            # probability of the true character; sum over classes, then log.
            step_log_probs = cgt.log(
                cgt.sum(prev_out_bc * this_character_dist_bc, axis=1))
            if log_probs is None:
                log_probs = cgt.sum(step_log_probs)
            else:
                log_probs += cgt.sum(step_log_probs)
        # Negative log-likelihood, to be minimized.
        return -log_probs
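The per-step likelihood trick is easier to see in plain numpy (made-up numbers):

import numpy as np

dist = np.array([[0.7, 0.2, 0.1],
                 [0.1, 0.1, 0.8]])  # predicted class distributions
onehot = np.array([[1., 0., 0.],
                   [0., 0., 1.]])   # ground-truth one-hot rows
print(np.log((onehot * dist).sum(axis=1)))
# [-0.35667494 -0.22314355] == log([0.7, 0.8])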
Example #6
from time import time

import numpy as np
import cgt
import gru  # local module providing GRUCell (assumed to sit next to this script)

elapsed = []
horizons = 2**np.arange(2, 10)

for horizon in horizons:
    print "HORIZON", horizon
    tstart = time()

    batch_size = 6
    dim_x = 16
    mem_size = 10

    X_tnk = cgt.tensor3("X")  # axes: (time, batch, features)

    cell = gru.GRUCell([dim_x], mem_size)

    # Initial memory: one row of mem_size zeros per batch element
    # (X_tnk.shape[1] is the batch axis).
    Minit_nk = cgt.zeros((X_tnk.shape[1], mem_size), cgt.floatX)
    M = Minit_nk

    # Unroll the GRU for `horizon` steps; the graph grows linearly with it.
    for t in xrange(horizon):
        M = cell(M, X_tnk[t])

    # cgt.print_tree(M)
    print "simplifying..."
    M_simp = cgt.simplify([M])
    print "done"
    # cgt.print_tree(M_simp)
    print "fn before:", cgt.count_nodes(M)
    print "fn after:", cgt.count_nodes(M_simp)

    gs = cgt.grad(cgt.sum(M), cell.params())
    print "grad before", cgt.count_nodes(gs)