Example No. 1
 def shp_apply(self, inputs):
     info = self.info
     batch_size, channels, height, width = cgt.shape(inputs[0])
     pooled_height = cgt.ceil_divide(height + 2*info.pad_h - info.kernel_h, info.stride_h)
     pooled_width = cgt.ceil_divide(width + 2*info.pad_w - info.kernel_w, info.stride_w)
     outshape = [batch_size, channels, pooled_height, pooled_width]
     return outshape
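A minimal standalone sketch of the pooled-shape arithmetic above, assuming cgt.ceil_divide(a, b) computes the integer ceiling of a / b:

# Standalone sketch of the pooled-shape arithmetic, assuming
# cgt.ceil_divide(a, b) == ceil(a / b) for positive integers.
def ceil_divide(a, b):
    return -(-a // b)  # ceiling division via negated floor division

height, pad_h, kernel_h, stride_h = 32, 1, 3, 2
pooled_height = ceil_divide(height + 2*pad_h - kernel_h, stride_h)
print(pooled_height)  # ceil((32 + 2 - 3) / 2) = 16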
Example No. 2
File: api.py Project: zxie/cgt
def _subtensor2(x, slis, y):

    dims2drop = []
    for (ax,sli) in enumerate(slis):
        if _is_int_scalar(sli):
            if y is None:
                dims2drop.append(ax)
            else:
                yshape = cgt.shape(y)
                yshape.insert(ax, 1)
                y = y.reshape(yshape)
            sli = slice(sli, sli + 1, 1)

        assert isinstance(sli.step, int) or sli.step is None
        step = 1 if sli.step is None else sli.step

        if step < 0:
            start = size(x, ax)-1 if sli.start is None else sli.start
            stop = -1 if sli.stop is None else sli.stop
        else:
            start = 0 if sli.start is None else sli.start
            stop = size(x, ax) if sli.stop is None else sli.stop

        assert isinstance(step, (int, core.Node)), "step argument of a slice should be an integer or a symbolic variable"

        if y is None:
            x = core.Result(core.GetSli(ax), [x, start, stop, step])
        else:
            # note: we only support incrementing slice along one axis
            return core.Result(core.IncSli(ax), [x, start, stop, step, y])

    x = _dropdims(x, dims2drop)
    return x
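The helper above normalizes an integer index to a one-element slice, then dispatches to GetSli (read a strided slice along one axis) or IncSli (add y into that slice). A NumPy analogue of those two operations, with hypothetical helper names, illustrates the semantics:

import numpy as np

# NumPy analogue (not the cgt API) of GetSli and IncSli; the helper
# names here are hypothetical.
def get_sli(x, ax, start, stop, step):
    index = [slice(None)] * x.ndim
    index[ax] = slice(start, stop, step)
    return x[tuple(index)]

def inc_sli(x, ax, start, stop, step, y):
    out = x.copy()
    index = [slice(None)] * x.ndim
    index[ax] = slice(start, stop, step)
    out[tuple(index)] += y
    return out

x = np.arange(12).reshape(3, 4)
print(get_sli(x, 1, 0, 4, 2))                   # columns 0 and 2
print(inc_sli(x, 0, 1, 2, 1, np.ones((1, 4))))  # add ones into row 1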
Example No. 3
File: api.py Project: Quantza/cgt
def _subtensor2(x, slis, y):

    dims2drop = []
    for (ax,sli) in enumerate(slis):
        if _is_int_scalar(sli):
            if y is None:
                dims2drop.append(ax)
            else:
                yshape = cgt.shape(y)
                yshape.insert(ax, 1)
                y = y.reshape(yshape)
            sli = slice(sli, sli + 1, 1)

        assert isinstance(sli.step, int) or sli.step is None
        step = 1 if sli.step is None else sli.step

        if step < 0:
            start = size(x, ax)-1 if sli.start is None else sli.start
            stop = -1 if sli.stop is None else sli.stop
        else:
            start = 0 if sli.start is None else sli.start
            stop = size(x, ax) if sli.stop is None else sli.stop

        assert isinstance(step, (int, core.Node)), "step argument of a slice should be an integer or a symbolic variable"

        if y is None:
            x = core.Result(core.GetSli(ax), [x, start, stop, step])
        else:
            # note: we only support incrementing slice along one axis
            return core.Result(core.IncSli(ax), [x, start, stop, step, y])

    x = _dropdims(x, dims2drop)
    return x
Example No. 4
File: nn.py Project: x724/cgt
def dropout(x, p=0):
    if p == 0:
        return x
    else:
        mask = cgt.greater(cgt.rand(*cgt.shape(x)), p)
        x = x * mask
        x = x / (1.0 - p)
        return x
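This is inverted dropout: each unit is zeroed with probability p, and survivors are rescaled by 1/(1 - p) so the expected activation matches the input at train time. A NumPy sketch of the same computation:

import numpy as np

# NumPy sketch of the inverted-dropout computation above: zero out
# units with probability p, then rescale so E[output] == input.
def dropout_np(x, p=0.5, rng=np.random):
    if p == 0:
        return x
    mask = rng.rand(*x.shape) > p  # keep each unit with probability 1 - p
    return x * mask / (1.0 - p)

x = np.ones((2, 3))
print(dropout_np(x, p=0.5))  # surviving entries are scaled to 2.0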
Example No. 5
File: nn.py Project: EdsterG/cgt
def dropout(x, p=0):
    if p == 0:
        return x
    else:
        mask = cgt.greater(cgt.rand(*cgt.shape(x)), p)
        x = x * mask
        x = x / (1.0 - p)
        return x
Example No. 6
 def shp_apply(self, inputs):
     info = self.info
     batch_size, channels, height, width = cgt.shape(inputs[0])
     pooled_height = cgt.ceil_divide(
         height + 2 * info.pad_h - info.kernel_h, info.stride_h)
     pooled_width = cgt.ceil_divide(width + 2 * info.pad_w - info.kernel_w,
                                    info.stride_w)
     outshape = [batch_size, channels, pooled_height, pooled_width]
     return outshape
Example No. 7
 def shp_apply(self, inputs):
     # pooled_height_ = static_cast<int>(ceil(static_cast<float>(height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
     # pooled_width_ = static_cast<int>(ceil(static_cast<float>(width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
     info = self.info
     batch_size, channels, height, width = cgt.shape(inputs[0])
     pooled_height = cgt.ceil_divide(height + 2*info.pad_h - info.kernel_h, info.stride_h)
     pooled_width = cgt.ceil_divide(width + 2*info.pad_w - info.kernel_w, info.stride_w)
     outshape = [batch_size, channels, pooled_height, pooled_width]
     return (outshape, outshape)
Example No. 8
 def pullback(self, inputs, output, goutput):
     if self.pullback_impl is None:
         raise core.MethodNotDefined
     pb_input_types = self.input_types + [self.output_type]*2
     pb_output_type = core.TupleType(*self.input_types)
     pbop = EasyCustomOp(pb_input_types, pb_output_type,
         forward_impl=self.pullback_impl, pullback_impl=None,
         shapefun=lambda *args: tuple(cgt.shape(x) for x in inputs))
     return cgt.core.unpack(core.Result(pbop, inputs + [output, goutput]))
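From the argument list inputs + [output, goutput] built above, a pullback_impl receives every forward input, then the forward output, then the output gradient, and should return one gradient per input. A hypothetical pullback_impl for an elementwise-square op:

# Hypothetical pullback_impl for an elementwise-square EasyCustomOp,
# following the calling convention above: (inputs..., output, goutput)
# in, one gradient per input out.
def square_pullback_impl(x, y, gy):
    return (2.0 * x * gy,)  # d/dx x**2 = 2x, chained with gy

print(square_pullback_impl(3.0, 9.0, 1.0))  # (6.0,)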
Example No. 9
 def sample(self, p, shape=None, numeric=False):
     """ Element-wise sampling for each component of p """
     # TODO_TZ: maybe cgt has a mechanism to evaluate an expr numerically
     if not numeric:
         p = core.as_node(p)
         shape = shape or cgt.shape(p)
         return cgt.rand(*shape) <= p
     else:
         assert isinstance(p, np.ndarray)
         return np.array(nr.rand(*p.shape) <= p, dtype="i2")
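The numeric branch draws an independent Bernoulli sample per component of p. A quick standalone run of the same expression:

import numpy as np
import numpy.random as nr

# Numeric branch of sample(): each output element is 1 with the
# corresponding probability in p (one Bernoulli draw per component).
p = np.array([0.1, 0.5, 0.9])
draw = np.array(nr.rand(*p.shape) <= p, dtype="i2")
print(draw)  # e.g. [0 1 1]; entry i is 1 with probability p[i]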
Example No. 10
 def shp_apply(self, inputs):
     info = self.info
     batch_size, channels, height, width = cgt.shape(inputs[0])
     height_out = (height + 2 * info.pad_h -
                   info.kernel_h) // info.stride_h + 1
     width_out = (width + 2 * info.pad_w -
                  info.kernel_w) // info.stride_w + 1
     return [
         batch_size, height_out, width_out,
         channels * info.kernel_w * info.kernel_h
     ]
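Unlike the ceil_divide pooling examples, this shape uses the standard convolution output-size formula out = (in + 2*pad - kernel) // stride + 1 with floor division. A quick numeric check:

# Quick numeric check of the floor-division output-size formula above.
height, pad_h, kernel_h, stride_h = 32, 1, 3, 2
height_out = (height + 2*pad_h - kernel_h) // stride_h + 1
print(height_out)  # (32 + 2 - 3) // 2 + 1 = 16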
Example No. 11
 def shp_apply(self, inputs):
     # pooled_height_ = static_cast<int>(ceil(static_cast<float>(height_ + 2 * pad_h_ - kernel_h_) / stride_h_)) + 1;
     # pooled_width_ = static_cast<int>(ceil(static_cast<float>(width_ + 2 * pad_w_ - kernel_w_) / stride_w_)) + 1;
     info = self.info
     batch_size, channels, height, width = cgt.shape(inputs[0])
     pooled_height = cgt.ceil_divide(
         height + 2 * info.pad_h - info.kernel_h, info.stride_h)
     pooled_width = cgt.ceil_divide(width + 2 * info.pad_w - info.kernel_w,
                                    info.stride_w)
     outshape = [batch_size, channels, pooled_height, pooled_width]
     return (outshape, outshape)
Example No. 12
class Im2Col(core.Op):
    available_impls = ("native_cpu", )

    def __init__(self, info):
        assert info.stride_h > 0 and info.stride_w > 0
        self.info = info

    def get_diff(self, _):
        return [True]

    def get_py_impl(self):
        raise core.MethodNotDefined

    def pullback(self, inputs, _y, gy):
        (x,) = inputs  # tuple parameters in a def are Python 2-only syntax
        return [core.Result(Col2Im(self.info), [gy] + cgt.shape(x))]
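Because im2col is linear in its input, the pullback applies the adjoint operation (Col2Im) to the output gradient; cgt.shape(x) supplies the original image dimensions, which the column layout alone does not determine. A generic sketch (not the cgt kernels) of the linear-op/adjoint relationship:

import numpy as np

# Generic sketch: for any linear op y = A @ x, the pullback maps the
# output gradient gy back as A.T @ gy. Im2Col is linear, and Col2Im
# plays the role of A.T here.
A = np.array([[1.0, 0.0, 1.0],
              [0.0, 1.0, 1.0]])
x = np.array([1.0, 2.0, 3.0])
gy = np.array([1.0, 1.0])
gx = A.T @ gy  # gradient w.r.t. x has x's shape, hence cgt.shape(x)
print(gx)      # [1. 1. 2.]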
Example No. 13
def determine_memowner(nodes_sorted, updates, node2dev, outputs):
    # First determine how many "child" nodes each node has
    node2child = defaultdict(list)
    for node in nodes_sorted:
        for parent in node.parents:
            node2child[parent].append(node)

    # Now traverse graph again and see where we can use the same memory
    node2memowner = {} # mapping node x -> the node that owns its memory
    
    # For updates, memlocation(RHS) = memlocation(LHS)
    after2before = {after:before for (before,after) in updates}

    enable_inplace_opt = core.get_config()["enable_inplace_opt"]

    for node in nodes_sorted:

        base = node  # by default, a node owns its own memory
        if node.is_argument():
            pass
        elif node.op.writes_to_input >= 0:
            base = node2memowner[node.parents[node.op.writes_to_input]]
        elif node in after2before:
            base = after2before[node]
        elif enable_inplace_opt and node.op.return_type == "byref": # TODO think about if we need any other conditions
            nodeshape = node.op.shp_apply(node.parents)
            for parent in node.parents:
                parentowner = node2memowner[parent]
                if (len(node2child[parent])==1
                        and nodeshape==cgt.shape(parent) # XXX not a very robust way to check
                        and node.dtype == parent.dtype
                        and _is_data_mutable(parentowner)
                        and parent not in outputs
                        ):
                    base = parentowner
                    break
        # TODO: add optimization for in-place incrementing
        node2memowner[node] = base

    return node2memowner
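A toy illustration, with hypothetical node names, of the update rule memlocation(RHS) = memlocation(LHS) applied above:

# Toy illustration (hypothetical node names): for an update pair
# (before, after), the "after" node reuses the memory owned by the
# "before" node.
updates = [("w", "w_new"), ("b", "b_new")]
after2before = {after: before for (before, after) in updates}
node2memowner = {"w": "w", "b": "b"}
for node in ["w_new", "b_new"]:
    node2memowner[node] = node2memowner[after2before[node]]
print(node2memowner)  # {'w': 'w', 'b': 'b', 'w_new': 'w', 'b_new': 'b'}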
Example No. 14
 def shp_apply(self, inputs):
     return cgt.shape(inputs[0])
Example No. 15
 def sample(self, p, shape=None):
     p = core.as_node(p)
     shape = shape or cgt.shape(p)
     return cgt.rand(*shape) <= p
Example No. 16
 def shp_apply(self, inputs):
     info = self.info
     batch_size, channels, height, width = cgt.shape(inputs[0])
     height_out = (height + 2 * info.pad_h - info.kernel_h) // info.stride_h + 1
     width_out = (width + 2 * info.pad_w - info.kernel_w) // info.stride_w + 1
     return [batch_size, height_out, width_out, channels * info.kernel_w * info.kernel_h]
Example No. 17
 def shp_apply(self, parents):
     if self.shapefun:
         return self.shapefun(parents)
     else:
         return cgt.shape(self)
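Assumed usage, mirroring the shapefun seen in Example No. 8: an elementwise custom op can pass a shapefun reporting that its output shape equals its first parent's shape.

# Assumed usage: shapefun receives the parent nodes, as in the
# self.shapefun(parents) call above.
shapefun = lambda parents: cgt.shape(parents[0])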