Example 1
File: nn.py Project: mesnilgr/cgt
def max_pool_2d(x, kernelshape, pad=(0, 0), stride=(1, 1)):
    devtype = cgt.get_config()["default_device"].devtype
    kernel_h, kernel_w = kernelshape
    pad_h, pad_w = pad
    stride_h, stride_w = stride
    info = PoolInfo(kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w)
    if devtype == "gpu":
        return core.Result(cudnn_ops.CudnnPoolForward(info), [x])
    else:
        return core.Result(MaxPool(info), [x])[0]
Example 2
File: nn.py Project: EdsterG/cgt
def max_pool_2d(x, kernelshape, pad=(0, 0), stride=(1, 1)):
    devtype = cgt.get_config()["default_device"].devtype
    kernel_h, kernel_w = kernelshape
    pad_h, pad_w = pad
    stride_h, stride_w = stride
    info = PoolInfo(kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w)
    if devtype == "gpu":        
        return core.Result(cudnn_ops.CudnnPoolForward(info), [x])
    else:
        return core.Result(MaxPool(info), [x])[0]
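The two snippets above are the same function from two forks of cgt. A minimal usage sketch, assuming cgt.tensor4 and cgt.function work as in cgt's Theano-style front end (variable names and shapes are illustrative, not taken from either project):

import numpy as np
import cgt
from cgt import nn

x = cgt.tensor4("x")  # layout (batch, channels, rows, cols), matching conv2d's x_BKRC
pooled = nn.max_pool_2d(x, kernelshape=(2, 2), pad=(0, 0), stride=(2, 2))
f = cgt.function([x], pooled)
out = f(np.random.randn(1, 3, 8, 8).astype(cgt.floatX))
print(out.shape)  # expected (1, 3, 4, 4)

On a GPU device the call routes to cudnn_ops.CudnnPoolForward; on CPU it goes through the MaxPool op, as the branches above show.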
Example 3
File: nn.py Project: FighterLYL/cgt
def conv2d(x_BKRC, f_LKrc, kernelshape, pad=(0, 0), stride=(1, 1)):
    devtype = cgt.get_config()["default_device"].devtype
    if devtype == "gpu":
        return cudnn_ops.CudnnConvForward(pad[0], pad[1], stride[0], stride[1])
    else:
        assert devtype == "cpu"
        col_BmnZ = im2col(x_BKRC, kernelshape, pad, stride)
        L, K, r, c = f_LKrc.shape
        f_LZ = f_LKrc.reshape([L, K * r * c])
        B, m, n, Z = col_BmnZ.shape
        col_Bmn_Z = col_BmnZ.reshape([B * m * n, Z])
        col_Bmn_L = core.Result(core.Mul22(False, True), [col_Bmn_Z, f_LZ])
        return col_Bmn_L.reshape([B, m, n, L]).transpose([0, 3, 1, 2])
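The CPU branch implements the convolution as im2col followed by a single matrix multiply; Mul22(False, True) multiplies its first argument by the transpose of the second. A plain NumPy sketch of the same shape algebra, with random data standing in for the im2col output and illustrative sizes:

import numpy as np

B, K, R, C = 2, 3, 8, 8      # batch, input channels, input height, input width
L, r, c = 4, 3, 3            # output channels, kernel height, kernel width
m, n = R - r + 1, C - c + 1  # output spatial size for pad=(0, 0), stride=(1, 1)
Z = K * r * c

col_BmnZ = np.random.randn(B, m, n, Z)  # stands in for im2col(x_BKRC, kernelshape, pad, stride)
f_LZ = np.random.randn(L, Z)            # filters flattened to (L, K*r*c)

col_Bmn_L = col_BmnZ.reshape(B * m * n, Z).dot(f_LZ.T)          # Mul22(False, True)
out_BLmn = col_Bmn_L.reshape(B, m, n, L).transpose(0, 3, 1, 2)  # back to (B, L, m, n)
print(out_BLmn.shape)  # (2, 4, 6, 6)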
Example 4
    def runTest(self):
        if cgt.get_config()["backend"] != "python":
            cgt.utils.warn("Skipping test -- only works for backend=python")
            return
        x = cgt.scalar()
        with cgt.debug_context() as dbg:
            cgt.assert_(cgt.equal(x, 1), "yoyoyo")
            cgt.dbg_call(myfunc, x)
            print "dbg", dbg.nodes
            # cgt.assert_(cgt.equal(x, 2))

        f = cgt.make_function([x], [x], dbg=dbg)
        f(1)
        with self.assertRaises(AssertionError):
            f(2)
Example 5
    def runTest(self):
        if cgt.get_config()["backend"] != "python":
            cgt.utils.warn("Skipping test -- only works for backend=python")
            return
        x = cgt.scalar()
        with cgt.debug_context() as dbg:
            cgt.assert_(cgt.equal(x, 1), "yoyoyo")
            cgt.dbg_call(myfunc, x)
            print "dbg", dbg.nodes
            # cgt.assert_(cgt.equal(x, 2))

        f = cgt.make_function([x], [x], dbg=dbg)
        f(1)
        with self.assertRaises(AssertionError):
            f(2)
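Examples 4 and 5 are the same test; myfunc is not defined in either snippet, and any Python callable works, since cgt.dbg_call simply invokes it with the runtime value. An illustrative stand-in:

def myfunc(x):
    # called at execution time with the value flowing through the graph
    print("dbg_call received %s" % x)

With the debug context attached via dbg=dbg, f(1) satisfies the cgt.assert_(cgt.equal(x, 1), "yoyoyo") check, while f(2) violates it and raises AssertionError, which is what the assertRaises block verifies.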
Example 6
File: nn.py Project: mesnilgr/cgt
def conv2d(x_BKRC, f_LKrc, kernelshape, pad=(0, 0), stride=(1, 1)):
    devtype = cgt.get_config()["default_device"].devtype
    L, K, r, c = f_LKrc.shape
    if devtype == "gpu":
        b_1K11 = cgt.zeros((1, L, 1, 1), cgt.floatX)
        return core.Result(
            cudnn_ops.CudnnConvForward(pad[0], pad[1], stride[0], stride[1]),
            [x_BKRC, f_LKrc, b_1K11])
    else:
        assert devtype == "cpu"
        col_BmnZ = im2col(x_BKRC, kernelshape, pad, stride)
        f_LZ = f_LKrc.reshape([L, K * r * c])
        B, m, n, Z = col_BmnZ.shape
        col_Bmn_Z = col_BmnZ.reshape([B * m * n, Z])
        col_Bmn_L = core.Result(core.Mul22(False, True), [col_Bmn_Z, f_LZ])
        return col_Bmn_L.reshape([B, m, n, L]).transpose([0, 3, 1, 2])
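A minimal usage sketch for conv2d, again assuming cgt.tensor4 and cgt.function from cgt's Theano-style front end (shapes are illustrative):

import numpy as np
import cgt
from cgt import nn

x = cgt.tensor4("x")  # input,   layout (B, K, R, C)
w = cgt.tensor4("w")  # filters, layout (L, K, r, c)
y = nn.conv2d(x, w, kernelshape=(3, 3), pad=(1, 1), stride=(1, 1))
f = cgt.function([x, w], y)
out = f(np.random.randn(2, 3, 8, 8).astype(cgt.floatX),
        np.random.randn(4, 3, 3, 3).astype(cgt.floatX))
print(out.shape)  # expected (2, 4, 8, 8): pad=(1, 1) preserves the spatial size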
Example 7
def create_interpreter(inputs, outputs, eg, node2memloc):
    assert isinstance(eg, ExecutionGraph)
    input_types = [input.typ for input in inputs] #pylint: disable=W0622
    output_locs = [node2memloc[node] for node in outputs]

    config = cgt.get_config()
    backend = config["backend"]
    parallel = config["parallel"]
    if backend == "python":
        if parallel:
            raise NotImplementedError("For parallel=True, set backend=native")
            # return ParallelInterpreter(eg, output_locs, input_types)
        else:
            return SequentialInterpreter(eg, output_locs, input_types)
    elif backend == "native":
        if parallel:
            return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, config["num_threads"])
        else:
            return cgt.cycgt.CppInterpreterWrapper(eg, input_types, output_locs, 0)
    else:
        raise NotImplementedError("invalid backend %s"%backend)
Example 8
File: api.py Project: TZ2016/snn
import rnn
from utils.opt import *
from utils.debug import safe_io
from utils.utilities import NONE


# import traceback
# def _numpy_err_callback(type, flag):
#     print type, flag
#     traceback.print_stack()
#     raise FloatingPointError('refer to _numpy_err_callback for more details')
# np.seterr(divide='call', over='warn', invalid='call', under='warn')
# np.seterrcall(_numpy_err_callback)
# np.set_printoptions(precision=4, suppress=True)
cgt.check_source()  # this line will fail if CGT in use is not TZ2016's fork
print cgt.get_config(True)


def init(args):
    ws = {}
    ws['config'] = copy.deepcopy(args)
    _is_sto = any(_n != 0 for _n in args['num_sto'])
    _is_rec = any(_n != 0 for _n in args['num_mems'])
    assert not (_is_rec and _is_sto), "Stochastic recurrent units not supported"
    net_type = []
    if _is_sto: net_type.append('snn')
    else: net_type.append('dnn')
    if _is_rec: net_type.append('rnn')
    else: net_type.append('fnn')
    ws['type'] = net_type
    # TODO: add in the dbg_out
Example 9
File: api.py Project: TZ2016/snn
import sfnn
import rnn
from utils.opt import *
from utils.debug import safe_io
from utils.utilities import NONE

# import traceback
# def _numpy_err_callback(type, flag):
#     print type, flag
#     traceback.print_stack()
#     raise FloatingPointError('refer to _numpy_err_callback for more details')
# np.seterr(divide='call', over='warn', invalid='call', under='warn')
# np.seterrcall(_numpy_err_callback)
# np.set_printoptions(precision=4, suppress=True)
cgt.check_source()  # this line will fail if CGT in use is not TZ2016's fork
print cgt.get_config(True)


def init(args):
    ws = {}
    ws['config'] = copy.deepcopy(args)
    _is_sto = any(_n != 0 for _n in args['num_sto'])
    _is_rec = any(_n != 0 for _n in args['num_mems'])
    assert not (_is_rec
                and _is_sto), "Stochastic recurrent units not supported"
    net_type = []
    if _is_sto: net_type.append('snn')
    else: net_type.append('dnn')
    if _is_rec: net_type.append('rnn')
    else: net_type.append('fnn')
    ws['type'] = net_type
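Examples 8 and 9 are the same module; the net_type classification in init depends only on whether args['num_sto'] and args['num_mems'] contain any non-zero entry. An illustrative args fragment and the tag list it produces (key names taken from the code above, values made up):

args = {
    "num_sto": [0, 0],   # no stochastic units  -> 'dnn'
    "num_mems": [4, 0],  # some recurrent units -> 'rnn'
}
net_type = []
net_type.append('snn' if any(n != 0 for n in args['num_sto']) else 'dnn')
net_type.append('rnn' if any(n != 0 for n in args['num_mems']) else 'fnn')
print(net_type)  # ['dnn', 'rnn']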