Example #1
def fc(tensor_in,
       output_channels=1024,
       f_dtype=None,
       w_dtype=None,
       act='linear'):
    input_channels = tensor_in.shape[-1]
    weights = get_tensor(shape=(output_channels, input_channels),
                         name='weights',
                         dtype=w_dtype)
    biases = get_tensor(shape=(output_channels, ),
                        name='biases',
                        dtype=FixedPoint(
                            32, w_dtype.frac_bits + tensor_in.dtype.frac_bits))
    _fc = matmul(tensor_in, weights, biases, dtype=f_dtype)

    if act == 'leakyReLU':
        with get_default_graph().name_scope(act):
            act = leakyReLU(_fc, dtype=_fc.dtype)
    elif act == 'linear':
        with get_default_graph().name_scope(act):
            act = _fc
    else:
        raise ValueError('Unknown activation type {}'.format(act))

    return act
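
A minimal usage sketch for fc; every shape and bit-width below is an assumption, with FixedPoint(bits, frac_bits) following the constructor pattern used above:

# Hypothetical call site: a 4096 -> 1024 fully-connected layer with leakyReLU.
inputs = get_tensor(shape=(16, 4096), name='inputs',
                    dtype=FixedPoint(16, 8), trainable=False)
logits = fc(inputs,
            output_channels=1024,
            f_dtype=FixedPoint(16, 8),
            w_dtype=FixedPoint(16, 8),
            act='leakyReLU')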
Example #2
    def __init__(self, node_name, input_tensors=None):
        self.graph = get_default_graph()
        self.op_type = self._get_op_type()
        self.name = self.graph.get_op_name(node_name, self.op_type)

        self.dtype = self._get_output_dtype()

        if input_tensors is None:
            # Default of None would crash the iteration below; treat as no inputs.
            input_tensors = tuple()
        elif isinstance(input_tensors, Tensor):
            input_tensors = (input_tensors, )
        else:
            it = []
            for _it in input_tensors:
                if isinstance(_it, tuple):
                    # Splice nested tuples of tensors into the flat list
                    it.extend(_it)
                else:
                    it.append(_it)
            input_tensors = tuple(it)

        # input_str = ','.join([x.__str__() for x in input_tensors])
        # print('## Creating op with name {} and inputs {}'.format(node_name, input_str))

        self.input_tensors = input_tensors
        self.output_tensors = self._create_output_tensors(self.name)

        self.input_loss = [None] * len(input_tensors)

        self.graph.create_node(self)

        self.incoming_gradients = None
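
The constructor above normalizes its inputs: a lone Tensor becomes a one-element tuple, and tuples inside an iterable are flattened one level. A standalone sketch of that rule, with a hypothetical helper name and plain strings standing in for Tensor objects:

def _flatten_one_level(items):
    # Mirrors the loop above: splice nested tuples, keep everything else.
    flat = []
    for item in items:
        if isinstance(item, tuple):
            flat.extend(item)
        else:
            flat.append(item)
    return tuple(flat)

assert _flatten_one_level(['a', ('b', 'c'), 'd']) == ('a', 'b', 'c', 'd')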
Example #3
def yolo_convolution(tensor_in,
                     filters=32,
                     kernel_size=3,
                     batch_normalize=True,
                     act='leakyReLU',
                     c_dtype=None,
                     w_dtype=None,
                     s_dtype=None,
                     bn_dtype=None):

    input_channels = tensor_in.shape[-1]

    weights = get_tensor(shape=(filters, kernel_size, kernel_size,
                                input_channels),
                         name='weights',
                         dtype=w_dtype)
    biases = get_tensor(shape=(filters, ),
                        name='biases',
                        dtype=FixedPoint(
                            32, w_dtype.frac_bits + tensor_in.dtype.frac_bits))
    conv = conv2D(tensor_in, weights, biases, pad='SAME', dtype=c_dtype)

    if batch_normalize:
        with get_default_graph().name_scope('batch_norm'):
            mean = get_tensor(shape=(filters, ),
                              name='mean',
                              dtype=FixedPoint(16, c_dtype.frac_bits))
            scale = get_tensor(shape=(filters, ), name='scale', dtype=s_dtype)
            bn = batch_norm(conv, mean=mean, scale=scale, dtype=bn_dtype)
    else:
        bn = conv

    if act == 'leakyReLU':
        with get_default_graph().name_scope(act):
            act = leakyReLU(bn, dtype=bn.dtype)
    elif act == 'linear':
        with get_default_graph().name_scope(act):
            act = bn
    else:
        raise ValueError('Unknown activation type {}'.format(act))

    return act
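
A usage sketch chaining yolo_convolution with the maxPool wrapper from Example #6; all shapes and bit-widths here are assumptions:

# Hypothetical YOLO-style stem: conv + batch norm + leakyReLU, then 2x2 pooling.
data = get_tensor(shape=(1, 416, 416, 3), name='data',
                  dtype=FixedPoint(16, 8), trainable=False)
conv0 = yolo_convolution(data, filters=16, kernel_size=3,
                         batch_normalize=True, act='leakyReLU',
                         c_dtype=FixedPoint(16, 8),
                         w_dtype=FixedPoint(16, 8),
                         s_dtype=FixedPoint(16, 8),
                         bn_dtype=FixedPoint(16, 8))
pool0 = maxPool(conv0, pooling_kernel=(1, 2, 2, 1),
                stride=(1, 2, 2, 1), pad='VALID')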
Example #4
def conv2D(i, w, b, name=None, stride=None, pad='SAME', group=1, dtype=None):
    op = Convolution(i,
                     w,
                     b,
                     name,
                     stride=stride,
                     pad=pad,
                     group=group,
                     dtype=dtype)
    return typecast(op.output_tensors, dtype)
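
Every wrapper in these examples funnels the op's raw output through typecast. Its implementation is not shown here; the sketch below is only a guess at the contract (typecast_sketch and TypeCastOp are illustrative names, not library code):

def typecast_sketch(tensor, dtype):
    # Assumed behavior: pass through when no target dtype is requested or
    # it already matches; otherwise insert a cast op into the graph.
    if dtype is None or tensor.dtype == dtype:
        return tensor
    return TypeCastOp(tensor, dtype).output_tensors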
Example #5
def conv(tensor_in,
         filters=32,
         stride=None,
         kernel_size=3,
         pad='SAME',
         c_dtype=None,
         w_dtype=None,
         act='linear'):

    if stride is None:
        stride = (1, 1, 1, 1)

    input_channels = tensor_in.shape[-1]

    weights = get_tensor(shape=(filters, kernel_size, kernel_size,
                                input_channels),
                         name='weights',
                         dtype=w_dtype)
    biases = get_tensor(shape=(filters, ),
                        name='biases',
                        dtype=FixedPoint(
                            32, w_dtype.frac_bits + tensor_in.dtype.frac_bits))
    _conv = conv2D(tensor_in,
                   weights,
                   biases,
                   stride=stride,
                   pad=pad,
                   dtype=c_dtype)

    if act == 'leakyReLU':
        with get_default_graph().name_scope(act):
            act = leakyReLU(_conv, dtype=_conv.dtype)
    elif act == 'linear':
        with get_default_graph().name_scope(act):
            act = _conv
    else:
        raise ValueError('Unknown activation type {}'.format(act))

    return act
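
A sketch wiring conv, flatten (Example #9), and fc into a small classification head; shapes and dtypes are assumptions:

x = get_tensor(shape=(1, 32, 32, 3), name='x',
               dtype=FixedPoint(16, 8), trainable=False)
y = conv(x, filters=64, kernel_size=3, stride=(1, 1, 1, 1), pad='SAME',
         c_dtype=FixedPoint(16, 8), w_dtype=FixedPoint(16, 8),
         act='leakyReLU')
flat = flatten(y)
out = fc(flat, output_channels=10,
         f_dtype=FixedPoint(16, 8), w_dtype=FixedPoint(16, 8))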
Example #6
def maxPool(i,
            pooling_kernel,
            stride=(1, 2, 2, 1),
            pad='VALID',
            name=None,
            dtype=None):
    op = MaxPooling(i,
                    pooling_kernel,
                    name,
                    stride=stride,
                    pad=pad,
                    dtype=dtype)
    return typecast(op.output_tensors, dtype)
Example #7
    def __init__(self, node_name, dtype=None, input_tensors=None):
        if dtype is None:
            dtype = get_default_graph().grad_dtype

        super(GradOp, self).__init__(node_name, dtype, input_tensors)
Example #8
def matmul(i, w, b, name=None, dtype=None):
    op = MatMul(i, w, b, name=name, dtype=dtype)
    return typecast(op.output_tensors, dtype)
Example #9
def flatten(i, name=None, dtype=None):
    op = Flatten(i, name)
    return typecast(op.output_tensors, dtype)
Example #10
def addBias(i, b, dim, name=None, dtype=None):
    op = AddBias(i, b, dim, name, dtype=dtype)
    return typecast(op.output_tensors, dtype)
Example #11
def get_tensor(shape, name=None, dtype=FQDtype.FP32, trainable=True, data=None):
    g = get_default_graph()
    return g.tensor(shape=shape, name=name, dtype=dtype, trainable=trainable, data=data)
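
A short usage note: get_tensor defaults to FQDtype.FP32 and trainable=True, so graph inputs and constants typically pass trainable=False and, where values are known, data. The numpy feeding below is an assumption about how data is consumed:

import numpy as np

# Trainable weight with the default FP32 dtype.
w = get_tensor(shape=(64, 64), name='w')
# Non-trainable input carrying concrete values (data= semantics assumed).
x = get_tensor(shape=(1, 64), name='x', trainable=False,
               data=np.zeros((1, 64), dtype=np.float32))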