def __init__(self, node_name, cname, pool_params, pool_q, act_params, act_q, code_block=None, at_ver=3, gen_ctrl=None):
    """Collect generator parameters for a fused pool + activation kernel.

    Builds the AutoTiler pool/activation parameter records from the node
    parameters and their quantization records, records dimensions and
    qtypes on ``self``, and configures ``self.gen_ctrl`` (ReluN clamping
    on pre-v3 AutoTiler, pad-type override).

    Raises NotImplementedError when input/output bit widths differ or
    when there is no pool stage (activation alone is not handled here).

    NOTE(review): ``code_block`` is defaulted to a fresh ``CodeBlock()``
    but never used afterwards in this method — confirm it is still needed.
    """
    # Reuse the caller-supplied generator-control object when given,
    # otherwise create a fresh one bound to this kernel's C name.
    if gen_ctrl is None:
        self.gen_ctrl = GenCtrl(None, cname=cname)
    else:
        gen_ctrl.cname = cname
        self.gen_ctrl = gen_ctrl
    in_q = out_q = None
    in_dim = out_dim = None
    # presumably filled in by gen_pool_at_params with the pool's padding
    # constraints — verify against its definition
    pad_compatibilities = []
    if pool_params is not None:
        at_pool_params = gen_pool_at_params(pool_params, pad_compatibilities)
        if in_dim is None:
            in_dim = pool_params.in_dims[0]
        out_dim = pool_params.out_dims[0]
        if in_q is None:
            in_q = pool_q.in_qs[0]
        out_q = pool_q.out_qs[0]
    else:
        at_pool_params = NO_POOL
    if act_params is not None:
        at_act_params = gen_active_at_params(act_params)
        # The activation only supplies dims/qrecs not already provided by
        # the pool stage; out_q is always taken from the last stage.
        if in_dim is None:
            in_dim = act_params.in_dims[0]
        if out_dim is None:
            out_dim = act_params.out_dims[0]
        if in_q is None:
            in_q = act_q.in_qs[0]
        out_q = act_q.out_qs[0]
        if at_ver < 3:
            # Pre-v3 AutoTiler: express relu6 as a ReluN clamp shifted into
            # the output's fixed-point (Q) format.
            if act_params.activation == "relu6" and out_q.q != 0:
                self.gen_ctrl.ReluN = 6 << out_q.q
                self.gen_ctrl.ReluNNoNorm = 1
        else:
            # v3+: relun carries its clamp value in the node parameters.
            if act_params.activation == "relun":
                self.gen_ctrl.ReluN = act_params.activation_params
    else:
        at_act_params = NO_ACTIVATION
    if code_block is None:
        code_block = CodeBlock()
    if pad_compatibilities:
        reduction = PadDim.pad_compatibility_reduce(*pad_compatibilities,
                                                    "convolution padding is not compatible with pool padding")
        if not reduction[2]:  # default is balanced pad left
            # first compatible padding scheme becomes the pad control value
            at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
            self.gen_ctrl.PadType = at_pad_ctrl
    if in_q.bits != out_q.bits:
        raise NotImplementedError("only homogenious operations are supported at present")
    if at_pool_params == NO_POOL:
        raise NotImplementedError(
            "activation layer on its own should not be matched by this kernel")
    self.at_pool_params = at_pool_params
    self.in_dim = in_dim
    self.out_dim = out_dim
    self.in_q = in_q
    self.out_q = out_q
    self.at_act_params = at_act_params
    self.cname = cname
    self.node_name = node_name
    self.at_ver = at_ver
def __init__(self, node_name, cname, conv_params, conv_q,
             pool_params, pool_q, act_params, act_q, at_ver=3, gen_ctrl=None):
    """Build the kernel-generator attributes for a fused conv/pool/activation node.

    Gathers dimensions and quantization records from whichever of the three
    stages are present (conv takes precedence, then pool, then activation),
    reduces padding compatibility onto ``gen_ctrl``, selects the AutoTiler
    call template, and passes the assembled ``attrs``/``extra_attrs`` to the
    base generator via ``super().__init__``.

    Raises NotImplementedError for a pool/activation-only node whose input
    and output bit widths differ, and ValueError for an unknown conv
    parameter type.
    """
    # Reuse the caller-supplied generator-control object when given,
    # otherwise create a fresh one bound to this kernel's C name.
    if gen_ctrl is None:
        gen_ctrl = GenCtrl(None, cname=cname)
    else:
        gen_ctrl.cname = cname
    in_q = filter_q = out_q = bias_q = mul_biases_q = None
    in_dim = out_dim = None
    # presumably populated by the gen_*_at_params helpers with each stage's
    # padding constraints — verify against their definitions
    pad_compatibilities = []
    if conv_params is not None:
        at_conv_params = gen_conv_at_params(
            conv_params, conv_q, pad_compatibilities)
        in_dim = conv_params.in_dims[0]
        out_dim = conv_params.out_dims[0]
        filter_q = conv_q.in_qs[1]
        in_q = conv_q.in_qs[0]
        out_q = conv_q.out_qs[0]
        bias_q = conv_q.in_qs[2]
        if conv_params.has_mul_bias:
            mul_biases_q = conv_q.mul_biases_q
    else:
        at_conv_params = NO_CONV
    if pool_params is not None:
        at_pool_params = gen_pool_at_params(
            pool_params, pad_compatibilities)
        # Pool only supplies dims/qrecs the conv stage did not already set;
        # out_q/out_dim always come from the last stage present.
        if in_dim is None:
            in_dim = pool_params.in_dims[0]
        out_dim = pool_params.out_dims[0]
        if in_q is None:
            in_q = pool_q.in_qs[0]
        out_q = pool_q.out_qs[0]
    else:
        at_pool_params = NO_POOL
    if act_params is not None:
        at_act_params = gen_active_at_params(act_params)
        if in_dim is None:
            in_dim = act_params.in_dims[0]
        if out_dim is None:
            out_dim = act_params.out_dims[0]
        if in_q is None:
            in_q = act_q.in_qs[0]
        out_q = act_q.out_qs[0]
        if at_ver < 3:
            # Pre-v3 AutoTiler: express relu6 as a ReluN clamp shifted into
            # the output's fixed-point (Q) format.
            if act_params.activation == "relu6" and out_q.q != 0:
                gen_ctrl.ReluN = 6 << out_q.q
                gen_ctrl.ReluNNoNorm = 1
        else:
            # v3+: relun carries its clamp value in the node parameters.
            if act_params.activation == "relun":
                gen_ctrl.ReluN = act_params.activation_params
    else:
        at_act_params = NO_ACTIVATION
    if pad_compatibilities:
        reduction = PadDim.pad_compatibility_reduce(
            *pad_compatibilities,
            "convolution padding is not compatible with pool padding")
        if not reduction[2]:  # default is balanced pad left
            at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
            LOG.debug("%s: generating pad control block", node_name)
            gen_ctrl.PadType = at_pad_ctrl
    attrs = {
        'in_qtype': in_q,
        'out_qtype': out_q,
        'filter_qtype': filter_q,
        'bias_qtype': bias_q,
        'mul_biases_qtype': mul_biases_q,
        'relu_oper': at_act_params.ReLUOper
    }
    if at_pool_params.PoolOper != 'KOP_NONE':
        attrs.update({
            'pool_oper': at_pool_params.PoolOper,
            'pool_w': at_pool_params.Fpx,
            'pool_h': at_pool_params.Fpy,
            'pool_d_w': at_pool_params.Dpx,
            'pool_d_h': at_pool_params.Dpy,
            'pool_s_w': at_pool_params.Spx,
            'pool_s_h': at_pool_params.Spy,
            'pool_pad': at_pool_params.PoolPad
        })
    else:
        attrs.update({
            'pool_oper': 'KOP_NONE',
            'pool_w': 0,
            'pool_h': 0,
            'pool_d_w': 0,
            'pool_d_h': 0,
            'pool_s_w': 0,
            'pool_s_h': 0,
            'pool_pad': 0
        })
    if at_conv_params == NO_CONV:
        if in_q.dtype_bits != out_q.dtype_bits:
            raise NotImplementedError(
                "only homogenious operations are supported at present")
        LOG.debug("%s: pool relu inq %s outq %s control block",
                  node_name, in_q, out_q)
        if at_pool_params.PoolOper == 'KOP_NONE' and (not in_dim.is_named or not in_dim.has_keys(['c', 'w', 'h'])):
            # Unnamed/partial dims: pad the shape out to rank 3 before use.
            in_shape = in_dim.shape + ([1] * (3 - len(in_dim.shape)))
            in_c, in_h, in_w = in_shape[0], in_shape[1], in_shape[2]
        else:
            in_c, in_h, in_w = in_dim.c, in_dim.h, in_dim.w
        if out_dim.is_named and out_dim.has_key('c'):
            out_c = out_dim.c
        else:
            out_c = in_c
        attrs.update({
            'in_c': in_c,
            'in_h': in_h,
            'in_w': in_w,
            'out_c': out_c,
            'conv_oper': 'KOP_NONE'
        })
        self.template = 'CALL_TEMPLATE_POOL_RELU'
    else:
        # swap w and h if w and filter w is 1 so generator sees 1D conv
        if in_dim.w == 1 and at_conv_params.Fcx == 1:
            attrs.update({
                'in_c': in_dim.c,
                'in_h': 1,
                'in_w': in_dim.h,
                'out_c': out_dim.c,
                'conv_oper': at_conv_params.ConvOper,
                'conv_w': at_conv_params.Fcy,
                'conv_h': 1,
                'conv_d_w': at_conv_params.Dcy,
                'conv_d_h': at_conv_params.Dcx,
                'conv_s_w': at_conv_params.Scy,
                'conv_s_h': at_conv_params.Scx,
                'conv_pad': at_conv_params.ConvPad
            })
        else:
            attrs.update({
                'in_c': in_dim.c,
                'in_h': in_dim.h,
                'in_w': in_dim.w,
                'out_c': out_dim.c,
                'conv_oper': at_conv_params.ConvOper,
                'conv_w': at_conv_params.Fcx,
                'conv_h': at_conv_params.Fcy,
                'conv_d_w': at_conv_params.Dcx,
                'conv_d_h': at_conv_params.Dcy,
                'conv_s_w': at_conv_params.Scx,
                'conv_s_h': at_conv_params.Scy,
                'conv_pad': at_conv_params.ConvPad
            })
        if isinstance(at_conv_params, ConvATParam):
            if mul_biases_q is not None:
                LOG.debug("%s: mulconv pool relu inq %s outq %s control block",
                          node_name, in_q, out_q)
                self.template = 'CALL_TEMPLATE_MULBIAS_CONV_POOL_RELU'
            else:
                LOG.debug("%s: conv pool relu inq %s outq %s control block",
                          node_name, in_q, out_q)
                self.template = 'CALL_TEMPLATE_CONV_POOL_RELU'
        elif isinstance(at_conv_params, GroupedConvATParam):
            attrs.update({
                'group_in': at_conv_params.GroupIn,
                'group_out': at_conv_params.GroupOut
            })
            # BUGFIX: these two debug messages were swapped relative to the
            # templates they accompany (the mul-bias path logged
            # "grouped conv" and the plain path logged "grouped mulconv");
            # they now mirror the ConvATParam branch above.
            if mul_biases_q is not None:
                LOG.debug("%s: grouped mulconv pool relu inq %s outq %s control block",
                          node_name, in_q, out_q)
                self.template = 'CALL_TEMPLATE_GROUPED_MULBIAS_CONV_POOL_RELU'
            else:
                LOG.debug("%s: grouped conv pool relu inq %s outq %s control block",
                          node_name, in_q, out_q)
                self.template = 'CALL_TEMPLATE_GROUPED_CONV_POOL_RELU'
        else:
            raise ValueError('Internal error')
    # other attributes
    extra_attrs = {
        'cname': cname,
        'node_name': node_name
    }
    super().__init__(attrs, extra_attrs, gen_ctrl=gen_ctrl)
def __init__(self, node_name, cname, conv_params, conv_q, pool_params, pool_q, act_params, act_q, at_ver=3, gen_ctrl=None):
    """Collect generator parameters for a fused conv + pool + activation kernel.

    Pulls dimensions and quantization records from whichever stages are
    present (conv takes precedence, then pool, then activation), applies
    version-dependent ReluN handling and pad-compatibility reduction to
    ``self.gen_ctrl``, and stores everything on ``self`` for the code
    generator.
    """
    if gen_ctrl is None:
        # Fresh control object bound to this kernel's C name; also bound to
        # the local so later code can use one name for both paths.
        self.gen_ctrl = gen_ctrl = GenCtrl(None, cname=cname)
    else:
        gen_ctrl.cname = cname
        self.gen_ctrl = gen_ctrl
    in_q = filter_q = out_q = bias_q = mul_biases_q = None
    in_dim = out_dim = None
    # presumably populated by the gen_*_at_params helpers with each stage's
    # padding constraints — verify against their definitions
    pad_compatibilities = []
    if conv_params is not None:
        at_conv_params = gen_conv_at_params(conv_params, conv_q, pad_compatibilities)
        in_dim = conv_params.in_dims[0]
        out_dim = conv_params.out_dims[0]
        filter_q = conv_q.weights_q
        in_q = conv_q.in_qs[0]
        out_q = conv_q.out_qs[0]
        bias_q = conv_q.biases_q
        if conv_params.has_mul_bias:
            mul_biases_q = conv_q.mul_biases_q
    else:
        at_conv_params = NO_CONV
    if pool_params is not None:
        at_pool_params = gen_pool_at_params(pool_params, pad_compatibilities)
        # Pool only supplies dims/qrecs the conv stage did not already set;
        # out_q/out_dim always come from the last stage present.
        if in_dim is None:
            in_dim = pool_params.in_dims[0]
        out_dim = pool_params.out_dims[0]
        if in_q is None:
            in_q = pool_q.in_qs[0]
        out_q = pool_q.out_qs[0]
    else:
        at_pool_params = NO_POOL
    if act_params is not None:
        at_act_params = gen_active_at_params(act_params)
        if in_dim is None:
            in_dim = act_params.in_dims[0]
        if out_dim is None:
            out_dim = act_params.out_dims[0]
        if in_q is None:
            in_q = act_q.in_qs[0]
        out_q = act_q.out_qs[0]
        if at_ver < 3:
            # Pre-v3 AutoTiler: express relu6 as a ReluN clamp shifted into
            # the output's fixed-point (Q) format.
            if act_params.activation == "relu6" and out_q.q != 0:
                self.gen_ctrl.ReluN = 6 << out_q.q
                self.gen_ctrl.ReluNNoNorm = 1
        else:
            # v3+: relun carries its clamp value in the node parameters.
            if act_params.activation == "relun":
                self.gen_ctrl.ReluN = act_params.activation_params
    else:
        at_act_params = NO_ACTIVATION
    if pad_compatibilities:
        reduction = PadDim.pad_compatibility_reduce(
            *pad_compatibilities,
            "convolution padding is not compatible with pool padding")
        if not reduction[2]:  # default is balanced pad left
            at_pad_ctrl = next(i for i, v in enumerate(reduction) if v)
            LOG.debug("%s: generating pad control block", node_name)
            self.gen_ctrl.PadType = at_pad_ctrl
    self.in_dim = in_dim
    self.out_dim = out_dim
    self.in_q = in_q
    self.bias_q = bias_q
    self.out_q = out_q
    self.filter_q = filter_q
    self.mul_biases_q = mul_biases_q
    self.at_act_params = at_act_params
    self.at_pool_params = at_pool_params
    self.at_conv_params = at_conv_params
    self.cname = cname
    self.node_name = node_name
    self.at_ver = at_ver