Example 1
    def __init__(self, shape, filter_size, num_features, forget_bias=1.0,
                 input_size=None, state_is_tuple=False, act=tf.nn.tanh):
        """Initialize the basic Conv LSTM cell."""
        # if not state_is_tuple:
        #     logging.warn("%s: Using a concatenated state is slower and will soon be "
        #                  "deprecated.  Use state_is_tuple=True.", self)
        if input_size is not None:
            logging.warn("%s: The input_size parameter is deprecated.", self)
        self.shape = shape
        self.filter_size = filter_size
        self.num_features = num_features
        self._forget_bias = forget_bias
        self._state_is_tuple = state_is_tuple
        self._activation = act
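
The constructor above only records configuration; no variables are created at this point. Below is a minimal, self-contained usage sketch: the class name BasicConvLSTMCell, the wrapper class body, and the concrete argument values are illustrative assumptions, not part of the listing above.

    import logging

    import tensorflow as tf


    class BasicConvLSTMCell(object):
        """Illustrative wrapper around the __init__ shown in Example 1 (class name assumed)."""

        def __init__(self, shape, filter_size, num_features, forget_bias=1.0,
                     input_size=None, state_is_tuple=False, act=tf.nn.tanh):
            if input_size is not None:
                logging.warning("%s: The input_size parameter is deprecated.", self)
            self.shape = shape                # spatial (height, width) of the feature map
            self.filter_size = filter_size    # convolution kernel size
            self.num_features = num_features  # number of hidden channels
            self._forget_bias = forget_bias
            self._state_is_tuple = state_is_tuple
            self._activation = act


    # Build a cell over 32x32 feature maps with a 3x3 kernel and 64 hidden channels.
    cell = BasicConvLSTMCell(shape=[32, 32], filter_size=[3, 3], num_features=64)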
Example 2
    def __init__(self, network, idx, quant_func, dataset):
        logging.info("### init DeQuant_Layer")
        self.type = EL_DEQUANTIZE
        self.typename = "EL_DEQUANTIZE"
        layer = network.all_layers[idx]
        shape = layer._nodes[0].out_tensors[0].shape
        self.count = get_tensor_size(shape)

        if self.count > 256 * 1024:
            logging.warn(
                "output > 1MB of data; assuming debug usage, truncating to 1MB")
            self.count = 256 * 1024

        minv, maxv, _ = quant_func(network, layer, dataset)
        self.scale, self.bias = min_max_to_scale_bias(minv, maxv)
        self.memsize = self.count * (4 + 1)
        self.outsize = self.count * 4

        logging.info("### dequant layer: count=%d, scale=%f, bias=%f" %
                     (self.count, self.scale, self.bias))
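
For context, min_max_to_scale_bias converts the (minv, maxv) range returned by quant_func into the affine dequantization parameters logged above. The sketch below shows one common way such a mapping is derived for 8-bit data (real ≈ scale * q + bias); the body is an assumption for illustration only, not the project's actual implementation.

    def min_max_to_scale_bias(minv, maxv, levels=256):
        # Hypothetical affine mapping: real_value ~= scale * q + bias, with q in [0, levels - 1].
        # The real min_max_to_scale_bias used by DeQuant_Layer may differ.
        scale = (maxv - minv) / (levels - 1)  # real-valued step per quantization level
        bias = minv                           # real value represented by q == 0
        return scale, bias


    scale, bias = min_max_to_scale_bias(-1.0, 1.0)
    print("scale=%f, bias=%f" % (scale, bias))  # scale~=0.007843, bias=-1.0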