Exemplo n.º 1
0
 def quantize(self,tensor):
     """Quantize *tensor* to half-precision via the custom kernel.

     Delegates to ``Wrapped.quant_halffp``, which truncates (cuts off)
     mantissa bits rather than rounding them.

     NOTE(review): a pure-TF alternative — ``tf.cast(tf.cast(tensor,
     tf.float16), tf.float32)``, which *rounds* the mantissa — was
     rejected because under slim it produced two extra layers at once
     (float16 and float32); sparsity estimation tried to take both
     and failed.
     """
     quantized = Wrapped.quant_halffp(tensor)
     return quantized
Exemplo n.º 2
0
 def quantize(self,tensor):
     """Sparsify *tensor* using the configured threshold.

     Delegates to ``Wrapped.quant_sparse`` with ``self.threshold``;
     presumably small-magnitude entries are zeroed — confirm against
     the kernel implementation.
     """
     thr = self.threshold
     return Wrapped.quant_sparse(tensor, thr)
Exemplo n.º 3
0
 def quantize(self,tensor):
     """Apply logarithmic quantization to *tensor*.

     Thin wrapper around the ``Wrapped.quant_log`` kernel; see that
     implementation for the exact quantization scheme.
     """
     result = Wrapped.quant_log(tensor)
     return result