Code example #1
    def __init__(
        self,
        in_features,
        out_features,
        bias=True,
        eps=1e-5,
        n_iter=5,
        momentum=0.1,
        block=512,
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        # tf.Tensor cannot be constructed directly; allocate the parameters with
        # tf.zeros and let reset_parameters() fill in the actual initial values.
        self.weight = tf.Variable(tf.zeros([out_features, in_features]))
        if bias:
            self.bias = tf.Variable(tf.zeros([out_features]))
        else:
            self.bias = None
        self.reset_parameters()

        if block > in_features:
            block = in_features
        else:
            if in_features % block != 0:
                block = math.gcd(block, in_features)
                logger.info(f"block size set to: {block}")
        self.block = block
        self.momentum = momentum
        self.n_iter = n_iter
        self.eps = eps

        self.running_mean = tf.zeros(self.block)
        self.running_deconv = tf.eye(self.block)
Code example #2
    def finalize(self):
        data = {}
        for key, val in self.data.items():
            try:
                arr = tf.convert_to_tensor(val).numpy()
            except Exception:
                # val is a list of tensors; convert each element to NumPy first
                arr = tf.convert_to_tensor([i.numpy() for i in val]).numpy()

            data[key] = arr

        self.data = data
        return data
Code example #3
def transform_fc_offline(tensor, exponent, mantissa, opt_exp_list):
    # "Offline" means the shared exponent is fixed:
    #      it is determined during the pre-inference pass
    # Quantize the activation tensor along the channel dimension
    # The input tensor is required to have the shape [batch, channel]
    # opt_exp_list: the shared exponent list for offline quantization
    shp = tensor.shape
    #print ("shape1:", shp[1], " opt_exp_list:", len(opt_exp_list))
    number_of_blocks = len(opt_exp_list)
    block_size = shp[1] // len(opt_exp_list)
    opt_exp_list = tf.convert_to_tensor(opt_exp_list)  # .cuda() is torch-specific; TF handles placement
    #print ("shp:", shp)
    #print ("opt_exp_list:", len(opt_exp_list))
    if shp[1] % block_size == 0:
        # shp[1] is divisible by block size
        # Therefore just one tensor will be created
        tensor = tf.reshape(tensor, (shp[0], number_of_blocks, block_size))
        opt_exp_list = tf.expand_dims(opt_exp_list, 0)  ##### Need Unit test
        tensor = to_exponent_mantissa_width(tensor,
                                            opt_exp_list,
                                            mantissa,
                                            quant_dim=len(tf.shape(tensor)) -
                                            1)
        tensor = tf.reshape(tensor, shp)
    else:
        raise ValueError(
            "Channel is not divisible by the channel group while BFP-quantizing the FC layer"
        )

    return tensor
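Note: the helper to_exponent_mantissa_width is not included in these snippets. Below is a minimal, hypothetical sketch (not the project's actual implementation) of what blockwise shared-exponent (BFP) quantization of a single block is assumed to look like: every value is rounded to `mantissa` bits relative to a fixed shared exponent and clamped to the representable range.

import tensorflow as tf

def shared_exponent_quantize(block, shared_exp, mantissa):
    # Illustrative only: quantize one block of values against a fixed shared exponent.
    step = tf.pow(2.0, float(shared_exp) - (mantissa - 1))  # smallest representable step
    max_val = tf.pow(2.0, float(shared_exp) + 1.0) - step   # largest representable magnitude
    quantized = tf.round(block / step) * step
    return tf.clip_by_value(quantized, -max_val, max_val)

# Example: one block of 4 activations sharing exponent 2, kept to 4 mantissa bits
x = tf.constant([0.3, 1.7, -2.2, 5.9])
print(shared_exponent_quantize(x, shared_exp=2, mantissa=4))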
Code example #4
def transform_activation_offline_3d(tensor, exponent, mantissa, opt_exp_list):
    # "Offline" means the shared exponent is fixed:
    #      it is determined during the pre-inference pass
    # Quantize the activation tensor along the channel dimension
    # The input is a 5-D tensor, e.g. [batch, channel, frame, height, width]
    # opt_exp_list: the shared exponent list (one per frame) for offline quantization
    shp = tensor.shape
    #print ("shape1:", shp[1], " opt_exp_list:", len(opt_exp_list))
    num_frame = shp[2]
    assert len(opt_exp_list) == num_frame
    chnl_group = shp[1] // len(opt_exp_list)
    number_of_blocks = math.ceil(
        shp[1] /
        chnl_group) * num_frame  # use a different exponent for each frame
    opt_exp_list = tf.convert_to_tensor(opt_exp_list)  # .cuda() is torch-specific
    if shp[1] % chnl_group == 0:
        # shp[1] is divisible by block size
        # Therefore just one tensor will be created
        tensor = tf.reshape(
            tensor, (shp[0], number_of_blocks, chnl_group * shp[2] * shp[3]))
        opt_exp_list = tf.expand_dims(opt_exp_list, 0)  ##### Need Unit test
        tensor = to_exponent_mantissa_width(tensor,
                                            opt_exp_list,
                                            mantissa,
                                            quant_dim=len(tf.shape(tensor)) -
                                            1)
        tensor = tf.reshape(tensor, (shp[0], shp[1], shp[2], shp[3], shp[4]))
        return tensor

    else:
        raise NotImplementedError

    return tensor
Code example #5
def get_outer(pic):
    # Collect the three crops in a list; tf.Tensor() cannot be instantiated directly,
    # and concatenating onto an empty tensor would fail anyway.
    parts = []
    for x in ['top', 'bottom_left', 'bottom_right']:
        image = data_root / x / pic
        img_raw = tf.read_file(str(image))  # tf.read_file expects a string path
        img_tensor = tf.image.decode_image(img_raw)
        parts.append(img_tensor)
    return tf.concat(parts, 1)
Code example #6
def get_to_restore_graph(tensor_list, exclude):
    """
    Build a dictionary of variables for restoring the graph
    :param tensor_list: A list of tensors imported from a pb file
    :param exclude: Names of tensors to exclude.
    :return: dict <str:Tensor>
    """
    res = {}
    for n in tensor_list:
        n: tf.Tensor
        tensor_op_name = n.op.node_def.op
        tensor_index = n.value_index
        tensor_dtype = n.dtype
        print(tensor_index)
        if n.name not in exclude and tensor_op_name == "Variable":
            res[n.name[:-2]] = tf.Tensor(n.op, tensor_index, tensor_dtype)
    print(res)
    return res
Code example #7
    def __init__(self,
                 block,
                 eps=1e-2,
                 n_iter=5,
                 momentum=0.1,
                 sampling_stride=3):
        super().__init__()

        self.eps = eps
        self.n_iter = n_iter
        self.momentum = momentum
        self.block = block

        # tf.zeros/tf.ones take the shape as a single list argument
        self.running_mean1 = tf.zeros([block, 1])
        self.running_deconv = tf.eye(block)
        self.running_mean2 = tf.zeros([1, 1])
        self.running_var = tf.ones([1, 1])
        # a counter should be a Variable rather than a directly constructed tf.Tensor
        self.num_batches_tracked = tf.Variable(0, dtype=tf.uint32, trainable=False)

        self.sampling_stride = sampling_stride
Code example #8
def createFIFunc(
        operation,  # type: tf.Operation
        inputs,  # type: List[tf.Tensor]
        outputTypes,  # type:  List[tf.dtypes.DType]
        name,  # type: str
):  # type: (...) -> List[tf.Tensor]
    """Create a tensorflow operation representing a fault injection node"""
    # print "\nCreating FIfunc with ", opType, inputs, outputTypes, name

    fiFunc = None

    # Check the opType and return the corresponding function as Fifunc
    if operation.type == "Cast":
        # We have to special case Cast as it's expected to "remember" its type
        # This could be due to a bug in TensorFlow (at least it's not documented)
        fiFunc = injectFault.createInjectFaultCast(outputTypes[0])
    elif operation.type in injectFault.opTable:
        # Lookup the opTable and return the corresponding function (injectFault...)
        # This is the default case if there's an injectFault for the function
        fiFunc = injectFault.opTable[operation.type]
    else:
        # It's not a known operation, do not inject.
        logging.warning(
            "Operation {} should either be added to "
            "`excludeOps` or have its injection implemented.".format(
                operation.type))
        if len(outputTypes) == 0:
            fiFunc = injectFault.opTable["Unknown"]
        else:
            # This is a temporary fix.
            return [tf.Tensor(operation, value_index=0, dtype=outputTypes[0])]

    # fiFunc should have been initialized (fiFunc != None)
    if fiFunc is None:
        raise ValueError("Unknown operation : " + str(operation.type))

    # Create a new TensorFlow operator with the corresponding fault injection function
    res = tf.numpy_function(fiFunc, inputs, outputTypes, name=name)
    # print "NewOp = ", res

    return res
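Note: the fault-injection table itself is project-specific, but the wrapping mechanism above is plain tf.numpy_function. A minimal, self-contained sketch (with a stand-in injector, not the real injectFault table) of how a Python function becomes a graph op:

import numpy as np
import tensorflow as tf

def fake_inject(x):
    # Stand-in "fault injector": simply flips the sign of the input values.
    return (-x).astype(np.float32)

inp = tf.constant([1.0, 2.0, 3.0])
out = tf.numpy_function(fake_inject, [inp], tf.float32, name="fake_fi")
print(out)  # tf.Tensor([-1. -2. -3.], shape=(3,), dtype=float32)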
Code example #9
#12. Element-wise product of two tensors
c_3 = tf.multiply(a, a)

#13. Transpose a tensor
c_4 = tf.linalg.matrix_transpose(a)

#14_1. Reshape a 12-element tensor into one with 3 rows
b = tf.linspace(1.0, 10.0, 12)
c_5 = tf.reshape(b, [3, 4])

#14_2. Method 2 (infer the second dimension with -1)
c_6 = tf.reshape(b, [3, -1])

# Print the results
print(c_1, c_2, c_3, c_4, c_5, c_6)
'''Output
tf.Tensor(
[[2 4]
 [6 8]], shape=(2, 2), dtype=int32) 
 tf.Tensor(
[[ 7 10]
 [15 22]], shape=(2, 2), dtype=int32) 
 tf.Tensor(
[[ 1  4]
 [ 9 16]], shape=(2, 2), dtype=int32) 
 tf.Tensor(
[[1 3]
 [2 4]], shape=(2, 2), dtype=int32) 
 tf.Tensor(
[[ 1.         1.8181818  2.6363635  3.4545455]
 [ 4.272727   5.090909   5.909091   6.7272725]
Code example #10
@Created: 2021/11/27
@Modified: 2021/11/27
@Author: jzj
@Function: tf.Module
"""

import tensorflow as tf
from tensorflow.keras import layers


def add(a, b):
    return a + b


# tf.Tensor cannot be constructed directly; use tf.constant instead
a1 = tf.constant(1, dtype=tf.int64)
a2 = tf.constant(1, dtype=tf.int64)


class Dense(tf.Module):
    def __init__(self, input_dim, output_size, name=None):
        super().__init__(name=name)
        self.w = tf.Variable(
            tf.random.normal(shape=[input_dim, output_size]), name="w"
        )
        self.b = tf.Variable(tf.zeros([output_size]), name="b")

    def __call__(self, x):
        y = tf.matmul(x, self.w) + self.b
        return tf.nn.relu(y)
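A short usage sketch for the Dense module above (the shapes and names here are just illustrative):

d = Dense(input_dim=3, output_size=2, name="demo_dense")
x_in = tf.random.normal([4, 3])           # batch of 4 samples, 3 features each
print(d(x_in).shape)                      # -> (4, 2)
print([v.shape for v in d.trainable_variables])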
Code example #11
 def setup_predict(self):
     # tf.Tensor() cannot be instantiated directly; start from an empty float constant
     self.predict_seed = tf.constant([], dtype=tf.float32)
Code example #12
    def forward(self, input_data):
        """Simulates the network for the number of time steps specified in self.simulation_time
        Args:
            input_data: tensor

        Returns:
             potentials (or spikes depending on the output method) of the output neurons"""

        # If two_input_neurons is True, double the input size: the first half of the
        # values carries the positive inputs and the second half the (sign-flipped) negative inputs.
        if self.two_input_neurons:
            positive = input_data * tf.cast(input_data > 0, input_data.dtype)
            negative = -input_data * tf.cast(input_data < 0, input_data.dtype)
            input_data = tf.concat([positive, negative], axis=-1)

        # reshape the input into the form (batch_size, input_dimension) rather than (input_dimension,)
        if input_data.shape == (self.input_size, ) or len(input_data.shape) == 1:
            input_data = tf.reshape(input_data, (1, self.input_size))
        batch_size = input_data.shape[0]

        if self.add_bias_as_observation:
            bias = tf.ones((batch_size, 1), dtype=tf.float32)
            input_data = tf.concat((input_data, bias), axis=1)

        # reset the array for membrane potential and synaptic variables
        syn = []
        mem = []
        for l in range(0, len(self.weights)):
            syn.append(
                tf.zeros((batch_size, self.weights[l].shape[1]),
                         dtype=tf.float32))
            mem.append(
                tf.zeros((batch_size, self.weights[l].shape[1]),
                         dtype=tf.float32))

        # Here we define two lists which we use to record the membrane potentials and output spikes
        mem_rec = []
        spk_rec = []
        # Additionally we define a list that counts the spikes in the output layer, if the output method 'spikes' is used
        if self.decoding == 'spikes':
            spk_count = tf.zeros((batch_size, self.weights[-1].shape[1]),
                                 dtype=tf.float32)  # keep the same dtype as the spike outputs

        if self.encoding == 'equidistant':
            # spike counter is used to count the number of spike for each input neuron so far
            spike_counter = tf.ones_like(input_data)
            fixed_distance = 1 / input_data

        # Here we loop over time
        for t in range(self.simulation_time):
            # append the new timestep to mem_rec and spk_rec
            mem_rec.append([])
            spk_rec.append([])

            if self.encoding == 'constant':
                input = tf.identity(input_data)  # detach().clone() is torch-specific
            elif self.encoding == 'poisson':
                # generate Poisson (rate-coded) spikes: spike with probability input_data
                spike_snapshot = tf.convert_to_tensor(
                    np.random.uniform(low=0, high=1, size=input_data.shape),
                    dtype=input_data.dtype)
                input = tf.cast(spike_snapshot <= input_data, tf.float32)
            elif self.encoding == 'equidistant':
                # generate a fixed number of equidistant spikes
                input = tf.cast(
                    tf.ones_like(input_data) * t == tf.math.round(
                        fixed_distance * spike_counter), tf.float32)
                spike_counter += input
            else:
                raise Exception('Encoding Method ' + str(self.encoding) +
                                ' not implemented')

            # loop over layers
            for l in range(len(self.weights)):
                # define impulse
                if l == 0:
                    h = tf.einsum("ab,bc->ac", input, self.weights[0])
                else:
                    # tf.einsum takes the operands as separate arguments, not a single list
                    h = tf.einsum("ab,bc->ac",
                                  spk_rec[len(spk_rec) - 1][l - 1],
                                  self.weights[l])
                # add bias
                if self.bias[l] is not None:
                    h += self.bias[l]

                # calculate the spikes for all layers (decoding='spikes' or 'first_spike') or for all but the last layer (decoding='potential')
                if self.decoding == 'spikes' or self.decoding == 'first_spike' or l < len(
                        self.weights) - 1:
                    mthr = mem[l] - self.threshold
                    out = self.spike_fn(mthr)
                    c = (mthr > 0)
                    # reset signal: 1 where the neuron spiked, 0 elsewhere
                    # (TF tensors support neither item assignment nor a `device` argument)
                    rst = tf.cast(c, mem[l].dtype)
                    # count the spikes in the output layer
                    if self.decoding == 'spikes' and l == len(
                            self.weights) - 1:
                        spk_count = tf.add(spk_count, out)
                else:
                    # else reset is 0 (= no reset)
                    c = tf.zeros_like(mem[l], dtype=tf.bool)
                    rst = tf.zeros_like(mem[l])

                # calculate the new synapse potential
                new_syn = self.alpha * syn[l] + h
                # calculate new membrane potential
                if self.reset == 'subtraction':
                    new_mem = self.beta * mem[l] + syn[l] - rst
                elif self.reset == 'zero':
                    new_mem = self.beta * mem[l] + syn[l]
                    # item assignment is not supported on TF tensors; use tf.where instead
                    new_mem = tf.where(c, tf.zeros_like(new_mem), new_mem)

                mem[l] = new_mem
                syn[l] = new_syn

                mem_rec[len(mem_rec) - 1].append(mem[l])
                spk_rec[len(spk_rec) - 1].append(out)

                if self.decoding == 'first_spike' and l == len(
                        self.weights) - 1:
                    if tf.reduce_sum(out) > 0:
                        return out
        if self.decoding == 'potential':
            # return the final recorded membrane potential (len(mem_rec)-1) in the output layer (-1)
            return mem_rec[len(mem_rec) - 1][-1]
        if self.decoding == 'spikes':
            # return the sum over the spikes in the output layer
            return spk_count
        else:
            raise Exception('Decoding Method ' + str(self.decoding) +
                            ' not implemented')
Code example #13
import numpy as np
import tensorflow as tf


#%% p.37

mx1 = np.array([1.3, 1, 4.0, 23.99])
mx1.shape
mx1.ndim
mx1.dtype

#%% 

tr1 = tf.convert_to_tensor(mx1, dtype=tf.float32)
tr1
# tf.Tensor(mx1, dtype=tf.float32)  # TypeError: __init__() missing 1 required positional argument: 'value_index'

with tf.Session() as ss:
    print(ss.run(tr1))
    print(ss.run(tr1[0]))
    print(ss.run(tf.shape(tr1)))
    print(ss.run(tf.rank(tr1)))
    # print(ss.run(tf.dtype(tr1)))  # AttributeError: module 'tensorflow' has no attribute 'dtype'
    # print(ss.run(tr1.dtype))      # also fails: tr1.dtype is a DType object, not a tensor, so it cannot be run

with tf.Session() as ss:
    list(map(lambda x: print(ss.run(x)), [tr1, tr1[0], tf.shape(tr1), tf.rank(tr1)]))


with tf.Session() as ss:
    for k in map(lambda x: ss.run(x), [tr1, tr1[0], tf.shape(tr1), tf.rank(tr1)]):
        print(k)
Code example #14
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
'''
Module description:

@author Liu Mingxing
@date 2019-03-19
'''
import tensorflow as tf
# tf.Tensor() cannot be instantiated directly, and rsqrt needs an argument;
# in TF2 the op lives under tf.math.rsqrt.
tensor = tf.constant([4.0, 16.0])
print(tf.math.rsqrt(tensor))  # element-wise 1/sqrt(x) -> [0.5, 0.25]
Code example #15
model.add(tf.keras.layers.Dense(1, activation='sigmoid'))
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)

model.summary()
model.compile(loss="binary_crossentropy", optimizer=optimizer)

# training model
X_train = np.array(x_data)
y_train = np.array(y_data)

model.fit(X_train, y_train, epochs=1000)

score = model.evaluate(X_train, y_train)
print("score : {:.4f}".format(score))

predicted_x = model.predict(X_train)
print(predicted_x)
print(tf.cast(predicted_x >= 0.5, dtype=tf.float32))
'''
score : 0.0434
[[0.01966098]
 [0.95703065]
 [0.950225  ]
 [0.0571076 ]]
tf.Tensor(
[[0.]
 [1.]
 [1.]
 [0.]], shape=(4, 1), dtype=float32)
'''
Code example #16
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, optimizers, datasets # import the TF sub-modules
(x, y), (x_val, y_val) = datasets.mnist.load_data() # load the MNIST dataset
x = 2*tf.convert_to_tensor(x, dtype=tf.float32)/255.-1 # convert to a float tensor and rescale to [-1, 1]
y = tf.convert_to_tensor(y, dtype=tf.int32) # convert to an integer tensor
y = tf.one_hot(y, depth=10) # one-hot encoding
print(x.shape, y.shape)
train_dataset = tf.data.Dataset.from_tensor_slices((x, y)) # build a Dataset object
train_dataset = train_dataset.batch(512)
#In [1]:
y = tf.constant([0,1,2,3]) # 4 sample labels in integer encoding
y = tf.one_hot(y, depth=10) # one-hot encoding with 10 classes in total
print(y)
#Out[1]:
'''
tf.Tensor(
[[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]    # one-hot vector for digit 0
 [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]    # one-hot vector for digit 1
 [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]    # one-hot vector for digit 2
 [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]], shape=(4, 10), dtype=float32)
'''


# Create one network layer with 256 output nodes and ReLU activation
layers.Dense(256, activation='relu')
# Wrap 3 network layers in a Sequential container; each layer's output feeds the next layer by default
model = keras.Sequential([ # a stack of 3 nonlinear layers
    layers.Dense(256, activation='relu'), # hidden layer 1
    layers.Dense(128, activation='relu'), # hidden layer 2
    layers.Dense(10)]) # output layer with 10 output nodes
with tf.GradientTape() as tape: # set up the gradient-recording context
    # flatten, [b, 28, 28] => [b, 784]
    x = tf.reshape(x, (-1, 28*28))
    # Step 1. compute the model output, [b, 784] => [b, 10]
Code example #17
File: test_tensorflow1.py  Project: oudream/hello-ai
import numpy as np
import tensorflow as tf

arr1 = np.array([])

# tf.Tensor() cannot be instantiated directly; build a tensor from the NumPy array instead
tensor1 = tf.convert_to_tensor(arr1)
# tensor1.eval() would need an active tf.Session in TF1; see the session usage below


def my_func(arg):
    arg = tf.convert_to_tensor(arg, dtype=tf.float32)
    return tf.matmul(arg, arg) + arg


# The following calls are equivalent.
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]]))
value_2 = my_func([[1.0, 2.0], [3.0, 4.0]])
value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32))

init = tf.global_variables_initializer()

# Using the `close()` method.
sess = tf.Session()
sess.run([value_1])
sess.close()

# Related constant-creation APIs (tf.Constant does not exist; the op is tf.constant):
tf.constant
tf.constant_initializer

print(value_1)
Code example #18
"""
Takes a user id (name) as input, and returns a (N, m, n, 3) Tensor 
"""

SCALE_SIZE = 100 
NUM_CLASSES = 2
N = 10

def read_jpg(filename, label):
    image_string = tf.read_file(filename)
    image_decoded = tf.image.decode_bmp(image_string)
    image = tf.image.resize_images(image_decoded, [SCALE_SIZE, SCALE_SIZE])
    one_hot = tf.one_hot(label, NUM_CLASSES)
    return image, one_hot



if __name__ == "__main__":
    user = sys.argv[1]

    # Collect the N images first, then stack them into a (N, m, n, 3) tensor
    # and build a dataset from them.
    images = []
    for i in range(N):
        filename = "./Data/" + user + "/" + user + str(i) + ".bmp"
        im, oh = read_jpg(filename, 1)
        print(im)
        images.append(im)

    user_images = tf.stack(images)
    dataset1 = tf.data.Dataset.from_tensor_slices(images)

Code example #19
@tf.function
def absolute_value(x):
    """Compute the absolute value of x.

    Returns
    -------
        The absolute value.

    """
    if x > 0:
        return x
    else:
        return -x


print(absolute_value)
"""Prints:
<tensorflow.python.eager.def_function.Function object at 0x7f89eb2ea3c8>
"""

print(absolute_value(4))
"""Prints:
tf.Tensor(4, shape=(), dtype=int32)
"""

print(absolute_value(-99))
"""Prints:
tf.Tensor(99, shape=(), dtype=int32)
"""


@tf.function
def looping(x):
    """Using a for loop do something useful.

    Parameters
    ----------
Code example #20
import torchvision.models as models
import torch.nn as nn
import torch
from torch.autograd import Variable

models.alexnet()
c = nn.ConvTranspose2d(1, 1, 5)
print(c)
input_feature = torch.Tensor([[2, 2], [2, 2]])
input_feature = Variable(input_feature)
input_feature = input_feature[None, None, :, :]
print(c(input_feature))

nn.Container

import numpy as np
import tensorflow as tf
# tf.Tensor cannot be constructed directly; wrap a NumPy array with convert_to_tensor instead
print(tf.convert_to_tensor(np.array([1.0, 2.0])))

Code example #21
    def model_exploration(self):
        """
        #TODO
        (done) Create pipeline for ranking features
        (done) Create function serialize_context_from_dictionary, which assumes a dictionary 
        of database features is passed
        (done) Create function to serialize examples (without mongo document)
            see serialize_examples_model4
        Load previously serialized model
        
        Define dependencies
        Isolate these models into a separate project
        """

        context_feature_spec, example_feature_spec = get_feature_specification(
        )

        # Load previously serialized model from V1 tensorflow
        # When you 'prune' a graph, you extract functions for a new subgraph. This is
        # equivalent to naming feeds and fetches within a session
        _feed = 'Placeholder:0'
        _fetch = 'groupwise_dnn_v2/accumulate_scores/div_no_nan:0'
        _input_signature = example_feature_spec
        imported = tf.compat.v2.saved_model.load(_MODEL4_DIRECTORY)
        pruned = imported.prune(_feed,
                                _fetch,
                                input_signature=_input_signature)
        pruned.inputs
        pruned.name
        pruned.output_dtypes
        pruned.output_shapes
        pruned.outputs
        pruned.structured_input_signature
        pruned.structured_outputs
        pruned._captured_inputs

        pruned(serialized_example_in_example)
        pruned(tf.constant(serialized_example_in_example, dtype=tf.string))
        pruned(context_features)
        pruned(tf.constant([[1.]]))
        pruned(tf.ones(5))
        pruned(serialized_context)
        pruned(input_feed_dict)

        pruned2 = imported.prune(
            'predict', 'groupwise_dnn_v2/accumulate_scores/div_no_nan:0')
        f_serving_default = imported.signatures['serving_default']
        f_serving_default.inputs
        f_serving_default.captured_inputs
        f_serving_default.output_dtypes
        f_serving_default.output_shapes
        f_serving_default.outputs
        f_serving_default(serialized_example_in_example)
        f_serving_default(context_features)
        f_serving_default(tf.ones(5))

        f_predict = imported.signatures['predict']
        f_predict.inputs
        f_predict.captured_inputs
        f_predict.output_dtypes
        f_predict.output_shapes
        f_predict.outputs
        f_predict(serialized_example_in_example)

        return None
Code example #22
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 10 21:04:14 2021

@author: gamma
"""

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

model = keras.Sequential([
    layers.Dense(2, activation="relu", name="layer1"),
    layers.Dense(2, activation="relu", name="layer2"),  # layer names must be unique within a model
    layers.Dense(4, name="layer3"),
])

# tf.Tensor cannot be constructed directly; string data goes through tf.constant with dtype=tf.string
x = tf.constant(
    ["(", "1", "-2", "4", ")(", "-1", "2", "-3", ")(", "-2", "3", "-4", ")"],
    shape=(13, ),
    dtype=tf.string)
Code example #23
testing_sentences = []
testing_labels = []

# str(s.numpy()) is needed in Python3 instead of just s.numpy()

# The values of s and l are tensors, so calling the numpy() method extracts their values

for s, l in train_data:
    training_sentences.append(str(s.numpy()))
    training_labels.append(l.numpy())

for s, l in test_data:
    testing_sentences.append(str(s.numpy()))
    testing_labels.append(l.numpy())

'''Sample label tensors printed from the dataset:
tf.Tensor(1, shape=(), dtype=int64)
tf.Tensor(1, shape=(), dtype=int64)
tf.Tensor(1, shape=(), dtype=int64)
tf.Tensor(0, shape=(), dtype=int64)
tf.Tensor(0, shape=(), dtype=int64)
tf.Tensor(1, shape=(), dtype=int64)
'''

# When training, the labels are expected to be numpy arrays
training_labels_final = np.array(training_labels)
testing_labels_final = np.array(testing_labels)

# Tokenize the sentences (Hyper-parameters)
vocab_size = 10000
embedding_dim = 16
max_length = 120
truc_type = 'post'
Code example #24
def transform_activation_offline(tensor,
                                 exponent,
                                 mantissa,
                                 opt_exp_list,
                                 is_3d=False):
    # "Offline" means the shared exponent is fixed:
    #      it is determined during the pre-inference pass
    # Quantize the activation tensor along the channel dimension
    # The input tensor is required to have the shape [batch, channel, height, width]
    # opt_exp_list: the shared exponent list for offline quantization
    if is_3d is True:
        orig_shape = tf.shape(tensor)
        tensor = tf.reshape(tensor,
                            (orig_shape[0], orig_shape[1] * orig_shape[2],
                             orig_shape[3], orig_shape[4]))
    shp = tensor.shape
    #print ("shape1:", shp[1], " opt_exp_list:", len(opt_exp_list))
    chnl_group = shp[1] // len(opt_exp_list)
    number_of_blocks = math.ceil(shp[1] / chnl_group)
    opt_exp_list = tf.convert_to_tensor(opt_exp_list)  # .cuda() is torch-specific
    #print ("shap[1]:", shp)
    #print ("len exp list", len(opt_exp_list))
    if shp[1] % chnl_group == 0:
        # shp[1] is divisible by block size
        # Therefore just one tensor will be created
        #print (tensor.shape)
        tensor = tf.reshape(
            tensor, (shp[0], number_of_blocks, chnl_group * shp[2] * shp[3]))
        opt_exp_list = tf.expand_dims(opt_exp_list, 0)  ##### Need Unit test
        tensor = to_exponent_mantissa_width(tensor,
                                            opt_exp_list,
                                            mantissa,
                                            quant_dim=len(tf.shape(tensor)) -
                                            1)
        tensor = tf.reshape(tensor, (shp[0], shp[1], shp[2], shp[3]))
        if is_3d is True:
            tensor = tf.reshape(tensor,
                                (orig_shape[0], orig_shape[1], orig_shape[2],
                                 orig_shape[3], orig_shape[4]))
        return tensor

    else:
        # shp[1] is not divisible by channel group
        # Therefore two tensors will be created
        input('Channel is not divisible by channel group')

        if number_of_blocks == 1:
            # This means that the depth is less than the block size, so just one tensor will be created
            tensor = tf.reshape(tensor, (shp[0], 1, shp[1] * shp[2] * shp[3]))
            opt_exp_list = tf.expand_dims(opt_exp_list,
                                          0)  ##### Need Unit test
            tensor = to_exponent_mantissa_width(
                tensor,
                opt_exp_list,
                mantissa,
                quant_dim=len(tf.shape(tensor)) - 1)
            tensor = tf.reshape(tensor, (shp[0], shp[1], shp[2], shp[3]))
            return tensor
        else:
            # Separate two part, tensor1 contain (number_of_blocks-1), tensor2 contain the rest
            first_chnl = ((number_of_blocks - 1) * chnl_group)
            tensor1 = tensor[:, 0:first_chnl, :, :]
            t1_shp = tf.shape(tensor1)
            tensor2 = tensor[:, first_chnl:shp[1], :, :]
            t2_shp = tf.shape(tensor2)
            t1_exp_list = opt_exp_list[0:number_of_blocks - 1]
            t1_exp_list = tf.expand_dims(t1_exp_list, 0)
            t2_exp_list = opt_exp_list[number_of_blocks - 1]
            t2_exp_list = tf.expand_dims(t2_exp_list, 0)

            # Perform quantization
            tensor1 = tf.reshape(
                tensor1,
                (shp[0], number_of_blocks - 1, chnl_group * shp[2] * shp[3]))
            tensor2 = tf.reshape(tensor2,
                                 (shp[0], 1,
                                  (shp[1] - first_chnl) * shp[2] * shp[3]))
            tensor1 = to_exponent_mantissa_width(
                tensor1,
                t1_exp_list,
                mantissa,
                quant_dim=len(tf.shape(tensor1)) - 1)
            tensor2 = to_exponent_mantissa_width(
                tensor2,
                t2_exp_list,  # the remainder block uses its own exponent
                mantissa,
                quant_dim=len(tf.shape(tensor2)) - 1)

            # Reshape both parts and concatenate them back along the channel axis
            # (TF tensors do not support slice assignment)
            tensor1 = tf.reshape(tensor1, t1_shp)
            tensor2 = tf.reshape(tensor2, t2_shp)
            tensor = tf.concat([tensor1, tensor2], axis=1)
            return tensor

    return tensor
Code example #25
import tensorflow as tf

# Import other libraries
import numpy as np
import time
import tempfile

# These operations automatically convert native Python types, for example.
print(tf.add(1, 2))
print(tf.add([1, 2], [3, 4]))
print(tf.square(5))
print(tf.reduce_sum([1, 2, 3]))

# Operator overloading is also supported
print(tf.square(2) + tf.square(3))
"""
tf.Tensor(3, shape=(), dtype=int32) 
tf.Tensor([4 6], shape=(2,), dtype=int32) 
tf.Tensor(25, shape=(), dtype=int32) 
tf.Tensor(6, shape=(), dtype=int32) 
tf.Tensor(13, shape=(), dtype=int32)
"""

# Each tf.Tensor has a shape and a datatype.
x = tf.matmul([[1]], [[2, 3]])
print(x)
print(x.shape)
print(x.dtype)
"""
tf.Tensor([[2 3]], shape=(1, 2), dtype=int32) 
(1, 2) 
Code example #26
 def call(self, inputs):
     # tf.Tensor() cannot be instantiated directly; start from a tensor matching the input's shape
     real = tf.zeros_like(inputs)
Code example #27
 def __init__(self):
     
     # tf.Tensor cannot be constructed directly; initialise the state with zeros
     self.state = tf.zeros((10,))
     
     self.build()
Code example #28
 def _construct_graph(self):
     # tf.Tensor cannot be built from op=None; keep the placeholder from the original (commented-out) line
     self.tf_input_valuation = tf.compat.v1.placeholder(
         shape=[None, self.base_valuation.shape[0]], dtype=tf.float32)
     self.tf_result_valuation = self._construct_deduction()
Code example #29
print("Input: " + str(input_images.shape))
np.random.shuffle(input_images)

#Use the (as yet untrained) generator to create an image
generator = make_generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)

#plt.imshow(generated_image[0, :, :, 0], cmap='gray')
#Use the (as yet untrained) discriminator to classify the generated images as real or fake.
#The model will be trained to output positive values for real images, and negative values for fake images.
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print(decision)

# Output: tf.Tensor([[-0.00366611]], shape=(1, 1), dtype=float32)
#Define the loss and optimizers
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
#Generator loss
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)

checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(
    generator_optimizer=generator_optimizer,
    discriminator_optimizer=discriminator_optimizer,
    generator=generator,
    discriminator=discriminator)
EPOCHS = 50
noise_dim = 100
Code example #30
File: tf_checkas.py  Project: Daphiy/myPlayground
import tensorflow as tf

print("hello world")

x = [1,2,2,3,34,1,421,11]
xx = tf.constant(x)  # tf.Tensor cannot be constructed directly from a Python list

print(xx)