Example #1
def runSoftmaxLayer(inputx, inputy):
    numOfClasses = 2

    #Softmax layer model
    x = tf.placeholder(tf.float32, [None, 1024])
    w = tf.Variable(tf.zeros([1024, numOfClasses]))
    b = tf.Variable(tf.zeros([numOfClasses]))
    y = tf.matmul(x, w) + b

    #Loss
    ylabels = tf.placeholder(tf.float32, [None, numOfClasses])
    crossEntropyLoss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=ylabels, logits=y))
    trainModel = tf.train.GradientDescentOptimizer(0.5).minimize(
        crossEntropyLoss)
    #try adam optimizer and adadelta (speed up training / result)

    #Setup
    sess = tf.InteractiveSession()
    tf.global_variables_initializer().run()

    #Train - feed_dict takes numpy arrays
    #for i in range(1000):
    #	batch_xs, batch_ys = mnist.train.next_batch(100)
    #	sess.run(trainModel, feed_dict={x: batch_xs, y_: batch_ys})

    #Train
    sess.run(trainModel, feed_dict={x: inputx, ylabels: inputy})
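
As the comment above suggests, other optimizers can be swapped in for plain gradient descent. A minimal sketch, assuming a learning rate of 1e-3 (the rate is not part of the original):

#Hypothetical alternative to GradientDescentOptimizer (learning rate is assumed)
trainModel = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(crossEntropyLoss)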
Example #2
    def __init__(self, inputs, n_in, n_out, W=None, b=None):

        # initialize with 0 the weights W as a matrix of shape (n_in, n_out)
        if W is None:
            self.W = tf.Variable(np.zeros((n_in, n_out), dtype='float32'), name='W')
        else:
            W = tf.Variable(initial_value=W, name='W')
            self.W = W
            print("weight W loaded in Logistic")
        # initialize the biases b as a vector of n_out 0s
        if b is None:
            self.b = tf.Variable(np.zeros((n_out), dtype='float32'), name='b')
        else:
            b = tf.Variable(initial_value=b, name='b')
            self.b = b
            print("weight b loaded in Logistic")
        print(self.b.shape)
        # compute vector of class-membership probabilities in symbolic form
        self.p_y_given_x = tf.nn.softmax(tf.matmul(tf.transpose(inputs[:,tf.newaxis]),self.W) + self.b)

        self.p_y_given_x_printed = tf.Print(input_= self.p_y_given_x, data = [self.p_y_given_x], message = 'p_y_given_x = ')
        #self.p_y_given_x_printed = self.p_y_given_x

        # compute prediction as class whose probability is maximal in
        # symbolic form
        #axis = 1
        self.y_pred = tf.argmax(self.p_y_given_x, axis=1, name="output")
        # parameters of the model
        self.params = [self.W, self.b]
Example #3
def neural_network_model(data):

    hidden_1_layer = { 'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                       'biases': tf.Variable(tf.random_normal([n_nodes_hl1])) }

    hidden_2_layer = { 'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                       'biases': tf.Variable(tf.random_normal([n_nodes_hl2])) }

    hidden_3_layer = { 'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                       'biases': tf.Variable(tf.random_normal([n_nodes_hl3])) }

    output_layer = { 'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                       'biases': tf.Variable(tf.random_normal([n_classes])) }

    # (input_data * weights) + biases

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)

    output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])
    return output
def add_layer(input_data,input_num,output_num,activation_function=None):
    #output = input_data * weight + bias
    w = tf.Variable(initial_value=tf.random_normal(shape=[input_num, output_num]))
    b = tf.Variable(initial_value=tf.random_normal(shape=[1, output_num]))
    output = tf.add(tf.matmul(input_data,w),b)
    if activation_function:
        output = activation_function(output)
    return output
Example #5
	def add_layer(inputs, in_size, out_size, activation_function = None):
		Weights = tf.Variable(tf.random_normal([in_size, out_size]))
		biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
		Wx_plus_b = tf.matmul(inputs, Weights) + biases
		if activation_function is None:
			outputs = Wx_plus_b
		else:
			outputs = activation_function(Wx_plus_b)
		return outputs
Example #6
def neural_network(data):

	hidden_1_layer = {'weights':tf.Variable(tf.random_normal([784, n_nodes_hl1])),
					  'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}

	hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
					  'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}

	hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
					  'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}

	output_layer   = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
					  'biases':tf.Variable(tf.random_normal([n_classes]))}



	l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']) , hidden_1_layer['biases'])
	l1 = tf.nn.relu(l1)
	
	l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']) , hidden_2_layer['biases'])
	l2 = tf.nn.relu(l2)
	
	l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']) , hidden_3_layer['biases'])
	l3 = tf.nn.relu(l3)
	
	output = tf.add(tf.matmul(l3, output_layer['weights']) , output_layer['biases'])
	
	return output
Example #7
def convolution_network(x):

	weights = {'W_conv1':tf.Variable(tf.random_normal([5,5,1,32])),
				'W_conv2':tf.Variable(tf.random_normal([5,5,32,64])),
				'W_fc':tf.Variable(tf.random_normal([7*7*64,1024])),
				'out':tf.Variable(tf.random_normal([1024,n_classes]))}


	biases = {'B_conv1':tf.Variable(tf.random_normal([32])),
				'B_conv2':tf.Variable(tf.random_normal([64])),
				'B_fc':tf.Variable(tf.random_normal([1024])),
				'out':tf.Variable(tf.random_normal([n_classes]))}

	x = tf.reshape(x, shape=[-1,28,28,1])

	conv1 = conv2d(x, weights['W_conv1'])
	conv1 = maxpool2d(conv1)

	conv2 = conv2d(conv1, weights['W_conv2'])
	conv2 = maxpool2d(conv2)

	fc = tf.reshape(conv2, [-1,7*7*64])
	fc = tf.nn.relu(tf.matmul(fc, weights['W_fc']) + biases['B_fc'])

	output = tf.matmul(fc, weights['out']) + biases['out']
	
	return output
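
Example #7 calls conv2d and maxpool2d helpers that are not shown. A minimal sketch of what such helpers typically look like in this style of tutorial; the stride and pooling sizes are assumptions:

def conv2d(x, W):
	#2D convolution with stride 1 and zero padding (assumed)
	return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')

def maxpool2d(x):
	#2x2 max pooling with stride 2 (assumed)
	return tf.nn.max_pool(x, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')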
Example #8
    def make_CNN_model(self, Y_num):
        [dropout, row_size, col_size, dnn_vec, filter_size] = self.params
        row_size, col_size = int(row_size), int(col_size)

        self.X = tf.placeholder(tf.float32, [None, row_size, col_size], name='X')
        self.Y = tf.placeholder(tf.float32, [None, Y_num], name='Y')
        self.Dropout = tf.placeholder(tf.float32, None, name='Drop')

        # Reshape row_size * col_size into row_size * col_size * 1? Typical CNN write-ups do this, but does it actually matter here?
        cnn_net = tf.reshape(self.X, [-1, row_size, col_size, 1])

        W1 = tf.Variable(tf.random_normal(filter_size))
        L1 = tf.nn.max_pool(tf.nn.relu(tf.nn.conv2d(cnn_net, W1, strides=[1,1,1,1], padding='SAME')), ksize=[1,5,5,1], strides=[1,5,5,1], padding='SAME')

        W2 = tf.Variable(tf.random_normal())
Example #9
def so3_integrate(x):

	assert x.shape[-1] == x.shape[-2]
	assert x.shape[-2] == x.shape[-3]

	b = int(x.shape[-1]) // 2

	#assigning "w" here directly instead of having a separate function with
	#gpu usage
	w = S3.quadrature_weights(b)
	w = tf.cast(w, tf.float32)

	if isinstance(x, tf.Variable):
		w = tf.Variable(w)


	x = tf.reduce_sum(x, axis=-1)
	x = tf.reduce_sum(x, axis=-1)

	sz = x.shape.as_list()[:-1]
	x = tf.reshape(x, [-1, 2 * b])
	w = tf.reshape(w, [2 * b, 1])
	x = tf.squeeze(tf.matmul(x, w), axis=-1)
	x = tf.reshape(x, sz)

	return x
def linear(args, opsize, bias, biasid=0.0, scope=None):
    if args is None or (isinstance(args, (list, tuple)) and not args):
        raise ValueError("`args` not specified")
    if not isinstance(args, (list, tuple)):
        args = [args]

    total_arg_size = 0

    shapes = [a.get_shape().as_list() for a in args]
    for shape in shapes:
        if len(shape) != 2:
            raise ValueError("Linear 2D arguments: %s" % str(shapes))
        if not shape[1]:
            raise ValueError("Linear expects shape[1]: %s" % str(shapes))
        else:
            total_arg_size += shape[1]

    with tf.variable_scope(scope or "Linear"):

        matrix = tf.get_variable("Matrix", [total_arg_size, opsize])
        if len(args) == 1:
            res = tf.matmul(args[0], matrix)
        else:
            res = tf.matmul(tf.concat(axis=1, values=args), matrix)

        if not bias:
            return res
        biasTerm = tf.get_variable("Bias", [opsize],
                                   initializer=tf.constant_initializer(biasid))
    return res + biasTerm
Example #11
def add_final_training_ops(final_tensor_name, bottleneck_tensor,
                           bottleneck_tensor_size, attribute_count):
    """We need to retrain the top layer to return regression scores for the faces for multi-class attributes
    so this function adds the right operation to the graph, along with some variables to hold the weights, and then
    set up all the gradients for the backward pass.

    """
    with tf.name_scope('input'):
        bottleneck_input = tf.placeholder_with_default(
            bottleneck_tensor,
            shape=[None, bottleneck_tensor_size],
            name='BottleneckInputPlaceholder')
        ground_truth_input = tf.placeholder(tf.float32,
                                            [None, attribute_count],
                                            name='GroundTruthInput')

    # Organizing the following ops as 'final_training_ops' so they're easier to see in TensorBoard
    layer_name = 'final_training_ops'
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            initial_value = tf.truncated_normal(
                [bottleneck_tensor_size, attribute_count], stddev=0.001)
            layer_weights = tf.Variable(initial_value, name='final_weights')
            variable_summaries(layer_weights)

        with tf.name_scope('biases'):
            layer_biases = tf.Variable(tf.zeros([attribute_count]),
                                       name='final_biases')
            variable_summaries(layer_biases)

        with tf.name_scope('Wx_plus_b'):
            logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
            tf.summary.histogram('final_activation', logits)
Example #12
    def build_verfication_loss(self):

        with tf.variable("verfication_loss"):
            # generate the score matrix, mask the dialog value to zeros.
            mask = tf.cast(
                tf.logical_not(
                    tf.cast(tf.matrix_diag([1] * Params.max_passage_len),
                            tf.bool)), tf.float32)
            scores = tf.multiply(
                tf.transpose(self.answer_encoding), self.answer_encoding
            ) * mask  # Warning: check whether the answer_encoding comes from the same passage?

            self._answer_encoding = tf.matmul(self.answer_encoding,
                                              tf.nn.softmax(scores))
            g_v = tf.contrib.layers.fully_connected(
                tf.concat([
                    self.answer_encoding, self._answer_encoding,
                    self.answer_encoding * self._answer_encoding
                ], 2))

            p_v = tf.nn.softmax(g_v)
            self.cross_passage_verfication = -tf.reduce_mean(
                tf.log(p_v + 1e-8) * mask,
                1)  # y_i is the index of the correct answer

        self.loss = self.boundary_loss + Params.beta1 * self.content_loss + Params.beta2 * self.cross_passage_verfication
    def __init__(self,scope):

        #todo make multi dimensional
        self.paths = [[None]*2]*2
        self.control_fixed = random.sample(self.paths, 1)[0]


        with tf.variable_scope(scope):
            print()
Example #14
def build_summaries():
    episode_reward = tf.Variable(0.)
    tf.summary.scalar("Reward", episode_reward)
    episode_ave_max_q = tf.Variable(0.)
    tf.summary.scalar("Q max value", episode_ave_max_q)

    summary_vars = [episode_reward, episode_ave_max_q]
    summary_ops = tf.summary.merge_all()

    return summary_ops, summary_vars
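
A hedged usage sketch for build_summaries: the session, log directory, and per-episode values (ep_reward, ep_ave_max_q, episode) are assumptions, not part of the original:

summary_ops, summary_vars = build_summaries()
writer = tf.summary.FileWriter('./logs', sess.graph)
summary_str = sess.run(summary_ops, feed_dict={
    summary_vars[0]: ep_reward,      # assumed per-episode reward
    summary_vars[1]: ep_ave_max_q})  # assumed average max Q over the episode
writer.add_summary(summary_str, episode)
writer.flush()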
Example #15
def orthogonal(shape, scale=1.1, name=None):
    ''' From Lasagne. Reference: Saxe et al., http://arxiv.org/abs/1312.6120
    '''
    flat_shape = (shape[0], np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    # pick the one with the correct shape
    q = u if u.shape == flat_shape else v
    q = q.reshape(shape)
    return tf.Variable(scale * q[:shape[0], :shape[1]], name=name)
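
A minimal usage sketch for the orthogonal initializer above; the 128x128 shape and variable name are assumptions:

W_rec = orthogonal([128, 128], scale=1.0, name='W_rec')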
Example #16
    def __init__(self, dim):
        # Hyperparameters
        self.dim = dim
        self.n_features = 4
        self.learning_rate = 0.0005

        # Placeholders
        self.a = tf.placeholder(tf.float32, [self.dim], 'answer_node')
        self.a_p = tf.placeholder(tf.float32, [self.dim], 'parent_node')
        self.a_root = tf.placeholder(tf.float32, [self.dim], 'answer_root')
        self.q = tf.placeholder(tf.float32, [self.dim], 'question_root')
        self.label = tf.placeholder(tf.float32, name='correct_ans_label')

        # Weights and biases
        self.W_1 = tf.Variable(tf.random_normal([self.dim]))
        self.b_1 = tf.Variable(tf.random_normal([self.dim, 1]))

        self.W_2 = tf.Variable(tf.random_normal([self.n_features, 1]))
        self.b_2 = tf.Variable(tf.random_normal([self.n_features, 1]))
Example #17
def add_layer(inputs, in_size, out_size, activation_function=None):
    # Next, define the weights and biases.
    # When generating initial parameters, random values from a normal distribution work much better
    # than all zeros, so weights here is an in_size-by-out_size matrix of random values.
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))

    # biases starts from a zero vector with 0.1 added, rather than all zeros.
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)

    # Define Wx_plus_b, the pre-activation value of the layer; tf.matmul() is matrix multiplication.
    Wx_plus_b = tf.matmul(inputs, Weights) + biases

    # When activation_function is None, the output is simply Wx_plus_b; otherwise
    # Wx_plus_b is passed through activation_function() to get the output.
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)

    # Finally, return the output; the layer-adding function add_layer() is now complete.
    return outputs
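
A hedged sketch of wiring two of these layers into a tiny regression network; the placeholder shapes, hidden size, and learning rate are assumptions:

xs = tf.placeholder(tf.float32, [None, 1])
ys = tf.placeholder(tf.float32, [None, 1])
hidden = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(hidden, 10, 1, activation_function=None)
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=1))
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)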
Example #18
def neural_network_model(data):
    hidden1_layer = {'weights': tf.Variable(tf.random_normal([784,n_nodes_hl1])),
                    'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}

    hidden2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1,n_nodes_hl2])),
                    'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}

    hidden3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2,n_nodes_hl3])),
                    'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}

    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3,n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}



    l1 = tf.add(tf.matmul(data,hidden1_layer['weights']), hidden1_layer['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1,hidden2_layer['weights']) , hidden2_layer['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(tf.matmul(l2,hidden3_layer['weights']), hidden3_layer['biases'])
    l3 = tf.nn.relu(l3)

    output = tf.matmul(l3,output_layer['weights']) + output_layer['biases']

    return output


def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples/batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c

            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))

        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
Example #19
def neural_network_model(data):

    hidden_1_layer = {
        'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))
    }

    hidden_2_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))
    }

    hidden_3_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
        'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))
    }

    output_layer = {
        'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
        'biases': tf.Variable(tf.random_normal([n_classes]))
    }

    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']),
                hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)

    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)

    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)

    output = tf.add(tf.matmul(l3, output_layer['weights']), output_layer['biases'])

    return output  # remember output is one-hot array
Example #20
def highwayUnit(input_layer, id):
	with tf.variable_scope('highway' + str(id)):

		H = slim.conv2d(input_layer, 64, [3,3])

		T = slim.conv2d(
						input_layer, 64, [3,3],
			            biases_initializer = tf.constant_initializer(-1.0),
			            activation_fn = tf.nn.sigmoid
			             )

		output = H * T + input_layer * (1.0 - T)
		return output
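
A hedged usage sketch stacking a few highway units on top of an initial conv layer; the input tensor and number of units are assumptions:

net = slim.conv2d(images, 64, [3,3])
for i in range(4):
	net = highwayUnit(net, i)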
Example #21
def add_layer(inputs, in_size, out_size, activation_function=None):
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            weights=tf.Variable(tf.random_normal([in_size, out_size]), name='w')
        with tf.name_scope('biases'):
            biases=tf.Variable(tf.zeros([1,out_size])+0.1, name='b')
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b=tf.add(tf.matmul(inputs, weights), biases)
        if activation_function is None:
            outputs=Wx_plus_b
        else:
            outputs=activation_function(Wx_plus_b,)
        return outputs
Example #22
def model():
    #Declare the system specs
    #How fast the system learns
    #Too low and the system may not have enough data to train
    #Too high and the system might adjust too quickly and miss the target
    learning_rate = 0.5

    #How many times the model is trained on the test data.
    epochs = 10

    #How much data is trained on at a time?
    batch_size =

    #Declare the input layer 1 input node per stock
    inputLayer = tf.placeholder(tf.float32, [None, 5])

    #Declare the output layer 1 output node per stock
    outputLayer = tf.placeholder(tf.float32, [None, 5])

    #Weights for the connections between the 5 node input layer and the first 100 node hidden layer
    W1 = tf.Variable(tf.random_normal([5, 100], stddev = 0.03), name = 'W1')
    b1 = tf.Variable(tf.random_normal([100]), name = 'b1')

    #Weights for the connections between the first 100 node hidden layer and the second 100 node hidden layer
    W2 = tf.Variable(tf.random_normal([100, 100], stddev = 0.03), name = 'W2')
    b2 = tf.Variable(tf.random_normal([100]), name = 'b2')

    #Weights for the connection between the second 100 node hidden layer and the 5 node output layer
    W3 = tf.Variable(tf.random_normal([100, 5], stddev = 0.03), name = 'W3')
    b3 = tf.Variable(tf.random_normal([5]), name = 'b3')

    #Begin Calculations
    first_out = tf.add(tf.matmul(inputLayer, W1), b1)
    first_out = tf.nn.relu(first_out)

    second_out = tf.add(tf.matmul(first_out, W2), b2)
    second_out = tf.nn.relu(second_out)

    output = tf.add(tf.matmul(second_out, W3), b3)
    output = tf.nn.softmax(output)

    output_clipped = tf.clip_by_value(output, 1e-10, 0.9999999)

    output_clipped = tf.round(output_clipped)
    expected = tf.round(outputLayer)

    correct = tf.equal(output_clipped, expected)
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    print('Accuracy: ', accuracy)
Example #23
def conv_block(inputs, kernel_size, filters, block, stride=2):
    """conv_block is the block that has a conv layer at shortcut
    # Arguments
    input_tensor: input tensor
    kernel_size: default 3, the kernel size of middle conv layer at main path
    filters: list of integers, the nb_filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    use_bias: Boolean. To use or not use a bias in conv layers.
    train_bn: Boolean. Train or freeze Batch Norm layers
    Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
    And the shortcut should have subsample=(2,2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    scope = block
    with tf.variable_scope(scope):
        x = slim.conv2d(
            inputs,
            nb_filter1,
            (1, 1),
            stride,
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training},
            scope='branch2a')
        x = slim.conv2d(
            x,
            nb_filter2,
            (kernel_size, kernel_size),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training},
            scope='branch2b')
        x = slim.conv2d(
            x,
            nb_filter3,
            (1, 1),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training},
            scope='branch2c')
        short_cut = slim.conv2d(
            inputs,
            nb_filter3, (1, 1),
            stride,
            activation_fn=None,
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training},
            scope='branch1')
        x = tf.add(x, short_cut)
        x = tf.nn.relu(x, name='out')
        return x
Example #24
def denseBlock(input_layer, i, j):
    with tf.variable_scope('dense_net' + str(i)):
        nodes = []
        a = slim.conv2d(input_layer, 64, [3, 3], normalizer_fn=slim.batch_norm)
        nodes.append(a)

        for _ in range(j):
            b = slim.conv2d(
                tf.concat(nodes, 3),
                #axis 3 means concatenation along the channels dimension
                64,
                [3, 3],
                normalizer_fn=slim.batch_norm)
            nodes.append(b)
        return b
Example #25
def init(object, *word_set):
	global __words
	if not word_set:
		return 0
	else:
		try:
			# wrap each word in a variable and compare it against the query object
			words = {}
			for i in word_set:
				words[i] = tf.Variable(i)
			status = {b: tf.equal(words[b], object) for b in words}
			sess = tf.Session()
			sess.run(tf.global_variables_initializer())
			contain = sess.run(list(status.values()))
			for c in contain:
				if c:
					return True
		except Exception:
			return 0
Example #26
def inference(images):
    parameters = []

    with tf.name_scope("conv1") as scope:
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64],
                                                 dtype=tf.float32,
                                                 stddev=1e-1),
                             name='weights')
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding="SAME")
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                             trainable=True,
                             name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(bias, name=scope)
        print_activation(conv1)
        parameters += [kernel, biases]
def Weighted_Categorical_CrossEntropy_Loss(weights):
    """
    Keras多元交叉熵函数带权版本
    变量:
        weights: numpy array of shape (C,) where C is the number of classes
    """
    weights = tf.variable(weights)
    def loss_(y_true, y_pred):
        # scale predictions so that the class probas of each sample sum to 1
        y_pred /= tf.sum(y_pred, axis=-1, keepdims=True)
        # clip to prevent NaN's and Inf's
        y_pred = tf.clip(y_pred, tf.epsilon(), 1 - tf.epsilon())
        # calc
        loss = y_true * tf.log(y_pred) * weights
        loss = -tf.sum(loss, -1)
        return loss
    return loss_
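
A hedged Keras usage sketch for the weighted loss; the class weights and the model object are assumptions:

class_weights = np.array([1.0, 2.0, 5.0])  # assumed: up-weight rarer classes
model.compile(optimizer='adam',
              loss=Weighted_Categorical_CrossEntropy_Loss(class_weights),
              metrics=['accuracy'])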
Example #28
def neural_network_model(data):


    
    hidden_1_layer = {'weights':tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl1]))}


    hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1,n_nodes_hl2])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl2]))}


    hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2,n_nodes_hl3])),
                      'biases':tf.Variable(tf.random_normal([n_nodes_hl3]))}
Example #29
def seq2seq_model(inputs, targets, keep_prob, batch_size, sequence_length, answers_num_words, questions_num_words, rnn_size, num_layers, questions_word_to_integer):
    encoder_embedded_input = tns.contrib.layers.embed_sequence(inputs,
                                                               answers_num_words + 1,
                                                               encoder_embedding_size,
                                                               initializer = tns.random_uniform_initializer(0, 1)
                                                               )
    encoder_state = encoder_rnn(encoder_embedded_input, rnn_size, num_layers, keep_prob, sequence_length)
    preprocessed_targets = preprocess_targets(targets, questions_word_to_integer, batch_size)
    decoder_embeddings_matrix = tns.Variable(tns.random_uniform([questions_num_words + 1, decoder_embedding_size], 0, 1))
    decoder_embedded_input = tns.nn.embedding_lookup( decoder_embeddings_matrix, preprocessed_targets)
    training_predictions, test_predictions = decoder_rnn(decoder_embedded_input,
                                                         decoder_embeddings_matrix,
                                                         encoder_state,
                                                         questions_num_words,
                                                         sequence_length,
                                                         rnn_size,
                                                         num_layers,
                                                         questions_word_to_integer,
                                                         keep_prob,
                                                         batch_size)

    return training_predictions, test_predictions
Example #30
def create_actor(observations, create_model, num_actions, random_model=None):
    """Create an actor.

    Args:
        observations (Tensor): from the environment
        create_model (callable): builds the action scores (we will apply argmax here)
        num_actions (int): the number of actions in the model
        random_model (Optional[Tensor]): if not specified, random actions are chosen uniformly

    Returns:
        dict: {new_epsilon, update_epsilon_expr, output_actions}
            new_epsilon (Tensor): holding a single float probability
            update_epsilon_expr (expression): to run in a tensorflow session
            output_actions (Tensor): the generated batch of outputs

    Example:
        actor = create_actor(states, my_model, env.action_space.n)
    """
    batch_size = shape(observations)[0]

    # epsilon drives how often random actions are selected.
    epsilon = variable("epsilon", (), initializer=const(0))

    # Action values from the model, with the highest value's index is chosen.
    values = create_model(observations, num_actions, scope="q_model")
    value_max = argmax(values, axis=1)

    # Apply chance of a random action.
    output_actions = sometimes_random_actions(batch_size,
                                              num_actions,
                                              nonrand=value_max,
                                              random_probability=epsilon)

    # Allow updates to epsilon (the chance of a random action)
    new_epsilon = placeholder(float32, (), name="new_epsilon")
    update_epsilon_expr = epsilon.assign(
        cond(new_epsilon >= 0, lambda: new_epsilon, lambda: epsilon))

    return Actor(output_actions, update_epsilon_expr, new_epsilon)
Example #31
    def _build_pyramid(self, Cn):
        C1, C2, C3, C4, C5 = Cn
        with tf.variable_scope('Pyramid'):
            P5 = slim.conv2d(C5, 256, (1, 1), stride=1, scope='fpn_c5p5')
            P4 = tf.add(slim.conv2d_transpose(P5, 256, (2, 2), stride=2,
                                              scope='fpn_p5upsampled'),
                        slim.conv2d(C4, 256, (1, 1), scope='fpn_c4p4'),
                        name='fpn_p4add')

            P3 = tf.add(slim.conv2d_transpose(P4, 256, (2, 2), stride=2,
                                              scope='fpn_p4upsampled'),
                        slim.conv2d(C3, 256, (1, 1), scope='fpn_c3p3'),
                        name='fpn_p3add')
            P2 = tf.add(slim.conv2d_transpose(P3, 256, (2, 2), stride=2,
                                              scope='fpn_p3upsampled'),
                        slim.conv2d(C2, 256, (1, 1), scope='fpn_c2p2'),
                        name='fpn_p2add')

            P2 = slim.conv2d(P2,
                             256, (3, 3),
                             padding='SAME',
                             scope=self.KEY_FPN_P2)
            P3 = slim.conv2d(P3,
                             256, (3, 3),
                             padding='SAME',
                             scope=self.KEY_FPN_P3)
            P4 = slim.conv2d(P4,
                             256, (3, 3),
                             padding='SAME',
                             scope=self.KEY_FPN_P4)
            P5 = slim.conv2d(P5,
                             256, (3, 3),
                             padding='SAME',
                             scope=self.KEY_FPN_P5)

            P6 = slim.max_pool2d(P5, (1, 1), 2, scope=self.KEY_FPN_P6)

            return [P2, P3, P4, P5, P6], [P2, P3, P4, P5]
Example #32
import numpy as np

y  = np.array([1,2,3.5,10,20])

print(y)

#--------------------------------------

t = np.ones([5,5])

print(t)


#--------------------------------------

import tensorflow as tf
sess = tf.InteractiveSession()

state = tf.Variable(0, name="counter")

new_value = tf.add(state,tf.constant(1))

update = tf.assign(state,new_value)

with tf.Session() as sess:
	sess.run(tf.initialize_all_variables())
	print(sess.run(state))
	for _ in range(3):
		sess.run(update)
		print(sess.run(state))