Example #1
def inference(x, is_training=True):
    with tf.variable_scope("layer1"):
        filter_weights = weight_variable([1, CHANNELS, CHANNELS_L1], name="weights")
        feature_map = tf.nn.conv1d(x, filter_weights, stride=1, padding='SAME')
        feature_map = batch_norm(feature_map, decay=decay_bn, center=True, scale=scale_bn,
                                 epsilon=epsilon_bn, activation_fn=None, is_training=is_training)
        activation = tf.nn.elu(feature_map)
        activation = tf.nn.dropout(activation, keep_prob=keep_prob)
        activation = tf.reshape(activation, [-1, CHANNELS_L1, WINDOW_SIZE, 1])

    with tf.variable_scope("layer2"):
        filter_weights = weight_variable(KERNEL2, name="weights")
        feature_map = tf.nn.conv2d(activation, filter_weights, strides=[1, 1, 1, 1], padding='SAME')
        feature_map = batch_norm(feature_map, decay=decay_bn, center=True, scale=scale_bn,
                                 epsilon=epsilon_bn, activation_fn=None, is_training=is_training)

        activation = tf.nn.elu(feature_map)
        activation = tf.nn.max_pool(activation, maxpool_ksize, [1, 1, 1, 1], padding='VALID',
                                    data_format='NHWC', name='maxpool')
        activation = tf.nn.dropout(activation, keep_prob=keep_prob)

    with tf.variable_scope("layer3"):
        filter_weights = weight_variable(KERNEL3, name="weights")
        feature_map = tf.nn.conv2d(activation, filter_weights, strides=[1, 1, 1, 1], padding='SAME')
        feature_map = batch_norm(feature_map, decay=decay_bn, center=True, scale=scale_bn,
                                 epsilon=epsilon_bn, activation_fn=None, is_training=is_training)

        activation = tf.nn.elu(feature_map)
        activation = tf.nn.max_pool(activation, maxpool_ksize, [1, 1, 1, 1], padding='VALID',
                                    data_format='NHWC', name='maxpool')
        activation = tf.nn.dropout(activation, keep_prob=keep_prob)

    with tf.variable_scope("layer4"):
        filter_weights = weight_variable(KERNEL_DEEP, name="weights")
        feature_map = tf.nn.conv2d(activation, filter_weights, strides=[1, 1, 1, 1], padding='SAME')
        feature_map = batch_norm(feature_map, decay=decay_bn, center=True, scale=scale_bn,
                                 epsilon=epsilon_bn, activation_fn=None, is_training=is_training)

        activation = tf.nn.elu(feature_map)
        activation = tf.nn.max_pool(activation, maxpool_ksize, [1, 1, 1, 1], padding='VALID',
                                    data_format='NHWC', name='maxpool')
        activation = tf.nn.dropout(activation, keep_prob=keep_prob)

    with tf.variable_scope("output"):
        dim = np.prod(activation.get_shape().as_list()[1:])
        flattened = tf.reshape(activation, [-1, dim])
        weights = weight_variable([dim, 1])
        bias = bias_variable([1])
        logits = tf.matmul(flattened, weights) + bias

    return logits
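All of the snippets on this page call weight_variable and bias_variable without defining them. A minimal sketch of what such helpers usually look like in TF1-style code (truncated-normal weights, small constant biases) is shown below; the actual initializers in each project may differ, and Example #1 additionally assumes a batch_norm helper (for instance tf.contrib.layers.batch_norm) plus module-level hyperparameters such as keep_prob and decay_bn.

import tensorflow as tf

def weight_variable(shape, name="weights"):
    # Truncated-normal initialization; the stddev of 0.1 is an assumption.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)

def bias_variable(shape, name="bias"):
    # Small positive constant so ReLU/ELU units start active.
    initial = tf.constant(0.1, shape=shape)
    return tf.Variable(initial, name=name)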
Example #2
    def make_output_graph(self):
        self.n_actions = self.env.action_space.n
        with tf.name_scope("policy"):
            self.W_policy = weight_variable([self.FC_2_SIZE, self.n_actions],
                                            name="W")
            self.b_policy = bias_variable([self.n_actions], name="b")
            self.policy_head = tf.matmul(self.fc_2_activation_policy,
                                         self.W_policy) + self.b_policy

        with tf.name_scope("value"):
            self.W_value = weight_variable([self.FC_2_SIZE, 1], name="W")
            self.b_value = bias_variable([1], name="b")
            self.value_head = tf.matmul(self.fc_2_activation_value,
                                        self.W_value) + self.b_value
Example #3
    def make_fc_2_graph(self):
        with tf.name_scope("fully_connected_2p"):
            self.W_fc_2_policy = weight_variable(
                [self.FC_1_SIZE, self.FC_2_SIZE], name="W")
            self.b_fc_2_policy = bias_variable([self.FC_2_SIZE], name="b")
            z = tf.matmul(self.fc_1_activation,
                          self.W_fc_2_policy) + self.b_fc_2_policy
            self.fc_2_activation_policy = tf.sigmoid(z)

        with tf.name_scope("fully_connected_2v"):
            self.W_fc_2_value = weight_variable(
                [self.FC_1_SIZE, self.FC_2_SIZE], name="W")
            self.b_fc_2_value = bias_variable([self.FC_2_SIZE], name="b")
            z = tf.matmul(self.fc_1_activation,
                          self.W_fc_2_value) + self.b_fc_2_value
            self.fc_2_activation_value = tf.sigmoid(z)
Example #4
 def make_output_graph(self):
     self.n_outputs = self.env.action_space.n
     with tf.name_scope("output"):
         self.W_output = weight_variable([self.HIDDEN_SIZE, self.n_outputs],
                                         name="W")
         self.b_output = bias_variable([self.n_outputs], name="b")
         z = tf.matmul(self.hidden_activation,
                       self.W_output) + self.b_output
         self.output = z
         self.action = tf.argmax(self.output, 1)
Example #5
    def make_hidden_graph(self):
        with tf.name_scope("flatten"):
            flat_input = tf.reshape(self.box_input, [-1, self.n_inputs - 1])
            flat_input = tf.concat([flat_input, self.chain_input], 1)

        with tf.name_scope("hidden"):
            self.W_hidden = weight_variable([self.n_inputs, self.HIDDEN_SIZE],
                                            name="W")
            self.b_hidden = bias_variable([self.HIDDEN_SIZE], name="b")
            z = tf.matmul(flat_input, self.W_hidden) + self.b_hidden
            self.hidden_activation = tf.sigmoid(z)
Example #6
 def make_convolution_graph(self):
     with tf.name_scope("convolution"):
         self.W_conv = weight_variable(
             [self.KERNEL_SIZE, self.KERNEL_SIZE, self.box_shape[0],
              self.NUM_FEATURES],
             name="W")
         self.b_conv = bias_variable([self.NUM_FEATURES], name="b")
         self.box_activations = []
         for box in self.box_inputs:
             z = conv2d(box, self.W_conv) + self.b_conv
             self.box_activations.append(tf.sigmoid(z))
Example #7
def ConvLayer(Input, FilterIn, FilterOut, Training, Scope):
    with tf.variable_scope(Scope):
        Weight = weight_variable([3, 3, FilterIn, FilterOut])
        if cfg.LeakyReLU:
            return tf.nn.leaky_relu(
                batch_norm_conv(conv2d(Input, Weight), FilterOut, Training))
        else:
            return tf.nn.relu(
                batch_norm_conv(conv2d(Input, Weight), FilterOut, Training))
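ConvLayer above relies on conv2d, batch_norm_conv, and a cfg.LeakyReLU flag that are defined elsewhere. A plausible sketch of the two helpers, assuming they wrap a stride-1 SAME convolution and tf.layers.batch_normalization, would be:

def conv2d(x, W):
    # Stride-1 convolution with SAME padding (assumed).
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

def batch_norm_conv(x, num_filters, training):
    # Batch normalization over the channel axis; num_filters is implied by x
    # and is kept only to mirror the call site above.
    return tf.layers.batch_normalization(x, training=training)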
Example #8
def Dang_FNN(demand, topo):

    x_demand = tf.reshape(demand, [-1,20,20,1])
    x_topo   = tf.reshape(topo, [-1,20,20,1])
    x_in = tf.concat([ x_demand, x_topo], 3)

    x_in_all = tf.reshape(x_in ,[-1,800])
    # y
    with tf.name_scope('D_fully_1_layer'):
      with tf.name_scope('Weight'):
        D_W_fc1 = util.weight_variable([800, 512])
      with tf.name_scope('biases'):
        D_b_fc1 = util.bias_variable([512])
      with tf.name_scope('relu'):
        D_h_fc1 = tf.nn.relu(tf.matmul(x_in_all, D_W_fc1) + D_b_fc1)

    with tf.name_scope('D_fully_2_layer'):
      with tf.name_scope('Weight'):
        D_W_fc2 = util.weight_variable([512, 128])
      with tf.name_scope('biases'):
        D_b_fc2 = util.bias_variable([128])
      with tf.name_scope('relu'):
        D_h_fc2 = tf.nn.relu(tf.matmul(D_h_fc1, D_W_fc2) + D_b_fc2)
    # dropout
    with tf.name_scope('dropout'):
       keep_prob1 = tf.placeholder(tf.float32, name = "drop_in")
       h_fc1_drop = tf.nn.dropout(D_h_fc2, keep_prob1)

    # readout
    with tf.name_scope('fully_layer'):
      with tf.name_scope('Weight'):
        W_out = util.weight_variable([128, 1])
      with tf.name_scope('biases'):
        b_out = util.bias_variable([1])

      y_out = tf.matmul(h_fc1_drop, W_out) + b_out

    #return y_out, keep_prob1, keep_prob2, keep_prob_d, keep_prob_t
    return y_out, keep_prob1
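A hedged usage sketch for Dang_FNN: the placeholder shapes follow the reshapes inside the function (20x20 demand and topology matrices), and demand_batch / topo_batch stand for whatever NumPy arrays the caller provides.

demand = tf.placeholder(tf.float32, [None, 20, 20], name="demand")
topo = tf.placeholder(tf.float32, [None, 20, 20], name="topo")
y_out, keep_prob1 = Dang_FNN(demand, topo)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    prediction = sess.run(y_out, feed_dict={demand: demand_batch,
                                            topo: topo_batch,
                                            keep_prob1: 1.0})  # keep_prob of 1.0 disables dropout at inference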
Example #9
    def make_fc_1_graph(self):
        conv_layer_size = (self.NUM_FEATURES * self.box_shape[1]
                           * self.box_shape[2])
        with tf.name_scope("flatten"):
            flat_input = self.chain_inputs[:]
            for activation in self.box_activations:
                flat_input.append(tf.reshape(activation,
                                             [-1, conv_layer_size]))
            flat_input = tf.concat(flat_input, 1)
        n_flat = self.HISTORY_SIZE * (1 + conv_layer_size)

        with tf.name_scope("fully_connected_1"):
            self.W_fc_1 = weight_variable([n_flat, self.FC_1_SIZE], name="W")
            self.b_fc_1 = bias_variable([self.FC_1_SIZE], name="b")
            z = tf.matmul(flat_input, self.W_fc_1) + self.b_fc_1
            self.fc_1_activation = tf.sigmoid(z)
Example #10
        # Input layer: this defines the format of the input data.
        x_raw = tf.placeholder(tf.float32, shape=[None, 784])  # 784 = 28x28, the flattened image size
        y = tf.placeholder(tf.float32, shape=[None, 10])  # 10 image classes, the digits 0-9

    with tf.name_scope('hidden0'):
        # First layer: the data is only reshaped here so the model can be reused later;
        # other preprocessing could be tried at this point.
        l_pool0 = tf.reshape(x_raw, shape=[-1, 28, 28, 1])  # the trailing 1 is the number of channels

        tf.summary.image('x_input', l_pool0, max_outputs=10)

    with tf.name_scope('hidden1'):
        # First convolutional layer; everything from here to the output layer is the model to keep.
        W_conv1 = util.weight_variable(
            [5, 5, 1, 32],
            name='W_conv1')  # the 1 in [5, 5, 1, 32] matches the previous layer's channel count; 32 is this layer's output count
        b_conv1 = util.bias_variable([32], name='b_conv1')  # 32 must match the 32 defined on the previous line
        l_conv1 = tf.nn.relu(
            tf.nn.conv2d(
                l_pool0, W_conv1, strides=[1, 1, 1, 1], padding='SAME') +
            b_conv1)  # apply the activation function
        # Pooling layer: strides of [1, 2, 2, 1] halve the height and width, giving [?, 14, 14, 32]
        l_pool1 = tf.nn.max_pool(l_conv1,
                                 ksize=[1, 2, 2, 1],
                                 strides=[1, 2, 2, 1],
                                 padding='SAME')

        tf.summary.histogram('W_conv1', W_conv1)
        tf.summary.histogram('b_conv1', b_conv1)
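The image and histogram summaries defined above only reach TensorBoard once they are merged, evaluated, and written out. A minimal sketch of that step follows; the log directory and the batch_images / batch_labels arrays are placeholders for whatever the surrounding training loop provides.

merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('logs', tf.get_default_graph())
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summary = sess.run(merged, feed_dict={x_raw: batch_images, y: batch_labels})
    writer.add_summary(summary, global_step=0)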
Example #11
def SCNN(demand, topo):

    x_demand = tf.reshape(demand, [-1,8,8,1])
    x_topo   = tf.reshape(topo, [-1,20,20,1])

    # convolution 1
    with tf.name_scope('D_cnn_1_layer'):
      with tf.name_scope('Weight'):
        W_conv1_d = util.weight_variable([3, 3, 1, 16])
      with tf.name_scope('biases'):
        b_conv1_d = util.bias_variable([16])
      d_conv1 = tf.nn.relu(util.conv2d(x_demand, W_conv1_d) + b_conv1_d)
      #d_conv1 = util.conv2d(x_demand, W_conv1_d) + b_conv1_d

    # convolution 2
    with tf.name_scope('D_cnn_2_layer'):
      with tf.name_scope('Weight'):
        W_conv2_d = util.weight_variable([3, 3, 16, 32])
      with tf.name_scope('biases'):
        b_conv2_d = util.bias_variable([32])
      d_conv2 = tf.nn.relu(util.conv2d(d_conv1, W_conv2_d) + b_conv2_d)
      #d_conv2 = util.conv2d(d_conv1, W_conv2_d) + b_conv2_d
      #d_conv2_flat = tf.reshape(d_conv2, [-1, 8 * 8 * 32])

    # dropout
    #with tf.name_scope('dropout'):
    #   keep_prob_d = tf.placeholder(tf.float32, name = "drop_in")
    #   d_drop = tf.nn.dropout(d_conv2, keep_prob_d)

    # convolution 1
    with tf.name_scope('T_cnn_1_layer'):
      with tf.name_scope('Weight'):
        W_conv1_t = util.weight_variable([5, 5, 1, 16])
      with tf.name_scope('biases'):
        b_conv1_t = util.bias_variable([16])
      t_conv1 = tf.nn.relu(util.conv2d(x_topo, W_conv1_t) + b_conv1_t)
      #t_conv1 = util.conv2d(x_topo, W_conv1_t) + b_conv1_t

    # convolution 2
    with tf.name_scope('T_cnn_2_layer'):
      with tf.name_scope('Weight'):
        W_conv2_t = util.weight_variable([5, 5, 16, 32])
      with tf.name_scope('biases'):
        b_conv2_t = util.bias_variable([32])
      t_conv2 = tf.nn.relu(util.conv2d(t_conv1, W_conv2_t) + b_conv2_t)
      #t_conv2 = util.conv2d(t_conv1, W_conv2_t) + b_conv2_t
      #t_conv2_flat = tf.reshape(t_conv2, [-1, 8 * 8 * 32])

    # dropout
    #with tf.name_scope('dropout'):
    #   keep_prob_t = tf.placeholder(tf.float32, name = "drop_in")
    #   t_drop = tf.nn.dropout(t_conv2, keep_prob_t)
    
    with tf.name_scope('Concat'):
        D_conv2_seq = tf.reshape(d_conv2,[-1,8 * 8,32])
        T_conv2_seq = tf.reshape(t_conv2,[-1,20 * 20,32])
        Demand_Topo = tf.concat([D_conv2_seq, T_conv2_seq], 1)
    # fully connected 1
    Demand_Topo_flat = tf.reshape(Demand_Topo, [-1, (64 + 400) * 32])
    with tf.name_scope('fully_1_layer'):
      with tf.name_scope('Weight'):
        W_fc1 = util.weight_variable([ (64 + 400)* 32, 512])
      with tf.name_scope('biases'):
        b_fc1 = util.bias_variable([512])
      with tf.name_scope('relu'):
        h_fc1 = tf.nn.relu(tf.matmul(Demand_Topo_flat, W_fc1) + b_fc1)

    # dropout
    with tf.name_scope('dropout'):
       keep_prob1 = tf.placeholder(tf.float32, name = "drop_in")
       h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob1)

    # fully connected 2
    with tf.name_scope('fully_2_layer'):
      with tf.name_scope('Weight'):
        W_fc2 = util.weight_variable([512, 128])
      with tf.name_scope('biases'):
        b_fc2 = util.bias_variable([128])
      with tf.name_scope('relu'):
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    # dropout
    with tf.name_scope('dropout'):
       keep_prob2 = tf.placeholder(tf.float32, name = "drop_in")
       h_drop = tf.nn.dropout(h_fc2, keep_prob2)

    # readout
    with tf.name_scope('fully_layer'):
      with tf.name_scope('Weight'):
        W_out = util.weight_variable([128, 1])
      with tf.name_scope('biases'):
        b_out = util.bias_variable([1])
    
      y_out = tf.matmul(h_drop, W_out) + b_out

    #return y_out, keep_prob1, keep_prob2, keep_prob_d, keep_prob_t
    return y_out, keep_prob1, keep_prob2
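The flatten width used in SCNN only works out if util.conv2d preserves spatial dimensions (stride 1 with SAME padding, an assumption here). Under that assumption the shapes at the Concat step line up as follows:

# d_conv2 : [batch, 8, 8, 32]   -> D_conv2_seq : [batch,  64, 32]
# t_conv2 : [batch, 20, 20, 32] -> T_conv2_seq : [batch, 400, 32]
# concat along axis 1           -> Demand_Topo : [batch, 464, 32]
flat_width = (8 * 8 + 20 * 20) * 32  # = 14848, matching W_fc1's first dimension of (64 + 400) * 32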
Example #12
def SFNN(demand, topo):

    x_demand = tf.reshape(demand, [-1,8 * 8])
    x_topo   = tf.reshape(topo, [-1,20 * 20])

    # Demand Fully Connected 1
    with tf.name_scope('D_fully_1_layer'):
      with tf.name_scope('Weight'):
        D_W_fc1 = util.weight_variable([64, 128])
      with tf.name_scope('biases'):
        D_b_fc1 = util.bias_variable([128])
      with tf.name_scope('relu'):
        D_h_fc1 = tf.nn.relu(tf.matmul(x_demand, D_W_fc1) + D_b_fc1)

    # Demand Fully Connected 2
    with tf.name_scope('D_fully_2_layer'):
      with tf.name_scope('Weight'):
        D_W_fc2 = util.weight_variable([ 128, 64])
      with tf.name_scope('biases'):
        D_b_fc2 = util.bias_variable([64])
      with tf.name_scope('relu'):
        D_h_fc2 = tf.nn.relu(tf.matmul(D_h_fc1, D_W_fc2) + D_b_fc2)


    # Topo Fully Connected 1
    with tf.name_scope('T_fully_1_layer'):
      with tf.name_scope('Weight'):
        T_W_fc1 = util.weight_variable([400, 512])
      with tf.name_scope('biases'):
        T_b_fc1 = util.bias_variable([512])
      with tf.name_scope('relu'):
        T_h_fc1 = tf.nn.relu(tf.matmul(x_topo, T_W_fc1) + T_b_fc1)

    # Topo Fully Connected 2
    with tf.name_scope('T_fully_2_layer'):
      with tf.name_scope('Weight'):
        T_W_fc2 = util.weight_variable([ 512, 256])
      with tf.name_scope('biases'):
        T_b_fc2 = util.bias_variable([256])
      with tf.name_scope('relu'):
        T_h_fc2 = tf.nn.relu(tf.matmul(T_h_fc1, T_W_fc2) + T_b_fc2)
   
    with tf.name_scope('Concat'):
        Demand_Topo = tf.concat([D_h_fc2, T_h_fc2], 1)
    # fully connected 1
    Demand_Topo_flat = tf.reshape(Demand_Topo, [-1,  256 + 64])
    with tf.name_scope('fully_1_layer'):
      with tf.name_scope('Weight'):
        W_fc1 = util.weight_variable([ 256 + 64, 512])
      with tf.name_scope('biases'):
        b_fc1 = util.bias_variable([512])
      with tf.name_scope('relu'):
        h_fc1 = tf.nn.relu(tf.matmul(Demand_Topo_flat, W_fc1) + b_fc1)

    # fully connected 2
    with tf.name_scope('fully_2_layer'):
      with tf.name_scope('Weight'):
        W_fc2 = util.weight_variable([512, 128])
      with tf.name_scope('biases'):
        b_fc2 = util.bias_variable([128])
      with tf.name_scope('relu'):
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1, W_fc2) + b_fc2)

    # dropout
    with tf.name_scope('dropout'):
       keep_prob1 = tf.placeholder(tf.float32, name = "drop_in")
       h_drop = tf.nn.dropout(h_fc2, keep_prob1)
    # readout
    with tf.name_scope('fully_layer'):
      with tf.name_scope('Weight'):
        W_out = util.weight_variable([128, 1])
      with tf.name_scope('biases'):
        b_out = util.bias_variable([1])

      y_out = tf.matmul(h_drop, W_out) + b_out

    return y_out, keep_prob1
Example #13
sess = tf.InteractiveSession(config=config_opt)

# TensorBoard debug view.
file_writer = tf.summary.FileWriter('LOGS', sess.graph)

# Create placeholders for independent and dependent variables once a batch has been selected.
with tf.name_scope('Input_Image'):
	x = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='Image')  # Independent variables.
	# Reshape to amenable shape.
	# x_image = tf.reshape(x, [-1, windowSize[0], windowSize[1], 1])
with tf.name_scope('Input_Synapse'):
	y_syn = tf.placeholder(tf.float32, shape=[None, 2])  # Target values.

with tf.name_scope('First_Layer'):
	# Create first convolutional layer. (No pooling.)
	W_conv1 = util.weight_variable(firstLayerDimensions, "w_conv_1")  # Weights in first layer.
	b_conv1 = util.bias_variable([firstLayerDimensions[3]], "b_conv_1")  # Biases in first layer.
	h_conv1 = tf.nn.relu(util.conv2d(x, W_conv1, valid=True, stride=1) + b_conv1)  # Perform convolution (with zero padding) and apply ReLU.
	h_pool1 = util.max_pool(h_conv1, 1) #, kernelWidth=2)

with tf.name_scope('Second_Layer'):
	# Create second convolutional layer.
	W_conv2 = util.weight_variable(secondLayerDimensions, "w_conv_2")  # Weights in second layer.
	b_conv2 = util.bias_variable([secondLayerDimensions[3]], "b_conv_2")  # Biases in second layer.
	h_conv2 = tf.nn.relu(util.atrous_conv2d(h_pool1, W_conv2, valid=True, rate=2) + b_conv2)  # Perform convolution (with zero padding) and apply ReLU.
	h_pool2 = util.atrous_max_pool(h_conv2, mask_size=2, rate=2)

with tf.name_scope('Third_Layer'):
	# Create third convolutional layer.
	W_conv3 = util.weight_variable(thirdLayerDimensions, "w_conv_3")  # Weights in third layer.
	b_conv3 = util.bias_variable([thirdLayerDimensions[3]], "b_conv_3")  # Biases in third layer.