Example #1
def build_atari(minimap, screen, info, msize, ssize, num_action):
  # Extract features
  mconv1 = layers.conv2d(tf.transpose(minimap, [0, 2, 3, 1]),
                         num_outputs=16,
                         kernel_size=8,
                         stride=4,
                         scope='mconv1')
  mconv2 = layers.conv2d(mconv1,
                         num_outputs=32,
                         kernel_size=4,
                         stride=2,
                         scope='mconv2')
  sconv1 = layers.conv2d(tf.transpose(screen, [0, 2, 3, 1]),
                         num_outputs=16,
                         kernel_size=8,
                         stride=4,
                         scope='sconv1')
  sconv2 = layers.conv2d(sconv1,
                         num_outputs=32,
                         kernel_size=4,
                         stride=2,
                         scope='sconv2')
  info_fc = layers.fully_connected(layers.flatten(info),
                                   num_outputs=256,
                                   activation_fn=tf.tanh,
                                   scope='info_fc')

  # Compute spatial actions, non-spatial actions and value
  feat_fc = tf.concat([layers.flatten(mconv2), layers.flatten(sconv2), info_fc], axis=1)
  feat_fc = layers.fully_connected(feat_fc,
                                   num_outputs=256,
                                   activation_fn=tf.nn.relu,
                                   scope='feat_fc')

  spatial_action_x = layers.fully_connected(feat_fc,
                                            num_outputs=ssize,
                                            activation_fn=tf.nn.softmax,
                                            scope='spatial_action_x')
  spatial_action_y = layers.fully_connected(feat_fc,
                                            num_outputs=ssize,
                                            activation_fn=tf.nn.softmax,
                                            scope='spatial_action_y')
  spatial_action_x = tf.reshape(spatial_action_x, [-1, 1, ssize])
  spatial_action_x = tf.tile(spatial_action_x, [1, ssize, 1])
  spatial_action_y = tf.reshape(spatial_action_y, [-1, ssize, 1])
  spatial_action_y = tf.tile(spatial_action_y, [1, 1, ssize])
  spatial_action = layers.flatten(spatial_action_x * spatial_action_y)

  non_spatial_action = layers.fully_connected(feat_fc,
                                              num_outputs=num_action,
                                              activation_fn=tf.nn.softmax,
                                              scope='non_spatial_action')
  value = tf.reshape(layers.fully_connected(feat_fc,
                                            num_outputs=1,
                                            activation_fn=None,
                                            scope='value'), [-1])

  return spatial_action, non_spatial_action, value
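The spatial head above factors the policy over screen coordinates into independent x and y softmax marginals and recombines them with an outer product (the reshape/tile/multiply steps). A minimal NumPy sketch of that recombination, using made-up marginals and a small ssize for illustration:

import numpy as np

ssize = 4
px = np.random.dirichlet(np.ones(ssize))   # hypothetical P(x), sums to 1
py = np.random.dirichlet(np.ones(ssize))   # hypothetical P(y), sums to 1

# Same tile-and-multiply as the TF code: joint[y, x] = P(y) * P(x).
joint = np.tile(py[:, None], (1, ssize)) * np.tile(px[None, :], (ssize, 1))
spatial_action = joint.flatten()           # length ssize * ssize
assert np.isclose(spatial_action.sum(), 1.0)  # still a valid distribution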
Example #2
def lenet3_traffic(features, keep_prob):
    """
    Define simple Lenet-like model with one convolution layer and three fully
    connected layers.
    """
    # Convolutional layer 1
    l1_strides = (1, 1, 1, 1)
    l1_padding = 'VALID'
    l1_conv = tf.nn.conv2d(features, L1_W, l1_strides, l1_padding)
    l1_biases = tf.nn.bias_add(l1_conv, L1_B)

    # Activation.
    l1_relu = tf.nn.relu(l1_biases)

    # Pooling. Input = 28x28xL1_DEPTH. Output = 14x14xL1_DEPTH.
    l1_pool = tf.nn.max_pool(l1_relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], \
        padding='VALID')

    # Flatten. Input = 14x14xL1_DEPTH. Output = L1_SIZE.
    flat = flatten(l1_pool)
    print("Flatten dimensions:", flat.get_shape())

    # Layer 2: Fully Connected. Input = L1_SIZE. Output = L2_SIZE.
    l2_linear = tf.add(tf.matmul(flat, L2_W), L2_B)

    # Activation.
    l2_relu = tf.nn.relu(l2_linear)
    l2_drop = tf.nn.dropout(l2_relu, keep_prob)

    # Layer 3: Fully Connected. Input = 500. Output = 43.
    return tf.add(tf.matmul(l2_drop, L3_W), L3_B)
Example #3
def model(img_in, num_actions, scope, noisy=False, reuse=False,
          concat_softmax=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
                                       stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
                                       stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
                                       stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                out = noisy_dense(out, name='noisy_fc1', size=512,
                                  activation_fn=tf.nn.relu)
                out = noisy_dense(out, name='noisy_fc2', size=num_actions)
            else:
                out = layers.fully_connected(out, num_outputs=512,
                                             activation_fn=tf.nn.relu)
                out = layers.fully_connected(out, num_outputs=num_actions,
                                             activation_fn=None)
            # V: Softmax - inspired by deep-rl-attack #
            if concat_softmax:
                out = tf.nn.softmax(out)
        return out
Example #4
    def Dense_net(self, input_x):
        x = conv_layer(input_x, filter=2 * self.filters, kernel=[7,7], stride=2, layer_name='conv0')
        x = Max_Pooling(x, pool_size=[3,3], stride=2)



        for i in range(self.nb_blocks) :
            # 6 -> 12 -> 48
            x = self.dense_block(input_x=x, nb_layers=4, layer_name='dense_'+str(i))
            x = self.transition_layer(x, scope='trans_'+str(i))


        """
        x = self.dense_block(input_x=x, nb_layers=6, layer_name='dense_1')
        x = self.transition_layer(x, scope='trans_1')

        x = self.dense_block(input_x=x, nb_layers=12, layer_name='dense_2')
        x = self.transition_layer(x, scope='trans_2')

        x = self.dense_block(input_x=x, nb_layers=48, layer_name='dense_3')
        x = self.transition_layer(x, scope='trans_3')
        """

        x = self.dense_block(input_x=x, nb_layers=32, layer_name='dense_final')

        # 100 Layer
        x = Batch_Normalization(x, training=self.training, scope='linear_batch')
        x = Relu(x)
        x = Global_Average_Pooling(x)
        x = flatten(x)
        x = Linear(x)


        # x = tf.reshape(x, [-1, 10])
        return x
Example #5
def dueling_model(img_in, num_actions, scope, reuse=False, layer_norm=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            state_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                state_hidden = layer_norm_fn(state_hidden, relu=True)
            else:
                state_hidden = tf.nn.relu(state_hidden)
            state_score = layers.fully_connected(state_hidden, num_outputs=1, activation_fn=None)
        with tf.variable_scope("action_value"):
            actions_hidden = layers.fully_connected(conv_out, num_outputs=512, activation_fn=None)
            if layer_norm:
                actions_hidden = layer_norm_fn(actions_hidden, relu=True)
            else:
                actions_hidden = tf.nn.relu(actions_hidden)
            action_scores = layers.fully_connected(actions_hidden, num_outputs=num_actions, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(action_scores_mean, 1)
        return state_score + action_scores
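The dueling head combines the two streams as Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), which is what the mean subtraction above implements. A minimal NumPy sketch of that combination with made-up numbers:

import numpy as np

state_score = np.array([[1.5]])                 # V(s), shape [batch, 1]
action_scores = np.array([[0.2, -0.1, 0.4]])    # A(s, a), shape [batch, num_actions]

# Same centering and broadcast-add as the TF code above.
centered = action_scores - action_scores.mean(axis=1, keepdims=True)
q_values = state_score + centered               # -> [[1.533..., 1.233..., 1.733...]]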
Example #6
    def Build_SEnet(self, input_x):
        input_x = tf.pad(input_x, [[0, 0], [32, 32], [32, 32], [0, 0]])
        # size 32 -> 96
        # only cifar10 architecture

        x = self.Stem(input_x, scope='stem')

        for i in range(4) :
            x = self.Inception_A(x, scope='Inception_A'+str(i))
            channel = int(np.shape(x)[-1])
            x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_A'+str(i))

        x = self.Reduction_A(x, scope='Reduction_A')

        for i in range(7)  :
            x = self.Inception_B(x, scope='Inception_B'+str(i))
            channel = int(np.shape(x)[-1])
            x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_B'+str(i))

        x = self.Reduction_B(x, scope='Reduction_B')

        for i in range(3) :
            x = self.Inception_C(x, scope='Inception_C'+str(i))
            channel = int(np.shape(x)[-1])
            x = self.Squeeze_excitation_layer(x, out_dim=channel, ratio=reduction_ratio, layer_name='SE_C'+str(i))

        x = Global_Average_Pooling(x)
        x = Dropout(x, rate=0.2, training=self.training)
        x = flatten(x)

        x = Fully_connected(x, layer_name='final_fully_connected')
        return x
Example #7
def _cnn_to_mlp(convs, hiddens, dueling, inpt, num_actions, scope, reuse=False, layer_norm=False):
    with tf.variable_scope(scope, reuse=reuse):
        out = inpt
        with tf.variable_scope("convnet"):
            for num_outputs, kernel_size, stride in convs:
                out = layers.convolution2d(out,
                                           num_outputs=num_outputs,
                                           kernel_size=kernel_size,
                                           stride=stride,
                                           activation_fn=tf.nn.relu)
        conv_out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            action_out = conv_out
            for hidden in hiddens:
                action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                if layer_norm:
                    action_out = layers.layer_norm(action_out, center=True, scale=True)
                action_out = tf.nn.relu(action_out)
            action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

        if dueling:
            with tf.variable_scope("state_value"):
                state_out = conv_out
                for hidden in hiddens:
                    state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        state_out = layers.layer_norm(state_out, center=True, scale=True)
                    state_out = tf.nn.relu(state_out)
                state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
            q_out = state_score + action_scores_centered
        else:
            q_out = action_scores
        return q_out
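A hypothetical call of _cnn_to_mlp, assuming a batched image placeholder obs_ph defined elsewhere; the conv and hidden settings below mirror the Atari architecture used in the other examples:

# obs_ph = tf.placeholder(tf.float32, [None, 84, 84, 4])  # assumed to exist
q_out = _cnn_to_mlp(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)],
                    hiddens=[512],
                    dueling=True,
                    inpt=obs_ph,
                    num_actions=6,
                    scope='q_func',
                    layer_norm=False)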
Example #8
File: task.py  Project: spwcd/QTML
def conv_model(X, Y_, mode):
    XX = tf.reshape(X, [-1, 28, 28, 1])
    biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
    Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6], biases_initializer=biasInit)
    Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
    Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
    Y4 = layers.flatten(Y3)
    Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
    # to deactivate dropout on the dense layer, set keep_prob=1
    Y5d = layers.dropout(Y5, keep_prob=0.75, noise_shape=None, is_training=mode==learn.ModeKeys.TRAIN)
    Ylogits = layers.linear(Y5d, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)

    loss = conv_model_loss(Ylogits, Y_, mode)
    train_op = conv_model_train_op(loss, mode)
    eval_metrics = conv_model_eval_metrics(classes, Y_, mode)

    return learn.ModelFnOps(
        mode=mode,
        # You can name the fields of your predictions dictionary as you like.
        predictions={"predictions": predict, "classes": classes},
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metrics
    )
Example #9
    def q_func_builder(input_placeholder, num_actions, scope, reuse=False):
        with tf.variable_scope(scope, reuse=reuse):
            latent = network(input_placeholder)
            if isinstance(latent, tuple):
                if latent[1] is not None:
                    raise NotImplementedError("DQN is not compatible with recurrent policies yet")
                latent = latent[0]

            latent = layers.flatten(latent)

            with tf.variable_scope("action_value"):
                action_out = latent
                for hidden in hiddens:
                    action_out = layers.fully_connected(action_out, num_outputs=hidden, activation_fn=None)
                    if layer_norm:
                        action_out = layers.layer_norm(action_out, center=True, scale=True)
                    action_out = tf.nn.relu(action_out)
                action_scores = layers.fully_connected(action_out, num_outputs=num_actions, activation_fn=None)

            if dueling:
                with tf.variable_scope("state_value"):
                    state_out = latent
                    for hidden in hiddens:
                        state_out = layers.fully_connected(state_out, num_outputs=hidden, activation_fn=None)
                        if layer_norm:
                            state_out = layers.layer_norm(state_out, center=True, scale=True)
                        state_out = tf.nn.relu(state_out)
                    state_score = layers.fully_connected(state_out, num_outputs=1, activation_fn=None)
                action_scores_mean = tf.reduce_mean(action_scores, 1)
                action_scores_centered = action_scores - tf.expand_dims(action_scores_mean, 1)
                q_out = state_score + action_scores_centered
            else:
                q_out = action_scores
            return q_out
Example #10
def dueling_model(img_in, num_actions, scope, noisy=False, reuse=False,
                  concat_softmax=False):
    """As described in https://arxiv.org/abs/1511.06581"""
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8,
                                       stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4,
                                       stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3,
                                       stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)

        with tf.variable_scope("state_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                state_hidden = noisy_dense(out, name='noisy_fc1', size=512,
                                           activation_fn=tf.nn.relu)
                state_score = noisy_dense(state_hidden, name='noisy_fc2',
                                          size=1)
            else:
                state_hidden = layers.fully_connected(
                    out,
                    num_outputs=512,
                    activation_fn=tf.nn.relu
                )
                state_score = layers.fully_connected(state_hidden,
                                                     num_outputs=1,
                                                     activation_fn=None)
        with tf.variable_scope("action_value"):
            if noisy:
                # Apply noisy network on fully connected layers
                # ref: https://arxiv.org/abs/1706.10295
                actions_hidden = noisy_dense(out, name='noisy_fc1', size=512,
                                             activation_fn=tf.nn.relu)
                action_scores = noisy_dense(actions_hidden, name='noisy_fc2',
                                            size=num_actions)
            else:
                actions_hidden = layers.fully_connected(
                    out,
                    num_outputs=512,
                    activation_fn=tf.nn.relu
                )
                action_scores = layers.fully_connected(
                    actions_hidden,
                    num_outputs=num_actions,
                    activation_fn=None
                )
            action_scores_mean = tf.reduce_mean(action_scores, 1)
            action_scores = action_scores - tf.expand_dims(
                action_scores_mean,
                1
            )

        return state_score + action_scores
Example #11
def to_trans(input):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    W_init = tf.constant_initializer(np.zeros((num_inputs, 2)))
    b_init = tf.constant_initializer(np.array([0.,0.]))
    return layers.fully_connected(input, 2,
            weights_initializer=W_init,
            biases_initializer=b_init)
Example #12
def dummy_discriminator_fn(input_data, num_domains, mode):
  del mode

  hidden = layers.flatten(input_data)
  output_src = math_ops.reduce_mean(hidden, axis=1)
  output_cls = layers.fully_connected(
      inputs=hidden, num_outputs=num_domains, scope='debug')

  return output_src, output_cls
Example #13
def LeNet(x):
    
    # Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6), mean = 0, stddev = 0.1))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1   = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
    
    # Activation 1.
    conv1 = tf.nn.relu(conv1)

    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    
    
    # Layer 2: Convolutional. Input = 14x14x6. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = 0, stddev = 0.1))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2   = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    
    # Activation 2.
    conv2 = tf.nn.relu(conv2)

    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    
    # Flatten. Input = 5x5x16. Output = 400.
    flattened   = flatten(conv2)
    
    #Matrix multiplication
    #input: 1x400
    #weight: 400x120 
    #Matrix multiplication(dot product rule)
    #output = 1x400 * 400*120 => 1x120
    
     # Layer 3: Fully Connected. Input = 400. Output = 120.
    fullyc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = 0, stddev = 0.1))
    fullyc1_b = tf.Variable(tf.zeros(120))
    fullyc1   = tf.matmul(flattened, fullyc1_W) + fullyc1_b
    
    # Full connected layer activation 1.
    fullyc1    = tf.nn.relu(fullyc1)
    
    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fullyc2_W  = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = 0, stddev = 0.1))
    fullyc2_b  = tf.Variable(tf.zeros(84))
    fullyc2    = tf.matmul(fullyc1, fullyc2_W) + fullyc2_b
    
    # Full connected layer activation 2.
    fullyc2    = tf.nn.relu(fullyc2)
    
    # Layer 5: Fully Connected. Input = 84. Output = 43.
    fullyc3_W  = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = 0, stddev = 0.1))
    fullyc3_b  = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fullyc2, fullyc3_W) + fullyc3_b
    
    return logits
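A minimal sketch of how these 43-class logits might be trained (not part of the original snippet), assuming placeholders x and y with integer labels are defined elsewhere:

logits = LeNet(x)
one_hot_y = tf.one_hot(y, 43)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y,
                                                        logits=logits)
loss = tf.reduce_mean(cross_entropy)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)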
Example #14
File: nets.py  Project: 1202kbs/GAN
	def __call__(self, x, reuse=False):
		with tf.variable_scope(self.name) as scope:
			if reuse:
				scope.reuse_variables()
			size = 64
			shared = tcl.conv2d(x, num_outputs=size, kernel_size=4, # bzx64x64x3 -> bzx32x32x64
						stride=2, activation_fn=tf.nn.relu)
			shared = tcl.conv2d(shared, num_outputs=size * 2, kernel_size=4, # 16x16x128
						stride=2, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			shared = tcl.conv2d(shared, num_outputs=size * 4, kernel_size=4, # 8x8x256
						stride=2, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			shared = tcl.conv2d(shared, num_outputs=size * 8, kernel_size=3, # 4x4x512
						stride=2, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)

			shared = tcl.fully_connected(tcl.flatten( # reshape, 1
						shared), 1024, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			
			v = tcl.fully_connected(tcl.flatten(shared), 128)
			return v
Example #15
def MyNet(x):
   
    mu = 0
    sigma = 0.1
    global conv2_dropout
    
    keep_prob = tf.constant(0.5,dtype=tf.float32)
    
    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5,1, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1   = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b
        
    # Dropout.
    conv1 = tf.nn.dropout(conv1,keep_prob)

    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
        
    # Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
   
    # Dropout.
    conv2_dropout = tf.nn.dropout(conv2,keep_prob)
    
    # Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2_max = tf.nn.max_pool(conv2_dropout, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2_max)

    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1   = tf.matmul(fc0, fc1_W) + fc1_b

    # Activation.
    fc1 = tf.nn.sigmoid(fc1)
    
    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W  = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b  = tf.Variable(tf.zeros(84))
    fc2    = tf.matmul(fc1, fc2_W) + fc2_b

    # Activation.
    fc2 = tf.nn.sigmoid(fc2)

    # Layer 5: Fully Connected. Input = 84. Output = n_classes.
    fc3_W  = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean = mu, stddev = sigma))
    fc3_b  = tf.Variable(tf.zeros(n_classes))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    
    return logits
Example #16
    def create_linear_inference_op(self, images):
        """
        Performs a forward pass estimating label maps from RGB images using only a linear classifier.

        :param images: The RGB images tensor.
        :type images: tf.Tensor
        :return: The label maps tensor.
        :rtype: tf.Tensor
        """
        predicted_labels = fully_connected(flatten(images), 2, activation_fn=None)
        return predicted_labels
Example #17
File: DQN.py  Project: bigtreezhudi/dqn
    def create_network(self, scope):
        with tf.variable_scope(scope, reuse=False):
            state_input = tf.placeholder('float', [None, 84, 84, 4])
            out = layers.convolution2d(state_input, num_outputs=32, kernel_size=8, stride=1, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)

            conv_out = layers.flatten(out)
            value_out = layers.fully_connected(conv_out, num_outputs=256, activation_fn=tf.nn.relu)
            q_value = layers.fully_connected(value_out, num_outputs=self.action_dim, activation_fn=None)
            return state_input, q_value
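A hypothetical way to query the returned tensors for a greedy action, assuming a live tf.Session named sess and a preprocessed observation obs of shape [84, 84, 4]:

import numpy as np

# state_input, q_value = agent.create_network('q_network')  # built as above
q = sess.run(q_value, feed_dict={state_input: obs[None]})   # shape [1, action_dim]
greedy_action = int(np.argmax(q, axis=1)[0])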
Example #18
 def _discriminator_fn(inputs, num_domains):
   """Differentiable dummy discriminator for StarGAN."""
   hidden = layers.flatten(inputs)
   output_src = math_ops.reduce_mean(hidden, axis=1)
   output_cls = layers.fully_connected(
       inputs=hidden,
       num_outputs=num_domains,
       activation_fn=None,
       normalizer_fn=None,
       biases_initializer=None)
   return output_src, output_cls
Example #19
def LeNet(x):
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    
    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x32.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 32), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(32))
    conv1   = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)

    # SOLUTION: Pooling. Input = 28x28x32. Output = 14x14x32.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Layer 2: Convolutional. Output = 10x10x64.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 32, 64), mean = mu, stddev = sigma))
    conv2_b = tf.Variable(tf.zeros(64))
    conv2   = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    
    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)

    # SOLUTION: Pooling. Input = 10x10x64. Output = 5x5x64.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Flatten. Input = 5x5x64 Output = 1600.
    fc0   = flatten(conv2)
    
    # SOLUTION: Layer 3: Fully Connected. Input = 1600. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(1600, 120), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1   = tf.matmul(fc0, fc1_W) + fc1_b
    
    # SOLUTION: Activation.
    fc1    = tf.nn.relu(fc1)

    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W  = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    fc2_b  = tf.Variable(tf.zeros(84))
    fc2    = tf.matmul(fc1, fc2_W) + fc2_b
    
    # SOLUTION: Activation.
    fc2    = tf.nn.relu(fc2)
    fc2 = tf.nn.dropout(fc2, dropout)

    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W  = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    fc3_b  = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    
    return logits
Example #20
    def create_two_layer_inference_op(self, images):
        """
        Performs a forward pass estimating label maps from RGB images using 2 fully connected layers.

        :param images: The RGB images tensor.
        :type images: tf.Tensor
        :return: The label maps tensor.
        :rtype: tf.Tensor
        """
        fc1_output = fully_connected(flatten(images), 64, activation_fn=leaky_relu)
        predicted_labels = fully_connected(fc1_output, 2, activation_fn=None)
        return predicted_labels
Example #21
File: nets.py  Project: 1202kbs/GAN
	def __call__(self, x, reuse=True):
		with tf.variable_scope(self.name) as scope:
			if reuse:
				scope.reuse_variables()
			size = 64
			shared = tcl.conv2d(x, num_outputs=size, kernel_size=3, # bzx64x64x3 -> bzx32x32x64
						stride=2, activation_fn=lrelu)
			shared = tcl.conv2d(shared, num_outputs=size * 2, kernel_size=3, # 16x16x128
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			shared = tcl.conv2d(shared, num_outputs=size * 4, kernel_size=3, # 8x8x256
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			#d = tcl.conv2d(d, num_outputs=size * 8, kernel_size=3, # 4x4x512
			#			stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			shared = tcl.fully_connected(tcl.flatten( # reshape, 1
						shared), 1024, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			
			d = tcl.fully_connected(tcl.flatten(shared), 1, activation_fn=None)

			q = tcl.fully_connected(tcl.flatten(shared), 128, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			q = tcl.fully_connected(q, 10, activation_fn=None) # 10 classes
		
			return d,q
Example #22
def conv_model(X, Y_, mode):
    XX = tf.reshape(X, [-1, 28, 28, 1])
    biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
    Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6], biases_initializer=biasInit)
    Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
    Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
    Y4 = layers.flatten(Y3)
    Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
    Ylogits = layers.linear(Y5, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(Ylogits, tf.one_hot(Y_, 10)))*100
    train_op = layers.optimize_loss(loss, framework.get_global_step(), 0.001, "Adam")
    return {"predictions":predict, "classes": classes}, loss, train_op
Example #23
def conv_model(X, Y_):
   XX = tf.reshape(X, [-1, 28, 28, 1])
   Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6])
   Y2 = layers.conv2d(Y1,  num_outputs=12, kernel_size=[5, 5], stride=2)
   Y3 = layers.conv2d(Y2,  num_outputs=24, kernel_size=[4, 4], stride=2)
   Y4 = layers.flatten(Y3)
   Y5 = layers.relu(Y4, 200)
   Ylogits = layers.linear(Y5, 10)
   predict = tf.nn.softmax(Ylogits)

   classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
   loss = tf.nn.softmax_cross_entropy_with_logits(Ylogits, tf.one_hot(Y_, 10))
   train_op = layers.optimize_loss(loss, framework.get_global_step(), 0.003, "Adam")
   return {"predictions":predict, "classes": classes}, loss, train_op
Example #24
    def Build_SEnet(self, input_x):
        # only cifar10 architecture

        input_x = self.first_layer(input_x, scope='first_layer')

        x = self.residual_layer(input_x, out_dim=64, layer_num='1')
        x = self.residual_layer(x, out_dim=128, layer_num='2')
        x = self.residual_layer(x, out_dim=256, layer_num='3')

        x = Global_Average_Pooling(x)
        x = flatten(x)

        x = Fully_connected(x, layer_name='final_fully_connected')
        return x
Example #25
    def Build_ResNext(self, input_x):
        # only cifar10 architecture

        input_x = self.first_layer(input_x, scope='first_layer')

        x = self.residual_layer(input_x, out_dim=64, layer_num='1')
        x = self.residual_layer(x, out_dim=128, layer_num='2')
        x = self.residual_layer(x, out_dim=256, layer_num='3')

        x = Global_Average_Pooling(x)
        x = flatten(x)
        x = Linear(x)

        # x = tf.reshape(x, [-1,10])
        return x
Example #26
File: nets.py  Project: 1202kbs/GAN
	def __call__(self, x, reuse=False):
		with tf.variable_scope(self.name) as scope:
			if reuse:
				scope.reuse_variables()
			size = 64
			shared = tcl.conv2d(x, num_outputs=size, kernel_size=5, # bzx28x28x1 -> bzx14x14x64
						stride=2, activation_fn=tf.nn.relu)
			shared = tcl.conv2d(shared, num_outputs=size * 2, kernel_size=5, # 7x7x128
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			shared = tcl.fully_connected(tcl.flatten( # reshape, 1
						shared), 1024, activation_fn=tf.nn.relu)
			
			#c = tcl.fully_connected(shared, 128, activation_fn=tf.nn.relu, normalizer_fn=tcl.batch_norm)
			c = tcl.fully_connected(shared, 10, activation_fn=None) # 10 classes
			return c
Example #27
def atari_model(img_in, num_actions, scope, reuse=False):
    # as described in https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
    with tf.variable_scope(scope, reuse=reuse):
        out = img_in
        with tf.variable_scope("convnet"):
            # original architecture
            out = layers.convolution2d(out, num_outputs=32, kernel_size=8, stride=4, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=4, stride=2, activation_fn=tf.nn.relu)
            out = layers.convolution2d(out, num_outputs=64, kernel_size=3, stride=1, activation_fn=tf.nn.relu)
        out = layers.flatten(out)
        with tf.variable_scope("action_value"):
            out = layers.fully_connected(out, num_outputs=512,         activation_fn=tf.nn.relu)
            out = layers.fully_connected(out, num_outputs=num_actions, activation_fn=None)

        return out
Example #28
File: nets.py  Project: 1202kbs/GAN
	def __call__(self, x, reuse=False):
		with tf.variable_scope(self.name) as scope:
			if reuse:
				scope.reuse_variables()
			size = 64
			shared = tcl.conv2d(x, num_outputs=size, kernel_size=4, # bzx28x28x1 -> bzx14x14x64
						stride=2, activation_fn=lrelu)
			shared = tcl.conv2d(shared, num_outputs=size * 2, kernel_size=4, # 7x7x128
						stride=2, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			shared = tcl.flatten(shared)
			
			d = tcl.fully_connected(shared, 1, activation_fn=None, weights_initializer=tf.random_normal_initializer(0, 0.02))
			q = tcl.fully_connected(shared, 128, activation_fn=lrelu, normalizer_fn=tcl.batch_norm)
			q = tcl.fully_connected(q, 2, activation_fn=None) # 2 outputs
			return d, q
Example #29
def to_loc(input, is_simple=False):
    if len(input.get_shape()) == 4:
        input = layers.flatten(input)
    num_inputs = input.get_shape()[1]
    num_outputs = 3 if is_simple else 6
    W_init = tf.constant_initializer(
            np.zeros((num_inputs, num_outputs)))
    if is_simple:
        b_init = tf.constant_initializer(np.array([1.,0.,0.]))
    else:
        b_init = tf.constant_initializer(np.array([1.,0.,0.,0.,1.,0.]))

    return layers.fully_connected(input, num_outputs,
            activation_fn=None,
            weights_initializer=W_init,
            biases_initializer=b_init)
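The zero weight matrix and the bias [1., 0., 0., 0., 1., 0.] make this layer start out predicting the identity affine transform (the flattened 2x3 matrix a spatial transformer's localization network expects). A quick NumPy check of that reshape:

import numpy as np

theta = np.array([1., 0., 0., 0., 1., 0.])  # initial bias from to_loc (is_simple=False)
print(theta.reshape(2, 3))
# [[1. 0. 0.]
#  [0. 1. 0.]]  -> identity mapping: x' = x, y' = y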
Example #30
def make_network(convs,
                 fcs,
                 padding,
                 lstm,
                 obs_t,
                 action_tm1,
                 reward_t,
                 rnn_state_tuple,
                 num_actions,
                 lstm_unit,
                 scope,
                 reuse=None):
    with tf.variable_scope(scope, reuse=reuse):
        out = make_convs(obs_t, convs, padding)

        out = layers.flatten(out)

        with tf.variable_scope('hiddens'):
            for hidden in fcs:
                out = layers.fully_connected(
                    out, hidden, activation_fn=tf.nn.relu)

        reward_t = tf.reshape(reward_t, [-1, 1])
        out = tf.concat([out, action_tm1, reward_t], axis=1)

        with tf.variable_scope('rnn'):
            lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_unit, state_is_tuple=True)
            rnn_in = tf.expand_dims(out, [0])
            step_size = tf.shape(obs_t)[:1]
            lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
                lstm_cell, rnn_in, initial_state=rnn_state_tuple,
                sequence_length=step_size, time_major=False)
            rnn_out = tf.reshape(lstm_outputs, [-1, lstm_unit])

        if lstm:
            out = rnn_out

        policy = layers.fully_connected(
            out, num_actions, activation_fn=tf.nn.softmax,
            weights_initializer=normalized_columns_initializer(0.01),
            biases_initializer=None)

        value = layers.fully_connected(
            out, 1, activation_fn=None, biases_initializer=None,
            weights_initializer=normalized_columns_initializer())

    return policy, value, (lstm_state[0][:1, :], lstm_state[1][:1, :])
Example #31
def main(unused_argv):
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
    if FLAGS.download_only:
        sys.exit(0)


    # Sanity check on the number of workers and the worker index #
    if FLAGS.task_index >= FLAGS.num_workers:
        raise ValueError("Worker index %d exceeds number of workers %d " % 
                         (FLAGS.task_index, FLAGS.num_workers))

    # Sanity check on the number of parameter servers #
    if FLAGS.num_parameter_servers <= 0:
        raise ValueError("Invalid num_parameter_servers value: %d" % 
                         FLAGS.num_parameter_servers)

    ps_hosts = re.findall(r'[\w\.:]+', FLAGS.ps_hosts)
    worker_hosts = re.findall(r'[\w\.:]+', FLAGS.worker_hosts)
    server = tf.train.Server({"ps":ps_hosts,"worker":worker_hosts}, job_name=FLAGS.job_name, task_index=FLAGS.task_index)

    print("GRPC URL: %s" % server.target)
    print("Task index = %d" % FLAGS.task_index)
    print("Number of workers = %d" % FLAGS.num_workers)

    if FLAGS.job_name == "ps":
        server.join()
    else:
        is_chief = (FLAGS.task_index == 0)

    if FLAGS.sync_replicas:
        if FLAGS.replicas_to_aggregate is None:
            replicas_to_aggregate = FLAGS.num_workers
        else:
            replicas_to_aggregate = FLAGS.replicas_to_aggregate

    # Construct device setter object #
    device_setter = get_device_setter(FLAGS.num_parameter_servers,
                                      FLAGS.num_workers)

    # The device setter will automatically place Variables ops on separate        #
    # parameter servers (ps). The non-Variable ops will be placed on the workers. #
    with tf.device(device_setter):
        global_step = tf.Variable(0, name="global_step", trainable=False)
        with tf.name_scope('input'):
            # input #
            x = tf.placeholder(tf.float32, shape=[None, 784], name="x-input")
            x_image = tf.reshape(x, [-1,28,28,1])
            # label, 10 output classes #
            y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y-input")
            prob = tf.placeholder(tf.float32, name='keep_prob')

        stack1_conv1 = layers.convolution2d(x_image,
                                            64,
                                            [3,3],
                                            weights_regularizer=layers.l2_regularizer(0.1),
                                            biases_regularizer=layers.l2_regularizer(0.1),
                                            scope='stack1_Conv1')
        stack1_conv2 = layers.convolution2d(stack1_conv1,
                                            64,
                                            [3,3],
                                            weights_regularizer=layers.l2_regularizer(0.1),
                                            biases_regularizer=layers.l2_regularizer(0.1),
                                            scope='stack1_Conv2')
        stack1_pool = layers.max_pool2d(stack1_conv2,
                                        [2,2],
                                        padding='SAME',
                                        scope='stack1_Pool')
        stack3_pool_flat = layers.flatten(stack1_pool, scope='stack3_pool_flat')
        fcl1 = layers.fully_connected(stack3_pool_flat, 
                                      512, 
                                      weights_regularizer=layers.l2_regularizer(0.1), 
                                      biases_regularizer=layers.l2_regularizer(0.1), 
                                      scope='FCL1')
        fcl1_d = layers.dropout(fcl1, keep_prob=prob, scope='dropout1')
        fcl2 = layers.fully_connected(fcl1_d, 
                                      128, 
                                      weights_regularizer=layers.l2_regularizer(0.1), 
                                      biases_regularizer=layers.l2_regularizer(0.1), 
                                      scope='FCL2')
        fcl2_d = layers.dropout(fcl2, keep_prob=prob, scope='dropout2')
        y, cross_entropy = skflow.models.logistic_regression(fcl2_d, y_, init_stddev=0.01)
        tf.scalar_summary('cross_entropy', cross_entropy)

        with tf.name_scope('train'):
            start_l_rate = 0.001
            decay_step = 1000
            decay_rate = 0.5
            learning_rate = tf.train.exponential_decay(start_l_rate, global_step, decay_step, decay_rate, staircase=False)
            grad_op = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
            '''rep_op = tf.train.SyncReplicasOptimizer(grad_op, 
                                                    replicas_to_aggregate=len(workers),
                                                    replica_id=FLAGS.task_index, 
                                                    total_num_replicas=len(workers)) # also belong to the same class as other optimizers'''
            train_op = tf.contrib.layers.optimize_loss(loss=cross_entropy, 
                                                       global_step=global_step, 
                                                       learning_rate=0.001, 
                                                       optimizer=grad_op, 
                                                       clip_gradients=1)
            tf.scalar_summary('learning_rate', learning_rate)

        '''if FLAGS.sync_replicas and is_chief:
            # Initial token and chief queue runners required by the sync_replicas mode #
            chief_queue_runner = opt.get_chief_queue_runner()
            init_tokens_op = opt.get_init_tokens_op()'''

        with tf.name_scope('accuracy'):
            correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            tf.scalar_summary('accuracy', accuracy)


        merged = tf.merge_all_summaries()
        init_op = tf.initialize_all_variables()
        sv = tf.train.Supervisor(is_chief=is_chief,
                                 init_op=init_op,
                                 recovery_wait_secs=1,
                                 global_step=global_step)

        sess_config = tf.ConfigProto(allow_soft_placement=True,
                                     log_device_placement=False,
                                     device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])

        # The chief worker (task_index==0) session will prepare the session,   #
        # while the remaining workers will wait for the preparation to complete. #
        if is_chief:
            print("Worker %d: Initializing session..." % FLAGS.task_index)
        else:
            print("Worker %d: Waiting for session to be initialized..." % FLAGS.task_index)

        sess = sv.prepare_or_wait_for_session(server.target,
                                              config=sess_config)


        if tf.gfile.Exists('./summary/train'):
            tf.gfile.DeleteRecursively('./summary/train')
        tf.gfile.MakeDirs('./summary/train')

        train_writer = tf.train.SummaryWriter('./summary/train', sess.graph)
        print("Worker %d: Session initialization complete." % FLAGS.task_index)

        '''if FLAGS.sync_replicas and is_chief:
            # Chief worker will start the chief queue runner and call the init op #
            print("Starting chief queue runner and running init_tokens_op")
            sv.start_queue_runners(sess, [chief_queue_runner])
            sess.run(init_tokens_op)'''

        ## Perform training ##
        time_begin = time.time()
        print("Training begins @ %s" % time.ctime(time_begin))

        local_step = 1
        while True:
            # Training feed #
            batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
            train_feed = {x: batch_xs,
                          y_: batch_ys,
                          prob: 0.8}
            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
            run_metadata = tf.RunMetadata()
            _, step, loss, summary = sess.run([train_op, global_step, cross_entropy, merged], feed_dict=train_feed, options=run_options, run_metadata=run_metadata)

            now = time.time()
            if(local_step % 2 == 0):
                print("%s: Worker %d: training step %d done (global step: %d), loss: %.6f" %
                   (time.ctime(now), FLAGS.task_index, local_step, step+1, loss))
                train_writer.add_run_metadata(run_metadata, 'step'+str(step+1))
                train_writer.add_summary(summary, step+1)

            if step+1 >= FLAGS.train_steps:
              break
            local_step += 1

        time_end = time.time()
        print("Training ends @ %s" % time.ctime(time_end))
        training_time = time_end - time_begin
        print("Training elapsed time: %f s" % training_time)


        # memory issue occured, split testing data into batch #
        acc_acu = 0.
        for i in xrange(int(10000/1000)):
            test_x, test_y = mnist.test.next_batch(1000)
            acc_batch = sess.run(accuracy, feed_dict={x: test_x, y_: test_y, prob: 1.0})
            print(acc_batch)
            acc_acu += acc_batch
        acc = acc_acu/10.0
        print ("test accuracy %g" % acc)
        sv.stop()
Example #32
File: vgg.py  Project: zlyin/aardvark
def classification_head (net, num_classes):
    net = flatten(net)
    net = fully_connected(net, 4096)
    net = fully_connected(net, 4096)
    net = fully_connected(net, num_classes, activation_fn=None)
    return net
Example #33
def LeNet(x):    
    # Hyperparameters
    mu = 0
    sigma = 0.1
        
    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    W1 = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma), name="W1")
    b1 = tf.Variable(tf.zeros(6), name="b1")

    x = tf.nn.conv2d(x, W1, strides=[1, 1, 1, 1], padding='VALID')
    x = tf.nn.bias_add(x, b1)
    x = tf.nn.relu(x)
    
    # Pooling. Input = 28x28x6. Output = 14x14x6.
    x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    
    # Layer 2: Convolutional. Output = 10x10x16.
    W2 = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma), name="W2")
    b2 = tf.Variable(tf.zeros(16), name="b2")

    x = tf.nn.conv2d(x, W2, strides=[1, 1, 1, 1], padding='VALID')
    x = tf.nn.bias_add(x, b2)
    x = tf.nn.relu(x)

    # Pooling. Input = 10x10x16. Output = 5x5x16.
    x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    layer2 = x # Saving it for later
    
    # Layer 3: Convolutional. Output = 1x1x400.
    W3 = tf.Variable(tf.truncated_normal(shape=(5, 5, 16, 400), mean = mu, stddev = sigma), name="W3")
    b3 = tf.Variable(tf.zeros(400), name="b3")

    x = tf.nn.conv2d(x, W3, strides=[1, 1, 1, 1], padding='VALID')
    x = tf.nn.bias_add(x, b3)
    x = tf.nn.relu(x)
    layer3 = x # Saving it for later

    # Flatten. Input = 5x5x16. Output = 400.
    layer2flat = flatten(layer2)
    
    # Flatten x. Input = 1x1x400. Output = 400.
    layer3flat = flatten(layer3)
    
    # Concat layer3flat and layer2flat. Input = 400 + 400. Output = 800
    x = tf.concat([layer3flat, layer2flat], 1)
    
    # Dropout
    x = tf.nn.dropout(x, keep_prob)
    
    # Layer 4: Fully Connected. Input = 800. Output = 120.
    W4 = tf.Variable(tf.truncated_normal(shape=(800, 120), mean = mu, stddev = sigma), name="W4")
    b4 = tf.Variable(tf.zeros(120), name="b4")    
    x = tf.add(tf.matmul(x, W4), b4)
    x = tf.nn.relu(x)

    # Layer 5: Fully Connected. Input = 120. Output = 84.
    W5 = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
    b5 = tf.Variable(tf.zeros(84)) 
    x = tf.add(tf.matmul(x, W5), b5)
    x = tf.nn.relu(x)

    # Layer 6: Fully Connected. Input = 84. Output = 43.
    W6 = tf.Variable(tf.truncated_normal(shape=(84, 43), mean = mu, stddev = sigma))
    b6 = tf.Variable(tf.zeros(43)) 
    x = tf.add(tf.matmul(x, W6), b6)
    
    return x
Example #34
def generator(z,
              progress,
              num_filters_fn,
              resolution_schedule,
              num_blocks=None,
              kernel_size=3,
              colors=3,
              to_rgb_activation=None,
              simple_arch=False,
              scope='progressive_gan_generator',
              reuse=None):
  """Generator network for the progressive GAN model.

  Args:
    z: A `Tensor` of latent vector. The first dimension must be batch size.
    progress: A scalar float `Tensor` of training progress.
    num_filters_fn: A function that maps `block_id` to # of filters for the
        block.
    resolution_schedule: An object of `ResolutionSchedule`.
    num_blocks: An integer of number of blocks. None means maximum number of
        blocks, i.e. `resolution.schedule.num_resolutions`. Defaults to None.
    kernel_size: An integer of convolution kernel size.
    colors: Number of output color channels. Defaults to 3.
    to_rgb_activation: Activation function applied when output rgb.
    simple_arch: Architecture variants for lower memory usage and faster speed
    scope: A string or variable scope.
    reuse: Whether to reuse `scope`. Defaults to None which means to inherit
        the reuse option of the parent scope.
  Returns:
    A `Tensor` of model output and a dictionary of model end points.
  """
  if num_blocks is None:
    num_blocks = resolution_schedule.num_resolutions

  start_h, start_w = resolution_schedule.start_resolutions
  final_h, final_w = resolution_schedule.final_resolutions

  def _conv2d(scope, x, kernel_size, filters, padding='SAME'):
    return layers.custom_conv2d(
        x=x,
        filters=filters,
        kernel_size=kernel_size,
        padding=padding,
        activation=lambda x: layers.pixel_norm(tf.nn.leaky_relu(x)),
        he_initializer_slope=0.0,
        scope=scope)

  def _to_rgb(x):
    return layers.custom_conv2d(
        x=x,
        filters=colors,
        kernel_size=1,
        padding='SAME',
        activation=to_rgb_activation,
        scope='to_rgb')

  he_init = contrib_layers.variance_scaling_initializer()

  end_points = {}

  with tf.variable_scope(scope, reuse=reuse):
    with tf.name_scope('input'):
      x = contrib_layers.flatten(z)
      end_points['latent_vector'] = x

    with tf.variable_scope(block_name(1)):
      if simple_arch:
        x_shape = tf.shape(x)
        x = tf.layers.dense(x, start_h*start_w*num_filters_fn(1),
                            kernel_initializer=he_init)
        x = tf.nn.relu(x)
        x = tf.reshape(x, [x_shape[0], start_h, start_w, num_filters_fn(1)])
      else:
        x = tf.expand_dims(tf.expand_dims(x, 1), 1)
        x = layers.pixel_norm(x)
        # Pad the 1 x 1 image to 2 * (start_h - 1) x 2 * (start_w - 1)
        # with zeros for the next conv.
        x = tf.pad(x, [[0] * 2, [start_h - 1] * 2, [start_w - 1] * 2, [0] * 2])
        # The output is start_h x start_w x num_filters_fn(1).
        x = _conv2d('conv0', x, (start_h, start_w), num_filters_fn(1), 'VALID')
        x = _conv2d('conv1', x, kernel_size, num_filters_fn(1))
      lods = [x]

    if resolution_schedule.scale_mode == 'H':
      strides = (resolution_schedule.scale_base, 1)
    else:
      strides = (resolution_schedule.scale_base,
                 resolution_schedule.scale_base)

    for block_id in range(2, num_blocks + 1):
      with tf.variable_scope(block_name(block_id)):
        if simple_arch:
          x = tf.layers.conv2d_transpose(
              x,
              num_filters_fn(block_id),
              kernel_size=kernel_size,
              strides=strides,
              padding='SAME',
              kernel_initializer=he_init)
          x = tf.nn.relu(x)
        else:
          x = resolution_schedule.upscale(x, resolution_schedule.scale_base)
          x = _conv2d('conv0', x, kernel_size, num_filters_fn(block_id))
          x = _conv2d('conv1', x, kernel_size, num_filters_fn(block_id))
        lods.append(x)

    outputs = []
    for block_id in range(1, num_blocks + 1):
      with tf.variable_scope(block_name(block_id)):
        if simple_arch:
          lod = lods[block_id - 1]
          lod = tf.layers.conv2d(
              lod,
              colors,
              kernel_size=1,
              padding='SAME',
              name='to_rgb',
              kernel_initializer=he_init)
          lod = to_rgb_activation(lod)
        else:
          lod = _to_rgb(lods[block_id - 1])
        scale = resolution_schedule.scale_factor(block_id)
        lod = resolution_schedule.upscale(lod, scale)
        end_points['upscaled_rgb_{}'.format(block_id)] = lod

        # alpha_i is used to replace lod_select. Note sum(alpha_i) is
        # guaranteed to be 1.
        alpha = _generator_alpha(block_id, progress)
        end_points['alpha_{}'.format(block_id)] = alpha

        outputs.append(lod * alpha)

    predictions = tf.add_n(outputs)
    batch_size = z.shape[0].value
    predictions.set_shape([batch_size, final_h, final_w, colors])
    end_points['predictions'] = predictions

  return predictions, end_points
Example #35
def Lenet():
    lmbda = 0.01
    lmbda_batch = 0.0005
    sigma = 0.01

    init_learning_rate = 0.002
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(init_learning_rate,
                                               global_step,
                                               decay_steps=800,
                                               decay_rate=0.9,
                                               staircase=True)

    init_learning_rate_batch = 0.002
    global_step_batch = tf.Variable(0, trainable=False)
    learning_rate_batch = tf.train.exponential_decay(init_learning_rate_batch,
                                                     global_step_batch,
                                                     decay_steps=800,
                                                     decay_rate=0.9,
                                                     staircase=True)

    #................number of color channels
    n_input_channels = 1
    #................convolutional layer depths
    layer_depth = {'conv1': 108, 'conv2': 200, 'fc1': 100, 'fc2': 43}
    #................convolutional layer filter size
    fsize = {'1': 5, '2': 5}
    #................max pool and stride size
    conv_stride = 1
    pool_k = 2
    #...............flag to stop backpropagation for the validation set
    is_training = tf.placeholder(tf.bool)
    #...............Keep prob for dropout. Unused in this model.
    keep_prob = tf.placeholder(tf.float32)
    #..............input output place holders

    X = tf.placeholder(tf.float32, (None, 32, 32, n_input_channels))
    y = tf.placeholder(tf.int32, (None))
    y_one_hot = tf.one_hot(y, 43)  #number of outputs

    #.............Generate predetermined random weights in order to initialize both networks identically.
    conv1_W_init = np.random.normal(scale=sigma,
                                    size=(fsize['1'], fsize['1'],
                                          n_input_channels,
                                          layer_depth['conv1']))
    conv2_W_init = np.random.normal(scale=sigma,
                                    size=(fsize['2'], fsize['2'],
                                          layer_depth['conv1'],
                                          layer_depth['conv2']))
    fc1_W_init = np.random.normal(scale=sigma,
                                  size=(5 * 5 * layer_depth['conv2'],
                                        layer_depth['fc1']))
    fc2_W_init = np.random.normal(scale=sigma,
                                  size=(layer_depth['fc1'],
                                        layer_depth['fc2']))

    conv1_W_init = conv1_W_init.astype(np.float32)
    conv2_W_init = conv2_W_init.astype(np.float32)
    fc1_W_init = fc1_W_init.astype(np.float32)
    fc2_W_init = fc2_W_init.astype(np.float32)

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x108.
    conv1_W = tf.Variable(conv1_W_init)
    conv1_b = tf.Variable(tf.zeros(layer_depth['conv1']))
    conv1 = tf.nn.conv2d(X,
                         conv1_W, [1, conv_stride, conv_stride, 1],
                         padding='VALID') + conv1_b
    conv1 = tf.nn.relu(conv1)

    #batch mode
    conv1_W_batch = tf.Variable(conv1_W_init)
    conv1_beta = tf.Variable(
        tf.zeros(layer_depth['conv1']
                 ))  #Offset for batch normalization. Learnable parameter.
    conv1_gamma = tf.Variable(
        tf.ones(layer_depth['conv1']
                ))  #Scale for batch normalization. Learnable parameter.
    conv1_pop_mean = tf.Variable(
        tf.zeros(layer_depth['conv1']), trainable=False
    )  #An estimator of the population mean, estimated over the course of training. Not learnable.
    conv1_pop_var = tf.Variable(
        tf.ones(layer_depth['conv1']), trainable=False
    )  #An estimator of the population variance, estimated over the course of training. Not learnable.
    conv1_batch = tf.nn.conv2d(X,
                               conv1_W_batch, [1, conv_stride, conv_stride, 1],
                               padding='VALID')
    conv1_batch = batch_normalization(conv1_batch,
                                      is_training,
                                      conv1_beta,
                                      conv1_gamma,
                                      conv1_pop_mean,
                                      conv1_pop_var,
                                      layer_type='conv')
    conv1_batch = tf.nn.relu(conv1_batch)

    # Pooling. Input = 28x28x108. Output = 14x14x108.
    conv1 = tf.nn.max_pool(conv1,
                           ksize=[1, pool_k, pool_k, 1],
                           strides=[1, pool_k, pool_k, 1],
                           padding='VALID')
    conv1_batch = tf.nn.max_pool(conv1_batch,
                                 ksize=[1, pool_k, pool_k, 1],
                                 strides=[1, pool_k, pool_k, 1],
                                 padding='VALID')

    # Layer 2: Convolutional. Input = 14x14x108. Output = 10x10x200.
    conv2_W = tf.Variable(conv2_W_init)
    conv2_b = tf.Variable(tf.zeros(layer_depth['conv2']))
    conv2 = tf.nn.conv2d(conv1,
                         conv2_W, [1, conv_stride, conv_stride, 1],
                         padding='VALID') + conv2_b
    conv2 = tf.nn.relu(conv2)

    #batch
    conv2_W_batch = tf.Variable(conv2_W_init)
    conv2_beta = tf.Variable(tf.zeros(layer_depth['conv2']))
    conv2_gamma = tf.Variable(tf.ones(layer_depth['conv2']))
    conv2_pop_mean = tf.Variable(tf.zeros(layer_depth['conv2']),
                                 trainable=False)
    conv2_pop_var = tf.Variable(tf.ones(layer_depth['conv2']), trainable=False)
    conv2_batch = tf.nn.conv2d(conv1_batch,
                               conv2_W_batch, [1, conv_stride, conv_stride, 1],
                               padding='VALID')
    batch_m, batch_v = tf.nn.moments(conv2_batch, axes=[0, 1, 2])
    conv2_batch = batch_normalization(conv2_batch,
                                      is_training,
                                      conv2_beta,
                                      conv2_gamma,
                                      conv2_pop_mean,
                                      conv2_pop_var,
                                      layer_type='conv')
    conv2_batch = tf.nn.relu(conv2_batch)

    # Pooling. Input = 10x10x200. Output = 5x5x200.
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, pool_k, pool_k, 1],
                           strides=[1, pool_k, pool_k, 1],
                           padding='VALID')
    conv2_batch = tf.nn.max_pool(conv2_batch,
                                 ksize=[1, pool_k, pool_k, 1],
                                 strides=[1, pool_k, pool_k, 1],
                                 padding='VALID')

    # Flatten. Input = 5x5x200. Output = 5000.
    fc0 = flatten(conv2)
    fc0_batch = flatten(conv2_batch)

    # Layer 3: Fully Connected. Input = 5000. Output = 100.
    fc1_W = tf.Variable(fc1_W_init)
    fc1_b = tf.Variable(tf.zeros(layer_depth['fc1']))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    fc1 = tf.nn.relu(fc1)

    #batch
    fc1_W_batch = tf.Variable(fc1_W_init)
    fc1_beta = tf.Variable(tf.zeros(layer_depth['fc1']))
    fc1_gamma = tf.Variable(tf.ones(layer_depth['fc1']))
    fc1_pop_mean = tf.Variable(tf.zeros(layer_depth['fc1']), trainable=False)
    fc1_pop_var = tf.Variable(tf.ones(layer_depth['fc1']), trainable=False)
    fc1_batch = tf.matmul(fc0_batch, fc1_W_batch)
    fc1_batch = batch_normalization(fc1_batch,
                                    is_training,
                                    fc1_beta,
                                    fc1_gamma,
                                    fc1_pop_mean,
                                    fc1_pop_var,
                                    layer_type='fc')
    fc1_batch = tf.nn.relu(fc1_batch)

    # Layer 4: Fully Connected. Input = 100. Output = 43.
    fc2_W = tf.Variable(fc2_W_init)
    fc2_b = tf.Variable(tf.zeros(layer_depth['fc2']))
    logits = tf.matmul(fc1, fc2_W) + fc2_b
    #batch
    fc2_W_batch = tf.Variable(fc2_W_init)
    fc2_b_batch = tf.Variable(tf.zeros(layer_depth['fc2']))
    logits_batch = tf.matmul(fc1_batch, fc2_W_batch) + fc2_b_batch

    # Softmax cross-entropy loss plus L2 weight regularization
    loss = tf.reduce_sum(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=y_one_hot)) + lmbda * (
                tf.nn.l2_loss(conv1_W) + tf.nn.l2_loss(conv2_W) +
                tf.nn.l2_loss(fc1_W) + tf.nn.l2_loss(fc2_W))
    loss_batch = tf.reduce_sum(
        tf.nn.softmax_cross_entropy_with_logits(
            logits=logits_batch, labels=y_one_hot)) + lmbda_batch * (
                tf.nn.l2_loss(conv1_W_batch) + tf.nn.l2_loss(conv2_W_batch) +
                tf.nn.l2_loss(fc1_W_batch) + tf.nn.l2_loss(fc2_W_batch))

    #Adam minimizer
    training_step = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(loss, global_step=global_step)
    training_step_batch = tf.train.AdamOptimizer(
        learning_rate=learning_rate_batch).minimize(
            loss_batch, global_step=global_step_batch)
    #Prediction accuracy op
    correct_prediction = tf.equal(tf.argmax(logits, 1),
                                  tf.argmax(y_one_hot, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    #batch
    correct_prediction_batch = tf.equal(tf.argmax(logits_batch, 1),
                                        tf.argmax(y_one_hot, 1))
    accuracy_batch = tf.reduce_mean(
        tf.cast(correct_prediction_batch, tf.float32))

    return (X, y, is_training, keep_prob, training_step, training_step_batch,
            accuracy, accuracy_batch, fc1, fc1_batch, conv2_beta, conv2_gamma,
            conv2_pop_mean, conv2_pop_var, batch_m, batch_v, tf.train.Saver())
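The Lenet() graph above relies on a batch_normalization helper that is not shown in this example. Below is a minimal sketch of what such a helper might look like, assuming TF 1.x, an exponential-moving-average decay of 0.9, and the same (x, is_training, beta, gamma, pop_mean, pop_var, layer_type) signature used in the calls above; the decay value and the implementation details are our assumptions, not the original author's code.
import tensorflow as tf

def batch_normalization(x, is_training, beta, gamma, pop_mean, pop_var,
                        layer_type='conv', decay=0.9, epsilon=1e-3):
    # Axes over which batch statistics are computed: NHWC axes for conv, batch axis for fc.
    axes = [0, 1, 2] if layer_type == 'conv' else [0]

    def train_branch():
        batch_mean, batch_var = tf.nn.moments(x, axes)
        # Update the running (population) estimates used at evaluation time.
        update_mean = tf.assign(pop_mean, decay * pop_mean + (1 - decay) * batch_mean)
        update_var = tf.assign(pop_var, decay * pop_var + (1 - decay) * batch_var)
        with tf.control_dependencies([update_mean, update_var]):
            return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)

    def eval_branch():
        return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)

    # is_training is a tf.bool placeholder, so the branch is chosen at run time.
    return tf.cond(is_training, train_branch, eval_branch)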
Example #36
0
def teacher(input_images,
            keep_prob,
            is_training=True,
            weight_decay=5e-5,
            batch_norm_decay=0.99,
            batch_norm_epsilon=0.001):
    with tf.variable_scope("Teacher_model"):
        net, endpoints = resnet_v2(inputs=input_images,
                                   num_classes=M,
                                   is_training=True,
                                   scope='resnet_v2')
        # co_trained layers
        var_scope = 'Teacher_model/resnet_v2/'
        co_list_0 = slim.get_model_variables(var_scope + 'Conv2d_0')
        # co_list_1 = slim.get_model_variables(var_scope +'InvertedResidual_16_0/conv')
        # co_list_2 = slim.get_model_variables(var_scope +'InvertedResidual_24_')
        t_co_list = co_list_0

        base_var_list = slim.get_model_variables('Teacher_model/resnet_v2')

        # feature & attention
        t_g0 = endpoints["InvertedResidual_{}_{}".format(256, 2)]
        t_at0 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g0), -1),
                                   axis=0,
                                   name='t_at0')
        t_g1 = endpoints["InvertedResidual_{}_{}".format(512, 3)]
        t_at1 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g1), -1),
                                   axis=0,
                                   name='t_at1')
        part_feature = endpoints["InvertedResidual_{}_{}".format(1024, 3)]
        t_at2 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(part_feature), -1),
                                   axis=0,
                                   name='t_at2')
        t_g3 = endpoints["InvertedResidual_{}_{}".format(1024, 4)]
        t_at3 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(t_g3), -1),
                                   axis=0,
                                   name='t_at3')
        object_feature = endpoints["InvertedResidual_{}_{}".format(1024, 5)]
        t_at4 = tf.nn.l2_normalize(tf.reduce_sum(tf.square(object_feature),
                                                 -1),
                                   axis=0,
                                   name='t_at4')

        t_g = (t_g0, t_g1, part_feature, object_feature)
        t_at = (t_at0, t_at1, t_at2, t_at3, t_at4)

        object_feature_h = object_feature.get_shape().as_list()[1]
        object_feature_w = object_feature.get_shape().as_list()[2]
        fc_obj = slim.max_pool2d(object_feature,
                                 (object_feature_h, object_feature_w),
                                 scope="GMP1")
        batch_norm_params = {
            'center': True,
            'scale': True,
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
        }

        fc_obj = slim.conv2d(
            fc_obj,
            M, [1, 1],
            activation_fn=None,
            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            scope='fc_obj')
        fc_obj = tf.nn.dropout(fc_obj, keep_prob=keep_prob)
        fc_obj = slim.flatten(fc_obj)
        fc_part = slim.conv2d(
            part_feature,
            M * k,  # number of output channels (kernels)
            [1, 1],  # kernel height and width
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm,  # use batch norm as the normalizer
            normalizer_params=batch_norm_params,
            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay))
        fc_part_h = fc_part.get_shape().as_list()[1]
        fc_part_w = fc_part.get_shape().as_list()[2]
        fc_part = slim.max_pool2d(fc_part, (fc_part_h, fc_part_w),
                                  scope="GMP2")
        ft_list = tf.split(fc_part, num_or_size_splits=M, axis=-1)  # split along the last (channel) dimension
        cls_list = []
        for i in range(M):
            ft = tf.transpose(ft_list[i], [0, 1, 3, 2])
            cls = layers_lib.pool(ft, [1, k], "AVG")
            cls = layers.flatten(cls)
            cls_list.append(cls)
        fc_ccp = tf.concat(cls_list, axis=-1)  #cross_channel_pooling (N, M)

        fc_part = slim.conv2d(
            fc_part,
            M, [1, 1],
            activation_fn=None,
            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
            scope="fc_part")
        fc_part = tf.nn.dropout(fc_part, keep_prob=keep_prob)
        fc_part = slim.flatten(fc_part)
        t_var_list = slim.get_model_variables()
    return t_co_list, t_g, t_at, fc_obj, fc_part, fc_ccp, base_var_list, t_var_list
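The t_at* tensors in teacher() are spatial attention maps: the channel-wise sum of squared activations, L2-normalized. A standalone sketch of that operation follows; the function name and the placeholder shape are illustrative assumptions.
import tensorflow as tf

def spatial_attention(feature_map, name):
    # Sum of squares over the channel axis gives one spatial energy map per image;
    # it is then L2-normalized over the batch axis, as in the teacher() code above.
    energy = tf.reduce_sum(tf.square(feature_map), axis=-1)
    return tf.nn.l2_normalize(energy, axis=0, name=name)

# Example: a hypothetical 7x7 feature map with 1024 channels.
feat = tf.placeholder(tf.float32, [None, 7, 7, 1024])
attention = spatial_attention(feat, name='t_at_example')  # shape (None, 7, 7)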
Example #37
0
    def _construct( self, x ):

        # Hyperparameters
        self.mu = 0
        self.sigma = 0.1

        # Layer 1 (Convolutional): Input = 32x32x1. Output = 28x28x6.
        self.filter1_width = 5
        self.filter1_height = 5
        self.input1_channels = 1
        self.conv1_output = 6

        tf.reset_default_graph()


        # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
        self.conv1_weight = tf.Variable(tf.truncated_normal(
            shape=(self.filter1_width, self.filter1_height, self.input1_channels, self.conv1_output), mean=self.mu,
            stddev=self.sigma))
        self.conv1_bias = tf.Variable(tf.zeros(self.conv1_output))
        self.conv1 = tf.nn.conv2d(x, self.conv1_weight, strides=[1, 1, 1, 1], padding='VALID') + self.conv1_bias
        # activation
        self.conv1 = tf.nn.relu(self.conv1)

        # Pooling. Input = 28x28x6. Output = 14x14x6.
        self.conv1 = tf.nn.max_pool(self.conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

        # Layer 2 (Convolutional): Output = 10x10x16.
        self.filter2_width = 5
        self.filter2_height = 5
        self.input2_channels = 6
        self.conv2_output = 16
        # Weight and bias
        self.conv2_weight = tf.Variable(tf.truncated_normal(shape=(self.filter2_width, self.filter2_height, self.input2_channels, self.conv2_output),
            mean=self.mu, stddev=self.sigma))
        self.conv2_bias = tf.Variable(tf.zeros(self.conv2_output))
        # Apply Convolution
        self.conv2 = tf.nn.conv2d(self.conv1, self.conv2_weight, strides=[1, 1, 1, 1],
                                  padding='VALID') + self.conv2_bias

        # Activation:
        self.conv2 = tf.nn.relu(self.conv2)

        # Pooling: Input = 10x10x16. Output = 5x5x16.
        self.conv2 = tf.nn.max_pool(self.conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

        # Flattening: Input = 5x5x16. Output = 400.
        self.fully_connected0 = flatten(self.conv2)

        # Layer 3 (Fully Connected): Input = 400. Output = 120.
        self.connected1_weights = tf.Variable(tf.truncated_normal(shape=(400, 120), mean=self.mu, stddev=self.sigma))
        self.connected1_bias = tf.Variable(tf.zeros(120))
        self.fully_connected1 = (tf.matmul(self.fully_connected0, self.connected1_weights)) + self.connected1_bias

        # Activation:
        self.fully_connected1 = tf.nn.relu(self.fully_connected1)

        # Layer 4 (Fully Connected): Input = 120. Output = 84.
        self.connected2_weights = tf.Variable(tf.truncated_normal(shape=(120, 84), mean=self.mu, stddev=self.sigma))
        self.connected2_bias = tf.Variable(tf.zeros(84))
        self.fully_connected2 = tf.add((tf.matmul(self.fully_connected1, self.connected2_weights)), self.connected2_bias)

        # Activation.
        self.fully_connected2 = tf.nn.relu(self.fully_connected2)

        # Layer 5 (Fully Connected): Input = 84. Output = 43.
        self.output_weights = tf.Variable(tf.truncated_normal(shape=(84, 43), mean=self.mu, stddev=self.sigma))
        self.output_bias = tf.Variable(tf.zeros(43))
        self.logits = tf.add((tf.matmul(self.fully_connected2, self.output_weights)), self.output_bias)

        self.y = tf.placeholder(tf.int32, None)
        self.one_hot_y = tf.one_hot(self.y, self.CLASSES_SIZE)

        self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.one_hot_y)
        self.loss_operation = tf.reduce_mean(self.cross_entropy)
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.LEARNING_RATE)
        self.training_operation = self.optimizer.minimize(self.loss_operation)

        # Accuracy operation
        self.correct_prediction = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.one_hot_y, 1))
        self.accuracy_operation = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))

        # Saving all variables
        self.saver = tf.train.Saver()

        self.keep_prob = tf.placeholder(tf.float32)  # For fully-connected layers
        self.keep_prob_conv = tf.placeholder(tf.float32)  # For convolutional layers
Example #38
0
def conv(x):
    mu = 0
    sigma = 0.1

    #conv1
    conv1_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 1, 64), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(64))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv1_b
    conv1 = tf.nn.relu(conv1)

    #conv2
    conv2_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 64, 64), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(64))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv2_b
    conv2 = tf.nn.relu(conv2)
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')

    #conv3
    conv3_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 64, 256), mean=mu, stddev=sigma))
    conv3_b = tf.Variable(tf.zeros(256))
    conv3 = tf.nn.conv2d(conv2, conv3_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv3_b
    conv3 = tf.nn.relu(conv3)

    #conv4
    conv4_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 256, 256), mean=mu, stddev=sigma))
    conv4_b = tf.Variable(tf.zeros(256))
    conv4 = tf.nn.conv2d(conv3, conv4_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv4_b
    conv4 = tf.nn.relu(conv4)

    #conv5
    conv5_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 256, 256), mean=mu, stddev=sigma))
    conv5_b = tf.Variable(tf.zeros(256))
    conv5 = tf.nn.conv2d(conv4, conv5_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv5_b
    conv5 = tf.nn.relu(conv5)
    conv5 = tf.nn.max_pool(conv5,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')

    #conv6
    conv6_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 256, 512), mean=mu, stddev=sigma))
    conv6_b = tf.Variable(tf.zeros(512))
    conv6 = tf.nn.conv2d(conv5, conv6_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv6_b
    conv6 = tf.nn.relu(conv6)

    #conv7
    conv7_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 512, 512), mean=mu, stddev=sigma))
    conv7_b = tf.Variable(tf.zeros(512))
    conv7 = tf.nn.conv2d(conv6, conv7_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv7_b
    conv7 = tf.nn.relu(conv7)

    #conv8
    conv8_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 512, 512), mean=mu, stddev=sigma))
    conv8_b = tf.Variable(tf.zeros(512))
    conv8 = tf.nn.conv2d(conv7, conv8_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv8_b
    conv8 = tf.nn.relu(conv8)
    conv8 = tf.nn.max_pool(conv8,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME')

    #conv9
    conv9_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 512, 512), mean=mu, stddev=sigma))
    conv9_b = tf.Variable(tf.zeros(512))
    conv9 = tf.nn.conv2d(conv8, conv9_W, strides=[1, 1, 1, 1],
                         padding='SAME') + conv9_b
    conv9 = tf.nn.relu(conv9)

    #conv10
    conv10_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 512, 512), mean=mu, stddev=sigma))
    conv10_b = tf.Variable(tf.zeros(512))
    conv10 = tf.nn.conv2d(
        conv9, conv10_W, strides=[1, 1, 1, 1], padding='SAME') + conv10_b
    conv10 = tf.nn.relu(conv10)

    #conv11
    conv11_W = tf.Variable(
        tf.truncated_normal(shape=(3, 3, 512, 512), mean=mu, stddev=sigma))
    conv11_b = tf.Variable(tf.zeros(512))
    conv11 = tf.nn.conv2d(
        conv10, conv11_W, strides=[1, 1, 1, 1], padding='SAME') + conv11_b
    conv11 = tf.nn.relu(conv11)

    fc0 = flatten(conv11)

    #fc1
    s = int(np.prod(conv11.get_shape()[1:]))
    fc1_W = tf.Variable(
        tf.truncated_normal(shape=(s, 4096), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(4096))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b
    fc1 = tf.nn.relu(fc1)

    #fc2
    fc2_W = tf.Variable(
        tf.truncated_normal(shape=(4096, 4096), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(4096))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b
    fc2 = tf.nn.relu(fc2)

    #fc3
    fc3_W = tf.Variable(
        tf.truncated_normal(shape=(4096, 4), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(4))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    return logits
def LeNet(x):
    mu = 0
    sigma = 0.1

    # Layer 1: Convolutional. Input = 32x32x1, Output = 28x28x6.
    weight_conv_layer1 = tf.Variable(
        tf.truncated_normal([5, 5, 1, 6], mean=mu, stddev=sigma))
    bias_conv_layer1 = tf.Variable(tf.zeros(6))

    conv_layer1 = tf.nn.conv2d(x,
                               weight_conv_layer1,
                               strides=[1, 1, 1, 1],
                               padding='VALID')
    conv_layer1 = tf.nn.bias_add(conv_layer1, bias_conv_layer1)
    conv_layer1 = tf.nn.relu(conv_layer1)  #Activation: Relu

    # Sub: Pooling. Input = 28x28x6, Output = 14x14x6
    pool_layer1 = tf.nn.max_pool(conv_layer1,
                                 ksize=[1, 2, 2, 1],
                                 strides=[1, 2, 2, 1],
                                 padding='VALID')

    # Layer 2: Convolutional, Input = 14x14x6, Output = 10x10x16.
    weight_conv_layer2 = tf.Variable(
        tf.truncated_normal([5, 5, 6, 16], mean=mu, stddev=sigma))
    bias_conv_layer2 = tf.Variable(tf.zeros(16))

    conv_layer2 = tf.nn.conv2d(pool_layer1,
                               weight_conv_layer2,
                               strides=[1, 1, 1, 1],
                               padding='VALID')
    conv_layer2 = tf.nn.bias_add(conv_layer2, bias_conv_layer2)
    conv_layer2 = tf.nn.relu(conv_layer2)  #Activation: Relu

    # Sub: Pooling. Input = 10x10x16, Output = 5x5x16
    pool_layer2 = tf.nn.avg_pool(conv_layer2,
                                 ksize=[1, 2, 2, 1],
                                 strides=[1, 2, 2, 1],
                                 padding='VALID')

    # Flatten To Fully Connected Layer. Input = 5x5x16, Output = 400
    neural_feed = flatten(pool_layer2)

    # Layer 3: Fully Connected, Input = 400, Output = 120
    weight_fc_layer3 = tf.Variable(
        tf.truncated_normal([400, 120], mean=mu, stddev=sigma))
    bias_fc_layer3 = tf.Variable(tf.zeros(120))

    fc_layer3 = tf.matmul(neural_feed, weight_fc_layer3)
    fc_layer3 = tf.nn.bias_add(fc_layer3, bias_fc_layer3)
    fc_layer3 = tf.nn.relu(fc_layer3)

    # Layer 4: Fully Connected, Input = 120, Output = 84
    weight_fc_layer4 = tf.Variable(
        tf.truncated_normal([120, 84], mean=mu, stddev=sigma))
    bias_fc_layer4 = tf.Variable(tf.zeros(84))

    fc_layer4 = tf.matmul(fc_layer3, weight_fc_layer4)
    fc_layer4 = tf.nn.bias_add(fc_layer4, bias_fc_layer4)
    fc_layer4 = tf.nn.relu(fc_layer4)

    # Layer 5: Fully Connected, Input = 84, Output = 10
    weight_fc_layer5 = tf.Variable(
        tf.truncated_normal([84, 10], mean=mu, stddev=sigma))
    bias_fc_layer5 = tf.Variable(tf.zeros(10))

    fc_layer5 = tf.matmul(fc_layer4, weight_fc_layer5)
    fc_layer5 = tf.nn.bias_add(fc_layer5, bias_fc_layer5)

    logits = fc_layer5

    return logits
Example #40
0
    def LeNet(self, x):  
 
        # Hyperparameters
        mu = 0
        sigma = 0.1
        Padding='VALID'
        W_lambda = 5.0
    
        conv1_W = tf.Variable(tf.truncated_normal(shape=(6, 4, 3, 3), mean = mu, stddev = sigma))
        conv1_b = tf.Variable(tf.zeros(3))
        conv1   = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding=Padding) + conv1_b
        if self.debug:
            print("x shape: ", x.shape)
            print("conv1_W shape: ", conv1_W.shape)
            print("conv1_b shape: ", conv1_b.shape)
            print("conv1 shape: ", conv1.shape)
    
        # L2 Regularization
        conv1_W = -W_lambda*conv1_W
        if self.debug:
            print("conv1_W (after L2 1) shape: ", conv1_W.shape)
    
        # Activation.
        conv1 = tf.nn.relu(conv1)
        if self.debug:
            print("conv1 (after Activiateion) shape: ", conv1.shape)
    
        # Pooling...
        conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=Padding)
        if self.debug:
            print("conv1 (after Pooling 1) shape: ", conv1.shape)
    
        # Layer 2: Convolutional...
        conv2_W = tf.Variable(tf.truncated_normal(shape=(6, 4, 3, 6), mean = mu, stddev = sigma))
        conv2_b = tf.Variable(tf.zeros(6))
        conv2   = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1], padding=Padding) + conv2_b
        if self.debug:
            print("conv2_W shape: ", conv2_W.shape)
            print("conv2_b shape: ", conv2_b.shape)
            print("conv2 shape: ", conv2.shape)
    
        # L2 Regularization
        conv2 = -W_lambda*conv2
        if self.debug:
            print("conv2 shape after L2: ", conv2.shape)
    
        # Activation.
        conv2 = tf.nn.relu(conv2)
        if self.debug:
            print("conv2 shape after activation: ", conv2.shape)
    
        # Pooling...
        conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=Padding)
        if self.debug:
            print("conv2 shape after pooling: ", conv2.shape)

        # Flatten...
        fc0   = flatten(conv2)
    
        # Layer 3: Fully Connected...
        fc1_W = tf.Variable(tf.truncated_normal(shape=(4356, 60), mean = mu, stddev = sigma))
        fc1_b = tf.Variable(tf.zeros(60))
        
        if self.debug:
            print("fc0", fc0.shape)
            print("fc1_W", fc1_W.shape)
            print("fc1_b", fc1_b.shape)
        fc1   = tf.matmul(fc0, fc1_W) + fc1_b
        if self.debug:
            print("fc1", fc1.shape)
    
        # Activation.
        fc1    = tf.nn.relu(fc1)
        if self.debug:
            print("fc1 after Activation", fc1.shape)
    
        # Layer 4: Fully Connected...
        fc2_W  = tf.Variable(tf.truncated_normal(shape=(60, 30), mean = mu, stddev = sigma))
        fc2_b  = tf.Variable(tf.zeros(30))
        fc2    = tf.matmul(fc1, fc2_W) + fc2_b
        if self.debug:
            print("fc2_W shape: ", fc2_W.shape)
            print("fc2_b shape: ", fc2_b.shape)
            print("fc2 shape: ", fc2.shape)
    
        # Activation.
        fc2    = tf.nn.relu(fc2)
        if self.debug:
            print("fc2 shape after activation: ", fc2.shape)
    
        # Layer 5: Fully Connected. Input = 30. Output = 3.
        fc3_W  = tf.Variable(tf.truncated_normal(shape=(30, 3), mean = mu, stddev = sigma))
        fc3_b  = tf.Variable(tf.zeros(3))
        logits = tf.matmul(fc2, fc3_W) + fc3_b
        if self.debug:
            print("fc3_W shape: ", fc3_W.shape)
            print("fc3_b shape: ", fc3_b.shape)
            print("logits shape: ", logits.shape)
    
        return logits
Example #41
0
def LeNet(x):
    # Define hyperparameters
    mu = 0
    sigma = 0.1

    # Layer 1: Convolution. Input 32x32x1. Output = 28x28x6. 5*5*1 is the size of the filter
    conv1_w = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_w, strides=[1, 1, 1, 1],
                         padding='VALID') + conv1_b

    # Activation,Transform the linear model to the non-linear model
    conv1 = tf.nn.relu(conv1)

    # Pooling. Input = 28x28x6. Output = 14x14x6. ksize/strides=[batch, height,width, channels]
    conv1 = tf.nn.max_pool(conv1,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # Layer 2: Convolution. Output = 10x10x16
    conv2_w = tf.Variable(
        tf.truncated_normal(shape=[5, 5, 6, 16], mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_w, strides=[1, 1, 1, 1],
                         padding='VALID') + conv2_b

    # Activation
    conv2 = tf.nn.relu(conv2)

    # Pooling. Input = 10x10x16. Output = 5x5x16
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # Flatten. Input = 5x5x16. Output = 400. Unfold the 3D matrix to 1D vector
    fc0 = flatten(conv2)

    # Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_w = tf.Variable(
        tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_w) + fc1_b

    # Activation
    fc1 = tf.nn.relu(fc1)

    # Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_w = tf.Variable(
        tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_w) + fc2_b

    # Activation
    fc2 = tf.nn.relu(fc2)

    # Layer 5: Fully Connected. Input = 84.  Output = 10.
    fc3_w = tf.Variable(
        tf.truncated_normal(shape=(84, 10), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(10))
    logits = tf.matmul(fc2, fc3_w) + fc3_b
    return logits
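The size comments in the LeNet above follow the VALID-padding formula out = (in - filter) / stride + 1. A quick plain-Python check of those numbers (the helper name is ours, for illustration only):
def valid_out(in_size, filter_size, stride=1):
    # Spatial output size of a VALID-padded convolution or pooling op.
    return (in_size - filter_size) // stride + 1

assert valid_out(32, 5) == 28            # conv1: 32x32 -> 28x28
assert valid_out(28, 2, stride=2) == 14  # pool1: 28x28 -> 14x14
assert valid_out(14, 5) == 10            # conv2: 14x14 -> 10x10
assert valid_out(10, 2, stride=2) == 5   # pool2: 10x10 -> 5x5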
Example #42
0
    def __init__(self,
                 stack,
                 maxstack,
                 mode,
                 finetune=False,
                 learn_diff=False,
                 psize=32):
        self.p_size = psize  #32
        self.x1 = tf.placeholder(tf.float32,
                                 [None, self.p_size, self.p_size, 1],
                                 'x1_input')
        self.x2 = tf.placeholder(tf.float32,
                                 [None, self.p_size, self.p_size, 1],
                                 'x2_input')
        self.x1_o = tf.placeholder(tf.float32,
                                   [None, self.p_size, self.p_size, 1],
                                   'x1_original')
        self.x2_o = tf.placeholder(tf.float32,
                                   [None, self.p_size, self.p_size, 1],
                                   'x2_original')

        self.keep_prob = tf.placeholder_with_default(0.5,
                                                     shape=(),
                                                     name='keep_prob')
        self.training = tf.placeholder_with_default(True,
                                                    shape=(),
                                                    name='training')

        self.finetune = finetune
        self.learn_diff = learn_diff

        self.init_filt = self.p_size * self.p_size
        self.stack = stack
        self.maxstack = maxstack
        self.trainable = False

        self.mode = mode
        self.is_first = True  # indicator for the first autoencoder's settings (we don't want to add summaries twice for the same values)

        self.middle_mse = tf.placeholder(tf.float32,
                                         shape=(),
                                         name='middle_mse')
        self.end_mse = tf.placeholder(tf.float32, shape=(), name='end_mse')
        self.diff = tf.placeholder(tf.float32, shape=(), name='diff')

        self.mse_one = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='reconstruct_one')
        self.mse_two = tf.placeholder(tf.float32,
                                      shape=(),
                                      name='reconstruct_two')
        self.cross_entropy_one = tf.placeholder(tf.float32,
                                                shape=(),
                                                name='cross_entropy_one')
        self.cross_entropy_two = tf.placeholder(tf.float32,
                                                shape=(),
                                                name='cross_entropy_two')

        self.o1_beg = tf.placeholder(tf.float32, shape=(), name='o1_beg')
        self.o1_beg_o = tf.placeholder(tf.float32,
                                       shape=(),
                                       name='o1_beg_original')
        self.o1_mid = tf.placeholder(tf.float32, shape=(), name='o1_mid')
        self.o1_diff = tf.placeholder(tf.float32, shape=(), name='o1_diff')
        self.o1_end = tf.placeholder(tf.float32, shape=(), name='o1_end')
        self.o1_end_resh = tf.placeholder(tf.float32,
                                          shape=(),
                                          name='o1_end_resh')
        self.o2_beg = tf.placeholder(tf.float32, shape=(), name='o2_beg')
        self.o2_beg_o = tf.placeholder(tf.float32,
                                       shape=(),
                                       name='o2_beg_original')
        self.o2_mid = tf.placeholder(tf.float32, shape=(), name='o2_mid')
        self.o2_diff = tf.placeholder(tf.float32, shape=(), name='o2_diff')
        self.o2_end = tf.placeholder(tf.float32, shape=(), name='o2_end')
        self.o2_end_resh = tf.placeholder(tf.float32,
                                          shape=(),
                                          name='o2_end_resh')

        self.mid_weights = tf.placeholder(tf.float32,
                                          shape=(),
                                          name='mid_weights')
        self.reuse_list = []
        self.reuse_list_load = []

        # internal settings
        self.do_dropout = True
        self.norm = False
        self.ker_reg = None
        self.bias_reg = None
        self.act_reg = None
        self.act_reg_mid = None
        self.initializer = tf.contrib.layers.xavier_initializer(uniform=True)
        self.do_summary = True
        self.use_bias = True
        self.epsilon = 0.0  # 10e-9

        self.act = tf.nn.sigmoid
        self.act_mid = None
        self.act_end = tf.nn.sigmoid

        self.act_diff = tf.nn.sigmoid
        self.act_diff_last = None
        self.act_last = tf.nn.sigmoid
        self.act_last_use = True

        self.current_max = tf.placeholder_with_default(1.0,
                                                       shape=(),
                                                       name='current_max')
        self.current_max_end = tf.placeholder_with_default(
            1.0, shape=(), name='current_max_end')
        self.max_mid = tf.placeholder(tf.float32, shape=(), name='max_mid')
        self.max_end = tf.placeholder(tf.float32, shape=(), name='max_end')
        self.min_mid = tf.placeholder(tf.float32, shape=(), name='min_mid')
        self.min_end = tf.placeholder(tf.float32, shape=(), name='min_end')

        with tf.variable_scope("siamese", reuse=tf.AUTO_REUSE) as scope:
            tf.summary.image('input_first', self.x1, 4)

            self.o1_beg = flatten(self.x1)
            self.o1_beg_o = flatten(self.x1_o)
            tf.summary.histogram('input_hist', self.o1_beg)
            self.o1_mid = self.network_middle(self.o1_beg, scope)

            if self.learn_diff:
                self.o1_diff = self.network_diff(self.o1_mid, scope)

            self.o1_end = self.network_end(self.o1_mid, scope)
            self.o1_end_resh = tf.reshape(self.o1_end,
                                          [-1, self.p_size, self.p_size, 1])
            tf.summary.image('output_first', self.o1_end_resh, 4)
            #tf.summary.histogram('output_hist', self.o1_end) ### TODO UNCOMMENT
            self.is_first = False
            scope.reuse_variables()

            self.o2_beg = flatten(self.x2)
            self.o2_beg_o = flatten(self.x2_o)
            self.o2_mid = self.network_middle(self.o2_beg, scope)

            if self.learn_diff:
                self.o2_diff = self.network_diff(self.o2_mid, scope)

            self.o2_end = self.network_end(self.o2_mid, scope)
            self.o2_end_resh = tf.reshape(self.o2_end,
                                          [-1, self.p_size, self.p_size, 1])

            # Create loss
            self.y_ = tf.placeholder(tf.float32, [None])

            if learn_diff:
                self.loss_diff = self.siamcoder_loss_diff()
            self.loss_mse_diff = self.siamcoder_loss_mse_diff()
            self.loss_mse = self.siamcoder_loss_mse()

            with tf.name_scope('diffs'):
                tf.summary.scalar('diff', self.diff)
                tf.summary.scalar('mse_one', self.mse_one)
                tf.summary.scalar('mse_two', self.mse_two)

            with tf.name_scope('layers_mean'):
                tf.summary.scalar('o1_mid', tf.reduce_mean(self.o1_mid))
                tf.summary.scalar('o1_end', tf.reduce_mean(self.o1_end))
                tf.summary.scalar('o2_mid', tf.reduce_mean(self.o2_mid))
                tf.summary.scalar('o2_end', tf.reduce_mean(self.o2_end))
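The two branches above share weights: the second branch is built after scope.reuse_variables(), so network_middle and network_end reuse the variables created for the first branch. A minimal sketch of that sharing pattern, with a hypothetical dense layer standing in for the real sub-networks:
import tensorflow as tf

with tf.variable_scope("siamese_demo", reuse=tf.AUTO_REUSE) as scope:
    x1 = tf.placeholder(tf.float32, [None, 1024], name="x1")
    x2 = tf.placeholder(tf.float32, [None, 1024], name="x2")

    h1 = tf.layers.dense(x1, 256, name="enc")  # creates siamese_demo/enc/* variables
    scope.reuse_variables()
    h2 = tf.layers.dense(x2, 256, name="enc")  # reuses the same kernel and bias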
Example #43
0
    def build(self, inputs, depth, width, num_classes, name=None):
        """Model architecture to train the model.

    The configuration of the resnet blocks requires that depth should be
    6n+4 where n is the number of resnet blocks desired.

    Args:
      inputs: A 4D float tensor containing the model inputs.
      depth: Number of convolutional layers in the network.
      width: Size of the convolutional filters in the residual blocks.
      num_classes: Positive integer number of possible classes.
      name: Optional string, the name of the resulting op in the TF graph.

    Returns:
      A 2D float logits tensor of shape (batch_size, num_classes).
    Raises:
      ValueError: if depth is not the minimum amount required to build the
        model.
    """

        if (depth - 4) % 6 != 0:
            raise ValueError('Depth of ResNet specified not sufficient.')

        resnet_blocks = (depth - 4) // 6
        with tf.variable_scope(name, 'resnet_model'):

            first_layer_technique = self._pruning_method
            if not self._prune_first_layer:
                first_layer_technique = 'baseline'
            net = self._conv(inputs,
                             'conv_1',
                             output_size=16,
                             sparsity_technique=first_layer_technique)
            net = self._residual_block(net,
                                       'conv_2',
                                       16 * width,
                                       subsample=False,
                                       blocks=resnet_blocks)

            net = self._residual_block(net,
                                       'conv_3',
                                       32 * width,
                                       subsample=True,
                                       blocks=resnet_blocks)
            net = self._residual_block(net,
                                       'conv_4',
                                       64 * width,
                                       subsample=True,
                                       blocks=resnet_blocks)

            # Put the final BN, relu before the max pooling.
            with tf.name_scope('Pooling'):
                net = self._batch_norm(net)
                net = tf.nn.relu(net)
                net = tf.layers.average_pooling2d(
                    net, pool_size=8, strides=1, data_format=self._data_format)

            net = contrib_layers.flatten(net)
            last_layer_technique = self._pruning_method
            if not self._prune_last_layer:
                last_layer_technique = 'baseline'
            net = self._dense(net,
                              num_classes,
                              'logits',
                              sparsity_technique=last_layer_technique)
        return net
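The depth check in build() enforces depth = 6n + 4, where n is the number of residual blocks per stage. A small illustration (the depth values are chosen only as examples):
# depth = 6n + 4  =>  n = (depth - 4) // 6 residual blocks per stage
for depth in (10, 16, 22, 28):
    assert (depth - 4) % 6 == 0
    print(depth, '->', (depth - 4) // 6, 'blocks per stage')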
def LeNet(x):

    mu = 0
    sigma = 0.1

    # SOLUTION: Layer 1: Convolutional.
    conv1_W = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)
    #conv1 = tf.nn.softmax(conv1)
    #conv1 = tf.sigmoid(conv1)
    #conv1 = tf.tanh(conv1)

    # SOLUTION: Pooling.
    conv1 = tf.nn.max_pool(conv1,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')
    #conv1 = tf.nn.avg_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Layer 2: Convolutional.
    conv2_W = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 6, 20), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(20))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv2_b

    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)
    #conv2 = tf.nn.softmax(conv2)
    #conv2 = tf.sigmoid(conv2)
    #conv2 = tf.tanh(conv2)

    # SOLUTION: Pooling.
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')
    #conv2 = tf.nn.avg_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # SOLUTION: Flatten.
    fc0 = flatten(conv2)

    # SOLUTION: Layer 3: Fully Connected. Input = 500. Output = 200.
    fc1_W = tf.Variable(
        tf.truncated_normal(shape=(500, 200), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(200))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)
    #fc1 = tf.nn.softmax(fc1)
    #fc1 = tf.sigmoid(fc1)
    #fc1 = tf.tanh(fc1)

    # Dropout: prevent overfitting
    fc1 = tf.nn.dropout(fc1, keep_prob)

    # SOLUTION: Layer 4: Fully Connected. Input = 200. Output = 100.
    fc2_W = tf.Variable(
        tf.truncated_normal(shape=(200, 100), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(100))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b

    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)
    #fc2 = tf.nn.softmax(fc2)
    #fc2 = tf.sigmoid(fc2)
    #fc2 = tf.tanh(fc2)

    # Dropout: prevent overfitting
    fc2 = tf.nn.dropout(fc2, keep_prob)

    # SOLUTION: Layer 5: Fully Connected. Input = 100. Output = n_classes (43).
    fc3_W = tf.Variable(
        tf.truncated_normal(shape=(100, n_classes), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(n_classes))
    logits = tf.matmul(fc2, fc3_W) + fc3_b

    return logits
Example #45
0
def LeNet(x, keep_prob):
    # Hyperparameters
    mu = 0
    sigma = 0.1

    # SOLUTION: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1_W = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 1, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)

    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv2_b

    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)

    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(
        tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)

    # DROPOUT
    fc1 = tf.nn.dropout(fc1, keep_prob)

    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(
        tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b

    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)

    # DROPOUT
    fc2 = tf.nn.dropout(fc2, keep_prob)

    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(
        tf.truncated_normal(shape=(84, 43), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b

    return logits
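The keep_prob argument above is a dropout placeholder; it is typically fed a value below 1.0 while training and 1.0 while evaluating. A hypothetical feed sketch (sess, train_op, accuracy, x, y and the data batches are assumed to exist elsewhere in the training script):
# Dropout active during training, disabled during evaluation (illustrative only).
sess.run(train_op, feed_dict={x: X_batch, y: y_batch, keep_prob: 0.5})
val_acc = sess.run(accuracy, feed_dict={x: X_val, y: y_val, keep_prob: 1.0})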
Example #46
0
File: proj.py  Project: smurthas/CarND
def lenet(x_input):
    """Implements an alter version of LeNet in Tensorflow"""
    # Hyperparameters
    mu = 0
    sigma = 0.1

    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 3, 6),
                                              mean=mu,
                                              stddev=sigma),
                          name="conv1_W")
    conv1_b = tf.Variable(tf.zeros(6), name="conv1_b")
    conv1 = tf.nn.conv2d(
        x_input, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)

    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16),
                                              mean=mu,
                                              stddev=sigma),
                          name="conv2_W")
    conv2_b = tf.Variable(tf.zeros(16), name="conv2_b")
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv2_b

    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)

    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120),
                                            mean=mu,
                                            stddev=sigma),
                        name="fc1_W")
    fc1_b = tf.Variable(tf.zeros(120), name="fc1_b")
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)

    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84),
                                            mean=mu,
                                            stddev=sigma),
                        name="fc2_W")
    fc2_b = tf.Variable(tf.zeros(84), name="fc2_b")
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b

    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)

    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(tf.truncated_normal(shape=(84, n_classes),
                                            mean=mu,
                                            stddev=sigma),
                        name="fc3_W")
    fc3_b = tf.Variable(tf.zeros(n_classes), name="fc3_b")
    return tf.matmul(fc2, fc3_W) + fc3_b
Example #47
0
def LeNet(x):
    mu = 0
    sigma = 0.1

    # LAYER 1:
    with tf.variable_scope('layer-conv1'):
        conv1_w = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6),
                                                  mean=mu,
                                                  stddev=sigma),
                              name='conv1_w')
        conv1_b = tf.Variable(tf.zeros(6), name='conv1_b')
        conv1 = tf.add(tf.nn.conv2d(x,
                                    conv1_w,
                                    strides=[1, 1, 1, 1],
                                    padding='VALID'),
                       conv1_b,
                       name='conv1')

        conv1 = tf.nn.relu(conv1, name='conv1-relu')

        conv1 = tf.nn.max_pool(conv1,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='conv1-maxpool')
        print(conv1.shape)

    # LAYER 2:
    with tf.variable_scope('layer-conv2'):
        conv2_w = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16),
                                                  mean=mu,
                                                  stddev=sigma),
                              name='conv2_w')
        conv2_b = tf.Variable(tf.zeros(16), name='conv2_b')
        conv2 = tf.add(tf.nn.conv2d(conv1,
                                    conv2_w,
                                    strides=[1, 1, 1, 1],
                                    padding='VALID'),
                       conv2_b,
                       name='conv2')

        conv2 = tf.nn.relu(conv2, name='conv2-relu')

        conv2 = tf.nn.max_pool(conv2,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='conv2-maxpool')
        print(conv2.shape)

    # LAYER 3
    fc0 = flatten(conv2)

    # LAYER 4
    with tf.variable_scope('layer-fc1'):
        fc1_w = tf.Variable(tf.truncated_normal(shape=(400, 120),
                                                mean=mu,
                                                stddev=sigma),
                            name='w')
        fc1_b = tf.Variable(tf.zeros(120), name='b')
        fc1 = tf.add(tf.matmul(fc0, fc1_w), fc1_b, name='fc1')

        fc1 = tf.nn.relu(fc1, name='relu')

    # LAYER 5
    with tf.variable_scope('layer-fc2'):
        fc2_w = tf.Variable(tf.truncated_normal(shape=(120, 84),
                                                mean=mu,
                                                stddev=sigma),
                            name='w')
        fc2_b = tf.Variable(tf.zeros(84), name='b')
        fc2 = tf.add(tf.matmul(fc1, fc2_w), fc2_b, name='fc2')

        fc2 = tf.nn.relu(fc2, name='relu')

    # LAYER 6
    with tf.variable_scope('layer-fc3'):
        fc3_w = tf.Variable(tf.truncated_normal(shape=(84, n_classes),
                                                mean=mu,
                                                stddev=sigma),
                            name='w')
        fc3_b = tf.Variable(tf.zeros(n_classes), name='b')
        fc3 = tf.add(tf.matmul(fc2, fc3_w), fc3_b, name='fc3')

    logits = fc3
    return logits
Example #48
0
def build_atari(minimap, screen, info, msize, ssize, num_action):
    # Extract features
    mconv1 = layers.conv2d(tf.transpose(minimap, [0, 2, 3, 1]),
                           num_outputs=16,
                           kernel_size=8,
                           stride=4,
                           scope='mconv1')
    mconv2 = layers.conv2d(mconv1,
                           num_outputs=32,
                           kernel_size=4,
                           stride=2,
                           scope='mconv2')
    sconv1 = layers.conv2d(tf.transpose(screen, [0, 2, 3, 1]),
                           num_outputs=16,
                           kernel_size=8,
                           stride=4,
                           scope='sconv1')
    sconv2 = layers.conv2d(sconv1,
                           num_outputs=32,
                           kernel_size=4,
                           stride=2,
                           scope='sconv2')
    info_fc = layers.fully_connected(layers.flatten(info),
                                     num_outputs=256,
                                     activation_fn=tf.tanh,
                                     scope='info_fc')

    # Compute spatial actions, non spatial actions and value
    feat_fc = tf.concat(
        [layers.flatten(mconv2),
         layers.flatten(sconv2), info_fc], axis=1)
    feat_fc = layers.fully_connected(feat_fc,
                                     num_outputs=256,
                                     activation_fn=tf.nn.relu,
                                     scope='feat_fc')

    spatial_action_x = layers.fully_connected(feat_fc,
                                              num_outputs=ssize,
                                              activation_fn=tf.nn.softmax,
                                              scope='spatial_action_x')
    spatial_action_y = layers.fully_connected(feat_fc,
                                              num_outputs=ssize,
                                              activation_fn=tf.nn.softmax,
                                              scope='spatial_action_y')
    spatial_action_x = tf.reshape(spatial_action_x, [-1, 1, ssize])
    spatial_action_x = tf.tile(spatial_action_x, [1, ssize, 1])
    spatial_action_y = tf.reshape(spatial_action_y, [-1, ssize, 1])
    spatial_action_y = tf.tile(spatial_action_y, [1, 1, ssize])
    spatial_action = layers.flatten(spatial_action_x * spatial_action_y)

    non_spatial_action = layers.fully_connected(feat_fc,
                                                num_outputs=num_action,
                                                activation_fn=tf.nn.softmax,
                                                scope='non_spatial_action')
    value = tf.reshape(
        layers.fully_connected(feat_fc,
                               num_outputs=1,
                               activation_fn=None,
                               scope='value'), [-1])

    return spatial_action, non_spatial_action, value
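The spatial_action tensor above is the flattened outer product of independent x and y softmaxes, i.e. a joint distribution over ssize*ssize screen cells. A small NumPy check of the same reshape/tile logic (ssize=3 and the probabilities are chosen only for illustration):
import numpy as np

ssize = 3
px = np.array([[0.2, 0.3, 0.5]])  # softmax over x, shape (1, ssize)
py = np.array([[0.1, 0.6, 0.3]])  # softmax over y, shape (1, ssize)

# Outer product: broadcasting reproduces the reshape + tile pattern above.
joint = py.reshape(1, ssize, 1) * px.reshape(1, 1, ssize)  # shape (1, ssize, ssize)
spatial_action = joint.reshape(1, -1)                      # flattened, sums to 1
assert np.isclose(spatial_action.sum(), 1.0)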
def LeNet(x):
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1

    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    wc1 = tf.Variable(tf.truncated_normal([5, 5, 1, 6], mean=mu, stddev=sigma))
    bc1 = tf.Variable(tf.truncated_normal([6], mean=mu, stddev=sigma))
    conv1 = tf.nn.conv2d(x, wc1, strides=[1, 1, 1, 1], padding='VALID')
    conv1 = tf.nn.bias_add(conv1, bc1)

    # Activation.
    conv1 = tf.nn.relu(conv1)

    # Pooling. Input = 28x28x6. Output = 14x14x6.
    p1 = tf.nn.max_pool(conv1,
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='VALID')

    # Layer 2: Convolutional. Output = 10x10x16.
    wc2 = tf.Variable(tf.truncated_normal([5, 5, 6, 16], mean=mu,
                                          stddev=sigma))
    bc2 = tf.Variable(tf.truncated_normal([16], mean=mu, stddev=sigma))
    conv2 = tf.nn.conv2d(p1, wc2, strides=[1, 1, 1, 1], padding='VALID')
    conv2 = tf.nn.bias_add(conv2, bc2)

    # Activation.
    conv2 = tf.nn.relu(conv2)

    # Pooling. Input = 10x10x16. Output = 5x5x16.
    p2 = tf.nn.max_pool(conv2,
                        ksize=[1, 2, 2, 1],
                        strides=[1, 2, 2, 1],
                        padding='VALID')

    # Layer 3: Convolutional. Output = 2x2x100.
    wc3 = tf.Variable(
        tf.truncated_normal([4, 4, 16, 100], mean=mu, stddev=sigma))
    bc3 = tf.Variable(tf.truncated_normal([100], mean=mu, stddev=sigma))
    conv3 = tf.nn.conv2d(p2, wc3, strides=[1, 1, 1, 1], padding='VALID')
    conv3 = tf.nn.bias_add(conv3, bc3)

    # Activation.
    p3 = tf.nn.relu(conv3)

    # Flatten. Input = 2x2x100. Output = 400.
    flat = flatten(p3)

    # Layer 4: Fully Connected. Input = 400. Output = 120.
    wfc1 = tf.Variable(tf.truncated_normal([400, 120], mean=mu, stddev=sigma))
    bfc1 = tf.Variable(tf.truncated_normal([120], mean=mu, stddev=sigma))
    fc1 = tf.add(tf.matmul(flat, wfc1), bfc1)

    # Activation.
    fc1 = tf.nn.relu(fc1)

    # Dropout
    fc1 = tf.nn.dropout(fc1, keep_prob)

    #     # Layer 4: Fully Connected. Input = 120. Output = 84.
    #     wfc2 = tf.Variable(tf.truncated_normal([120, 84], mean=mu, stddev=sigma))
    #     bfc2 = tf.Variable(tf.truncated_normal([84], mean=mu, stddev=sigma))
    #     fc2 = tf.add(tf.matmul(fc1, wfc2), bfc2)

    #     # Activation.
    #     fc2 = tf.nn.relu(fc2)

    #     # Dropout
    #     fc2 = tf.nn.dropout(fc2, keep_prob)

    #     # Layer 5: Fully Connected. Input = 84. Output = 43.
    #     wout = tf.Variable(tf.truncated_normal([84, 43], mean=mu, stddev=sigma))
    #     bout = tf.Variable(tf.truncated_normal([43], mean=mu, stddev=sigma))
    #     logits = tf.add(tf.matmul(fc2, wout), bout)

    # Layer 5: Fully Connected. Input = 120. Output = 10.
    wout = tf.Variable(tf.truncated_normal([120, 10], mean=mu, stddev=sigma))
    bout = tf.Variable(tf.truncated_normal([10], mean=mu, stddev=sigma))
    logits = tf.add(tf.matmul(fc1, wout), bout)

    return logits
Example #50
0
    def create_model(self, input):
        # STEM Network
        with tf.variable_scope('stem'):
            self.conv2d_1 = conv2d(input,
                                   num_outputs=32,
                                   kernel_size=[3, 3],
                                   stride=2,
                                   padding='VALID',
                                   activation_fn=tf.nn.relu)
            self.conv2d_2 = conv2d(self.conv2d_1,
                                   num_outputs=32,
                                   kernel_size=[3, 3],
                                   stride=1,
                                   padding='VALID',
                                   activation_fn=tf.nn.relu)
            self.conv2d_3 = conv2d(self.conv2d_2,
                                   num_outputs=64,
                                   kernel_size=[3, 3],
                                   stride=1,
                                   padding='SAME',
                                   activation_fn=tf.nn.relu)
            self.pool_1 = max_pool2d(self.conv2d_3,
                                     kernel_size=[3, 3],
                                     stride=2,
                                     padding='VALID')

            self.conv2d_4 = conv2d(self.pool_1,
                                   num_outputs=80,
                                   kernel_size=[3, 3],
                                   stride=1,
                                   padding='VALID',
                                   activation_fn=tf.nn.relu)
            self.conv2d_5 = conv2d(self.conv2d_4,
                                   num_outputs=192,
                                   kernel_size=[3, 3],
                                   stride=2,
                                   padding='VALID',
                                   activation_fn=tf.nn.relu)
            self.pool_2 = max_pool2d(self.conv2d_5,
                                     kernel_size=[3, 3],
                                     stride=2,
                                     padding='VALID')

            prev = self.pool_2

        # Inception (3) 1, 2, 3
        inception3_nums = {
            'branch_a': [64, 64, 64],
            'branch_b_1': [48, 48, 48],
            'branch_b_2': [64, 64, 64],
            'branch_c_1': [64, 64, 64],
            'branch_c_2': [96, 96, 96],
            'branch_c_3': [96, 96, 96],
            'branch_d': [32, 64, 64]
        }

        with tf.variable_scope('inception_3'):
            for i in range(3):
                branch_a_kernels = inception3_nums['branch_a'][i]
                branch_b_1_kernels = inception3_nums['branch_b_1'][i]
                branch_b_2_kernels = inception3_nums['branch_b_2'][i]
                branch_c_1_kernels = inception3_nums['branch_c_1'][i]
                branch_c_2_kernels = inception3_nums['branch_c_2'][i]
                branch_c_3_kernels = inception3_nums['branch_c_3'][i]
                branch_d_kernels = inception3_nums['branch_d'][i]

                branch_a = conv2d(prev,
                                  num_outputs=branch_a_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')

                branch_b = conv2d(prev,
                                  num_outputs=branch_b_1_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')
                branch_b = conv2d(branch_b,
                                  num_outputs=branch_b_2_kernels,
                                  kernel_size=[3, 3],
                                  stride=1,
                                  padding='SAME')

                branch_c = conv2d(prev,
                                  num_outputs=branch_c_1_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')
                branch_c = conv2d(branch_c,
                                  num_outputs=branch_c_2_kernels,
                                  kernel_size=[3, 3],
                                  stride=1,
                                  padding='SAME')
                branch_c = conv2d(branch_c,
                                  num_outputs=branch_c_3_kernels,
                                  kernel_size=[3, 3],
                                  stride=1,
                                  padding='SAME')

                branch_d = avg_pool2d(prev,
                                      kernel_size=[3, 3],
                                      stride=1,
                                      padding='SAME')
                branch_d = conv2d(branch_d,
                                  num_outputs=branch_d_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')

                layers_concat = list()
                layers_concat.append(branch_a)
                layers_concat.append(branch_b)
                layers_concat.append(branch_c)
                layers_concat.append(branch_d)
                prev = tf.concat(layers_concat, 3)

        with tf.variable_scope('grid_reduction_a'):
            branch_a = conv2d(prev,
                              num_outputs=384,
                              kernel_size=[3, 3],
                              stride=2,
                              padding='VALID')

            branch_b = conv2d(prev,
                              num_outputs=64,
                              kernel_size=[1, 1],
                              stride=1,
                              padding='SAME')
            branch_b = conv2d(branch_b,
                              num_outputs=96,
                              kernel_size=[3, 3],
                              stride=1,
                              padding='SAME')
            branch_b = conv2d(branch_b,
                              num_outputs=96,
                              kernel_size=[3, 3],
                              stride=2,
                              padding='VALID')

            branch_c = max_pool2d(prev,
                                  kernel_size=[3, 3],
                                  stride=2,
                                  padding='VALID')

            layers_concat = list()
            layers_concat.append(branch_a)
            layers_concat.append(branch_b)
            layers_concat.append(branch_c)
            prev = tf.concat(layers_concat, 3)

        inception5_nums = {
            'branch_a': [192, 192, 192, 192],
            'branch_b_1': [128, 160, 160, 192],
            'branch_b_2': [128, 160, 160, 192],
            'branch_b_3': [192, 192, 192, 192],
            'branch_c_1': [128, 160, 160, 192],
            'branch_c_2': [128, 160, 160, 192],
            'branch_c_3': [128, 160, 160, 192],
            'branch_c_4': [128, 160, 160, 192],
            'branch_c_5': [192, 192, 192, 192],
            'branch_d': [192, 192, 192, 192]
        }

        with tf.variable_scope('inception_5'):
            for i in range(4):
                branch_a_kernels = inception5_nums['branch_a'][i]
                branch_b_1_kernels = inception5_nums['branch_b_1'][i]
                branch_b_2_kernels = inception5_nums['branch_b_2'][i]
                branch_b_3_kernels = inception5_nums['branch_b_3'][i]
                branch_c_1_kernels = inception5_nums['branch_c_1'][i]
                branch_c_2_kernels = inception5_nums['branch_c_2'][i]
                branch_c_3_kernels = inception5_nums['branch_c_3'][i]
                branch_c_4_kernels = inception5_nums['branch_c_4'][i]
                branch_c_5_kernels = inception5_nums['branch_c_5'][i]
                branch_d_kernels = inception5_nums['branch_d'][i]

                branch_a = conv2d(prev,
                                  num_outputs=branch_a_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')

                branch_b = conv2d(prev,
                                  num_outputs=branch_b_1_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')
                branch_b = conv2d(branch_b,
                                  num_outputs=branch_b_2_kernels,
                                  kernel_size=[1, 7],
                                  stride=1,
                                  padding='SAME')
                branch_b = conv2d(branch_b,
                                  num_outputs=branch_b_3_kernels,
                                  kernel_size=[7, 1],
                                  stride=1,
                                  padding='SAME')

                branch_c = conv2d(prev,
                                  num_outputs=branch_c_1_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')
                branch_c = conv2d(branch_c,
                                  num_outputs=branch_c_2_kernels,
                                  kernel_size=[7, 7],
                                  stride=1,
                                  padding='SAME')
                branch_c = conv2d(branch_c,
                                  num_outputs=branch_c_3_kernels,
                                  kernel_size=[1, 7],
                                  stride=1,
                                  padding='SAME')
                branch_c = conv2d(branch_c,
                                  num_outputs=branch_c_4_kernels,
                                  kernel_size=[7, 1],
                                  stride=1,
                                  padding='SAME')
                branch_c = conv2d(branch_c,
                                  num_outputs=branch_c_5_kernels,
                                  kernel_size=[1, 7],
                                  stride=1,
                                  padding='SAME')

                branch_d = avg_pool2d(prev,
                                      kernel_size=[3, 3],
                                      stride=1,
                                      padding='SAME')
                branch_d = conv2d(branch_d,
                                  num_outputs=branch_d_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')

                layers_concat = list()
                layers_concat.append(branch_a)
                layers_concat.append(branch_b)
                layers_concat.append(branch_c)
                layers_concat.append(branch_d)
                prev = tf.concat(layers_concat, 3)

            self.aux = prev

        with tf.variable_scope('grid_reduction_b'):
            branch_base = conv2d(prev,
                                 num_outputs=192,
                                 kernel_size=[1, 1],
                                 stride=1,
                                 padding='SAME')

            branch_a = conv2d(branch_base,
                              num_outputs=320,
                              kernel_size=[3, 3],
                              stride=2,
                              padding='VALID')

            branch_b = conv2d(branch_base,
                              num_outputs=192,
                              kernel_size=[1, 7],
                              stride=1,
                              padding='SAME')
            branch_b = conv2d(branch_b,
                              num_outputs=192,
                              kernel_size=[7, 1],
                              stride=1,
                              padding='SAME')
            branch_b = conv2d(branch_b,
                              num_outputs=192,
                              kernel_size=[3, 3],
                              stride=2,
                              padding='VALID')

            branch_c = max_pool2d(prev,
                                  kernel_size=[3, 3],
                                  stride=2,
                                  padding='VALID')

            layers_concat = list()
            layers_concat.append(branch_a)
            layers_concat.append(branch_b)
            layers_concat.append(branch_c)
            prev = tf.concat(layers_concat, 3)

        inception2_nums = {
            'branch_a': [320, 320],
            'branch_b_1': [384, 384],
            'branch_b_2': [384, 384],
            'branch_b_3': [384, 384],
            'branch_c_1': [448, 448],
            'branch_c_2': [384, 384],
            'branch_c_3': [384, 384],
            'branch_d': [192, 192]
        }

        with tf.variable_scope('inception_2'):
            for i in range(2):
                branch_a_kernels = inception2_nums['branch_a'][i]
                branch_b_1_kernels = inception2_nums['branch_b_1'][i]
                branch_b_2_kernels = inception2_nums['branch_b_2'][i]
                branch_b_3_kernels = inception2_nums['branch_b_3'][i]
                branch_c_1_kernels = inception2_nums['branch_c_1'][i]
                branch_c_2_kernels = inception2_nums['branch_c_2'][i]
                branch_c_3_kernels = inception2_nums['branch_c_3'][i]
                branch_d_kernels = inception2_nums['branch_d'][i]

                branch_a = conv2d(prev,
                                  num_outputs=branch_a_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')

                branch_b = conv2d(prev,
                                  num_outputs=branch_b_1_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')
                branch_b = conv2d(branch_b,
                                  num_outputs=branch_b_2_kernels,
                                  kernel_size=[1, 3],
                                  stride=1,
                                  padding='SAME')
                branch_b = conv2d(branch_b,
                                  num_outputs=branch_b_3_kernels,
                                  kernel_size=[3, 1],
                                  stride=1,
                                  padding='SAME')

                branch_c = conv2d(prev,
                                  num_outputs=branch_c_1_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')
                branch_c = conv2d(branch_c,
                                  num_outputs=branch_c_2_kernels,
                                  kernel_size=[1, 3],
                                  stride=1,
                                  padding='SAME')
                branch_c = conv2d(branch_c,
                                  num_outputs=branch_c_3_kernels,
                                  kernel_size=[3, 1],
                                  stride=1,
                                  padding='SAME')

                branch_d = max_pool2d(prev,
                                      kernel_size=[3, 3],
                                      stride=1,
                                      padding='SAME')
                branch_d = conv2d(branch_d,
                                  num_outputs=branch_d_kernels,
                                  kernel_size=[1, 1],
                                  stride=1,
                                  padding='SAME')

                layers_concat = list()
                layers_concat.append(branch_a)
                layers_concat.append(branch_b)
                layers_concat.append(branch_c)
                layers_concat.append(branch_d)
                prev = tf.concat(layers_concat, 3)

        with tf.variable_scope('final'):
            self.aux_pool = avg_pool2d(self.aux,
                                       kernel_size=[5, 5],
                                       stride=3,
                                       padding='VALID')
            self.aux_conv = conv2d(self.aux_pool,
                                   num_outputs=128,
                                   kernel_size=[1, 1],
                                   stride=1,
                                   padding='SAME')
            self.aux_flat = flatten(self.aux_conv)
            self.aux_bn = tf.layers.batch_normalization(self.aux_flat)
            self.aux_out = fully_connected(self.aux_bn,
                                           num_outputs=self.num_classes,
                                           activation_fn=None)

            self.final_pool = avg_pool2d(prev,
                                         kernel_size=[2, 2],
                                         stride=1,
                                         padding='VALID')
            self.final_dropout = tf.nn.dropout(self.final_pool, 0.8)
            self.final_flat = flatten(self.final_dropout)
            self.final_bn = tf.layers.batch_normalization(self.final_flat)
            self.final_out = fully_connected(self.final_bn,
                                             num_outputs=self.num_classes,
                                             activation_fn=None)

        return [self.aux_out, self.final_out]
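
create_model returns an auxiliary head and a final head; Inception-style training typically adds the auxiliary cross-entropy to the main loss with a small weight. A minimal sketch assuming a one-hot labels tensor and an auxiliary weight of 0.4, neither of which comes from this class:

# Hedged sketch: combining the two heads returned above.
aux_out, final_out = model.create_model(images)   # hypothetical call site

main_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=final_out))
aux_loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=aux_out))

total_loss = main_loss + 0.4 * aux_loss            # auxiliary weight is an assumption
train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)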
示例#51
0
def dfb(input_images, 
            keep_prob,
            is_training=True,
            weight_decay=5e-5,
            batch_norm_decay=0.99,
            batch_norm_epsilon=0.001):
    with tf.variable_scope("Teacher_model"):     
        net, endpoints = resnet_v2(inputs=input_images,
                                num_classes=M,
                                is_training=True,
                                scope='resnet_v2')
        
        base_var_list = slim.get_model_variables('Teacher_model/resnet_v2')

        part_feature = endpoints["InvertedResidual_{}_{}".format(1024, 3)]
        object_feature = endpoints["InvertedResidual_{}_{}".format(1024, 5)]

        object_feature_h = object_feature.get_shape().as_list()[1]
        object_feature_w = object_feature.get_shape().as_list()[2]
        fc_obj = slim.max_pool2d(object_feature, (object_feature_h, object_feature_w), scope="GMP1")
        batch_norm_params = {
            'center': True,
            'scale': True,
            'decay': batch_norm_decay,
            'epsilon': batch_norm_epsilon,
        }

        fc_obj = slim.conv2d(fc_obj,
                            M,
                            [1, 1],
                            activation_fn=None,    
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope='fc_obj')
        fc_obj = tf.nn.dropout(fc_obj, keep_prob=keep_prob)
        fc_obj = slim.flatten(fc_obj)
        fc_part = slim.conv2d(part_feature,
                            M * k,          # number of conv kernels
                            [1, 1],         # kernel height and width
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,        # use batch norm as the normalizer
                            normalizer_params=batch_norm_params,
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay)
                            )
        fc_part_h = fc_part.get_shape().as_list()[1]
        fc_part_w = fc_part.get_shape().as_list()[2]
        fc_part = slim.max_pool2d(fc_part, (fc_part_h, fc_part_w), scope="GMP2")
        ft_list = tf.split(fc_part,
                        num_or_size_splits=M,
                        axis=-1)            # split along the last (channel) dimension
        cls_list = []
        for i in range(M):
            ft = tf.transpose(ft_list[i], [0, 1, 3, 2])
            cls = layers_lib.pool(ft,
                                [1, k],
                                "AVG")
            cls = layers.flatten(cls)
            cls_list.append(cls)
        fc_ccp = tf.concat(cls_list, axis=-1) #cross_channel_pooling (N, M)

        fc_part = slim.conv2d(fc_part,
                            M,
                            [1, 1],
                            activation_fn=None,                         
                            weights_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(weight_decay),
                            scope="fc_part")
        fc_part = tf.nn.dropout(fc_part, keep_prob=keep_prob)
        fc_part = slim.flatten(fc_part)
        t_var_list = slim.get_model_variables()
    return fc_obj, fc_part, fc_ccp, base_var_list, t_var_list
示例#52
0
File: parts_2.py    Project: HamSade/codes
def discriminator(wave_in, reuse=True):
    
      
    hi = wave_in
    
#    hi = tf.expand_dims(wave_in, -1)
        
#    batch_size = int(wave_in.get_shape()[0])


    # set up the disc_block function
   
    with tf.variable_scope('d_model') as scope:
        if reuse:
            scope.reuse_variables()

        def disc_block(block_idx, input_, kwidth, nfmaps, bnorm, activation, name, pooling=2):

                with tf.variable_scope('d_block_{}'.format(block_idx)):

                    if not reuse:
                        print('D block {} input shape: {}'
                              ''.format(block_idx, input_.get_shape()),
                              end=' *** ')

                    bias_init = None

                    if bias_D_conv:
                        if not reuse:
                            print('biasing D conv', end=' *** ')
                        bias_init = tf.constant_initializer(0.)

                    downconv_init = tf.truncated_normal_initializer(stddev=0.02)

##########################################
                    hi_a = downconv(input_, nfmaps, kwidth=kwidth, pool=pooling,
                                    init=downconv_init, bias_init=bias_init, name=name)
##########################################                    
                    
                    if not reuse:
                        print('downconved shape: {} '
                              ''.format(hi_a.get_shape()), end=' *** ')
                    
#                    if bnorm:
#                        if not reuse:
#                            print('Applying VBN', end=' *** ')
#                        hi_a = vbn(hi_a, 'd_vbn_{}'.format(block_idx))
                    
                    if activation == 'leakyrelu':
                        if not reuse:
                            print('Applying Lrelu', end=' *** ')
                        hi = leakyrelu(hi_a)
                    
                    elif activation == 'relu':
                        if not reuse:
                            print('Applying Relu', end=' *** ')
                        hi = tf.nn.relu(hi_a)
                    
                    else:
                        raise ValueError('Unrecognized activation {} '
                                         'in D'.format(activation))
                    return hi
                
                
#            beg_size = canvas_size

        # apply the input noise layer to real and fake samples

        hi = gaussian_noise_layer(hi, disc_noise_std)
            
        if not reuse:
            print('*** Discriminator summary ***')
            
        
        for block_idx, fmaps in enumerate(d_num_fmaps):
            
            hi = disc_block(block_idx, hi, 31, d_num_fmaps[block_idx], False, 'leakyrelu',
                            name='db_{}_{}'.format(block_idx,fmaps))
            
            if not reuse:
                print()
        
        if not reuse:
            print('discriminator deconved shape: ', hi.get_shape())
        
        hi_f = flatten(hi)  #keeps batch size, flatten everything else
        
        #hi_f = tf.nn.dropout(hi_f, self.keep_prob_var)
        
        d_logit_out = conv1d(hi, kwidth=1, num_kernels=1,
                             init=tf.truncated_normal_initializer(stddev=0.02),
                             name='logits_conv')
        
        d_logit_out = tf.squeeze(d_logit_out)  #removes dimensions of 1
        
        # all logits connected to 1 single neuron for binary classification
        d_logit_out = fully_connected(d_logit_out, 1, activation_fn=None)
        
        if not reuse:
            print('discriminator output shape: ', d_logit_out.get_shape())
            print('*****************************')
            
            
        return d_logit_out    
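
The single output neuron above acts as a binary real/fake classifier; a common objective (a sketch only, assuming real_wave and fake_wave tensors, and not necessarily what this project uses) is sigmoid cross-entropy:

# Hedged sketch: a standard GAN objective on the discriminator logits.
d_real = discriminator(real_wave, reuse=False)
d_fake = discriminator(fake_wave, reuse=True)

d_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=tf.ones_like(d_real)))
d_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.zeros_like(d_fake)))
d_loss = d_loss_real + d_loss_fake

g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=tf.ones_like(d_fake)))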
示例#53
0
def LeNet(x):
    # Hyperparameters
    mu = 0
    sigma = 0.1
    layer_depth = {'layer_1': 6, 'layer_2': 16, 'layer_3': 120, 'layer_f1': 84}

    # TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1_w = tf.Variable(
        tf.truncated_normal(shape=[5, 5, 1, 6], mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_w, strides=[1, 1, 1, 1],
                         padding='VALID') + conv1_b
    # TODO: Activation.
    conv1 = tf.nn.relu(conv1)

    # TODO: Pooling. Input = 28x28x6. Output = 14x14x6.
    pool_1 = tf.nn.max_pool(conv1,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding='VALID')

    # TODO: Layer 2: Convolutional. Output = 10x10x16.
    conv2_w = tf.Variable(
        tf.truncated_normal(shape=[5, 5, 6, 16], mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(
        pool_1, conv2_w, strides=[1, 1, 1, 1], padding='VALID') + conv2_b
    # TODO: Activation.
    conv2 = tf.nn.relu(conv2)

    # TODO: Pooling. Input = 10x10x16. Output = 5x5x16.
    pool_2 = tf.nn.max_pool(conv2,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding='VALID')

    # TODO: Flatten. Input = 5x5x16. Output = 400.
    fc1 = flatten(pool_2)

    # TODO: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_w = tf.Variable(
        tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc1, fc1_w) + fc1_b

    # TODO: Activation.
    fc1 = tf.nn.relu(fc1)

    # TODO: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_w = tf.Variable(
        tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_w) + fc2_b
    # TODO: Activation.
    fc2 = tf.nn.relu(fc2)

    # TODO: Layer 5: Fully Connected. Input = 84. Output = 10.
    fc3_w = tf.Variable(
        tf.truncated_normal(shape=(84, 10), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(10))
    logits = tf.matmul(fc2, fc3_w) + fc3_b
    return logits
def LeCeption(x):    
    mu = 0
    sigma = 0.1
    
    # Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
    conv1_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1   = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1], padding='VALID') + conv1_b

    # Activation.
    conv1 = tf.nn.leaky_relu(conv1)

    # Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')

    # Layer 2_1: Convolutional (Inception 1). Input = 14x14x6. Output = 10x10x16.
    conv21_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 6, 16), mean = mu, stddev = sigma))
    conv21_b = tf.Variable(tf.zeros(16))
    conv21   = tf.nn.conv2d(conv1, conv21_W, strides=[1, 1, 1, 1], padding='VALID') + conv21_b
    
    # Layer 2_2: Convolutional (Inception 2). Input = 14x14x6. Output = 12x12x16.
    conv22_W = tf.Variable(tf.truncated_normal(shape=(3, 3, 6, 16), mean = mu, stddev = sigma))
    conv22_b = tf.Variable(tf.zeros(16))
    conv22   = tf.nn.conv2d(conv1, conv22_W, strides=[1, 1, 1, 1], padding='VALID') + conv22_b
    
    # Layer 2_2: Double Max Pool. Input = 12x12x16. Output = 10x10x16
    conv22 = tf.nn.max_pool(conv22, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
    conv22 = tf.nn.max_pool(conv22, ksize=[1, 2, 2, 1], strides=[1, 1, 1, 1], padding='VALID')
    
    # Inception Stack. Output = 10x10x32
    conv2 = tf.concat((conv21, conv22), 3)
    
    # Activation.
    conv2 = tf.nn.leaky_relu(conv2)

    # Pooling. Input = 10x10x32. Output = 5x5x32.
    conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
        
    # Branch off bypass. Input = 5x5x32. Output = 800.
    bypBranch = flatten(conv2)
    
    # Main feedforward path
    # Layer 3: Convolutional. Input = 5x5x32. Output = 1x1x800.
    conv3_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 32, 800), mean = mu, stddev = sigma))
    conv3_b = tf.Variable(tf.zeros(800))
    conv3 = tf.nn.conv2d(conv2, conv3_W, strides=[1, 1, 1, 1], padding='VALID') + conv3_b
    
    # Activation
    conv3 = tf.nn.leaky_relu(conv3)
    
    # Merge branches. Output = 1600.
    mainBranch = flatten(conv3)
    fc0 = tf.concat([mainBranch, bypBranch], 1)
    
    fc0 = tf.nn.dropout(fc0, keep_prob)

    # Layer 4: Fully Connected. Input = 1600. Output = 400.
    fc1_W = tf.Variable(tf.truncated_normal(shape=(1600, 400), mean = mu, stddev = sigma))
    fc1_b = tf.Variable(tf.zeros(400))
    fc1   = tf.matmul(fc0, fc1_W) + fc1_b
    # Activation.
    fc1    = tf.nn.leaky_relu(fc1)
    
    fc1    = tf.nn.dropout(fc1, keep_prob)

    # Layer 5: Fully Connected. Input = 400. Output = 84.
    fc2_W  = tf.Variable(tf.truncated_normal(shape=(400, 84), mean = mu, stddev = sigma))
    fc2_b  = tf.Variable(tf.zeros(84))
    fc2    = tf.matmul(fc1, fc2_W) + fc2_b
    
    # Activation.
    fc2    = tf.nn.leaky_relu(fc2)
    
    fc2    = tf.nn.dropout(fc2, keep_prob)

    # Layer 6: Fully Connected. Input = 84. Output = 43.
    fc3_W  = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean = mu, stddev = sigma))
    fc3_b  = tf.Variable(tf.zeros(n_classes))
    logits = tf.matmul(fc2, fc3_W) + fc3_b
    
    return logits
示例#55
0
    def build_model(self):
        with tf.name_scope('inputs'):
            self.X = tf.placeholder(tf.float32,
                                    shape=[
                                        None, self.height_width,
                                        self.height_width, self.channels
                                    ],
                                    name="X")
            self.y_true = tf.placeholder(tf.int64,
                                         shape=[None, self.num_classes],
                                         name='y_true')
            y_true_cls = tf.argmax(self.y_true, axis=1)
            self.is_traing = tf.placeholder(tf.bool, name='is_traing')
        # Normalizing the images
        self.X = tf.map_fn(lambda img: tf.image.per_image_standardization(img),
                           self.X)

        with tf.name_scope('data_augmentation'):
            self.X = tf.map_fn(
                lambda img: tf.image.random_brightness(img, max_delta=20 / 255
                                                       ), self.X)
            self.X = tf.map_fn(
                lambda img: tf.image.random_contrast(img, lower=0.5, upper=2),
                self.X)
        # inception layer
        if self.layer_name == 'PreLogits':  # Running on the entire network, remove only the last layer
            with slim.arg_scope(inception.inception_v3_arg_scope()):
                _, end_points = inception.inception_v3(self.X,
                                                       num_classes=1001,
                                                       is_training=self.is_traing)
                end_transfer_net = tf.squeeze(end_points[self.layer_name],
                                              axis=[1, 2])
        else:
            with slim.arg_scope(inception.inception_v3_arg_scope()):
                end_transfer_net, _ = inception.inception_v3_base(
                    self.X,
                    final_endpoint=self.layer_name,
                    min_depth=16,
                    depth_multiplier=1.0,
                    scope=None)

        tf.summary.histogram(self.layer_name, end_transfer_net)
        self.inception_saver = tf.train.Saver()

        if self.layer_name != 'PreLogits':
            # extra CNN for identifying features focused on mammograms
            self.conv_net, self.w1 = self.conv2d(end_transfer_net,
                                                 self.nfs,
                                                 k_h=self.fs,
                                                 k_w=self.fs,
                                                 d_h=1,
                                                 d_w=1,
                                                 stddev=0.02,
                                                 name='conv',
                                                 padding='VALID')
            tf.summary.histogram('conv', self.conv_net)
            tf.summary.histogram('Weight_conv', self.w1)

            x_maxpool = tf.nn.max_pool(value=self.conv_net,
                                       ksize=[
                                           1,
                                           self.conv_net.get_shape()[1],
                                           self.conv_net.get_shape()[1], 1
                                       ],
                                       strides=[1, 2, 2, 1],
                                       padding='VALID')
            end_transfer_net = tf.nn.relu(x_maxpool)
            self.flatt = flatten(end_transfer_net, scope='flatten')
            tf.summary.histogram('flatten', self.flatt)
        else:
            self.flatt = end_transfer_net
            tf.summary.histogram('flatten', self.flatt)

        self.x_hidden = fully_connected(self.flatt,
                                        self.n_hidden,
                                        scope='hidden')
        tf.summary.histogram('hidden', self.x_hidden)

        self.logits = fully_connected(self.x_hidden,
                                      self.num_classes,
                                      activation_fn=None,
                                      scope='output')
        tf.summary.histogram('logits', self.logits)

        with tf.name_scope('predicted'):
            self.y_pred = tf.nn.softmax(self.logits)
            self.y_pred_cls = tf.argmax(self.y_pred, axis=1)

        with tf.name_scope('loss'):
            cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
                logits=self.logits, labels=self.y_true, name='xentropy')
            self.loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
            tf.summary.scalar('loss', self.loss)

        with tf.name_scope('performance'):
            correct_prediction = tf.equal(self.y_pred_cls, y_true_cls)
            self.accuracy = tf.reduce_mean(
                tf.cast(correct_prediction, tf.float32))

        self.merged = tf.summary.merge_all()  # merge all summaries for TensorBoard
        self.saver = tf.train.Saver(max_to_keep=1)
def TrafficModel(x):
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1

    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 3, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)

    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv2_b

    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)

    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 200 (was 120).
    #fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
    #fc1_b = tf.Variable(tf.zeros(120))
    fc1_W = tf.Variable(
        tf.truncated_normal(shape=(400, 200), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(200))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)

    ##--------------------------------

    # Layer 4: Fully Connected. Input = 200. Output = 120.
    fc4_W = tf.Variable(
        tf.truncated_normal(shape=(200, 120), mean=mu, stddev=sigma))
    fc4_b = tf.Variable(tf.zeros(shape=120))
    fc4 = tf.matmul(fc1, fc4_W) + fc4_b

    # Activation.
    fc4 = tf.nn.relu(fc4)

    # Introduce Dropout after first fully connected layer
    #fc4 = tf.nn.dropout(fc4, keep_prob)

    ##-------------------------------

    # SOLUTION: Layer 5: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(
        tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc4, fc2_W) + fc2_b

    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)

    # SOLUTION: Layer 6: Fully Connected. Input = 84. Output = 43.
    fc3_W = tf.Variable(
        tf.truncated_normal(shape=(84, 43), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(43))
    logits = tf.matmul(fc2, fc3_W) + fc3_b

    return logits
示例#57
0
def squeeze_net(input, classes):
    """
    SqueezeNet model written in tensorflow. It provides AlexNet level accuracy with 50x fewer parameters
    and smaller model size.
    :param input: Input tensor (4D)
    :param classes: number of classes for classification
    :return: Tensorflow tensor
    """

    # Input has 3 channels, output has 96 channels
    weights = {
        'conv1': tf.Variable(tf.truncated_normal([7, 7, 3, 96])),
        'conv10': tf.Variable(tf.truncated_normal([1, 1, 512, classes])),
        'fc12': tf.Variable(tf.truncated_normal(shape=(1425, classes)))
    }

    biases = {
        'conv1': tf.Variable(tf.truncated_normal([96])),
        'conv10': tf.Variable(tf.truncated_normal([classes])),
        'fc12': tf.Variable(tf.truncated_normal([classes]))
    }

    # Layer 1: Convolutional with 96 output channels
    output = tf.nn.conv2d(input,
                          weights['conv1'],
                          strides=[1, 2, 2, 1],
                          padding='SAME',
                          name='conv1')
    output = tf.nn.bias_add(output, biases['conv1'])
    conv1 = tf.nn.relu(output)
    conv1_pool = tf.nn.max_pool(conv1,
                                ksize=[1, 3, 3, 1],
                                strides=[1, 2, 2, 1],
                                padding='SAME',
                                name='maxpool1')

    # Layer 2: Fire module with 96 output channels
    output = fire_module(conv1_pool,
                         s1=16,
                         e1=64,
                         e2=64,
                         channel=96,
                         fire_id='fire2')
    fire2 = tf.nn.relu(output)

    # Layer 3: Fire module with 128 output channels
    output = fire_module(fire2,
                         s1=16,
                         e1=64,
                         e2=64,
                         channel=128,
                         fire_id='fire3')
    fire3 = tf.nn.relu(output)

    # Layer 4: Fire module with 128 output channels
    output = fire_module(fire3,
                         s1=32,
                         e1=128,
                         e2=128,
                         channel=128,
                         fire_id='fire4')
    fire4 = tf.nn.relu(output)
    fire4_pool = tf.nn.max_pool(fire4,
                                ksize=[1, 3, 3, 1],
                                strides=[1, 2, 2, 1],
                                padding='SAME',
                                name='maxpool4')

    # Layer 5: Fire module with 256 output channels
    output = fire_module(fire4_pool,
                         s1=32,
                         e1=128,
                         e2=128,
                         channel=256,
                         fire_id='fire5')
    fire5 = tf.nn.relu(output)

    # Layer 6: Fire module with 256 output channels
    output = fire_module(fire5,
                         s1=48,
                         e1=192,
                         e2=192,
                         channel=256,
                         fire_id='fire6')
    fire6 = tf.nn.relu(output)

    # Layer 7: Fire module with 384 output channels
    output = fire_module(fire6,
                         s1=48,
                         e1=192,
                         e2=192,
                         channel=384,
                         fire_id='fire7')
    fire7 = tf.nn.relu(output)

    # Layer 8: Fire module with 384 output channels
    output = fire_module(fire7,
                         s1=64,
                         e1=256,
                         e2=256,
                         channel=384,
                         fire_id='fire8')
    fire8 = tf.nn.relu(output)
    fire8_pool = tf.nn.max_pool(fire8,
                                ksize=[1, 3, 3, 1],
                                strides=[1, 2, 2, 1],
                                padding='SAME',
                                name='maxpool8')

    # Layer 9: Fire module with 512 output channels
    output = fire_module(fire8_pool,
                         s1=64,
                         e1=256,
                         e2=256,
                         channel=512,
                         fire_id='fire9')
    fire9 = tf.nn.relu(output)
    fire9_dropout = tf.nn.dropout(fire9, keep_prob=0.5, name='dropout9')

    # Layer 10 : 1x1 convolution
    output = tf.nn.conv2d(fire9_dropout,
                          weights['conv10'],
                          strides=[1, 1, 1, 1],
                          padding='VALID',
                          name='conv10')
    conv10 = tf.nn.bias_add(output, biases['conv10'])
    conv10_pool = tf.nn.avg_pool(conv10,
                                 ksize=[1, 13, 13, 1],
                                 strides=[1, 2, 2, 1],
                                 padding='SAME',
                                 name='avgpool10')

    # Layer 11: Flatten
    flatten11 = flatten(conv10_pool)

    # Layer12: Fully connected layer
    output = tf.matmul(flatten11, weights['fc12']) + biases['fc12']
    fc12 = tf.nn.relu(output)

    # Return the logits
    return fc12
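
fire_module is called above but not defined in this snippet; the standard SqueezeNet fire module squeezes the input with s1 1x1 filters, then expands with e1 1x1 and e2 3x3 filters and concatenates the expansions. A minimal sketch under that assumption (the project's actual helper may differ):

def fire_module(input, s1, e1, e2, channel, fire_id):
    # Hedged reimplementation for illustration; the original helper is not shown.
    # channel is the number of input channels, as implied by the calls above.
    with tf.variable_scope(fire_id):
        squeeze_w = tf.Variable(tf.truncated_normal([1, 1, channel, s1], stddev=0.01))
        expand1_w = tf.Variable(tf.truncated_normal([1, 1, s1, e1], stddev=0.01))
        expand3_w = tf.Variable(tf.truncated_normal([3, 3, s1, e2], stddev=0.01))

        squeezed = tf.nn.relu(
            tf.nn.conv2d(input, squeeze_w, strides=[1, 1, 1, 1], padding='SAME'))
        expand1 = tf.nn.conv2d(squeezed, expand1_w, strides=[1, 1, 1, 1], padding='SAME')
        expand3 = tf.nn.conv2d(squeezed, expand3_w, strides=[1, 1, 1, 1], padding='SAME')

        # e1 + e2 output channels; the caller applies the ReLU on the concatenation.
        return tf.concat([expand1, expand3], axis=3)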
示例#58
0
    def get_q_values_op(self, state, scope, reuse=False):
        """
        Returns Q values for all actions

        Args:
            state: (tf tensor) 
                shape = (batch_size, img height, img width, nchannels)
            scope: (string) scope name, that specifies if target network or not
            reuse: (bool) reuse of variables in the scope

        Returns:
            out: (tf tensor) of shape = (batch_size, num_actions)
        """
        # this information might be useful
        num_actions = self.env.action_space.n
        out = state
        ##############################################################
        """
        TODO: implement the computation of Q values like in the paper
                https://storage.googleapis.com/deepmind-data/assets/papers/DeepMindNature14236Paper.pdf
                https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf

              you may find the section "model architecture" of the appendix of the
              nature paper particularly useful.

              store your result in out of shape = (batch_size, num_actions)

        HINT: you may find tensorflow.contrib.layers useful (imported)
              make sure to understand the use of the scope param

              you can use any other methods from tensorflow
              you are not allowed to import extra packages (like keras,
              lasagne, caffe, etc.)

        """
        ##############################################################
        ################ YOUR CODE HERE - 10-15 lines ################
        with tf.variable_scope(scope):
            conv1 = layers.convolution2d(inputs=state,
                                         num_outputs=32,
                                         kernel_size=(8, 8),
                                         stride=4,
                                         activation_fn=tf.nn.relu,
                                         reuse=reuse)
            conv2 = layers.convolution2d(inputs=conv1,
                                         num_outputs=64,
                                         kernel_size=(4, 4),
                                         stride=2,
                                         activation_fn=tf.nn.relu,
                                         reuse=reuse)
            conv3 = layers.convolution2d(inputs=conv2,
                                         num_outputs=64,
                                         kernel_size=(3, 3),
                                         stride=1,
                                         activation_fn=tf.nn.relu,
                                         reuse=reuse)
            flattened_conv3 = layers.flatten(conv3)
            fc = layers.fully_connected(inputs=flattened_conv3,
                                        num_outputs=512,
                                        activation_fn=tf.nn.relu,
                                        reuse=reuse)
            out = layers.fully_connected(inputs=fc,
                                         num_outputs=num_actions,
                                         activation_fn=None,
                                         reuse=reuse)
        ##############################################################
        ######################## END YOUR CODE #######################
        return out
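
A minimal sketch of how these Q-values typically enter the DQN objective, assuming placeholders state, next_state, actions, r, done_mask and a discount gamma that are defined elsewhere in the class:

        # Hedged sketch: Bellman-target loss built on top of get_q_values_op.
        q = self.get_q_values_op(state, scope="q", reuse=False)
        target_q = self.get_q_values_op(next_state, scope="target_q", reuse=False)

        # target: r + gamma * max_a' Q_target(s', a'), zeroed at terminal states
        not_done = 1.0 - tf.cast(done_mask, tf.float32)
        q_samp = r + not_done * gamma * tf.reduce_max(target_q, axis=1)

        # Q(s, a) for the actions actually taken
        action_mask = tf.one_hot(actions, num_actions)
        q_sa = tf.reduce_sum(q * action_mask, axis=1)

        loss = tf.reduce_mean(tf.square(tf.stop_gradient(q_samp) - q_sa))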
def LeNet(x):
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1

    # SOLUTION: Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x6.
    conv1_W = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 3, 6), mean=mu, stddev=sigma))
    conv1_b = tf.Variable(tf.zeros(6))
    conv1 = tf.nn.conv2d(x, conv1_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv1_b

    # SOLUTION: Activation.
    conv1 = tf.nn.relu(conv1)

    # SOLUTION: Pooling. Input = 28x28x6. Output = 14x14x6.
    conv1 = tf.nn.max_pool(conv1,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # SOLUTION: Layer 2: Convolutional. Output = 10x10x16.
    conv2_W = tf.Variable(
        tf.truncated_normal(shape=(5, 5, 6, 16), mean=mu, stddev=sigma))
    conv2_b = tf.Variable(tf.zeros(16))
    conv2 = tf.nn.conv2d(conv1, conv2_W, strides=[1, 1, 1, 1],
                         padding='VALID') + conv2_b

    # SOLUTION: Activation.
    conv2 = tf.nn.relu(conv2)

    # SOLUTION: Pooling. Input = 10x10x16. Output = 5x5x16.
    conv2 = tf.nn.max_pool(conv2,
                           ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1],
                           padding='VALID')

    # SOLUTION: Flatten. Input = 5x5x16. Output = 400.
    fc0 = flatten(conv2)

    # SOLUTION: Layer 3: Fully Connected. Input = 400. Output = 120.
    fc1_W = tf.Variable(
        tf.truncated_normal(shape=(400, 120), mean=mu, stddev=sigma))
    fc1_b = tf.Variable(tf.zeros(120))
    fc1 = tf.matmul(fc0, fc1_W) + fc1_b

    # SOLUTION: Activation.
    fc1 = tf.nn.relu(fc1)

    # SOLUTION: Layer 4: Fully Connected. Input = 120. Output = 84.
    fc2_W = tf.Variable(
        tf.truncated_normal(shape=(120, 84), mean=mu, stddev=sigma))
    fc2_b = tf.Variable(tf.zeros(84))
    fc2 = tf.matmul(fc1, fc2_W) + fc2_b

    # SOLUTION: Activation.
    fc2 = tf.nn.relu(fc2)

    # SOLUTION: Layer 5: Fully Connected. Input = 84. Output = n_classes.
    fc3_W = tf.Variable(
        tf.truncated_normal(shape=(84, n_classes), mean=mu, stddev=sigma))
    fc3_b = tf.Variable(tf.zeros(n_classes))
    logits = tf.matmul(fc2, fc3_W) + fc3_b

    return logits
示例#60
0
    def forward(self,
                encoder_inputs,
                trainable=True,
                is_training=True,
                reuse=False,
                with_batchnorm=False):
        with tf.variable_scope(self.name_scope, reuse=reuse) as vs:
            if (reuse):
                vs.reuse_variables()
            lrelu = VAE.lrelu

            if (with_batchnorm):
                print('here')
                h0 = lrelu(
                    tcl.batch_norm(tcl.conv2d(
                        encoder_inputs,
                        num_outputs=self.nfilters * 4,
                        stride=2,
                        kernel_size=[2, 7],
                        activation_fn=None,
                        padding='SAME',
                        biases_initializer=None,
                        weights_regularizer=tcl.l2_regularizer(self.re_term),
                        scope="conv1"),
                                   scope='bn1',
                                   trainable=trainable,
                                   is_training=is_training))

                h0 = lrelu(
                    tcl.batch_norm(tcl.conv2d(
                        h0,
                        num_outputs=self.nfilters * 4,
                        stride=2,
                        kernel_size=[2, 7],
                        activation_fn=None,
                        padding='SAME',
                        scope="conv2",
                        weights_regularizer=tcl.l2_regularizer(self.re_term),
                        biases_initializer=None),
                                   trainable=trainable,
                                   scope='bn2',
                                   is_training=is_training))

                h0 = tcl.dropout(h0, 0.8, is_training=is_training)

                h0 = lrelu(
                    tcl.batch_norm(tcl.conv2d(
                        h0,
                        num_outputs=self.nfilters * 8,
                        stride=2,
                        kernel_size=[2, 7],
                        activation_fn=None,
                        padding='SAME',
                        scope="conv3",
                        weights_regularizer=tcl.l2_regularizer(self.re_term),
                        biases_initializer=None),
                                   trainable=trainable,
                                   scope='bn3',
                                   is_training=is_training))
            else:
                h0 = lrelu(
                    tcl.conv2d(encoder_inputs,
                               num_outputs=self.nfilters * 4,
                               stride=2,
                               kernel_size=[2, 7],
                               activation_fn=None,
                               padding='VALID',
                               biases_initializer=None,
                               weights_regularizer=tcl.l2_regularizer(
                                   self.re_term),
                               scope="conv1"))
                h0 = tcl.dropout(h0, 0.8, is_training=is_training)
                h0 = lrelu(
                    tcl.conv2d(h0,
                               num_outputs=self.nfilters * 8,
                               stride=2,
                               kernel_size=[2, 7],
                               activation_fn=None,
                               padding='VALID',
                               scope="conv2",
                               weights_regularizer=tcl.l2_regularizer(
                                   self.re_term),
                               biases_initializer=None))
                h0 = tcl.dropout(h0, 0.8, is_training=is_training)

                h0 = lrelu(
                    tcl.conv2d(h0,
                               num_outputs=self.nfilters * 8,
                               stride=2,
                               kernel_size=[2, 7],
                               activation_fn=None,
                               padding='VALID',
                               scope="conv3",
                               weights_regularizer=tcl.l2_regularizer(
                                   self.re_term),
                               biases_initializer=None))
                h0 = tcl.dropout(h0, 0.5, is_training=is_training)

            h0 = tcl.flatten(h0)

            h0 = tcl.fully_connected(h0,
                                     self.encoded_dim,
                                     weights_regularizer=tcl.l2_regularizer(
                                         self.re_term),
                                     scope="fc1",
                                     activation_fn=None)

            return h0
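
The encoder above returns a single vector; in a VAE this is usually followed by mean and log-variance heads and the reparameterization trick. A minimal sketch under that assumption, with a hypothetical latent_dim (this step is not shown in the class):

            # Hedged sketch: reparameterization on top of the encoder output h0.
            z_mean = tcl.fully_connected(h0, latent_dim, activation_fn=None, scope="z_mean")
            z_logvar = tcl.fully_connected(h0, latent_dim, activation_fn=None, scope="z_logvar")

            eps = tf.random_normal(tf.shape(z_mean))
            z = z_mean + tf.exp(0.5 * z_logvar) * eps    # sampled latent code

            # KL term of the VAE objective for this encoder
            kl = -0.5 * tf.reduce_sum(
                1.0 + z_logvar - tf.square(z_mean) - tf.exp(z_logvar), axis=1)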