Example #1
  def testVarOpScopeReuseParam(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        with tf.variable_op_scope([], "tower", "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/tower/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with tf.variable_op_scope([], None, "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")

      with tf.variable_scope(outer) as outer:
        with tf.variable_op_scope([], "tower", "default", reuse=True):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/tower/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        outer.reuse_variables()
        with tf.variable_op_scope([], None, "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
Example #2
  def testVarOpScopeOuterScope(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        pass
      with tf.variable_op_scope([], outer, "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with tf.variable_op_scope([], None, "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

      with tf.variable_op_scope([], outer, "default", reuse=True):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        outer.reuse_variables()
        with tf.variable_op_scope([], None, "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
Example #3
  def testVarOpScope(self):
    with self.test_session():
      with tf.name_scope("scope1"):
        with tf.variable_op_scope([], "tower", "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "tower/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower/scope2/")
        with tf.variable_op_scope([], "tower", "default"):
          with self.assertRaises(ValueError):
            tf.get_variable("w", [])
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower_1/scope2/")

      with tf.name_scope("scope2"):
        with tf.variable_op_scope([], None, "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope2/default/scope2/")
        with tf.variable_op_scope([], None, "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "default_1/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope2/default_1/scope2/")
Example #4
    def testVarOpScope(self):
        with self.test_session():
            with tf.name_scope("scope1"):
                with tf.variable_op_scope([], "tower", "default"):
                    self.assertEqual(
                        tf.get_variable("w", []).name, "tower/w:0")
                    with tf.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope1/tower/scope2/")
                with tf.variable_op_scope([], "tower", "default"):
                    with self.assertRaises(ValueError):
                        tf.get_variable("w", [])
                    with tf.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope1/tower_1/scope2/")

            with tf.name_scope("scope2"):
                with tf.variable_op_scope([], None, "default"):
                    self.assertEqual(
                        tf.get_variable("w", []).name, "default/w:0")
                    with tf.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope2/default/scope2/")
                with tf.variable_op_scope([], None, "default"):
                    self.assertEqual(
                        tf.get_variable("w", []).name, "default_1/w:0")
                    with tf.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope2/default_1/scope2/")
Example #5
  def testVarOpScopeOuterScope(self):
    with self.test_session():
      with tf.variable_scope("outer") as outer:
        pass
      with tf.variable_op_scope([], outer, "default"):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with tf.variable_op_scope([], None, "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

      with tf.variable_op_scope([], outer, "default", reuse=True):
        self.assertEqual(tf.get_variable("w", []).name,
                         "outer/w:0")
        with tf.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        outer.reuse_variables()
        with tf.variable_op_scope([], None, "default"):
          self.assertEqual(tf.get_variable("w", []).name,
                           "outer/default/w:0")
          with tf.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
Example #6
    def testVarOpScopeReuseParam(self):
        with self.test_session():
            with tf.variable_scope("outer") as outer:
                with tf.variable_op_scope([], "tower", "default"):
                    self.assertEqual(
                        tf.get_variable("w", []).name, "outer/tower/w:0")
                    with tf.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "outer/tower/scope2/")
                with tf.variable_op_scope([], None, "default"):
                    self.assertEqual(
                        tf.get_variable("w", []).name, "outer/default/w:0")
                    with tf.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "outer/default/scope2/")

            with tf.variable_scope(outer) as outer:
                with tf.variable_op_scope([], "tower", "default", reuse=True):
                    self.assertEqual(
                        tf.get_variable("w", []).name, "outer/tower/w:0")
                    with tf.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "outer_1/tower/scope2/")
                outer.reuse_variables()
                with tf.variable_op_scope([], None, "default"):
                    self.assertEqual(
                        tf.get_variable("w", []).name, "outer/default/w:0")
                    with tf.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "outer_1/default/scope2/")
Example #7
 def model(x, is_training=True):
     # Create model
     with tf.variable_op_scope([x], None, name):
         output = m(x, is_training=is_training)
         with tf.variable_op_scope(None, 'fixShape', reuse=None):
             filterIn = x.get_shape()[3]
             filterOut = output.get_shape()[3]
             if filterIn != filterOut:
                 if fixShape[0] == 'pad':
                     x = tf.nn.avg_pool(
                         x,
                         ksize=[1, 1, 1, 1],
                         strides=[1, fixShape[1], fixShape[2], 1],
                         padding='VALID')
                     x = tf.pad(x, [[0, 0], [0, 0], [0, 0],
                                    [(filterOut - filterIn) // 2,
                                     (filterOut - filterIn) // 2]])
                 else:  #conv method
                     w = tf.get_variable('weight',
                                         [1, 1, filterIn, filterOut],
                                         initializer=tf.contrib.layers.
                                         xavier_initializer_conv2d())
                     x = tf.nn.conv2d(
                         x,
                         w,
                         strides=[1, fixShape[1], fixShape[2], 1],
                         padding='SAME')
         output = tf.add(output, x)
         return output
Example #8
 def seperated(name="Seperated"):
     with tf.variable_op_scope([incoming], name) as scope:
         sep = [incoming] * ncols
         for col in range(ncols):
             with tf.variable_op_scope([], None, "Column_{}".format(col)):
                 for idx, (W, b) in enumerate(zip(Ws[col], bs[col])):
                     with tf.variable_op_scope([incoming], None,
                                               "ConvBlock") as scope:
                         conv = (tf.nn.conv2d(sep[col], W, [1, 1, 1, 1],
                                              'SAME') + b)
                         conv = tflearn.batch_normalization(conv)
                         sep[col] = tf.nn.relu(conv)
         return random_col(sep)
Example #9
def _rnn_template(incoming, cell, dropout=None, return_seq=False,
                  return_state=False, initial_state=None, dynamic=False,
                  scope=None, name="LSTM"):
    """ RNN Layer Template. """
    sequence_length = None
    if dynamic:
        sequence_length = retrieve_seq_length_op(
            incoming if isinstance(incoming, tf.Tensor) else tf.pack(incoming))

    input_shape = utils.get_incoming_shape(incoming)

    with tf.variable_op_scope([incoming], scope, name) as scope:
        name = scope.name

        _cell = cell
        # Apply dropout
        if dropout:
            if type(dropout) in [tuple, list]:
                in_keep_prob = dropout[0]
                out_keep_prob = dropout[1]
            elif isinstance(dropout, float):
                in_keep_prob, out_keep_prob = dropout, dropout
            else:
                raise Exception("Invalid dropout type (must be a 2-D tuple of "
                                "float)")
            cell = DropoutWrapper(cell, in_keep_prob, out_keep_prob)

        inference = incoming
        # If a tensor given, convert it to a per timestep list
        if type(inference) not in [list, np.array]:
            ndim = len(input_shape)
            assert ndim >= 3, "Input dim should be at least 3."
            axes = [1, 0] + list(range(2, ndim))
            inference = tf.transpose(inference, (axes))
            inference = tf.unpack(inference)

        outputs, state = _rnn(cell, inference, dtype=tf.float32,
                              initial_state=initial_state, scope=name,
                              sequence_length=sequence_length)

        # Retrieve RNN Variables
        c = tf.GraphKeys.LAYER_VARIABLES + '/' + scope.name
        for v in [_cell.W, _cell.b]:
            if hasattr(v, "__len__"):
                for var in v: tf.add_to_collection(c, var)
            else:
                tf.add_to_collection(c, v)
        # Track activations.
        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, outputs[-1])

    if dynamic:
        outputs = tf.transpose(tf.pack(outputs), [1, 0, 2])
        o = advanced_indexing_op(outputs, sequence_length)
    else:
        o = outputs if return_seq else outputs[-1]

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, o)

    return (o, state) if return_state else o
Example #10
def prelu(x, alphas_init=0.25, name='prelu'):
    """PReLU.

    Parameteric Rectified Linear Unit

    Parameters
    ----------
    x : Tensor
    alphas_init : float
        Value to initialize coefficients
        (the default is 0.25, which is used in the original paper).
    name : str
        Name for the op scope.

    References
    ----------
    .. [1] He, et al.
       "Delving Deep into Rectifiers: Surpassing Human-Level Performance on
       ImageNet Classification."
       <http://arxiv.org/pdf/1502.01852v1.pdf>
    """
    a_shape = skflow.tensor.get_shape(x)[1:]
    op_scope = skflow.tensor.get_scope(x)
    with tf.variable_op_scope([x], op_scope + name, 'prelu') as scope:
        a_init = tf.constant_initializer(alphas_init)
        alphas = skflow.tensor.variable('alphas',
                                        shape=a_shape,
                                        initializer=a_init)
        x = tf.nn.relu(x) + tf.mul(alphas, (x - tf.abs(x))) * 0.5

    # save the alphas in the tensor to make it easy to grab later
    x.alphas = alphas

    return x
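
A minimal usage sketch for the prelu op above, assuming the skflow helpers it imports are available; the feature shape is illustrative:

# Hypothetical usage of prelu() from this example; shapes are illustrative.
import tensorflow as tf

features = tf.placeholder(tf.float32, [None, 128], name='features')
activated = prelu(features, alphas_init=0.25, name='prelu1')
# The learned coefficients remain accessible as activated.alphas (set above).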
Example #11
 def b_conv2d(x, is_training=True):
     nInputPlane = x.get_shape().as_list()[3]
     with tf.variable_op_scope([x], None, name, reuse=reuse):
         w = tf.get_variable(
             'weight', [kH, kW, nInputPlane, nOutputPlane],
             initializer=tf.contrib.layers.xavier_initializer_conv2d())
         bin_w = fw(w)
         bin_x = fa(x)
         '''
         Note that we use the binarized versions of the input and the weights.
         Since the binarize function uses the STE (straight-through estimator),
         gradients are computed with bin_x and bin_w, but the updates are
         applied to w (the full-precision version).
         '''
         out = tf.nn.conv2d(bin_x,
                            bin_w,
                            strides=[1, dH, dW, 1],
                            padding=padding)
         out = fg(out)
         if bias:
             b = tf.get_variable('bias', [nOutputPlane],
                                 initializer=tf.zeros_initializer)
             out = tf.nn.bias_add(out, b)
             # out = ReLU(out)
         tf.summary.histogram(name + '_bWeights', bin_w)
         tf.summary.histogram(name + '_bActivation', bin_x)
         return out
Example #12
def batch_norm(input, is_train, scope=None, reuse=None, decay=0.9):
    shape = input.get_shape()
    num_out = shape[-1]

    with tf.variable_op_scope([input], scope, 'BN', reuse=reuse):
        beta = tf.get_variable('beta', [num_out],
                initializer=tf.constant_initializer(0.0),
                trainable=True)
        gamma = tf.get_variable('gamma', [num_out],
                initializer=tf.constant_initializer(1.0),
                trainable=True)

        batch_mean, batch_var = tf.nn.moments(input, [0,1,2], name='moments') \
                if len(shape)==4 else tf.nn.moments(input, [0], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        mean, var = tf.cond(is_train,
                mean_var_with_update,
                lambda: (ema.average(batch_mean), ema.average(batch_var)))
        return tf.nn.batch_normalization(input, mean, var, beta, gamma, 1e-3)
Example #13
    def buid_net(self,state,action,scope,trainable):

        with tf.variable_scope(scope):

            init_w = tf.contrib.layers.xavier_initializer()
            init_b = tf.constant_initializer(0.01)

            with tf.variable_op_scope([state, action], 'net1'):

                n_net = 200
                w_s = tf.get_variable('w_s', [self.state_dim, n_net], initializer=init_w, trainable=trainable)
                w_a = tf.get_variable('w_a', [self.action_dim, n_net], initializer=init_w, trainable=trainable)
                b1 = tf.get_variable('b1', [1, n_net], initializer=init_b, trainable=True)
                net1 = tf.nn.relu6(tf.matmul(state, w_s) + tf.matmul(action, w_a) + b1)

            net2 = tf.layers.dense(net1, 200, activation=tf.nn.relu6,
                                   kernel_initializer=init_w, bias_initializer=init_b,
                                   name='net2', trainable=trainable)
            net3 = tf.layers.dense(net2, 10, activation=tf.nn.relu,
                                   kernel_initializer=init_w, bias_initializer=init_b,
                                   name='net3', trainable=trainable)
            net_q = tf.layers.dense(net3, 1, kernel_initializer=init_w, bias_initializer=init_b,
                                    name='net_q', trainable=trainable)

            return net_q
Example #14
def qfunction(obs, act, theta, name="qfunction"):

    with tf.variable_op_scope([obs, act], name, name):
        x = tf.identity(obs, name='h0-obs')

        y = tf.identity(act, name='h0-act')

        u1 = tf.matmul(x, theta[0]) + theta[1]
        u1 = tf.nn.relu(u1)

        u2 = tf.matmul(u1, theta[2]) + theta[3]
        u2 = tf.nn.relu(u2)

        cz1 = tf.matmul(x, theta[4]) + theta[10]
        z1 = tf.matmul(y, theta[9]) + cz1
        z1 = lrelu(z1, FLAGS.lrelu)

        cz2 = tf.matmul(u1, theta[5]) + theta[12]
        z2 = tf.matmul(y, theta[11]) + tf.matmul(z1, tf.abs(theta[7])) + cz2
        z2 = lrelu(z2, FLAGS.lrelu)

        cz3 = tf.matmul(u2, theta[6]) + theta[14]
        z3 = tf.matmul(y, theta[13]) + tf.matmul(z2, tf.abs(theta[8])) + cz3
        z3 = -tf.squeeze(z3, [1], name='z3')

        return z3, cz1, cz2, cz3, z1, z2, u1, u2
Example #15
    def __call__(self, flow=None):
        """Constructs the layer in `Tensorflow` graph.

        Args:
            flow: This argument is ignored. (Default value = None)

        Returns:
            Output of this layer.

        """

        with tf.variable_op_scope([flow], self.name, 'Embedding', reuse=self.reuse):
            if not self.reuse:
                self._table_loader = tf.placeholder(tf.float32, shape=self._init_values.shape, name='loader')
                self._lookup_table = tf.get_variable(
                    'lookup_table',
                    initializer=self._table_loader,
                    trainable=self.trainable)
                self.params.append(self._lookup_table)
                tf.initialize_variables(self.params).run(feed_dict={self._table_loader: self._init_values})
                self.reuse = True

            flow = tf.placeholder(tf.int64, [None] + self._input_shape, 'input')
            tf.add_to_collection(GraphKeys.MODEL_INPUTS, flow)
            flow = tf.nn.embedding_lookup(self._lookup_table, flow)

        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, flow)
        return flow
Example #16
def drop_path(columns,
              coin):
  with tf.variable_op_scope([columns], None, "DropPath"):
    out = tf.cond(coin,
                  lambda : drop_some(columns),
                  lambda : random_column(columns))
  return out
Example #17
def l2_normalize(incoming, dim, epsilon=1e-12, name="l2_normalize"):
    """ L2 Normalization.

    Normalizes along dimension `dim` using an L2 norm.

    For a 1-D tensor with `dim = 0`, computes
    ```
    output = x / sqrt(max(sum(x**2), epsilon))
    ```

    For `x` with more dimensions, independently normalizes each 1-D slice along
    dimension `dim`.

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        dim: `int`. Dimension along which to normalize.
        epsilon: `float`. A lower bound value for the norm. Will use
            `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`.
        name: `str`. A name for this layer (optional).

    Returns:
      A `Tensor` with the same shape as `x`.
    """
    with tf.variable_op_scope([incoming], name) as name:
        x = tf.ops.convert_to_tensor(incoming, name="x")
        square_sum = tf.reduce_sum(tf.square(x), [dim], keep_dims=True)
        x_inv_norm = tf.rsqrt(tf.maximum(square_sum, epsilon))

    return tf.mul(x, x_inv_norm, name=name)
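
A minimal usage sketch for l2_normalize above, assuming a 2-D batch of row vectors; the shape is illustrative:

# Sketch: L2-normalize each row of a [batch, 300] tensor (illustrative shape).
import tensorflow as tf

embeddings = tf.placeholder(tf.float32, [None, 300], name='embeddings')
unit_rows = l2_normalize(embeddings, dim=1)  # each row rescaled to unit L2 norm (up to the epsilon floor)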
Example #18
    def __call__(self, flow=None):
        """Constructs the Sequential and its inner pieces.

        Args:
            flow: Input `Tensor` object. (Default value = None)

        Returns:
            Output of this `Parallel`.

        """

        # build inner pieces.
        with tf.variable_op_scope([], self.name, 'Parallel', reuse=self.reuse):
            if not self.reuse:
                self.reuse = True

            outputs = []
            for i, piece in enumerate(self.child_pieces):
                outputs.append(piece(flow))

            if self.mode == 'concat':
                return tf.concat(self.along_dim, outputs)
            elif self.mode == 'mean':
                return tf.add_n(outputs) / len(self.child_pieces)
            elif self.mode == 'sum':
                return tf.add_n(outputs)
Example #19
def mysum2(a, b, name=None):
    with tf.variable_op_scope([a, b], name, "mysum2") as scope:
        v = tf.get_variable("v", 1)
        v2 = tf.Variable([0], name="v2")
        assert v.name == "mysum2/v:0", v.name
        assert v2.name == "mysum2/v2:0", v2.name
        return tf.add(a, b)
Example #20
 def __init__(self,
              depth,
              epsilon,
              ewma_trainer,
              scale_after_norm,
              keep_prob_prior=1.0,
              name=None):
     with tf.variable_op_scope([self, depth, ewma_trainer, epsilon], name,
                               'batch_normalizer') as scope:
         self.mean = tf.get_variable(
             'mean',
             shape=[depth],
             initializer=tf.constant_initializer(0.0),
             trainable=False)
         self.variance = tf.get_variable(
             'variance',
             shape=[depth],
             initializer=tf.constant_initializer(1.0),
             trainable=False)
         self.beta = tf.get_variable(
             'beta',
             shape=[depth],
             initializer=tf.constant_initializer(0.0))
         self.gamma = tf.get_variable(
             'gamma',
             shape=[depth],
             initializer=tf.constant_initializer(1.0))
         print(scope.name)
         self.ewma_trainer = ewma_trainer
         self.epsilon = epsilon
         self.keep_prob_prior = keep_prob_prior
Example #21
def qfunction(obs, act, theta, name="qfunction"):
    with tf.variable_op_scope([obs, act], name, name):
        h0_o = tf.identity(obs, name='h0-obs')
        h0_a = tf.identity(act, name='h0-act')
        h1_o = tf.matmul(h0_o, theta[0]) + theta[1]
        h1_a = tf.matmul(h0_a, theta[2]) + theta[3]
        h1 = tf.nn.relu(h1_o + h1_a, name='h1')
        h2_u = tf.matmul(h1_o, theta[4]) + theta[5]
        h2_a = tf.matmul(
            tf.multiply(h0_a,
                        tf.matmul(h1_o, theta[6]) + theta[7]), theta[8])
        h2_z = tf.matmul(
            tf.multiply(h1,
                        tf.matmul(h1_o, theta[9]) + theta[10]), theta[11])
        h2 = tf.nn.relu(h2_u + h2_a + h2_z)
        h4_u = tf.matmul(h2_u, theta[12]) + theta[13]
        h4_a = tf.matmul(
            tf.multiply(h0_a,
                        tf.matmul(h2_u, theta[14]) + theta[15]), theta[16])
        h4_z = tf.matmul(
            tf.multiply(h2,
                        tf.matmul(h2_u, theta[17]) + theta[18]), theta[19])
        qs = h4_u + h4_a + h4_z
        q = tf.squeeze(qs, [1], name='h3-q')
        return q
Example #22
def BinarizedSpatialConvolution(x,
                                nOutputPlane,
                                kW,
                                kH,
                                dW=1,
                                dH=1,
                                padding='VALID',
                                bias=True,
                                reuse=None,
                                name='BinarizedSpatialConvolution'):
    nInputPlane = x.get_shape().as_list()[3]
    with tf.variable_op_scope([x], None, name, reuse=reuse):
        w = tf.get_variable(
            'weight', [kH, kW, nInputPlane, nOutputPlane],
            initializer=tf.contrib.layers.xavier_initializer_conv2d())
        bin_w = binarize(w)
        bin_x = binarize_0(x)
        out = tf.nn.conv2d(bin_x,
                           bin_w,
                           strides=[1, dH, dW, 1],
                           padding=padding)
        if bias:
            b = tf.get_variable('bias', [nOutputPlane],
                                initializer=tf.zeros_initializer)
            out = tf.nn.bias_add(out, b)
        return out, bin_w, bin_x
Example #23
  def hidden_layer(data, input_size, layer_size, keep_prob_prior, name=None):
    with tf.variable_op_scope([data, input_size, layer_size], name, "hidden_layer") as scope:
      ewma = tf.train.ExponentialMovingAverage(decay=0.99, name='ema_' + name)                  
      bn = BatchNormalizer(layer_size, 0.001, ewma, True, keep_prob_prior,'bn_'+name)                                      
               
      weights = tf.get_variable('weights', 
        [input_size, layer_size],
        initializer=tf.truncated_normal_initializer(0,
                              stddev=math.sqrt(2.0 / ((1.0 + initial_a ** 2.0) * float(input_size)))))
      #weights = clip_weight_norm(weights, max_norm, name='clipped_weights')
      if not scope.reuse:
        tf.histogram_summary(weights.name, weights)            
      x = bn.normalize(tf.matmul(data,weights), train=keep_prob < 1.0)
      mean, variance = tf.nn.moments(x, [0])
      c = tf.div(tf.matmul(x-mean, x-mean, transpose_a=True), tf.to_float(tf.shape(x)[0]))
      weight_decay = 0.0
      if (keep_prob < 1.0):
        weight_decay = tf.nn.l2_loss(c) - tf.nn.l2_loss(variance)#tf.mul(tf.nn.l2_loss(weights), wd, name='weight_loss')
      
      tf.add_to_collection('losses', weight_decay)

      hidden = tf.nn.elu(x)
      #tf.scalar_summary('sparsity_'+hidden.name, tf.nn.zero_fraction(hidden))
      hidden_dropout = tf.nn.dropout(hidden, keep_prob)
      return hidden_dropout, bn
Example #24
def l2_normalize(incoming, dim, epsilon=1e-12, name="l2_normalize"):
    """ L2 Normalization.

    Normalizes along dimension `dim` using an L2 norm.

    For a 1-D tensor with `dim = 0`, computes
    ```
    output = x / sqrt(max(sum(x**2), epsilon))
    ```

    For `x` with more dimensions, independently normalizes each 1-D slice along
    dimension `dim`.

    Arguments:
        incoming: `Tensor`. Incoming Tensor.
        dim: `int`. Dimension along which to normalize.
        epsilon: `float`. A lower bound value for the norm. Will use
            `sqrt(epsilon)` as the divisor if `norm < sqrt(epsilon)`.
        name: `str`. A name for this layer (optional).

    Returns:
      A `Tensor` with the same shape as `x`.
    """
    with tf.variable_op_scope([incoming], name) as name:
        x = tf.ops.convert_to_tensor(incoming, name="x")
        square_sum = tf.reduce_sum(tf.square(x), [dim], keep_dims=True)
        x_inv_norm = tf.rsqrt(tf.maximum(square_sum, epsilon))

    return tf.mul(x, x_inv_norm, name=name)
Example #25
def cnn_model(features, target):
    """
    Two-layer neural network.
    """
    target=tf.one_hot(target,15,1,0)
    word_vectors=tf.contrib.layers.embed_sequence(
            features,vocab_size=n_words,embed_dim=EMBEDDING_SIZE,scope='words')
    word_vectors=tf.expand_dims(word_vectors,3)
    with tf.variable_scope('CNN_Layer1'):
        conv1=tf.contrib.layers.convolution2d(word_vectors,N_FILTERS,FILTER_SHAPE1,padding='VALID')
        conv1=tf.nn.relu(conv1)
        # max pooling
        pol1=tf.nn.max_pool(conv1,ksize=[1,POOLING_WINDOW,1,1],strides=[1, POOLING_STRIDE,1,1],padding='SAME')
        pol1=tf.transpose(pol1,[0,1,3,2])
        
    with tf.variable_op_scope([pol1], 'CNN_Layer2'):
        conv2=tf.contrib.layers.convolution2d(pol1,N_FILTERS,FILTER_SHAPE2,padding='VALID')
        pol2=tf.squeeze(tf.reduce_max(conv2,1),squeeze_dims=[1])
        
    # fully connected layer
    logits=tf.contrib.layers.fully_connected(pol2,15,activation_fn=None)
    loss=tf.losses.sigmoid_cross_entropy(target,logits)
    
    train_op=tf.contrib.layers.optimize_loss(loss,
                                             tf.contrib.framework.get_global_step(),
                                             optimizer='Adam',
                                              learning_rate=0.01)
    
    return ({
            'class':tf.arg_max(logits,1),
            'prob':tf.nn.softmax(logits)
            },loss,train_op)
Example #26
    def _build_graph(self):
        with tf.variable_op_scope([self.wide_inputs], None, "cb_unit", reuse=False) as scope:
            central_bias = tf.Variable(name='central_bias',
                                       initial_value=tf.random_normal(shape=[self.batch_size, 1], mean=0, stddev=1),
                                       trainable=True)

        wide_side = tf.contrib.layers.fully_connected(inputs=self.wide_inputs,
                                                      num_outputs=self.wide_side_node,
                                                      activation_fn=tf.nn.relu,
                                                      biases_initializer=None
                                                      )

        wide_side = tf.reduce_sum(wide_side, 1, name="reduce_sum")
        wide_side = tf.reshape(wide_side, [-1, 1])
        w_a_d = tf.concat([self.wide_inputs, self.deep_inputs], axis=1, name="concat")

        for k in range(len(self.deep_side_nodes)):
            w_a_d = tf.contrib.layers.fully_connected(w_a_d, self.deep_side_nodes[k], activation_fn=tf.nn.relu)
            w_a_d = tf.layers.dropout(
                inputs=w_a_d,
                rate=0.5,
                name="deep_dropout_%d" % k,
            )
        deep_side = tf.contrib.layers.fully_connected(w_a_d, 1,
                                                      activation_fn=None,
                                                      biases_initializer=None)
        deep_side = tf.reshape(deep_side, [-1, 1])
        w_a_d_logit = tf.add(deep_side, wide_side)
        self.w_a_d_logit = tf.add(w_a_d_logit, central_bias, name="wide_with_bias")
        self.w_a_d_output = tf.nn.softmax(self.w_a_d_logit, dim=-1)
        # Define accuracy
        self.predictions = tf.cast(tf.greater(self.w_a_d_output, 0), tf.int64)  # at prediction time, the network output is the moving-averaged forward-pass result
        self.correct_prediction = tf.equal(self.predictions, tf.cast(self.Y, tf.int64))
        self.accuracy = tf.reduce_mean(tf.cast(self.correct_prediction, tf.float32))
Example #27
def repeat_op(repetitions, inputs, op, *args, **kwargs):
  """Build a sequential Tower starting from inputs by using an op repeatedly.

  It creates new scopes for each operation by increasing the counter.
  Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    it will repeat the given op under the following variable_scopes:
      conv1/Conv
      conv1/Conv_1
      conv1/Conv_2

  Args:
    repetitions: number of repetitions.
    inputs: a tensor of size [batch_size, height, width, channels].
    op: an operation.
    *args: args for the op.
    **kwargs: kwargs for the op.

  Returns:
    the tensor resulting from applying the operation op, repetitions times.
  Raises:
    ValueError: if the op is unknown or wrong.
  """
  scope = kwargs.pop('scope', None)
  with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
    tower = inputs
    for _ in range(repetitions):
      tower = op(tower, *args, **kwargs)
    return tower
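
A sketch of the invocation given in the docstring above, assuming an ops.conv2d like the one these projects define:

# Sketch of the docstring's example; assumes ops.conv2d from the same project.
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
tower = repeat_op(3, images, ops.conv2d, 64, [3, 3], scope='conv1')
# Variables are created under conv1/Conv, conv1/Conv_1 and conv1/Conv_2.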
Example #28
File: ops.py  Project: zjjMaiMai/mdm
def repeat_op(repetitions, inputs, op, *args, **kwargs):
    """Build a sequential Tower starting from inputs by using an op repeatedly.

  It creates new scopes for each operation by increasing the counter.
  Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    it will repeat the given op under the following variable_scopes:
      conv1/Conv
      conv1/Conv_1
      conv1/Conv_2

  Args:
    repetitions: number of repetitions.
    inputs: a tensor of size [batch_size, height, width, channels].
    op: an operation.
    *args: args for the op.
    **kwargs: kwargs for the op.

  Returns:
    the tensor resulting from applying the operation op, repetitions times.
  Raises:
    ValueError: if the op is unknown or wrong.
  """
    scope = kwargs.pop('scope', None)
    with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
        tower = inputs
        for _ in range(repetitions):
            tower = op(tower, *args, **kwargs)
        return tower
Example #29
 def model(x, is_training=True):
     with tf.variable_op_scope([x], None, name, reuse=reuse):
         modules = []
         for i in xrange(0, N):
             if i == 0:
                 modules += Residual_func(nOutputPlane * K,
                                          kW,
                                          kH,
                                          dW,
                                          dH,
                                          padding=padding,
                                          bias=bias,
                                          reuse=reuse,
                                          fixShapeMethod=fixShapeMethod,
                                          bottleWidth=bottleWidth)
             else:
                 modules += Residual_func(nOutputPlane * K,
                                          kW,
                                          kH,
                                          1,
                                          1,
                                          padding=padding,
                                          bias=bias,
                                          reuse=reuse,
                                          fixShapeMethod=fixShapeMethod,
                                          bottleWidth=bottleWidth)
         m = Sequential(modules)
         output = m(x, is_training=is_training)
         return output
Example #30
def deconv2d(
    inputs,
    output_shape,
    kernel_size=5,
    stride=2,
    padding='SAME',
    activation=tf.nn.relu,
    stddev=0.02,
    bias=0.0,
    weight_decay=0,
    batch_norm_params=None,
    is_training=True,
    trainable=True,
    restore=True,
    scope=None,
    reuse=None,
):

    with tf.variable_op_scope([inputs], scope, 'Deconv', reuse=reuse):
        kernel_h, kernel_w = _two_element_tuple(kernel_size)
        stride_h, stride_w = _two_element_tuple(stride)
        num_filters_in = inputs.get_shape()[-1]
        num_filters_out = output_shape[-1]
        weights_shape = [kernel_h, kernel_w, num_filters_out, num_filters_in]
        weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
        l2_regularizer = None
        if weight_decay and weight_decay > 0:
            l2_regularizer = losses.l2_regularizer(weight_decay)
        weights = variables.variable('weights',
                                     shape=weights_shape,
                                     initializer=weights_initializer,
                                     regularizer=l2_regularizer,
                                     trainable=trainable,
                                     restore=restore)
        deconv = tf.nn.conv2d_transpose(inputs,
                                        weights,
                                        output_shape=output_shape,
                                        strides=[1, stride_h, stride_w, 1],
                                        padding=padding)
        if batch_norm_params is not None:
            with scopes.arg_scope([batch_norm],
                                  is_training=is_training,
                                  trainable=trainable,
                                  restore=restore):
                outputs = batch_norm(deconv, **batch_norm_params)
        else:
            bias_shape = [
                num_filters_out,
            ]
            bias_initializer = tf.constant_initializer(bias)
            biases = variables.variable('biases',
                                        shape=bias_shape,
                                        initializer=bias_initializer,
                                        trainable=trainable,
                                        restore=restore)
            outputs = tf.nn.bias_add(deconv, biases)
        if activation:
            outputs = activation(outputs)
        return outputs
Example #31
def embedding(incoming, input_dim, output_dim, validate_indices=False,
              weights_init='truncated_normal', trainable=True, restore=True,
              reuse=False, scope=None, name="Embedding"):
    """ Embedding.

    Embedding layer for a sequence of ids.

    Input:
        2-D Tensor [samples, ids].

    Output:
        3-D Tensor [samples, embedded_ids, features].

    Arguments:
        incoming: Incoming 2-D Tensor.
        input_dim: list of `int`. Vocabulary size (number of ids).
        output_dim: list of `int`. Embedding size.
        validate_indices: `bool`. Whether or not to validate gather indices.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'truncated_normal'.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'Embedding'.

    """

    input_shape = utils.get_incoming_shape(incoming)
    assert len(input_shape) == 2, "Incoming Tensor shape must be 2-D"

    W_init = weights_init
    if isinstance(weights_init, str):
        W_init = initializations.get(weights_init)()

    with tf.variable_op_scope([incoming], scope, name, reuse=reuse) as scope:
        name = scope.name
        with tf.device('/cpu:0'):
            W = vs.variable("W", shape=[input_dim, output_dim],
                            initializer=W_init, trainable=trainable,
                            restore=restore)
            tf.add_to_collection(tf.GraphKeys.LAYER_VARIABLES + '/' + name, W)

        inference = tf.cast(incoming, tf.int32)
        inference = tf.nn.embedding_lookup(W, inference,
                                           validate_indices=validate_indices)

    inference.W = W
    inference.scope = scope
    # Embedding doesn't support masking, so we save sequence length prior
    # to the lookup. Expand dim to 3d.
    shape = [-1] + inference.get_shape().as_list()[1:3] + [1]
    inference.seq_length = retrieve_seq_length_op(tf.reshape(incoming, shape))

    return inference
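
A minimal sketch of calling the embedding layer above, following the docstring's 2-D input / 3-D output contract; vocabulary size and dimensions are illustrative:

# Sketch: embed [samples, ids] token ids into [samples, ids, features].
import tensorflow as tf

token_ids = tf.placeholder(tf.int32, [None, 50], name='token_ids')
embedded = embedding(token_ids, input_dim=10000, output_dim=128)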
Example #32
def hidden_layers(obs, theta, name='hidden'):
    with tf.variable_op_scope([obs], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')

        summary = hist_summaries(h0, h1, h2)
        return h2, summary
Example #33
 def create_policy_network(self, state, theta, name="policy_network"):
     with tf.variable_op_scope([state], name, name):
         h0 = tf.identity(state, "state")
         h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
         h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name="h2")
         h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
         action = tf.nn.tanh(h3, name='action')
         return action
Example #34
def my_op_with_vars_scope_b(a, b, scope=None):
    with tf.variable_op_scope([a, b], scope, "MyXX") as scope:
        a = tf.convert_to_tensor(a, name="a")
        b = tf.convert_to_tensor(b, name="b")

        print("scope a : {0}".format(a.name))
        print("scope b : {0}".format(b.name))
        return tf.mul(a, b)
Example #35
def policy_network(state,theta,name='policy'):
  with tf.variable_op_scope([state],name,name):
    h0 = tf.identity(state,name='h0-state')
    h1 = tf.nn.relu( tf.matmul(h0,theta[0]) + theta[1],name='h1')
    h2 = tf.nn.relu( tf.matmul(h1,theta[2]) + theta[3],name='h2')
    h3 = tf.identity(tf.matmul(h2,theta[4]) + theta[5],name='h3')
    action = tf.nn.tanh(h3,name='h4-action')
    return action
Example #36
def policy(obs, theta, name='policy'):
    with tf.variable_op_scope([obs], name, name):
        h0 = tf.identity(obs, name='h0-obs')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
        action = tf.nn.tanh(h3, name='h4-action')
        return action
Example #37
def join(cols, drop_prob=.15):
    if len(cols) == 1:
        return cols[0]
    with tf.variable_op_scope(cols, None, "Join"):
        joined = tf.reduce_mean(cols, 0)
        out = tf.cond(tflearn.get_training_mode(),
                      lambda: local_drop(cols, drop_prob), lambda: joined)
    return out
Example #38
 def create_policy_network(self, state, theta, name="policy_network"):
     with tf.variable_op_scope([state], name, name):
         h0 = tf.identity(state, "state")
         h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
         h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name="h2")
         h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
         action = tf.nn.tanh(h3, name='action')
         return action
Example #39
    def model(x, is_training=True):
        with tf.variable_op_scope([x], None, "ABCConv_1", reuse=None):
            w1 = weight_variable(shape=([3, 3, 3, 128]), name="weight_1")
            alphas_training_op1, ABCLayer1, alphas_loss1 = ABC(w1, padding="SAME")
            alphas_training_operations.append(alphas_training_op1)
            conv1 = ABCLayer1(x)
            bn_conv1 = tf.layers.batch_normalization(conv1, axis=-1)
            h_conv1 = tf.nn.relu(bn_conv1)

            w2 = weight_variable(shape=([3, 3, 128, 128]), name="weight_2")
            alphas_training_op2, ABCLayer2, alphas_loss2 = ABC(w2, padding="SAME")
            alphas_training_operations.append(alphas_training_op2)
            conv2 = ABCLayer2(h_conv1)
            pool2 = max_pool_2x2(conv2)
            bn_conv2 = tf.layers.batch_normalization(pool2, axis=-1)
            h_conv2= tf.nn.relu(bn_conv2)

            w3 = weight_variable(shape=([3, 3, 128, 256]), name="weight_3")
            alphas_training_op3, ABCLayer3, alphas_loss3 = ABC(w3, padding="SAME")
            alphas_training_operations.append(alphas_training_op3)
            conv3 = ABCLayer3(h_conv2)
            bn_conv3 = tf.layers.batch_normalization(conv3, axis=-1)
            h_conv3= tf.nn.relu(bn_conv3)

            w4 = weight_variable(shape=([3, 3, 256, 256]), name="weight_4")
            alphas_training_op4, ABCLayer4, alphas_loss4 = ABC(w4, padding="SAME")
            alphas_training_operations.append(alphas_training_op4)
            conv4 = ABCLayer4(h_conv3)
            pool4 = max_pool_2x2(conv4)
            bn_conv4 = tf.layers.batch_normalization(pool4, axis=-1)
            h_conv4= tf.nn.relu(bn_conv4)

            w5 = weight_variable(shape=([3, 3, 256,512]), name="weight_5")
            alphas_training_op5, ABCLayer5, alphas_loss5 = ABC(w5, padding="SAME")
            alphas_training_operations.append(alphas_training_op5)
            conv5 = ABCLayer5(h_conv4)
            bn_conv5 = tf.layers.batch_normalization(conv5, axis=-1)
            h_conv5= tf.nn.relu(bn_conv5)

            w6 = weight_variable(shape=([3, 3, 512,512]), name="weight_6")
            alphas_training_op6, ABCLayer6, alphas_loss6 = ABC(w6, padding="SAME")
            alphas_training_operations.append(alphas_training_op6)
            conv6 = ABCLayer6(h_conv5)
            pool6 = max_pool_2x2(conv6)
            bn_conv6 = tf.layers.batch_normalization(pool6, axis=-1)
            h_conv6= tf.nn.relu(bn_conv6)

            reshaped = tf.reshape(h_conv6, [h_conv6.get_shape().as_list()[0], -1])
            nInputPlane = reshaped.get_shape().as_list()[1]
            w_fc1 = tf.get_variable('weight_fc1', [nInputPlane, 1024], initializer=tf.contrib.layers.xavier_initializer())
            fc1 = tf.nn.relu(tf.matmul(reshaped, w_fc1))
            bn_fc1 = tf.layers.batch_normalization(fc1, axis=-1)
            h_fc1 = tf.nn.relu(bn_fc1)

            w_fc2 = tf.get_variable('weight_fc2', [1024, 10], initializer=tf.contrib.layers.xavier_initializer())
            output = tf.nn.relu(tf.matmul(h_fc1, w_fc2))

        return output
Example #40
 def model(x, is_training=True):
 # Create model
     outputs = []
     for i,m in enumerate(moduleList):
         name = 'layer_'+str(i)
         with tf.variable_op_scope([x], name, 'Layer', reuse=reuse):
             outputs.append(m(x, is_training=is_training))
         output = tf.concat(dim, outputs)
     return output
Example #41
def critic(obs, act, theta, name="critic"):
    with tf.variable_op_scope([obs, act], name, name):
        h0 = tf.identity(obs, name='h0')
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
        h1a = tf.concat([h1, act], 1)
        h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name='h2')
        qs = tf.matmul(h2, theta[4]) + theta[5]
        qfunction = tf.squeeze(qs, [1], name='h3-q')
        return qfunction
Example #42
def sin_bank(x, bank_size, length, scope=None):
    with tf.variable_op_scope([x], scope, "SinBank") as scope:
        bank = tf.get_variable("bank", dtype=tf.float32, shape=[bank_size, ],
                        initializer=tf.random_uniform_initializer(0.0, length))
        shift = tf.get_variable("shift", dtype=tf.float32, shape=[bank_size, ],
                        initializer=tf.random_uniform_initializer(0.0, length))
        if not tf.get_variable_scope().reuse:
            tf.histogram_summary(bank.name, bank)
        return tf.sin(x*bank+shift)
Example #43
 def model(x, is_training=True):
   # Create model
   outputs = []
   for i, m in enumerate(moduleList):
     name = 'layer_'+str(i)
     with tf.variable_op_scope([x], name, 'Layer', reuse=reuse):
       outputs.append(m(x, is_training=is_training))
     output = tf.concat(dim, outputs)
   return output
Example #44
 def dropout_layer(x, is_training=True):
   with tf.variable_op_scope([x], None, name):
     # def drop(): return tf.nn.dropout(x,p)
     # def no_drop(): return x
     # return tf.cond(is_training, drop, no_drop)
     if is_training:
       return tf.nn.dropout(x, p)
     else:
       return x
Example #45
 def dropout_layer(x, is_training=True):
     with tf.variable_op_scope([x], None, name):
         # def drop(): return tf.nn.dropout(x,p)
         # def no_drop(): return x
         # return tf.cond(is_training, drop, no_drop)
         if is_training:
             return tf.nn.dropout(x,p)
         else:
             return x
Example #46
def local_drop(cols, drop_prob=.85):
    size = len(cols) - 1
    with tf.variable_op_scope(cols, None, "LocalDropPath"):
        out = tf.to_float(cols)
        drop_mask = tf.to_float(
            tf.concat(0, [[1], tf.random_uniform([size])]) > drop_prob)
        masked = T(mul(T(out), tf.random_shuffle(drop_mask)))
        dropped = tf.reduce_sum(masked, 0) / tf.reduce_sum(drop_mask, 0)
    return dropped
Example #47
def policy(obs,theta,name='policy'):
  with tf.variable_op_scope([obs],name,name):
    h0 = tf.identity(obs,name='h0-obs')
    h1 = tf.nn.relu( tf.matmul(h0,theta[0]) + theta[1],name='h1')
    h2 = tf.nn.relu( tf.matmul(h1,theta[2]) + theta[3],name='h2')
    h3 = tf.identity(tf.matmul(h2,theta[4]) + theta[5],name='h3')
    action = tf.nn.tanh(h3,name='h4-action')
    summary = hist_summaries(h0,h1,h2,h3,action)
    return action,summary
Example #48
def vgg_a(inputs,
          num_classes=1000,
          dropout_keep_prob=0.5,
          is_training=True,
          spatial_squeeze=True,
          scope='vgg_a'):
  """Oxford Net VGG 11-Layers version A Example.

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 224x224.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    is_training: whether or not the model is being trained.
    spatial_squeeze: whether or not the spatial dimensions of the outputs
      should be squeezed. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with tf.variable_op_scope([inputs], scope, 'vgg_a') as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d.
    with slim.arg_scope([slim.conv2d, slim.max_pool2d],
                        outputs_collections=end_points_collection):
      net = slim.repeat(inputs, 1, slim.conv2d, 64, [3, 3], scope='conv1')
      net = slim.max_pool2d(net, [2, 2], scope='pool1')
      net = slim.repeat(net, 1, slim.conv2d, 128, [3, 3], scope='conv2')
      net = slim.max_pool2d(net, [2, 2], scope='pool2')
      net = slim.repeat(net, 2, slim.conv2d, 256, [3, 3], scope='conv3')
      net = slim.max_pool2d(net, [2, 2], scope='pool3')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv4')
      net = slim.max_pool2d(net, [2, 2], scope='pool4')
      net = slim.repeat(net, 2, slim.conv2d, 512, [3, 3], scope='conv5')
      net = slim.max_pool2d(net, [2, 2], scope='pool5')
      # Use conv2d instead of fully_connected layers.
      net = slim.conv2d(net, 4096, [7, 7], padding='VALID', scope='fc6')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout6')
      net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
      net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                         scope='dropout7')
      net = slim.conv2d(net, num_classes, [1, 1],
                        activation_fn=None,
                        normalizer_fn=None,
                        scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = dict(tf.get_collection(end_points_collection))
      if spatial_squeeze:
        net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
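
A minimal sketch of building the vgg_a graph above in classification mode on 224x224 inputs, as the docstring recommends; batch size is left unspecified:

# Sketch: build vgg_a for 224x224 classification (per the docstring's note).
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
logits, end_points = vgg_a(images, num_classes=1000, is_training=False)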
Example #49
def Residual_func(nOutputPlane, kW, kH, dW=1, dH=1,
        padding='VALID', bias=True, name='Residual_func',reuse=None,fixShapeMethod='pad',type='basic',bottleWidth=2):
        with tf.variable_op_scope(None,None, name, reuse=reuse):
            if type=='basic':
                curr_layers = [
                    SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                    BatchNormalization(),
                    ReLU(),
                    SpatialConvolution(nOutputPlane,kW,kH,1,1, padding=padding,bias=bias),
                    BatchNormalization()
                ]
            elif type=='pre':
                curr_layers = [
                    BatchNormalization(),
                    ReLU(),
                    SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                    BatchNormalization(),
                    ReLU(),
                    SpatialConvolution(nOutputPlane,kW,kH,1,1, padding=padding,bias=bias)
                ]
            elif type=='bottleneck':
                curr_layers = [
                    SpatialConvolution(nOutputPlane,1,1,1,1, padding='valid',bias=bias),
                    BatchNormalization(),
                    ReLU(),
                    SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                    BatchNormalization(),
                    ReLU(),
                    SpatialConvolution(nOutputPlane*bottleWidth,1,1,1,1, padding='valid',bias=bias),
                    BatchNormalization()
                ]
            if type=='dropout':
                curr_layers = [
                    SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                    ReLU(),
                    Dropout(0.5),
                    SpatialConvolution(nOutputPlane,kW,kH,1,1, padding=padding,bias=bias)
                ]
            elif type=='prebottleneck':
                curr_layers = [
                    BatchNormalization(),
                    ReLU(),
                    SpatialConvolution(nOutputPlane,1,1,1,1, padding='valid',bias=bias),
                    BatchNormalization(),
                    ReLU(),
                    SpatialConvolution(nOutputPlane,kW,kH,dW,dH, padding=padding,bias=bias),
                    BatchNormalization(),
                    ReLU(),
                    SpatialConvolution(nOutputPlane*bottleWidth,1,1,1,1, padding='valid',bias=bias)
                ]
            modules = []
            if 'pre' in type:
                modules=[Residual(curr_layers,fixShape=[fixShapeMethod,dW,dH])]
            else:
                modules=[Residual(curr_layers,fixShape=[fixShapeMethod,dW,dH])]+[ReLU()]
        return modules
Example #50
 def conv2d(x, is_training=True):
     nInputPlane = x.get_shape().as_list()[3]
     with tf.variable_op_scope([x], None, name, reuse=reuse):
         w = tf.get_variable('weight', [kH, kW, nInputPlane, nOutputPlane],
                         initializer=tf.contrib.layers.xavier_initializer_conv2d())
         out = tf.nn.conv2d(x, w, strides=[1, dH, dW, 1], padding=padding)
         if bias:
             b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
             out = tf.nn.bias_add(out, b)
         return out
Example #51
def policy(obs, theta, name="policy"):
    with tf.variable_op_scope([obs], name, name):
        h0 = tf.identity(obs, name="h0-obs")
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name="h1")
        h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name="h2")
        h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name="h3")
        action = tf.nn.tanh(h3, name="h4-action")
        # print(action.get_shape())
        summary = hist_summaries(h0, h1, h2, h3, action)
        return action, summary
Example #52
 def create_q_network(self, state, action, theta, name='q_network'):
     with tf.variable_op_scope([state, action], name, name):
         h0 = tf.identity(state, name='state')
         h1_state = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1])
         # h1 = concat(h1_state,action)
         h1 = tf.concat(1, [h1_state, action], name="h1")
         h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name="h2")
         h3 = tf.add(tf.matmul(h2, theta[4]), theta[5], name='h3')
         q = tf.squeeze(h3, [1], name='q')
         return q
Example #53
 def affineLayer(x, is_training=True):
     with tf.variable_op_scope([x], name, 'Affine', reuse=reuse):
         reshaped = tf.reshape(x, [x.get_shape().as_list()[0], -1])
         nInputPlane = reshaped.get_shape().as_list()[1]
         w = tf.get_variable('weight', [nInputPlane, nOutputPlane], initializer=tf.contrib.layers.xavier_initializer())
         output = tf.matmul(reshaped, w)
         if bias:
             b = tf.get_variable('bias', [nOutputPlane],initializer=tf.zeros_initializer)
             output = tf.nn.bias_add(output, b)
     return output
Example #54
File: ops.py  Project: lukemetz/cppn
def conv2d_transpose(inputs,
           num_filters_out,
           kernel_size,
           stride=1,
           padding='SAME',
           activation=tf.nn.relu,
           stddev=0.01,
           bias=0.0,
           weight_decay=0,
           batch_norm_params=None,
           is_training=True,
           trainable=True,
           restore=True,
           scope=None,
           reuse=None):

  with tf.variable_op_scope([inputs], scope, 'Conv_Transpose', reuse=reuse):
    kernel_h, kernel_w = _two_element_tuple(kernel_size)
    stride_h, stride_w = _two_element_tuple(stride)
    num_filters_in = inputs.get_shape()[-1]
    weights_shape = [kernel_h, kernel_w,
                     num_filters_out, num_filters_in]
    weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
    l2_regularizer = None
    if weight_decay and weight_decay > 0:
      l2_regularizer = losses.l2_regularizer(weight_decay)
    weights = variables.variable('weights',
                                 shape=weights_shape,
                                 initializer=weights_initializer,
                                 regularizer=l2_regularizer,
                                 trainable=trainable,
                                 restore=restore)
    h = inputs.get_shape()[1].value
    w = inputs.get_shape()[2].value
    c = inputs.get_shape()[3].value
    output_shape = tf.concat(0, [tf.shape(inputs)[0:1], [h*stride[0], w*stride[1], num_filters_out]])
    conv = tf.nn.conv2d_transpose(inputs, weights, strides=[1, stride_h, stride_w, 1],
                                  output_shape=output_shape, padding=padding)
    conv.set_shape((None, h*stride[0], w*stride[1], num_filters_out))
    if batch_norm_params is not None:
      with scopes.arg_scope([batch_norm], is_training=is_training,
                            trainable=trainable, restore=restore):
        outputs = batch_norm(conv, **batch_norm_params)
    else:
      bias_shape = [num_filters_out,]
      bias_initializer = tf.constant_initializer(bias)
      biases = variables.variable('biases',
                                  shape=bias_shape,
                                  initializer=bias_initializer,
                                  trainable=trainable,
                                  restore=restore)
      outputs = tf.nn.bias_add(conv, biases)
    if activation:
      outputs = activation(outputs)
    return outputs
Example #55
    def q_net(self,obs, act, theta, name="qfunction"):
        with tf.variable_op_scope([obs, act], name, name):
            h0 = tf.identity(obs, name='h0-obs')
            h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
            h1a = tf.concat(1, [h1, act])
            h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name='h2')
            qs = tf.matmul(h2, theta[4]) + theta[5]
            q = tf.squeeze(qs, [1], name='h3-q')

            summary = self.hist_summaries(h0, h1, h2, q)
            return q, summary
Example #56
def coin_flip(prob=.5):
  """Random boolean variable, with `prob` chance of being true.

  Used to choose between local and global drop path.

  Args:
    prob:float, probability of being True.
  """
  with tf.variable_op_scope([],None,"CoinFlip"):
    coin = tf.random_uniform([1])[0]>prob
  return coin
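
A minimal sketch of the use described in the docstring, choosing between local and global drop path with tf.cond; the columns, drop_some and random_column names are borrowed from the drop_path example earlier in this listing:

# Sketch: pick between local and global drop path, as in drop_path() above.
coin = coin_flip(prob=.5)
out = tf.cond(coin,
              lambda: drop_some(columns),      # helpers borrowed from the
              lambda: random_column(columns))  # drop_path example above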
Example #57
def q_network(state,action,theta, name="q_network"):
  with tf.variable_op_scope([state,action],name,name):
    h0 = tf.identity(state,name='h0-state')
    h0a = tf.identity(action,name='h0-act')
    h1  = tf.nn.relu( tf.matmul(h0,theta[0]) + theta[1],name='h1')
    h1a = tf.concat(1,[h1,action])
    h2  = tf.nn.relu( tf.matmul(h1a,theta[2]) + theta[3],name='h2')
    qs  = tf.matmul(h2,theta[4]) + theta[5]
    q = tf.squeeze(qs,[1],name='h3-q')
    
    return q
Example #58
    def mu_net(self, obs, theta, name='policy'):
        with tf.variable_op_scope([obs], name, name):
            h0 = tf.identity(obs, name='h0-obs')
            h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name='h1')
            h2 = tf.nn.relu(tf.matmul(h1, theta[2]) + theta[3], name='h2')
            h3 = tf.identity(tf.matmul(h2, theta[4]) + theta[5], name='h3')
            action = tf.nn.tanh(h3, name='h4-action')

            action_add =  tf.add(action,tf.constant(1.0, dtype=tf.float32, shape= [1]))

            summary = self.hist_summaries(h0, h1, h2, h3, action_add)
            return action_add, summary
Example #59
def qfunction(obs, act, theta, name="qfunction"):
    with tf.variable_op_scope([obs, act], name, name):
        h0 = tf.identity(obs, name="h0-obs")
        h0a = tf.identity(act, name="h0-act")
        h1 = tf.nn.relu(tf.matmul(h0, theta[0]) + theta[1], name="h1")
        h1a = tf.concat(1, [h1, act])
        h2 = tf.nn.relu(tf.matmul(h1a, theta[2]) + theta[3], name="h2")
        qs = tf.matmul(h2, theta[4]) + theta[5]
        q = tf.squeeze(qs, [1], name="h3-q")

        summary = hist_summaries(h0, h0a, h1, h2, q)
        return q, summary
Example #60
    def __call__(self, flow):
        """Applies this layer to the input `Tensor` and returns the output `Tensor`.

        Args:
            flow: The input `Tensor`.

        Returns:
            Output of this layer.

        """

        with tf.variable_op_scope([flow], self.name, 'Conv', reuse=self.reuse):
            if not self.reuse:
                full_shape = self._filter_shape + [flow.get_shape()[-1].value, self._n_output_channels]
                self.filter = tf.get_variable(
                    'filter',
                    full_shape,
                    initializer=self._weight_init,
                    regularizer=self._weight_regularizer,
                    trainable=self.trainable)
                self.params.append(self.filter)
                tf.add_to_collection(tf.GraphKeys.WEIGHTS, self.filter)

                if self._has_bias:
                    self.bias = tf.get_variable(
                        'bias',
                        self._n_output_channels,
                        initializer=self._bias_init,
                        regularizer=self._bias_regularizer,
                        trainable=self.trainable)
                    self.params.append(self.bias)
                    tf.add_to_collection(tf.GraphKeys.BIASES, self.bias)

                tf.initialize_variables(self.params).run()
                self.reuse = True

            flow = tf.nn.conv2d(
                flow,
                self.filter,
                [1] + self._strides + [1],
                self._padding,
                self._use_cudnn_on_gpu)

            flow = tf.nn.bias_add(flow, self.bias)

            if self._activation_fn is not None:
                flow = self._activation_fn(flow)

        tf.add_to_collection(tf.GraphKeys.ACTIVATIONS, flow)
        return flow