Example #1
def nn_layer(X, n_neurons, name, activation=None):  # pylint: disable=invalid-name
    """Creates NN layer.

  Creates NN layer with W's initialized as truncated normal and
  b's as zeros.

  Args:
    X: Input tensor.
    n_neurons: An integer. Number of neurons in the layer.
    name: A string. Name of the layer.
    activation: Activation function. Optional.

  """
    with tf.name_scope(name):
        # dimension of each x in X
        n_inputs = int(X.get_shape()[1])

        stddev = 2.0 / np.sqrt(n_inputs)
        init = tf.truncated_normal((n_inputs, n_neurons), stddev=stddev)

        with tf.device(_gpu_device_name(0)):
            W = tf.Variable(init, name='W')  # pylint: disable=invalid-name

            b = tf.Variable(tf.zeros([n_neurons]), name='b')  # pylint: disable=invalid-name

            Z = tf.matmul(X, W) + b  # pylint: disable=invalid-name

            if activation is not None:  # pylint: disable=no-else-return
                return activation(Z)
            else:
                return Z
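
A minimal usage sketch (hypothetical input width and layer sizes; assumes the same module context as above, i.e. tensorflow as tf, numpy as np, and the _gpu_device_name helper):

graph = tf.Graph()
with graph.as_default():
    X = tf.placeholder(tf.float32, shape=(None, 784), name='X')  # hypothetical input width
    hidden = nn_layer(X, 300, 'hidden', activation=tf.nn.relu)
    logits = nn_layer(hidden, 10, 'logits')                      # linear output layer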
Example #2
  def apply_gradients(self, grads_and_vars, beta): # pylint: disable=arguments-differ
    """Applies gradients and adds Gaussian noise scaled by the inverse
    temperature `beta`."""
    with tf.device(_gpu_device_name(self.replica_id)):

      # Noise scale for a Langevin-type update: stddev = sqrt(2*lr/beta).
      c = tf.sqrt(np.float32(2*self.learning_rate/beta)) # pylint: disable=invalid-name
      ops_ = [tf.assign(
          v,
          v - self.learning_rate*g + c*tf.random_normal(v.shape, stddev=1))
              for g, v in grads_and_vars]
    return tf.group(ops_)
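
The update above is v <- v - lr*g + sqrt(2*lr/beta)*eps with eps ~ N(0, 1), i.e. gradient descent plus Gaussian noise whose scale is set by the inverse temperature beta (larger beta, less noise). A NumPy sketch of the same arithmetic with hypothetical values:

import numpy as np

lr, beta = 0.01, 100.0
c = np.sqrt(2 * lr / beta)          # noise scale, about 0.014 here
v, g = np.zeros(3), np.ones(3)      # toy variable and gradient
v_new = v - lr * g + c * np.random.normal(size=v.shape)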
Example #3
  def apply_gradients(self, grads_and_vars):
    """Applies gradients.

    Args:
      grads_and_vars: list of tuples as returned by
        optimizer.compute_gradients()

    Returns:
      A grouped op that applies the updates.
    """
    with tf.device(_gpu_device_name(self.replica_id)):
      ops_ = [tf.assign(v, v - self.learning_rate*g)
              for g, v in grads_and_vars]

      train_op = tf.group(ops_)
    return train_op
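
A usage sketch for the compute/apply pair (here `opt` stands for an instance of the replica-optimizer class these methods belong to, and `loss`, `X`, `y` and the batches are hypothetical):

grads_and_vars = opt.compute_gradients(loss)    # see Example #8
train_op = opt.apply_gradients(grads_and_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op, feed_dict={X: X_batch, y: y_batch})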
Example #4
  def apply_gradients(self, grads_and_vars, stddev): # pylint: disable=arguments-differ
    """Applies gradients and adds normal noise with `stddev`.

    Args:
      grads_and_vars: list of tuples as returned by
        optimizer.compute_gradients()
      stddev: Standard deviation of the normal noise.

    Returns:
      A grouped op that applies the updates.
    """

    with tf.device(_gpu_device_name(self.replica_id)):
      ops_ = [tf.assign(
          var,
          (var
           - self.learning_rate*grad
           + tf.random_normal(var.shape, stddev=stddev)))
              for grad, var in grads_and_vars]
      train_op = tf.group(ops_)
    return train_op
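
Examples #2 and #4 implement the same noisy update; #4 takes the noise stddev directly, while #2 derives it from a temperature. The two coincide (up to RNG draws) when stddev is chosen as below (hypothetical values):

import numpy as np

learning_rate, beta = 0.01, 100.0
stddev = np.sqrt(2 * learning_rate / beta)   # matches Example #2's noise scale c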
Example #5
def cnn_cifar10_model(graph):
    """Builds a CIFAR-10 CNN (32x32x3 inputs); returns (X, y, keep_prob, logits)."""

    with graph.as_default():
        with tf.name_scope('Inputs'):
            with tf.name_scope('X'):
                X = tf.placeholder(tf.float32, shape=(None, 3072), name='X')
                X_reshaped = tf.reshape(X, shape=[-1, 32, 32, 3])
            with tf.name_scope('y'):
                y = tf.placeholder(tf.int64, shape=[None], name='y')

        with tf.device(_gpu_device_name(0)):
            with tf.name_scope('conv1'):
                kernel = _variable_with_weight_decay(name='kernel1',
                                                     shape=[5, 5, 3, 64],
                                                     stddev=5e-2,
                                                     wd=None)
                conv = tf.nn.conv2d(input=X_reshaped,
                                    filter=kernel,
                                    strides=[1, 1, 1, 1],
                                    padding='SAME')
                biases = tf.get_variable(
                    name='biases1',
                    shape=[64],
                    initializer=tf.constant_initializer(0.0))
                pre_activation = tf.nn.bias_add(conv, biases)
                conv1 = tf.nn.relu(pre_activation)

            with tf.name_scope('pool1'):

                pool1 = tf.nn.max_pool(conv1,
                                       ksize=[1, 3, 3, 1],
                                       strides=[1, 2, 2, 1],
                                       padding='SAME',
                                       name='pool1')

            with tf.name_scope('norm1'):
                norm1 = tf.nn.lrn(pool1,
                                  4,
                                  bias=1.0,
                                  alpha=0.001 / 9.0,
                                  beta=0.75,
                                  name='norm1')

            with tf.name_scope('conv2'):
                kernel = _variable_with_weight_decay('kernel2',
                                                     shape=[5, 5, 64, 64],
                                                     stddev=5e-2,
                                                     wd=None)
                conv = tf.nn.conv2d(input=norm1,
                                    filter=kernel,
                                    strides=[1, 1, 1, 1],
                                    padding='SAME')
                biases = tf.get_variable(
                    name='biases2',
                    shape=[64],
                    initializer=tf.constant_initializer(0.1))
                pre_activation = tf.nn.bias_add(conv, biases)
                conv2 = tf.nn.relu(pre_activation)

            with tf.name_scope('norm2'):
                norm2 = tf.nn.lrn(conv2,
                                  4,
                                  bias=1.0,
                                  alpha=0.001 / 9.0,
                                  beta=0.75,
                                  name='norm2')

            with tf.name_scope('pool2'):
                pool2 = tf.nn.max_pool(norm2,
                                       ksize=[1, 3, 3, 1],
                                       strides=[1, 2, 2, 1],
                                       padding='SAME',
                                       name='pool2')

            with tf.name_scope('fully_connected1'):
                # pool2 is 8x8x64 for 32x32 inputs (two stride-2 pools); use
                # -1 for the batch dimension instead of the static None.
                reshaped = tf.reshape(pool2, [-1, 8 * 8 * 64])
                fc1 = nn_layer(reshaped, 384, 'fully_connected1', tf.nn.relu)

            keep_prob = tf.placeholder(tf.float32, name='keep_prob')
            with tf.name_scope('dropout1'):
                fc1_dropout = tf.nn.dropout(fc1, keep_prob)

            with tf.name_scope('fully_connected2'):
                fc2 = nn_layer(fc1_dropout, 192, 'fully_connected2',
                               tf.nn.relu)

            with tf.name_scope('dropout2'):
                fc2_dropout = tf.nn.dropout(fc2, keep_prob)

            with tf.name_scope('logits'):
                logits = nn_layer(fc2_dropout, 10, 'logits')

    return X, y, keep_prob, logits
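
A sketch of wiring the model into a training step; the cross-entropy loss and Adam optimizer are illustrative choices, not part of the source, and X_batch/y_batch stand for a batch of flattened CIFAR-10 images and integer labels:

graph = tf.Graph()
X, y, keep_prob, logits = cnn_cifar10_model(graph)
with graph.as_default():
    xentropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                              logits=logits)
    loss = tf.reduce_mean(xentropy)
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op, feed_dict={X: X_batch, y: y_batch, keep_prob: 0.5})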
Example #6
def cnn_cifar10_model2(graph):
    """Builds a small CNN over 28x28x1 inputs; returns (X, y, keep_prob, logits)."""
    height = 28
    width = 28
    channels = 1
    n_inputs = height * width

    conv1_fmaps = 32
    conv1_ksize = 3
    conv1_stride = 1
    conv1_pad = "SAME"

    conv2_fmaps = 64
    conv2_ksize = 3
    conv2_stride = 2
    conv2_pad = "SAME"

    pool3_fmaps = conv2_fmaps

    n_fc1 = 64
    n_outputs = 10

    with graph.as_default():
        with tf.name_scope('Input'):
            X = tf.placeholder(tf.float32,
                               shape=[None, width * height * channels],
                               name='X')
            y = tf.placeholder(tf.float32, shape=[None, n_outputs], name='y')
            X_reshaped = tf.reshape(X,
                                    shape=[-1, width, height, channels],
                                    name='X_reshaped')

            #keep_prob = tf.placeholder(tf.float32, shape=[], name='keep_proba')
            #keep_prob_val = 1.0 - keep_prob
            #keep_prob_val = tf.Variable(tf.zeros([]), name='keep_prob_val')
            #keep_prob_val = tf.assign(keep_prob_val, 1.0 - keep_prob, name='assigned_prob')
        with tf.device(_gpu_device_name(0)):
            with tf.name_scope('conv1'):
                # The call was left unfinished; filled in with the conv1_*
                # hyperparameters declared above.
                conv1 = tf.layers.conv2d(X_reshaped,
                                         filters=conv1_fmaps,
                                         kernel_size=conv1_ksize,
                                         strides=conv1_stride,
                                         padding=conv1_pad,
                                         activation=tf.nn.relu,
                                         name='conv1')
            """
      with tf.name_scope('conv1') as scope:
        W = tf.Variable('W', )
        conv = tf.layers.conv2d(inputs=X_reshaped, filters=32,
          kernel_size=[3,3], padding='SAME', activation=tf.nn.relu)
        conv = tf.layers.conv2d(inputs=conv, filters=64, 
          kernel_size=[3,3], padding='SAME', activation=tf.nn.relu)
        pool = tf.layers.max_pooling2d(conv, pool_size=[2,2], 
          strides=2, padding='SAME')
        drop = tf.layers.dropout(pool, rate=keep_prob_val, name=scope)

      with tf.name_scope('conv2'):
        conv = tf.layers.conv2d(inputs=drop, filters=128,
          kernel_size=[2,2], padding='SAME', activation=tf.nn.relu)
        pool = tf.layers.max_pooling2d(conv, pool_size=[2,2],
          strides=2, padding='SAME')
        conv = tf.layers.conv2d(inputs=pool, filters=128, 
          kernel_size=[2,2], padding='SAME', activation=tf.nn.relu)
        pool = tf.layers.max_pooling2d(conv, pool_size=[2,2],
          strides=2, padding='SAME')
        drop = tf.layers.dropout(pool, rate=keep_prob_val)

      with tf.name_scope('fully_connected'):
        flat = tf.reshape(drop, [-1, 4 * 4 * 128])
        fc = nn_layer(flat, 1500, name='fully_connected', 
          activation=tf.nn.relu)
        drop = tf.layers.dropout(fc, rate=keep_prob_val)
        logits = nn_layer(drop, 10, name='logits', activation=tf.nn.softmax)
      """
    return X, y, keep_prob, logits
Example #7
def cnn_cifar10_model3(graph):
    """Builds a CIFAR-10 CNN with tf.layers; returns (X, y, keep_prob, logits)."""
    height = 32
    width = 32
    channels = 3
    n_inputs = height * width * channels

    conv1_fmaps = 32
    conv1_ksize = 3
    conv1_stride = 1
    conv1_pad = "SAME"

    conv2_fmaps = 64
    conv2_ksize = 3
    conv2_stride = 2
    conv2_pad = "SAME"

    pool3_fmaps = conv2_fmaps

    n_fc1 = 64
    n_outputs = 10

    gpu_device_name = _gpu_device_name(0)

    with graph.as_default():
        with tf.name_scope('Input'):
            with tf.name_scope('X'):
                X = tf.placeholder(tf.float32,
                                   shape=[None, n_inputs],
                                   name='X')
                X_reshaped = tf.reshape(X, shape=[-1, height, width, channels])
            with tf.name_scope('y'):
                y = tf.placeholder(tf.int32, shape=[None], name='y')
        with tf.device(gpu_device_name):
            with tf.name_scope('conv1'):
                conv1 = tf.layers.conv2d(X_reshaped,
                                         filters=conv1_fmaps,
                                         kernel_size=conv1_ksize,
                                         strides=conv1_stride,
                                         padding=conv1_pad,
                                         activation=tf.nn.relu,
                                         name='conv1')
            with tf.name_scope('conv2'):
                conv2 = tf.layers.conv2d(conv1,
                                         filters=conv2_fmaps,
                                         kernel_size=conv2_ksize,
                                         strides=conv2_stride,
                                         padding=conv2_pad,
                                         activation=tf.nn.relu,
                                         name='conv2')

            with tf.name_scope('pool3'):
                pool3 = tf.nn.max_pool(conv2,
                                       ksize=[1, 2, 2, 1],
                                       strides=[1, 2, 2, 1],
                                       padding='VALID')
                # conv2 (stride 2) halves 32 -> 16 and the pool halves 16 -> 8,
                # so the flattened size is pool3_fmaps * 8 * 8.
                pool3_flat = tf.reshape(pool3, shape=[-1, pool3_fmaps * 8 * 8])

        with tf.name_scope('fully_connected'):
            fc = nn_layer(pool3_flat, n_fc1, activation=tf.nn.relu, name='fc')

        with tf.device(gpu_device_name):
            with tf.name_scope('dropout'):
                keep_prob = tf.placeholder(tf.float32, name='keep_prob')
                fc_dropout = tf.nn.dropout(fc, keep_prob)

            with tf.name_scope('logits'):
                logits = tf.layers.dense(fc_dropout, n_outputs, name='logits')

    return X, y, keep_prob, logits
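
A quick smoke test (hypothetical) to confirm the graph builds and the flattened pooling size is consistent:

graph = tf.Graph()
X, y, keep_prob, logits = cnn_cifar10_model3(graph)
print(logits.shape)   # expected: (?, 10)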
Example #8
  def compute_gradients(self, loss):
    """Wrapper for tf.train.Optimizer.compute_gradients()."""
    var_list = self._get_dependencies(loss)
    with tf.device(_gpu_device_name(self.replica_id)):
      grads_and_vars = self.tf_optimizer.compute_gradients(loss, var_list)
    return grads_and_vars
Example #9
  def minimize(self, loss):
    """Wrapper for tf.train.Optimizer.minimize()."""
    self.trainable_variables = self._get_dependencies(loss) # pylint: disable=attribute-defined-outside-init
    with tf.device(_gpu_device_name(self.replica_id)):
      self.train_op = self.tf_optimizer.minimize(loss) # pylint: disable=attribute-defined-outside-init
    return self.train_op
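
Usage sketch for the wrapper (here `opt` is an instance of the surrounding replica-optimizer class and `loss` a scalar tensor, both hypothetical):

train_op = opt.minimize(loss)    # or compute_gradients + apply_gradients
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)           # feed_dict with the model's placeholders omitted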
Example #10
def copy_variable_to_graph(org_instance, to_graph, namespace):  # pylint: disable=too-many-locals
    """Copies the Variable instance 'org_instance' into the graph
  'to_graph', under the given namespace.
  The dict 'COPIED_VARIABLES', if provided, will be updated with
  mapping the new variable's name to the instance.
  """
    # global COPIED_VARIABLES
    if not isinstance(org_instance, tf.Variable):
        raise TypeError(str(org_instance) + " is not a Variable")

    #The name of the new variable
    if namespace != '':
        new_name = (namespace + '/' +
                    org_instance.name[:org_instance.name.index(':')])
    else:
        new_name = org_instance.name[:org_instance.name.index(':')]

    ###############################################################
    if namespace != '':
        replica_id = int(namespace.split('_')[1])
    else:
        replica_id = -1
    ###############################################################
    #Get the collections that the new instance needs to be added to.
    #The new collections will also be a part of the given namespace,
    #except the special ones required for variable initialization and
    #training.
    collections = []
    for name, collection in org_instance.graph._collections.items():  # pylint: disable=protected-access
        if org_instance in collection:
            if (name == ops.GraphKeys.GLOBAL_VARIABLES
                    or name == ops.GraphKeys.TRAINABLE_VARIABLES
                    or namespace == ''):
                collections.append(name)
            else:
                collections.append(namespace + '/' + name)

    #See if it's trainable.
    trainable = (org_instance in org_instance.graph.get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES))

    #Get the initial value

    with org_instance.graph.as_default():
        temp_session = tf.Session()  # pylint: disable=unused-variable
        # init_value = temp_session.run(org_instance.initialized_value())

    #Initialize the new variable
    with to_graph.as_default():
        '''
    new_var = PLACER.copy_and_init_variable_on_cpu( org_instance,
                            new_name,
                            trainable=trainable,
                            collections=collections,
                            validate_shape=True)
    '''
        ######################################################
        if (replica_id >= 0 and 'gpu' in org_instance.device.lower()):
            device_name = _gpu_device_name(replica_id)
        else:
            device_name = '/cpu:0'

        with tf.device(device_name):
            # The copy is re-initialized rather than given the original's
            # current value: 2-D variables (weight matrices) get a
            # truncated-normal init, 1-D variables (biases) get zeros.
            n_inputs = int(org_instance.get_shape()[0])
            try:
                n_neurons = int(org_instance.get_shape()[1])
                stddev = 2.0 / np.sqrt(n_inputs)
                init = tf.truncated_normal((n_inputs, n_neurons),
                                           stddev=stddev)
            except IndexError:
                init = tf.zeros([n_inputs])

            new_var = tf.Variable(init,
                                  trainable=trainable,
                                  name=new_name,
                                  collections=collections,
                                  validate_shape=True)
        ######################################################
        '''
    new_var = tf.Variable(init_value,
                trainable=trainable,
                name=new_name,
                collections=collections,
                validate_shape=True)
    '''
    #Add to the COPIED_VARIABLES dict
    COPIED_VARIABLES[new_var.name] = new_var
    '''
  if (replica_id >=0 and
    'gpu' in org_instance.device.lower()):
    new_var._set_device(_gpu_device_name(replica_id))
  '''
    #print('var:', org_instance.device, new_var.device)

    return new_var
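
A hypothetical round trip: copy a variable into a fresh graph under a 'replica_<id>' namespace (the format the function parses for the replica id):

src_graph = tf.Graph()
with src_graph.as_default():
    w = tf.Variable(tf.zeros([4, 3]), name='w')

dst_graph = tf.Graph()
w_copy = copy_variable_to_graph(w, dst_graph, namespace='replica_1')
print(w_copy.name)   # replica_1/w:0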
Example #11
def copy_to_graph(org_instance, to_graph, namespace="", exclude=None):  # pylint: disable=too-many-locals, too-many-statements, too-many-branches
    """
  Makes a copy of the Operation/Tensor instance 'org_instance'
  for the graph 'to_graph', recursively. Therefore, all required
  structures linked to org_instance will be automatically copied.
  'COPIED_VARIABLES' should be a dict mapping pertinent copied variable
  names to the copied instances.

  The new instances are automatically inserted into the given 'namespace'.
  If namespace='', it is inserted into the graph's global namespace.
  However, to avoid naming conflicts, it's better to provide a namespace.
  If the instance happens to be part of one or more collections, it is
  added to the appropriate collections in to_graph as well.
  For example, for collection 'C' which the instance happens to be a
  part of, given a namespace 'N', the new instance will be a part of
  'N/C' in to_graph.

  Returns the corresponding instance with respect to to_graph.

  TODO: Order of insertion into collections is not preserved
  """
    #print(org_instance.name)

    ####################################################################
    if namespace != '':
        replica_id = int(namespace.split('_')[1])
    else:
        replica_id = -1
    global COPIED_VARIABLES  # pylint: disable=global-statement
    if exclude:
        for exc in exclude:
            if org_instance.name.split(':')[0] == exc.name.split(':')[0]:
                return exc

    ####################################################################

    #The name of the new instance
    if namespace != '':
        new_name = namespace + '/' + org_instance.name
    else:
        new_name = org_instance.name

    #If a variable by the new name already exists, return the
    #corresponding tensor that will act as an input
    if new_name in COPIED_VARIABLES:
        return to_graph.get_tensor_by_name(COPIED_VARIABLES[new_name].name)

    #If an instance of the same name exists, return appropriately
    try:
        already_present = to_graph.as_graph_element(new_name,
                                                    allow_tensor=True,
                                                    allow_operation=True)
        return already_present
    except:  # pylint: disable=bare-except
        pass

    #Get the collections that the new instance needs to be added to.
    #The new collections will also be a part of the given namespace.
    collections = []
    for name, collection in org_instance.graph._collections.items():  # pylint: disable=protected-access
        if org_instance in collection:
            if namespace == '':
                collections.append(name)
            else:
                collections.append(namespace + '/' + name)

    #Take action based on the class of the instance

    if isinstance(org_instance, tf.Tensor):  # pylint: disable=no-else-return

        #If it's a Tensor, it is one of the outputs of the underlying
        #op. Therefore, copy the op itself and return the appropriate
        #output.
        op = org_instance.op  # pylint: disable=invalid-name
        new_op = copy_to_graph(op, to_graph, namespace, exclude=exclude)
        output_index = op.outputs.index(org_instance)
        new_tensor = new_op.outputs[output_index]
        #Add to collections if any
        for collection in collections:
            to_graph.add_to_collection(collection, new_tensor)

        return new_tensor

    elif isinstance(org_instance, tf.Operation):

        op = org_instance  # pylint: disable=invalid-name

        #If it has an original_op parameter, copy it
        if op._original_op is not None:  # pylint: disable=protected-access
            new_original_op = copy_to_graph(
                op._original_op,
                to_graph,  # pylint: disable=protected-access
                namespace,
                exclude=exclude)
        else:
            new_original_op = None

        #If it has control inputs, call this function recursively on each.
        new_control_inputs = [
            copy_to_graph(x, to_graph, namespace, exclude=exclude)
            for x in op.control_inputs
        ]

        #If it has inputs, call this function recursively on each.
        new_inputs = [
            copy_to_graph(x, to_graph, namespace, exclude=exclude)
            for x in op.inputs
        ]

        #Make a new node_def based on that of the original.
        #An instance of tensorflow.core.framework.graph_pb2.NodeDef, it
        #stores String-based info such as name, device and type of the op.
        #Unique to every Operation instance.
        new_node_def = deepcopy(op.node_def)
        #Change the name
        new_node_def.name = new_name

        #Copy the other inputs needed for initialization
        output_types = op._output_types[:]  # pylint: disable=protected-access
        input_types = op._input_types[:]  # pylint: disable=protected-access

        #print('name:', new_name)
        #print('output_types:',output_types)
        #print('input types',input_types)
        #print('new inputs', new_inputs)
        #print('##########################')

        #Make a copy of the op_def too.
        #It's unique to every _type_ of Operation.
        op_def = deepcopy(op.op_def)

        #Initialize a new Operation instance
        new_op = tf.Operation(new_node_def, to_graph, new_inputs, output_types,
                              new_control_inputs, input_types, new_original_op,
                              op_def)
        #Use Graph's hidden methods to add the op
        to_graph._add_op(new_op)  # pylint: disable=protected-access
        to_graph._record_op_seen_by_control_dependencies(new_op)  # pylint: disable=protected-access
        #print(to_graph._device_function_stack)
        for device_function in reversed(to_graph._device_function_stack):  # pylint: disable=protected-access

            new_op._set_device(device_function(new_op))  # pylint: disable=protected-access
            #########################################################
            #print(device_function(new_op))
            #new_op = PLACER.set_on_gpu(new_op, replica_id)
            ########################################################

        ########################################################
        if (replica_id >= 0 and 'gpu' in op.device.lower()):
            new_op._set_device(_gpu_device_name(replica_id))  # pylint: disable=protected-access

        return new_op
        ########################################################
    else:
        raise TypeError("Could not copy instance: " + str(org_instance))