コード例 #1
0
  def __init__(self,
               input_shape=None,
               batch_input_shape=None,
               input_dtype=None,
               name=None):
    """Set up an input layer description.

    Exactly one of `input_shape` (per-sample shape, batch dimension
    prepended as None) or `batch_input_shape` (full shape including the
    batch dimension) must be given.  `input_dtype` defaults to
    tf.float32 and `name` to "input_<uid>".
    """
    # Input layers never train and never depend on the learning phase.
    self.uses_learning_phase = False
    self.trainable = False

    if not name:
      # TODO(rbharath): Keras uses a global var here to maintain
      # unique counts. This seems dangerous. How does tensorflow handle?
      name = 'input_' + str(model_ops.get_uid('input'))
    self.name = name

    if input_shape and batch_input_shape:
      raise ValueError('Only provide the input_shape OR '
                       'batch_input_shape argument to '
                       'InputLayer, not both at the same time.')
    if batch_input_shape:
      batch_input_shape = tuple(batch_input_shape)
    elif input_shape:
      # Prepend an unknown batch dimension.
      batch_input_shape = (None,) + tuple(input_shape)
    else:
      raise ValueError('An Input layer should be passed either '
                       'a `batch_input_shape` or an `input_shape`.')

    self.batch_input_shape = batch_input_shape
    self.input_dtype = input_dtype or tf.float32
コード例 #2
0
ファイル: copy.py プロジェクト: bowenliu16/deepchem
  def __init__(self, **kwargs):
    """Shared constructor logic for layers.

    Accepted keyword arguments: 'input_shape', 'batch_input_shape',
    'input_dtype', 'name', 'trainable'.  The shape/dtype keywords are
    only meaningful for input layers; do not pass them to non-input
    layers.
    """
    # Respect properties that a child class may already have set; fall
    # back to neutral defaults otherwise.  The mutable defaults are
    # rebuilt on every call, so instances never share them.
    fallback_defaults = (('input_spec', None),
                         ('uses_learning_phase', False),
                         ('_trainable_weights', []),
                         ('_non_trainable_weights', []),
                         ('losses', []),
                         ('constraints', {}))  # dict {tensor: constraint instance}
    for attr_name, default_value in fallback_defaults:
      if not hasattr(self, attr_name):
        setattr(self, attr_name, default_value)

    # Topology bookkeeping, filled via successive calls to
    # self.add_inbound_node().
    self.inbound_nodes = []
    self.outbound_nodes = []
    self.built = False

    allowed_kwargs = {'input_shape', 'batch_input_shape', 'input_dtype',
                      'name', 'trainable'}
    for key in kwargs:
      if key not in allowed_kwargs:
        raise TypeError('Keyword argument not understood:', key)

    layer_name = kwargs.get('name')
    if not layer_name:
      # Default name: lowercased class name plus a unique numeric suffix.
      prefix = self.__class__.__name__.lower()
      layer_name = prefix + '_' + str(model_ops.get_uid(prefix))
    self.name = layer_name

    self.trainable = kwargs.get('trainable', True)
    if 'batch_input_shape' in kwargs or 'input_shape' in kwargs:
      # An input layer will be created and inserted before this layer.
      if 'batch_input_shape' in kwargs:
        shape = tuple(kwargs['batch_input_shape'])
      else:
        shape = (None,) + tuple(kwargs['input_shape'])
      self.batch_input_shape = shape
      self.input_dtype = kwargs.get('input_dtype', tf.float32)
コード例 #3
0
    def __init__(self, **kwargs):
        """Shared constructor logic for layers.

        Accepted keyword arguments: 'input_shape', 'batch_input_shape',
        'input_dtype', 'name', 'trainable'.  The shape/dtype keywords
        are only meaningful for input layers.
        """
        # These properties should have been set
        # by the child class, as appropriate.
        if not hasattr(self, 'input_spec'):
            self.input_spec = None
        if not hasattr(self, 'uses_learning_phase'):
            self.uses_learning_phase = False

        # These lists will be filled via successive calls
        # to self.add_inbound_node().
        self.inbound_nodes = []
        self.outbound_nodes = []

        # These properties will be set upon call of self.build(),
        # which itself will be called upon self.add_inbound_node if necessary.
        if not hasattr(self, '_trainable_weights'):
            self._trainable_weights = []
        if not hasattr(self, '_non_trainable_weights'):
            self._non_trainable_weights = []
        if not hasattr(self, 'losses'):
            self.losses = []
        if not hasattr(self, 'constraints'):
            self.constraints = {}  # dict {tensor: constraint instance}
        self.built = False

        # These properties should be set by the user via keyword arguments.
        # note that 'input_dtype', 'input_shape' and 'batch_input_shape'
        # are only applicable to input layers: do not pass these keywords
        # to non-input layers.
        allowed_kwargs = {
            'input_shape', 'batch_input_shape', 'input_dtype', 'name',
            'trainable'
        }
        for kwarg in kwargs.keys():
            if kwarg not in allowed_kwargs:
                raise TypeError('Keyword argument not understood:', kwarg)
        name = kwargs.get('name')
        if not name:
            # Default name: lowercased class name plus a unique suffix
            # (presumably per-prefix counter from model_ops.get_uid).
            prefix = self.__class__.__name__.lower()
            name = prefix + '_' + str(model_ops.get_uid(prefix))
        self.name = name

        self.trainable = kwargs.get('trainable', True)
        if 'batch_input_shape' in kwargs or 'input_shape' in kwargs:
            # In this case we will create an input layer
            # to insert before the current layer
            if 'batch_input_shape' in kwargs:
                batch_input_shape = tuple(kwargs['batch_input_shape'])
            elif 'input_shape' in kwargs:
                # Prepend an unknown batch dimension.
                batch_input_shape = (None, ) + tuple(kwargs['input_shape'])
            self.batch_input_shape = batch_input_shape
            input_dtype = kwargs.get('input_dtype', tf.float32)
            self.input_dtype = input_dtype
コード例 #4
0
    def __init__(self, name=None, logdir=None):
        """Construct a Sequential model.

        Parameters
        ----------
        name: str, optional
          Model name; defaults to "sequential_<uid>".
        logdir: str, optional
          Checkpoint directory, forwarded to the parent constructor as
          model_dir (the parent presumably stores it as self.model_dir,
          which is read below — confirm against the parent class).
        """
        # Bug fix: the call previously was
        # `super(Sequential, self).__init__(self, model_dir=logdir)` —
        # the bound parent __init__ already receives `self`, so the
        # extra positional argument shifted into the parent's first
        # explicit parameter.
        super(Sequential, self).__init__(model_dir=logdir)
        self.layers = []  # stack of layers
        self.outputs = None  # tensors (length 1)

        if not name:
            prefix = 'sequential_'
            name = prefix + str(model_ops.get_uid(prefix))
        self.name = name
        self.graph = tf.Graph()

        # Each model owns a private graph and a session bound to it.
        config = tf.ConfigProto(allow_soft_placement=True)
        self.session = tf.Session(graph=self.graph, config=config)
        # Path to save checkpoint files
        self._save_path = os.path.join(self.model_dir, 'model.ckpt')
コード例 #5
0
ファイル: sequential.py プロジェクト: joegomes/deepchem
  def __init__(self, name=None, logdir=None):
    """Construct a Sequential model.

    Parameters
    ----------
    name: str, optional
      Model name; defaults to "sequential_<uid>".
    logdir: str, optional
      Directory for checkpoint files; created if missing.  When None,
      a fresh temporary directory is used.
    """
    self.layers = []  # stack of layers
    self.outputs = None  # tensors (length 1)

    if not name:
      prefix = 'sequential_'
      name = prefix + str(model_ops.get_uid(prefix))
    self.name = name
    self.graph = tf.Graph()

    # Each model owns a private graph and a session bound to it.
    config = tf.ConfigProto(allow_soft_placement=True)
    self.session = tf.Session(graph=self.graph, config=config)
    # Path to save checkpoint files
    if logdir is not None:
      # exist_ok avoids the check-then-create race of the previous
      # os.path.exists()/os.makedirs() pair.
      os.makedirs(logdir, exist_ok=True)
    else:
      logdir = tempfile.mkdtemp()
    self.logdir = logdir
    self._save_path = os.path.join(self.logdir, 'model.ckpt')
コード例 #6
0
ファイル: sequential.py プロジェクト: XericZephyr/deepchem
    def __init__(self, name=None, logdir=None):
        """Construct a Sequential model.

        `name` defaults to "sequential_<uid>"; `logdir` is created if
        missing, or replaced by a fresh temp directory when None.
        """
        self.layers = []  # stack of layers
        self.outputs = None  # tensors (length 1)

        if not name:
            name = 'sequential_' + str(model_ops.get_uid('sequential_'))
        self.name = name

        # Each model owns a private graph and a session bound to it.
        self.graph = tf.Graph()
        self.session = tf.Session(
            graph=self.graph,
            config=tf.ConfigProto(allow_soft_placement=True))

        # Resolve the checkpoint directory.
        if logdir is None:
            logdir = tempfile.mkdtemp()
        elif not os.path.exists(logdir):
            os.makedirs(logdir)
        self.logdir = logdir
        self._save_path = os.path.join(self.logdir, 'model.ckpt')
コード例 #7
0
  def __init__(self, **kwargs):
    """Minimal shared layer constructor.

    Accepted keyword arguments: 'input_shape', 'batch_input_shape',
    'input_dtype', 'name', 'trainable'.  The shape/dtype keywords are
    only applicable to input layers.
    """
    # Honor flags a child class may have configured already.
    if not hasattr(self, 'uses_learning_phase'):
      self.uses_learning_phase = False
    if not hasattr(self, 'losses'):
      self.losses = []

    allowed_kwargs = {'input_shape', 'batch_input_shape', 'input_dtype',
                      'name', 'trainable'}
    unknown = [key for key in kwargs if key not in allowed_kwargs]
    if unknown:
      raise TypeError('Keyword argument not understood:', unknown[0])

    name = kwargs.get('name')
    if not name:
      # Default name: lowercased class name plus a unique numeric suffix.
      prefix = self.__class__.__name__.lower()
      name = prefix + '_' + str(model_ops.get_uid(prefix))
    self.name = name

    self.trainable = kwargs.get('trainable', True)

    shape = None
    if 'batch_input_shape' in kwargs:
      shape = tuple(kwargs['batch_input_shape'])
    elif 'input_shape' in kwargs:
      # Prepend an unknown batch dimension.
      shape = (None,) + tuple(kwargs['input_shape'])
    if shape is not None:
      # An input layer will be inserted before the current layer.
      self.batch_input_shape = shape
      self.input_dtype = kwargs.get('input_dtype', tf.float32)
コード例 #8
0
ファイル: copy.py プロジェクト: bowenliu16/deepchem
    def __init__(self, input_shape=None, batch_input_shape=None,
                 input_dtype=None, input_tensor=None, name=None):
      """Create an input layer, optionally wrapping an existing tensor.

      Parameters
      ----------
      input_shape: tuple, optional
        Per-sample shape (no batch dimension); mutually exclusive with
        batch_input_shape.
      batch_input_shape: tuple, optional
        Full shape including the batch dimension.
      input_dtype: optional
        Placeholder dtype; defaults to tf.float32, or the dtype of
        input_tensor when one is supplied.
      input_tensor: optional
        Existing tensor to use instead of creating a placeholder; its
        shape is inferred via model_ops.int_shape when possible.
      name: str, optional
        Layer name; defaults to "input_<uid>".
      """
      # Input layers are prebuilt, untrainable, and weight-free.
      self.input_spec = None
      self.uses_learning_phase = False
      self.trainable = False
      self.built = True
      self._trainable_weights = []
      self._non_trainable_weights = []
      self.inbound_nodes = []
      self.outbound_nodes = []
      self.constraints = {}

      if not name:
        prefix = 'input'
        # TODO(rbharath): Keras uses a global var here to maintain
        # unique counts. This seems dangerous. How does tensorflow handle?
        name = prefix + '_' + str(model_ops.get_uid(prefix))
      self.name = name

      if input_shape and batch_input_shape:
        raise ValueError('Only provide the input_shape OR '
                         'batch_input_shape argument to '
                         'InputLayer, not both at the same time.')
      if input_tensor is not None:
        # Attempt automatic input shape inference.
        try:
          batch_input_shape = model_ops.int_shape(input_tensor)
        except Exception:
          # Bug fix: was a bare `except:`, which would also swallow
          # KeyboardInterrupt/SystemExit.  Inference failure is only
          # fatal when no explicit shape was supplied.
          if not input_shape and not batch_input_shape:
            raise ValueError('InputLayer was provided '
                             'an input_tensor argument, '
                             'but its input shape cannot be '
                             'automatically inferred. '
                             'You should pass an input_shape or '
                             'batch_input_shape argument.')
      if not batch_input_shape:
        if not input_shape:
          raise ValueError('An Input layer should be passed either '
                           'a `batch_input_shape` or an `input_shape`.')
        else:
          # Prepend an unknown batch dimension.
          batch_input_shape = (None,) + tuple(input_shape)
      else:
        batch_input_shape = tuple(batch_input_shape)

      if not input_dtype:
        if input_tensor is None:
          input_dtype = tf.float32
        else:
          input_dtype = model_ops.get_dtype(input_tensor)

      self.batch_input_shape = batch_input_shape
      self.input_dtype = input_dtype

      if input_tensor is None:
        input_tensor = tf.placeholder(dtype=input_dtype,
                                      shape=batch_input_shape,
                                      name=self.name)
      else:
        # Annotate the user-provided tensor with the resolved shape.
        input_tensor._keras_shape = batch_input_shape
      # Create an input node to add to self.outbound_node
      # and set output_tensors' _keras_history.
      input_tensor._uses_learning_phase = False
      input_tensor._keras_history = (self, 0, 0)
      Node(self,
           inbound_layers=[],
           node_indices=[],
           tensor_indices=[],
           input_tensors=[input_tensor],
           output_tensors=[input_tensor],
           input_shapes=[batch_input_shape],
           output_shapes=[batch_input_shape])
コード例 #9
0
    def __init__(self,
                 input_shape=None,
                 batch_input_shape=None,
                 input_dtype=None,
                 input_tensor=None,
                 name=None):
        """Create an input layer, optionally wrapping an existing tensor.

        Exactly one of `input_shape` (per-sample shape) or
        `batch_input_shape` (full shape with batch dimension) must be
        resolvable; when `input_tensor` is given its shape is inferred
        via model_ops.int_shape when possible.  `input_dtype` defaults
        to tf.float32 (or the dtype of `input_tensor`); `name` defaults
        to "input_<uid>".
        """
        # Input layers are prebuilt, untrainable, and weight-free.
        self.input_spec = None
        self.uses_learning_phase = False
        self.trainable = False
        self.built = True
        self._trainable_weights = []
        self._non_trainable_weights = []
        self.inbound_nodes = []
        self.outbound_nodes = []
        self.constraints = {}

        if not name:
            prefix = 'input'
            # TODO(rbharath): Keras uses a global var here to maintain
            # unique counts. This seems dangerous. How does tensorflow handle?
            name = prefix + '_' + str(model_ops.get_uid(prefix))
        self.name = name

        if input_shape and batch_input_shape:
            raise ValueError('Only provide the input_shape OR '
                             'batch_input_shape argument to '
                             'InputLayer, not both at the same time.')
        if input_tensor is not None:
            # Attempt automatic input shape inference.
            try:
                batch_input_shape = model_ops.int_shape(input_tensor)
            # NOTE(review): bare `except:` also catches KeyboardInterrupt
            # and SystemExit — consider narrowing to `except Exception:`.
            except:
                if not input_shape and not batch_input_shape:
                    raise ValueError('InputLayer was provided '
                                     'an input_tensor argument, '
                                     'but its input shape cannot be '
                                     'automatically inferred. '
                                     'You should pass an input_shape or '
                                     'batch_input_shape argument.')
        if not batch_input_shape:
            if not input_shape:
                raise ValueError('An Input layer should be passed either '
                                 'a `batch_input_shape` or an `input_shape`.')
            else:
                # Prepend an unknown batch dimension.
                batch_input_shape = (None, ) + tuple(input_shape)
        else:
            batch_input_shape = tuple(batch_input_shape)

        if not input_dtype:
            if input_tensor is None:
                input_dtype = tf.float32
            else:
                input_dtype = model_ops.get_dtype(input_tensor)

        self.batch_input_shape = batch_input_shape
        self.input_dtype = input_dtype

        if input_tensor is None:
            input_tensor = tf.placeholder(dtype=input_dtype,
                                          shape=batch_input_shape,
                                          name=self.name)
        else:
            # Annotate the user-provided tensor with the resolved shape.
            input_tensor._keras_shape = batch_input_shape
        # Create an input node to add to self.outbound_node
        # and set output_tensors' _keras_history.
        input_tensor._uses_learning_phase = False
        input_tensor._keras_history = (self, 0, 0)
        Node(self,
             inbound_layers=[],
             node_indices=[],
             tensor_indices=[],
             input_tensors=[input_tensor],
             output_tensors=[input_tensor],
             input_shapes=[batch_input_shape],
             output_shapes=[batch_input_shape])