Example #1
# Imports needed to run this excerpt standalone (Keras 2.x, TF 1.x backend)
import six
import tensorflow as tf
from keras import backend as K
from keras import optimizers
from keras.optimizers import deserialize


def get(identifier):
    """Retrieves a Keras Optimizer instance.
    # Arguments
        identifier: Optimizer identifier, one of
            - String: name of an optimizer
            - Dictionary: configuration dictionary.
            - Keras Optimizer instance (it will be returned unchanged).
            - TensorFlow Optimizer instance
                (it will be wrapped as a Keras Optimizer).
    # Returns
        A Keras Optimizer instance.
    # Raises
        ValueError: If `identifier` cannot be interpreted.
    """
    if K.backend() == 'tensorflow':
        # Wrap TF optimizer instances
        if isinstance(identifier, tf.train.Optimizer):
            return optimizers.TFOptimizer(identifier)
    if isinstance(identifier, dict):
        return deserialize(identifier)
    elif isinstance(identifier, six.string_types):
        config = {'class_name': str(identifier), 'config': {}}
        return deserialize(config)
    elif isinstance(identifier, optimizers.Optimizer):
        return identifier
    else:
        raise ValueError('Could not interpret optimizer identifier: ' +
                         str(identifier))
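
For reference, each accepted identifier type resolves as follows (a sketch assuming Keras 2.x on the TensorFlow 1.x backend):

import tensorflow as tf
from keras import optimizers

opt_a = optimizers.get('adam')                    # string -> fresh Adam instance
opt_b = optimizers.get({'class_name': 'SGD',
                        'config': {'lr': 0.01}})  # config dict -> SGD(lr=0.01)
opt_c = optimizers.get(optimizers.RMSprop())      # Keras instance, returned as-is
opt_d = optimizers.get(tf.train.AdamOptimizer())  # TF optimizer -> TFOptimizer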
Example #2
import numpy as np
import pytest
from keras import optimizers
from keras.layers import Dense
from keras.models import Sequential

num_classes = 2  # assumed module-level constant (2 in the Keras test suite)


def test_tfoptimizer():
    from keras import constraints
    import tensorflow as tf
    if tf.__version__.startswith('1.'):
        optimizer = optimizers.TFOptimizer(tf.train.AdamOptimizer())
    else:
        optimizer = tf.keras.optimizers.Adam()

    model = Sequential()
    model.add(
        Dense(num_classes,
              input_shape=(3, ),
              kernel_constraint=constraints.MaxNorm(1)))
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    model.fit(np.random.random((5, 3)),
              np.random.random((5, num_classes)),
              epochs=1,
              batch_size=5,
              verbose=0)

    if tf.__version__.startswith('1.'):
        with pytest.raises(NotImplementedError):
            optimizer.weights
        with pytest.raises(NotImplementedError):
            optimizer.get_config()
        with pytest.raises(NotImplementedError):
            optimizer.from_config(None)


def test_tfoptimizer_pass_correct_named_params_to_native_tensorflow_optimizer():
    from keras import constraints
    from tensorflow import train

    class MyTfOptimizer(train.Optimizer):
        wrapping_optimizer = train.AdamOptimizer()

        def compute_gradients(self, loss, **kwargs):
            return super(MyTfOptimizer, self).compute_gradients(loss, **kwargs)

        def apply_gradients(self, grads_and_vars, **kwargs):
            return self.wrapping_optimizer.apply_gradients(
                grads_and_vars, **kwargs)

    my_tf_optimizer = MyTfOptimizer(use_locking=False, name='MyTfOptimizer')
    optimizer = optimizers.TFOptimizer(my_tf_optimizer)
    model = Sequential()
    model.add(
        Dense(num_classes,
              input_shape=(3, ),
              kernel_constraint=constraints.MaxNorm(1)))
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    model.fit(np.random.random((5, 3)),
              np.random.random((5, num_classes)),
              epochs=1,
              batch_size=5,
              verbose=0)
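
Both tests rely on TFOptimizer delegating gradient computation and application to the wrapped native optimizer. A simplified sketch of that delegation (the real implementation in keras/optimizers.py also maintains iteration bookkeeping and more edge-case handling):

import keras.backend as K


class MinimalTFWrapper(object):
    """Illustrative reduction of TFOptimizer's core behavior, not the real class."""

    def __init__(self, tf_optimizer):
        self.optimizer = tf_optimizer
        self.iterations = K.variable(0, dtype='int64', name='iterations')

    def get_updates(self, loss, params):
        # Both steps are forwarded to the wrapped TF optimizer, so named
        # parameters (e.g. use_locking, name) stay with the native instance
        grads = self.optimizer.compute_gradients(loss, var_list=params)
        return [self.optimizer.apply_gradients(grads,
                                               global_step=self.iterations)]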
Example #4
    # NOTE: layer_0_param/layer_1_param are used below but were missing from
    # the original signature; added here with None defaults (cf. Example #6)
    def __init__(self, feature_size, hidden_size, weight_decay, learning_rate,
                 layer_0_param=None, layer_1_param=None):
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.weight_decay = weight_decay
        self.learning_rate = learning_rate

        self.model = Sequential()
        self.model.add(
            Dense(self.hidden_size,
                  input_dim=self.feature_size,
                  activation='tanh',
                  weights=layer_0_param,
                  kernel_regularizer=regularizers.l2(self.weight_decay),
                  kernel_initializer=initializers.random_normal(stddev=0.01)))
        self.model.add(
            Dense(1,
                  input_dim=self.hidden_size,
                  weights=layer_1_param,
                  kernel_regularizer=regularizers.l2(self.weight_decay)))
        self.model.add(Reshape([-1]))
        self.model.add(Activation('sigmoid'))
        self.model.summary()
        self.model.compile(loss='binary_crossentropy',
                           optimizer=optimizers.TFOptimizer(
                               tf.train.GradientDescentOptimizer(
                                   self.learning_rate)),
                           metrics=['accuracy'])
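
This class (like Example #6) warm-starts layers through the `weights=` argument, which expects a [kernel, bias] pair matching the shapes the layer builds with. A minimal illustration with made-up sizes:

import numpy as np
from keras.layers import Dense
from keras.models import Sequential

# A Dense(5) layer fed 10 features builds a (10, 5) kernel and a (5,) bias
kernel = np.random.normal(scale=0.01, size=(10, 5)).astype('float32')
bias = np.zeros(5, dtype='float32')
model = Sequential([Dense(5, input_dim=10, weights=[kernel, bias])])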
Example #5
import tensorflow as tf
from keras import losses, optimizers
from keras.layers import Activation, AlphaDropout, Dense, Dropout, PReLU
from keras.models import Sequential


def selu_network(input_dim, output_dim, retmodel):
    """Builds and compiles an hourglass-shaped SELU network.

    # Returns
        A compiled Keras model, or `(model, x, y, lossfun, tfOptimizer)`
        when `retmodel` is falsy.
    """

    # Hyperparameters to be optimized at some point
    nnProps = {
        'nlayers': 2,
        'mlayers': 2,
        'choke': 40,
        'start': 100,
        'end': 50,
        'dropout_rate': .1,
        'activation': 'selu',
        "kernel_initializer": 'lecun_normal',
        'optimizer': 'adam'
    }

    # `hourglass` is a helper from the source module that returns the list of
    # layer widths (a plausible sketch follows this example)
    layers = hourglass(nnProps['nlayers'], nnProps['mlayers'],
                       nnProps['choke'], nnProps['start'], nnProps['end'])
    model = Sequential()

    for i, size in enumerate(layers):
        if i == 0:
            # Add the SELU input layer first
            model.add(
                Dense(size,
                      kernel_initializer=nnProps['kernel_initializer'],
                      input_shape=(input_dim, )))
            model.add(Activation(nnProps['activation']))
            model.add(AlphaDropout(nnProps['dropout_rate']))
        else:
            # Hidden layer with rectified linear units
            # NOTE: the original loop computed `size` but never used it past
            # the first layer; a Dense layer of that width is presumably
            # intended here
            model.add(Dense(size,
                            kernel_initializer=nnProps['kernel_initializer']))
            model.add(
                PReLU(alpha_initializer='zeros',
                      alpha_regularizer=None,
                      alpha_constraint=None,
                      shared_axes=None))
            model.add(Dropout(nnProps['dropout_rate']))
    # Output layer is categorical
    model.add(Dense(output_dim, activation='softmax'))

    # Symbolic input placeholder and model output (TF 1.x graph mode)
    x = tf.placeholder(tf.float32, shape=(None, input_dim))
    y = model(x)
    # Wrap a TensorFlow optimizer for Keras; the optimizer must be
    # instantiated before wrapping (the original passed the bare class,
    # which fails once training starts)
    tfOptimizer = tf.train.AdamOptimizer()
    optimizer = optimizers.TFOptimizer(tfOptimizer)
    lossfun = losses.categorical_crossentropy

    model.compile(loss=lossfun, optimizer=optimizer, metrics=['accuracy'])

    if retmodel:
        return model
    else:
        return model, x, y, lossfun, tfOptimizer
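
The example above calls a `hourglass` helper that is not shown. Purely as an assumption about its intent, here is a plausible sketch returning layer widths that taper from `start` to `choke` and widen back out to `end`:

import numpy as np


def hourglass(nlayers, mlayers, choke, start, end):
    # Hypothetical reconstruction, not the original helper
    down = np.linspace(start, choke, nlayers).astype(int).tolist()
    up = np.linspace(choke, end, mlayers).astype(int).tolist()
    return down + up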
Example #6
    def __init__(self,
                 feature_size,
                 hidden_size,
                 weight_decay,
                 learning_rate,
                 temperature=1.0,
                 layer_0_param=None,
                 layer_1_param=None):
        self.feature_size = feature_size
        self.hidden_size = hidden_size
        self.weight_decay = weight_decay
        self.learning_rate = learning_rate
        self.temperature = temperature

        self.pred_data = Input(shape=(None, self.feature_size))
        self.reward = Input(shape=(None, ))
        self.important_sampling = Input(shape=(None, ))

        self.Dense_1_result = Dense(self.hidden_size,
                                    input_dim=self.feature_size,
                                    activation='tanh',
                                    weights=layer_0_param,
                                    kernel_regularizer=regularizers.l2(
                                        self.weight_decay))(self.pred_data)
        self.Dense_2_result = Dense(1,
                                    input_dim=self.hidden_size,
                                    weights=layer_1_param,
                                    kernel_regularizer=regularizers.l2(
                                        self.weight_decay))(
                                            self.Dense_1_result)

        # Given batch query-url pairs, calculate the matching score
        # For all urls of one query
        self.score = Lambda(lambda x: x / self.temperature)(
            self.Dense_2_result)
        self.score = Reshape([-1])(self.score)
        self.prob = Activation('softmax')(self.score)

        self.model = Model(
            inputs=[self.pred_data, self.reward, self.important_sampling],
            outputs=[self.prob])

        self.model.summary()
        self.model.compile(
            loss=self.__loss(self.reward, self.important_sampling),
            optimizer=optimizers.TFOptimizer(
                tf.train.GradientDescentOptimizer(self.learning_rate)),
            metrics=['accuracy'])
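
The compile call obtains its loss from the private `__loss` factory (not shown), which closes over the auxiliary `reward` and `important_sampling` input tensors. A hedged sketch of what such a factory typically looks like in REINFORCE-style training:

import keras.backend as K


def make_weighted_loss(reward, important_sampling):
    # Hypothetical stand-in for the class's __loss factory
    def loss(y_true, y_pred):
        log_prob = K.log(K.clip(y_pred, K.epsilon(), 1.0))
        return -K.mean(y_true * log_prob * reward * important_sampling)
    return loss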
Example #7
import numpy as np
import pytest
from keras import optimizers
from keras.layers import Dense
from keras.models import Sequential

num_classes = 2  # assumed module-level constant, as in the Keras test suite


def test_tfoptimizer():
    from keras import constraints
    from tensorflow import train
    optimizer = optimizers.TFOptimizer(train.AdamOptimizer())
    model = Sequential()
    model.add(Dense(num_classes, input_shape=(3,), kernel_constraint=constraints.MaxNorm(1)))
    model.compile(loss='mean_squared_error', optimizer=optimizer)
    model.fit(np.random.random((5, 3)), np.random.random((5, num_classes)),
              epochs=1, batch_size=5, verbose=0)
    # not supported
    with pytest.raises(NotImplementedError):
        optimizer.weights
    with pytest.raises(NotImplementedError):
        optimizer.get_config()
    with pytest.raises(NotImplementedError):
        optimizer.from_config(None)


def test_tf_optimizer():
    # NormalizedOptimizer is an external optimizer wrapper (not part of
    # Keras); wrapping fails because TFOptimizer does not implement
    # get_config()
    with pytest.raises(NotImplementedError):
        import tensorflow as tf
        tf_opt = optimizers.TFOptimizer(tf.train.GradientDescentOptimizer(0.1))
        NormalizedOptimizer(tf_opt, normalization='l2')
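
The second test documents a hard limitation: TFOptimizer implements neither `get_config()` nor `weights`, so any wrapper that serializes its inner optimizer must reject it. An illustrative probe (not part of Keras):

def supports_serialization(optimizer):
    # Returns True if the optimizer can produce a config dict
    try:
        optimizer.get_config()
        return True
    except NotImplementedError:
        return False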
Example #9
    def __init__(self, vocab, config, hps):
        super(SummarizationModel, self).__init__(config)
        self.__name = 'pointer_generator_summarizer'
        self.config = config
        self.hps = hps

        self.mode = config['mode']
        self.use_coverage = config['use_coverage']
        self.pointer_gen = config['pointer_gen']
        self.embed_trainable = config['train_embed']
        self.embedding_size = config['embed_size']
        self.vsize = config['vocab_size']
        self.rand_unif_init_mag = config['rand_unif_init_mag']
        self.trunc_norm_init_std = config['trunc_norm_init_std']
        self.hidden_units = self.config['hidden_units']
        self.cov_loss_wt = self.config['cov_loss_wt']

        # Initializers:
        self.rand_unif_init = RandomUniform(minval=-self.rand_unif_init_mag,
                                            maxval=self.rand_unif_init_mag,
                                            seed=123)
        self.trunc_norm_init = TruncatedNormal(stddev=self.trunc_norm_init_std)
        # Optimizers:
        self.adg = optimizers.TFOptimizer(
            K.tf.train.AdagradOptimizer(
                self.hps.lr,
                initial_accumulator_value=self.hps.adagrad_init_acc))
        # Layers
        self.Emb = Embedding(self.vsize,
                             self.embedding_size,
                             weights=config['embed'],
                             trainable=self.embed_trainable)

        # different dictionary for source and target

        # Bi-directional lstm encoder, return (output, states)
        # Dimension: 2*hidden_units
        # concatenated forward and backward vectors
        self.Encoder = Bidirectional(
            CuDNNLSTM(self.hidden_units,
                      return_state=True,
                      return_sequences=True,
                      kernel_initializer=self.rand_unif_init))
        # Decoder is not bi-directional, perform linear reduction...
        # Dense_layer_dimension=encoder_hidden_units

        # Encoder states and output tensors are separated...
        # to initialize decoder

        # Decoder cell input: [input, state_h, state_c]
        self.DecoderCell = LSTMCell(self.hidden_units,
                                    kernel_initializer=self.rand_unif_init,
                                    bias_initializer="zeros",
                                    recurrent_initializer=self.rand_unif_init)
        # Decoder output projector
        # to probabilities[word_index]
        self.DecoderOutputProjector = Dense(
            self.vsize,
            kernel_initializer=self.trunc_norm_init,
            bias_initializer=self.trunc_norm_init,
            activation=None)
        self.ConcatenateAxis1 = Concatenate(axis=1)
        self.ConcatenateLastDim = Concatenate(axis=-1)
        self.StackSecondDim = Lambda(lambda x: K.tf.stack(x, axis=1))
        self.SoftmaxforScore = Softmax(axis=-1)

        self._batch_size = None
        self._enc_batch = None
        self._enc_lens = None
        self._enc_padding_mask = None
        self._enc_batch_extend_vocab = None
        self._max_art_oovs = None
        self._max_art_oovs_inp = None
        self._dec_batch = None
        self._target_batch = None
        self._dec_padding_mask = None
        self._dec_in_state = None
        self._enc_states = None
        self._dec_out_state = None
        self.p_gens = None
        self.prev_coverage = None
        self.coverage = None
        self._coverage_loss = None

        self.check_list = []

        # Sanity-check the configuration; the original ignored the result
        self.check()
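
Standalone, the wrapped Adagrad above is constructed as follows; the learning rate and accumulator seed come from `hps` in the original, and the 0.15/0.1 values here are illustrative assumptions (they match common pointer-generator settings):

import tensorflow as tf
from keras import optimizers

adagrad = optimizers.TFOptimizer(
    tf.train.AdagradOptimizer(0.15, initial_accumulator_value=0.1))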