Example #1
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
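
The serialization round trip this test exercises distills to a few lines; a minimal sketch, assuming a plain Keras 2 install and arbitrary layer sizes:

from keras.models import Sequential, model_from_json
from keras.layers import Dense

model = Sequential([Dense(8, input_shape=(4,), activation='relu'),
                    Dense(2, activation='softmax')])

# Architecture-only round trip: the clone gets fresh random weights.
clone = Sequential.from_config(model.get_config())

# Full round trip: architecture via JSON, parameters via set_weights.
clone = model_from_json(model.to_json())
clone.set_weights(model.get_weights())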
Example #2
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_classes))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_classes))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #3
def test_merge_sum():
    (x_train, y_train), (x_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(num_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(num_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([x_train, x_train], y_train, batch_size=batch_size, epochs=epochs, verbose=0, validation_data=([x_test, x_test], y_test))
    model.fit([x_train, x_train], y_train, batch_size=batch_size, epochs=epochs, verbose=0, validation_split=0.1)
    model.fit([x_train, x_train], y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit([x_train, x_train], y_train, batch_size=batch_size, epochs=epochs, verbose=0, shuffle=False)

    loss = model.evaluate([x_test, x_test], y_test, verbose=0)

    model.predict([x_test, x_test], verbose=0)
    model.predict_classes([x_test, x_test], verbose=0)
    model.predict_proba([x_test, x_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(num_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(num_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([x_test, x_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
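Example #4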
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #5
def test_vector_classification():
    '''
    Classify random float vectors into 2 classes with logistic regression
    using a 2-layer neural network with ReLU hidden units.
    '''
    (x_train, y_train), (x_test,
                         y_test) = get_test_data(num_train=500,
                                                 num_test=200,
                                                 input_shape=(20, ),
                                                 classification=True,
                                                 num_classes=num_classes)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Test with Sequential API
    model = Sequential([
        layers.Dense(16, input_shape=(x_train.shape[-1], ), activation='relu'),
        layers.Dense(8),
        layers.Activation('relu'),
        layers.Dense(num_classes, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(1e-3),
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train,
                        y_train,
                        epochs=15,
                        batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert (history.history['val_accuracy'][-1] > 0.8)
    config = model.get_config()
    model = Sequential.from_config(config)
Example #6
def randomize_layers(nb_layers, old_model, model_type='Model'):
    """ Randomize the n top layers of a model.
    In lack of a better solution, for now this function generate a new model, 
    and then copy the weigts of the old model."""

    config = old_model.get_config()
    if model_type == 'Model':
        new_model = Model.from_config(config)
    elif model_type == 'Sequential':
        new_model = Sequential.from_config(config)
    else:
        raise ValueError("Wrong parameter: model_type can only be 'Sequential' or 'Model'.")

    if nb_layers == -1:
        nb_layers = len(new_model.layers)
    else:
        nb_layers = min(nb_layers, len(new_model.layers))

    # Copy the weights of the non-randomized layers.
    for layer_i in range(len(new_model.layers) - nb_layers):
        new_model.layers[layer_i].set_weights(
            old_model.layers[layer_i].get_weights())

    del old_model

    return new_model
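
A hedged usage sketch: base_model below is a hypothetical trained Sequential model, and the top two layers end up re-initialized because from_config rebuilds every layer with fresh weights.

# Hypothetical usage of randomize_layers; base_model is assumed to be a
# trained Sequential model defined elsewhere.
partially_reset = randomize_layers(2, base_model, model_type='Sequential')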
Example #7
    def _build_models(self, batch_size, embedding_size, rnn_size, num_layers):
        model = Sequential()
        model.add(
            Embedding(self.vectorizer.vocab_size,
                      embedding_size,
                      batch_input_shape=(batch_size, None)))
        for layer in range(num_layers):
            model.add(LSTM(rnn_size, stateful=True, return_sequences=True))
            model.add(Dropout(0.2))
        model.add(
            TimeDistributed(
                Dense(self.vectorizer.vocab_size, activation='softmax')))
        # With sparse_categorical_crossentropy we can leave the labels as
        # integers instead of one-hot vectors
        model.compile(loss='sparse_categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])
        model.summary()

        # Keep the full-batch model for training and a batch_size-1 copy for sampling
        self.train_model = model
        config = model.get_config()
        config[0]['config']['batch_input_shape'] = (1, None)
        self.sample_model = Sequential.from_config(config)
        self.sample_model.trainable = False
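
Sequential.from_config copies only the architecture, so the batch-size-1 sample model above starts with fresh weights. A minimal sketch of the sync step such a class would presumably run before sampling (the method name _sync_sample_model is an assumption):

    def _sync_sample_model(self):
        # Copy the trained parameters into the batch-size-1 clone; the
        # architectures match, so the weight lists line up one-to-one.
        self.sample_model.set_weights(self.train_model.get_weights())

Example #8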
def create_duplicate_model(model):
    """Create a duplicate keras model."""
    new_model = Sequential.from_config(model.get_config())
    new_model.set_weights(copy.deepcopy(model.get_weights()))
    new_model.compile(loss=model.loss, optimizer=model.optimizer)

    return new_model
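Example #9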
def test_image_classification():
    np.random.seed(1337)
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    config = model.get_config()
    model = Sequential.from_config(config)
Example #10
def test_vector_classification():
    '''
    Classify random float vectors into 2 classes with logistic regression
    using a 2-layer neural network with ReLU hidden units.
    '''
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=(20,),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    # Test with Sequential API
    model = Sequential([
        layers.Dense(16, input_shape=(x_train.shape[-1],), activation='relu'),
        layers.Dense(8),
        layers.Activation('relu'),
        layers.Dense(y_train.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=15, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['val_acc'][-1] > 0.8)
    config = model.get_config()
    model = Sequential.from_config(config)
Example #11
    def __init__(self, num_actions, input_shape, **kwargs):
        # set params
        self.time_step = 0
        self.num_actions = num_actions
        self.discount = kwargs.get('discount', 0.99)
        self.c = kwargs.get(
            'c', 10000)  # how many time steps between target network updates
        self.batch_size = kwargs.get('batch_size', 32)
        self.hist_length = kwargs.get('hist_length', 4)

        # build conv model
        q_model = Sequential()
        q_model.add(
            Convolution2D(32, (8, 8),
                          activation='relu',
                          input_shape=input_shape,
                          dim_ordering='th',
                          strides=(4, 4)))
        q_model.add(BatchNormalization())
        q_model.add(
            Convolution2D(64, (4, 4), activation='relu', strides=(2, 2)))
        q_model.add(BatchNormalization())
        q_model.add(
            Convolution2D(64, (3, 3), activation='relu', strides=(1, 1)))
        q_model.add(BatchNormalization())
        q_model.add(Flatten())
        q_model.add(Dense(512, activation='relu'))
        q_model.add(BatchNormalization())
        q_model.add(Dense(num_actions, activation='linear'))
        q_model.compile(optimizer='RMSprop', loss=huber_loss)

        self.q_model = q_model
        self.q_target_model = Sequential.from_config(
            q_model.get_config())  # create target model
Example #12
def test_temporal_classification():
    '''
    Classify temporal sequences of float numbers
    of length 3 into 2 classes using
    single layer of GRU units and softmax applied
    to the last activations of the units
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(layers.GRU(8,
                         input_shape=(x_train.shape[1], x_train.shape[2])))
    model.add(layers.Dense(y_train.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=5, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['accuracy'][-1] >= 0.8)
    config = model.get_config()
    model = Sequential.from_config(config)
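Example #13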
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #14
def test_recursive():
    # test layer-like API
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32, ))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1',
                     inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32, )))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)

    # test serialization
    config = seq.get_config()
    new_graph = Sequential.from_config(config)

    seq.summary()
    json_str = seq.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = seq.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #15
    def Initialize(self):
        self.SetStartDate(2019, 1, 1)  # Set Start Date
        self.SetEndDate(2020, 4, 1)  # Set End Date
        self.SetCash(100000)  # Set Strategy Cash

        self.modelBySymbol = {}

        for ticker in ["SPY", "QQQ", "TLT"]:
            symbol = self.AddEquity(ticker).Symbol

            # Read the model saved in the ObjectStore
            if self.ObjectStore.ContainsKey(f'{symbol}_model'):
                modelStr = self.ObjectStore.Read(f'{symbol}_model')
                config = json.loads(modelStr)['config']
                self.modelBySymbol[symbol] = Sequential.from_config(config)
                self.Debug(
                    f'Model for {symbol} successfully retrieved from the ObjectStore'
                )

        # Look-back period for training set
        self.lookback = 30

        # Train the neural network every Monday
        self.Train(self.DateRules.Every(DayOfWeek.Monday),
                   self.TimeRules.AfterMarketOpen("SPY"),
                   self.NeuralNetworkTraining)

        # Place trades every day, 30 minutes after the market opens
        self.Schedule.On(self.DateRules.EveryDay("SPY"),
                         self.TimeRules.AfterMarketOpen("SPY", 30), self.Trade)
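Example #16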
def test_image_classification():
    np.random.seed(1337)
    input_shape = (16, 16, 3)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=500,
                                                         num_test=200,
                                                         input_shape=input_shape,
                                                         classification=True,
                                                         num_classes=4)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential([
        layers.Conv2D(filters=8, kernel_size=3,
                      activation='relu',
                      input_shape=input_shape),
        layers.MaxPooling2D(pool_size=2),
        layers.Conv2D(filters=4, kernel_size=(3, 3),
                      activation='relu', padding='same'),
        layers.GlobalAveragePooling2D(),
        layers.Dense(y_test.shape[-1], activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert history.history['val_acc'][-1] > 0.75
    config = model.get_config()
    model = Sequential.from_config(config)
Example #17
def get_hard_target_model_updates(target, source, dueling=False):
    """Return list of target model update ops.

    These are hard target updates. The source weights are copied
    directly to the target network.

    Parameters
    ----------
    target: keras.models.Model
      The target model. Should have same architecture as source model.
    source: keras.models.Model
      The source model. Should have same architecture as target model.
    dueling: Boolean
      Dueling architecture
    Returns
    -------
    list(tf.Tensor)
      List of tensor update ops.
    """
    config_src = source.get_config()
    weights_src = source.get_weights()
    if dueling: target = Model.from_config(config_src)
    else: target = Sequential.from_config(config_src)
    target.set_weights(weights_src)
    return target
Example #18
    def model(self):
        """
        Model component of the Learner object, implemented as a cached_property
        so that model persistence can be achieved with all of the
        different types of models from different libraries.
        """
        # scikit-learn model definition
        if isinstance(self._model, BaseEstimator):
            model = self._model

        # keras classifier object
        elif 'KerasClassifier' in str(type(self._model)):
            model = self._model

        # keras model definition
        elif isinstance(self._model, dict) and 'config' in self._model:
            from keras.models import Sequential
            from keras.wrappers import scikit_learn
            cl = getattr(scikit_learn, self._model['type'])(lambda: 1)
            cl.model = Sequential.from_config(self._model['config'])
            cl.model.set_weights(self._model['weights'])
            if self._model['type'] == 'KerasClassifier':
                cl.classes_ = self._model['classes']
                cl.n_classes_ = self._model['n_classes']
            model = cl

        # pickle object
        elif isinstance(self._model, basestring):
            model = joblib.load(self._model)
        return model
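
The Keras branch above reads a fixed set of keys from self._model; a hedged sketch of building such a dict from a fitted wrapper, inferred purely from the keys the code reads (clf is a hypothetical fitted KerasClassifier):

serialized = {
    'type': 'KerasClassifier',           # wrapper class name in keras.wrappers.scikit_learn
    'config': clf.model.get_config(),    # architecture
    'weights': clf.model.get_weights(),  # parameters
    'classes': clf.classes_,             # labels seen during fit
    'n_classes': clf.n_classes_,
}

Example #19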
def test_recursive():
    # test layer-like API
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)

    # test serialization
    config = seq.get_config()
    new_graph = Sequential.from_config(config)

    seq.summary()
    json_str = seq.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = seq.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #20
    def fisher_sample(self):
        if self.sampler is None:
            print("preprocessing")
            if self.dico_fisher is None:
                print('you need to compute the fisher information first')
                return
            self.sampler = Sampling(self.build_mean(), self.dico_fisher)
            print('sampling ok')
        config = self.network.get_config()
        if self.network.__class__.__name__ == 'Sequential':
            new_model = Sequential.from_config(config)
        else:
            new_model = Model.from_config(config)
        new_params = self.sampler.sample()
        # tmp_prob = self.sampler.prob(new_params)
        new_model.compile(loss=self.network.loss,
                          optimizer=str.lower(self.network.optimizer.__class__.__name__),
                          metrics=self.network.metrics)
        new_model.set_weights(self.network.get_weights())
        self.copy_weights(new_model, new_params)
        return new_model
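Example #21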
def test_temporal_classification():
    '''
    Classify temporal sequences of float numbers
    of length 3 into 2 classes using
    single layer of GRU units and softmax applied
    to the last activations of the units
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    model = Sequential()
    model.add(layers.GRU(8,
                         input_shape=(x_train.shape[1], x_train.shape[2])))
    model.add(layers.Dense(y_train.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    model.summary()
    history = model.fit(x_train, y_train, epochs=4, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['acc'][-1] >= 0.8)
    config = model.get_config()
    model = Sequential.from_config(config)
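Example #22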
def create_duplicate_model(model):
    """Create a duplicate keras model."""
    new_model = Sequential.from_config(model.get_config())
    new_model.set_weights(copy.deepcopy(model.get_weights()))
    new_model.compile(loss=model.loss, optimizer=model.optimizer)

    return new_model
Example #23
def clone_model(model, custom_objects={}):
    config = model.get_config()
    try:
        clone = Sequential.from_config(config, custom_objects)
    except:
        clone = Model.from_config(config, custom_objects)
    clone.set_weights(model.get_weights())
    return clone
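
A hedged usage sketch: the deep-RL examples elsewhere on this page use exactly this pattern to refresh a frozen target network; online is a hypothetical compiled model.

# Hypothetical usage: periodically rebuild a target network from the
# online network (clone_model copies both architecture and weights).
target = clone_model(online)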
Example #24
def clone_model(model, custom_objects={}):
    config = model.get_config()
    try:
        clone = Sequential.from_config(config, custom_objects)
    except:
        clone = Model.from_config(config, custom_objects)
    clone.set_weights(model.get_weights())
    return clone
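Example #25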
def export_model_to_tensorflow(path_to_trained_keras_model: str):
    print("Loading model for exporting to Protocol Buffer format...")
    model = keras.models.load_model(path_to_trained_keras_model)

    sess = K.get_session()

    # serialize the model and get its weights, for quick re-building
    config = model.get_config()
    weights = model.get_weights()

    # re-build a model where the learning phase is now hard-coded to 0
    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = os.path.abspath(os.path.join("export", "simple"))  # where to save the exported graph
    os.makedirs(export_path)
    checkpoint_state_name = "checkpoint_state"
    export_version = 1  # version number (integer)
    saver = tensorflow.train.Saver(sharded=True, name=checkpoint_state_name)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(input_tensor=model.input, scores_tensor=model.output)

    # # Version 1 of exporter
    # model_exporter.init(sess.graph.as_graph_def(), default_graph_signature=signature)
    # model_exporter.export(export_path, tensorflow.constant(export_version), sess)
    #
    # # Version 2 of exporter
    # tensorflow.train.write_graph(sess.graph.as_graph_def(), logdir=".", name="simple.pbtxt", as_text=True)

    # Version 3 with Freezer from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph_test.py
    input_graph_name = "input_graph.pb"
    output_graph_name = "output_graph.pb"
    saver_write_version = saver_pb2.SaverDef.V2

    # We'll create an input graph that has a single variable containing 1.0,
    # and that then multiplies it by 2.
    saver = tensorflow.train.Saver(write_version=saver_write_version)
    checkpoint_path = saver.save(sess, export_path, global_step=0, latest_filename=checkpoint_state_name)
    graph_io.write_graph(sess.graph, export_path, input_graph_name)

    # We save out the graph to disk, and then call the const conversion
    # routine.
    input_graph_path = os.path.join(export_path, input_graph_name)
    input_saver_def_path = ""
    input_binary = False
    output_node_names = "output_node/Softmax"
    restore_op_name = "save/restore_all"
    filename_tensor_name = "save/Const:0"
    output_graph_path = os.path.join(export_path, output_graph_name)
    clear_devices = False
    freeze_graph.freeze_graph(input_graph_path, input_saver_def_path,
                              input_binary, checkpoint_path, output_node_names,
                              restore_op_name, filename_tensor_name,
                              output_graph_path, clear_devices, "")

    shutil.copy(os.path.join("export", "simple", "output_graph.pb"), output_graph_name)
    shutil.rmtree("export")
    print("Exported model: {0}".format(os.path.abspath(output_graph_name)))
Example #26
    def load_model(self, model_bin):
        model_data = pickle.loads(model_bin)
        m_model, m_weights = model_data['model'], model_data['weights']

        self.__X_scale = model_data['x_scale']
        self.__Y_scale = model_data['y_scale']

        self.__model = Sequential.from_config(m_model)
        self.__model.set_weights(np.array(m_weights))
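
load_model expects a pickled dict with four specific keys; a plausible counterpart, assumed purely from the keys the loader reads (the method name save_model is hypothetical):

    def save_model(self):
        # Assumed counterpart: pack architecture, weights and the fitted
        # scalers into the dict layout load_model expects.
        return pickle.dumps({
            'model': self.__model.get_config(),
            'weights': self.__model.get_weights(),
            'x_scale': self.__X_scale,
            'y_scale': self.__Y_scale,
        })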
Example #27
    def __init__(self, model, max_memory=2000, discount=0.7, unfreeze_count=5):
        self.max_memory = max_memory
        self.discount = discount
        self.unfreeze_count = unfreeze_count
        self.memory = []
        self.buffer = []

        self.frozen_model = Sequential.from_config(model.get_config())
        self.frozen_model.compile(sgd(lr=.01), "mse")
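Example #28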
    def _clone_model(self, model: Model):
        '''
        Clone an existing Keras model

        :return: A copy of the input model
        '''
        if isinstance(model, Sequential):
            return Sequential.from_config(model.get_config())
        elif isinstance(model, Model):
            return Model.from_config(model.get_config())
Example #29
    def __init__(self, config, training, evaluating):
        self.config = config
        self.model = Sequential.from_config(self.config['sequential'])
        self.model.compile(loss=self.config['compilation']['loss'],
                           optimizer=self.config['compilation']['optimizer'],
                           metrics=self.config['compilation']['metrics'])
        self.training = training
        self.evaluating = evaluating
        self.config = deepcopy(config)
        # for the sake of simplicity later:
        self.config["compilation"]["metrics"] = ['loss'] + self.config["compilation"]["metrics"]
    def __init__(self, model, max_memory, discount, unfreeze_count, num_actions):
        self.max_memory = max_memory
        self.discount = discount
        self.unfreeze_count = unfreeze_count
        self.num_actions = num_actions
        self.memory = list()

        # TODO: don't assume a Sequential model
        # Note: the training algorithm has no effect here, because the frozen
        # model is never trained.
        self.frozen_model = Sequential.from_config(model.get_config())
        self.frozen_model.compile(sgd(lr=.01), "mse")
Example #31
def build_inference_model(model, batch_size=1, seq_len=1):
    """
    Build inference model from model config.
    Input shape modified to (1, 1)
    """
    logger.info("building inference model.")
    config = model.get_config()
    # Edit batch_size and seq_len
    config[0]["config"]["batch_input_shape"] = (batch_size, seq_len)
    inference_model = Sequential.from_config(config)
    inference_model.trainable = False
    return inference_model
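
The rebuilt inference model starts untrained, so callers presumably copy the trained weights across; a hedged usage sketch (trained_model is an assumption):

# Hypothetical usage: build a (1, 1)-shaped copy of a trained model and
# carry the trained weights over before stepping through a sequence.
inference_model = build_inference_model(trained_model)
inference_model.set_weights(trained_model.get_weights())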
Example #32
def keras_model_deep_copy(keras_model):
    config = keras_model.get_config()
    if isinstance(keras_model, Sequential):
        new_model = Sequential.from_config(config)
    else:
        new_model = model_from_config(config)
    shuffle_weights(new_model)
    loss = keras_model.loss
    metrics = keras_model.metrics
    optimizer = keras_model.optimizer
    new_model.compile(optimizer, loss, metrics)
    return new_model
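
shuffle_weights is not shown on this page; a plausible implementation, assumed purely from the name and the way the copy is used, would permute each weight array while keeping its shape:

import numpy as np

def shuffle_weights(model, seed=None):
    # Assumed behavior: randomly permute the entries of every weight
    # array (shapes preserved) to cheaply re-initialize the copy.
    rng = np.random.RandomState(seed)
    model.set_weights([rng.permutation(w.flatten()).reshape(w.shape)
                       for w in model.get_weights()])

Example #33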
    def deepcopy_CNN(self, base_estimator0):
        # Copy CNN (self.base_estimator_) to estimator:
        config = base_estimator0.get_config()
        # estimator = Models.model_from_config(config)
        estimator = Sequential.from_config(config)

        weights = base_estimator0.get_weights()
        estimator.set_weights(weights)
        estimator.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        return estimator
Example #34
    def build_model(self):
        super(DoubleDQN, self).build_model()

        model2 = Sequential.from_config(self.model.get_config())
        logger.info("Model 2 summary")
        model2.summary()
        self.model2 = model2

        self.model2.compile(
            loss='mean_squared_error', optimizer=self.optimizer)
        logger.info("Models built and compiled")
        return self.model, self.model2
Example #35
    def load(self, path=None):
        if path:
            self.model_path = path
        print('Loading..')
        with open(self.model_path) as f:
            data = json.load(f)
        self.inputs, self.targets = data["inputs"], data["targets"]
        keras_model = Sequential.from_config(data["k_model"])
        keras_model.compile(loss='binary_crossentropy',
                            optimizer='adam', metrics=['accuracy'])
        self.model = Model(kmodel=keras_model)
        return self
Example #36
def clone_model(model, custom_objects={}):
    if len(custom_objects) > 0:
        warnings.warn('Due to an API problem with Keras, custom_objects is currently ignored. Sorry about that.')
    config = model.get_config()
    try:
        # TODO: re-enable custom_objects
        clone = Sequential.from_config(config)
    except:
        # TODO: re-enable custom_objects
        clone = Model.from_config(config)
    clone.set_weights(model.get_weights())
    return clone
Example #37
    def _build_inference_model(model: Model, batch_size=1, seq_len=1) -> Model:
        """
        Build inference model from model config.
        Input shape modified to (1, 1).
        """
        print("building inference model.")
        config = model.get_config()
        # edit batch_size and seq_len
        config[0]["config"]["batch_input_shape"] = (batch_size, seq_len)
        inference_model = Sequential.from_config(config)
        inference_model.trainable = False
        return inference_model
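Example #38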
def Mitosis_IA(Player, grupo):  # I don't know how to write a method that replicates another individual without getting tangled up in the attributes, so I made an external function,
    # but a method might be better so that everything is more elegant
    jugador = player_IA()  # New agent
    new_model = Sequential.from_config(Player.config_model)  # Same neural network
    new_model.set_weights(Player.Pesos)  # Same weights
    jugador.model = new_model  # Assign the cloned network
    jugador.config_model = Player.config_model  # Store its new model in case it reproduces
    jugador.Pesos = Player.Pesos  # Store its new weights in case it reproduces
    jugador.energia = Player.energia / 10  # Each keeps 10% of the energy the original individual had, losing 80% in the process
    Player.energia = Player.energia / 10  # Each keeps 10% of the energy the original individual had, losing 80% in the process
    jugador.vmax = Player.vmax + Player.vmax * (rd.random() - rd.random())  # The new individual's speed can increase or decrease
    grupo.add(jugador)  # Add it to the list of agents
    return grupo
Example #39
def build_model_from_file(model_file):
    print(model_file)

    structure, weights = pickle.load(open(model_file, 'rb'))

    model = Sequential.from_config(structure)
    model.set_weights(weights)
    return model
Example #40
def keras_spark_predict(model_path, weights_path, partition):
    # load model
    model = Sequential.from_config(model_path.value)
    model.set_weights(weights_path.value)

    # Create a list containing features.
    features_list = map(lambda x: [x[:]], partition)
    features_df = pd.DataFrame(features_list)

    # predict with keras model
    predictions = model.predict_on_batch(features_df)
    predictions_return = map(lambda prediction: Row(prediction=prediction[0].item()), predictions)
    return iter(predictions_return)
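
The .value accesses suggest model_path and weights_path are Spark broadcast variables despite their names; a hedged sketch of how the function might be wired up (sc, model and features_rdd are assumptions):

# Hypothetical wiring: broadcast config and weights once, then rebuild
# and apply the Keras model inside each worker partition.
bc_config = sc.broadcast(model.get_config())
bc_weights = sc.broadcast(model.get_weights())
rows = features_rdd.mapPartitions(
    lambda part: keras_spark_predict(bc_config, bc_weights, part))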
Example #41
def create_dropout_predict_function(model, dropout):
    """
    Create a keras function to predict with dropout
    model : keras model
    dropout : fraction dropout to apply to all layers

    Returns
    predict_with_dropout : keras function for predicting with dropout
    """

    # Load the config of the original model
    conf = model.get_config()
    # Add the specified dropout to all layers
    for layer in conf['layers']:
        # Dropout layers
        if layer["class_name"] == "Dropout":
            layer["config"]["rate"] = dropout
            print('config-rate')
        # Recurrent layers with dropout
        elif "dropout" in layer["config"].keys():
            layer["config"]["dropout"] = dropout
            print(layer["config"]["dropout"],'config-droupout')
    # Create a new model with specified dropout
    if type(model) == Sequential:
        # Sequential
        newmodel = keras.models.clone_model(model)
        # newmodel.compile(optimizer='adam',
        #                     loss='mse',
        #                     metric='mse')
        # newmodel.set_weights(model.get_weights())

        model_dropout = Sequential.from_config(conf)
        # print('model1')
    else:
        # Functional
        model_dropout = Model.from_config(conf)
        # print('model2')

    model_dropout.set_weights(model.get_weights())
    # model_dropout.compile(optimizer='adam',
    #                       loss='mse')

    # final_conv_layer = get_output_layer(model, "conv5_3")
    # get_output = K.function([model.layers[0].input],
    #                         [final_conv_layer.output, model.layers[-1].output])
    # [conv_outputs, predictions] = get_output([img])
    # Create a function to predict with the dropout on
    predict_with_dropout = K.function([model_dropout.layers[0].input,K.learning_phase()],
                                      [model_dropout.layers[-1].output])

    return predict_with_dropout
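
A hedged usage sketch: K.learning_phase() is the second input, so passing 1 keeps dropout active at prediction time, and repeated calls give Monte Carlo samples (x is a hypothetical input batch):

predict_with_dropout = create_dropout_predict_function(model, dropout=0.5)
# Each call resamples the dropout masks (learning phase 1 = training mode).
mc_samples = [predict_with_dropout([x, 1])[0] for _ in range(20)]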
Example #42
def cross_valid(model, model_name, input_type='2d', epochs=5, batch_size=32):
    (xtrain, ytrain), (xvalid, yvalid), (xtest, ytest) = md.load_mnist()

    #model = create_model(model_name)
    #history=HistoryCallback()
    model_config = model.get_config()
    print(model.summary())

    if (not model_name in os.listdir(".")):
        os.mkdir(model_name)
    os.chdir(model_name)
    md.save_model(model_name, model)

    out_name = "{0}_train_res.txt".format(model_name)
    res = open(out_name, "w")
    for i in range(1, 6):
        print("\nИтерация {}".format(i))
        #model = create_model()

        #history=HistoryCallback()
        h = model.fit(xtrain,
                      ytrain,
                      epochs=epochs,
                      batch_size=batch_size,
                      validation_data=(xvalid, yvalid))

        swap_intl = range((i - 1) * 10000, (i * 10000 - 1))
        swap = (xtrain[swap_intl], ytrain[swap_intl])
        (xtrain[swap_intl], ytrain[swap_intl]) = (xvalid[0:9999],
                                                  yvalid[0:9999])
        (xvalid, yvalid) = swap

        model.save_weights("{}-{}_weights.h5".format(model_name, i))

        print("evaluating...")
        score = model.evaluate(xtest, ytest)

        res.write("#, vloss, vacc, tloss, tvacc, %\n")
        y = model.predict(xtest)
        pc = md.predict_percent(y, ytest)
        res.write("{0},{1},{2},{3},{4},{5}\n".format(i, h.history["val_loss"],
                                                     h.history["val_acc"],
                                                     score[0], score[1], pc))
        res.flush()
        del model
        model = Sequential.from_config(model_config)
        model.compile(loss=keras.losses.categorical_crossentropy,
                      optimizer=keras.optimizers.Adadelta(),
                      metrics=['accuracy'])
    res.close()
    os.chdir("..")
Example #43
def test_merge_overlap():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    print(model.layers)
    model.save_weights(fname, overwrite=True)
    print(model.trainable_weights)

    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #44
    def __init__(self, env,
                       nn = None,
                       **config):
        """
        Based on:
            https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf

        """

        # if not isinstance(env.action_space, discrete.Discrete):
        #     raise UnsupportedSpace('Action space {} incompatible with {}. (Only supports Discrete action spaces.)'.format(action_space, self))
        
        self.env = env
        self.config = {
            'eps': 0.05,
            'gamma': 0.95,
            'store_every':5,
            'train_every':5,
            'minibatch_size': 1,
            'max_experience': 5000,
            'target_nn_update_rate': 0.01,
            'maxepoch': 100,
            'maxstep': 100,
            'outdir': '/tmp/brainresults',
            'plot': True,
            'render': True,
        }
        self.config.update(config)
        self.plotter = LivePlot(self.config['outdir'])

        # Deep Q Agent State
        self._action_ctr = 0  # actions executed so far
        self._iter_ctr = 0
        self._store_ctr = 0
        self._train_ctr = 0
        self._experience = deque()

        self.nn = nn # The keras network should be compiled outside
        self.tnn = Sequential.from_config(nn.get_config()) # Init target NN
        self.tnn.set_weights(self.nn.get_weights())
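Example #45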
def load_keras_model_from_disk(
        model_json_path,
        weights_hdf_path,
        name=None):
    """
    Loads a model from two files on disk: a JSON configuration and HDF5 weights.

    Parameters
    ----------
    model_json_path : str

    weights_hdf_path : str

    name : str, optional

    Returns a Keras model.
    """

    if not exists(model_json_path):
        raise ValueError("Model file %s (name = %s) not found" % (
            model_json_path, name,))

    with open(model_json_path, "r") as f:
        config_dict = json.load(f)

    if isinstance(config_dict, list):
        # Not sure if this is a Keras bug, but depending on the model I get
        # back either a list or a dict; the list is only usable with a
        # Sequential model.
        model = Sequential.from_config(config_dict)
    else:
        model = model_from_config(config_dict)

    if weights_hdf_path is not None:
        if not exists(weights_hdf_path):
            raise ValueError(
                "Missing model weights file %s (name = %s)" % (weights_hdf_path, name))
        model.load_weights(weights_hdf_path)
    return model
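
A hedged sketch of the matching save side, using only standard Keras calls (the function name and file handling are assumptions):

def save_keras_model_to_disk(model, model_json_path, weights_hdf_path):
    # Dump the config as JSON and the weights as HDF5, the two files
    # load_keras_model_from_disk expects.
    with open(model_json_path, "w") as f:
        json.dump(model.get_config(), f)
    model.save_weights(weights_hdf_path, overwrite=True)

Example #46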
def test_sequential_deferred_build():
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(3))
    model.add(keras.layers.Dense(3))
    model.compile('sgd', 'mse')

    assert model.built is False
    assert len(model.layers) == 2
    assert len(model.weights) == 0

    model.train_on_batch(
        np.random.random((2, 4)), np.random.random((2, 3)))

    assert model.built is True
    assert len(model.layers) == 2
    assert len(model.weights) == 4

    # Test serialization
    config = model.get_config()
    assert 'name' in config
    new_model = Sequential.from_config(config)
    assert new_model.built is True
    assert len(new_model.layers) == 2
    assert len(new_model.weights) == 4
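Example #47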
def test_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(x_test), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2,
              validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test)

    prediction = model.predict_generator(data_generator(x_test, y_test), 1,
                                         max_queue_size=2, verbose=1)
    gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50), 1,
                                        max_queue_size=2)
    pred_loss = K.eval(K.mean(losses.get(model.loss)(K.variable(y_test),
                                                     K.variable(prediction))))

    assert(np.isclose(pred_loss, loss))
    assert(np.isclose(gen_loss, loss))

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # Test serialization
    config = model.get_config()
    assert 'name' in config
    new_model = Sequential.from_config(config)
    assert new_model.weights  # Model should be built.

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
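Example #48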
import json
from keras.models import model_from_json
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential

(X_train, y_train), (X_test, y_test) = mnist.load_data()

X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
X_test = X_test.astype('float32')
X_test /= 255
nb_classes = 10
Y_test = np_utils.to_categorical(y_test, nb_classes)

# Load trained model
json_string = open('model.json').read()
model_config = model_from_json(json_string).get_config()
model = Sequential.from_config(model_config)

model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
model.load_weights('modelweights.hdf5')

# Evaluate with previously trained model
score = model.evaluate(X_test, Y_test, verbose=0)

print('Test score:', score[0])
print('Test accuracy:', score[1])
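Example #49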
def visualizeSequentialOutput(model, layerIdx, df):

    if not os.path.exists(cc.cfg['plots']['seq_output_dir']):
        os.makedirs(cc.cfg['plots']['seq_output_dir'])


    if cc.cfg['plots']['seq_output_seq_input_name'] == 'smiles':
        input = data.formatSequentialInput(df)
    elif cc.cfg['plots']['seq_output_seq_input_name'] == 'fasta':
        input = data.formatFastaInput(df)
    else:
        raise ValueError('visual err')


    # model.layers[layerIdx].return_sequences = True
    # model.compile(loss="mean_squared_error", optimizer="rmsprop")


    cfg = model.get_config()[:layerIdx+1]
    del cfg[2]
    layerIdx -= 1

    # print cfg
    cfg[layerIdx]['config']['return_sequences'] = True

    seqModel = Sequential.from_config(cfg)
    seqModel.set_weights(model.get_weights())
    seqModel.layers[layerIdx].return_sequences = True


    outputFunction = K.function([seqModel.layers[0].input],
              [seqModel.layers[layerIdx].output])

    output = outputFunction([input])[0]

    '''
    sns.set()
    for i,smilesOutput in enumerate(output):
        g = sns.clustermap(smilesOutput.T, col_cluster=False,  method='single',metric='cosine')
        g.savefig('{}/seq_output.png'.format(cc.cfg['plots']['seq_output_dir']))
    '''

    dropSet = Set(cc.cfg['plots']['seq_output_ignore_neurons'])
    if cc.cfg['plots']['seq_output_select_neurons']:
        arrMask = cc.cfg['plots']['seq_output_select_neurons']
    else:
        arrMask = list(range(output.shape[2]))
    arrMask = np.array([x for x in arrMask if not x in dropSet])

    fig = plt.figure(figsize=(input.shape[1] * 0.3,len(arrMask) * len(df) * 1.5))


    for i,seqOutput in enumerate(output):

        # print seqOutput.shape
        # print seqOutput

        selected = seqOutput.T[arrMask]

        Z = sch.linkage(selected, method='single', metric='cosine')
        leaves = sch.leaves_list(Z)
        # leaves = range(len(selected))
        reordered = selected[leaves]

        ax = fig.add_subplot(len(df), 1, i + 1)

        ppl.pcolormesh(fig, ax, reordered,
               xticklabels=list(df.values[i][0]),
               yticklabels=arrMask[leaves],
               vmin=-1,
               vmax=1)

    fig.savefig('{}/{}'.format(cc.cfg['plots']['seq_output_dir'], cc.cfg['plots']['seq_output_name']))
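Example #50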
def printPrediction(model, smilesData):
    # FIXME hardcoded

    smilesDf = pd.DataFrame(smilesData, columns=[cc.exp['params']['data']['smiles']])

    input = data.formatSequentialInput(smilesDf)

    output = model.predict(input)

    for i, smiles in enumerate(smilesData):
        print 'Prediction for {}'.format(smiles)
        print output[i]

    distanceMatrixCosine = pairwise_distances(output, metric='cosine')
    distanceMatrixCorrel = pairwise_distances(output, metric='correlation')
    distanceMatrixEuclid = pairwise_distances(output, metric='euclidean')

    print 'Distance matrix cosine'
    print distanceMatrixCosine
    print 'Distance matrix correlation'
    print distanceMatrixCorrel
    print 'Distance matrix euclid'
    print distanceMatrixEuclid

    '''

    layerIdx = 1
    cfg = model.get_config()[:layerIdx+1]
    cfg[0]['config']['dropout_U'] = 0
    cfg[0]['config']['dropout_W'] = 0

    print cfg[0]
    print cfg[1]
    # del cfg[1]
    # layerIdx -= 1
    # print cfg
    cfg[layerIdx]['config']['return_sequences'] = True
    '''


    layerIdx = 2
    cfg = model.get_config()[:layerIdx+1]
    del cfg[1]
    layerIdx -= 1
    # print cfg
    cfg[layerIdx]['config']['return_sequences'] = True

    seqModel = Sequential.from_config(cfg)
    seqModel.set_weights(model.get_weights())
    seqModel.layers[layerIdx].return_sequences = True


    outputFunction = K.function([seqModel.layers[0].input],
              [seqModel.layers[layerIdx].output])

    outputSymbols = outputFunction([input])[0]

    outputLastSymbol = outputSymbols[:,outputSymbols.shape[1]-1,:]

    distanceMatrixLastSymbolCorrel = np.corrcoef(outputLastSymbol)

    print 'Distance matrix last symbol correlation'
    print distanceMatrixLastSymbolCorrel
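Example #51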
def test_merge_recursivity():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation("relu"))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation("relu"))

    righter = Sequential()
    righter.add(Dense(nb_hidden, input_shape=(input_dim,)))
    righter.add(Activation("relu"))

    intermediate = Sequential()
    intermediate.add(Merge([left, right], mode="sum"))
    intermediate.add(Dense(nb_hidden))
    intermediate.add(Activation("relu"))

    model = Sequential()
    model.add(Merge([intermediate, righter], mode="sum"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(
        [X_train, X_train, X_train],
        y_train,
        batch_size=batch_size,
        nb_epoch=nb_epoch,
        verbose=0,
        validation_data=([X_test, X_test, X_test], y_test),
    )
    model.fit(
        [X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1
    )
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test, X_test], verbose=0)

    fname = "test_merge_recursivity_temp.h5"
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate([X_test, X_test, X_test], y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
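Example #52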
def test_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(X_test), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation("relu"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test)

    prediction = model.predict_generator(data_generator(X_test, y_test), X_test.shape[0], max_q_size=2)
    gen_loss = model.evaluate_generator(data_generator(X_test, y_test, 50), X_test.shape[0], max_q_size=2)
    pred_loss = K.eval(K.mean(objectives.get(model.loss)(K.variable(y_test), K.variable(prediction))))

    assert np.isclose(pred_loss, loss)
    assert np.isclose(gen_loss, loss)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation("relu"))
    model.add(Dense(nb_class))
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #53
def fam(train_i, train_o, test_i, test_o):
    sess = tf.Session()
    K.set_session(sess)
    K.set_learning_phase(1)

    batch_size = 60
    nb_classes = len(MOD)
    nb_epoch = 20

    img_rows, img_cols = 2 * P * L, 2 * Np
    nb_filters = 96
    nb_pool = 2

    X_train,Y_train = shuffle_in_unison_inplace( np.array(train_i) , np.array(train_o) )

    model = Sequential()
    model.add(Convolution2D(64, 11, 11,subsample=(2,2),
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2,2)))

    model.add(Flatten())
    model.add(Dense(512,activation='relu'))
    model.add(Dropout(0.5)) 

    model.add(Dense(512,activation='relu'))
    model.add(Dropout(0.5)) 

    model.add(Dense(nb_classes,init='normal'))
    model.add(Activation('softmax', name="out"))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    """
    datagen = ImageDataGenerator(
        #featurewise_center=True,
        #featurewise_std_normalization=True,
        rotation_range=20,
        #width_shift_range=0.3,
        #height_shift_range=0.3,
        #zoom_range=[0,1.3],
        horizontal_flip=True,
        vertical_flip=True)

    datagen.fit(X_train)

    model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size,shuffle=True),
                    samples_per_epoch=len(X_train), nb_epoch=5,verbose=1,validation_data=(test_i[0], test_o[0]))

    """

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              verbose=1, shuffle=True, validation_data=(test_i[0], test_o[0]))


    for s in range(len(test_i)):
        if len(test_i[s]) == 0:
            continue
        X_test = test_i[s]
        Y_test = test_o[s]
        score = model.evaluate(X_test, Y_test, verbose=0)
        print("SNR", SNR[s], "Test accuracy:", score[1])

    K.set_learning_phase(0)
    config = model.get_config()
    weights = model.get_weights()

    new_model = Sequential.from_config(config)
    new_model.set_weights(weights)

    export_path = "/tmp/fam"
    export_version = 1

    labels_tensor = tf.constant(MOD)

    saver = tf.train.Saver(sharded=True)
    model_exporter = exporter.Exporter(saver)
    signature = exporter.classification_signature(
        input_tensor=new_model.input,classes_tensor=labels_tensor,scores_tensor=new_model.output)
    model_exporter.init(
        sess.graph.as_graph_def(),
        default_graph_signature=signature)
    model_exporter.export(export_path, tf.constant(export_version), sess)