Example #1
def test_nested_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
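Every test snippet in this collection leans on module-level fixtures (`_get_test_data`, `num_hidden`/`nb_hidden`, `num_class`/`nb_class`, `input_dim`, `batch_size`, `epochs`/`nb_epoch`) that the excerpts omit. A minimal sketch of what such a fixture could look like; the sizes and names here are assumptions, not the original test harness:

import numpy as np
from keras.utils import np_utils

input_dim, num_hidden, num_class = 16, 8, 4
batch_size, epochs = 32, 1

def _get_test_data():
    # random data shaped like the real fixture; purely illustrative
    x = np.random.random((500, input_dim))
    y = np_utils.to_categorical(np.random.randint(num_class, size=500), num_class)
    return (x[:400], y[:400]), (x[400:], y[400:])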
Example #2
def train_lstm(n_symbols, embedding_weights, x_train, y_train, x_test, y_test):
    print('Defining a Simple Keras Model...')
    model = Sequential()  # or Graph or whatever
    model.add(Embedding(output_dim=vocab_dim,
                        input_dim=n_symbols,
                        mask_zero=True,
                        weights=[embedding_weights],
                        input_length=input_length))  # Adding Input Length
    model.add(LSTM(output_dim=50, activation='sigmoid', inner_activation='hard_sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    print('Compiling the Model...')
    model.compile(loss='binary_crossentropy',
                  optimizer='adam', metrics=['accuracy'])

    print "Train..."
    model.fit(x_train, y_train, batch_size=batch_size, nb_epoch=n_epoch,verbose=1, validation_data=(x_test, y_test),show_accuracy=True)

    print "Evaluate..."
    score = model.evaluate(x_test, y_test,
                                batch_size=batch_size)

    yaml_string = model.to_yaml()
    with open('lstm_data/lstm.yml', 'w') as outfile:
        outfile.write(yaml_string)  # to_yaml() already returns a YAML string; no second yaml.dump needed
    model.save_weights('lstm_data/lstm.h5')
    print('Test score:', score)
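This snippet targets the Keras 0.x/1.x API: `nb_epoch` became `epochs` in Keras 2, `show_accuracy` was dropped in favor of `metrics=['accuracy']` at compile time, and the LSTM arguments `output_dim`/`inner_activation` became `units`/`recurrent_activation`. A hedged Keras 2 equivalent of the training call above:

model.fit(x_train, y_train, batch_size=batch_size, epochs=n_epoch,
          verbose=1, validation_data=(x_test, y_test))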
Example #3
def test_recursive():
    # test layer-like API
    graph = Graph()
    graph.add_input(name='input1', input_shape=(32,))
    graph.add_node(Dense(16), name='dense1', input='input1')
    graph.add_node(Dense(4), name='dense2', input='input1')
    graph.add_node(Dense(4), name='dense3', input='dense1')
    graph.add_output(name='output1', inputs=['dense2', 'dense3'],
                     merge_mode='sum')

    seq = Sequential()
    seq.add(Dense(32, input_shape=(32,)))
    seq.add(graph)
    seq.add(Dense(4))

    seq.compile('rmsprop', 'mse')

    seq.fit(X_train_graph, y_train_graph, batch_size=10, nb_epoch=10)
    loss = seq.evaluate(X_test_graph, y_test_graph)

    # test serialization
    config = seq.get_config()
    new_graph = Sequential.from_config(config)

    seq.summary()
    json_str = seq.to_json()
    new_graph = model_from_json(json_str)

    yaml_str = seq.to_yaml()
    new_graph = model_from_yaml(yaml_str)
Example #4
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #5
def test_merge_sum():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)

    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #6
def test_constant_initializer_with_numpy():
    model = Sequential()
    model.add(Dense(2, input_shape=(3,),
                    kernel_initializer=Constant(np.ones((3, 2)))))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='sgd', metrics=['acc'])

    json_str = model.to_json()
    model_from_json(json_str).summary()

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str).summary()
Example #7
    def test_sequential(self):
        print('Test sequential')
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print('loss:', loss)
        if loss > 0.7:
            raise Exception('Score too low, learning issue.')
        model.predict(X_test, verbose=0)
        model.predict_classes(X_test, verbose=0)
        model.predict_proba(X_test, verbose=0)
        model.get_config(verbose=0)

        print('test weight saving')
        fname = 'test_sequential_temp.h5'
        model.save_weights(fname, overwrite=True)
        model = Sequential()
        model.add(Dense(nb_hidden, input_shape=(input_dim,)))
        model.add(Activation('relu'))
        model.add(Dense(nb_class))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.load_weights(fname)
        os.remove(fname)

        nloss = model.evaluate(X_train, y_train, verbose=0)
        assert(loss == nloss)

        # test json serialization
        json_data = model.to_json()
        model = model_from_json(json_data)

        # test yaml serialization
        yaml_data = model.to_yaml()
        model = model_from_yaml(yaml_data)
Example #8
def test_merge_overlap():
    (X_train, y_train), (X_test, y_test) = _get_test_data()
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    print(model.layers)
    model.save_weights(fname, overwrite=True)
    print(model.trainable_weights)

    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)

    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)

    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
Example #9
    def train_mlp(self, input, output):
        self.in_real = input.data['real']
        self.in_imag = input.data['imag']
        self.out_real = output.data['real']
        self.out_imag = output.data['imag']

        (i_dim_x, i_dim_y, i_dim_z) = self.in_real.shape
        in_dim = i_dim_x*i_dim_y*i_dim_z
        input_data = self.in_real.reshape(1, in_dim)  # one sample with in_dim features, matching Dense(input_dim=in_dim)

        (o_dim_x, o_dim_y, o_dim_z) = self.out_real.shape
        out_dim = o_dim_x*o_dim_y*o_dim_z
        output_data = self.out_real.reshape(1, out_dim)

        model = Sequential()
        model.add(Dense(200, input_dim=in_dim, init='uniform'))
        model.add(Activation('relu'))
        #  model.add(Dropout(0.25))

        model.add(Dense(200))#, init='uniform'))
        model.add(Activation('relu'))
        #  model.add(Dropout(0.25))

        model.add(Dense(out_dim))#, init='uniform'))
        model.add(Activation('softmax'))

        model.compile(loss='categorical_crossentropy', optimizer='sgd',\
                metrics=['accuracy'])

        early_stop = EarlyStopping(monitor='val_loss', patience=2)
        hist = model.fit(input_data, output_data, nb_epoch=50, \
                         batch_size=64, validation_split=0.2, \
                         shuffle=True, callbacks=[early_stop])
        print(hist.history)
        # TODO: batch train
        # model.train_on_batch(x_batch, y_batch)  # placeholder: calling train_on_batch() with no arguments would raise a TypeError

        # Save model
        model_to_save_json = model.to_json()
        open('model_architecture.json', 'w').write(model_to_save_json)
        model_to_save_yaml = model.to_yaml()
        open('model_architecture.yaml', 'w').write(model_to_save_yaml)
        model.save_weights('weights.h5')
Example #10
def model(X_train, Y_train, X_test, Y_test):
    '''
    Model providing function:

    Create Keras model with double curly brackets dropped-in as needed.
    Return value has to be a valid python dictionary with two customary keys:
        - loss: Specify a numeric evaluation metric to be minimized
        - status: Just use STATUS_OK and see hyperopt documentation if not feasible
    The last one is optional, though recommended, namely:
        - model: specify the model just created so that we can later use it again.
    '''
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation
    from keras.optimizers import RMSprop

    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense({{choice([256, 512, 1024])}}))
    model.add(Activation('relu'))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(10))
    model.add(Activation('softmax'))

    rms = RMSprop()
    model.compile(loss='categorical_crossentropy', optimizer=rms)

    model.fit(X_train, Y_train,
              batch_size={{choice([64, 128])}},
              nb_epoch=1,
              show_accuracy=True,
              verbose=2,
              validation_data=(X_test, Y_test))
    score, acc = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model.to_yaml(), 'weights': pickle.dumps(model.get_weights())}
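The double-curly-brace placeholders mark this as a hyperas template function rather than plain Keras code. A sketch of how such a function is typically driven; `data` is an assumed companion function returning `X_train, Y_train, X_test, Y_test`:

from hyperopt import Trials, tpe
from hyperas import optim

# hyperas rewrites the template, sampling {{uniform(...)}} / {{choice(...)}} per trial
best_run, best_model = optim.minimize(model=model,  # the template function above
                                      data=data,    # assumed data-providing function
                                      algo=tpe.suggest,
                                      max_evals=5,
                                      trials=Trials())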
Example #11
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam')


# Initialize Elephas Spark ML Estimator
adagrad = elephas_optimizers.Adagrad()

estimator = ElephasEstimator()
estimator.setFeaturesCol("scaled_features")
estimator.setLabelCol("index_category")
estimator.set_keras_model_config(model.to_yaml())
estimator.set_optimizer_config(adagrad.get_config())
estimator.set_nb_epoch(10)
estimator.set_batch_size(128)
estimator.set_num_workers(4)
estimator.set_verbosity(0)
estimator.set_validation_split(0.15)
estimator.set_categorical_labels(True)
estimator.set_nb_classes(nb_classes)

# Fitting a model returns a Transformer
pipeline = Pipeline(stages=[string_indexer, scaler, estimator])
fitted_pipeline = pipeline.fit(train_df)

from pyspark.mllib.evaluation import MulticlassMetrics
# Evaluate Spark model
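The snippet breaks off before the evaluation itself. A plausible continuation, assuming a `test_df` prepared the same way as `train_df` (the column names come from the estimator setup above):

prediction = fitted_pipeline.transform(test_df)
pnl = prediction.select("index_category", "prediction")
metrics = MulticlassMetrics(pnl.rdd.map(lambda row: (float(row.prediction), float(row.index_category))))
print(metrics.weightedFMeasure())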
Example #12
sample_weight = np.asarray(sample_weight)
X_sample_weight, X_sample_weight_rest = train_test_split(sample_weight, test_size=0.1, random_state=47)
sample_weight_val, sample_weight_test = train_test_split(X_sample_weight_rest, test_size=0.5, random_state=47)
# X_sample_weight = np.concatenate((X_sample_weight, sample_weight_val))
# model.add(Reshape((100, 100), input_shape=(10000,)))
model.add(LSTM(60, return_sequences=True, input_shape=(timesteps, data_dim)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(TimeDistributedDense(28, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', sample_weight_mode="temporal")
totaltrain = np.asarray(totaltrain)
totallabel = np.asarray(totallabel)
print(len(x_val))
x_train = np.asarray(x_train)
x_test = np.asarray(x_test)
x_val = np.asarray(x_val)
y_train = np.asarray(y_train)
y_val = np.asarray(y_val)
y_test = np.asarray(y_test)
print(y_train.shape)
x_train = x_train[:, :100, :]
y_train = y_train[:, :100, :]
print(y_train.shape)
early_stop = EarlyStopping(monitor='val_loss', patience=100, verbose=1)
model.fit(x_train, y_train, callbacks=[early_stop], nb_epoch=300, sample_weight=X_sample_weight, batch_size=100, show_accuracy=True, validation_split=0.1)
with open('yaml', 'w') as f:
    f.write(model.to_yaml())
model.save_weights('NERmode_weights.h5', overwrite=True)

Example #13
def test_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(train):
        if train:
            max_batch_index = len(X_train) // batch_size
        else:
            max_batch_index = len(X_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (X_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (X_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.summary()

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    gen_loss = model.evaluate_generator(data_generator(True), 256, verbose=0)
    assert(gen_loss < 0.8)

    loss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss < 0.8)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test json serialization
    json_data = model.to_json()
    model = model_from_json(json_data)

    # test yaml serialization
    yaml_data = model.to_yaml()
    model = model_from_yaml(yaml_data)
Example #14
def test_nested_sequential():
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.summary()

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss < 0.8)

    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)

    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(nb_class))

    middle = Sequential()
    middle.add(inner)

    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert(loss == nloss)

    # test json serialization
    json_data = model.to_json()
    model = model_from_json(json_data)

    # test yaml serialization
    yaml_data = model.to_yaml()
    model = model_from_yaml(yaml_data)
Example #15
    def test_sequential(self):
        print("Test sequential")
        model = Sequential()
        model.add(Dense(input_dim, nb_hidden))
        model.add(Activation("relu"))
        model.add(Dense(nb_hidden, nb_class))
        model.add(Activation("softmax"))
        model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

        model.fit(
            X_train,
            y_train,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            show_accuracy=True,
            verbose=1,
            validation_data=(X_test, y_test),
        )
        model.fit(
            X_train,
            y_train,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            show_accuracy=False,
            verbose=2,
            validation_data=(X_test, y_test),
        )
        model.fit(
            X_train,
            y_train,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            show_accuracy=True,
            verbose=2,
            validation_split=0.1,
        )
        model.fit(
            X_train,
            y_train,
            batch_size=batch_size,
            nb_epoch=nb_epoch,
            show_accuracy=False,
            verbose=1,
            validation_split=0.1,
        )
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
        model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

        model.train_on_batch(X_train[:32], y_train[:32])

        loss = model.evaluate(X_train, y_train, verbose=0)
        print("loss:", loss)
        if loss > 0.6:
            raise Exception("Score too low, learning issue.")
        preds = model.predict(X_test, verbose=0)
        classes = model.predict_classes(X_test, verbose=0)
        probas = model.predict_proba(X_test, verbose=0)
        print(model.get_config(verbose=1))

        print("test weight saving")
        model.save_weights("temp.h5", overwrite=True)
        model = Sequential()
        model.add(Dense(input_dim, nb_hidden))
        model.add(Activation("relu"))
        model.add(Dense(nb_hidden, nb_class))
        model.add(Activation("softmax"))
        model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
        model.load_weights("temp.h5")

        nloss = model.evaluate(X_train, y_train, verbose=0)
        print(nloss)
        assert loss == nloss

        # test json serialization
        json_data = model.to_json()
        model = model_from_json(json_data)

        # test yaml serialization
        yaml_data = model.to_yaml()
        model = model_from_yaml(yaml_data)
Example #16
            X.append(util.sentence2array(line, N_FEATURES))
            y.append(label)
    return (X, y)


if __name__ == '__main__':
    (x, y) = vectorize("negative.txt", 0)
    (xx, yy) = vectorize("positive.txt", 1)
    x += xx
    y += yy
    x = np.array(x)
    y = np.array(y)
    (X_train, X_test, y_train, y_test) = train_test_split(x, y, test_size=0.3)
    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)
    X_test = sequence.pad_sequences(X_test, maxlen=MAX_LEN)

    model = Sequential()
    model.add(Embedding(N_FEATURES, EMBEDDING_OUT_DIM, input_length=MAX_LEN))
    model.add(LSTM(LSTM_UNITS))
    model.add(Dropout(DROPOUT_RATE))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam',
                  class_mode='binary', metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=BATCH, nb_epoch=EPOCH,
              validation_data=(X_test, y_test))
    model.evaluate(X_test, y_test, batch_size=BATCH)

    open('sentiment_model.yaml', 'w').write(model.to_yaml())
    model.save_weights('sentiment_weights.hdf5')
Example #17
class DeepNeuroBrain:
    def __init__(self, step="test"):
        """ Input : step : 'train' or 'test', depending if you want to fit the
        neural network or not at each game."""

        self.name = "neuroBrain"
        self.computingTimes = []
        self.U = np.array([0]).reshape(1,1) # evaluation function, output of the NN

        # Neural Net constants
        self.model = None
        self.createNeuralNetwork()
        self.gamma = 0.9 # discount factor
        self.epsilon = 0.1 # epsilon-greedy algorithm
        self.normalReward = -1 # reward in normal games (not winning or losing)

        self.winningReward = 100
        self.losingReward = -100
        self.step = step
        self.verbose = 0 # if 0, only print the value of U at each game

    def createNeuralNetwork(self):
        """ Create and compile a convolutional neural network with Keras """

        print("Create the neural network...")
        self.model = Sequential()
        self.model.add(Convolution2D(32, 4, 4, border_mode='same', input_shape=(1,8, 8)))
        self.model.add(Convolution2D(16, 4, 4, border_mode='same', input_shape=(1,8, 8)))

        self.model.add(Flatten())

        self.model.add(Dense(25, activation="relu", init='lecun_uniform'))

        self.model.add(Dense(1, init='lecun_uniform'))

        rms = RMSprop()
        self.model.compile(loss='mse', optimizer=rms)
        yaml_string = self.model.to_yaml()
        with open("model.yaml", "w") as f:
            f.write(yaml_string)
        print("[+] Neural network created")

    def play(self, gameState, timeLimit):
        possibleMoves = gameState.getStateMoveDict()
        if self.verbose:
            print("Authorized moves : ")
            for m in possibleMoves.values(): print(m.toPDN())
        string = ""
        try:
            if self.step == "train":
                string = self.nextMoveTrain(gameState)
            else:
                string = self.nextMoveTest(gameState)

            move = Move.fromPDN(string)
            choice = gameState.doMove(move, inplace = False)
            if str(choice) not in possibleMoves.keys(): raise Exception
        except Exception:
            print(string+' is an invalid move !')
            raise

        return choice

    def getMinUR(self, gameState):
        """ Given an enemy game state, compute his best move,
        i.e. the one with the worst U, and return the corresponding (U, reward) couple """

        if not gameState.getStateMoveDict():
            reward = self.getReward(gameState) # either win or draw, because the enemy (gameState) has no possible move
            return (reward,reward)
        possibleMoves = list(gameState.getStateMoveDict().values())
        minU = INFINI
        for action in possibleMoves:
            newGameState = gameState.doMove(action)
            reward = self.getReward(newGameState)
            if not newGameState.getStateMoveDict():
                return (reward,reward) # either lose or draw
            if reward + self.gamma * self.predict(newGameState) < minU:
                minU = reward + self.gamma * self.predict(newGameState)
                minReward = reward
        return (minU,self.normalReward)

    def getListNextUR(self, gameState, possibleMoves):
        """ Given our gameState and a list of possibleMoves, return the list of
        U values and rewards corresponding to each of our moves. To compute
        the U value after a move, since the new board belongs to the enemy, we use the
        U of his best move (by calling getMinUR). This amounts to a
        depth-2 min-max. """

        listU = []
        listR = []
        for action in possibleMoves:
            newGameState = gameState.doMove(action)
            newU, newR = self.getMinUR(newGameState)
            listU.append(newU)
            listR.append(newR)
        return (listU, listR)

    def nextMoveTrain(self, gameState):
        """ Reinforcement learning algorithm (TD(0)) with epsilon-greedy action selection.
        Performs a depth-2 min-max to determine the best action
        (for each action, the eval function (called U) of the new game state is the eval function
        of the game state reached after the enemy plays his best move).
        The U function is computed by a neural network and is updated after each move """

        possibleMoves = list(gameState.getStateMoveDict().values())
        U = self.predict(gameState)
        print ("U : " + str(U), end="")

        newU = []
        if (random.random() < self.epsilon): #choose random action (epsilon-greedy part)
            print(" : random")
            action = possibleMoves[np.random.randint(0,len(possibleMoves))]
            newGameState = gameState.doMove(action)
            newU, reward = self.getMinUR(newGameState) # newU corresponds to the enemy's best move after we took the random action
        else:
            print("")
            newUR = self.getListNextUR(gameState, possibleMoves) # newUR = (listOfU, listOfReward)
            iBestMove = np.argmax(newUR[0]) # We take the best action (with the best U)
            if self.verbose:
                print("New UR : ", newUR)
                print("iBestMove : ", iBestMove)
            reward = newUR[1][iBestMove]
            newU = newUR[0][iBestMove]
            action = possibleMoves[iBestMove]

        if self.verbose:
            print("Action selected : " + str(action.toPDN()))

        if reward != self.normalReward:
            update = reward
        else:
            update = reward + self.gamma * newU # updated U, according to TD(0) algorithm
        y = np.array([update]).reshape(1,1)

        if self.verbose:
            print("Update : " + str(update))
            print("Fitting...")

        self.fit(gameState, y)
        #time.sleep(0.04)

        return action.toPDN()

    def nextMoveTest(self, gameState):
        """ Same as nextMoveTrain, but without the fitting/update step """
        possibleMoves = list(gameState.getStateMoveDict().values())
        U = self.predict(gameState)
        print ("U : " + str(U))

        newUR = self.getListNextUR(gameState, possibleMoves) # newUR = (listOfU, listOfReward)
        iBestMove = np.argmax(newUR[0])
        if self.verbose:
            print("New UR : ", newUR)
            print("iBestMove : ", iBestMove)
        reward = newUR[1][iBestMove]
        newU = newUR[0][iBestMove]
        action = possibleMoves[iBestMove]

        if self.verbose:
            print("Action selected : " + str(action.toPDN()))

        return action.toPDN()

    def getReward(self, gameState):
        if not gameState.getStateMoveDict():
            hasWon = not gameState.isWhiteTurn
            if hasWon:
                return self.winningReward
            else:
                return -self.winningReward
        else:
            return self.normalReward

    def getInput(self,gameState):
        """ Turn the gameState into the format given to the input of the NN """
        listCells = gameState.boardState.cells
        tInput = np.zeros((8,8))
        nbrWhites, nbrBlacks, nbrKingsBlack, nbrKingsWhite = 0,0,0,0
        iterCell = listCells.__iter__()
        for row in range(8):
            for col in range(8):
                if (row + col) % 2 == 1:
                    cell = iterCell.__next__()
                    if cell == Cell.empty:
                        tInput[row,col] = 0
                    if cell == Cell.b:
                        tInput[row,col] = -1
                    if cell == Cell.B:
                        tInput[row,col] = -3
                    if cell == Cell.w:
                        tInput[row,col] = 1
                    if cell == Cell.W:
                        tInput[row,col] = 3
        return tInput.reshape(1,1,8,8)

    def predict(self, gameState):
        return self.model.predict(self.getInput(gameState), batch_size=1)

    def fit(self, gameState, y):
        return self.model.fit(self.getInput(gameState), y, batch_size=1, nb_epoch=1, verbose=self.verbose)

    def saveWeights(self, filename='weights.h5'):
        self.model.save_weights(filename, overwrite=True)

    def loadWeights(self, filename='weights.h5'):
        self.model.load_weights(filename)

    def __str__(self):
        return self.name
Example #18
	def ann(self):
		# print(self.company.X_train.shape[1])
		
		model = Sequential()
		model.add(Dense(input_dim=self.company.X_train.shape[1], output_dim=50, init="glorot_uniform"))
		#model.add(Activation('tanh'))
		model.add(Dropout(0.1))
		model.add(Dense(input_dim=50, output_dim=10, init="uniform"))
		model.add(Activation('tanh'))
		#model.add(Dropout(0.5))
		model.add(Dense(input_dim=10, output_dim=1, init="glorot_uniform"))
		model.add(Activation("tanh"))

		sgd = SGD(lr=0.3, decay=1e-6, momentum=0.9, nesterov=True)
		model.compile(loss='mean_squared_error', optimizer='rmsprop')
		early_stopping = EarlyStopping(monitor='val_loss', patience=110)

		model.fit(self.company.X_train, self.company.y_train, nb_epoch=1000, validation_split=.1, batch_size=16, verbose = 1, show_accuracy = True, shuffle = False, callbacks=[early_stopping])
		self.ann_mse = model.evaluate(self.company.X_cv, self.company.y_cv, show_accuracy=True, batch_size=16)
		print(self.ann_mse)
		self.ann_preds = model.predict(self.company.X_test)

		yaml_string = model.to_yaml()
		with open(self.company.fin_dir + '/ann-models/' + self.company.experiment_version + '_ann_model.yml', 'w+') as outfile:
			outfile.write(yaml_string)  # to_yaml() already returns a YAML string; no second yaml.dump needed
		#model.save_weights(self.company.fin_dir + '/ann-models/' + self.company.experiment_version +'_ann_weights')
		"""
		nb_features = self.company.X_train.shape[1]
		X_train = self.company.X_train.reshape(self.company.X_train.shape + (1, ))
		X_test = self.company.X_test.reshape(self.company.X_test.shape + (1, ))
		print X_train.shape

		model = Sequential()
		model.add(Convolution1D(nb_filter = 24, filter_length = 1, input_shape =(nb_features,1) ))
		model.add(Activation("tanh"))
		model.add(Dropout(0.2)) # some dropout to help w/ overfitting
		model.add(Convolution1D(nb_filter = 48, filter_length= 1, subsample_length= 1))
		model.add(Activation("tanh"))
		model.add(Convolution1D(nb_filter = 96, filter_length= 1, subsample_length=1))
		model.add(Activation("tanh"))
		model.add(Dropout(0.3))
		model.add(Convolution1D(nb_filter = 192, filter_length= 1, subsample_length=1))
		model.add(Activation("tanh"))
		model.add(Dropout(0.6))
		model.add(MaxPooling1D(pool_length=2))
		# flatten to add dense layers
		model.add(Flatten())
		#model.add(Dense(input_dim=nb_features, output_dim=50))
		model.add(Dense(nb_features * 2))
		model.add(Activation("tanh"))
		#model.add(Dropout(0.5))
		model.add(Dense(1))
		model.add(Activation("linear"))
		sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
		model.compile(loss='mean_squared_error', optimizer='sgd')
		early_stopping = EarlyStopping(monitor='val_loss', patience=5)

		model.fit(X_train, self.company.y_train, nb_epoch=50, validation_split=0.25, verbose = 1, callbacks=[early_stopping])
		self.ann_preds = model.predict(X_test)
		"""
		#print self.ann_preds
		#print "Trained ANN Score: %r" % score
		# visualize
		#plot(model, to_file= '/ann-training/' + self.company.fin_file_name + '.png')

		return
Example #19
        print('compiling...')

        dl.compile(loss='binary_crossentropy', optimizer='adam', class_mode='binary')

        print('training!')

        h = dl.fit(X[train_ix], y[train_ix], batch_size=32, nb_epoch=50, show_accuracy=True,
                   validation_data=(X[test_ix], y[test_ix]),
                   callbacks=[
                       EarlyStopping(verbose=True, patience=10, monitor='val_loss'),
                       ModelCheckpoint('./trainings/final-slac-maxout-hypercube-unnormalized-logloss-cvFold{}.h5'.format(foldN), monitor='val_loss', verbose=True, save_best_only=True),
                       ROCModelCheckpoint('./trainings/final-slac-maxout-hypercube-unnormalized-roc-cvFold{}.h5'.format(foldN), X[test_ix], y[test_ix], cube_weights[test_ix], verbose=True)
                   ],
                   sample_weight=cube_weights[train_ix])
        foldN += 1
        # sample_weight=np.power(weights, 0.7))
except KeyboardInterrupt:
    print('ended early!')

# yhat = dl.predict(X, verbose=True).ravel()

# np.save('./yhat-cube.npy', yhat.astype('float32'))


with open('./trainings/final-slac-maxout-unnormalizedphypercube.yaml', 'w') as f:
    f.write(dl.to_yaml())


Example #20
#proba = model.predict_proba(X_test, batch_size=3)
#print('Test predict_proba:', proba)
#plot(model, to_file='model.png')

#predict = model.predict(X_test, batch_size=batch_size, verbose=0)
#print( predict )
#print( 'sum', np.sum(y_test-predict) )
exit()
###########
# Save
###########
# save as JSON
json_string = model.to_json()

# save as YAML
yaml_string = model.to_yaml()

model.save_weights('my_model_weights.h5')

###########
# Load
###########
model = model_from_json(open('my_model_architecture.json').read())
model.load_weights('my_model_weights.h5')

###########
# compile
###########
model.compile(optimizer='adagrad', loss='mse')
"""
#使用data augmentation的方法
        vertical_flip=False) # randomly flip images
    # compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_train)

print('Predicting train labels...')
(X_train2, y_train2) = next(datagen.flow(X_train, y_train, batch_size=X_train.shape[0]))  # flow() is an infinite generator; take a single full-size batch
y_pred_train=model.predict_classes(X_train2)
from sklearn.metrics import accuracy_score
print('Train accuracy:',accuracy_score(y_train,y_pred_train))

print('Predicting test labels...')
(X_test2, y_test2) = next(datagen.flow(X_test, y_test, batch_size=X_test.shape[0]))
y_pred_test=model.predict_classes(X_test2)
print('Test accuracy:',accuracy_score(y_test,y_pred_test))

# Save the model:
with open('model_architecture.yaml', 'w') as ff:
    ff.write(model.to_yaml())

# Save the predictions to HDF5 File:
f = h5py.File('model_predictions_50epoch.h5', 'w')
f.create_dataset('X_train', data=X_train2)
f.create_dataset('y_train', data=y_train2)
f.create_dataset('y_pred_train', data=y_pred_train)
f.create_dataset('X_test', data=X_test2)
f.create_dataset('y_test', data=y_test2)
f.create_dataset('y_pred_test', data=y_pred_test)
f.close()  # make sure the datasets are flushed to disk
Example #22
    # define model
    model = Sequential()
    model.add(Embedding(vocab_size, args.embedding,
                        input_length=args.length - 1))
    model.add(LSTM(args.unit, dropout=args.dropout))
    model.add(Dense(vocab_size, activation='softmax'))

    optimizer = select_optimizer(args.optimizer.lower())
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer(clipnorm=args.clipnorm),
                  metrics=[perplexity])

    # fit network
    train_generator = DataGenerator(args.train_path, args.batch, args.length, vocab_size)
    valid_generator = DataGenerator(args.dev_path, args.batch, args.length, vocab_size)

    train_data_size = calc_data_size(args.train_path)
    dev_data_size = calc_data_size(args.dev_path)

    model.fit_generator(generator=train_generator,
                        validation_data=valid_generator,
                        steps_per_epoch=int(np.ceil(train_data_size / args.batch)),
                        validation_steps=int(np.ceil(dev_data_size / args.batch)),
                        epochs=args.epoch,
                        use_multiprocessing=False, verbose=1)

    # Save model to file
    open(os.path.join(args.out, 'rnnlm.yaml'), 'w').write(model.to_yaml())
    model.save_weights(os.path.join(args.out, 'rnnlm.hdf5'))
Example #23
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))

keras_loss = 'categorical_crossentropy'
keras_optimizer = 'sgd'

# Compile model
#sgd = SGD(lr=0.01)
model.compile(loss=keras_loss, optimizer=keras_optimizer, metrics=["accuracy"])
# Build RDD from numpy features and labels
rdd = to_simple_rdd(sc, x_train, y_train)

# Initialize SparkModel from Keras model and Spark context
print(model.to_yaml())
adagrad = elephas_optimizers.Adagrad(lr=0.01)
#sgd = elephas_optimizers.SGD(lr=1.0)

spark_model = SparkModel(sc,
                         model,
                         keras_loss=keras_loss,
                         keras_optimizer=keras_optimizer,
                         optimizer=adagrad,
                         frequency='batch',
                         mode='asynchronous',
                         num_workers=4)

# Train Spark model
spark_model.train(rdd, nb_epoch=nb_epoch, batch_size=batch_size, verbose=2, validation_split=0.1)
Example #24
class DenoisingAutoencoder(object):
    """
    Neural network which implements a denoising auto-encoder. Inputs are
    convoluted before being fed into an encoding layer. From the encoding layer
    we learn to recover the original signal.
    """
    def __init__(self, window_size, model_path=None, weight_path=None):
        self.num_filters = 8
        self.window_size = window_size
        self.size = (window_size - 3) * self.num_filters

        if model_path is not None:
            self.load_model(model_path)
        else:
            self.initialize_model()

        if weight_path is not None:
            self.load_weights(weight_path)

    def initialize_model(self):
        """Initialize the network model."""
        self.model = Sequential()

        self.model.add(Convolution1D(self.num_filters, 4, 'uniform', 'linear',
                                     border_mode='valid', subsample_length=1,
                                     input_dim=1,
                                     input_length=self.window_size))
        self.model.add(Flatten())
        self.model.add(Dense(output_dim=self.size, init='uniform',
                             activation='relu'))
        self.model.add(Dense(128, 'uniform', 'relu'))
        self.model.add(Dense(self.size, 'uniform', 'relu'))
        self.model.add(Reshape(dims=(self.window_size - 3, self.num_filters)))
        self.model.add(Convolution1D(1, 4, 'uniform', 'linear',
                                     border_mode='valid', subsample_length=1,
                                     input_dim=1, input_length=self.size))

        self.model.compile(loss='mean_squared_error', optimizer='rmsprop')

    def train(self, aggregate_power, device_power):
        """Train the network given the aggregate and device powers."""
        self.model.fit(aggregate_power, device_power, batch_size=10, nb_epoch=1)

    def save_model(self, path):
        """Save the network model to the given path as yaml."""
        yaml_string = self.model.to_yaml()
        path = os.path.abspath(path)

        with open(path, 'w') as fd:
            fd.write(yaml_string)

    def save_weights(self, path):
        """Save the network weights to the given path in HDF5."""
        path = os.path.abspath(path)
        self.model.save_weights(path, overwrite=True)

    def load_model(self, model_path):
        """ Load the network model from the given path."""
        model_path = os.path.abspath(model_path)
        with open(model_path, 'r') as fd:
            self.model = model_from_yaml(fd.read())

    def load_weights(self, weight_path):
        """Load the network weights from the given path."""
        weight_path = os.path.abspath(weight_path)
        self.model.load_weights(weight_path)
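A hedged usage sketch for the class above; the window size and file names are illustrative:

dae = DenoisingAutoencoder(window_size=64)
dae.save_model('dae_model.yaml')
dae.save_weights('dae_weights.h5')

# rebuild the same network from the YAML + HDF5 pair;
# note that load_model() does not recompile, so call
# restored.model.compile(...) before training the restored instance
restored = DenoisingAutoencoder(window_size=64,
                                model_path='dae_model.yaml',
                                weight_path='dae_weights.h5')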
Example #25
def main(hypes_file, data_dir, override):
    """Orchestrate."""
    with open(hypes_file, 'r') as f:
        hypes = json.load(f)
    if 'training' not in hypes:
        hypes['training'] = {}
    if 'make_equal' not in hypes['training']:
        hypes['training']['make_equal'] = False

    base = os.path.dirname(hypes_file)
    model_file_path = os.path.join(base, '%s.yaml' % hypes['model']['name'])
    model_file_path = os.path.abspath(model_file_path)
    weights_file_path = os.path.join(base, '%s.hdf5' % hypes['model']['name'])
    weights_file_path = os.path.abspath(weights_file_path)

    if not os.path.isfile(model_file_path) or override:
        if not os.path.isfile(model_file_path):
            logging.info("Did not find '%s'. Start training...",
                         model_file_path)
        else:
            logging.info("Override '%s'. Start training...",
                         model_file_path)

        # Get data
        # x_files, y_files = inputs(hypes, None, 'train', data_dir)
        x_files, y_files = get_file_list(hypes, 'train')
        x_files, y_files = sklearn.utils.shuffle(x_files,
                                                 y_files,
                                                 random_state=0)

        x_train, y_train = get_traindata_single_file(hypes,
                                                     x_files[0],
                                                     y_files[0])

        nb_features = x_train[0].shape[0]
        logging.info("Input gets %i features", nb_features)

        # Make model
        model = Sequential()
        model.add(Dense(64,
                  input_dim=nb_features,
                  init='uniform',
                  activation='sigmoid'))
        model.add(Dropout(0.5))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adagrad',  # rmsprop
                      metrics=['accuracy'])

        generator = generate_training_data(hypes, x_files, y_files)
        t0 = time.time()
        sep = hypes['solver']['samples_per_epoch']
        if True:
            class_weight = get_class_weight(hypes)
            logging.info("class_weights = %s", class_weight)
            model.fit_generator(generator,
                                samples_per_epoch=sep,
                                nb_epoch=hypes['solver']['epochs'],
                                verbose=1,
                                validation_data=(x_train, y_train),
                                class_weight=class_weight)
        else:
            logging.info("Fit with .fit")
            x_train, y_train = inputs(hypes, None, 'train', data_dir)
            model.fit(x_train, y_train, batch_size=128, nb_epoch=1)
        t1 = time.time()
        print("Training Time: %0.4f" % (t1 - t0))

        # save as YAML
        yaml_string = model.to_yaml()
        with open(model_file_path, 'w') as f:
            f.write(yaml_string)
        model.save_weights(weights_file_path)

        # Evaluate
        data = get_file_list(hypes, 'test')
        logging.info("Start segmentation")
        analyze.evaluate(hypes,
                         data,
                         data_dir,
                         model,
                         elements=[0, 1],
                         get_segmentation=get_segmentation)
    else:
        logging.info("## Found '%s'.", model_file_path)
        with open(model_file_path) as f:
            yaml_string = f.read()
        model = model_from_yaml(yaml_string)
        model.load_weights(weights_file_path)
        model.compile(optimizer='adagrad', loss='binary_crossentropy')
        data = get_file_list(hypes, 'test')
        analyze.evaluate(hypes,
                         data,
                         data_dir,
                         model,
                         elements=[0, 1],
                         get_segmentation=get_segmentation)
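`generate_training_data` is referenced but not shown. A minimal sketch of what it presumably does, cycling endlessly over the file lists and yielding one file's worth of samples per step; this is an assumption, not the original helper:

def generate_training_data(hypes, x_files, y_files):
    # fit_generator expects an endless generator of (x, y) batches
    while True:
        for x_file, y_file in zip(x_files, y_files):
            x, y = get_traindata_single_file(hypes, x_file, y_file)
            yield x, y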
Example #26
    def train_cnn(self, input, output, test_input):
        # Add Distance Prior
        # input = add_dist_prior(input)
        print(input.shape)
        num_samples, num_channels, num_rows, num_cols = input.shape
        print(output.shape)
        output = output.reshape(1600,1,32,32)

        # Configurations
        batch_size = 100
        num_epoch = 3000

        model = Sequential()
        model.add(ZeroPadding2D((2,2), \
                input_shape=(num_channels,num_rows,num_cols)
                ))
        model.add(Convolution2D(64,5,5, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((2,2)))
        model.add(Convolution2D(64,5,5, \
                subsample=(1,1), \
                activation='relu'))
        #model.add(MaxPooling2D((2,2)))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(64,4,4, \
                subsample=(2,2), \
                activation='relu'))

        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128,3,3, \
                subsample=(1,1), \
                activation='relu'))
        #model.add(MaxPooling2D((2,2)))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128,4,4, \
                subsample=(2,2), \
                activation='relu'))

        '''
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(256,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(256,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(256,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(MaxPooling2D((4,4)))
        '''

        '''
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(1024,3,3, \
                subsample=(1,1), \
                activation='relu'))

        model.add(Flatten())
        model.add(Dense(512, \
                activation='relu'))
        model.add(Dropout(0.25))
        model.add(Dense(1024*4*4, \
                activation='relu'))
        model.add(Dropout(0.25))
        model.add(Reshape((1024,4,4)))
        '''

        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128,3,3, \
                subsample=(1,1), \
                activation='relu'))

        model.add(ZeroPadding2D((9,9)))
        model.add(Convolution2D(64,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(64,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(64,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(64,3,3, \
                subsample=(1,1), \
                activation='relu'))

        '''
        model.add(UpSampling2D((2,2)))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(512,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(512,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(512,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(Dropout(0.3))

        model.add(UpSampling2D((2,2)))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(256,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(256,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(256,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(Dropout(0.3))

        model.add(UpSampling2D((2,2)))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(128,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(Dropout(0.3))
        '''

        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(32,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(16,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(4,3,3, \
                subsample=(1,1), \
                activation='relu'))
        model.add(ZeroPadding2D((1,1)))
        model.add(Convolution2D(1,3,3, \
                subsample=(1,1), \
                activation='relu'))

        # Compile
        sgd = SGD(lr=0.01, decay=0.0005, momentum=0.9, nesterov=False)
        model.compile( optimizer='sgd', \
                loss='mean_squared_error' )

        #early_stop = EarlyStopping(monitor='val_loss', patience=2)
        #early_stop = EarlyStopping(monitor='loss', patience=4)
        hist = model.fit(input, output, \
                  batch_size=batch_size, nb_epoch=num_epoch, verbose=1, \
                  shuffle=True, \
                  validation_split=0.1)
                  #callbacks=[early_stop])

        # TODO: move prediction to a separate function
        # Prediction
        eval_score = model.evaluate(input, output, batch_size=batch_size)
        print('eval', eval_score)
        predict = model.predict(test_input, batch_size=batch_size)
        #predict = model.predict(input, batch_size=batch_size)
        print('predict', predict)

        # Save model
        model.save('model.h5')
        model_json = model.to_json()
        open('model_architecture.json', 'w').write(model_json)
        model_yaml = model.to_yaml()
        open('model_architecture.yaml', 'w').write(model_yaml)
        model.save_weights('weights.h5')
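
        # A minimal sketch of restoring the artifacts saved above (assumes the
        # same Keras version and no custom layers):
        #
        #   from keras.models import load_model, model_from_json
        #   full_model = load_model('model.h5')   # architecture + weights + optimizer state
        #   arch_only = model_from_json(open('model_architecture.json').read())
        #   arch_only.load_weights('weights.h5')  # weights only; compile before reuse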

        # Visualization
        '''
        I1 = input
        print("I1 shape: ", I1.shape)
        print('layer 0: ', model.layers[0].get_config())
        print

        l1f = T.function([model.layers[0].input], \
                model.layers[1].output, allow_input_downcast=True)
        l1o = np.array(l1f(I1))
        print('layer 1: ', model.layers[1].get_config())
        print("l1o shape: ", l1o.shape)
        l1w = np.squeeze(model.layers[1].W.get_value(borrow=True))
        #  W1 = model.layers[1].get_weights()[0] # 0 is W, 1 is b
        print("l1w shape: ", l1w.shape)
        print

        l2f = T.function([model.layers[1].input], \
                act1.output, allow_input_downcast=True)
        l2o = np.array(l2f(I1))
        print('layer 2: ', model.layers[2].get_config())
        print("l2o shape: ", l2o.shape)
        print

        f = plt.figure()
        plt.title('I1')
        nice_show(f,I1[0])
        f = plt.figure()
        plt.title('l1w')
        nice_show(f,l1w[0])
        f = plt.figure()
        plt.title('l2o')
        nice_show(f,l2o[0])

        plt.show()
        '''

        return predict
Example #27
0
# split into input (X) and output (Y) variables
X = dataset[:,0:8]
Y = dataset[:,8]
# create model
model = Sequential()
model.add(Dense(12, input_dim=8, init='uniform', activation='relu'))
model.add(Dense(8, init='uniform', activation='relu'))
model.add(Dense(1, init='uniform', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit the model
model.fit(X, Y, nb_epoch=150, batch_size=10, verbose=0)
# evaluate the model
scores = model.evaluate(X, Y, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

# serialize model to YAML
model_yaml = model.to_yaml()
with open("model.yaml", "w") as yaml_file:
    yaml_file.write(model_yaml)
# serialize weights to HDF5
model.save_weights("model.h5")
print("Saved model to disk")

# later...

# load YAML and create model
yaml_file = open('model.yaml', 'r')
loaded_model_yaml = yaml_file.read()
yaml_file.close()
loaded_model = model_from_yaml(loaded_model_yaml)
# load weights into new model
loaded_model.load_weights("model.h5")
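
# The reloaded model must be compiled before it can be evaluated; a minimal
# sketch, mirroring the compile settings used when the model was trained above:
loaded_model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
score = loaded_model.evaluate(X, Y, verbose=0)
print("%s: %.2f%%" % (loaded_model.metrics_names[1], score[1] * 100))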
Example #28
0
model.add(Dense(1024, activation = 'relu'))
model.add(Dense(512, activation = 'relu'))
model.add(Dense(128, activation = 'relu'))
model.add(Dense(8 * opts["color_patch_size"] * opts["color_patch_size"], W_regularizer = l2(0.01), b_regularizer = l2(0.01)))
model.add(Activation("relu"))
model.add(Dropout(0.5))
model.add(Dense(2 * opts["color_patch_size"] * opts["color_patch_size"], W_regularizer = l2(0.01), b_regularizer = l2(0.01)))

print("Compiling model")
sgd = SGD(lr = 10e-4, momentum = 0.9, decay = 10e-4)
rms = RMSprop()

#sgd = SGD()
model.compile(loss = 'mean_squared_error', optimizer = sgd)
#texture_model.compile(loss = 'mean_squared_error', optimizer = sgd)
yaml_model = model.to_yaml()
open(model_path + model_file, "w").write(yaml_model)
# deal with command line parameters
if len(sys.argv) > 1:
    if sys.argv[1] == "train":
        opts["train_flag"] = True
    elif sys.argv[1] == "test":
        opts["train_flag"] = False
    else:
        print("Wrong parameter")
        sys.exit()

if opts["train_flag"]:
    print("Get random patches")
    [train_x_patches, train_x_pixel_patches, train_y_patches] = rand_patches(train_x, train_y, opts)
    # [train_x_patches, train_x_pixel_patches, train_y_patches] = split_test_data(train_x, train_y, opts)
Example #29
0
def test_sequential(in_tmpdir):
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # TODO: factor out
    def data_generator(x, y, batch_size=50):
        index_array = np.arange(len(x))
        while 1:
            batches = make_batches(len(x), batch_size)
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                x_batch = x[batch_ids]
                y_batch = y[batch_ids]
                yield (x_batch, y_batch)
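
    # For reference, make_batches (from keras.engine.training in older Keras,
    # keras.engine.training_utils in Keras 2.2+) returns (start, end) index
    # pairs covering `size` in chunks of `batch_size`; a rough equivalent:
    #
    #   def make_batches(size, batch_size):
    #       num_batches = (size + batch_size - 1) // batch_size
    #       return [(i * batch_size, min(size, (i + 1) * batch_size))
    #               for i in range(num_batches)]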

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
              validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2,
              validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1,
              shuffle=False)

    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test)

    prediction = model.predict_generator(data_generator(x_test, y_test), 1,
                                         max_queue_size=2, verbose=1)
    gen_loss = model.evaluate_generator(data_generator(x_test, y_test, 50), 1,
                                        max_queue_size=2)
    pred_loss = K.eval(K.mean(losses.get(model.loss)(K.variable(y_test),
                                                     K.variable(prediction))))

    assert(np.isclose(pred_loss, loss))
    assert(np.isclose(gen_loss, loss))

    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # Test serialization
    config = model.get_config()
    assert 'name' in config
    new_model = Sequential.from_config(config)
    assert new_model.weights  # Model should be built.

    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)

    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
Example #30
0
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)

model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, 128))
model.add(Dropout(0.5))
model.add(Dense(128, 1, W_regularizer='identity', b_constraint='maxnorm'))
model.add(Activation('sigmoid'))
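
# Note: this snippet uses the Keras 0.x layer signatures, where LSTM(128, 128)
# and Dense(128, 1) take (input_dim, output_dim) positionally; in Keras 1.x/2.x
# these would be written LSTM(128) and Dense(1), with the input size inferred
# from the previous layer.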

model.get_config(verbose=1)

#####################################
# save model w/o parameters to yaml #
#####################################

yaml_no_params = model.to_yaml()

no_param_model = model_from_yaml(yaml_no_params)
no_param_model.get_config(verbose=1)

######################################
# save multi-branch sequential model #
######################################

seq = Sequential()
seq.add(Merge([model, model], mode='sum'))
seq.get_config(verbose=1)
merge_yaml = seq.to_yaml()
merge_model = model_from_yaml(merge_yaml)
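
# Note: Merge (the capital-M layer from Keras 0.x/1.x) combines the two
# branches' outputs element-wise here (mode='sum'); the merged Sequential
# still needs compile() before training, e.g.:
#
#   seq.compile(loss='binary_crossentropy', optimizer='rmsprop')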

large_model = Sequential()