Example #1

# Import assumed by this snippet
from elephas.spark_model import SparkModel

def test_sequential_serialization(spark_context, classification_model):
    classification_model.compile(optimizer="sgd",
                                 loss="categorical_crossentropy",
                                 metrics=["acc"])
    spark_model = SparkModel(classification_model,
                             frequency='epoch',
                             mode='synchronous')
    spark_model.save("elephas_sequential.h5")
Example #2

import pytest
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from elephas.spark_model import SparkModel


# Create the Spark context via the fixture; usefixtures only works as a
# decorator, so calling it inside the test body has no effect
@pytest.mark.usefixtures("spark_context")
def test_sequential_serialization():

    seq_model = Sequential()
    seq_model.add(Dense(128, input_dim=784))
    seq_model.add(Activation('relu'))
    seq_model.add(Dropout(0.2))
    seq_model.add(Dense(128))
    seq_model.add(Activation('relu'))
    seq_model.add(Dropout(0.2))
    seq_model.add(Dense(10))
    seq_model.add(Activation('softmax'))

    seq_model.compile(
        optimizer="sgd", loss="categorical_crossentropy", metrics=["acc"])
    spark_model = SparkModel(seq_model, frequency='epoch', mode='synchronous')
    spark_model.save("elephas_sequential.h5")
Example #3

from keras.models import Model
from keras.layers import Input, Dense
from elephas.spark_model import SparkModel


def test_model_serialization():
    # This returns a tensor
    inputs = Input(shape=(784,))

    # a layer instance is callable on a tensor, and returns a tensor
    x = Dense(64, activation='relu')(inputs)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)

    # This creates a model that includes
    # the Input layer and three Dense layers
    model = Model(inputs=inputs, outputs=predictions)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    spark_model = SparkModel(model, frequency='epoch', mode='synchronous')
    spark_model.save("elephas_model.h5")
Example #4

import pytest
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.utils import np_utils
from elephas.spark_model import SparkModel


# Create the Spark context via the fixture (applied as a decorator,
# not called inside the test body)
@pytest.mark.usefixtures("spark_context")
def test_sequential_serialization():
    # Define basic parameters
    batch_size = 64
    nb_classes = 10
    epochs = 1

    # Load data
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype("float32")
    x_test = x_test.astype("float32")
    x_train /= 255
    x_test /= 255
    print(x_train.shape[0], 'train samples')
    print(x_test.shape[0], 'test samples')

    # Convert class vectors to binary class matrices
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)

    seq_model = Sequential()
    seq_model.add(Dense(128, input_dim=784))
    seq_model.add(Activation('relu'))
    seq_model.add(Dropout(0.2))
    seq_model.add(Dense(128))
    seq_model.add(Activation('relu'))
    seq_model.add(Dropout(0.2))
    seq_model.add(Dense(10))
    seq_model.add(Activation('softmax'))

    seq_model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["acc"])
    spark_model = SparkModel(seq_model, frequency='epoch', mode='synchronous')
    spark_model.save("elephas_sequential.h5")
Example #5

# The original snippet begins mid-call: the model and RDD setup are not
# shown. Reconstructed minimally, assuming `model_9` (used below) is
# the compiled Keras model; only num_workers=3 is from the original.
spark_model = SparkModel(model_9, num_workers=3)
# Train the Spark model.
spark_model.fit(rdd, epochs=10, batch_size=32, verbose=1, validation_split=0.1)

score = spark_model.master_network.evaluate(x_test, y_test, verbose=1)
print('Test accuracy:', score)
"""### Predcit and evaluate Model"""
"""### Save Model"""

import json
# Serialize the architecture of the main model (model_9) to JSON
model_json = model_9.to_json()
with open("model_in_json.json", "w") as json_file:
    json.dump(model_json, json_file)

spark_model.save('resnet.h5')

# Load the saved model and use Keras to predict

from keras.models import load_model
from keras.models import model_from_json

with open('model_in_json.json', 'r') as f:
    model_json = json.load(f)

# model_from_json rebuilds the architecture only; load_model then
# restores the full model (architecture plus weights), overwriting it
model = model_from_json(model_json)
model = load_model('resnet.h5')
print("Loaded model successfully")

results = model.evaluate(x_test, y_test, verbose=2)
Example #6

import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.optimizers import SGD
from elephas.spark_model import SparkModel
from elephas.utils.rdd_utils import to_simple_rdd
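
# The loop below reads from `consumer`, which the original snippet never
# defines. A minimal sketch assuming kafka-python; the topic name and
# broker address are placeholders.
import json
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    'labeled-features',
    bootstrap_servers='localhost:9092',
    value_deserializer=lambda m: json.loads(m.decode('utf-8')))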
labels = []
features = []

for message in consumer:
    #print(message.value)
    labels.append(message.value["label"])
    features.append(message.value["features"]["values"])

# np.array(labels, features) passes `features` as the dtype argument;
# convert each list to an array separately instead
labels = np.array(labels)
features = np.array(features)

model = Sequential()
model.add(Dense(2, input_dim=11))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer=SGD())

lp_rdd = to_simple_rdd(sc, features, labels, categorical=True)
spark_model = SparkModel(model, frequency='epoch', mode='asynchronous')
spark_model.fit(lp_rdd,
                epochs=20,
                batch_size=32,
                verbose=0,
                validation_split=0.1)

spark_model.save("model.h5")
Example #7

# ============ MODEL SETUP ===========
# create_wavenet and its arguments are defined elsewhere in the
# original script
import keras
import numpy as np
from keras.optimizers import SGD

wavenet_model = create_wavenet(stack_layers, n_output_channels, n_filter_list, num_stacks, skip=False)
adam_opt = keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, amsgrad=False)
# Note: adam_opt above is defined but never used; the model compiles
# with SGD
wavenet_model.compile(optimizer=SGD(), loss='categorical_crossentropy')
print(wavenet_model.summary())


# ============ ELEPHAS TRAIN ===========
spark_model = SparkModel(wavenet_model, mode='hogwild', num_workers=128)
spark_model.fit(train_rdd, epochs=64, batch_size=64, verbose=1, validation_split=0.1)

print("Finished Training :)")

# =========== SAVE FITTED MODEL ===========
# Save model and weights out to local
filename_out = model_save_out + "_weights.h5"
spark_model.save(filename_out)

wavenet_json = wavenet_model.to_json()
with open(model_save_out + ".json", "w") as save_model:
    save_model.write(wavenet_json)


x_test = np.array(train_rdd.map(lambda x: x[0]).take(1))
y_test = np.array(train_rdd.map(lambda x: x[1]).take(1))

print("Final Loss = ", spark_model.master_network.evaluate(x_test, y_test, verbose=2))

Example #8

# The original snippet begins mid-call: the model and RDD setup are not
# shown. Reconstructed minimally, assuming `model_14` (used below) is
# the compiled Keras model; only num_workers=3 is from the original.
spark_model = SparkModel(model_14, num_workers=3)
# Train the Spark model.
spark_model.fit(rdd, epochs=10, batch_size=32, verbose=1, validation_split=0.1)

score = spark_model.master_network.evaluate(x_test, y_test, verbose=1)
print('Test accuracy:', score)
"""### Predcit and evaluate Model"""
"""### Save Model"""

import json
# Serialize the architecture of the main model (model_14) to JSON
model_json = model_14.to_json()
with open("model_in_json.json", "w") as json_file:
    json.dump(model_json, json_file)

spark_model.save('spark_model.h5')

# Load the saved model and use Keras to predict

from keras.models import load_model
from keras.models import model_from_json

with open('model_in_json.json', 'r') as f:
    model_json = json.load(f)

# model_from_json rebuilds the architecture only; load_model then
# restores the full model (architecture plus weights), overwriting it
model = model_from_json(model_json)
model = load_model('spark_model.h5')
print("Loaded model successfully")

results = model.evaluate(x_test, y_test, verbose=2)
Example #9

from elephas.spark_model import SparkModel, load_spark_model


# `model` is assumed to be a compiled Keras model provided elsewhere
def test_serialization():
    spark_model = SparkModel(model, frequency='epoch', mode='synchronous', num_workers=2)
    spark_model.save("test.h5")
    recov = load_spark_model("test.h5")