Code Example #1
File: -add-spark_keras.py  Project: xyj77/CodeRecord
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from pyspark import SparkConf, SparkContext
from elephas.utils.rdd_utils import to_simple_rdd
from elephas.spark_model import SparkModel

# The snippet starts mid-definition: the initial Convolution2D input layer
# and hyperparameters such as nb_filters, nb_conv, nb_pool, and nb_classes
# are defined earlier in the original file
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adadelta')

# Spark setup
conf = SparkConf().setAppName(APP_NAME).setMaster(MASTER_IP)
sc = SparkContext(conf=conf)

# Build RDD from numpy features and labels
rdd = to_simple_rdd(sc, X_train, Y_train)

# Initialize SparkModel from Keras model and Spark context
spark_model = SparkModel(sc, model)

# Train Spark model
spark_model.train(rdd, nb_epoch=nb_epoch, batch_size=batch_size, verbose=0,
                  validation_split=0.1, num_workers=24)

# Evaluate Spark model by evaluating the underlying model
score = spark_model.get_network().evaluate(X_test, Y_test, show_accuracy=True, verbose=2)
print('Test accuracy:', score[1])
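
These snippets assume that X_train, Y_train, X_test, and Y_test already exist as numpy arrays. A minimal sketch of the preparation example #1 implies, assuming MNIST and the same Keras 0.x-era API (names and values here are illustrative, not taken from the original project):

from keras.datasets import mnist
from keras.utils import np_utils

nb_classes = 10

# Load MNIST and reshape to (samples, channels, rows, cols), the Theano
# dim ordering that Convolution2D expected in this Keras version
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32') / 255
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32') / 255

# One-hot encode the integer labels for categorical_crossentropy
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)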
Code Example #2
# Single-machine Keras training, left commented out in the original:
#checkpointer = ModelCheckpoint(filepath=MODEL_ROOT+MODEL+".h5", verbose=1, save_best_only=False)
#early_stopping = EarlyStopping(monitor='val_acc', patience=5)
#print 'Start training...'
#model.fit( X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose, callbacks=[checkpointer],validation_split=validation_split, shuffle=shuffle,show_accuracy=show_accuracy)

from pyspark import SparkConf, SparkContext
from elephas.utils.rdd_utils import to_simple_rdd
from elephas.spark_model import SparkModel
from elephas import optimizers as elephas_optimizers

# Create Spark context
conf = SparkConf().setAppName(MODEL)
sc = SparkContext(conf=conf)

# Build RDD from numpy features and labels
rdd = to_simple_rdd(sc, X_train, Y_train)

# Initialize SparkModel from the Keras model and Spark context, using
# elephas' RMSprop optimizer in asynchronous mode
rmsprop = elephas_optimizers.RMSprop()

spark_model = SparkModel(sc,
                         model,
                         optimizer=rmsprop,
                         frequency='epoch',
                         mode='asynchronous',
                         num_workers=3)

spark_model.train(rdd,
                  nb_epoch=nb_epoch,
                  batch_size=batch_size,
                  verbose=2,
                  validation_split=validation_split)

# Persist the trained weights of the underlying Keras network
spark_model.get_network().save_weights(MODEL_FILE_NAME)
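
Since only the weights are persisted here, restoring the model later means rebuilding the identical architecture and loading the file into it. A minimal sketch of that reload step, assuming a model object built with the same layers as the trained one:

# load_weights restores parameters only, not the architecture, so the
# rebuilt model must match the trained one layer for layer
model.load_weights(MODEL_FILE_NAME)
predictions = model.predict(X_test, batch_size=batch_size)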

Code Example #3
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from pyspark import SparkConf, SparkContext
from elephas.utils.rdd_utils import to_simple_rdd
from elephas.spark_model import SparkModel

# Define the MLP; Dense(input_dim, output_dim) is the old Keras 0.x
# signature this snippet was written against
model = Sequential()
model.add(Dense(784, 128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128, 128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128, 10))
model.add(Activation('softmax'))

# Compile model
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms)

# Create Spark context
conf = SparkConf().setAppName('Mnist_Spark_MLP').setMaster('local[8]')
sc = SparkContext(conf=conf)

# Build RDD from numpy features and labels
rdd = to_simple_rdd(sc, X_train, Y_train)

# Initialize SparkModel from Keras model and Spark context
spark_model = SparkModel(sc, model)

# Train Spark model
spark_model.train(rdd, nb_epoch=nb_epoch, batch_size=batch_size, verbose=0,
                  validation_split=0.1, num_workers=8)

# Evaluate Spark model by evaluating the underlying model
score = spark_model.get_network().evaluate(X_test, Y_test, show_accuracy=True, verbose=2)
print('Test accuracy:', score[1])
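
The to_simple_rdd helper used in each example turns the in-memory numpy arrays into the RDD of (features, label) pairs that SparkModel.train consumes. Conceptually it amounts to the following (a sketch of the idea, not elephas' exact source):

def to_simple_rdd_sketch(sc, features, labels):
    # Pair each sample with its label and distribute the pairs across
    # the cluster as a Spark RDD
    pairs = [(x, y) for x, y in zip(features, labels)]
    return sc.parallelize(pairs)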