Example #1
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization, PReLU
from keras.regularizers import l2
from keras import optimizers


def network2(feature_dimension):
    model = Sequential()
    # Dense layer with L2 weight decay (Keras 2 API: kernel_initializer and
    # kernel_regularizer replace the old init and W_regularizer arguments)
    model.add(Dense(256, input_dim=feature_dimension,
                    kernel_initializer='uniform',
                    kernel_regularizer=l2(0.0001)))
    model.add(PReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.8))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(32, activation='relu'))
    model.add(Dropout(0.2))
    # three-way softmax output, so the labels should be one-hot encoded
    model.add(Dense(3, activation='softmax'))
    opt = optimizers.Adagrad(lr=0.015)
    model.compile(optimizer=opt, loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
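# --- Usage sketch (not part of the original snippet) ---
# A minimal, illustrative way to call network2(); the random data and the
# feature_dimension of 20 are assumptions, purely for demonstration.
import numpy as np
from keras.utils import to_categorical

X = np.random.rand(100, 20).astype('float32')         # 100 samples, 20 features (illustrative)
y = to_categorical(np.random.randint(0, 3, 100), 3)   # one-hot targets for the 3-way softmax

model = network2(feature_dimension=20)
model.fit(X, y, epochs=5, batch_size=16, validation_split=0.2)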
Example #2
import numpy as np
import matplotlib.pyplot as plt
from keras import models, layers

y_test = np.asarray(test_label).astype('float16')

x_val = x_train[:10000]
partial_x_train = x_train[10000:]

y_val = y_train[:10000]
partial_y_train = y_train[10000:]

# binary classifier over 10,000-dimensional input vectors; naming the instance
# `model` avoids shadowing the `models` module
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))

model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=50,
                    validation_data=(x_val, y_val))

history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']

epochs = range(1, len(loss_values) + 1)

plt.plot(epochs, loss_values, 'bo', label='Training loss')
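# The snippet cuts off after the training-loss plot; a plausible completion in
# the same plotting style (a sketch, not part of the original) adds the
# validation curve, labels, and legend:
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()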
Example #3
from keras.applications import InceptionResNetV2
from keras.layers import Dense, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import Nadam
from keras.utils import multi_gpu_model
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping

# ImageNet-pretrained base without its classification head
base_model = InceptionResNetV2(input_shape=(img_height, img_width, 3),
                               weights='imagenet',
                               include_top=False)
x = base_model.output
x = GlobalAveragePooling2D()(x)
predictions = Dense(nclasses, activation='softmax')(x)
pmodel = Model(base_model.input, predictions)
model = multi_gpu_model(pmodel, ngpus)
for layer in model.layers:
    layer.trainable = True
nadam = Nadam(lr=learn_rate)
print(
    f'=> creating model replicas for distributed training across {ngpus} gpus <='
)
model.compile(optimizer=nadam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print('=> done building model <=')

# TensorBoard

tensorboard = TensorBoard(log_dir='./logs/{}'.format(Name),
                          histogram_freq=0,
                          write_graph=True,
                          write_images=False)
callbacks_list = [
    ModelCheckpoint(final_weights_path,
                    monitor='val_acc',
                    verbose=1,
                    save_best_only=True), tensorboard,
    EarlyStopping(monitor='val_loss', patience=5, verbose=1)
]
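# The excerpt ends at the callback list; a hedged sketch of how training might
# continue from here, assuming generators and step counts (train_generator,
# val_generator, train_steps, val_steps, num_epochs) defined elsewhere in the
# original script:
model.fit_generator(train_generator,
                    steps_per_epoch=train_steps,
                    epochs=num_epochs,
                    validation_data=val_generator,
                    validation_steps=val_steps,
                    callbacks=callbacks_list)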
Example #4
# Fully Connected Layer
model.add(Flatten())
model.add(Dense(128))  # Fully connected layer in Keras
model.add(Activation('relu'))

# dropout some neurons to reduce overfitting
model.add(Dropout(drop_prob))

# Readout layer
model.add(Dense(num_classes))
model.add(Activation('softmax'))

# Set the loss, optimizer, and metric used to evaluate the model
model.compile(
    loss='categorical_crossentropy',
    optimizer='adam',  # was adadelta
    metrics=['accuracy'])

# Training settings
batch_size = 128
num_epoch = 2

# Fit the model to the training data; Keras displays the time per epoch, loss,
# and validation accuracy on the test data
model.fit(train_images,
          mnist.train.labels,
          batch_size=batch_size,
          epochs=num_epoch,  # Keras 2 renamed nb_epoch to epochs
          verbose=1,
          validation_data=(test_images, mnist.test.labels))
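# A short evaluation sketch that could follow training (not in the original),
# reusing the same test arrays:
score = model.evaluate(test_images, mnist.test.labels, verbose=0)
print('Test loss: {:.4f}, test accuracy: {:.4f}'.format(score[0], score[1]))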
Example #5
from keras import models, layers

one_hot_train_labels = to_one_hot(train_labels)
one_hot_test_labels = to_one_hot(test_labels)
'''Alternatively, Keras's built-in helper can one-hot encode the labels:
from keras.utils.np_utils import to_categorical
one_hot_train_labels = to_categorical(train_labels)
one_hot_test_labels = to_categorical(test_labels)
'''

# Build the network; naming the instance `model` avoids shadowing the
# `models` module
model = models.Sequential()
model.add(layers.Dense(64, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(46, activation='softmax'))

model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['acc'])

# Split off a validation set
x_val = x_train[0:1000]
partial_x_train = x_train[1000:]

y_val = one_hot_train_labels[0:1000]
partial_y_train = one_hot_train_labels[1000:]

# Start training
history = model.fit(partial_x_train,
                    partial_y_train,
                    epochs=20,
                    batch_size=512,
                    validation_data=(x_val, y_val))
Example #6
# (continuation: the Sequential model `models` and its first layers are
# defined earlier in the original source)
models.add(layers.Conv2D(64, (3, 3), activation='relu'))
models.add(layers.MaxPooling2D((2, 2)))
models.add(layers.Conv2D(128, (3, 3), activation='relu'))
models.add(layers.MaxPooling2D((2, 2)))
models.add(layers.Conv2D(128, (3, 3), activation='relu'))
models.add(layers.MaxPooling2D((2, 2)))
models.add(layers.Flatten())
models.add(layers.Dense(512, activation='relu'))
models.add(layers.Dense(1, activation='sigmoid'))

# Compile Network

# RMSprop optimizer, as usual. Because you ended the network with a single sigmoid unit,
#  you’ll use binary crossentropy as the loss
models.compile(loss='binary_crossentropy',
               optimizer=optimizers.RMSprop(lr=1e-4),
               metrics=['acc'])

# Data Preprocessing:
# Converting images to floating-point tensors

# 1. Read the picture files.
# 2. Decode the JPEG content to RGB grids of pixels.
# 3. Convert these into floating-point tensors.
# 4. Rescale the pixel values (between 0 and 255) to the [0, 1] interval
#    (as you know, neural networks prefer to deal with small input values).
# A sketch of these steps with flow_from_directory follows the generators below.


train_datagen = ImageDataGenerator(rescale=1./255) # Rescales all images by 1/255
test_datagen = ImageDataGenerator(rescale=1./255)
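# The generators above only define the rescaling; a hedged sketch of wiring
# them to image directories. train_dir/validation_dir and the 150x150 target
# size are assumptions, typical for this kind of binary convnet example:
train_generator = train_datagen.flow_from_directory(
    train_dir,                 # hypothetical directory of training images
    target_size=(150, 150),    # resize all images (assumed size)
    batch_size=20,
    class_mode='binary')       # binary labels to match the sigmoid output

validation_generator = test_datagen.flow_from_directory(
    validation_dir,            # hypothetical directory of validation images
    target_size=(150, 150),
    batch_size=20,
    class_mode='binary')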