Example #1
def transfer_model(X_train,y_train,batch_size,epoch):
    base_model = ResNet50(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
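    # Freeze the ImageNet-pretrained backbone so only the new classifier head is trained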
    for layer in base_model.layers:
        layer.trainable = False
    model = Flatten()(base_model.output)
    model = Dense(128, activation='relu')(model)
    model = Dropout(0.5)(model)
    model = Dense(121, activation='softmax')(model)
    model = Model(inputs=base_model.input, outputs=model)
    # Track categorical_accuracy explicitly so the callbacks below can monitor it
    model.compile(optimizer=keras.optimizers.Adam(lr=0.0001, decay=1e-5),
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    
    callbacks = [
        # Write TensorBoard logs to './logs'
        keras.callbacks.TensorBoard(log_dir='./logs'),
        # Reduce the learning rate when categorical_accuracy has not improved for 10 epochs
        keras.callbacks.ReduceLROnPlateau(monitor='categorical_accuracy', patience=10, verbose=2),
        # Stop training when categorical_accuracy has not improved for 15 epochs
        keras.callbacks.EarlyStopping(monitor='categorical_accuracy', patience=15, verbose=2)]

    model.fit(X_train, y_train, batch_size=batch_size, epochs=epoch, callbacks=callbacks)
    # parallel_model = multi_gpu_model(model, gpus=2)
    # parallel_model.compile(loss='categorical_crossentropy',
    #                    optimizer='adam',metrics=['accuracy']) 
    # parallel_model.fit(X_train,y_train, batch_size=batch_size, epochs=epoch)
    return model
Example #2
def counter_model(x_train_all, x_val_all, y_train_all, y_val_all):

    res_model = ResNet50(weights='imagenet',
                         include_top=False,
                         input_shape=(320, 320, 3))
    model = res_model.output
    model = Flatten(name='flatten')(model)
    model = Dense(1024, activation='relu')(model)
    model = Dense(512,
                  activation='relu',
                  activity_regularizer=regularizers.l2(0.2))(model)
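    # A single linear output unit: this head regresses a count (trained with MSE below)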
    leaf_pred = Dense(1)(model)

    epoch = 50
    csv_logger = keras.callbacks.CSVLogger('training.log', separator=',')
    early_stop = EarlyStopping(monitor='val_loss',
                               min_delta=0.03,
                               mode='min',
                               patience=8)

    model = Model(inputs=res_model.input, outputs=leaf_pred)
    model.compile(optimizer=Adam(lr=0.0001), loss='mse')
    history = model.fit(x_train_all,
                        y_train_all,
                        epochs=epoch,
                        validation_data=(x_val_all, y_val_all),
                        batch_size=16,
                        callbacks=[csv_logger, early_stop])

    return model
Example #3
def _train(self) -> Model:
    data, label = load_data0_cycle()
    train_data, test_data, train_label, test_label = train_test_split(
        data, label, test_size=0.2)
    # Each sample is an 8x200 single-channel matrix, so add a trailing channel axis
    train_data = np.reshape(train_data, train_data.shape + (1, ))
    train_label = to_categorical(train_label)
    test_data = np.reshape(test_data, test_data.shape + (1, ))
    test_label = to_categorical(test_label)
    network_input = Input(shape=(8, 200, 1))
    # If you change the network structure here, remember to also update the
    # parameters of the visualization function below
    network = Conv2D(filters=20, kernel_size=(1, 10))(network_input)
    network = Conv2D(filters=40, kernel_size=(4, 10),
                     activation=tanh)(network)
    network = MaxPool2D((2, 2))(network)
    network = Flatten()(network)
    network = Dense(units=40, activation=tanh)(network)
    network = Dense(units=10, activation=softmax)(network)
    network = Model(inputs=[network_input], outputs=[network])
    network.compile(optimizer=RMSprop(),
                    loss=categorical_crossentropy,
                    metrics=[categorical_accuracy])
    network.summary()
    self.train_history = network.fit(train_data,
                                     train_label,
                                     batch_size=32,
                                     epochs=16)
    self.evaluate_history = network.evaluate(test_data, test_label)
    return network
Example #4
def model_fashion_vgg16():
    filepath = './model/model_cifar_vgg16.hdf5'
    (X_train, Y_train), (X_test, Y_test) = cifar10.load_data()  # 32*32
    X_train = X_train.astype('float32').reshape(-1, 32, 32, 3)
    X_test = X_test.astype('float32').reshape(-1, 32, 32, 3)
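    # Scale pixel values from [0, 255] to [0, 1]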
    X_train /= 255
    X_test /= 255
    y_train = Y_train.reshape(-1)
    y_test = Y_test.reshape(-1)
    print('Train:{},Test:{}'.format(len(X_train), len(X_test)))
    nb_classes = 10
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_test = np_utils.to_categorical(y_test, nb_classes)
    print('data success')
    # base_model = applications.VGG16(weights='imagenet', include_top=False, input_shape=(28, 28, 1))
    model_vgg = VGG16(include_top=False,
                      weights='imagenet',
                      input_shape=(32, 32, 3))
    # for layer in model_vgg.layers:  # freeze weights
    #     layer.trainable = False
    model = Flatten()(model_vgg.output)
    model = Dense(1024, activation='relu', name='fc1')(model)
    # model = Dropout(0.5)(model)
    model = Dense(512, activation='relu', name='fc2')(model)
    # model = Dropout(0.5)(model)
    model = Dense(10, activation='softmax', name='prediction')(model)
    model = Model(inputs=model_vgg.input, outputs=model, name='vgg16_pretrain')
    model.summary()
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_accuracy',
                                 mode='auto',
                                 save_best_only=True)
    model.fit(X_train,
              y_train,
              batch_size=128,
              epochs=30,
              validation_data=(X_test, y_test),
              callbacks=[checkpoint])
    model = load_model(filepath)
    score = model.evaluate(X_test, y_test, verbose=0)
    print(score)
Example #5
def flower_model(X_train, y_train):
    base_model = ResNet50(include_top=False,
                          weights='imagenet',
                          input_shape=(224, 224, 3))
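    # Freeze the pretrained backbone; only the new 2-class head below is trained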
    for layer in base_model.layers:
        layer.trainable = False

    model = Flatten()(base_model.output)
    model = Dense(128, activation='relu')(model)
    model = Dropout(0.5)(model)
    model = Dense(2, activation='softmax')(model)

    model = Model(inputs=base_model.input, outputs=model)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.fit(X_train, y_train, batch_size=128, epochs=5)

    return model
Example #6
def model_fashion_vgg16():
    filepath = './model/model_svhn_vgg16.hdf5'
    (X_train, y_train), (X_test, y_test) = SVNH_DatasetUtil.load_data()
    print('Train:{},Test:{}'.format(len(X_train), len(X_test)))
    model_vgg = VGG16(include_top=False,
                      weights='imagenet',
                      input_shape=(32, 32, 3))
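    # 32x32 RGB inputs: the smallest size VGG16's five pooling stages can handle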
    # for layer in model_vgg.layers:  # freeze weights
    #     layer.trainable = False
    # model_vgg = VGG16(include_top=False, weights=None, input_shape=(32, 32, 3))
    model = Flatten()(model_vgg.output)
    model = Dense(1024, activation='relu', name='fc1')(model)
    # model = Dropout(0.5)(model)
    model = Dense(512, activation='relu', name='fc2')(model)
    # model = Dropout(0.5)(model)
    model = Dense(10, activation='softmax', name='prediction')(model)
    model = Model(inputs=model_vgg.input, outputs=model, name='vgg16_pretrain')
    model.summary()
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    checkpoint = ModelCheckpoint(filepath=filepath,
                                 monitor='val_accuracy',
                                 mode='auto',
                                 save_best_only=True)
    model.fit(X_train,
              y_train,
              batch_size=64,
              epochs=15,
              validation_data=(X_test, y_test),
              callbacks=[checkpoint])
    model = load_model(filepath)
    score = model.evaluate(X_test, y_test, verbose=0)
    print(score)
Example #7
def train_model(logdir, hp):
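    # Build a tunable dense head on top of an existing base `model`;
    # hp supplies the layer widths, dropout rate and learning rate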
    model2 = Flatten()(model.layers[-1].output)
    for unit in hp['units']:
        model2 = Dense(unit, activation='relu')(model2)
        model2 = Dropout(hp['dropout'])(model2)
    model2 = Dense(1, activation='sigmoid')(model2)

    model2 = Model(inputs=model.layers[0].input, outputs=model2)
    model2.summary()
    # Freeze the pretrained base layers so only the new dense head is trained
    for layer in model.layers:
        layer.trainable = False

    plot_model(model2,
               to_file='model2.png',
               show_shapes=True,
               show_layer_names=False)
    model2 = multi_gpu_model(model2, gpus=2)
    model2.compile(optimizer=optimizers.Adam(lr=hp['lr'], decay=0.01),
                   loss='binary_crossentropy',
                   metrics=['accuracy'])

    cb = [callbacks.TensorBoard(log_dir=logdir)]
    history = model2.fit(x_train,
                         y_train,
                         validation_data=(x_test, y_test),
                         batch_size=8,
                         epochs=1000,
                         callbacks=cb,
                         verbose=2)

    # Plot training & validation accuracy values
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()

    # Plot training & validation loss values
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
    return model2
Example #8
model = Dense(28)(model)
model = Activation('relu')(model)
model = BatchNormalization()(model)

model = Dense(1)(model)
model = Activation('sigmoid')(model)
    
model = Model(inp, model)
    


model.compile(loss='binary_crossentropy',
              optimizer='rmsprop',
              metrics=['accuracy'])

model.fit(x_train, Y_train, batch_size=16, epochs=10)
score = model.evaluate(x_test, Y_test, batch_size=16)


print(x_test)
y_pred = model.predict(x_test, batch_size=64)

Y_pred = []
print(y_pred)

for i in range(len(y_pred)):
    # A single sigmoid unit: threshold the predicted probability at 0.5
    if y_pred[i][0] >= 0.5:
        a = 1
    else:
        a = -1
    Y_pred.append(a)
Example #9
        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        print(model.summary())
        # Training callbacks
        callbacks = [
            EarlyStopping(patience=10),
            TensorBoard(log_dir='output/logs_{}'.format(FILENAME)),
            ModelCheckpoint(filepath='output/{}.h5'.format(FILENAME),
                            save_best_only=True),
        ]
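        # The best weights are reloaded from the checkpoint file after training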
        # Run training
        print('Training model')
        model.fit(x=X_train,
                  y=y_train,
                  batch_size=BATCH_SIZE,
                  validation_data=(X_valid, y_valid),
                  epochs=1000,
                  callbacks=callbacks)
        # Reload best model
        model = load_model('output/{}.h5'.format(FILENAME))
    finally:
        # Evaluate test dataset
        print('Evaluating test dataset')
        X_test = get_evaluated(X_test, 'test', BATCH_SIZE)
        loss, acc = model.evaluate(X_test, y_test, batch_size=BATCH_SIZE)
        prediction = model.predict(X_test)

        # Save results
        with open('output/{}.json'.format(FILENAME), 'w') as f:
            json.dump(
                {
Example #10
# model.summary()
# The output of the convolutional layer can be used as a deep feature
# conv1_feature_output = K.function([l1_input, K.learning_phase()], [l6_flatten])
optimizer = SGD(lr=1e-2, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# early_stop = EarlyStopping(patience=200, verbose=1)
train_y = to_categorical(train_y)
train_X_feature = train_X_feature.reshape(train_X_feature.shape[0],
                                          1,
                                          train_X_feature.shape[1],
                                          train_X_feature.shape[2])
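# Reshape to (samples, 1, height, width): single-channel input in channels_first layout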
model.fit([train_X_feature],
          train_y,
          epochs=20,
          verbose=1,
          # validation_split=0.1,
          # validation_data=(validation_X, validation_y),
          shuffle=True,
          batch_size=32,
          # callbacks=[early_stop]
          )
print(model.evaluate(train_X_feature, train_y))

model.summary()
quit()



logging.debug('=' * 20)
# ****************************************************************
# ------------- region end : 3. build one-hot encoding -------------
# ****************************************************************
Example #11
                  optimizer='adam',
                  metrics=['accuracy'])


#process data
def load_data():
    # load data
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # normalize inputs from 0-255 to 0-1
    X_train = X_train / 255
    X_test = X_test / 255
    # one hot encode outputs
    y_train = np_utils.to_categorical(y_train, classes)
    y_test = np_utils.to_categorical(y_test, classes)
    return (X_train, y_train), (X_test, y_test)


(X_train, y_train), (X_test, y_test) = load_data()
X_train = np.expand_dims(X_train, axis=3)
X_test = np.expand_dims(X_test, axis=3)
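# Add a trailing channel axis: MNIST images become (28, 28, 1)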
# start training
top_model.fit(X_train,
              y_train,
              epochs=15,
              batch_size=100,
              verbose=2,
              validation_data=(X_test, y_test),
              shuffle=True)
top_model.save_weights(top_model_weights_path)
#pre=model.predict(test_data)
#print(np.argmax(pre, axis=1))
Example #12
model = BatchNormalization()(model)

output_label = Dense(1, activation='sigmoid')(model)
model = Model(inputs=[question1, question2], outputs=output_label)
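# Two question inputs feed a single sigmoid unit: a binary classifier over question pairs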
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

callbacks = [
    ModelCheckpoint(MODEL_WEIGHTS_FILE, monitor='val_acc', save_best_only=True)
]
history = model.fit([q1_trainset, q2_trainset],
                    Y_train,
                    epochs=NB_EPOCHS,
                    validation_split=VALIDATION_SPLIT,
                    verbose=2,
                    batch_size=BATCH_SIZE,
                    callbacks=callbacks)

import matplotlib.pyplot as plt

plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])

# Reload the best weights saved by the ModelCheckpoint callback
model.load_weights(MODEL_WEIGHTS_FILE)
loss, accuracy = model.evaluate([q1_testset, q2_testset], Y_test, verbose=0)

print('loss = {0:.4f}, accuracy = {1:.4f}'.format(loss, accuracy))
Example #13
def scratchVGG16_Model():
    data = helper.prepDataforCNN(numChannel=3, feat_norm=True)
    trainX = data["trainX"]
    valdX = data["valdX"]
    trainY = data["trainY"]
    valdY = data["valdY"]

    _, row, col, channel = trainX.shape
    digLen = 5  # including category 0
    numDigits = 11
    epochs = 50
    batch_size = 64

    vgg16Model = VGG16(include_top=False, weights=None)
    vgg16Model.summary()
    ptInput = keras.Input(shape=(row, col, channel), name='vgg16Scratch')
    vgg16 = vgg16Model(ptInput)

    # Alternative: build the VGG16 conv stack by hand from Conv2D/MaxPooling2D
    # blocks (two 64-filter convs, two 128s, two 256s, then two blocks of three
    # 512s, each block followed by 2x2 max pooling)

    vgg16 = Flatten()(vgg16)
    vgg16 = Dense(512, activation='relu')(vgg16)
    vgg16 = Dense(512, activation='relu')(vgg16)
    # vgg16 = Dense(1000, activation='relu')(vgg16)
    vgg16 = Dropout(0.5)(vgg16)

    numd_SM = Dense(digLen, activation='softmax', name='num')(vgg16)
    dig1_SM = Dense(numDigits, activation='softmax', name='dig1')(vgg16)
    dig2_SM = Dense(numDigits, activation='softmax', name='dig2')(vgg16)
    dig3_SM = Dense(numDigits, activation='softmax', name='dig3')(vgg16)
    dig4_SM = Dense(numDigits, activation='softmax', name='dig4')(vgg16)
    numB_SM = Dense(2, activation='softmax', name='nC')(vgg16)
    out = [numd_SM, dig1_SM, dig2_SM, dig3_SM, dig4_SM, numB_SM]
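    # Multi-output head: one softmax for the digit count, four for the
    # individual digit positions, and a 2-way auxiliary 'nC' output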

    vgg16 = keras.Model(inputs=ptInput, outputs=out)

    callback = []
    optim = optimizers.Adam(lr=0.001,
                            beta_1=0.9,
                            beta_2=0.999,
                            epsilon=None,
                            decay=0.0,
                            amsgrad=True)

    checkpointer = keras.callbacks.ModelCheckpoint(
        filepath='saved_models/vgg16.classifier.hdf5',
        monitor='loss',
        save_best_only=True,
        verbose=2)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                  factor=0.1,
                                                  verbose=1,
                                                  patience=3,
                                                  cooldown=0,
                                                  min_lr=0.000001)
    # tb = keras.callbacks.TensorBoard(log_dir='logs', write_graph=True, write_images=True)
    es = keras.callbacks.EarlyStopping(monitor='val_loss',
                                       min_delta=0.00000001,
                                       patience=5,
                                       verbose=1,
                                       mode='auto')
    callback.append(es)
    callback.append(checkpointer)
    callback.append(reduce_lr)
    vgg16.summary()

    vgg16.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optim,
                  metrics=['accuracy'])

    vgg16History = vgg16.fit(x=trainX,
                             y=trainY,
                             batch_size=batch_size,
                             epochs=epochs,
                             verbose=1,
                             shuffle=True,
                             validation_data=(valdX, valdY),
                             callbacks=callback)

    print(vgg16History.history.keys())
    modName = 'vgg16_Scratch'
    createSaveMetricsPlot(vgg16History, modName, data, vgg16)
Example #14
X_train = np.array(X_train)
X_test = np.array(X_test)

# Generate a OneHot 10D vector as a line of Y_train: Y_train has 60,000 lines of OneHot as output
Y_train = (np.arange(10) == y_train[:, None]).astype(int)
Y_test = (np.arange(10) == y_test[:, None]).astype(int)

# Normalize to [0,1]
X_train = X_train / 255
X_test = X_test / 255

start = datetime.now()
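# validation_split=0.3 holds out the final 30% of the training arrays for validation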
history = model.fit(X_train,
                    Y_train,
                    batch_size=tBatchSize,
                    epochs=5,
                    shuffle=True,
                    validation_split=0.3)
duration = datetime.now() - start
print("Training completed in time: ", duration)

score = model.evaluate(X_test, Y_test, batch_size=tBatchSize)
print('Test Loss:', score[0])
print('Test Accuracy:', score[1])

import matplotlib.pyplot as plt
plt.plot(history.history["accuracy"])
plt.plot(history.history['val_accuracy'])
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title("Model accuracy and loss")
plt.xlabel("Epoch")
plt.legend(['accuracy', 'val_accuracy', 'loss', 'val_loss'], loc='upper left')
plt.show()