def train(data, file_name, nlayer, num_epochs=10, batch_size=128, train_temp=1, init=None, activation=tf.nn.relu):
    """Train an n-layer residual CNN on MNIST/CIFAR-shaped (28, 28, 1) input.

    Parameters
    ----------
    data : object
        Must expose ``train_data``, ``train_labels``, ``validation_data``
        and ``validation_labels`` arrays.
    file_name : str or None
        Path the trained model is saved to; skipped when ``None``.
    nlayer : int
        Depth selector; must be one of 2, 3, 4, 5.
    num_epochs : int
        Number of training epochs.
    batch_size : int
        Minibatch size.
    train_temp : float
        Softmax temperature dividing the logits inside the loss.
    init : str or None
        Optional path to initial weights loaded before training.
    activation : callable
        Activation applied (via ``Lambda``) after each residual block.

    Returns
    -------
    dict
        ``{'model': trained Keras model, 'history': fit history}``.

    Raises
    ------
    ValueError
        If ``nlayer`` is not one of the supported depths.
    """
    # Residual stack layout per requested depth. The original code spelled
    # each depth out as a copy-pasted branch; a table keeps them in sync.
    # NOTE(review): Residual2 appears to change the channel width while
    # Residual keeps it — confirm against their definitions elsewhere.
    architectures = {
        2: [(Residual2, 8), (Residual2, 16)],
        3: [(Residual2, 8), (Residual, 8), (Residual2, 16)],
        4: [(Residual2, 8), (Residual, 8), (Residual2, 16), (Residual, 16)],
        5: [(Residual2, 8), (Residual, 8), (Residual, 8), (Residual2, 16), (Residual, 16)],
    }
    if nlayer not in architectures:
        # The original silently fell through and later crashed with an
        # UnboundLocalError on `x`; fail loudly and early instead.
        raise ValueError("nlayer must be one of %s, got %r" % (sorted(architectures), nlayer))

    inputs = Input(shape=(28, 28, 1))
    x = inputs
    for block, width in architectures[nlayer]:
        x = block(width, activation)(x)
        x = Lambda(activation)(x)
    x = AveragePooling2D(pool_size=7)(x)
    x = Flatten()(x)
    x = Dense(10)(x)

    model = Model(inputs=inputs, outputs=x)

    # Load initial weights when a checkpoint path was given.
    if init is not None:
        model.load_weights(init)

    # Loss: cross-entropy between the true labels and temperature-scaled logits.
    def fn(correct, predicted):
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct, logits=predicted / train_temp)

    # Adam optimizer (local name kept from the original code).
    sgd = Adam()

    # Compile the Keras model with the specified loss and optimizer.
    model.compile(loss=fn, optimizer=sgd, metrics=['accuracy'])
    model.summary()

    # Run training against the given dataset, reporting progress.
    history = model.fit(data.train_data, data.train_labels,
                        batch_size=batch_size,
                        validation_data=(data.validation_data, data.validation_labels),
                        epochs=num_epochs,
                        shuffle=True)

    # Persist the trained model when a destination path was supplied.
    if file_name is not None:
        model.save(file_name)

    return {'model': model, 'history': history}
# Build the model from the LeNet feature extractor.
output = lenet(myinput)
model = Model(myinput, output)

# Optimizer: Adam with a learning rate of 0.0003.
adam = Adam(lr=0.0003)

# Compile with categorical cross-entropy loss, tracking accuracy.
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=['accuracy'])

# Prepare the data: fetch the training images, then one-hot encode the
# labels with to_categorical.
X = GetTrainDataByLabel('data')
Y = to_categorical(GetTrainDataByLabel('labels'))

# Train for 50 epochs with batch size 200 and verbose progress output,
# logging to TensorBoard and holding out 20% of the data for validation.
model.fit(X, Y, 200, 50, 1,
          callbacks=[TensorBoard('./LeNet/log', write_images=1, histogram_freq=1)],
          validation_split=0.2,
          shuffle=True)

# Save the trained model to disk.
model.save("lenet-no-activation-model.h5")