# Example #1
# Stochastic gradient descent with Nesterov momentum; the learning rate is
# decayed linearly toward zero over the scheduled number of epochs.
sgd = optimizers.SGD(lr=LearningRate,
                     decay=LearningRate / n_epochs,
                     momentum=0.9,
                     nesterov=True)
top_model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

# Parameter accounting: wrap the weight lists in set() so tensors shared
# between layers are only counted once, then sum their element counts.
trainable_params = int(np.sum([K.count_params(w)
                               for w in set(top_model.trainable_weights)]))
non_trainable_params = int(np.sum([K.count_params(w)
                                   for w in set(top_model.non_trainable_weights)]))
total_params = trainable_params + non_trainable_params

print("model Stats")
print("=" * 30)
print("Total Parameters:{:,}".format(total_params))
print("Non-Trainable Parameters:{:,}".format(non_trainable_params))
print("Trainable Parameters:{:,}\n".format(trainable_params))
train_folders = '/cptjack/totem/yanyiting/Eight_classification/gray/8_focus/train'
validation_folders = '/cptjack/totem/yanyiting/Eight_classification/gray/8_focus/val/'

img_width, img_height = 224, 224
batch_size_for_generators = 32

# Heavy augmentation for the training stream: near-full-range rotation,
# flips in both axes, shearing, and (presumably) stain-color augmentation
# via the custom DataGenerator's stain_transformation flag — TODO confirm
# against the DataGenerator implementation.
train_datagen = DataGenerator(rescale=1. / 255,
                              rotation_range=178,
                              horizontal_flip=True,
                              vertical_flip=True,
                              shear_range=0.6,
                              fill_mode='nearest',
                              stain_transformation=True)
train_generator = train_datagen.flow_from_directory(
    train_folders,
    target_size=(img_width, img_height),
    # FIX: was a hard-coded 32 — use the named constant so changing
    # batch_size_for_generators actually takes effect everywhere.
    batch_size=batch_size_for_generators,
    class_mode='categorical')

# Validation stream: rescaling only, no augmentation.
validation_datagen = DataGenerator(rescale=1. / 255)
validation_generator = validation_datagen.flow_from_directory(
    validation_folders,
    target_size=(img_width, img_height),
    batch_size=batch_size_for_generators,  # FIX: was a hard-coded 32
    class_mode='categorical')

# Count every file under each directory tree (one image per file).
nb_train_samples = sum(
    len(files) for _, _, files in os.walk(train_folders))
nb_validation_samples = sum(
    len(files) for _, _, files in os.walk(validation_folders))
class Mycbk(ModelCheckpoint):
    def __init__(self, model, filepath, monitor='val_loss', mode='min', save_best_only=True):
        """Checkpoint callback that keeps a handle to `model` (e.g. the
        single-GPU template model) so that one can be saved instead of the
        wrapper Keras trains on.

        Parameters mirror keras.callbacks.ModelCheckpoint: `filepath` is the
        save destination, `monitor` the quantity to watch, `mode` how to
        compare it, `save_best_only` whether to overwrite only on improvement.
        """
        self.single_model = model
        # BUG FIX: the original called
        #   super().__init__(filepath, monitor, save_best_only, mode)
        # positionally, but ModelCheckpoint's signature is
        # (filepath, monitor, verbose, save_best_only, save_weights_only, mode, ...),
        # so save_best_only=True landed in `verbose` and 'min' landed in
        # `save_best_only` (truthy, but by accident). Pass by keyword so each
        # option reaches the intended parameter.
        super(Mycbk, self).__init__(filepath,
                                    monitor=monitor,
                                    mode=mode,
                                    save_best_only=save_best_only)
    def set_model(self,model):
# Example #2
#test_folders = "./NCT/validation/"
test_folders = '/cptjack/totem/yanyiting/gray_focus_unfocus/gray/data/micro_png_224'

batch_size_for_generators = 32
# Count every file under the test directory tree (one image per file).
nb_test_samples = sum(
    [len(files) for root, dirs, files in os.walk(test_folders)])
# Integer division: a trailing partial batch is dropped from evaluation.
test_steps = nb_test_samples // batch_size_for_generators

print("\nImages for Testing")
print("=" * 30)

img_width, img_height = 224, 224
# Test-time pipeline: rescaling only, no augmentation.
test_datagen = DataGenerator(rescale=1. / 255)
# shuffle=False keeps the generator order aligned with
# test_generator.classes, which the label slice below relies on.
test_generator = test_datagen.flow_from_directory(
    test_folders,
    target_size=(img_width, img_height),
    # FIX: was a hard-coded 32 — use the named constant so batch size and
    # test_steps stay consistent if it is ever changed.
    batch_size=batch_size_for_generators,
    class_mode='categorical',
    shuffle=False)

test_loss, test_accuracy = top_model.evaluate_generator(test_generator,
                                                        steps=test_steps,
                                                        verbose=1)

predictions = top_model.predict_generator(test_generator,
                                          steps=test_steps,
                                          verbose=1)
# Predicted class index per sample (argmax over the softmax outputs).
prediction_list = np.argmax(predictions, axis=1)
# Truncate the ground-truth labels to the number of predicted samples,
# since the partial final batch was excluded by the integer test_steps.
labels = test_generator.classes[:len(predictions)]
# FIX: the original evaluated test_generator.class_indices as a bare
# expression and discarded it; keep the class-name -> index mapping.
class_indices = test_generator.class_indices

print("\nTest Loss: %.3f" % (test_loss))