Example #1
# `classifier` is a Keras Sequential model whose convolution and pooling
# layers (Steps 1-2) are built earlier, along with `input_size`; not shown.
from keras.layers import Flatten, Dense, Dropout
from keras.preprocessing.image import ImageDataGenerator

# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full connection
classifier.add(Dense(units=64, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=1, activation='sigmoid'))

# Compiling the CNN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Part 2 - Fitting the CNN to the images
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)

test_datagen = ImageDataGenerator(rescale=1. / 255)

training_set = train_datagen.flow_from_directory('G:\\ANALYTICS_WORLD_R_SAS\\python_world\\deep learning\\Convolutional_Neural_Networks\\dataset\\training_set',
                                                 target_size=input_size,
                                                 batch_size=batch_size,
                                                 class_mode='binary')

test_set = test_datagen.flow_from_directory('G:\\ANALYTICS_WORLD_R_SAS\\python_world\\deep learning\\Convolutional_Neural_Networks\\dataset\\test_set',
                                            target_size=input_size,
                                            batch_size=batch_size,
                                            class_mode='binary')

# Create a loss history
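
The snippet stops at the loss-history comment. A minimal sketch of a typical continuation, assuming a hand-rolled LossHistory callback; the sample counts (8000/2000) and epoch count are illustrative, not from the original:

from keras.callbacks import Callback

class LossHistory(Callback):
    """Records the training loss after every batch."""
    def on_train_begin(self, logs=None):
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        self.losses.append(logs.get('loss'))

history = LossHistory()
classifier.fit_generator(training_set,
                         steps_per_epoch=8000 // batch_size,   # illustrative sample count
                         epochs=25,                            # illustrative
                         validation_data=test_set,
                         validation_steps=2000 // batch_size,  # illustrative
                         callbacks=[history])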
Example #2
from keras.preprocessing.image import ImageDataGenerator

model_name2 = 'model2.h5'
num_classes = 3
train_data_dir = 'data/miniimagenet/train'  # one subfolder per class (3 classes)
test_data_dir = 'data/miniimagenet/val'
img_rows = 224  # 227
img_cols = 224  # 227
epochs = 3
# batch size
batch_size = 4
# total number of training samples
nb_train_samples = 3120  # 3 * 1040
# total number of validation samples
nb_validation_samples = 780  # 3 * 260
################## Data loading and preprocessing ##################
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical')  # multi-class; use 'binary' for two classes
print("train_generator.filenames", train_generator.filenames)  # filenames in order
print("train_generator.class_indices", train_generator.class_indices)  # class-to-index mapping
validation_generator = test_datagen.flow_from_directory(
    test_data_dir,
    target_size=(img_rows, img_cols),
    batch_size=batch_size,
    class_mode='categorical')  # multi-class; use 'binary' for two classes
print("validation_generator.filenames", validation_generator.filenames)  # filenames in order
Example #3
from keras.layers import Dense
from keras.optimizers import RMSprop
from keras.callbacks import ReduceLROnPlateau
from keras.preprocessing.image import ImageDataGenerator

# Freeze the pre-trained layers and add a new 5-class softmax head;
# `model` is assumed to be a Sequential model built earlier.
for layer in model.layers:
    layer.trainable = False
model.add(Dense(5, activation='softmax'))

optimizer = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)

model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Halve the learning rate whenever val_acc has plateaued for 3 epochs
learning_rate_reduction = ReduceLROnPlateau(monitor='val_acc', patience=3,
                                            verbose=1, factor=0.5, min_lr=0.00001)

batch_size = 32
train_datagen = ImageDataGenerator(
        rescale=1. / 255,  # match the test-time rescaling
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=10,
        zoom_range=0.1,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=False,
        vertical_flip=False,
        validation_split=0.33)  # required for subset="training" below

test_datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.33)

training_set = train_datagen.flow_from_directory(training_set_path,
                                                 target_size=input_size,
                                                 batch_size=batch_size,
                                                 subset="training",
                                                 class_mode='categorical')  # 5-way softmax needs categorical labels

test_set = test_datagen.flow_from_directory(test_set_path,
                                            target_size=input_size,
                                            batch_size=batch_size,
                                            subset="validation",       # assumed: the held-out split
                                            class_mode='categorical')  # continuation assumed; original was cut off
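
ReduceLROnPlateau only fires when passed to the training call; a minimal sketch wiring it in, assuming the generators above (epoch count illustrative):

# Minimal sketch (assumed): pass the callback via `callbacks=`.
model.fit_generator(training_set,
                    steps_per_epoch=len(training_set),  # batches per epoch
                    epochs=10,                          # illustrative
                    validation_data=test_set,
                    validation_steps=len(test_set),
                    callbacks=[learning_rate_reduction])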
Example #4
import os
from keras.layers import Dropout, Dense, Activation
from keras.preprocessing.image import ImageDataGenerator

# `model` is assumed to be built earlier (not shown).
model.add(Dropout(0.2))
model.add(Dense(5))
model.add(Activation('softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

script_dir = os.path.dirname(".")
training_set_path = os.path.join(script_dir, '/content/drive/My Drive/train/ProjTrain/')
test_set_path = os.path.join(script_dir, '/content/drive/My Drive/train/ProjTrain/')

batch_size = 32
input_size = (512,512)
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True,
                                   validation_split=0.33)  # required for subset="training" below

test_datagen = ImageDataGenerator(rescale=1. / 255, validation_split=0.33)

training_set = train_datagen.flow_from_directory(training_set_path,
                                                 target_size=input_size,
                                                 batch_size=batch_size,
                                                 subset="training",
                                                 class_mode='categorical')



test_set = test_datagen.flow_from_directory(test_set_path,
                                            target_size=input_size,
                                            batch_size=batch_size,
                                            subset="validation",       # assumed: the held-out split
                                            class_mode='categorical')  # continuation assumed; original was cut off
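
Since both generators read the same directory, it is worth confirming the split sizes before training; a minimal sketch, with an illustrative epoch count:

# Minimal sketch (assumed): check the split, then train on it.
print(training_set.samples, "training images;", test_set.samples, "validation images")
print(training_set.class_indices)  # folder name -> label index

model.fit_generator(training_set,
                    steps_per_epoch=training_set.samples // batch_size,
                    epochs=5,  # illustrative
                    validation_data=test_set,
                    validation_steps=test_set.samples // batch_size)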
Example #5
 
# Same pipeline as Example #1 with the dataset paths parameterised;
# `classifier`, `input_size`, `training_set_path` and `test_set_path`
# are assumed defined earlier, with imports as in Example #1.

# Step 3 - Flattening
classifier.add(Flatten())
 
# Step 4 - Full connection
classifier.add(Dense(units=64, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=1, activation='sigmoid'))
 
# Compiling the CNN
classifier.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
 
# Part 2 - Fitting the CNN to the images
batch_size = 32
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
 
test_datagen = ImageDataGenerator(rescale=1. / 255)
 
training_set = train_datagen.flow_from_directory(training_set_path,
                                                 target_size=input_size,
                                                 batch_size=batch_size,
                                                 class_mode='binary')
 
test_set = test_datagen.flow_from_directory(test_set_path,
                                            target_size=input_size,
                                            batch_size=batch_size,
                                            class_mode='binary')
 
# Create a loss history
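
After training, a single image can be classified by reproducing the generator's preprocessing by hand; a minimal sketch, with a hypothetical image path:

# Minimal sketch (assumed): predict one image with the trained classifier.
import numpy as np
from keras.preprocessing import image

img = image.load_img('example.jpg', target_size=input_size)  # hypothetical path
x = image.img_to_array(img) / 255.  # same rescaling as the generators
x = np.expand_dims(x, axis=0)       # add the batch dimension
prob = classifier.predict(x)[0][0]  # sigmoid output in [0, 1]
# class_indices on the training generator maps folder names to 0/1
print('class 1' if prob > 0.5 else 'class 0')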