Example #1
    def setUp(self):
        import Exercise08_01
        self.exercises = Exercise08_01

        self.file_url = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
        self.zip_dir = tf.keras.utils.get_file('cats_and_dogs.zip',
                                               origin=self.file_url,
                                               extract=True)
        self.path = pathlib.Path(
            self.zip_dir).parent / 'cats_and_dogs_filtered'
        self.train_dir = self.path / 'train'
        self.validation_dir = self.path / 'validation'
        self.total_train = 2000
        self.total_val = 1000

        self.train_image_generator = ImageDataGenerator(rescale=1. / 255)
        self.validation_image_generator = ImageDataGenerator(rescale=1. / 255)
        self.batch_size = 32
        self.img_height = 224
        self.img_width = 224
        self.channel = 3
        self.train_data_gen = self.train_image_generator.flow_from_directory(
            batch_size=self.batch_size,
            directory=self.train_dir,
            shuffle=True,
            target_size=(self.img_height, self.img_width),
            class_mode='binary')
        self.val_data_gen = self.validation_image_generator.flow_from_directory(
            batch_size=self.batch_size,
            directory=self.validation_dir,
            target_size=(self.img_height, self.img_width),
            class_mode='binary')

        np.random.seed(8)
        tf.random.set_seed(8)

        self.base_model = NASNetMobile(include_top=False,
                                       input_shape=(self.img_height,
                                                    self.img_width,
                                                    self.channel),
                                       weights='imagenet')
        self.base_model.trainable = False

        self.model = tf.keras.Sequential([
            self.base_model,
            layers.Flatten(),
            layers.Dense(500, activation='relu'),
            layers.Dense(1, activation='sigmoid')
        ])
        self.model.compile(loss='binary_crossentropy',
                           optimizer=tf.keras.optimizers.Adam(0.001),
                           metrics=['accuracy'])
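The setUp above only builds and compiles the model; a minimal, hypothetical training sketch using the generators and counts defined there (the epoch count is arbitrary) could look like this inside a method of the same class:

        # hypothetical training call, not part of the original test
        history = self.model.fit(
            self.train_data_gen,
            steps_per_epoch=self.total_train // self.batch_size,
            epochs=5,
            validation_data=self.val_data_gen,
            validation_steps=self.total_val // self.batch_size)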
Example #2
 def build_model(self):
     conv_base = NASNetMobile(include_top=False,
                              weights='imagenet',
                              input_shape=config.input_shape)
     conv_base.trainable = False
     conv_base.summary()  # summary() prints its output directly; wrapping it in print() also prints "None"
     inputs = Input(shape=config.input_shape)
     x = conv_base(inputs)
     x = Flatten()(x)
     x = Dense(units=256, activation=nasnet['activation'])(x)
     outputs = Dense(units=config.num_classes,
                     activation=nasnet['outact'])(x)
     self.model = Model(inputs=inputs, outputs=outputs)
Example #3
    def _build_transfer_learning_model(self):
        input = Input(shape=(224, 224, 3))
        self.model = NASNetMobile(weights='imagenet',
                                  input_tensor=input,
                                  include_top=False,
                                  input_shape=(224, 224, 3))

        last_layer = self.model.output
        dropout = Dropout(0.5)(last_layer)
        x = Flatten(name='flatten')(dropout)
        # x = Dense(1000, activation='softmax')(x)
        out = Dense(2,
                    activation='linear',
                    kernel_regularizer=l2(0.01),
                    bias_regularizer=l2(0.01),
                    name='output_layer')(x)

        self.model = Model(inputs=input, outputs=out)
        for layer in self.model.layers[:-1]:
            layer.trainable = False

        self.model.layers[-1].trainable = True
        # self.model.layers[-2].trainable
        self.model.compile(loss='categorical_hinge',
                           optimizer=Adam(lr=0.0005, decay=0.003),
                           metrics=['accuracy'])
Example #4
def get_model(
    input_size: tuple = (224, 224, 3),
    hidden_size: int = 128,
    dropout: float = 0.5,
    num_classes: int = 3
):

    # load the NASNetMobile network, ensuring the head FC layer sets are left off
    baseModel = NASNetMobile(
        weights="imagenet",
        include_top=False,
        input_tensor=Input(shape=input_size)
    )
    # construct the head of the model that will be placed on top of
    # the base model
    headModel = baseModel.output
    headModel = AveragePooling2D(pool_size=(4, 4))(headModel)
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(hidden_size)(headModel)
    headModel = BatchNormalization()(headModel)
    headModel = PReLU()(headModel)
    headModel = Dropout(dropout)(headModel)
    headModel = Dense(num_classes, activation="softmax")(headModel)

    # place the head FC model on top of the base model
    model = Model(inputs=baseModel.input, outputs=headModel)

    return model
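A short usage sketch (assumed, not part of the original snippet): build the model returned by get_model and compile it; the loss and optimizer shown here are illustrative choices.

# hypothetical usage of get_model defined above
model = get_model(input_size=(224, 224, 3), hidden_size=128, num_classes=3)
model.compile(loss="categorical_crossentropy", optimizer="adam",
              metrics=["accuracy"])
model.summary()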
Example #5
 def createModel(self):
     base_model=NASNetMobile( 
         include_top=True,
         weights=None,
         input_tensor=None,
         input_shape=(224,224,3),
         pooling=None,
         classes=8,
         ) 
     
     # x=base_model.output
     # x=Flatten()(x)
     # x=Dense(1024,activation='relu')(x) 
     # x=Dense(1024,activation='relu')(x) 
     # x=Dense(512,activation='relu')(x) 
     # preds=Dense(7,activation='softmax')(x) 
     
     model=Model(inputs=base_model.input,outputs=base_model.output)
     # for layer in model.layers:
     #     layer.trainable=False
     # or if we want to set the first 20 layers of the network to be non-trainable
     # for layer in model.layers[:20]:
     #     layer.trainable=False
     # for layer in model.layers[20:]:
     #     layer.trainable=True
     
     return model
Example #6
def build_model(classes = 2):
  inputs = Input(shape = (IMAGE_SIZE, IMAGE_SIZE, 3))
  x = preprocess_input(inputs)
  x = NASNetMobile(weights=None, classes=classes)(x)
  model = Model(inputs=inputs, outputs=x)
  model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
  return model
Example #7
def NASNetMobilemodel(no_classes, shape):
    """
    NASNetMobile Learning Transferable Architectures for Scalable Image Recognition,2018
    """
    base_model = NASNetMobile(include_top=False,
                              weights='imagenet',
                              input_shape=shape)
    base_model.trainable = False
    inputs = Input(shape=shape)
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    #x = Dense(1024,activation='relu')(x)
    x = Dense(1056, activation='relu')(x)
    predictions = Dense(no_classes, activation='softmax',
                        name='predictions')(x)
    model = Model(inputs, outputs=predictions)
    return model
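A brief usage sketch (assumed): instantiate the model for a hypothetical 5-class problem and compile it with illustrative settings.

# hypothetical usage of NASNetMobilemodel defined above
model = NASNetMobilemodel(no_classes=5, shape=(224, 224, 3))
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
model.summary()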
Example #8
 def __init__(self, model_name=None):
     if model_name == 'Xception':
         base_model = Xception(weights='imagenet')
         self.preprocess_input = xception.preprocess_input
     elif model_name == 'VGG19':
         base_model = VGG19(weights='imagenet')
         self.preprocess_input = vgg19.preprocess_input
     elif model_name == 'ResNet50':
         base_model = ResNet50(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet101':
         base_model = ResNet101(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet152':
         base_model = ResNet152(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet50V2':
         base_model = ResNet50V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'ResNet101V2':
         base_model = ResNet101V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'ResNet152V2':
         base_model = ResNet152V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'InceptionV3':
         base_model = InceptionV3(weights='imagenet')
         self.preprocess_input = inception_v3.preprocess_input
     elif model_name == 'InceptionResNetV2':
         base_model = InceptionResNetV2(weights='imagenet')
         self.preprocess_input = inception_resnet_v2.preprocess_input
     elif model_name == 'DenseNet121':
         base_model = DenseNet121(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'DenseNet169':
         base_model = DenseNet169(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'DenseNet201':
         base_model = DenseNet201(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'NASNetLarge':
         base_model = NASNetLarge(weights='imagenet')
         self.preprocess_input = nasnet.preprocess_input
     elif model_name == 'NASNetMobile':
         base_model = NASNetMobile(weights='imagenet')
         self.preprocess_input = nasnet.preprocess_input
     elif model_name == 'MobileNet':
         base_model = MobileNet(weights='imagenet')
         self.preprocess_input = mobilenet.preprocess_input
     elif model_name == 'MobileNetV2':
         base_model = MobileNetV2(weights='imagenet')
         self.preprocess_input = mobilenet_v2.preprocess_input
     else:
         base_model = VGG16(weights='imagenet')
         self.preprocess_input = vgg16.preprocess_input
     self.model = Model(inputs=base_model.input,
                        outputs=base_model.layers[-2].output)
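The snippet above only shows __init__; assuming it belongs to a feature-extractor class (the class name FeatureExtractor below is hypothetical), it could be used like this to pull penultimate-layer features from an image batch:

# hypothetical usage; FeatureExtractor is an assumed class name
import numpy as np

extractor = FeatureExtractor(model_name='NASNetMobile')
dummy_batch = np.random.uniform(0, 255, size=(1, 224, 224, 3)).astype('float32')
features = extractor.model.predict(extractor.preprocess_input(dummy_batch))
print(features.shape)  # expected (1, 1056) for NASNetMobile's penultimate layer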
Example #9
class Test(unittest.TestCase):
	def setUp(self):
		import Activity08_01
		self.exercises = Activity08_01

		self.file_url = 'https://github.com/PacktWorkshops/The-TensorFlow-Workshop/blob/master/Chapter08/dataset/fruits360.zip'
		self.zip_dir = tf.keras.utils.get_file('fruits360.zip', origin=self.file_url, extract=True)
		self.path = pathlib.Path(self.zip_dir).parent / 'fruits360_filtered'
		self.train_dir = self.path / 'Training'
		self.validation_dir = self.path / 'Test'
		self.total_train = 11398
		self.total_val = 4752

		self.train_image_generator = ImageDataGenerator(rescale=1./255, rotation_range=40, width_shift_range=0.1, height_shift_range=0.1, shear_range=0.2, zoom_range=0.2, horizontal_flip=True, fill_mode='nearest')
		self.validation_image_generator = ImageDataGenerator(rescale=1./255)
		self.batch_size = 32
		self.img_height = 224
		self.img_width = 224
		self.channel = 3
		self.train_data_gen = self.train_image_generator.flow_from_directory(batch_size=self.batch_size, directory=self.train_dir, target_size=(self.img_height, self.img_width))
		self.val_data_gen = self.validation_image_generator.flow_from_directory(batch_size=self.batch_size, directory=self.validation_dir, target_size=(self.img_height, self.img_width))

		np.random.seed(8)
		tf.random.set_seed(8)

		self.base_model = NASNetMobile(include_top=False, input_shape=(self.img_height, self.img_width, self.channel), weights='imagenet')

		for layer in self.base_model.layers[:700]:
			layer.trainable = False

		self.model = tf.keras.Sequential([
			self.base_model,
			layers.Flatten(),
			layers.Dense(500, activation='relu'),
			layers.Dense(120, activation='softmax')
		])
		self.model.compile(loss='categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(0.001), metrics=['accuracy'])

	def test_file_url(self):
		self.assertEqual(self.exercises.file_url, self.file_url)

	def test_total_train(self):
		self.assertEqual(self.exercises.total_train, self.total_train)

	def test_total_val(self):
		self.assertEqual(self.exercises.total_val, self.total_val)

	def test_base_model_summary(self):
		self.assertEqual(self.exercises.base_model.summary(), self.base_model.summary())

	def test_model_summary(self):
		self.assertEqual(self.exercises.model.summary(), self.model.summary())
Example #10
def get_nasnet_model(
    input_size: tuple = (224, 224, 3),
    hidden_size: int = 512,
    dropout: float = 0.5,
    num_classes: int = 3,
    trainable_layers: int = 0,
    log_softmax: bool = False,
    **kwargs
):
    """Get a NasNet model

    Keyword Arguments:
        input_size {tuple} -- [size of input images] (default: {(224, 224, 3)})
        hidden_size {int} -- [description] (default: {64})
        dropout {float} -- [description] (default: {0.5})
        num_classes {int} -- [description] (default: {3})
        trainable_layers {int} -- [description] (default: {0})
        log_softmax {bool} -- [description] (default: {False})

    Returns:
        [type] -- [description]
    """
    act_fn = tf.nn.softmax if not log_softmax else tf.nn.log_softmax

    baseModel = NASNetMobile(
        weights="imagenet",
        include_top=False,
        input_tensor=Input(shape=input_size)
    )
    # construct the head of the model that will be placed on top of the
    # the base model
    headModel = baseModel.output
    headModel = AveragePooling2D(pool_size=(4, 4))(headModel)
    headModel = Flatten(name="flatten")(headModel)
    headModel = Dense(hidden_size)(headModel)
    headModel = ReLU()(headModel)
    headModel = Dropout(dropout)(headModel)
    headModel = BatchNormalization()(headModel)
    headModel = Dense(num_classes, activation=act_fn)(headModel)

    # place the head FC model on top of the base model
    model = Model(inputs=baseModel.input, outputs=headModel)

    model = fix_layers(model, num_flex_layers=trainable_layers + 8)

    return model
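A usage sketch (assumes fix_layers from the same project is importable): build the model with the defaults above and compile it; the softmax output pairs with categorical cross-entropy.

# hypothetical usage of get_nasnet_model defined above
model = get_nasnet_model(input_size=(224, 224, 3), num_classes=3,
                         trainable_layers=0, log_softmax=False)
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])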
Example #11
    def __init__(self, input_size, weights):
        input_image = Input(shape=(input_size[0], input_size[1], 3))

        if weights == 'imagenet':
            nasnetmobile = NASNetMobile(input_tensor=input_image,
                                        include_top=False,
                                        weights='imagenet',
                                        pooling=None)
            print('Successfully loaded imagenet backend weights')
        else:
            nasnetmobile = NASNetMobile(input_tensor=input_image,
                                        include_top=False,
                                        weights=None,
                                        pooling=None)
            if weights:
                nasnetmobile.load_weights(weights)
                print('Loaded backend weights: ' + weights)
        self.feature_extractor = nasnetmobile
     headModel = baseModel.output
     headModel = Dense(51, activation='softmax', use_bias=True)(headModel)
 elif network == "DenseNet201":
     print(network)
     train, test, lb2, labelsTest = preprocessing_EfficcientNet()
     baseModel = DenseNet201(weights=pretraining,
                             include_top=False,
                             input_tensor=Input(shape=(224, 224, 3)),
                             pooling="avg")
     headModel = baseModel.output
     headModel = Dense(51, activation='softmax')(headModel)
 elif network == "NASNetMobile":
     print(network)
     train, test, lb2, labelsTest = preprocessing(network)
     baseModel = NASNetMobile(weights=pretraining,
                              include_top=False,
                              input_tensor=Input(shape=(224, 224, 3)),
                              pooling="avg")
     headModel = baseModel.output
     headModel = Dense(51, activation='softmax')(headModel)
 elif network == "EfficientNetB0":
     print(network)
     train, test, lb2, labelsTest = preprocessing_EfficcientNet()
     baseModel = EfficientNetB0(weights=pretraining,
                                include_top=False,
                                input_tensor=Input(shape=(224, 224, 3)),
                                pooling="avg")
     headModel = baseModel.output
     headModel = Dropout(0.2)(headModel)
     headModel = Dense(51, activation='softmax')(headModel)
 else:
     print(network)
y_train = y_train.reshape(y_train.shape[0], )
y_test = y_test.reshape(y_test.shape[0], )

# OneHotEncoding
from sklearn.preprocessing import OneHotEncoder

y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)

ohencoder = OneHotEncoder()
ohencoder.fit(y_train)
y_train = ohencoder.transform(y_train).toarray()
y_test = ohencoder.transform(y_test).toarray()

vgg19 = NASNetMobile(weights='imagenet',
                     include_top=False,
                     input_shape=(32, 32, 3))
vgg19.trainable = True
x_train = preprocess_input(x_train)
x_test = preprocess_input(x_test)

x_train = x_train.astype('float32') / 255.  # preprocessing
x_test = x_test.astype('float32') / 255.  # preprocessing

model = Sequential()
model.add(vgg19)
model.add(Flatten())
model.add(Dense(128))
model.add(Dense(64))
model.add(Dense(10, activation='softmax'))
model.summary()
print('----------------------------------------------------------------------------')
vgg16 = ResNet50()
# vgg16.summary()
print("ResNet50",len(vgg16.trainable_weights)/2) 
print('----------------------------------------------------------------------------')
vgg16 = ResNet50V2()
# vgg16.summary()
print("ResNet50V2",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = NASNetLarge()
# vgg16.summary()
print("NASNetLarge",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = NASNetMobile()
# vgg16.summary()
print("NASNetMobile",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = DenseNet121()
# vgg16.summary()
print("DenseNet121",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = DenseNet169()
# vgg16.summary()
print("DenseNet169",len(vgg16.trainable_weights)/2) 

print('----------------------------------------------------------------------------')
vgg16 = DenseNet201()
    base_model = VGG16(weights='imagenet', include_top=False)
    model_name = 'VGG16'
    epoch_num = 70
    
elif use_the_model == 3: 
    base_model = ResNet50(weights='imagenet', include_top=False)
    model_name = 'ResNet50'
    epoch_num = 30
    
elif use_the_model == 4: 
    base_model = InceptionResNetV2(weights='imagenet', include_top=False)
    model_name = 'InceptionResNetV2'
    epoch_num = 50
    
elif use_the_model == 5: 
    base_model = NASNetMobile(input_shape=(224,224,3), weights='imagenet', include_top=False)
    model_name = 'NASNetMobile'
    epoch_num = 50
elif use_the_model == 6: 
    base_model = NASNetLarge(input_shape=(331,331,3), weights='imagenet', include_top=False)
    model_name = 'NASNetLarge'
    epoch_num = 50
    
elif use_the_model == 7: 
    base_model = MobileNetV2(weights='imagenet', include_top=False)
    model_name = 'MobileNetV2'
    epoch_num = 70
    
elif use_the_model == 8: 
    base_model = DenseNet121(weights='imagenet', include_top=False)
    model_name = 'DenseNet121'
Example #16
print(x.shape, y.shape) # (1073, 128, 862) (1073,)

x_train, x_test, y_train, y_test = train_test_split(
    x, y, train_size=0.8, shuffle=True, random_state=42
)

aaa = 1 
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], aaa)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], aaa)
print(x_train.shape, y_train.shape) # (3628, 128, 862, 1) (3628,)
print(x_test.shape, y_test.shape)   # (908, 128, 862, 1) (908,)

model = NASNetMobile(
    include_top=True,
    input_shape=(128,862,1),
    classes=2,
    pooling=None,
    weights=None,
)

model.summary()
# model.trainable = False

model.save('C:/nmb/nmb_data/h5/5s/Nasnet/nasnet_adadelta_1.h5')

# compile, train
op = Adadelta(lr=1e-3)
batch_size = 4

es = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True, verbose=1)
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
Example #17
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.applications import NASNetMobile
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, UpSampling2D
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)

nasnetmobile = NASNetMobile(weights='imagenet',
                            include_top=False,
                            input_shape=(224, 224, 3))

nasnetmobile.summary()

nasnetmobile.trainable = False

model = Sequential()
model.add(UpSampling2D(size=(7, 7)))
model.add(nasnetmobile)
model.add(Flatten())
model.add(Dense(256))
model.add(Dense(64))
model.add(Dense(10, activation='softmax'))

# model.summary()
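A possible continuation (not in the original snippet): compile the Sequential model and fit it on the CIFAR-10 arrays loaded above; the hyperparameters are illustrative only.

# hypothetical compile/fit step; epochs and batch_size are arbitrary
model.compile(loss='categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])
es = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
model.fit(x_train, y_train, epochs=10, batch_size=32,
          validation_split=0.2, callbacks=[es])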
Example #18
def main():
    directory = 'img'  # folder where the images are stored
    df_train = pd.read_csv('train.csv')  # DataFrame describing the training data
    df_validation = pd.read_csv('val.csv')  # DataFrame describing the validation data
    df_test = pd.read_csv('test.csv')  # DataFrame describing the test data
    label_list = ['AMD', 'DR_DM', 'Gla', 'MH', 'Normal', 'RD', 'RP',
                  'RVO']  # label names
    image_size = (224, 224)  # input image size
    classes = len(label_list)  # number of classes
    batch_size = 32  # batch size
    epochs = 300  # number of epochs
    loss = 'categorical_crossentropy'  # loss function
    optimizer = Adam(lr=0.00001, amsgrad=True)  # optimizer
    metrics = 'accuracy'  # evaluation metric
    # ImageDataGenerator augmentation parameters
    aug_params = {
        'rotation_range': 5,
        'width_shift_range': 0.05,
        'height_shift_range': 0.05,
        'shear_range': 0.1,
        'zoom_range': 0.05,
        'horizontal_flip': True,
        'vertical_flip': True
    }

    # save the model only when val_loss reaches a new minimum
    mc_cb = ModelCheckpoint('model_weights.h5',
                            monitor='val_loss',
                            verbose=1,
                            save_best_only=True,
                            mode='min')
    # when training stagnates, multiply the learning rate by 0.2
    rl_cb = ReduceLROnPlateau(monitor='loss',
                              factor=0.2,
                              patience=3,
                              verbose=1,
                              mode='auto',
                              min_delta=0.0001,
                              cooldown=0,
                              min_lr=0)
    # stop training when the loss no longer improves
    es_cb = EarlyStopping(monitor='loss',
                          min_delta=0,
                          patience=5,
                          verbose=1,
                          mode='auto')

    # adjust the loss weights according to the number of samples per class
    weight_balanced = {}
    for i, label in enumerate(label_list):
        weight_balanced[i] = (df_train['label'] == label).sum()
    max_count = max(weight_balanced.values())
    for label in weight_balanced:
        weight_balanced[label] = max_count / weight_balanced[label]
    print(weight_balanced)

    # create the data generators
    ## training data generator
    datagen = ImageDataGenerator(rescale=1. / 255, **aug_params)
    train_generator = datagen.flow_from_dataframe(dataframe=df_train,
                                                  directory=directory,
                                                  x_col='filename',
                                                  y_col='label',
                                                  target_size=image_size,
                                                  class_mode='categorical',
                                                  classes=label_list,
                                                  batch_size=batch_size)
    step_size_train = train_generator.n // train_generator.batch_size
    ## validation data generator
    datagen = ImageDataGenerator(rescale=1. / 255)
    validation_generator = datagen.flow_from_dataframe(
        dataframe=df_validation,
        directory=directory,
        x_col='filename',
        y_col='label',
        target_size=image_size,
        class_mode='categorical',
        classes=label_list,
        batch_size=batch_size)
    step_size_validation = validation_generator.n // validation_generator.batch_size

    # build the network
    base_model = NASNetMobile(include_top=False,
                              weights='imagenet',
                              pooling='avg',
                              input_shape=(image_size[0], image_size[1], 3))
    x = Dense(256, kernel_initializer='he_normal')(base_model.output)
    x = Dense(classes, kernel_initializer='he_normal')(x)
    outputs = Activation('softmax')(x)
    model = Model(inputs=base_model.inputs, outputs=outputs)

    model.summary()
    model.compile(loss=loss, optimizer=optimizer, metrics=[metrics])

    # training
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=step_size_train,
                                  epochs=epochs,
                                  verbose=1,
                                  callbacks=[mc_cb, rl_cb, es_cb],
                                  validation_data=validation_generator,
                                  validation_steps=step_size_validation,
                                  class_weight=weight_balanced,
                                  workers=3)

    # save the learning curves
    plot_history(history)

    # evaluate on the test data
    ## load the trained weights
    model.load_weights('model_weights.h5')

    ## inference
    X = df_test['filename'].values
    y_true = list(map(lambda x: label_list.index(x), df_test['label'].values))
    y_pred = []
    for file in tqdm(X, desc='pred'):
        # resize and convert the image to match the training preprocessing
        img = Image.open(f'{directory}/{file}')
        img = img.resize(image_size)
        img = np.array(img, dtype=np.float32)
        img *= 1. / 255
        img = np.expand_dims(img, axis=0)

        y_pred.append(np.argmax(model.predict(img)[0]))

    ## evaluation
    print(classification_report(y_true, y_pred, target_names=label_list))
Example #19
def create_model(
    model_name, log_dir, args
):  # optimizer, learning rate, activation, neurons, batch size, epochs...

    input_shape = input_size(model_name, args)

    if args.head == 'max' or (args.base_trainable
                              and args.head != 't_complex'):
        pool = 'max'
    else:
        pool = 'none'

    if model_name == 'VGG16':
        conv_base = VGG16(weights='imagenet',
                          include_top=False,
                          pooling=pool,
                          input_shape=input_shape)
    elif model_name == 'VGG19':
        conv_base = VGG19(weights='imagenet',
                          include_top=False,
                          pooling=pool,
                          input_shape=input_shape)
    elif model_name == 'ResNet50':
        conv_base = ResNet50(weights='imagenet',
                             include_top=False,
                             pooling=pool,
                             input_shape=input_shape)
    elif model_name == 'InceptionV3':
        conv_base = InceptionV3(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'Xception':
        conv_base = Xception(weights='imagenet',
                             include_top=False,
                             pooling=pool,
                             input_shape=input_shape)
    elif model_name == 'InceptionResNetV2':
        conv_base = InceptionResNetV2(weights='imagenet',
                                      include_top=False,
                                      pooling=pool,
                                      input_shape=input_shape)
    elif model_name == 'NASNetMobile':
        conv_base = NASNetMobile(weights='imagenet',
                                 include_top=False,
                                 pooling=pool,
                                 input_shape=input_shape)
    elif model_name == 'NASNetLarge':
        conv_base = NASNetLarge(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'DenseNet201':
        conv_base = DenseNet201(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    elif model_name == 'MobileNetV2':
        conv_base = MobileNetV2(weights='imagenet',
                                include_top=False,
                                pooling=pool,
                                input_shape=input_shape)
    else:
        conv_base = None
        print("Model name not known!")
        exit()

    conv_base.trainable = args.base_trainable

    model = models.Sequential()
    if args.base_trainable:
        if args.head == 't_complex':
            model = models.Sequential()
            model.add(conv_base)
            model.add(
                layers.Conv2D(filters=1024,
                              kernel_size=(3, 3),
                              padding='same',
                              strides=1))
            model.add(layers.Flatten())  # ??
            model.add(layers.Dense(1024, activation='sigmoid'))
            model.add(layers.Dense(256, activation='sigmoid'))
            model.add(layers.Dense(args.CLASSES_NO, activation='softmax')
                      )  # (samples, new_rows, new_cols, filters)
        else:
            model.add(conv_base)
            model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'dense':
        # outside only?
        model.add(conv_base)
        model.add(layers.Flatten())
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(128, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'max':
        model.add(conv_base)
        model.add(layers.Dense(512, activation='relu'))
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(args.CLASSES_NO, activation='softmax'))
    elif args.head == 'mod':
        model = models.Sequential()
        model.add(conv_base)
        model.add(
            layers.Conv2D(filters=2048, kernel_size=(3, 3), padding='valid'))
        model.add(layers.Flatten())  # ??
        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(1024, activation='sigmoid'))
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(
            args.CLASSES_NO,
            activation='softmax'))  # (samples, new_rows, new_cols, filters)

    if args.lr_decay:
        lr_schedule = ExponentialDecay(args.INIT_LEARN_RATE,
                                       decay_steps=args.DECAY_STEPS,
                                       decay_rate=args.DECAY_RATE,
                                       staircase=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(lr_schedule),
                      metrics=['acc'])  # To different optimisers?
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=args.LEARNING_RATE),
                      metrics=['acc'])

    with open(os.path.join(log_dir, 'modelsummary.txt'), 'w') as f:
        with redirect_stdout(f):
            model.summary()
    print(model.summary())
    return model
Example #20
# exercise
# build a vgg16-style transfer model with cifar10
# compare the results
import numpy as np
from tensorflow.keras.datasets import cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()

print(x_train.shape)  # (50000,32,32,3)
print(x_test.shape)  # (10000, 32, 32, 3)

from tensorflow.keras.applications import NASNetMobile
from tensorflow.keras.layers import Dense, Flatten, UpSampling2D
from tensorflow.keras.models import Sequential

nASNetMobile = NASNetMobile(
    weights='imagenet', include_top=False,
    input_shape=(96, 96, 3))  # include_top=False allows a custom size / default is 224*224
# print(model.weights)

nASNetMobile.trainable = False
# vgg16.summary()
# print(len(vgg16.weights))           # 26
# print(len(vgg16.trainable_weights)) # 0

model = Sequential()
model.add(UpSampling2D(size=(3, 3)))  # UpSampling2D needs integer factors; 32x32 CIFAR images -> 96x96
model.add(nASNetMobile)
model.add(Flatten())
model.add(Dense(10))
model.add(Dense(5))
model.add(Dense(1))  #, activation='softmax'))
Example #21
y_train = y_train.reshape(-1,1)
y_test = y_test.reshape(-1,1)
ohencoder = OneHotEncoder()
ohencoder.fit(y_train)
y_train = ohencoder.transform(y_train).toarray()
y_test = ohencoder.transform(y_test).toarray()

x_train = preprocess_input(x_train)
x_test = preprocess_input(x_test)
x_train = x_train.astype('float32')/255.  # preprocessing
x_test = x_test.astype('float32')/255.  # preprocessing

# print(model.weights)
# ============== modeling =====================
input_tensor = Input(shape=(32, 32, 3))
apl = NASNetMobile(weights='imagenet', include_top=False,input_tensor = input_tensor)
apl.trainable = True  # must be referenced as apl so the Sequential model picks it up
# nes.summary()
# i.e., 16 layers, but only 13 carry weights, hence len = 26
# print(len(NASNetMobile.weights)) # 26
# print(len(NASNetMobile.trainable_weights)) # 0

model = Sequential()
model.add(apl)  # 3-D output -> 26 layers
model.add(Flatten())
model.add(Dense(10))
model.add(Dense(5))
model.add(Dense(10, activation='softmax'))
model.summary()

#3. compile, train
Example #22
 def backbone(x_in):
     if backbone_type == 'ResNet50':
         return ResNet50(input_shape=x_in.shape[1:],
                         include_top=False,
                         weights=weights)(x_in)
     elif backbone_type == 'ResNet50V2':
         return ResNet50V2(input_shape=x_in.shape[1:],
                           include_top=False,
                           weights=weights)(x_in)
     elif backbone_type == 'ResNet101V2':
         return ResNet101V2(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'InceptionResNetV2':
         return InceptionResNetV2(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=weights)(x_in)
     elif backbone_type == 'InceptionV3':
         return InceptionV3(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'MobileNet':
         return MobileNet(input_shape=x_in.shape[1:],
                          include_top=False,
                          weights=weights)(x_in)
     elif backbone_type == 'MobileNetV2':
         return MobileNetV2(input_shape=x_in.shape[1:],
                            include_top=False,
                            weights=weights)(x_in)
     elif backbone_type == 'NASNetLarge':
         model = NASNetLarge(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None)
         model.load_weights(WEIGHTS_DIR + "nasnet_large_no_top.h5")
         return model(x_in)
     elif backbone_type == 'NASNetMobile':
         model = NASNetMobile(input_shape=x_in.shape[1:],
                              include_top=False,
                              weights=None)
         model.load_weights(WEIGHTS_DIR + "nasnet_mobile_no_top.h5")
         return model(x_in)
     elif backbone_type == 'Xception':
         return Xception(input_shape=x_in.shape[1:],
                         include_top=False,
                         weights=weights)(x_in)
     elif backbone_type == 'MobileNetV3Small':
         model = MobileNetV3Small(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=None)
         model.load_weights(WEIGHTS_DIR + "mobilenet_v3_small_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'MobileNetV3Large':
         model = MobileNetV3Large(input_shape=x_in.shape[1:],
                                  include_top=False,
                                  weights=None)
         model.load_weights(WEIGHTS_DIR + "mobilenet_v3_large_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite0':
         model = EfficientNetLite0(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite0_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite1':
         model = EfficientNetLite1(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite1_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite2':
         model = EfficientNetLite2(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite2_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite3':
         model = EfficientNetLite3(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite3_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite4':
         model = EfficientNetLite4(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite4_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite5':
         model = EfficientNetLite5(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite5_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetLite6':
         model = EfficientNetLite6(input_shape=x_in.shape[1:],
                                   include_top=False,
                                   weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnet_lite6_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB0':
         model = EfficientNetB0(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb0_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB1':
         model = EfficientNetB1(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb1_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB2':
         model = EfficientNetB2(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb2_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB3':
         model = EfficientNetB3(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb3_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB4':
         model = EfficientNetB4(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb4_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB5':
         model = EfficientNetB5(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb5_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB6':
         model = EfficientNetB6(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         if use_pretrain:
             model.load_weights(WEIGHTS_DIR + "efficientnetb6_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'EfficientNetB7':
         model = EfficientNetB7(input_shape=x_in.shape[1:],
                                include_top=False,
                                weights=None)
         model.load_weights(WEIGHTS_DIR + "efficientnetb7_notop.ckpt")
         return model(x_in)
     elif backbone_type == 'MnasNetA1':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetA1")(x_in)
     elif backbone_type == 'MnasNetB1':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetB1")(x_in)
     elif backbone_type == 'MnasNetSmall':
         return MnasNetModel(input_shape=x_in.shape[1:],
                             include_top=False,
                             weights=None,
                             name="MnasNetSmall")(x_in)
     else:
         raise TypeError('backbone_type error!')
Example #23
# For each model in its untouched state, record the number of parameters and the number of trainable weights

from tensorflow.keras.applications import VGG16, VGG19, Xception
from tensorflow.keras.applications import ResNet101, ResNet101V2
from tensorflow.keras.applications import ResNet152, ResNet152V2
from tensorflow.keras.applications import InceptionV3, InceptionResNetV2
from tensorflow.keras.applications import MobileNet, MobileNetV2
from tensorflow.keras.applications import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications import NASNetLarge, NASNetMobile

from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout, Activation
from tensorflow.keras.models import Sequential

model = NASNetMobile()
model.trainable = True
model.summary()
print('number of trainable weights before freezing:', len(model.trainable_weights))
print(model.name)

# VGG16: Total params: 138,357,544 // trainable weights before freezing: 32
# VGG19: Total params: 143,667,240 // trainable weights before freezing: 38
# Xception: Total params: 22,910,480 // trainable weights before freezing: 156
# ResNet101: Total params: 44,707,176 // trainable weights before freezing: 418
# ResNet101V2: Total params: 44,675,560 // trainable weights before freezing: 344
# ResNet152: Total params: 60,419,944 // trainable weights before freezing: 622
# ResNet152V2: Total params: 60,380,648 // trainable weights before freezing: 514
# InceptionV3: Total params: 23,851,784 // trainable weights before freezing: 190
# InceptionResNetV2: Total params: 55,873,736 // trainable weights before freezing: 490
# MobileNet: Total params: 4,253,864 // trainable weights before freezing: 83
# MobileNetV2: Total params: 3,538,984 // trainable weights before freezing: 158
# DenseNet121: Total params: 8,062,504 // trainable weights before freezing: 364
Example #24
                                                  stratify=labels,
                                                  random_state=42)

# construct the training image generator for data augmentation
aug = ImageDataGenerator(rotation_range=20,
                         zoom_range=0.15,
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.15,
                         horizontal_flip=True,
                         fill_mode="nearest")

# load the NASNetMobile network, ensuring the head FC layer sets are
# left off
baseModel = NASNetMobile(weights="imagenet",
                         include_top=False,
                         input_tensor=Input(shape=(224, 224, 3)))

# construct the head of the model that will be placed on top of
# the base model
headModel = baseModel.output
headModel = AveragePooling2D(pool_size=(7, 7))(headModel)
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(128, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(2, activation="softmax")(headModel)

# place the head FC model on top of the base model (this will become
# the actual model we will train)
model = Model(inputs=baseModel.input, outputs=headModel)
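In scripts of this shape the usual next step, not shown in the snippet, is to freeze the base network and compile for head-only training; a hedged sketch:

# hypothetical continuation: freeze the NASNetMobile base so only the new head trains
for layer in baseModel.layers:
    layer.trainable = False
model.compile(loss="binary_crossentropy", optimizer="adam",
              metrics=["accuracy"])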
Example #25
# OneHotEncoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape, y_test.shape)

# reshape for CNN input
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],
                          x_train.shape[3])
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                        x_test.shape[3])
print("reshape x:", x_train.shape, x_test.shape)

# 2. model
model1 = NASNetMobile(weights='imagenet',
                      include_top=False,
                      input_shape=(32, 32, 3))

model1.trainable = False

model = Sequential()
model.add(model1)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
# model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256))
model.add(Dense(10, activation='softmax'))

# 3. compile, train
Example #26
# preprocessing
(x_train, y_train), (x_test, y_test)  = cifar10.load_data()

x_train = x_train.astype('float32')/255.  # (50000, 32, 32, 3)
x_test = x_test.astype('float32')/255.

y_train = to_categorical(y_train)
y_test = to_categorical(y_test)



# modeling
## only edit this part
name = 'NASNetMobile'
t_model = NASNetMobile(include_top=False, weights='imagenet', input_shape=(32,32,3))
##
t_model.trainable=False
model = Sequential()
model.add(t_model)
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

# compile and train
model.compile(loss='categorical_crossentropy', metrics=['acc'], optimizer='adam')


earlystopping = EarlyStopping(monitor='loss', patience=10, mode='auto')

reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=3,
                             factor=0.5, verbose=1)
                         width_shift_range=0.2,
                         height_shift_range=0.2,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode='nearest')

# test_dataset = train.flow_from_directory('C:/Users/param/Face-Mask-Detection/dataset/test',
# target_size=(224, 224),
# batch_size=32,
# class_mode='binary')
# train_dataset.class_indices

# local_weights_file = 'C:/Users/param/Face-Mask-Detection/face_detector/inceptionv3-model-10ep.h5'
pre_trained_model = NASNetMobile(weights="imagenet",
                                 include_top=False,
                                 input_tensor=Input(shape=(224, 224, 3)))

# pre_trained_model.load_weights(local_weights_file)
#

#
pre_trained_model.summary()

last_layer = pre_trained_model.get_layer('mixed7')  # NOTE: 'mixed7' is an InceptionV3 layer name; NASNetMobile has no such layer, so this must be replaced with a valid NASNetMobile layer name
# print('last layer output shape: ', last_layer.output_shape)
last_output = last_layer.output
# construct the head of the model that will be placed on top of
# the base model
# headModel = pre_trained_model.output
# headModel = AveragePooling2D(pool_size=(5, 5))(last_output)