Example #1
def build_model(self):
    # Load NASNetMobile as a frozen convolutional base (ImageNet weights, no classifier head).
    conv_base = NASNetMobile(include_top=False,
                             weights='imagenet',
                             input_shape=config.input_shape)
    conv_base.trainable = False
    conv_base.summary()  # summary() prints the architecture itself and returns None
    inputs = Input(shape=config.input_shape)
    x = conv_base(inputs)
    x = Flatten()(x)
    # `config` and `nasnet` are settings objects defined elsewhere in the source project.
    x = Dense(units=256, activation=nasnet['activation'])(x)
    outputs = Dense(units=config.num_classes,
                    activation=nasnet['outact'])(x)
    self.model = Model(inputs=inputs, outputs=outputs)
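This method relies on `config` and `nasnet` objects defined elsewhere in its project. A minimal sketch of what they might look like, purely as hypothetical stand-ins (the real project's values are not shown here):

from types import SimpleNamespace

# Hypothetical stand-ins for the project's settings; names and values are assumptions.
config = SimpleNamespace(input_shape=(224, 224, 3), num_classes=10)
nasnet = {'activation': 'relu', 'outact': 'softmax'}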
Example #2
File: models.py Project: azeus404/thesis
from tensorflow.keras.applications import NASNetMobile
from tensorflow.keras.layers import Input, Dense, GlobalAveragePooling2D
from tensorflow.keras.models import Model

def NASNetMobilemodel(no_classes, shape):
    """
    NASNetMobile: Learning Transferable Architectures for Scalable Image Recognition, 2018
    """
    base_model = NASNetMobile(include_top=False,
                              weights='imagenet',
                              input_shape=shape)
    base_model.trainable = False
    inputs = Input(shape=shape)
    # training=False keeps BatchNormalization layers in inference mode even if unfrozen later.
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    # x = Dense(1024, activation='relu')(x)
    x = Dense(1056, activation='relu')(x)
    predictions = Dense(no_classes, activation='softmax',
                        name='predictions')(x)
    model = Model(inputs=inputs, outputs=predictions)
    return model
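A minimal usage sketch of the factory function above; the class count, input shape, and compile settings are illustrative assumptions, not values from the thesis project:

# Illustrative usage only; arguments and compile settings are assumed.
model = NASNetMobilemodel(no_classes=2, shape=(224, 224, 3))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])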
Example #3
# One-hot encoding
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.applications import NASNetMobile
from tensorflow.keras.applications.nasnet import preprocess_input
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam

# OneHotEncoder expects 2-D input, so reshape the label vectors into columns.
y_train = y_train.reshape(-1, 1)
y_test = y_test.reshape(-1, 1)

ohencoder = OneHotEncoder()
ohencoder.fit(y_train)
y_train = ohencoder.transform(y_train).toarray()
y_test = ohencoder.transform(y_test).toarray()

nasnet_base = NASNetMobile(weights='imagenet',
                           include_top=False,
                           input_shape=(32, 32, 3))
nasnet_base.trainable = True  # fine-tune the entire pretrained base

# preprocess_input rescales pixels to [-1, 1] for NASNet, so no extra
# division by 255 is needed (applying both would normalize twice).
x_train = preprocess_input(x_train.astype('float32'))
x_test = preprocess_input(x_test.astype('float32'))

model = Sequential()
model.add(nasnet_base)
model.add(Flatten())
model.add(Dense(128, activation='relu'))  # without activations, stacked Dense layers collapse into one linear map
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()
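The snippet imports Adam but stops before compiling; a hedged sketch of the remaining training step, with assumed hyperparameters:

# Assumed continuation; learning rate, epochs, and batch size are illustrative.
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, batch_size=64,
          validation_data=(x_test, y_test))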
Example #4
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.applications import NASNetMobile
from tensorflow.keras.layers import Dense, Flatten, UpSampling2D
from tensorflow.keras.models import Sequential

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

print(x_train.shape, x_test.shape)  # (50000, 32, 32, 3) (10000, 32, 32, 3)
print(y_train.shape, y_test.shape)  # (50000, 10) (10000, 10)

nasnetmobile = NASNetMobile(weights='imagenet',
                            include_top=False,
                            input_shape=(224, 224, 3))

nasnetmobile.summary()

nasnetmobile.trainable = False  # freeze the pretrained base

model = Sequential()
# UpSampling2D repeats pixels 7x per axis: 32x32 -> 224x224, matching the
# input_shape the base model was built with.
model.add(UpSampling2D(size=(7, 7), input_shape=(32, 32, 3)))
model.add(nasnetmobile)
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))

# model.summary()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
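The example compiles but never scales the pixels or trains; a hedged sketch of a plausible continuation (the scaling step and hyperparameters are assumptions the original omits):

# Assumed continuation; CIFAR-10 pixels are scaled to [0, 1], and the
# epoch/batch settings are illustrative.
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
model.fit(x_train, y_train, epochs=5, batch_size=32,
          validation_data=(x_test, y_test))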
Example #5
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.applications import NASNetMobile
from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Activation
from tensorflow.keras.models import Sequential

y_train = to_categorical(y_train)  # assumed: the excerpt omits this line, but the print below expects an encoded y_train
y_test = to_categorical(y_test)
print(y_train.shape, y_test.shape)

# Reshape for the CNN (a no-op for data that is already 4-D, but it makes the layout explicit).
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],
                          x_train.shape[3])
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                        x_test.shape[3])  # use x_test's own channel dimension
print("reshape x:", x_train.shape, x_test.shape)

# 2. Model
model1 = NASNetMobile(weights='imagenet',
                      include_top=False,
                      input_shape=(32, 32, 3))

model1.trainable = False  # freeze the pretrained base

model = Sequential()
model.add(model1)
model.add(Flatten())
model.add(Dense(256))
model.add(BatchNormalization())
# model.add(Dropout(0.2))
model.add(Activation('relu'))
model.add(Dense(256, activation='relu'))
model.add(Dense(10, activation='softmax'))

# 3. Compile, train
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['acc'])
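A hedged sketch of the training call this snippet stops short of (hyperparameters assumed):

# Assumed continuation; epochs and batch size are illustrative.
model.fit(x_train, y_train, epochs=5, batch_size=32,
          validation_data=(x_test, y_test))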
Example #6

from tensorflow.keras.datasets import cifar10
from tensorflow.keras.applications import NASNetMobile
from tensorflow.keras.layers import Dense, Flatten, UpSampling2D
from tensorflow.keras.models import Sequential

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

print(x_train.shape)  # (50000, 32, 32, 3)
print(x_test.shape)   # (10000, 32, 32, 3)

nasnet_mobile = NASNetMobile(
    weights='imagenet', include_top=False,
    input_shape=(96, 96, 3))  # include_top=False allows a custom input size; the default is 224x224
# print(nasnet_mobile.weights)

nasnet_mobile.trainable = False
# nasnet_mobile.summary()
# print(len(nasnet_mobile.weights))
# print(len(nasnet_mobile.trainable_weights))  # 0 once frozen

model = Sequential()
# UpSampling2D only accepts positive integer factors, so (0.5, 0.5) is not
# valid; a 3x factor turns the 32x32 CIFAR-10 images into the 96x96 inputs
# the base model expects.
model.add(UpSampling2D(size=(3, 3), input_shape=(32, 32, 3)))
model.add(nasnet_mobile)
model.add(Flatten())
model.add(Dense(10, activation='relu'))
model.add(Dense(5, activation='relu'))
model.add(Dense(1))  # single linear output, trained with mse below
model.summary()

# 3. Compile, train
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
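The snippet compiles mse against a single linear output, so the integer CIFAR-10 labels would be treated as regression targets; a hedged sketch of a matching fit call (hyperparameters assumed):

# Assumed continuation; treats the integer class ids as regression targets
# to match the Dense(1) + mse setup above. Settings are illustrative.
model.fit(x_train, y_train, epochs=3, batch_size=64,
          validation_data=(x_test, y_test))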
# Exercise: for each model in its pristine state, tabulate the total parameter count and the number of weight tensors.

Example #7

from tensorflow.keras.applications import VGG16, VGG19, Xception
from tensorflow.keras.applications import ResNet101, ResNet101V2
from tensorflow.keras.applications import ResNet152, ResNet152V2
from tensorflow.keras.applications import InceptionV3, InceptionResNetV2
from tensorflow.keras.applications import MobileNet, MobileNetV2
from tensorflow.keras.applications import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications import NASNetLarge, NASNetMobile

from tensorflow.keras.layers import Dense, Flatten, BatchNormalization, Dropout, Activation
from tensorflow.keras.models import Sequential

model = NASNetMobile()
model.trainable = True
model.summary()
print('Trainable weights before freezing:', len(model.trainable_weights))
print(model.name)

# VGG16:             Total params: 138,357,544 // trainable weights before freezing: 32
# VGG19:             Total params: 143,667,240 // trainable weights before freezing: 38
# Xception:          Total params: 22,910,480  // trainable weights before freezing: 156
# ResNet101:         Total params: 44,707,176  // trainable weights before freezing: 418
# ResNet101V2:       Total params: 44,675,560  // trainable weights before freezing: 344
# ResNet152:         Total params: 60,419,944  // trainable weights before freezing: 622
# ResNet152V2:       Total params: 60,380,648  // trainable weights before freezing: 514
# InceptionV3:       Total params: 23,851,784  // trainable weights before freezing: 190
# InceptionResNetV2: Total params: 55,873,736  // trainable weights before freezing: 490
# MobileNet:         Total params: 4,253,864   // trainable weights before freezing: 83
# MobileNetV2:       Total params: 3,538,984   // trainable weights before freezing: 158
# DenseNet121:       Total params: 8,062,504   // trainable weights before freezing: 364
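The per-model figures above can be reproduced by looping over the imported constructors; a minimal sketch (each instantiation downloads ImageNet weights, so this is slow and purely illustrative):

# Sketch: report each model's parameter count and number of weight tensors.
for ctor in [VGG16, VGG19, Xception, ResNet101, ResNet101V2,
             ResNet152, ResNet152V2, InceptionV3, InceptionResNetV2,
             MobileNet, MobileNetV2, DenseNet121, DenseNet169,
             DenseNet201, NASNetLarge, NASNetMobile]:
    m = ctor()
    print(m.name, '| Total params:', m.count_params(),
          '| trainable weights before freezing:', len(m.trainable_weights))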
Example #8
from sklearn.preprocessing import OneHotEncoder
from tensorflow.keras.applications import NASNetMobile
from tensorflow.keras.applications.nasnet import preprocess_input
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.models import Sequential

y_train = y_train.reshape(-1, 1)  # assumed: the excerpt omits this line, but ohencoder.fit(y_train) needs 2-D labels
y_test = y_test.reshape(-1, 1)
ohencoder = OneHotEncoder()
ohencoder.fit(y_train)
y_train = ohencoder.transform(y_train).toarray()
y_test = ohencoder.transform(y_test).toarray()

# preprocess_input rescales pixels to [-1, 1] for NASNet, so no extra
# division by 255 is needed (applying both would normalize twice).
x_train = preprocess_input(x_train.astype('float32'))
x_test = preprocess_input(x_test.astype('float32'))

# ============== Modeling =====================
input_tensor = Input(shape=(32, 32, 3))
apl = NASNetMobile(weights='imagenet', include_top=False,
                   input_tensor=input_tensor)
apl.trainable = True  # fine-tune the whole base; `apl` is the handle added to the Sequential model below
# apl.summary()
# print(len(apl.weights))
# print(len(apl.trainable_weights))

model = Sequential()
model.add(apl)
model.add(Flatten())
model.add(Dense(10, activation='relu'))
model.add(Dense(5, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.summary()

# 3. Compile, train
model.compile(loss='categorical_crossentropy',