Example #1
    def __get_cnn(self):
        size_sq = 224
        dict_cnn = {
            'resnet':
            ResNet50(
                weights='imagenet',
                include_top=False,  # drop the ImageNet classification head
                input_shape=(size_sq, size_sq, 3)),
            'vgg':
            VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(size_sq, size_sq, 3)),
            'inceptV3':
            InceptionV3(
                weights="imagenet",
                include_top=False,
                input_shape=(size_sq, size_sq, 3),
            ),
            'effnetB0':
            EfficientNetB0(weights='imagenet',
                           include_top=False,
                           input_shape=(size_sq, size_sq, 3)),
            'effnetB7':
            EfficientNetB7(weights='imagenet',
                           include_top=False,
                           input_shape=(size_sq, size_sq, 3))
        }

        return dict_cnn[self.cnn_name]
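One caveat: this dictionary instantiates all five backbones (and downloads each set of ImageNet weights) even though only one is returned. A minimal standalone sketch of a lazier variant that maps names to constructors and builds only the requested model (the free function get_cnn is hypothetical):

from tensorflow.keras.applications import (ResNet50, VGG16, InceptionV3,
                                           EfficientNetB0, EfficientNetB7)

def get_cnn(cnn_name, size_sq=224):
    # Map names to constructors; only the chosen one is actually called.
    constructors = {
        'resnet': ResNet50,
        'vgg': VGG16,
        'inceptV3': InceptionV3,
        'effnetB0': EfficientNetB0,
        'effnetB7': EfficientNetB7,
    }
    return constructors[cnn_name](weights='imagenet',
                                  include_top=False,
                                  input_shape=(size_sq, size_sq, 3))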
Example #2
def build_model(num_classes, model="B7"):
    inputs = layers.Input(shape=(WIDTH, HEIGHT, 3))
    if model == "B7":
        model = EfficientNetB7(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B6":
        model = EfficientNetB6(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B5":
        model = EfficientNetB5(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B4":
        model = EfficientNetB4(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B3":
        model = EfficientNetB3(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B2":
        model = EfficientNetB2(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B1":
        model = EfficientNetB1(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B0":
        model = EfficientNetB0(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    # Freeze the pretrained weights
    model.trainable = False

    # Rebuild top
    x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
    x = layers.BatchNormalization()(x)

    top_dropout_rate = 0.2
    x = layers.Dropout(top_dropout_rate, name="top_dropout", seed=SEED)(x)
    outputs = layers.Dense(num_classes,
                           activation="softmax",
                           name="predictions")(x)

    # Compile
    model = tf.keras.Model(inputs, outputs, name="EfficientNet")
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
    model.compile(
        optimizer=optimizer,
        loss="sparse_categorical_crossentropy",
        metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)])
    return model
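Once the frozen-base model plateaus, the Keras fine-tuning example this follows unfreezes the top of the backbone and recompiles at a much lower learning rate. A minimal sketch against the model returned above; the layer count and learning rate follow that tutorial:

import tensorflow as tf
from tensorflow.keras import layers

def unfreeze_model(model, num_layers=20):
    # Unfreeze the top `num_layers` layers but keep BatchNormalization frozen,
    # since unfreezing BN statistics tends to hurt fine-tuning.
    for layer in model.layers[-num_layers:]:
        if not isinstance(layer, layers.BatchNormalization):
            layer.trainable = True
    model.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
        loss="sparse_categorical_crossentropy",
        metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)])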
Example #3
def EfficientNetB7model(no_classes, shape):
    """
    EfficientNetB7
    https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/
    Uses a fixed input size 224,224
    """
    base_model = EfficientNetB7(include_top=False,
                                weights='imagenet',
                                input_shape=shape)
    base_model.trainable = False
    inputs = Input(shape=shape)
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(no_classes, activation='softmax',
                        name='predictions')(x)
    model = Model(inputs, outputs=predictions)
    return model
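Note that the tf.keras EfficientNet models normalize pixels internally, so raw [0, 255] images can be passed straight in. A minimal usage sketch; the class count, input shape, and dummy batch are illustrative:

import numpy as np

model = EfficientNetB7model(no_classes=5, shape=(224, 224, 3))
dummy = np.random.randint(0, 256, size=(2, 224, 224, 3)).astype('float32')
probs = model.predict(dummy)  # shape (2, 5), softmax over classes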
Example #4
def convmodel():
    effnet = EfficientNetB7(include_top=False,
                            weights='imagenet',
                            input_shape=(128, 128, 3))
    x = effnet.output
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.5)(x)
    x = Flatten()(x)

    x = Dense(128, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dense(64, activation='relu')(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    x = Dense(1, activation='sigmoid')(x)

    model = Model(inputs=effnet.input, outputs=x)
    model.compile(loss='binary_crossentropy',
                  optimizer=Adam(1e-5),
                  metrics=['acc'])
    return model
Example #5
def getEfficientNetModelB7():
    model = EfficientNetB7(include_top=True,
                           weights=None,
                           classes=3,
                           classifier_activation='softmax')
    return model
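With weights=None and no input_shape, this builds an untrained B7 at its default 600x600 resolution, so it still has to be compiled and trained from scratch. A minimal sketch, with illustrative optimizer and loss:

model = getEfficientNetModelB7()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(train_ds, validation_data=val_ds, epochs=...)  # datasets assumed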
Example #6
    x_train_ = x_train[train_idx]
    x_val_ = x_train[val_idx]
    y_train_ = y_train[train_idx]
    y_val_ = y_train[val_idx]

    print(x_train_.shape, x_val_.shape)
    print(y_train_.shape, y_val_.shape)

    train_generator = ImageDataGenerator(width_shift_range=(-1, 1),
                                         height_shift_range=(-1, 1),
                                         zoom_range=0.15).flow(x_train_,
                                                               y_train_,
                                                               batch_size=16)
    val_generator = ImageDataGenerator().flow(x_val_, y_val_)

    b7 = EfficientNetB7(weights='imagenet',
                        include_top=False,
                        input_shape=(128, 128, 3))
    # b7.summary()

    # b7.trainable = False

    layer = b7.output
    layer = GlobalAveragePooling2D()(layer)
    output_tensor = Dense(1000, activation='softmax')(layer)

    model = Model(inputs=b7.input, outputs=output_tensor)

    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(learning_rate=0.015, momentum=0.9),
                  metrics=['accuracy'])  # assumed; the source snippet ends mid-call
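A hedged sketch of the fit call that would naturally follow, using the two generators defined above (the epoch count is illustrative):

model.fit(train_generator,
          validation_data=val_generator,
          epochs=30)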
Example #7
        verbose=1)

    x_train = x[train_index]
    x_valid = x[valid_index]
    y_train = y[train_index]
    y_valid = y[valid_index]

    train_generator = idg.flow(x_train, y_train, batch_size=16, seed=2048)
    # seed => random_state
    valid_generator = idg2.flow(x_valid, y_valid)
    test_generator = x_pred

    from tensorflow.keras.models import Model
    from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, BatchNormalization, Dense, Activation
    efficientnetb7 = EfficientNetB7(include_top=False,
                                    weights='imagenet',
                                    input_shape=x_train.shape[1:])
    efficientnetb7.trainable = False
    a = efficientnetb7.output
    a = GlobalAveragePooling2D()(a)
    a = Flatten()(a)
    a = Dense(128)(a)
    a = BatchNormalization()(a)
    a = Activation('relu')(a)
    a = Dense(64)(a)
    a = BatchNormalization()(a)
    a = Activation('relu')(a)
    a = Dense(1000, activation='softmax')(a)

    model = Model(inputs=efficientnetb7.input, outputs=a)
Example #8
import tensorflow as tf

from tensorflow.keras.models import load_model, save_model
from tensorflow.keras.applications import EfficientNetB7
from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.applications.efficientnet import preprocess_input
from tensorflow.keras.metrics import Precision, Recall
from tensorflow.keras.preprocessing.image import ImageDataGenerator

gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        tf.config.experimental.set_memory_growth(gpus[0], True)
    except RuntimeError as e:
        print(e)

model = EfficientNetB7(include_top=False, weights="imagenet")
global_average_layer = GlobalAveragePooling2D()(model.layers[-1].output)
output = Dense(10, activation='softmax')(global_average_layer)
model = Model(inputs=model.inputs, outputs=output)
model.summary()

datagen = ImageDataGenerator(
    preprocessing_function=preprocess_input,
    horizontal_flip=True,
    shear_range=5,
    zoom_range=0.1,
    width_shift_range=0.1,
    height_shift_range=0.1,
    fill_mode='nearest',
    rotation_range=5,
)
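From here, a hedged sketch of wiring the pieces together; the data/train directory, checkpoint path, batch size, and epoch count are all illustrative:

model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])

checkpoint = ModelCheckpoint('b7_best.h5', save_best_only=True, verbose=1)
train_flow = datagen.flow_from_directory('data/train',
                                         target_size=(600, 600),
                                         batch_size=8,
                                         class_mode='categorical')
model.fit(train_flow, epochs=10, callbacks=[checkpoint])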
Example #9
mc = ModelCheckpoint('../data/lotte/mc/lotte_b7_adam.h5',
                     save_best_only=True,
                     verbose=1)

train_generator = idg.flow(x_train, y_train, batch_size=32)
# seed => random_state
valid_generator = idg2.flow(x_valid, y_valid)
test_generator = x_pred
print(x_train.shape, y_train.shape)

from tensorflow.keras.models import Model
from tensorflow.keras.layers import GlobalAveragePooling2D, Flatten, BatchNormalization, Dense, Activation
from tensorflow.keras.applications import VGG19, MobileNet, EfficientNetB4, EfficientNetB7

ef = EfficientNetB7(weights="imagenet",
                    include_top=False,
                    input_shape=(200, 200, 3))
top_model = ef.output
top_model = Flatten()(top_model)
# top_model = Dense(1024, activation="relu")(top_model)
# top_model = Dropout(0.2)(top_model)
top_model = Dense(1000, activation="softmax")(top_model)

model = Model(inputs=ef.input, outputs=top_model)

model.summary()

from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau
es = EarlyStopping(patience=20)
lr = ReduceLROnPlateau(patience=10, factor=0.5)
Example #10
def create_efficientnet(width, height, depth, model_base,
                        first_layers_to_freeze, num_classes, learning_rate,
                        epochs):
    inputShape = (height, width, depth)

    inputs = K.Input(shape=inputShape)

    if model_base == "b0":
        effnet = EfficientNetB0(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b1":
        effnet = EfficientNetB1(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b2":
        effnet = EfficientNetB2(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b3":
        effnet = EfficientNetB3(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b4":
        effnet = EfficientNetB4(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b5":
        effnet = EfficientNetB5(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b6":
        effnet = EfficientNetB6(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    else:
        effnet = EfficientNetB7(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")

    # # Print architecture of effnet
    # for i, layer in enumerate(effnet.layers[:]):
    # 	print(i, layer.name, layer.output_shape)
    # print(f"Effnet len: {len(effnet.layers[:])}")

    # b0: 20; b2: 33; b4: 236; b6: 45; b7: 265
    for i, layer in enumerate(effnet.layers[:first_layers_to_freeze]):
        layer.trainable = False
    for i, layer in enumerate(effnet.layers[first_layers_to_freeze:]):
        if not isinstance(layer, K.layers.BatchNormalization):
            layer.trainable = True

    model = Sequential()
    model.add(effnet)
    model.add(K.layers.Dropout(0.25))
    model.add(K.layers.Dense(effnet.layers[-1].output_shape[3]))
    model.add(K.layers.LeakyReLU())
    model.add(K.layers.GlobalAveragePooling2D())
    model.add(K.layers.BatchNormalization())
    model.add(K.layers.Dropout(0.5))
    model.add(K.layers.Dense(num_classes, activation='softmax'))

    # Freeze the batchnorm layer of our model
    for i, layer in enumerate(model.layers[:]):
        if isinstance(layer, K.layers.BatchNormalization):
            layer.trainable = False

    opt = Adam(learning_rate=learning_rate, decay=learning_rate / epochs)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    model.summary()

    return model
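A minimal usage sketch for this builder; the argument values are illustrative, with first_layers_to_freeze taken from the per-variant counts in the comment above:

model = create_efficientnet(width=224, height=224, depth=3,
                            model_base="b7", first_layers_to_freeze=265,
                            num_classes=5, learning_rate=1e-3, epochs=50)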
Example #11
x_val = scaler.transform(x_val)

#to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)

#reshape
x_train = x_train.reshape(-1, 32, 32, 3)
x_test = x_test.reshape(-1, 32, 32, 3)
x_val = x_val.reshape(-1, 32, 32, 3)
print(x_train.shape, x_test.shape, x_val.shape)

#2. Modeling
TF = EfficientNetB7(weights='imagenet',
                    include_top=False,
                    input_shape=(32, 32, 3))
TF.trainable = False  # freeze: reuse the pretrained weights without training them
model = Sequential()
model.add(TF)
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(16, activation='relu'))
model.add(Dense(10,
                activation='softmax'))  # e.g. when using MNIST
model.summary()
print(len(TF.weights))
print(len(TF.trainable_weights))  # 0

# compile, train
from tensorflow.keras.callbacks import EarlyStopping
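A hedged sketch of the compile/fit step the final import is leading into (all hyperparameters illustrative):

es = EarlyStopping(patience=10, restore_best_weights=True)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train,
          validation_data=(x_val, y_val),
          epochs=100, batch_size=64, callbacks=[es])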
Example #12
from tensorflow.keras.applications import VGG16, VGG19, Xception
from tensorflow.keras.applications import ResNet101, ResNet101V2, ResNet152, ResNet152V2
from tensorflow.keras.applications import ResNet50, ResNet50V2
from tensorflow.keras.applications import InceptionResNetV2, InceptionV3
from tensorflow.keras.applications import MobileNet, MobileNetV2
from tensorflow.keras.applications import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications import NASNetLarge, NASNetMobile
from tensorflow.keras.applications import EfficientNetB0, EfficientNetB1, EfficientNetB7

model = EfficientNetB7()
model.trainable = False

model.summary()
print(len(model.weights))
print(len(model.trainable_weights))

# VGG16
# Total params: 138,357,544
# Trainable params: 0
# Non-trainable params: 138,357,544
# _________________________________________________________________
# 32
# 0

# VGG19
# Total params: 143,667,240
# Trainable params: 0
# Non-trainable params: 143,667,240
# _________________________________________________________________
# 38
# 0
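The same audit generalizes to several backbones at once; a minimal sketch using the imports above (weights=None skips the weight download):

for ctor in (VGG16, VGG19, EfficientNetB0, EfficientNetB7):
    m = ctor(weights=None)
    m.trainable = False
    print(ctor.__name__, len(m.weights), len(m.trainable_weights))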
Example #13
def build_model(encoder='efficientnetb7', center='dac', full_skip=True, attention='sc', upscore='upall'):

	MODEL_NAME = encoder
	if center is not None:
		MODEL_NAME = MODEL_NAME+'_'+center
	if attention is not None:
		MODEL_NAME = MODEL_NAME+'_'+attention
	if full_skip:
		MODEL_NAME = MODEL_NAME + '_fullskip'
	if upscore is not None:
		MODEL_NAME = MODEL_NAME + '_'+upscore


	if encoder == 'resnet50':
		encoder = ResNet50(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_relu', 'conv2_block3_out', 'conv3_block4_out', 'conv4_block6_out']
		encoder_output = encoder.get_layer('conv5_block3_out').output
		# data    320x320x3
		# conv1_relu    160x160x64
		# conv2_block3_out     80x80x256
		# conv3_block4_out    40x40x512
		# conv4_block6_out    20x20x1024
		# conv5_block3_out    10x10x2048  --> encoder output

	elif encoder == 'resnet101':
		encoder = ResNet101(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_relu', 'conv2_block3_out', 'conv3_block4_out']
		encoder_output = encoder.get_layer('conv4_block23_out').output
		#data   320x320x3
		#conv1_relu   160x160x64
		#conv2_block3_out   80x80x256
		#conv3_block4_out    40x40x512
		#conv4_block23_out   20x20x1024 --> encoder output
		#conv5_block3_out  10x10x2048

	elif encoder == 'resnet50v2':
		encoder = ResNet50V2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_conv', 'conv2_block3_1_relu', 'conv3_block4_1_relu', 'conv4_block6_1_relu']
		encoder_output = encoder.get_layer('post_relu').output
		# data   320x320x3
		# conv1_conv   160x160x64
		# conv2_block3_1_relu   80x80x64
		# conv3_block4_1_relu   40x40x128
		# conv4_block6_1_relu   20x20x256
		# post_relu   10x10x2048  --> encoder output

	elif encoder == 'resnet101v2':
		encoder = ResNet101V2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'conv1_conv', 'conv2_block3_1_relu', 'conv3_block4_1_relu', 'conv4_block23_1_relu']
		encoder_output = encoder.get_layer('post_relu').output
		#data   320x320x3
		#conv1_conv   160x160x64
		#conv2_block3_1_relu   80x80x64
		#conv3_block4_1_relu    40x40x128
		#conv4_block23_1_relu   20x20x256 
		#post_relu  10x10x2048 --> encoder output

	elif encoder == 'vgg19':
		encoder = VGG19(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), weights='imagenet', include_top=False)
		skip_names = ['block1_conv2', 'block2_conv2', 'block3_conv4', 'block4_conv4', 'block5_conv4']
		encoder_output = encoder.get_layer('block5_pool').output
		# block1_conv2   320x320x64
		# block2_conv2   160x160x128
		# block3_conv4   80x80x256
		# block4_conv4   40x40x512
		# block5_conv4   20x20x512
		# block5_pool   10x10x512   --> encoder output

	elif encoder == 'efficientnetb6':
		encoder = EfficientNetB6(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block2a_expand_activation', 'block3a_expand_activation', 'block4a_expand_activation']
		encoder_output = encoder.get_layer('block6a_expand_activation').output
		#data   320x320x3
		#block2a_expand_activation   160x160x192
		#block3a_expand_activation   80x80x240
		#block4a_expand_activation    40x40x432
		#block6a_expand_activation   20x20x1200 --> encoder output
		#top_activation   10x10x2304

	elif encoder == 'efficientnetb7':
		encoder = EfficientNetB7(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block2a_expand_activation', 'block3a_expand_activation', 'block4a_expand_activation']
		encoder_output = encoder.get_layer('block6a_expand_activation').output
		#data   320x320x3
		#block2a_expand_activation   160x160x192
		#block3a_expand_activation   80x80x288
		#block4a_expand_activation    40x40x480
		#block6a_expand_activation   20x20x1344 --> encoder output
		#top_activation   10x10x2560

	elif encoder == 'mobilenetv2':
		encoder = MobileNetV2(input_tensor=Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3), name='data'), weights='imagenet', include_top=False)
		skip_names = ['data', 'block_1_expand_relu', 'block_3_expand_relu', 'block_6_expand_relu', 'block_13_expand_relu']
		encoder_output = encoder.get_layer('out_relu').output
		# data   320x320x3
		# block_1_expand_relu   160x160x96
		# block_3_expand_relu   80x80x144
		# block_6_expand_relu    40x40x192
		# block_13_expand_relu   20x20x576
		# out_relu   10x10x1248   --> encoder output

	skip_layers = [encoder.get_layer(i).output for i in skip_names]
	# Center --------------
	if center == 'atrous':
		x = atrous_block(encoder_output)
	elif center == 'dac':
		x = dense_atrous_block(encoder_output)
	elif center == 'aspp':
		x = aspp_block(encoder_output)
	elif center is None:
		x = encoder_output

	# Decoder --------------
	if attention == 'se':
		attn_block = se_block
	elif attention == 'cbam':
		attn_block = cbam_block
	elif attention == 'sc':
		attn_block = scSE_block

	filters = [i.shape[-1] for i in skip_layers]
	filters[0] = 64

	scales = [2 ** i for i in range(1, len(filters))][::-1]
	X = []
	for i in range(1, len(filters) + 1):
		X.append(x)

		down = []
		if full_skip:
			for j in range(len(scales) - (i - 1)):
				d = down_skip(skip_layers[j], scales[j + (i - 1)], filters[-1]//4)
				if attention is not None:
					d = attn_block(d) 
				down.append(d)


		direct = direct_skip(skip_layers[-i], filters[-1]//4)
		if attention is not None:
			direct = attn_block(direct)


		x = convtranspose_block(x, filters[-1]//4)
		if attention is not None:
			x = attn_block(x)

		x = Concatenate()([x] + [direct] + down)
		
		x = conv3_block(x, x.shape[-1])

	if upscore is not None:
		if upscore=='upall':
			up_scales=[2 ** i for i in range(1, len(filters)+1)][::-1]
			UP = [upscore_block(x, 32, up_scales[i]) for i, x in enumerate(X)]
			if attention is not None:
				UP = [attn_block(x) for x in UP]

			up = Concatenate()(UP)
     
		elif upscore=='upcenter':
			up = upscore_block(X[0], 64, 2 ** len(filters))
			if attention is not None:
				up = attn_block(up)

		x = Concatenate()([x, up])


	x = Conv2D(1, 1, padding='same')(x)
	x = Activation('sigmoid')(x)

	model = Model(encoder.input, x)

	metrics = [dice_coef, Recall(), Precision()]
	opt = Nadam(LR)
	model.compile(loss=bce_dice_loss, optimizer=opt, metrics=metrics)

	return model, MODEL_NAME
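A hedged usage sketch; IMAGE_SIZE, LR, and the custom helpers referenced inside (dense_atrous_block, scSE_block, the skip and upscore blocks, bce_dice_loss, dice_coef) must already be defined in the surrounding module:

model, model_name = build_model(encoder='efficientnetb7', center='dac',
                                full_skip=True, attention='sc', upscore='upall')
model.summary()
print('checkpoint prefix:', model_name)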
Example #14
                                  input_dim=(1024, 1024),
                                  n_channels=3,
                                  n_classes=4,
                                  normalize=False,
                                  zoom_range=[0.5, 1],
                                  rotation=False,
                                  brightness_range=[0.8, 1],
                                  shuffle=False)
    # set GPU
    G = 2
    # disable eager execution
    tf.compat.v1.disable_eager_execution()
    print("[INFO] training with {} GPUs...".format(G))

    model = EfficientNetB7(input_shape=(1024, 1024, 3),
                           include_top=False,
                           weights='imagenet')

    # config params
    INIT_LR = 2e-4
    EPOCHS = 100
    DECAY = 1e-2
    model = create_model(model, 4, lr=INIT_LR, decay=DECAY)
    print(model.summary())

    #load weights
    model.load_weights(
        "/home/VinBigData_ChestXray/data_classify/B6_100epoch.h5")

    # Start training
    my_checkpointer = [
Example #15
    def __init__(self,
                 data_format,
                 name='',
                 include_top=False,
                 pooling=None,
                 block3_strides=False,
                 average_pooling=True,
                 classes=1000,
                 gem_power=3.0,
                 embedding_layer=False,
                 embedding_layer_dim=2048):
        super(efficientNetb7, self).__init__(name=name)

        valid_channel_values = ('channels_first', 'channels_last')
        if data_format not in valid_channel_values:
            raise ValueError('Unknown data_format: %s. Valid values: %s' %
                             (data_format, valid_channel_values))
        self.include_top = include_top
        self.block3_strides = block3_strides
        self.average_pooling = average_pooling
        self.pooling = pooling

        def conv_block(filters, stage, block, strides=(2, 2)):
            return _ConvBlock(3,
                              filters,
                              stage=stage,
                              block=block,
                              data_format=data_format,
                              strides=strides)

        self.conv1 = layers.Conv2D(64, (7, 7),
                                   strides=(2, 2),
                                   data_format=data_format,
                                   padding='same',
                                   name='conv1')
        bn_axis = 1 if data_format == 'channels_first' else 3
        self.bn_conv1 = layers.BatchNormalization(axis=bn_axis,
                                                  name='bn_conv1')
        self.max_pool = layers.MaxPooling2D((3, 3),
                                            strides=(2, 2),
                                            data_format=data_format)

        # Striding layer that can be used on top of block3 to produce feature maps
        # with the same resolution as the TF-Slim implementation.
        if self.block3_strides:
            self.subsampling_layer = layers.MaxPooling2D(
                (1, 1), strides=(2, 2), data_format=data_format)
            self.l5a = conv_block([512, 512, 2048],
                                  stage=5,
                                  block='a',
                                  strides=(1, 1))
        else:
            self.l5a = conv_block([512, 512, 2048], stage=5, block='a')

        self.avg_pool = layers.AveragePooling2D((7, 7),
                                                strides=(7, 7),
                                                data_format=data_format)
        if self.include_top:
            self.efficientbase = EfficientNetB7(include_top=True,
                                                weights='imagenet',
                                                input_tensor=None,
                                                input_shape=None,
                                                pooling=None)
        else:
            self.efficientbase = EfficientNetB7(include_top=False,
                                                weights='imagenet',
                                                input_shape=(448, 448, 3))
        # if self.include_top:
        #     self.flatten = layers.Flatten()
        #     self.fc1000 = layers.Dense(classes, name='fc1000')
        # else:
        self.flatten = layers.Flatten()
        self.fc = layers.Dense(classes, name='fc81313')

        reduction_indices = [1, 2] if data_format == 'channels_last' else [2, 3]
        reduction_indices = tf.constant(reduction_indices)
        if pooling == 'avg':
            self.global_pooling = functools.partial(tf.reduce_mean,
                                                    axis=reduction_indices,
                                                    keepdims=False)
        elif pooling == 'max':
            self.global_pooling = functools.partial(tf.reduce_max,
                                                    axis=reduction_indices,
                                                    keepdims=False)
        elif pooling == 'gem':
            logging.info('Adding GeMPooling layer with power %f', gem_power)
            self.global_pooling = functools.partial(gem_pooling,
                                                    axis=reduction_indices,
                                                    power=gem_power)
        else:
            self.global_pooling = None

        if embedding_layer:
            logging.info('Adding embedding layer with dimension %d',
                         embedding_layer_dim)
            self.embedding_layer = layers.Dense(embedding_layer_dim,
                                                name='embedding_layer')
        else:
            self.embedding_layer = None
Example #16
# Training
#

# Split the data into training and validation sets
train_df, validate_df = train_test_split(df,
                                         test_size=TEST_SIZE,
                                         random_state=RANDOM_STATE)
train_df = train_df.reset_index(drop=True)
validate_df = validate_df.reset_index(drop=True)
train_df['category'] = train_df['category'].astype('str')
validate_df['category'] = validate_df['category'].astype('str')
total_train = train_df.shape[0]
total_validate = validate_df.shape[0]

# load model
model = EfficientNetB7(include_top=False, input_shape=IMAGE_SHAPE)

# mark loaded layers as not trainable
for layer in model.layers:
    layer.trainable = False

# add new classifier layers
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(128, activation='relu', kernel_initializer='he_uniform')(flat1)
output = Dense(1, activation='sigmoid')(class1)

# define new model
model = Model(inputs=model.inputs, outputs=output)

# Horovod: adjust learning rate based on number of GPUs.
# opt = SGD(lr=0.001, momentum=0.9)
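A hedged sketch of finishing the setup without Horovod, grounded in the commented-out optimizer line above (values illustrative):

from tensorflow.keras.optimizers import SGD

opt = SGD(learning_rate=0.001, momentum=0.9)
model.compile(optimizer=opt,
              loss='binary_crossentropy',
              metrics=['accuracy'])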
Example #17
import tensorflow as tf
import tensorflow_hub as hub

from tensorflow.keras import layers
from tensorflow.keras.applications import EfficientNetB7

if __name__ == '__main__':
    # model = hub.KerasLayer("https://tfhub.dev/google/nnlm-en-dim128/2")
    # embeddings = model(["The rain in Spain.", "falls",
    #                     "mainly", "In the plain!"])

    # print(embeddings.shape)
    print('GPU usable: ', bool(tf.config.list_physical_devices('GPU')))

    exit()  # NOTE: the model code below never runs until this early exit is removed

    model = EfficientNetB7(include_top=True,
                           weights='imagenet',
                           input_shape=None,
                           pooling=None,
                           classes=1000,
                           classifier_activation="softmax")

    print(model.summary())
Example #18
def create_efficientnet(width, height, depth, model_base,
                        first_layers_to_freeze):
    inputShape = (height, width, depth)

    inputs = K.Input(shape=inputShape)

    if model_base == "b0":
        effnet = EfficientNetB0(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b1":
        effnet = EfficientNetB1(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b2":
        effnet = EfficientNetB2(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b3":
        effnet = EfficientNetB3(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b4":
        effnet = EfficientNetB4(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b5":
        effnet = EfficientNetB5(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b6":
        effnet = EfficientNetB6(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    else:
        effnet = EfficientNetB7(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")

    # # Print architecture of effnet
    # for i, layer in enumerate(effnet.layers[:]):
    # 	print(i, layer.name, layer.output_shape)

    # b0: 20; b2: 33; b4: 147; b6: 45; b7: 265

    for i, layer in enumerate(effnet.layers[:first_layers_to_freeze]):
        layer.trainable = False
    for i, layer in enumerate(effnet.layers[first_layers_to_freeze:]):
        layer.trainable = True

    effnet.summary()

    model = Sequential()
    model.add(effnet)
    model.add(K.layers.Dropout(0.25))
    model.add(K.layers.Dense(effnet.layers[-1].output_shape[3]))
    model.add(K.layers.LeakyReLU())
    model.add(K.layers.GlobalAveragePooling2D())
    model.add(K.layers.Dropout(0.5))
    model.add(K.layers.Dense(1, activation='linear'))

    return model
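Unlike the classification variant in Example #10, this builder returns an uncompiled single-output regression model; a minimal sketch of compiling it, with illustrative loss and learning rate:

from tensorflow.keras.optimizers import Adam

model = create_efficientnet(224, 224, 3, model_base="b7",
                            first_layers_to_freeze=265)
model.compile(optimizer=Adam(learning_rate=1e-3),
              loss='mean_squared_error',
              metrics=['mae'])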