Example #1
File: img2svg.py Project: Jephthia/NNs
    def load_img_encoder(self):
        # EfficientNetB1 backbone (randomly initialized) + global average pooling
        # -> a 1280-d image embedding
        img_encoder = Sequential()

        img_encoder.add(EfficientNetB1(input_shape=(100,100,3), include_top=False, weights=None))
        img_encoder.add(GlobalAveragePooling2D())

        return img_encoder
Example #2
def get_model(config):
    if 'kp_blurpool' not in config:
        config['kp_blurpool'] = False
        
    inputs = tf.keras.layers.Input(shape=(config['img_height'], config['img_width'], 3))
    
    from tensorflow.keras.applications import EfficientNetB1, EfficientNetB6
    encoder = EfficientNetB1(include_top=False, weights='imagenet', drop_connect_rate=0.2,input_tensor=inputs)
    # start training with untrainable base
    encoder.trainable = False 
    for l in encoder.layers:
        l.trainable = False 
    encoding = encoder.get_layer('block3a_expand_activation') # 56x56x144
    
    outputs = [ ]
    filters = 256
    x = upsample(filters,1,1,norm_type=None,act=None)(encoding.output) 
    x = BottleneckBlock(x, filters)
    for i in range(config['kp_num_hourglass']):
        x = hourglass(config, x, 3, filters) 
        x = upsample(filters,1,1)(x)
        y = upsample(1+len(config['keypoint_names']),1,1,norm_type=None,act=tf.keras.layers.Activation('softmax'))(x)  
        ybig = tf.keras.layers.Lambda( lambda image: tf.image.resize(image,(config['img_height'], config['img_width']),method = tf.image.ResizeMethod.BICUBIC))(y)
        outputs.append(ybig) 

        # merge the intermediate prediction back in before the next stack
        if i < config['kp_num_hourglass'] - 1:
            x = upsample(filters,1,1,norm_type=None,act=None)(x) + upsample(filters,1,1,norm_type=None,act=None)(y)
    
    net = tf.keras.Model(inputs,outputs,name="StackedHourglass")
    return encoder, net 
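A minimal invocation sketch; the config keys are inferred from what get_model() reads, the values are illustrative, and upsample, BottleneckBlock and hourglass are assumed to be the project's own helpers:

config = {
    'img_height': 224,
    'img_width': 224,
    'keypoint_names': ['nose', 'left_eye', 'right_eye'],
    'kp_num_hourglass': 2,
}
encoder, net = get_model(config)  # frozen EfficientNetB1 encoder + stacked hourglass head
net.summary()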
Example #3
def efficientnetB1(
        input_shape, nclasses=2, num_dense_blocks=3, growth_rate=12, depth=100, compression_factor=0.5,
        data_augmentation=True, regularization=0.
):
    # note: num_dense_blocks, growth_rate, depth, compression_factor and
    # data_augmentation are unused leftovers from a DenseNet template

    # for 1-channel inputs the backbone is built at a hardcoded 32x32x3; the
    # grayscale tensor is zero-padded (e.g. 28x28 -> 32x32) and tiled to 3 channels below
    keras_shape = input_shape
    if input_shape[-1] == 1:
        keras_shape = (32, 32, 3)

    keras_model = EfficientNetB1(
        include_top=False,
        input_shape=keras_shape,
        weights=None
    )

    keras_model.trainable = True

    # adding regularization
    # regularizer = l2(regularization)
    #
    # for layer in keras_model.layers:
    #     for attr in ['kernel_regularizer']:
    #         if hasattr(layer, attr):
    #             setattr(layer, attr, regularizer)
    #
    # tmp_weights_path = os.path.join(tempfile.gettempdir(), 'tmp_weights.h5')
    # keras_model.save_weights(tmp_weights_path)
    #
    # keras_json = keras_model.to_json()
    # keras_model = models.model_from_json(keras_json)
    # keras_model.load_weights(tmp_weights_path, by_name=True)

    outputs = keras_model.output
    inputs = keras_model.input
    if input_shape[-1] == 1:
        inputs = layers.Input(shape=input_shape)
        x = layers.ZeroPadding2D(padding=(2, 2))(inputs)
        output_shape = K.int_shape(x)
        output_shape = output_shape[:-1] + (3,)
        x = layers.Lambda(lambda x: K.tile(x, (1, 1, 1, 3)), output_shape=output_shape)(x)
        outputs = keras_model(x)

    # outputs = layers.Flatten()(outputs)
    outputs = layers.GlobalAveragePooling2D()(outputs)
    # outputs = layers.Dropout(rate=.2)(outputs)
    outputs = layers.Dense(nclasses,
                           kernel_initializer='he_normal',
                           kernel_regularizer=None,
                           activation='softmax')(outputs)

    # instantiate and compile model
    # orig paper uses SGD but RMSprop works better for DenseNet
    model = models.Model(inputs=inputs, outputs=outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.SGD(1e-4),
                  metrics=['acc'])

    return model
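A usage sketch assuming MNIST-style 28x28 grayscale input, which the ZeroPadding2D((2, 2)) stage pads to the hardcoded 32x32x3 backbone shape:

# grayscale path: 28x28x1 is zero-padded to 32x32 and tiled to 3 channels
model = efficientnetB1(input_shape=(28, 28, 1), nclasses=10)
model.summary()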
Example #4
def EfficientNet(cfg):
    regularizer = l2(cfg.TRAIN.WD)

    if cfg.MODEL.SIZE == 0:
        backbone = EfficientNetB0(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    elif cfg.MODEL.SIZE == 1:
        backbone = EfficientNetB1(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    elif cfg.MODEL.SIZE == 2:
        backbone = EfficientNetB2(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    elif cfg.MODEL.SIZE == 3:
        backbone = EfficientNetB3(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    elif cfg.MODEL.SIZE == 4:
        backbone = EfficientNetB4(include_top=False,
                                  input_shape=tuple(cfg.DATASET.INPUT_SHAPE))
    else:
        raise ValueError('unsupported cfg.MODEL.SIZE: {}'.format(cfg.MODEL.SIZE))

    backbone = add_regularization(backbone, regularizer)

    d, w, _ = scaling_parameters(cfg.DATASET.INPUT_SHAPE)

    width_coefficient = cfg.MODEL.WIDTH_COEFFICIENT * w
    depth_divisor = cfg.MODEL.DEPTH_DIVISOR
    head_filters = cfg.MODEL.HEAD_CHANNELS
    head_kernel = cfg.MODEL.HEAD_KERNEL
    head_activation = cfg.MODEL.HEAD_ACTIVATION
    keypoints = cfg.DATASET.OUTPUT_SHAPE[-1]

    x = backbone.layers[-1].output
    for i in range(cfg.MODEL.HEAD_BLOCKS):
        x = layers.Conv2DTranspose(round_filters(head_filters,
                                                 width_coefficient,
                                                 depth_divisor),
                                   head_kernel,
                                   strides=2,
                                   padding='same',
                                   use_bias=False,
                                   kernel_initializer=CONV_KERNEL_INITIALIZER,
                                   kernel_regularizer=regularizer,
                                   name='head_block{}_conv'.format(i + 1))(x)
        x = layers.BatchNormalization(name='head_block{}_bn'.format(i + 1))(x)
        x = layers.Activation(head_activation,
                              name='head_block{}_activation'.format(i + 1))(x)

    x = layers.Conv2D(keypoints,
                      cfg.MODEL.FINAL_KERNEL,
                      padding='same',
                      use_bias=True,
                      kernel_initializer=DENSE_KERNEL_INITIALIZER,
                      kernel_regularizer=regularizer,
                      name='final_conv')(x)

    return Model(backbone.input, x, name=f'EfficientNetLite_{cfg.MODEL.SIZE}')
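A sketch of the cfg object this function expects, built with types.SimpleNamespace; the attribute names come from the function body, the values are illustrative guesses, and add_regularization, scaling_parameters, round_filters and the initializers are assumed project helpers:

from types import SimpleNamespace

cfg = SimpleNamespace(
    TRAIN=SimpleNamespace(WD=1e-5),
    MODEL=SimpleNamespace(SIZE=1, WIDTH_COEFFICIENT=1.0, DEPTH_DIVISOR=8,
                          HEAD_CHANNELS=256, HEAD_KERNEL=3, HEAD_ACTIVATION='relu',
                          HEAD_BLOCKS=3, FINAL_KERNEL=1),
    DATASET=SimpleNamespace(INPUT_SHAPE=[224, 224, 3],
                            OUTPUT_SHAPE=[56, 56, 17]),  # 17 keypoints, COCO-style
)
model = EfficientNet(cfg)  # 7x7 backbone output upsampled 3x by stride-2 transposed convs -> 56x56 heatmaps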
Example #5
def build_model(num_classes, model="B7"):
    inputs = layers.Input(shape=(WIDTH, HEIGHT, 3))
    if model == "B7":
        model = EfficientNetB7(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B6":
        model = EfficientNetB6(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B5":
        model = EfficientNetB5(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B4":
        model = EfficientNetB4(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B3":
        model = EfficientNetB3(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B2":
        model = EfficientNetB2(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B1":
        model = EfficientNetB1(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    elif model == "B0":
        model = EfficientNetB0(include_top=False,
                               input_tensor=inputs,
                               weights="imagenet")
    # Freeze the pretrained weights
    model.trainable = False

    # Rebuild top
    x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
    x = layers.BatchNormalization()(x)

    top_dropout_rate = 0.2
    x = layers.Dropout(top_dropout_rate, name="top_dropout", seed=SEED)(x)
    outputs = layers.Dense(num_classes,
                           activation="softmax",
                           name="predictions")(x)

    # Compile
    model = tf.keras.Model(inputs, outputs, name="EfficientNet")
    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)
    model.compile(
        optimizer=optimizer,
        loss="sparse_categorical_crossentropy",
        metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)])
    return model
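A usage sketch; WIDTH, HEIGHT and SEED are module-level globals the function assumes, and the values here are illustrative (240x240 is EfficientNetB1's native resolution):

WIDTH, HEIGHT, SEED = 240, 240, 42  # assumed globals
model = build_model(num_classes=10, model="B1")
model.summary()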
Example #6
File: models.py Project: azeus404/thesis
def EfficientNetB1model(no_classes, shape):
    """
    EfficientNetB1
    https://keras.io/examples/vision/image_classification_efficientnet_fine_tuning/
    Uses a fixed input size 224,224
    """
    base_model = EfficientNetB1(include_top=False,
                                weights='imagenet',
                                input_shape=shape)
    base_model.trainable = False
    inputs = Input(shape=shape)
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(no_classes, activation='softmax',
                        name='predictions')(x)
    model = Model(inputs, outputs=predictions)
    return model
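A brief usage sketch with illustrative values; the function returns the model uncompiled:

model = EfficientNetB1model(no_classes=5, shape=(224, 224, 3))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])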
Example #7
def get_base_model(inputs, is_fine_tune=False, fine_tune_layers=None):
    """"""
    from tensorflow.keras.applications import EfficientNetB1

    model = EfficientNetB1(include_top=False,
                           input_tensor=inputs,
                           weights="imagenet")

    # whether the base model should be fine-tuned
    model.trainable = is_fine_tune if fine_tune_layers is None else False

    # if only some layers should take part in fine-tuning (BatchNormalization layers should not)
    # if fine_tune_layers is not None:
    #     model_layers = [model.layers[i] for i in fine_tune_layers]
    #     for layer in model_layers:  # non-BatchNormalization layers among the first 20 take part in training
    #         if not isinstance(layer, layers.BatchNormalization):
    #             layer.trainable = True

    return model
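A short usage sketch of the trainability toggle (the input size is illustrative):

import tensorflow as tf

inputs = tf.keras.layers.Input(shape=(240, 240, 3))
base = get_base_model(inputs, is_fine_tune=False)  # frozen feature extractor
# base = get_base_model(inputs, is_fine_tune=True)  # fully trainable instead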
Example #8
x_val = scaler.transform(x_val)

#to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)

#reshape
x_train = x_train.reshape(-1, 32, 32, 3)
x_test = x_test.reshape(-1, 32, 32, 3)
x_val = x_val.reshape(-1, 32, 32, 3)
print(x_train.shape, x_test.shape, x_val.shape)

#2. modeling
TF = EfficientNetB1(weights='imagenet',
                    include_top=False,
                    input_shape=(32, 32, 3))
TF.trainable = False  # freeze: reuse the pretrained weights without training them
model = Sequential()
model.add(TF)
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10,
                activation='softmax'))  # 10 classes, e.g. when using MNIST
model.summary()
print(len(TF.weights))            # several hundred weight tensors for EfficientNetB1
print(len(TF.trainable_weights))  # 0, since the backbone is frozen

# compile, train
from tensorflow.keras.callbacks import EarlyStopping
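The snippet breaks off at the compile/train step; a hedged completion consistent with the one-hot labels and softmax head above might look like:

es = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.fit(x_train, y_train, epochs=50, batch_size=32,  # illustrative values
          validation_data=(x_val, y_val), callbacks=[es])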
Example #9
from tensorflow.keras.applications import VGG16, VGG19, Xception
from tensorflow.keras.applications import ResNet101, ResNet101V2, ResNet152, ResNet152V2
from tensorflow.keras.applications import ResNet50, ResNet50V2
from tensorflow.keras.applications import InceptionV3, InceptionResNetV2
from tensorflow.keras.applications import MobileNet, MobileNetV2
from tensorflow.keras.applications import DenseNet121, DenseNet169, DenseNet201
from tensorflow.keras.applications import NASNetLarge, NASNetMobile
from tensorflow.keras.applications import EfficientNetB0, EfficientNetB1

model = EfficientNetB1()

# model.trainable = True
model.trainable = False

model.summary()
print(len(model.weights))
print(len(model.trainable_weights))

# VGG16
# Total params: 138,357,544
# Trainable params: 0
# Non-trainable params: 138,357,544
# _________________________________________________________________
# 32
# 0

# VGG19
# Total params: 143,667,240
# Trainable params: 0
# Non-trainable params: 143,667,240
# _________________________________________________________________
Example #10
def create_efficientnet(width, height, depth, model_base,
                        first_layers_to_freeze, num_classes, learning_rate,
                        epochs):
    inputShape = (height, width, depth)

    inputs = K.Input(shape=inputShape)

    if model_base == "b0":
        effnet = EfficientNetB0(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b1":
        effnet = EfficientNetB1(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b2":
        effnet = EfficientNetB2(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b3":
        effnet = EfficientNetB3(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b4":
        effnet = EfficientNetB4(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b5":
        effnet = EfficientNetB5(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b6":
        effnet = EfficientNetB6(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    else:
        effnet = EfficientNetB7(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")

    # # Print architecture of effnet
    # for i, layer in enumerate(effnet.layers[:]):
    # 	print(i, layer.name, layer.output_shape)
    # print(f"Effnet len: {len(effnet.layers[:])}")

    # b0: 20; b2: 33; b4: 236; b6: 45; b7: 265
    for layer in effnet.layers[:first_layers_to_freeze]:
        layer.trainable = False
    for layer in effnet.layers[first_layers_to_freeze:]:
        if not isinstance(layer, K.layers.BatchNormalization):
            layer.trainable = True

    model = Sequential()
    model.add(effnet)
    model.add(K.layers.Dropout(0.25))
    model.add(K.layers.Dense(effnet.layers[-1].output_shape[3]))
    model.add(K.layers.LeakyReLU())
    model.add(K.layers.GlobalAveragePooling2D())
    model.add(K.layers.BatchNormalization())
    model.add(K.layers.Dropout(0.5))
    model.add(K.layers.Dense(num_classes, activation='softmax'))

    # Freeze the batchnorm layer of our model
    for i, layer in enumerate(model.layers[:]):
        if isinstance(layer, K.layers.BatchNormalization):
            layer.trainable = False

    opt = Adam(learning_rate=learning_rate, decay=learning_rate / epochs)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    model.summary()

    return model
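A usage sketch with illustrative hyperparameters; the comment above gives freeze counts only for some variants, so the value used here is a guess:

model = create_efficientnet(width=224, height=224, depth=3, model_base="b1",
                            first_layers_to_freeze=20,  # illustrative
                            num_classes=5, learning_rate=1e-3, epochs=30)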
Example #11
def get_psp_model(config):
    ### https://arxiv.org/abs/1612.01105

    def pyramid_pooling_block(input_tensor, bin_sizes):
        concat_list = [input_tensor]
        w = config['img_width'] // 8
        h = config['img_height'] // 8
        #concat_list[0] = tf.keras.layers.Lambda(lambda x: tf.image.resize(x, (w,h)))(concat_list[0])

        for bin_size in bin_sizes:
            x = tf.keras.layers.AveragePooling2D(
                pool_size=(h // bin_size, w // bin_size),
                strides=(h // bin_size, w // bin_size))(input_tensor)
            x = upsample(128, 3, strides=-2)(x)
            x = upsample(128 // len(bin_sizes), 1, strides=1)(x)  # integer filter count
            #print('pyramid',bin_size,x.shape)
            x = tf.keras.layers.Lambda(lambda x: tf.image.resize(x, (h, w)))(x)
            concat_list.append(x)
        return tf.keras.layers.concatenate(concat_list)

    size = (config['img_height'], config['img_width'])
    inputs = tf.keras.layers.Input(shape=(config['img_height'],
                                          config['img_width'], 3))
    #x = tf.keras.layers.GaussianNoise(20)(inputs)
    x = inputs
    from tensorflow.keras.applications import EfficientNetB1, EfficientNetB6
    weights = 'imagenet'
    if not config.get('should_init_pretrained', True):
        weights = None

    encoder = EfficientNetB1(include_top=False,
                             weights=weights,
                             drop_connect_rate=0.2,
                             input_tensor=x)
    encoder.trainable = False
    for l in encoder.layers:
        l.trainable = False

    encoded_layer_name = 'block4a_expand_activation'  # (28,28,240)

    if 0:  # debug: list encoder layers and locate the encoding layer by name
        for i, l in enumerate(encoder.layers):
            print(i, l.name, l.output.shape)
            if l.name == encoded_layer_name:
                x = l.output

    x = encoder.get_layer(encoded_layer_name).output

    nf = 256
    x = upsample(nf, 1, strides=1)(x)
    # add some extra resnet blocks
    for i_block in range(4):
        y = upsample(nf, 3, strides=1, act=None)(x)
        y = tf.keras.layers.Dropout(0.5)(y)
        y = upsample(nf, 3, strides=1)(y)
        x = x + y

    x = pyramid_pooling_block(x, [2, 4, 6, 12])
    x = upsample(64, 3, strides=1)(x)
    # final classification layer
    x = upsample(1 + len(config['keypoint_names']),
                 1,
                 1,
                 norm_type=None,
                 act=tf.keras.layers.Activation('softmax'))(x)
    x = tf.keras.layers.Lambda(lambda x: tf.image.resize(
        x, (config['img_height'], config['img_width'])))(x)

    net = tf.keras.Model(inputs=encoder.inputs, outputs=[[x]], name="PSP")
    return encoder, net
Example #12
def create_efficientnet(width, height, depth, model_base,
                        first_layers_to_freeze):
    inputShape = (height, width, depth)

    inputs = K.Input(shape=inputShape)

    if model_base == "b0":
        effnet = EfficientNetB0(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b1":
        effnet = EfficientNetB1(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b2":
        effnet = EfficientNetB2(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b3":
        effnet = EfficientNetB3(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b4":
        effnet = EfficientNetB4(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b5":
        effnet = EfficientNetB5(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    elif model_base == "b6":
        effnet = EfficientNetB6(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")
    else:
        effnet = EfficientNetB7(include_top=False,
                                input_tensor=inputs,
                                weights="imagenet")

    # # Print architecture of effnet
    # for i, layer in enumerate(effnet.layers[:]):
    # 	print(i, layer.name, layer.output_shape)

    # b0: 20; b2: 33; b4: 147; b6: 45; b7: 265

    for layer in effnet.layers[:first_layers_to_freeze]:
        layer.trainable = False
    for layer in effnet.layers[first_layers_to_freeze:]:
        layer.trainable = True

    effnet.summary()

    model = Sequential()
    model.add(effnet)
    model.add(K.layers.Dropout(0.25))
    model.add(K.layers.Dense(effnet.layers[-1].output_shape[3]))
    model.add(K.layers.LeakyReLU())
    model.add(K.layers.GlobalAveragePooling2D())
    model.add(K.layers.Dropout(0.5))
    model.add(K.layers.Dense(1, activation='linear'))

    return model
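Unlike the classification variant above, this one returns an uncompiled single-output regression model; a sketch of how it might be compiled, with illustrative values:

model = create_efficientnet(width=224, height=224, depth=3,
                            model_base="b1", first_layers_to_freeze=20)  # illustrative
model.compile(loss='mse', optimizer='adam', metrics=['mae'])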