Example #1
def build_model(classes=2):
  inputs = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
  x = preprocess_input(inputs)
  x = DenseNet169(weights=None, classes=classes)(x)
  model = Model(inputs=inputs, outputs=x)
  model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
  return model
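The snippet above omits its imports; a minimal usage sketch follows, with the imports it relies on and IMAGE_SIZE assumed to be 224 (the original constant is defined elsewhere).
# Hypothetical setup for Example #1; IMAGE_SIZE is an assumption, not from the original code.
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications.densenet import DenseNet169, preprocess_input

IMAGE_SIZE = 224  # assumed input resolution

model = build_model(classes=2)
model.summary()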
Example #2
    def modelInit(self):
        rnet = DenseNet169(input_shape=(32, 32, 3),
                           weights="imagenet",
                           include_top=False,
                           pooling=None)  # keep 4D feature maps; GlobalAveragePooling2D is applied below
        rnet.trainable = True
        model = keras.models.Sequential()

        model.add(tf.keras.layers.GaussianNoise(0.15))
        model.add(
            tf.keras.layers.experimental.preprocessing.Rescaling(1. / 255))
        model.add(
            keras.layers.experimental.preprocessing.RandomFlip(
                "horizontal_and_vertical"))
        model.add(
            tf.keras.layers.experimental.preprocessing.RandomContrast(0.7))
        model.add(keras.layers.experimental.preprocessing.RandomRotation(0.5))

        model.add(rnet)
        model.add(keras.layers.GlobalAveragePooling2D())

        model.add(keras.layers.Dense(10, activation='softmax'))

        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        return model
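A usage sketch for Example #2; since the Sequential starts with a GaussianNoise layer and has no Input layer, it must be built before summary() or training ('wrapper' is a hypothetical instance of the enclosing class, which is not shown).
model = wrapper.modelInit()                    # 'wrapper' is a hypothetical instance name
model.build(input_shape=(None, 32, 32, 3))     # no Input layer was added, so build explicitly
model.summary()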
Example #3
def densenet_fpn(input_shape, channels=1, activation="sigmoid"):
    densenet = DenseNet169(input_shape=input_shape, include_top=False)
    conv1 = densenet.get_layer("conv1/relu").output
    conv2 = densenet.get_layer("pool2_relu").output
    conv3 = densenet.get_layer("pool3_relu").output
    conv4 = densenet.get_layer("pool4_relu").output
    conv5 = densenet.get_layer("bn").output
    conv5 = Activation("relu", name="conv5_relu")(conv5)

    P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4,
                                                 conv5)
    x = concatenate([
        prediction_fpn_block(P5, "P5", (8, 8)),
        prediction_fpn_block(P4, "P4", (4, 4)),
        prediction_fpn_block(P3, "P3", (2, 2)),
        prediction_fpn_block(P2, "P2"),
    ])
    x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
    x = decoder_block_no_bn(x, 128, conv1, 'up4')
    x = UpSampling2D()(x)
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
    x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
    if activation == 'softmax':
        name = 'mask_softmax'
        x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
    else:
        x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
    model = Model(densenet.input, x)
    return model
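A hypothetical call for Example #3; the FPN helpers (create_pyramid_features, prediction_fpn_block, conv_bn_relu, conv_relu, decoder_block_no_bn) are assumed to come from the same repository.
model = densenet_fpn(input_shape=(256, 256, 3), channels=1, activation="sigmoid")
model.compile(optimizer="adam", loss="binary_crossentropy")
model.summary()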
Example #4
    def __init__(self):
        super(Encoder, self).__init__()
        self.base_model = DenseNet169(input_shape=(480,640, 3), include_top=False, weights='imagenet')
        print('Base model loaded {}'.format(DenseNet169.__name__))

        # Create an encoder model that produces the final features along with multiple intermediate features
        outputs = [self.base_model.outputs[-1]]
        # Collect intermediate feature maps for skip connections
        for name in ['pool1', 'pool2_pool', 'pool3_pool', 'conv1/relu']:
            outputs.append(self.base_model.get_layer(name).output)
        self.encoder = Model(inputs=self.base_model.inputs, outputs=outputs)
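A sketch of how the encoder from Example #4 exposes multi-scale features (assumes Encoder subclasses tf.keras.Model, as the super() call suggests).
import numpy as np

encoder = Encoder()
features = encoder.encoder(np.zeros((1, 480, 640, 3), dtype="float32"))
for f in features:
    # final DenseNet features first, then pool1, pool2_pool, pool3_pool, conv1/relu
    print(f.shape)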
Example #5
 def __init__(self, model_name=None):
     if model_name == 'Xception':
         base_model = Xception(weights='imagenet')
         self.preprocess_input = xception.preprocess_input
     elif model_name == 'VGG19':
         base_model = VGG19(weights='imagenet')
         self.preprocess_input = vgg19.preprocess_input
     elif model_name == 'ResNet50':
         base_model = ResNet50(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet101':
         base_model = ResNet101(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet152':
         base_model = ResNet152(weights='imagenet')
         self.preprocess_input = resnet.preprocess_input
     elif model_name == 'ResNet50V2':
         base_model = ResNet50V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'ResNet101V2':
         base_model = ResNet101V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'ResNet152V2':
         base_model = ResNet152V2(weights='imagenet')
         self.preprocess_input = resnet_v2.preprocess_input
     elif model_name == 'InceptionV3':
         base_model = InceptionV3(weights='imagenet')
         self.preprocess_input = inception_v3.preprocess_input
     elif model_name == 'InceptionResNetV2':
         base_model = InceptionResNetV2(weights='imagenet')
         self.preprocess_input = inception_resnet_v2.preprocess_input
     elif model_name == 'DenseNet121':
         base_model = DenseNet121(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'DenseNet169':
         base_model = DenseNet169(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'DenseNet201':
         base_model = DenseNet201(weights='imagenet')
         self.preprocess_input = densenet.preprocess_input
     elif model_name == 'NASNetLarge':
         base_model = NASNetLarge(weights='imagenet')
         self.preprocess_input = nasnet.preprocess_input
     elif model_name == 'NASNetMobile':
         base_model = NASNetMobile(weights='imagenet')
         self.preprocess_input = nasnet.preprocess_input
     elif model_name == 'MobileNet':
         base_model = MobileNet(weights='imagenet')
         self.preprocess_input = mobilenet.preprocess_input
     elif model_name == 'MobileNetV2':
         base_model = MobileNetV2(weights='imagenet')
         self.preprocess_input = mobilenet_v2.preprocess_input
     else:
         base_model = VGG16(weights='imagenet')
         self.preprocess_input = vgg16.preprocess_input
     self.model = Model(inputs=base_model.input,
                        outputs=base_model.layers[-2].output)
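A hedged usage sketch for Example #5; the enclosing class name is not shown above, so FeatureExtractor and the image path are placeholders.
import numpy as np
from tensorflow.keras.preprocessing import image

extractor = FeatureExtractor(model_name='DenseNet169')   # FeatureExtractor is a placeholder name
img = image.load_img('example.jpg', target_size=(224, 224))  # 224x224 matches the ImageNet tops used above
x = np.expand_dims(image.img_to_array(img), axis=0)
features = extractor.model.predict(extractor.preprocess_input(x))
print(features.shape)  # (1, 1664): DenseNet169's penultimate (global average pooling) layer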
Example #6
    def __init__(self):
        super(Encoder, self).__init__()
        self.base_model = DenseNet169(include_top=False,
                                      input_shape=(None, None, 3),
                                      weights='imagenet')

        outputs = [self.base_model.outputs[-1]]
        for name in ['pool1', 'pool2_pool', 'pool3_pool', 'conv1/relu']:
            outputs.append(self.base_model.get_layer(name).output)
        self.encoder = Model(
            inputs=self.base_model.inputs, outputs=outputs
        )  # create a model with preset inputs and outputs from DenseNet-169
Example #7
def pretrained_model(type: str, trainable=False):
    with strategy.scope():
        if type == 'VGG16':
            pretrained_model = VGG16(weights='imagenet',
                                     include_top=False,
                                     input_shape=[*IMAGE_SIZE, 3])
        elif type == 'VGG19':
            pretrained_model = VGG19(weights='imagenet',
                                     include_top=False,
                                     input_shape=[*IMAGE_SIZE, 3])
        elif type == 'DenseNet121':
            pretrained_model = DenseNet121(weights='imagenet',
                                           include_top=False,
                                           input_shape=[*IMAGE_SIZE, 3])
        elif type == 'DenseNet169':
            pretrained_model = DenseNet169(weights='imagenet',
                                           include_top=False,
                                           input_shape=[*IMAGE_SIZE, 3])
        elif type == 'DenseNet201':
            pretrained_model = DenseNet201(weights='imagenet',
                                           include_top=False,
                                           input_shape=[*IMAGE_SIZE, 3])
        else:
            raise ValueError('Unsupported backbone: ' + type)

        pretrained_model.trainable = trainable

        model = Sequential([
            # Use a base pretrained on ImageNet to extract features from images...
            pretrained_model,
            # ... attach a new head to act as a classifier.
            Flatten(),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            Dense(256, activation='relu'),
            BatchNormalization(),
            Dropout(0.2),
            tf.keras.layers.Dense(len(CLASSES),
                                  activation='softmax',
                                  use_bias=False)
        ])

    return model
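Hypothetical usage for Example #7, assuming IMAGE_SIZE, CLASSES, and strategy are defined in the surrounding script as the code implies.
model = pretrained_model('DenseNet169', trainable=False)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])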
Example #8
    def get_model(self,
                  class_names,
                  model_name="DenseNet121",
                  use_base_weights=True,
                  weights_path=None,
                  input_shape=None):

        if use_base_weights is True:
            base_weights = "imagenet"
        else:
            base_weights = None
        '''
        base_model_class = getattr(
            importlib.import_module(
                "keras.applications." + self.models_[model_name]['module_name']
            ),
            model_name)
        '''
        if input_shape is None:
            # Fall back to the per-model default, or 256x256 RGB if none is registered.
            input_shape = self.models_[model_name].get(
                "input_shape", (256, 256, 3))

        img_input = tf.keras.layers.Input(shape=input_shape)
        '''
        base_model = base_model_class(
            include_top=False,
            input_tensor=img_input,
            input_shape=input_shape,
            weights=base_weights,
            pooling="avg")
        '''
        base_model = DenseNet169(include_top=False,
                                 input_tensor=img_input,
                                 input_shape=input_shape,
                                 weights=base_weights,
                                 pooling="avg")
        x = base_model.output
        predictions = Dense(len(class_names),
                            activation="sigmoid",
                            name="predictions")(x)
        model = Model(inputs=img_input, outputs=predictions)

        if weights_path == "":
            weights_path = None

        if weights_path is not None:
            print("load model weights_path: {weights_path}")
            model.load_weights(weights_path)
        return model
Example #9
def get_model(class_names, weights_path, input_shape=(256, 256, 3)):

    img_input = Input(shape=input_shape)
    base_model = DenseNet169(include_top=False,
                             input_tensor=img_input,
                             input_shape=input_shape,
                             weights=None,
                             pooling="avg")
    x = base_model.output
    predictions = Dense(len(class_names),
                        activation="sigmoid",
                        name="predictions")(x)
    model = Model(inputs=img_input, outputs=predictions)

    model.load_weights(weights_path)
    return model
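A sketch of restoring and running the classifier from Example #9 (the class names and weights path are placeholders).
import numpy as np

class_names = ['negative', 'positive']                          # placeholder labels
model = get_model(class_names, weights_path='densenet169.h5')   # placeholder path
probs = model.predict(np.zeros((1, 256, 256, 3), dtype='float32'))
print(probs.shape)  # (1, 2): one sigmoid score per class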
Example #10
def DenseNet169model(no_classes, shape):
    """
    DenseNet169
    """
    base_model = DenseNet169(include_top=False,
                             weights='imagenet',
                             input_shape=shape)
    base_model.trainable = False
    inputs = Input(shape=shape)
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(no_classes, activation='softmax',
                        name='predictions')(x)
    model = Model(inputs, outputs=predictions)
    return model
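Hypothetical usage for Example #10; the number of classes and the image shape are placeholders.
model = DenseNet169model(no_classes=5, shape=(224, 224, 3))
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()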
Example #11
model = NASNetLarge()
# model.summary()
# Rough layer count: trainable variables typically come in pairs (kernel/bias or gamma/beta).
print("NASNetLarge", len(model.trainable_weights)/2)

print('----------------------------------------------------------------------------')
model = NASNetMobile()
# model.summary()
print("NASNetMobile", len(model.trainable_weights)/2)

print('----------------------------------------------------------------------------')
model = DenseNet121()
# model.summary()
print("DenseNet121", len(model.trainable_weights)/2)

print('----------------------------------------------------------------------------')
model = DenseNet169()
# model.summary()
print("DenseNet169", len(model.trainable_weights)/2)

print('----------------------------------------------------------------------------')
model = DenseNet201()
# model.summary()
print("DenseNet201", len(model.trainable_weights)/2)

print('----------------------------------------------------------------------------')
model = MobileNetV2()
# model.summary()
print("MobileNetV2", len(model.trainable_weights)/2)


print('----------------------------------------------------------------------------')
x_test = scaler.transform(x_test)
x_val = scaler.transform(x_val)

#to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
y_val = to_categorical(y_val)

#reshape
x_train = x_train.reshape(-1, 32, 32, 3)
x_test = x_test.reshape(-1, 32, 32, 3)
x_val = x_val.reshape(-1, 32, 32, 3)
print(x_train.shape, x_test.shape, x_val.shape)

#2. Modeling
TF = DenseNet169(weights='imagenet', include_top=False, input_shape=(32, 32, 3))  # pretrained DenseNet169 backbone
TF.trainable = False  # freeze the backbone: reuse the ImageNet weights without training them
model = Sequential()
model.add(TF)
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))  # 10-way softmax output (e.g. for MNIST-style labels)
model.summary()
print(len(TF.weights))  # number of weight tensors in the backbone
print(len(TF.trainable_weights))  # 0, because the backbone is frozen

# Compile and train
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='loss', patience=7, mode='auto')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
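A sketch of the training call that typically follows the compile step above; the epoch count and batch size are assumptions.
model.fit(x_train, y_train, epochs=50, batch_size=64,
          validation_data=(x_val, y_val), callbacks=[es])
loss, acc = model.evaluate(x_test, y_test)
print('test accuracy:', acc)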
Example #13
def build_fc_densenet(n_classes,
                      h,
                      w,
                      n_layers=201,
                      use_bottleneck=False,
                      bottleneck_blocks=32):
    """
  Build a Fully Convolutional Densenet model.
  Parameters:
    n_classes: Number of classes to predict
    h: Height of input images
    w: Width of input images
    n_layers: Numbers of Densenet's layers. Values in [121,169,201]. Densenet201 is used by default or if the value is not in the valid set.
    use_bottleneck: Whether or not use a bottleneck block as mentioned in the paper.
    bottleneck_blocks: Number of blocks to use if use_bottleneck parameter is True
  Return:
    A tf.keras Model instance
  """
    if n_layers == 121:
        blocks = [6, 12, 24, 16]
        base_model = DenseNet121(input_shape=[h, w, 3], include_top=False)
    elif n_layers == 169:
        blocks = [6, 12, 32, 32]
        base_model = DenseNet169(input_shape=[h, w, 3], include_top=False)
    else:
        blocks = [6, 12, 48, 32]
        base_model = DenseNet201(input_shape=[h, w, 3], include_top=False)

    skips_n = 3
    growth_factor = 32  # DenseNet growth rate (not used directly below)

    #Encoder
    skip_names = [
        str.format('conv{0}_block{1}_concat', i + 2, blocks[i])
        for i in range(skips_n + 1)
    ]
    upsample_factors = [4, 2, 2, 2]
    skip_layers = [base_model.get_layer(name).output for name in skip_names]
    base = Model(inputs=base_model.inputs, outputs=skip_layers)

    inputs = Input(shape=[h, w, 3])
    skips = base(inputs)

    x = skips[-1]
    #bottleneck
    if use_bottleneck:
        x = dense_block(x, bottleneck_blocks, name='bottleneck')

    #Upsample path
    for i in range(1, 4):
        print('upsampling', x, skips[-i - 1])
        skip = skips[-i - 1]
        x = transition_up(skip, x)
        x = dense_block(x, blocks[-i], name='upsample' + str(i))

    #4x upsampling
    x = Conv2DTranspose(64,
                        3,
                        4,
                        padding='same',
                        kernel_initializer='he_uniform')(x)
    x = score(x, n_classes)

    #ending model
    model = Model(inputs=inputs, outputs=x)
    return model
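A hypothetical call for Example #13, building a 12-class FC-DenseNet on a 169-layer backbone (the helpers dense_block, transition_up, and score are assumed to exist in the same module).
model = build_fc_densenet(n_classes=12, h=256, w=256, n_layers=169)
model.summary()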
Example #14
train_ds = prepare_for_training(labeled_ds)

valid_ds = prepare_for_training(val_labeled_ds, shuffle=False)

mirrored_strategy = tf.distribute.MirroredStrategy(
    devices=["/gpu:0", "/gpu:1"])
with mirrored_strategy.scope():

    architectures = [("DenseNet121",
                      DenseNet121(input_shape=IMG_SHAPE,
                                  include_top=False,
                                  weights='imagenet')),
                     ("DenseNet169",
                      DenseNet169(input_shape=IMG_SHAPE,
                                  include_top=False,
                                  weights='imagenet')),
                     ("DenseNet201",
                      DenseNet201(input_shape=IMG_SHAPE,
                                  include_top=False,
                                  weights='imagenet')),
                     ("InceptionResNetV2",
                      InceptionResNetV2(input_shape=IMG_SHAPE,
                                        include_top=False,
                                        weights='imagenet')),
                     ("MobileNet",
                      MobileNet(input_shape=IMG_SHAPE,
                                include_top=False,
                                weights='imagenet')),
                     ("MobileNetV2",
                      MobileNetV2(input_shape=IMG_SHAPE,
Example #15
def DenseNet_greyscale(blocks,input_shape,pooling,trainable):
    
    if blocks == 121:
        blocks = [6, 12, 24, 16]
    elif blocks == 169:
        blocks = [6, 12, 32, 32]
    elif blocks == 201:
        blocks = [6, 12, 48, 32]
        
    img_input = layers.Input(shape=input_shape)
    bn_axis = 3

    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)))(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=False, name='conv1/conv')(x)
    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name='conv1/bn')(x)
    x = layers.Activation('relu', name='conv1/relu')(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)))(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1')(x)

    x = dense_block(x, blocks[0], name='conv2')
    x = transition_block(x, 0.5, name='pool2')
    x = dense_block(x, blocks[1], name='conv3')
    x = transition_block(x, 0.5, name='pool3')
    x = dense_block(x, blocks[2], name='conv4')
    x = transition_block(x, 0.5, name='pool4')
    x = dense_block(x, blocks[3], name='conv5')

    x = layers.BatchNormalization(
        axis=bn_axis, epsilon=1.001e-5, name='bn')(x)
    x = layers.Activation('relu', name='relu')(x)

    if pooling == 'avg':
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    elif pooling == 'max':
        x = layers.GlobalMaxPooling2D(name='max_pool')(x)

    # Create model.
    if blocks == [6, 12, 24, 16]:
        model = models.Model(img_input, x, name='densenet121')
    elif blocks == [6, 12, 32, 32]:
        model = models.Model(img_input, x, name='densenet169')
    elif blocks == [6, 12, 48, 32]:
        model = models.Model(img_input, x, name='densenet201')

    # Load weights
    if blocks == [6, 12, 24, 16]:
        pretrained_model = DenseNet121(include_top=False,pooling=pooling)
    elif blocks == [6, 12, 32, 32]:
        pretrained_model = DenseNet169(include_top=False,pooling=pooling)
    elif blocks == [6, 12, 48, 32]:
        pretrained_model = DenseNet201(include_top=False,pooling=pooling)
    
    w = pretrained_model.layers[2].get_weights()[0].sum(2,keepdims=True)
    model.layers[2].set_weights([w])
    model.layers[2].trainable = trainable
    model.trainable = trainable
    
    for l1,l2 in zip(model.layers[3:],pretrained_model.layers[3:]):
        l1.set_weights(l2.get_weights())
        l1.trainable = trainable
    return model

#test = DenseNet_greyscale(121,(224,224,1),'max',False)
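The core trick in Example #15 is collapsing the pretrained RGB stem kernel into a single-channel kernel by summing over the input-channel axis; a standalone illustration of that step follows.
import numpy as np

rgb_kernel = np.random.rand(7, 7, 3, 64).astype('float32')  # stand-in for the (7, 7, 3, 64) conv1/conv weights
grey_kernel = rgb_kernel.sum(axis=2, keepdims=True)          # -> (7, 7, 1, 64), reusable on 1-channel inputs
print(grey_kernel.shape)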
Example #16
def get_arch(arg, input_shape, classes, **kwargs):
    input_tensor = Input(shape=input_shape)

    if "Normalization" in kwargs:
        if kwargs["Normalization"] == "BatchNormalization":
            kwargs["Normalization"] = BatchNormalization
        elif kwargs["Normalization"] == "LayerNormalization":
            kwargs["Normalization"] = LayerNormalization
        elif kwarfs["Normalization"] == "NoNormalization":
            kwargs["Normalization"] = NoNormalization
        # if its not a string assume its a normalization layer
        elif type(kwargs["Normalization"]) == str:
            print("Warning: couldn't understand your normalization")
            kwargs["Normalization"] = NoNormalization

    if arg == "AlexNet":
        return AlexNet(input_tensor=input_tensor, classes=classes, **kwargs)
    elif arg == "SmolAlexNet":
        return SmolAlexNet(input_tensor=input_tensor,
                           classes=classes,
                           **kwargs)
    elif arg == "VGG16":
        return VGG16(input_tensor=input_tensor,
                     classes=classes,
                     weights=None,
                     **kwargs)
    elif arg == "VGG19":
        return VGG19(input_tensor=input_tensor,
                     classes=classes,
                     weights=None,
                     **kwargs)
    elif arg == "ResNet50":
        return ResNet50(input_tensor=input_tensor,
                        classes=classes,
                        weights=None,
                        **kwargs)
    elif arg == "ResNet152":
        return ResNet152(input_tensor=input_tensor,
                         classes=classes,
                         weights=None,
                         **kwargs)
    elif arg == "CifarResNet":
        return CifarResNet(3, input_tensor=input_tensor, classes=classes)
    elif arg == "DenseNet169":
        return DenseNet169(input_tensor=input_tensor,
                           classes=classes,
                           weights=None,
                           **kwargs)
    elif arg == "DenseNet121":
        return DenseNet121(input_tensor=input_tensor,
                           classes=classes,
                           weights=None,
                           **kwargs)
    elif arg == "MobileNetV2":
        return MobileNetV2(input_tensor=input_tensor,
                           classes=classes,
                           weights=None,
                           **kwargs)
    elif arg == "DenseNetCifar":
        return DenseNetCifar(input_tensor, classes, 12, 16)
    else:
        show_available()
        raise Exception(arg + " not an available architecture")
def construct_model(pretrainedNN):

    model = Sequential()
    if (pretrainedNN == 'VGG16'):
        model.add(
            VGG16(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'VGG19'):
        model.add(
            VGG19(weights=None, include_top=False, input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet101'):
        model.add(
            ResNet101(weights=None, include_top=False,
                      input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet152'):
        model.add(
            ResNet152(weights=None, include_top=False,
                      input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet50V2'):
        model.add(
            ResNet50V2(weights=None,
                       include_top=False,
                       input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet101V2'):
        model.add(
            ResNet101V2(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'ResNet152V2'):
        model.add(
            ResNet152V2(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'MobileNet'):
        model.add(
            MobileNet(weights=None, include_top=False,
                      input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'MobileNetV2'):
        model.add(
            MobileNetV2(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'DenseNet121'):
        model.add(
            DenseNet121(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'DenseNet169'):
        model.add(
            DenseNet169(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    elif (pretrainedNN == 'DenseNet201'):
        model.add(
            DenseNet201(weights=None,
                        include_top=False,
                        input_shape=(32, 32, 3)))
    else:
        model.add(
            ResNet50(weights=None, include_top=False, input_shape=(32, 32, 3)))

    model.add(Flatten())

    model.add(Dense(77, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])
    return model
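Hypothetical usage of construct_model above (the 77-way softmax head is hard-coded in the function).
model = construct_model('DenseNet169')
model.summary()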
    target_size = (img_size,img_size),
    class_mode = 'binary')

# initialize the testing generator
test_data = test_data_gen.flow_from_directory(
    test_folder,
    shuffle = False,
    batch_size = batch_size,
    target_size = (img_size,img_size),
    class_mode = 'binary')

"""# Building model"""

from tensorflow.keras.applications import DenseNet169

base_model = DenseNet169(input_shape = (300,300,3), include_top = False, weights='imagenet')

model = Sequential()
model.add(base_model)
model.add(Conv2D(512,
                 activation='relu',
                 kernel_size=3))  # input shape is inferred from the DenseNet169 feature maps
model.add(MaxPooling2D(pool_size=(2,2)))
#FC layers
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
model.add(Dropout(0.7))
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation = 'relu'))