Example #1
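# Note: the listing omits imports. A minimal set that covers this example
# might look like this (an assumption, not from the source):
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, MaxPool2D, BatchNormalization,
                                     Dropout, GlobalAveragePooling2D, Dense)
from tensorflow.keras.regularizers import l2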
def getModel(input_shape=(224, 224, 3), num_classes=3):

    model = Sequential()

    # Block 1
    model.add(
        Conv2D(input_shape=input_shape,
               filters=128,
               kernel_size=(5, 5),
               strides=2,
               padding='same',
               name='block1_conv1',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(
        Conv2D(filters=128,
               kernel_size=(5, 5),
               strides=2,
               padding='same',
               name='block1_conv2',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(MaxPool2D(strides=(2, 2), name='block1_pool'))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))

    # Block 2
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               padding='same',
               name='block2_conv1',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               padding='same',
               name='block2_conv2',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(
        Conv2D(filters=256,
               kernel_size=(3, 3),
               padding='same',
               name='block2_conv3',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(MaxPool2D(strides=(2, 2), name='block2_pool'))
    model.add(BatchNormalization())
    model.add(Dropout(0.35))

    # Block 3
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding='same',
               name='block3_conv1',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding='same',
               name='block3_conv2',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(
        Conv2D(filters=512,
               kernel_size=(3, 3),
               padding='same',
               name='block3_conv3',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(MaxPool2D(strides=(2, 2), name='block3_pool'))
    model.add(BatchNormalization())
    model.add(Dropout(0.35))

    # Block 4
    model.add(GlobalAveragePooling2D())
    model.add(Dense(512, activation="relu",
                    kernel_initializer='he_normal'))
    model.add(Dropout(0.4))
    model.add(
        Dense(num_classes,
              activation="softmax",
              kernel_initializer='he_normal',
              kernel_regularizer=l2()))

    return model
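# A hedged usage sketch (not part of the source): build and compile the
# 3-class classifier, then print the layer stack.
model = getModel(input_shape=(224, 224, 3), num_classes=3)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()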
Example #2
def middle_modelUnet_AP(input_shape=(32, 32, 3)):
    """
    The architecture of the coarse and middle model.
    """

    ### CONVOLUTIONAL CORE
    input_img = Input(input_shape, name='input_1')

    # information bottleneck (conv + max-pooling sequence)
    c1 = Conv2D(32,
                kernel_size=3,
                input_shape=input_shape,
                padding="same",
                name='conv2d')(input_img)
    c1 = BatchNormalization(name='batch_normalization')(c1)
    c1 = Activation('relu', name='activation')(c1)
    c2 = Conv2D(32, 3, padding="same", name='conv2d_1')(c1)
    c2 = BatchNormalization(name='batch_normalization_1')(c2)
    c2 = Activation('relu', name='activation_1')(c2)
    m1 = MaxPooling2D(pool_size=2, name='max_pooling2d')(c2)
    d1 = Dropout(0.3, name='dropout')(m1)

    c3 = Conv2D(64, 3, padding="same", name='conv2d_2')(d1)
    c3 = BatchNormalization(name='batch_normalization_2')(c3)
    c3 = Activation('relu', name='activation_2')(c3)
    c4 = Conv2D(64, 3, padding="same", name='conv2d_3')(c3)
    c4 = BatchNormalization(name='batch_normalization_3')(c4)
    c4 = Activation('relu', name='activation_3')(c4)
    m2 = MaxPooling2D(pool_size=2, name='max_pooling2d_1')(c4)
    d2 = Dropout(0.3, name='dropout_1')(m2)

    c5 = Conv2D(128, 3, padding="same", name='conv2d_4')(d2)
    c5 = BatchNormalization(name='batch_normalization_4')(c5)
    c5 = Activation('relu', name='activation_4')(c5)
    c6 = Conv2D(128, 3, padding="same", name='conv2d_5')(c5)
    c6 = BatchNormalization(name='batch_normalization_5')(c6)
    c6 = Activation('relu', name='activation_5')(c6)
    m3 = MaxPooling2D(pool_size=2, name='max_pooling2d_2')(c6)
    d3 = Dropout(0.3, name='dropout_2')(m3)
    c7 = Conv2D(128, 3, padding="same", name='conv2d_6')(d3)
    c7 = BatchNormalization(name='batch_normalization_6')(c7)
    c7 = Activation('relu', name='activation_6')(c7)

    # information retrieval and upsampling (skip connection + conv + upsampling sequence)
    c8 = UpSampling2D(size=(2, 2), name='up_sampling2d')(c7)
    c9 = Concatenate(name='concatenate_1', axis=3)([c6, c8])
    c9 = Conv2D(64, (3, 3), padding='same', name='conv2d_7')(c9)
    c9 = BatchNormalization(name='batch_normalization_7')(c9)
    c9 = Activation('relu', name='activation_7')(c9)

    # global pooling of the appropriate features + fully connected layers for the coarse & middle outputs
    f1 = GlobalAveragePooling2D()(c7)
    res1 = Dense(100, activation='relu', name='dense')(f1)
    res1 = Dropout(0.3, name='dropout_3')(res1)
    res1 = Dense(2, activation='softmax', name='coarse')(res1)

    f2 = GlobalAveragePooling2D()(c9)
    res2 = Dense(50, activation='relu', name='dense_1')(f2)
    res2 = Dropout(0.3, name='dropout_4')(res2)
    res2 = Dense(5, activation='softmax', name='middle')(res2)

    model = Model(input_img, [res1, res2])
    return model
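# A hedged usage sketch (not part of the source): the model has two softmax
# heads named 'coarse' and 'middle', so it can be compiled with one loss per
# output keyed by layer name.
model = middle_modelUnet_AP(input_shape=(32, 32, 3))
model.compile(optimizer='adam',
              loss={'coarse': 'categorical_crossentropy',
                    'middle': 'categorical_crossentropy'},
              loss_weights={'coarse': 1.0, 'middle': 1.0},
              metrics=['accuracy'])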
Example #3
def SqueezeNet(include_top=True,
               input_shape=None,
               weights='imagenet',
               input_tensor=None,
               pooling=None,
               classes=1000,
               **kwargs):
    """Instantiates the SqueezeNet architecture.
    """

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')


    input_shape = _obtain_input_shape(input_shape,
                                      default_size=227,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        # if not K.is_keras_tensor(input_tensor):
        #     img_input = Input(tensor=input_tensor, shape=input_shape)
        # else:
        #     img_input = input_tensor
        img_input = input_tensor


    x = Conv2D(64, (3, 3), strides=(2, 2), padding='valid', name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)

    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

    x = fire_module(x, fire_id=6, squeeze=48, expand=192)
    x = fire_module(x, fire_id=7, squeeze=48, expand=192)
    x = fire_module(x, fire_id=8, squeeze=64, expand=256)
    x = fire_module(x, fire_id=9, squeeze=64, expand=256)

    if include_top:
        # It's not obvious where to cut the network... 
        # Could do the 8th or 9th layer... some work recommends cutting earlier layers.

        x = Dropout(0.5, name='drop9')(x)

        x = Conv2D(classes, (1, 1), padding='valid', name='conv10')(x)
        x = Activation('relu', name='relu_conv10')(x)
        x = GlobalAveragePooling2D()(x)
        x = Activation('softmax', name='loss')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
        elif pooling is None:
            pass
        else:
            raise ValueError("Unknown argument for 'pooling'=" + pooling)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='squeezenet')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH,
                                    cache_subdir='models')
        else:
            weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                    WEIGHTS_PATH_NO_TOP,
                                    cache_subdir='models')

        model.load_weights(weights_path)

        if K.image_data_format() == 'channels_first':

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
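# fire_module is called above but not defined in this listing. A minimal
# sketch that matches the call sites, based on the SqueezeNet paper (an
# assumption, not the original helper):
def fire_module(x, fire_id, squeeze=16, expand=64):
    s_id = 'fire{}/'.format(fire_id)
    # squeeze: 1x1 bottleneck
    x = Conv2D(squeeze, (1, 1), padding='valid', activation='relu',
               name=s_id + 'squeeze1x1')(x)
    # expand: parallel 1x1 and 3x3 branches, concatenated along channels
    left = Conv2D(expand, (1, 1), padding='valid', activation='relu',
                  name=s_id + 'expand1x1')(x)
    right = Conv2D(expand, (3, 3), padding='same', activation='relu',
                   name=s_id + 'expand3x3')(x)
    return Concatenate(axis=-1, name=s_id + 'concat')([left, right])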
Example #4
# else branch of a "use_vgg" switch (the matching if branch is not shown in
# the source): build a 13-channel DenseNet and transplant ImageNet weights.
else:
    base_model_imagenet = DenseNet(include_top=False,
                                   weights='imagenet',
                                   input_shape=(64, 64, 3))
    base_model = DenseNet(include_top=False,
                          weights=None,
                          input_shape=(64, 64, 13))
    for i, layer in enumerate(base_model_imagenet.layers):
        # we must skip input layer, zeropadding and first convolutional layer
        if i < 3:
            continue
        base_model.layers[i].set_weights(layer.get_weights())

# add a global spatial average pooling layer
top_model = base_model.output
top_model = GlobalAveragePooling2D()(top_model)

# or just flatten the layers
#    top_model = Flatten()(top_model)
# let's add a fully-connected layer
if use_vgg:
    # only for VGG19 is a fully connected classifier added;
    # DenseNet tends to overfit when additional dense layers are used
    top_model = Dense(2048, activation='relu')(top_model)
    top_model = Dense(2048, activation='relu')(top_model)
# and a logistic layer
predictions = Dense(num_classes, activation='softmax')(top_model)

# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
# print network structure
model.summary()
Example #5
def build_model(include_top=True,
                batch=2,
                height=400,
                width=400,
                color=True,
                filters=64,
                pooling='avg',
                classes1=3,
                classes2=2):
    inputs = keras.layers.Input((height, width, 3 if color else 1),
                                batch_size=batch)

    x = Conv2D(64, (3, 3), strides=(2, 2), padding='valid',
               name='conv1')(inputs)
    x = Activation('relu', name='relu_conv1')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)

    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

    x = fire_module(x, fire_id=10, squeeze=48, expand=192)
    x = fire_module(x, fire_id=11, squeeze=48, expand=192)
    x = fire_module(x, fire_id=12, squeeze=64, expand=256)
    x = fire_module(x, fire_id=13, squeeze=64, expand=256)

    # backbone-only model (not used further below)
    model = Model(inputs, x, name='squeezenet')

    # if weights == 'imagenet':
    #     if include_top:
    #         weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels.h5',
    #                                 WEIGHTS_PATH,
    #                                 cache_subdir='models')
    #     else:
    #         weights_path = get_file('squeezenet_weights_tf_dim_ordering_tf_kernels_notop.h5',
    #                                 WEIGHTS_PATH_NO_TOP,
    #                                 cache_subdir='models')
    #
    #     model.load_weights(weights_path)

    # if include_top:
    # It's not obvious where to cut the network...
    # Could do the 8th or 9th layer... some work recommends cutting earlier layers.
    # model = tf.keras.Sequential([
    #     x,
    #     layers.Dense(image_data.num_classes, activation='softmax')
    # ])

    x = Dropout(0.5, name='drop9')(x)
    y = Dropout(0.5, name='drop10')(x)
    x = Conv2D(classes1, (1, 1), padding='valid', name='conv10')(x)
    x = Activation('relu', name='relu_conv10')(x)
    x = GlobalAveragePooling2D()(x)
    x = Activation('softmax', name='location_output1')(x)

    y = Conv2D(classes2, (1, 1), padding='valid', name='conv11')(y)
    y = Activation('relu', name='relu_conv11')(y)
    y = GlobalAveragePooling2D()(y)
    y = Activation('softmax', name='falling_output2')(y)
    # else:
    #     if pooling == 'avg':
    #         x = GlobalAveragePooling2D()(x)
    #         y = GlobalAveragePooling2D()(y)
    #     elif pooling == 'max':
    #         x = GlobalMaxPooling2D()(x)
    #         y = GlobalAveragePooling2D()(y)
    #     elif pooling == None:
    #         pass
    #     else:
    #         raise ValueError("Unknown argument for 'pooling'=" + pooling)

    custom_model = Model(inputs, [x, y], name='squeezenet')

    # weights_path = 'output\\checkpoints\\eye\\generator_scale_300.h5'
    # custom_model.load_weights(weights_path)

    return custom_model
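# A hedged usage sketch (not part of the source): the two heads are named
# 'location_output1' and 'falling_output2', so per-output losses can be
# keyed by those layer names.
model = build_model(batch=2, height=400, width=400, classes1=3, classes2=2)
model.compile(optimizer='adam',
              loss={'location_output1': 'categorical_crossentropy',
                    'falling_output2': 'categorical_crossentropy'},
              metrics=['accuracy'])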
Example #6
def build_mobileNetV2(input_shape=[224, 224, 3],
                      l2_regularizer_weight=0.0001,
                      dropout_rate=None,
                      depth_multiplier=1,
                      class_num=2,
                      alpha=1):
    global_regulizer = keras.regularizers.l2(l2_regularizer_weight)
    X_input = layers.Input(shape=input_shape, name="input")
    # X = ZeroPadding2D(((1, 0), (1, 0)))(X_input)
    X = _conv_block(X_input,
                    filters=int(32 * alpha),
                    kernel=(3, 3),
                    base_name='conv1',
                    stride=2,
                    dropout_rate=dropout_rate,
                    kernel_regulizer=global_regulizer)
    X = DepthwiseConv2D(kernel_size=(3, 3),
                        strides=(1, 1),
                        depth_multiplier=1,
                        padding='same',
                        name='expanded_conv_depthwise',
                        use_bias=False)(X)
    X = BatchNormalization(name='expanded_conv_depthwise_BN')(X)
    X = Activation(tf.nn.relu6, name='expanded_conv_depthwise_relu')(X)
    X = Conv2D(filters=int(16 * alpha),
               kernel_size=(1, 1),
               strides=(1, 1),
               padding='same',
               name='expanded_conv_project',
               use_bias=False)(X)
    X = BatchNormalization(name='expanded_conv_project_BN')(X)
    # inverted residual blocks
    X = _inverted_residual_block(X,
                                 filters=int(24 * alpha),
                                 kernel=(3, 3),
                                 t=6,
                                 strides=2,
                                 n=2,
                                 dropout_rate=dropout_rate,
                                 kernel_regulizer=global_regulizer,
                                 depth_multiplier=depth_multiplier,
                                 init_block_id=1)
    X = _inverted_residual_block(X,
                                 filters=int(32 * alpha),
                                 kernel=(3, 3),
                                 t=6,
                                 strides=2,
                                 n=3,
                                 dropout_rate=dropout_rate,
                                 kernel_regulizer=global_regulizer,
                                 depth_multiplier=depth_multiplier,
                                 init_block_id=3)
    X = _inverted_residual_block(X,
                                 filters=int(64 * alpha),
                                 kernel=(3, 3),
                                 t=6,
                                 strides=2,
                                 n=4,
                                 dropout_rate=dropout_rate,
                                 kernel_regulizer=global_regulizer,
                                 depth_multiplier=depth_multiplier,
                                 init_block_id=6)
    X = _inverted_residual_block(X,
                                 filters=int(96 * alpha),
                                 kernel=(3, 3),
                                 t=6,
                                 strides=1,
                                 n=3,
                                 dropout_rate=dropout_rate,
                                 kernel_regulizer=global_regulizer,
                                 depth_multiplier=1,
                                 init_block_id=10)
    X = _inverted_residual_block(X,
                                 filters=int(160 * alpha),
                                 kernel=(3, 3),
                                 t=6,
                                 strides=2,
                                 n=3,
                                 dropout_rate=dropout_rate,
                                 kernel_regulizer=global_regulizer,
                                 depth_multiplier=depth_multiplier,
                                 init_block_id=13)
    X = _inverted_residual_block(X,
                                 filters=int(320 * alpha),
                                 kernel=(3, 3),
                                 t=6,
                                 strides=1,
                                 n=1,
                                 dropout_rate=dropout_rate,
                                 kernel_regulizer=global_regulizer,
                                 depth_multiplier=depth_multiplier,
                                 init_block_id=16)
    # 1x1 convolution
    X = _conv_block(X,
                    filters=1280,
                    kernel=(1, 1),
                    base_name='conv2',
                    stride=1,
                    dropout_rate=dropout_rate,
                    kernel_regulizer=global_regulizer)
    # classification layer
    X = GlobalAveragePooling2D()(X)
    X = Dense(class_num, name="output")(X)
    model = keras.Model(inputs=X_input, outputs=X, name='mobileNet')
    return model
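# _conv_block and _inverted_residual_block are not shown in the source. A
# minimal sketch of _conv_block consistent with the call sites above (an
# assumption, not the original helper; it keeps the source's
# `kernel_regulizer` spelling):
def _conv_block(X, filters, kernel, base_name, stride,
                dropout_rate=None, kernel_regulizer=None):
    # Conv -> BatchNorm -> ReLU6, with optional dropout
    X = Conv2D(filters, kernel, strides=stride, padding='same', use_bias=False,
               name=base_name, kernel_regularizer=kernel_regulizer)(X)
    X = BatchNormalization(name=base_name + '_BN')(X)
    X = Activation(tf.nn.relu6, name=base_name + '_relu')(X)
    if dropout_rate:
        X = Dropout(dropout_rate, name=base_name + '_dropout')(X)
    return X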
Example #7
    def __init__(self,
                 depth_of_model,
                 growth_rate,
                 num_of_blocks,
                 num_layers_in_each_block,
                 data_format,
                 bottleneck=True,
                 compression=0.5,
                 weight_decay=1e-4,
                 dropout_rate=0,
                 pool_initial=False,
                 include_top=True):
        super(DenseNetEager, self).__init__()
        self.depth_of_model = depth_of_model
        self.growth_rate = growth_rate
        self.num_of_blocks = num_of_blocks
        self.num_layers_in_each_block = num_layers_in_each_block
        self.data_format = data_format
        self.bottleneck = bottleneck
        self.compression = compression
        self.weight_decay = weight_decay
        self.dropout_rate = dropout_rate
        self.pool_initial = pool_initial
        self.include_top = include_top

        # deciding on number of layers in each block
        if isinstance(self.num_layers_in_each_block, list) or isinstance(
                self.num_layers_in_each_block, tuple):
            self.num_layers_in_each_block = list(self.num_layers_in_each_block)
        else:
            if self.num_layers_in_each_block == -1:
                if self.num_of_blocks != 3:
                    raise ValueError(
                        "Number of blocks must be 3 if num_layers_in_each_block is -1"
                    )
                if (self.depth_of_model - 4) % 3 == 0:
                    num_layers = (self.depth_of_model - 4) // 3
                    if self.bottleneck:
                        num_layers //= 2
                    self.num_layers_in_each_block = (
                        [num_layers] * self.num_of_blocks)
                else:
                    raise ValueError(
                        "Depth must be 3N+4 if num_layer_in_each_block=-1")
            else:
                self.num_layers_in_each_block = [
                    self.num_layers_in_each_block
                ] * self.num_of_blocks

        axis = -1 if self.data_format == "channels_last" else 1

        # setting the kernel size and stride of the initial conv layer
        if self.pool_initial:
            init_kernel = (7, 7)
            stride = (2, 2)
        else:
            init_kernel = (3, 3)
            stride = (1, 1)

        self.num_filters = 2 * self.growth_rate

        # first conv and pool layer
        self.conv1 = Conv2D(
            self.num_filters,
            init_kernel,
            strides=stride,
            padding="same",
            use_bias=False,
            data_format=self.data_format,
            # kernel_initializer="he_normal",
            kernel_regularizer=l2(self.weight_decay),
        )
        if self.pool_initial:
            self.pool1 = MaxPooling2D(pool_size=(3, 3),
                                      strides=(2, 2),
                                      padding="same",
                                      data_format=self.data_format)
            self.batchnorm1 = BatchNormalization(momentum=0.9, axis=axis)

        self.batchnorm2 = BatchNormalization(momentum=0.9, axis=axis)

        # last pooling and fc layer
        if self.include_top:
            # self.last_pool = tf.layers.AveragePooling2D(pool_size=(6,6), strides=(6,6))
            self.last_pool = GlobalAveragePooling2D(
                data_format=self.data_format)

            # self.last_pool = tf.layers.Flatten()
            ## Remove classifier; we always use hidden layer
            # self.classifier = tf.layers.Dense(self.output_classes)

        # calculating the number of filters after each block
        num_filters_after_each_block = [self.num_filters]
        for i in range(1, self.num_of_blocks):
            temp_num_filters = num_filters_after_each_block[i - 1] + (
                self.growth_rate * self.num_layers_in_each_block[i - 1])
            # using compression to reduce the number of inputs to the
            # transition block
            temp_num_filters = int(temp_num_filters * compression)
            num_filters_after_each_block.append(temp_num_filters)

        # dense block initialization
        self.dense_blocks = []
        self.transition_blocks = []
        for i in range(self.num_of_blocks):
            self.dense_blocks.append(
                DenseBlock(self.num_layers_in_each_block[i], self.growth_rate,
                           self.data_format, self.bottleneck,
                           self.weight_decay, self.dropout_rate))
            if i + 1 < self.num_of_blocks:
                self.transition_blocks.append(
                    TransitionBlock(num_filters_after_each_block[i + 1],
                                    self.data_format, self.weight_decay,
                                    self.dropout_rate))
Example #8
def SqueezeNet(include_top=True,
               weights="imagenet",
               input_tensor=None,
               input_shape=None,
               pooling=None,
               classes=1000):

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')
    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Convolution2D(64,
                      kernel_size=(3, 3),
                      strides=(2, 2),
                      padding="same",
                      activation="relu",
                      name='conv1')(img_input)
    x = MaxPooling2D(pool_size=(3, 3),
                     strides=(2, 2),
                     name='maxpool1',
                     padding="valid")(x)

    x = _fire(x, (16, 64, 64), name="fire2")
    x = _fire(x, (16, 64, 64), name="fire3")

    x = MaxPooling2D(pool_size=(3, 3),
                     strides=(2, 2),
                     name='maxpool3',
                     padding="valid")(x)

    x = _fire(x, (32, 128, 128), name="fire4")
    x = _fire(x, (32, 128, 128), name="fire5")

    x = MaxPooling2D(pool_size=(3, 3),
                     strides=(2, 2),
                     name='maxpool5',
                     padding="valid")(x)

    x = _fire(x, (48, 192, 192), name="fire6")
    x = _fire(x, (48, 192, 192), name="fire7")

    x = _fire(x, (64, 256, 256), name="fire8")
    x = _fire(x, (64, 256, 256), name="fire9")

    if include_top:
        x = Dropout(0.5, name='dropout9')(x)

        x = Convolution2D(classes, (1, 1), padding='valid', name='conv10')(x)
        x = AveragePooling2D(pool_size=(13, 13), name='avgpool10')(x)
        x = Flatten(name='flatten10')(x)
        x = Activation("softmax", name='softmax')(x)
    else:
        if pooling == "avg":
            x = GlobalAveragePooling2D(name="avgpool10")(x)
        else:
            x = GlobalMaxPooling2D(name="maxpool10")(x)

    model = Model(img_input, x, name="squeezenet")

    if weights == 'imagenet':
        weights_path = get_file('squeezenet_weights.h5',
                                WEIGHTS_PATH,
                                cache_subdir='models')

        model.load_weights(weights_path)

    return model
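# A hedged usage sketch (not part of the source): random initialization
# (weights=None) avoids the external WEIGHTS_PATH download.
model = SqueezeNet(weights=None, input_shape=(224, 224, 3), classes=10)
model.summary()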
Example #9
def __create_dense_net(nb_classes,
                       img_input,
                       include_top,
                       depth=40,
                       nb_dense_block=3,
                       growth_rate=12,
                       nb_filter=-1,
                       nb_layers_per_block=-1,
                       bottleneck=False,
                       reduction=0.0,
                       dropout_rate=None,
                       weight_decay=1e-4,
                       subsample_initial_block=False,
                       activation='softmax'):
    ''' Build the DenseNet model
    Args:
        nb_classes: number of classes
        img_input: tuple of shape (channels, rows, columns) or (rows, columns, channels)
        include_top: flag to include the final Dense layer
        depth: number of layers
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
                Can be -1, a positive integer or a list.
                If -1, calculates nb_layer_per_block from the depth of the network.
                If positive integer, a set number of layers per dense block.
                If list, nb_layers is used as provided. Note that the list size
                must be nb_dense_block (one entry per dense block).
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note : reduction value is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Set to True to subsample the initial convolution and
                add a MaxPooling2D before the dense blocks are added.
        activation: Type of activation at the top layer. Can be one of 'softmax' or 'sigmoid'.
                Note that if sigmoid is used, classes must be 1.
    Returns: keras tensor with nb_layers of conv_block appended
    '''

    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    if reduction != 0.0:
        assert reduction <= 1.0 and reduction > 0.0, 'reduction value must lie between 0.0 and 1.0'

    # layers in each dense block
    if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
        nb_layers = list(nb_layers_per_block)  # Convert tuple to list

        assert len(nb_layers) == (nb_dense_block), 'If list, nb_layer is used as provided. ' \
                                                   'Note that list size must be (nb_dense_block)'
        final_nb_layer = nb_layers[-1]
        nb_layers = nb_layers[:-1]
    else:
        if nb_layers_per_block == -1:
            assert (
                depth - 4
            ) % 3 == 0, 'Depth must be 3 N + 4 if nb_layers_per_block == -1'
            count = int((depth - 4) / 3)

            if bottleneck:
                count = count // 2

            nb_layers = [count for _ in range(nb_dense_block)]
            final_nb_layer = count
        else:
            final_nb_layer = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * nb_dense_block

    # compute initial nb_filter if -1, else accept the user's initial nb_filter
    if nb_filter <= 0:
        nb_filter = 2 * growth_rate

    # compute compression factor
    compression = 1.0 - reduction

    # Initial convolution
    if subsample_initial_block:
        initial_kernel = (7, 7)
        initial_strides = (2, 2)
    else:
        initial_kernel = (3, 3)
        initial_strides = (1, 1)

    x = Conv2D(nb_filter,
               initial_kernel,
               kernel_initializer='he_normal',
               padding='same',
               strides=initial_strides,
               use_bias=False,
               kernel_regularizer=l2(weight_decay))(img_input)

    if subsample_initial_block:
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)
        x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)

    # Add dense blocks
    for block_idx in range(nb_dense_block - 1):
        x, nb_filter = __dense_block(x,
                                     nb_layers[block_idx],
                                     nb_filter,
                                     growth_rate,
                                     bottleneck=bottleneck,
                                     dropout_rate=dropout_rate,
                                     weight_decay=weight_decay)
        # add transition_block
        x = __transition_block(x,
                               nb_filter,
                               compression=compression,
                               weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # The last dense_block does not have a transition_block
    x, nb_filter = __dense_block(x,
                                 final_nb_layer,
                                 nb_filter,
                                 growth_rate,
                                 bottleneck=bottleneck,
                                 dropout_rate=dropout_rate,
                                 weight_decay=weight_decay)

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
    x = Activation('relu')(x)
    x = GlobalAveragePooling2D()(x)

    if include_top:
        x = Dense(nb_classes, activation=activation)(x)

    return x
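# A hedged usage sketch (not part of the source): the builder returns a
# tensor, so it still has to be wired into a Model.
img_input = Input(shape=(32, 32, 3))
output = __create_dense_net(10, img_input, include_top=True, depth=40)
model = Model(img_input, output, name='densenet')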
Example #10
def ShuffleNetV2(include_top=True,
                 input_tensor=None,
                 scale_factor=1.0,
                 pooling='max',
                 input_shape=(224,224,3),
                 load_model=None,
                 num_shuffle_units=[3,7,3],
                 bottleneck_ratio=1,
                 classes=1000,
                 activation="relu"):
    if K.backend() != 'tensorflow':
        raise RuntimeError('Only tensorflow supported for now')
    name = 'ShuffleNetV2_{}_{}_{}'.format(scale_factor, bottleneck_ratio, "".join([str(x) for x in num_shuffle_units]))
    input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=28, require_flatten=include_top,
                                      data_format=K.image_data_format())
    out_dim_stage_two = {0.5:48, 1:116, 1.5:176, 2:244}

    if pooling not in ['max', 'avg']:
        raise ValueError('Invalid value for pooling')
    if not (float(scale_factor) * 4).is_integer():
        raise ValueError('Invalid value for scale_factor: it should be a multiple of 0.25')
    exp = np.insert(np.arange(len(num_shuffle_units), dtype=np.float32), 0, 0)  # [0., 0., 1., 2.]
    out_channels_in_stage = 2**exp
    try:
        out_channels_in_stage *= out_dim_stage_two[bottleneck_ratio]  #  calculate output channels for each stage
    except KeyError:
        out_channels_in_stage *= int((out_dim_stage_two[2] - out_dim_stage_two[0.5]) /
                                     1.5 * (bottleneck_ratio - 0.5)) + out_dim_stage_two[0.5]  # interpolate
    out_channels_in_stage[0] = 24  # first stage has always 24 output channels
    out_channels_in_stage *= scale_factor
    out_channels_in_stage = out_channels_in_stage.astype(int)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # create shufflenet architecture
    x = Conv2D(filters=out_channels_in_stage[0], kernel_size=(3, 3), padding='same', use_bias=False, strides=(2, 2),
               activation='relu', name='conv1')(img_input)
    x = MaxPool2D(pool_size=(3, 3), strides=(2, 2), padding='same', name='maxpool1')(x)

    # create stages containing shufflenet units beginning at stage 2
    for stage, repeat in enumerate(num_shuffle_units):
        x = block(x, out_channels_in_stage,
                  repeat=repeat,
                  bottleneck_ratio=bottleneck_ratio,
                  stage=stage + 2)
    
    if bottleneck_ratio < 1:
        k = 512
    elif bottleneck_ratio < 2:
        k = 1024
    else:
        k = 2048
    x = Conv2D(k, kernel_size=1, padding='same', strides=1, name='1x1conv5_out', activation='relu')(x)

    if pooling == 'avg':
        x = GlobalAveragePooling2D(name='global_avg_pool')(x)
    elif pooling == 'max':
        x = GlobalMaxPooling2D(name='global_max_pool')(x)

    if include_top:
        x = Dense(classes, name='fc')(x)
        x = Activation('softmax', name='softmax')(x)
    else:
        return img_input, x
    if input_tensor:
        inputs = get_source_inputs(input_tensor)

    else:
        inputs = img_input

    model = Model(inputs, x, name=name)

    if load_model:
        model.load_weights(load_model, by_name=True)

    return model
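# A hedged usage sketch (not part of the source): a width-1.0 network with
# randomly initialized weights.
model = ShuffleNetV2(input_shape=(224, 224, 3), scale_factor=1.0,
                     bottleneck_ratio=1, classes=1000)
model.summary()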
Example #11
robot_state_test_label = strawberry_states_frame_rate[
    train_images.shape[0]:train_images.shape[0] + test_images.shape[0]]
print("Robot state label testset size: {}".format(
    robot_state_test_label.shape))

####################################################################################################################
model = VGG19(include_top=False, weights='imagenet', input_shape=(224, 224, 3))
#model.summary()

for layer in model.layers[:21]:
    layer.trainable = False
for layer in model.layers[21:]:
    layer.trainable = True

y1 = model.output
y2 = GlobalAveragePooling2D()(y1)
y3 = Dense(512, activation='relu')(y2)
y4 = Dense(512, activation='relu')(y3)

new_model = Model(inputs=model.input, outputs=y4)
####################################################################################################################

intermediate_layer_model = load_model(
    '/home/kiyanoushs/Kiyanoush Codes/Needle Insertion/Models/CNN_intermediate_layer.h5'
)
intermediate_output_train = intermediate_layer_model.predict(
    [train, robot_state_train_input])
intermediate_output_test = intermediate_layer_model.predict(
    [test, robot_state_test_input])

print(intermediate_output_train.shape)
Example #12
model_start = Input(shape=(x_train.shape[1], x_train.shape[2],
                           x_train.shape[3]))
model_start2 = Input(shape=(xfcss_train.shape[1], ))
model_resnet = model_start
model_perc = model_start2

model_resnet = Conv2D(filters=32, kernel_size=3,
                      activation='relu')(model_resnet)
model_resnet = Conv2D(64, 3, activation='relu')(model_resnet)
model_resnet = MaxPooling2D(3)(model_resnet)
num_res_net_blocks = 34
for i in range(num_res_net_blocks):
    model_resnet = res_net_block(model_resnet, 64, 3)
model_resnet = Conv2D(64, 3, activation='relu')(model_resnet)
model_resnet = GlobalAveragePooling2D()(model_resnet)
model_resnet = Dense(256, activation='relu')(model_resnet)

model_resnet = Dropout(dropout_rate)(model_resnet)

model_perc = Dense(50)(model_perc)
model_perc = BatchNormalization()(model_perc)
model_perc = Activation('relu')(model_perc)

# model_perc = Flatten()(model_perc)
# model_cnn = Activation('relu')(model_cnn)

#model_cnn = Dense(50)(model_cnn)
#model_cnn = BatchNormalization()(model_cnn)
#model_cnn = Activation('relu')(model_cnn)
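# res_net_block is called above but not defined in this listing. A minimal
# sketch of a two-conv residual block matching res_net_block(x, 64, 3), in
# the style of the TensorFlow tutorials (an assumption, not the original):
def res_net_block(input_data, filters, conv_size):
    # main path: two same-padded convolutions with batch norm
    x = Conv2D(filters, conv_size, activation='relu', padding='same')(input_data)
    x = BatchNormalization()(x)
    x = Conv2D(filters, conv_size, activation=None, padding='same')(x)
    x = BatchNormalization()(x)
    # identity shortcut, then the final non-linearity
    x = Add()([x, input_data])
    x = Activation('relu')(x)
    return x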