Пример #1
0
def create_optpresso_model(input_shape: List) -> Sequential:
    """Build the Optpresso pull-time regression CNN.

    The network is: preprocessing/augmentation head, a stem conv with batch
    norm, a stack of stride-2 downsampling convolutions, and a dropout-heavy
    dense head ending in a single linear output whose bias starts at the mean
    pull time.

    Args:
        input_shape: HWC shape of the input images.

    Returns:
        An uncompiled ``Sequential`` model.
    """
    model = Sequential()

    # Preprocessing + augmentation head (mean subtraction, rescale, flips,
    # rotation with factor 1 == full-circle random rotation).
    model.add(InputLayer(input_shape=input_shape))
    model.add(SubtractMeanLayer(mean=MEAN_IMG_VALUES))
    model.add(Rescaling(1.0 / 255))
    model.add(RandomFlip())
    model.add(RandomRotation(1))

    # Stem: the only conv at full resolution; batch norm before activation.
    model.add(Convolution2D(32, (5, 5), padding="same"))
    model.add(BatchNormalization())
    model.add(Activation("relu"))

    # Downsampling trunk: every conv halves the spatial dims (stride 2).
    for filters, kernel in ((48, 5), (48, 5), (64, 3), (64, 3), (128, 3)):
        model.add(Convolution2D(
            filters,
            (kernel, kernel),
            strides=(2, 2),
            padding="same",
        ))
        model.add(Activation("relu"))

    # Final conv feeds the flattened features; activation applied after
    # flattening, matching the original layer order.
    model.add(Convolution2D(
        128,
        (3, 3),
        strides=(2, 2),
        padding="same",
    ))
    model.add(Flatten())
    model.add(Activation("relu"))

    # Regression head: shrinking dense layers, each with 50% dropout before
    # its activation.
    for units in (128, 96, 64):
        model.add(Dense(units))
        model.add(Dropout(0.5))
        model.add(Activation("relu"))

    # Single linear output; bias initialised to the dataset mean so training
    # starts from a sensible prediction.
    model.add(Dense(1, bias_initializer=Constant(MEAN_PULL_TIME)))

    return model
Пример #2
0
size = 200
img_size = 50

data, labels = var5.gen_data(size, img_size)
# BUG FIX: ndarray.reshape returns a new array; the original call discarded
# the result, leaving `labels` unchanged.
labels = labels.reshape(labels.size)
num_classes = np.unique(labels).shape[0]
le.fit(np.unique(labels))
labels = le.transform(labels)
labels = utils.to_categorical(labels, num_classes)  # One-hot encode the labels

inp = Input(shape=(img_size, img_size, 1))

# Conv [32] -> Conv [32] (pooling/dropout currently disabled below)
layer = Convolution2D(conv_depth_1, (kernel_size, kernel_size),
                      padding='same',
                      activation='relu')(inp)
layer = Convolution2D(conv_depth_1, (kernel_size, kernel_size),
                      padding='same',
                      activation='relu')(layer)
# layer = MaxPooling2D(pool_size=(pool_size, pool_size))(layer)
# layer = Dropout(drop_prob_1)(layer)

flat = Flatten()(layer)

layer = Dense(hidden_size, activation='relu')(flat)

# BUG FIX: labels are one-hot vectors of length num_classes, so the output
# layer must produce num_classes probabilities (softmax), not a single
# relu-activated unit.
out = Dense(num_classes, activation='softmax')(layer)

model = Model(inputs=inp, outputs=out)
Пример #3
0
def ResNet18(include_top=True,
             weights='cifar100_coarse',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=20,
             **kwargs):
    """Instantiate a ResNet-18 model, optionally with CIFAR-100-coarse weights.

    Args:
        include_top: whether to include the final 20-way softmax classifier.
        weights: `'cifar100_coarse'`, `None` (random init) or a weights path.
        input_tensor: optional Keras tensor to use as the model input.
        input_shape: optional input shape tuple (validated/derived below).
        pooling: optional global pooling mode (`'avg'`/`'max'`) when
            `include_top` is False.
        classes: number of output classes (must be 20 with pretrained top).
        **kwargs: forwarded to `get_submodules_from_kwargs` (keras-applications
            style backend/layers/models/utils injection).

    Returns:
        A `Model` instance.

    Raises:
        ValueError: on an invalid `weights` argument, or `classes != 20` when
            loading the pretrained top.
    """
    global backend, layers, models, keras_utils
    backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)

    # Check Weights
    if not (weights in {'cifar100_coarse', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `cifar100_coarse` '
                         '(pre-training on cifar100 coarse (super) classes), '
                         'or the path to the weights file to be loaded.')

    if weights == 'cifar100_coarse' and include_top and classes != 20:
        raise ValueError('If using `weights` as `"cifar100_coarse"` with `include_top`'
                         ' as true, `classes` should be 20')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=backend.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        # CONSISTENCY FIX: use the injected `backend` submodule (as everywhere
        # else in this function) instead of reaching for `tf.keras.backend`,
        # which defeats the get_submodules_from_kwargs abstraction.
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Batch-norm axis depends on the data format (channels last vs first).
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    # Build ResNet18 architecture
    x = ZeroPadding2D(padding=(3,3),name='conv1_pad')(img_input)
    x = Convolution2D(64,(7,7),
                      strides=(2,2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      name='conv1')(x)
    x = BatchNormalization(axis=bn_axis,name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = ZeroPadding2D(padding=(1,1),name='pool1_pad')(x)
    x = MaxPooling2D((3,3),strides=(2,2))(x)

    # Four stages of two basic blocks each (ResNet-18 layout).
    x = identity_block(x,3,[64,64],stage=2,block='a')
    x = identity_block(x,3,[64,64],stage=2,block='b')

    x = conv_block(x,3,[128,128],stage=3,block='a')
    x = identity_block(x,3,[128,128],stage=3,block='b')

    x = conv_block(x,3,[256,256],stage=4,block='a')
    x = identity_block(x,3,[256,256],stage=4,block='b')

    x = conv_block(x,3,[512,512],stage=5,block='a')
    x = identity_block(x,3,[512,512],stage=5,block='b')

    # Managing Top
    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='fc20')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)
        # NOTE: when `pooling` is None and `include_top` is False the output
        # is left as a 4D feature map; callers doing transfer learning must
        # add their own pooling/flatten.

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model
    model = Model(inputs, x, name='resnet18')

    # Load weights
    if weights == 'cifar100_coarse':
        if include_top:
            weights_path = keras_utils.get_file(
                'resnet18_cifar100_top.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='e0798dd90ac7e0498cbdea853bd3ed7f')
        else:
            weights_path = keras_utils.get_file(
                'resnet18_cifar100_no_top.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='bfeace78cec55f2b0401c1f41c81e1dd')
        model.load_weights(weights_path)

    return model
Пример #4
0
# Importing the Keras libraries and packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense

# Step 1 - Building the CNN

# Initializing the CNN
classifier = Sequential()

# First convolution layer and pooling
classifier.add(
    Convolution2D(32, (3, 3), input_shape=(200, 200, 1), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
# Second convolution layer and pooling
classifier.add(Convolution2D(32, (3, 3), activation='relu'))
# input_shape is going to be the pooled feature maps from the previous convolution layer
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Flattening the layers
classifier.add(Flatten())

# Adding a fully connected layer
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=6, activation='softmax'))  # softmax for more than 2

# Compiling the CNN
# NOTE(review): 'binary_crossentropy' together with a 6-way softmax output is
# almost certainly wrong — 'categorical_crossentropy' is expected for one-hot
# multi-class labels; confirm against the training data.
# NOTE(review): this snippet is truncated mid-call — the closing arguments and
# parenthesis of compile() are not part of this excerpt.
classifier.compile(optimizer='adam',
                   loss='binary_crossentropy',
Пример #5
0
    return (X_train, y_train), (X_test, y_test)


(X_train, y_train), (X_test, y_test) = prepare_data_fn()

# Augmentation pipeline: small rotations, zooms and pixel shifts.
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
    rotation_range=12,
    zoom_range=0.10,
    width_shift_range=0.1,
    height_shift_range=0.1,
)

# Conv stack on 28x28x1 input (MNIST-style — presumably; verify upstream).
# NOTE(review): this Sequential([...]) literal is truncated by the snippet
# boundary — the closing bracket and any remaining layers are not visible.
model = Sequential([
    Convolution2D(filters=32,
                  kernel_size=3,
                  activation='relu',
                  input_shape=(28, 28, 1)),
    BatchNormalization(),
    Convolution2D(filters=32, kernel_size=3, activation='relu'),
    BatchNormalization(),
    Convolution2D(filters=32,
                  kernel_size=5,
                  strides=2,
                  padding='same',
                  activation='relu'),
    BatchNormalization(),
    Dropout(0.25),
    Convolution2D(filters=64, kernel_size=3, activation='relu'),
    BatchNormalization(),
    Convolution2D(filters=64, kernel_size=3, activation='relu'),
    BatchNormalization(),
Пример #6
0
    def create_and_load_face_model(self, fname):
        """Build the VGG-Face (VGG16-style) architecture and load weights.

        Args:
            fname: path to an HDF5 weights file matching this architecture
                (e.g. vgg_face_weights.h5).

        Returns:
            The Sequential model with weights loaded.
        """
        # Convolutional trunk: five VGG blocks of (zero-pad, 3x3 relu conv)
        # pairs, each block closed by 2x2 max pooling with stride 2.
        vgg_blocks = ((64, 2), (128, 2), (256, 3), (512, 3), (512, 3))
        model = Sequential()
        for block_index, (depth, conv_count) in enumerate(vgg_blocks):
            for conv_index in range(conv_count):
                if block_index == 0 and conv_index == 0:
                    # The very first layer fixes the 224x224 RGB input shape.
                    model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
                else:
                    model.add(ZeroPadding2D((1, 1)))
                model.add(Convolution2D(depth, (3, 3), activation='relu'))
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))

        # Fully-convolutional classifier head: 7x7 "fc" conv, two dropouts,
        # 2622-way logits, then flatten + softmax.
        model.add(Convolution2D(4096, (7, 7), activation='relu'))
        model.add(Dropout(0.5))
        model.add(Convolution2D(4096, (1, 1), activation='relu'))
        model.add(Dropout(0.5))
        model.add(Convolution2D(2622, (1, 1)))
        model.add(Flatten())
        model.add(Activation('softmax'))

        # Load VGG Face model weights from the caller-supplied file.
        model.load_weights(fname)
        return model
Пример #7
0
from flask import Flask, render_template, request, send_from_directory
from flask_fontawesome import FontAwesome
import cv2
import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, BatchNormalization, Flatten
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.preprocessing import image

# Bone-fracture classifier used by the Flask app below.
# NOTE(review): in tf.keras the positional call Convolution2D(64, 3, 3, ...)
# means kernel_size=3, strides=3 (the Keras-1 "64 filters, 3x3 kernel" reading
# no longer applies). The saved weights below were presumably trained with
# exactly this architecture, so do NOT "fix" the signature without retraining
# or re-exporting the weights — confirm against the training script.
model = Sequential()
model.add(Convolution2D(64, 3, 3, input_shape=(64, 64, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
# Single sigmoid unit: binary fracture / no-fracture output.
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Pre-trained weights must match the architecture defined above.
model.load_weights('model_classifier_bone_fracture.h5')

# COUNT is used elsewhere in the app (presumably an upload counter — verify).
COUNT = 0
app = Flask(__name__)
# Disable static-file caching so newly uploaded images are always re-served.
app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 1
Пример #8
0
def cnn4(input_shape=(192, 640, 4)):
    """Build a 9-layer PReLU convolutional feature extractor.

    Args:
        input_shape: HWC shape of the input tensor.

    Returns:
        An uncompiled ``Sequential`` model ending in a ``Flatten`` layer.
    """
    # (filters, kernel_size, strides, padding) for each conv layer, in order.
    layer_specs = (
        (64, 7, (2, 2), 'valid'),
        (128, 5, (2, 2), 'same'),
        (256, 5, (2, 2), 'same'),
        (256, 3, (1, 1), 'same'),
        (512, 3, (2, 2), 'same'),
        (512, 3, (1, 1), 'same'),
        (512, 3, (2, 2), 'same'),
        (512, 3, (1, 1), 'same'),
        (1024, 3, (2, 2), 'same'),
    )

    model = Sequential()
    for index, (filters, kernel, strides, pad) in enumerate(layer_specs):
        conv_kwargs = {
            'filters': filters,
            'kernel_size': kernel,
            'strides': strides,
            'padding': pad,
            # A fresh PReLU instance per layer, matching the original build.
            'activation': PReLU(),
        }
        if index == 0:
            # Only the first layer declares the input shape.
            conv_kwargs['input_shape'] = input_shape
        model.add(Convolution2D(**conv_kwargs))
    model.add(Flatten())

    return model
def get_unet():
    """Build a 5-level U-Net for single-channel binary segmentation.

    Uses the module-level ``img_rows``/``img_cols`` for the input size and
    returns a model whose output is a sigmoid mask of the same spatial size.
    """
    merge_axis = 3  # concatenate skip connections along the channel axis
    net_in = Input(shape=[img_rows, img_cols, 1])

    def double_conv(tensor, filters):
        # Two stacked 3x3 same-padding relu convolutions.
        tensor = Convolution2D(filters, (3, 3), activation='relu',
                               padding='same')(tensor)
        return Convolution2D(filters, (3, 3), activation='relu',
                             padding='same')(tensor)

    def up_merge(tensor, skip):
        # 2x upsample, then concatenate with the encoder skip tensor.
        return Concatenate(axis=merge_axis)(
            [UpSampling2D(size=(2, 2))(tensor), skip])

    # Encoder: double conv then 2x2 max pool at each of four levels.
    enc1 = double_conv(net_in, 32)
    down1 = MaxPooling2D(pool_size=(2, 2))(enc1)
    enc2 = double_conv(down1, 64)
    down2 = MaxPooling2D(pool_size=(2, 2))(enc2)
    enc3 = double_conv(down2, 128)
    down3 = MaxPooling2D(pool_size=(2, 2))(enc3)
    enc4 = double_conv(down3, 256)
    down4 = MaxPooling2D(pool_size=(2, 2))(enc4)

    # Bottleneck.
    bottom = double_conv(down4, 512)

    # Decoder: mirror the encoder, halving the filter count at each level.
    dec = double_conv(up_merge(bottom, enc4), 256)
    dec = double_conv(up_merge(dec, enc3), 128)
    dec = double_conv(up_merge(dec, enc2), 64)
    dec = double_conv(up_merge(dec, enc1), 32)

    # 1x1 sigmoid conv produces the per-pixel probability mask.
    mask = Convolution2D(1, (1, 1), activation='sigmoid')(dec)

    return Model(inputs=net_in, outputs=mask)
Пример #10
0
def unetModel_basic_4(input_height, input_width, nChannels, lr_rate=1e-3, dropout_ratio=0.2, activation='relu', dropout_level=0):
    """Build and compile a 5-level U-Net for binary segmentation.

    Args:
        input_height, input_width, nChannels: input tensor dimensions.
        lr_rate: Adam learning rate.
        dropout_ratio: dropout probability used between paired convolutions.
        activation: activation for all hidden convolutions.
        dropout_level: if truthy, also apply dropout in the decoder blocks
            (encoder dropout is always on).

    Returns:
        A compiled model whose output is flattened to
        ``(input_height * input_width, 1)`` with sigmoid probabilities.
    """
    inputs = Input(shape=(input_height, input_width, nChannels))

    # Encoder: conv -> dropout -> conv per level; keep the second conv output
    # as the skip tensor, then downsample with 2x2 max pooling.
    skips = []
    tensor = inputs
    for filters in (16, 32, 64, 128, 256):
        tensor = Convolution2D(filters, (3, 3), activation=activation, padding='same')(tensor)
        tensor = Dropout(dropout_ratio)(tensor)
        tensor = Convolution2D(filters, (3, 3), activation=activation, padding='same')(tensor)
        skips.append(tensor)
        tensor = MaxPooling2D(pool_size=(2, 2))(tensor)

    # Bottleneck (no pooling).
    tensor = Convolution2D(512, (3, 3), activation=activation, padding='same')(tensor)
    tensor = Dropout(dropout_ratio)(tensor)
    tensor = Convolution2D(512, (3, 3), activation=activation, padding='same')(tensor)

    # Decoder: upsample, concatenate the matching skip, conv, optional
    # dropout (only when dropout_level is truthy), conv.
    for filters, skip in zip((256, 128, 64, 32, 16), reversed(skips)):
        tensor = concatenate([UpSampling2D(size=(2, 2))(tensor), skip], axis=3)
        tensor = Convolution2D(filters, (3, 3), activation=activation, padding='same')(tensor)
        if dropout_level:
            tensor = Dropout(dropout_ratio)(tensor)
        tensor = Convolution2D(filters, (3, 3), activation=activation, padding='same')(tensor)

    # 1x1 sigmoid conv -> per-pixel probabilities, flattened for the loss.
    output = Convolution2D(1, (1, 1), activation='sigmoid', name='main_output')(tensor)
    output = Reshape((input_height * input_width, 1))(output)

    model = Model(inputs=inputs, outputs=output)
    model.compile(loss=dice_coef_loss, optimizer=Adam(lr=lr_rate), metrics=[dice_coef])

    return model
Пример #11
0
def unetModel_residual(input_height, input_width, nChannels, lr_rate=1e-3, dropout_ratio=0.2, activation='relu', dropout_level=0):
    """Build and compile a 5-level U-Net with 1x1-projection residual shortcuts.

    Encoder levels compute conv->dropout->conv, add a 1x1 projection of the
    level input as a residual, and pool the residual sum. Decoder levels
    upsample, concatenate the encoder skip, conv twice (dropout only when
    `dropout_level` is truthy), and add a 1x1 projection of the concatenated
    input. Output is a sigmoid mask flattened to (H*W, 1).

    NOTE(review): the encoder skip connections (conv1..conv5) and the decoder
    input (conv6) are the PRE-residual conv outputs, so the residual sums only
    feed the pooling path — confirm this asymmetry is intentional.
    """
    inputs = Input(shape=(input_height, input_width, nChannels))
    num_features = 16  # base channel count; doubled at each encoder level

    conv1 = Convolution2D(num_features * pow(2, 0), (3, 3), activation=activation, padding='same')(inputs)
    conv1 = Dropout(dropout_ratio)(conv1)
    conv1 = Convolution2D(num_features * pow(2, 0), (3, 3), activation=activation, padding='same')(conv1)
    # Residual connection: 1x1 projection of the level input.
    shortcut = Convolution2D(num_features, kernel_size=(1, 1))(inputs)
    output = Add()([conv1, shortcut])
    pool1 = MaxPooling2D(pool_size=(2, 2))(output)

    conv2 = Convolution2D(num_features * pow(2, 1), (3, 3), activation=activation, padding='same')(pool1)
    conv2 = Dropout(dropout_ratio)(conv2)
    conv2 = Convolution2D(num_features * pow(2, 1), (3, 3), activation=activation, padding='same')(conv2)
    # Residual connection
    shortcut = Convolution2D(num_features * pow(2, 1), kernel_size=(1, 1))(pool1)
    output = Add()([conv2, shortcut])
    pool2 = MaxPooling2D(pool_size=(2, 2))(output)

    conv3 = Convolution2D(num_features * pow(2, 2), (3, 3), activation=activation, padding='same')(pool2)
    conv3 = Dropout(dropout_ratio)(conv3)
    conv3 = Convolution2D(num_features * pow(2, 2), (3, 3), activation=activation, padding='same')(conv3)
    # Residual connection
    shortcut = Convolution2D(num_features * pow(2, 2), kernel_size=(1, 1))(pool2)
    output = Add()([conv3, shortcut])
    pool3 = MaxPooling2D(pool_size=(2, 2))(output)

    conv4 = Convolution2D(num_features * pow(2, 3), (3, 3), activation=activation, padding='same')(pool3)
    conv4 = Dropout(dropout_ratio)(conv4)
    conv4 = Convolution2D(num_features * pow(2, 3), (3, 3), activation=activation, padding='same')(conv4)
    # Residual connection
    shortcut = Convolution2D(num_features * pow(2, 3), kernel_size=(1, 1))(pool3)
    output = Add()([conv4, shortcut])
    pool4 = MaxPooling2D(pool_size=(2, 2))(output)

    conv5 = Convolution2D(num_features * pow(2, 4), (3, 3), activation=activation, padding='same')(pool4)
    conv5 = Dropout(dropout_ratio)(conv5)
    conv5 = Convolution2D(num_features * pow(2, 4), (3, 3), activation=activation, padding='same')(conv5)
    # Residual connection
    shortcut = Convolution2D(num_features * pow(2, 4), kernel_size=(1, 1))(pool4)
    output = Add()([conv5, shortcut])
    pool5 = MaxPooling2D(pool_size=(2, 2))(output)

    conv6 = Convolution2D(num_features * pow(2, 5), (3, 3), activation=activation, padding='same')(pool5)
    conv6 = Dropout(dropout_ratio)(conv6)
    conv6 = Convolution2D(num_features * pow(2, 5), (3, 3), activation=activation, padding='same')(conv6)
    # Residual connection
    # NOTE(review): this stage-6 residual sum is dead code — `output` is never
    # used again (the decoder below starts from conv6, and pool6 is disabled),
    # so these two layers do not reach the final graph. Confirm and remove.
    shortcut = Convolution2D(num_features * pow(2, 5), kernel_size=(1, 1))(pool5)
    output = Add()([conv6, shortcut])
    # pool6 = MaxPooling2D(pool_size=(2, 2))(output)

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv5], axis=3)
    conv7 = Convolution2D(num_features * pow(2, 4), (3, 3), activation=activation, padding='same')(up1)
    conv7 = Dropout(dropout_ratio)(conv7) if dropout_level else conv7
    conv7 = Convolution2D(num_features * pow(2, 4), (3, 3), activation=activation, padding='same')(conv7)
    # Residual connection: 1x1 projection of the concatenated decoder input.
    shortcut = Convolution2D(num_features * pow(2, 4), kernel_size=(1, 1))(up1)
    conv7 = Add()([conv7, shortcut])

    up2 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv4], axis=3)
    conv8 = Convolution2D(num_features * pow(2, 3), (3, 3), activation=activation, padding='same')(up2)
    conv8 = Dropout(dropout_ratio)(conv8) if dropout_level else conv8
    conv8 = Convolution2D(num_features * pow(2, 3), (3, 3), activation=activation, padding='same')(conv8)
    # Residual connection
    shortcut = Convolution2D(num_features * pow(2, 3), kernel_size=(1, 1))(up2)
    conv8 = Add()([conv8, shortcut])

    up3 = concatenate([UpSampling2D(size=(2, 2))(conv8), conv3], axis=3)
    conv9 = Convolution2D(num_features * pow(2, 2), (3, 3), activation=activation, padding='same')(up3)
    conv9 = Dropout(dropout_ratio)(conv9) if dropout_level else conv9
    conv9 = Convolution2D(num_features * pow(2, 2), (3, 3), activation=activation, padding='same')(conv9)
    # Residual connection
    shortcut = Convolution2D(num_features * pow(2, 2), kernel_size=(1, 1))(up3)
    conv9 = Add()([conv9, shortcut])

    up4 = concatenate([UpSampling2D(size=(2, 2))(conv9), conv2], axis=3)
    conv10 = Convolution2D(num_features * pow(2, 1), (3, 3), activation=activation, padding='same')(up4)
    conv10 = Dropout(dropout_ratio)(conv10) if dropout_level else conv10
    conv10 = Convolution2D(num_features * pow(2, 1), (3, 3), activation=activation, padding='same')(conv10)
    # Residual connection
    shortcut = Convolution2D(num_features * pow(2, 1), kernel_size=(1, 1))(up4)
    conv10 = Add()([conv10, shortcut])

    up5 = concatenate([UpSampling2D(size=(2, 2))(conv10), conv1], axis=3)
    conv11 = Convolution2D(num_features * pow(2, 0), (3, 3), activation=activation, padding='same')(up5)
    conv11 = Dropout(dropout_ratio)(conv11) if dropout_level else conv11
    conv11 = Convolution2D(num_features * pow(2, 0), (3, 3), activation=activation, padding='same')(conv11)
    # Residual connection
    shortcut = Convolution2D(num_features * pow(2, 0), kernel_size=(1, 1))(up5)
    conv11 = Add()([conv11, shortcut])

    # 1x1 sigmoid conv -> per-pixel probabilities, flattened for the loss.
    conv12 = Convolution2D(1, (1, 1), activation='sigmoid', name='main_output')(conv11)

    conv12 = Reshape((input_height * input_width, 1))(conv12)

    model = Model(inputs=inputs, outputs=conv12)

    optAdam = Adam(lr=lr_rate)
    model.compile(loss=dice_coef_loss, optimizer=optAdam, metrics=[dice_coef])#, sample_weight_mode="temporal")

    return model
Пример #12
0
y_train = tf.keras.utils.to_categorical(y_train, num_classes=10)
y_test = tf.keras.utils.to_categorical(y_test, num_classes=10)

# Define a sequential model
model = Sequential()
# First convolution layer
# input_shape: shape of the input data
# filters: 32 filters, producing 32 feature maps
# kernel_size: 5x5 convolution window
# strides: stride of 1
# padding: padding mode, same/valid
# activation: activation function
model.add(
    Convolution2D(input_shape=(28, 28, 1),
                  filters=32,
                  kernel_size=5,
                  strides=1,
                  padding='same',
                  activation='relu'))
# First pooling layer
# pool_size: 2x2 pooling window
# strides: stride of 2
# padding: padding mode, same/valid
model.add(MaxPooling2D(
    pool_size=2,
    strides=2,
    padding='same',
))
# Second convolution layer (its add() call lies beyond this excerpt)
# filters: 64 filters, producing 64 feature maps
# kernel_size: 5x5 convolution window
# strides: stride of 1
Пример #13
0
    img = plt.imread(str1)
    inputs[ind, :, :, :] = img
    targets[ind, :] = np.array([1, 0])

# Load negative-class images and label them [0, 1]; they are appended after
# the len(files1) positive samples already written into `inputs`/`targets`.
for ind in range(len(files2)):
    str1 = path_negative + files2[ind]
    img = plt.imread(str1)
    inputs[ind + len(files1), :, :, :] = img
    targets[ind + len(files1), :] = np.array([0, 1])

# Small CNN: one 3x3 conv block over 40x40 RGB patches, then a tanh hidden
# layer and a 2-way softmax.
model = Sequential()

model.add(
    Convolution2D(filters=32,
                  kernel_size=(3, 3),
                  input_shape=(40, 40, 3),
                  strides=(1, 1),
                  padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Flatten())

model.add(Dense(10, activation='tanh'))
model.add(Dense(2, activation='softmax'))

# NOTE(review): MSE over a softmax output trains, but categorical
# cross-entropy is the conventional choice here — confirm intent.
model.compile(optimizer='rmsprop', loss='mse')
model.summary()

# Training hyperparameters (used by a fit() call beyond this excerpt).
epochs = 1000
batch = 16
val = 0.15
Пример #14
0
def unet_rrn(
    name,
    input_shapes,
    output_shapes,
    kernel=3,
    stride=1,
    activation='elu',
    output_channels=2,
    kinit='RandomUniform',
    batch_norm=True,
    padding='same',
    axis=3,
    crop=0,
    mpadd=0,
):
    """Build a recurrent U-Net: a ConvLSTM encoder stage over a time series,
    three further conv encoder stages, and a transposed-conv decoder with
    skip connections, ending in a softmax segmentation head.

    Args:
        name: unused here — TODO confirm whether callers rely on it.
        input_shapes: dict with "input_1" -> (T, H, W, C) image stack shape
            and "input_2" -> mask shape for the ConvLSTM stage.
        output_shapes: unused here — TODO confirm.
        kernel, stride, activation, kinit, padding, axis: forwarded to the
            encode/decode helper blocks (defined elsewhere in this module).
        output_channels: number of softmax classes in the output.
        batch_norm: apply BatchNormalization before the output head.
        crop: unused — NOTE(review): `mpadd` drives the Cropping2D below;
            confirm `crop` isn't the parameter that was meant.
        mpadd: symmetric crop size applied before the 1x1 softmax conv.

    Returns:
        A Model taking [images, mask] and producing the "output_1" map.
    """
    nr_classes = output_channels
    timeseries, input_1_height, input_1_width, input_1_channels = input_shapes[
        "input_1"]
    timeseries_mask_shape = input_shapes["input_2"]
    inputs = Input(
        (timeseries, input_1_height, input_1_width, input_1_channels))
    mask = Input(timeseries_mask_shape)

    # Encoding: first stage is LSTM-based (consumes the time axis + mask),
    # the rest are plain conv blocks, each returning (features, pooled).
    conv1_output_last, pool1 = encode_block_lstm(32,
                                                 inputs,
                                                 kernel,
                                                 stride,
                                                 activation,
                                                 kinit,
                                                 padding,
                                                 mask=mask)
    conv2_output_last, pool2 = encode_block(64, pool1, kernel, stride,
                                            activation, kinit, padding)
    conv3_output_last, pool3 = encode_block(128, pool2, kernel, stride,
                                            activation, kinit, padding)
    conv4_output_last, pool4 = encode_block(256, pool3, kernel, stride,
                                            activation, kinit, padding)

    # Middle (bottleneck): no pooling; the pooled output is discarded.
    conv5_output_last, _ = encode_block(512,
                                        pool4,
                                        kernel,
                                        stride,
                                        activation,
                                        kinit,
                                        padding,
                                        max_pool=False)

    # Decoding: transposed-conv blocks, each merging the matching encoder
    # skip tensor along `axis`.
    conv6 = conv_t_block(256, conv5_output_last, conv4_output_last, kernel,
                         stride, activation, kinit, padding, axis)
    conv7 = conv_t_block(128, conv6, conv3_output_last, kernel, stride,
                         activation, kinit, padding, axis)
    conv8 = conv_t_block(64, conv7, conv2_output_last, kernel, stride,
                         activation, kinit, padding, axis)
    conv9 = conv_t_block(32, conv8, conv1_output_last, kernel, stride,
                         activation, kinit, padding, axis)

    # Output head: optional batch norm, crop off `mpadd` border pixels,
    # then a 1x1 softmax conv named for the loss dictionary.
    conv9 = BatchNormalization()(conv9) if batch_norm else conv9

    conv9 = Cropping2D((mpadd, mpadd))(conv9)

    conv10 = Convolution2D(nr_classes, (1, 1),
                           activation='softmax',
                           name="output_1")(conv9)
    model = Model(inputs=[inputs, mask], outputs=[conv10])

    return model
Пример #15
0
# Importing the Keras libraries and packages
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# %%

# Initialising the CNN
classifier = Sequential()

# Step 1 - Convolution
# BUG FIX: the Keras-1 positional call Convolution2D(32, 3, 3, ...) is parsed
# by tf.keras as kernel_size=3, strides=3 — the tutorial intends a 3x3 kernel
# with the default stride of 1, so the kernel is passed as a tuple.
classifier.add(
    Convolution2D(32, (3, 3), input_shape=(64, 64, 3), activation='relu'))

# Step 2 - Pooling
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Adding a second convolutional layer
classifier.add(Convolution2D(32, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))

# Step 3 - Flattening
classifier.add(Flatten())

# Step 4 - Full connection
# BUG FIX: `output_dim` is the Keras 1 keyword; tf.keras Dense only accepts
# `units` and raises TypeError on `output_dim`.
classifier.add(Dense(units=128, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
Пример #16
0
from tensorflow.keras.layers import ZeroPadding2D, Convolution2D, MaxPooling2D
from tensorflow.keras.layers import Dense, Dropout, Softmax, Flatten, Activation, BatchNormalization
# Sequential is used below but was never imported (NameError at model creation).
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.applications.imagenet_utils import preprocess_input
import tensorflow.keras.backend as K
import os
import cv2
import dlib
import matplotlib.pyplot as plt
import json
from keras.models import model_from_json, load_model

# Define VGG_FACE_MODEL architecture
# (first three VGG-16 conv blocks shown in this fragment; each 3x3 conv is
# preceded by 1-pixel zero padding so spatial size is only reduced by pooling)
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
Пример #17
0
#  - Reshape [N,2,128] to [N,1,2,128] on input
#  - Pass through 2 2DConv/ReLu layers
#  - Pass through 2 Dense layers (ReLu and Softmax)
#  - Perform categorical cross entropy optimization

def custom_loss(target, output):
    """Temperature-scaled cross entropy (knowledge-distillation style).

    Softens `output` with exponent 1/hp.T, renormalizes it to a
    distribution over axis 1, and scales the categorical cross entropy
    by T^2 to keep gradient magnitudes comparable across temperatures.
    """
    softened = output ** (1 / hp.T)
    normalizer = K.reshape(K.sum(softened, axis=1), (-1, 1))
    probs = softened / normalizer
    return (hp.T) ** 2 * K.categorical_crossentropy(target, probs)

# Adam optimizer with the learning rate taken from the hyperparameter bundle.
optim = Adam(hp.lr)

# CNN over [N, 2, 128] IQ samples (see layout notes above): reshape to
# [N, 2, 128, 1], two zero-padded Conv/ReLU stages with dropout, then two
# dense layers ending in a softmax over the modulation classes.
model = models.Sequential()
model.add(Reshape(in_shp+[1], input_shape=in_shp)) # tf format
model.add(ZeroPadding2D((0, 2)))
model.add(Convolution2D(256, (1, 3), padding='valid', activation="relu", name="conv1", kernel_initializer='glorot_uniform'))
model.add(Dropout(hp.dr))
model.add(ZeroPadding2D((0, 2)))
model.add(Convolution2D(80, (2, 3), padding="valid", activation="relu", name="conv2", kernel_initializer='glorot_uniform'))
model.add(Dropout(hp.dr))
model.add(Flatten())
model.add(Dense(256, activation='relu', kernel_initializer='he_normal', name="dense1"))
model.add(Dropout(hp.dr))
model.add(Dense( len(classes), kernel_initializer='he_normal', name="dense2" ))
model.add(Activation('softmax'))
model.add(Reshape([len(classes)]))
# Compiled with the temperature-scaled loss defined above.
model.compile(loss=custom_loss, metrics=['accuracy'], optimizer=optim)

# # Train the Model

# perform training ...
args = parser.parse_args()

# Get the environment and extract the number of actions.
env = gym.make(args.env_name)
np.random.seed(123)
env.seed(123)  # NOTE(review): classic gym API; removed in gymnasium — confirm gym version
nb_actions = env.action_space.n

# Next, we build our model. We use the same model that was described by Mnih et al. (2015).
input_shape = (WINDOW_LENGTH,) + INPUT_SHAPE
model = Sequential()

# (width, height, channels)
# Move the frame-stack axis last so the convolutions see it as channels.
model.add(Permute((2, 3, 1), input_shape=input_shape))

# The three conv stages from the DQN Nature paper: 8x8/4, 4x4/2, 3x3/1.
model.add(Convolution2D(32, (8, 8), strides=(4, 4)))
model.add(Activation('relu'))
model.add(Convolution2D(64, (4, 4), strides=(2, 2)))
model.add(Activation('relu'))
model.add(Convolution2D(64, (3, 3), strides=(1, 1)))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
# One linear Q-value output per action.
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())

# Finally, we configure and compile our agent. You can use every built-in tensorflow.keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=1000000, window_length=WINDOW_LENGTH)
Пример #19
0
    x_pos = []

    # Gather the samples selected by `index` into parallel lists.
    for i in range(0, len(index)):
        x_right.append(x[index[i]])
        x_pos.append(x1[index[i]])
        y1.append(y[index[i]])

    x_right = np.array(x_right)
    x_pos = np.array(x_pos)
    y1 = np.array(y1)
    #change list to array

    #input size
    X = Input(shape=(HEIGHT, WIDTH, 3))
    #convolutional layers: four SELU conv blocks with batch norm,
    #the first three followed by 2x2 max pooling
    conv_1 = Convolution2D(64, (3, 3), activation='selu')(X)
    batch1 = BatchNormalization()(conv_1)
    max_1 = MaxPooling2D((2, 2), strides=(2, 2))(batch1)
    conv_2 = Convolution2D(128, (7, 7), activation='selu')(max_1)
    batch2 = BatchNormalization()(conv_2)
    max_2 = MaxPooling2D((2, 2), strides=(2, 2))(batch2)
    conv_3 = Convolution2D(256, (3, 3), activation='selu')(max_2)
    batch3 = BatchNormalization()(conv_3)
    max_3 = MaxPooling2D((2, 2), strides=(2, 2))(batch3)
    conv_4 = Convolution2D(256, (3, 3), activation='selu')(max_3)
    batch4 = BatchNormalization()(conv_4)
    #flatten and dense twice
    fcl = Flatten()(batch4)
    dense_1 = Dense(4096, activation='selu')(fcl)
    dense_2 = Dense(1024, activation='selu')(dense_1)
# Neural-style-transfer setup: the content image as a backend variable.
base_image = K.variable(preprocess_image(base_image_path, True, read_mode=read_mode))

# One backend variable per style reference image.
style_reference_images = []
for style_path in style_image_paths:
    style_reference_images.append(K.variable(preprocess_image(style_path)))

# Input batch shape, honouring the backend's channel ordering.
nb_tensors = 1
if K.image_data_format() == "channels_first":
    shape = (nb_tensors, 3, img_width, img_height)
else:
    shape = (nb_tensors, img_width, img_height, 3)

ip = Input(batch_shape=shape)

# build the VGG16 network with our 3 images as input
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_1', padding='same')(ip)
x = Convolution2D(64, (3, 3), activation='relu', name='conv1_2', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(128, (3, 3), activation='relu', name='conv2_1', padding='same')(x)
x = Convolution2D(128, (3, 3), activation='relu', name='conv2_2', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(256, (3, 3), activation='relu', name='conv3_1', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_2', padding='same')(x)
x = Convolution2D(256, (3, 3), activation='relu', name='conv3_3', padding='same')(x)
# VGG-19 has a fourth conv in block 3; VGG-16 does not.
if args.model == "vgg19":
    x = Convolution2D(256, (3, 3), activation='relu', name='conv3_4', padding='same')(x)
x = pooling_func(x)

x = Convolution2D(512, (3, 3), activation='relu', name='conv4_1', padding='same')(x)
Пример #21
0
    data_entrenamiento,
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')

validacion_generador = test_datagen.flow_from_directory(
    data_validacion,
    target_size=(altura, longitud),
    batch_size=batch_size,
    class_mode='categorical')

# Two-conv CNN classifier over (longitud, altura, 3) images.
cnn = Sequential()
cnn.add(
    Convolution2D(filtrosConv1,
                  tamano_filtro1,
                  padding="same",
                  input_shape=(longitud, altura, 3),
                  activation='relu'))
cnn.add(MaxPooling2D(pool_size=tamano_pool))

cnn.add(Convolution2D(filtrosConv2, tamano_filtro2, padding="same"))
cnn.add(MaxPooling2D(pool_size=tamano_pool))

cnn.add(Flatten())
cnn.add(Dense(256, activation='relu'))
cnn.add(Dropout(0.5))
cnn.add(Dense(clases, activation='softmax'))

cnn.compile(loss='categorical_crossentropy',
            # `lr` is the deprecated Keras-1 alias (removed in Keras 3);
            # `learning_rate` is the supported keyword.
            optimizer=optimizers.Adam(learning_rate=lr),
            metrics=['accuracy'])
Пример #22
0
def prepModel(target_size, Softmax_size):
    """Build and compile a 7-conv / 3-dense softmax image classifier.

    Args:
        target_size: input images are (target_size, target_size, 3).
        Softmax_size: number of output classes for the final softmax.

    Returns:
        A compiled Sequential model (categorical cross-entropy, Adam 1e-4).
    """

    padding = "same"

    model = Sequential()

    # 1st CNN
    model.add(
        Convolution2D(32, (3, 3),
                      input_shape=(target_size, target_size, 3),
                      padding=padding))
    model.add(myBN())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # 2nd CNN
    model.add(Convolution2D(64, (3, 3), padding=padding))
    model.add(myBN())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # 3rd CNN
    model.add(Convolution2D(128, (3, 3), padding=padding))
    model.add(myBN())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # 4th CNN
    model.add(Convolution2D(256, (3, 3), padding=padding))
    model.add(myBN())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # 5th CNN
    model.add(Convolution2D(256, (3, 3), padding=padding))
    model.add(myBN())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # 6th CNN (no pooling here; the 7th conv pools afterwards)
    model.add(Convolution2D(256, (3, 3), padding=padding))
    model.add(myBN())
    model.add(Activation('relu'))

    # 7th CNN
    model.add(Convolution2D(256, (3, 3), padding=padding))
    model.add(myBN())
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())

    # -3rd dense
    model.add(Dense(256, kernel_regularizer=None, bias_regularizer=None))
    model.add(myBN())
    model.add(Dropout(rate=0.5))
    model.add(Activation('relu'))

    # -2nd dense
    model.add(Dense(128, kernel_regularizer=None, bias_regularizer=None))
    model.add(myBN())
    model.add(Dropout(rate=0.5))
    model.add(Activation('relu'))

    # -1st dense
    model.add(Dense(Softmax_size, activation='softmax'))

    model.compile(
        loss='categorical_crossentropy',
        # `lr` is the deprecated Keras-1 alias (removed in Keras 3);
        # `learning_rate` is the supported keyword. Default LR would be 1e-3.
        optimizer=Adam(learning_rate=1e-4),
        metrics=['accuracy'])

    return model
def create_standalone_nvidia_cnn(activation='linear',
                                 input_shape=(60, 180, 3),
                                 output_shape=1):
    """Build and compile the NVIDIA end-to-end driving CNN.

    Architecture is from the nvidia paper mentioned in
    https://github.com/tanelp/self-driving-convnet/blob/master/train.py
    All conv/dense layers use ELU activations and L2(0.0005) weight decay;
    the model is compiled with MAE loss and Adam(3e-4).

    Args:
        activation: output-layer activation ('linear' for regression,
            'softmax' for classification).
        input_shape: (height, width, channels) of the input images.
        output_shape: number of output units.

    Returns:
        A compiled Model.
    """
    from tensorflow.keras.layers import Convolution2D
    from tensorflow.keras.regularizers import l2
    from tensorflow.keras.layers import Dense
    from tensorflow.keras.layers import Flatten
    from tensorflow.keras.layers import Input
    from tensorflow.keras.models import Model
    from tensorflow.keras.optimizers import Adam
    from tensorflow.keras.losses import mean_absolute_error

    inputs = Input(shape=input_shape)
    # Three strided 5x5 convs downsample; two 3x3 convs refine features.
    conv_1 = Convolution2D(24,
                           kernel_size=(5, 5),
                           kernel_regularizer=l2(0.0005),
                           strides=(2, 2),
                           padding="same",
                           activation="elu")(inputs)
    conv_2 = Convolution2D(36,
                           kernel_size=(5, 5),
                           kernel_regularizer=l2(0.0005),
                           strides=(2, 2),
                           padding="same",
                           activation="elu")(conv_1)
    conv_3 = Convolution2D(48,
                           kernel_size=(5, 5),
                           kernel_regularizer=l2(0.0005),
                           strides=(2, 2),
                           padding="same",
                           activation="elu")(conv_2)
    conv_4 = Convolution2D(64,
                           kernel_size=(3, 3),
                           kernel_regularizer=l2(0.0005),
                           padding="same",
                           activation="elu")(conv_3)
    conv_5 = Convolution2D(64,
                           kernel_size=(3, 3),
                           kernel_regularizer=l2(0.0005),
                           padding="same",
                           activation="elu")(conv_4)
    flatten = Flatten()(conv_5)
    # The paper's 1164-100-50-10 fully-connected head.
    dense_1 = Dense(1164, kernel_regularizer=l2(0.0005),
                    activation="elu")(flatten)
    dense_2 = Dense(100, kernel_regularizer=l2(0.0005),
                    activation="elu")(dense_1)
    dense_3 = Dense(50, kernel_regularizer=l2(0.0005),
                    activation="elu")(dense_2)
    dense_4 = Dense(10, kernel_regularizer=l2(0.0005),
                    activation="elu")(dense_3)
    out_dense = Dense(output_shape, activation=activation)(dense_4)

    model = Model(inputs=inputs, outputs=out_dense)
    # `lr` is the deprecated Keras-1 alias (removed in Keras 3);
    # `learning_rate` is the supported keyword.
    optimizer = Adam(learning_rate=3e-4)
    model.compile(loss=mean_absolute_error, optimizer=optimizer)

    return model
Пример #24
0
    def _build_net(self):
        """Construct the two Q-networks.

        Builds `self.model_eval` and `self.model_target` as identical
        Sequential CNNs mapping an observation of shape
        `self.observation_shape` to `self.n_actions` raw Q-values.
        (Presumably `model_target` is the DQN target network — synchronised
        elsewhere; confirm against the rest of the class.)
        """
        self.model_eval = Sequential([
            # First layer: a 2-D convolution over the observation
            # (e.g. (100, 80, 1) per the original author's note).
            Convolution2D(  # i.e. a Conv2D layer
                batch_input_shape=(None, self.observation_shape[0],
                                   self.observation_shape[1],
                                   self.observation_shape[2]),
                filters=15,  # number of filters (output channel count)
                kernel_size=5,  # kernel height/width; one int applies to both spatial dims
                strides=1,  # sliding step
                padding='same',  # 'same' keeps the spatial (x, y) size unchanged
                #data_format='channels_last',           # channel axis is last for image input
            ),
            Activation('relu'),
            # output (100, 80, 15)
            # Pooling layer 1 (max pooling) output shape (50, 40, 15)
            MaxPooling2D(
                pool_size=2,  # pooling window size
                strides=2,  # downsampling factor
                padding='same',  # Padding method
                #data_format='channels_last',
            ),
            # output (50, 40, 30) per the original author's note
            Convolution2D(
                30,
                5,
                strides=1,
                padding='same',
                #data_format='channels_last'
            ),
            Activation('relu'),
            # (10, 8, 30) per the original author's note
            # Positional args: pool_size=5, strides=5, padding='same'.
            MaxPooling2D(
                5,
                5,
                'same',
                #data_format='channels_first'
            ),
            # (10, 8, 30)
            Flatten(),
            # LSTM(
            #     units=1024,
            #     return_sequences=True,  # True: output at all steps. False: output as last step.
            #     stateful=True,          # True: the final state of batch1 is feed into the initial state of batch2
            # ),
            Dense(512),
            Activation('relu'),
            # One raw (linear) Q-value per action.
            Dense(self.n_actions),
        ])

        # Same architecture as model_eval.
        self.model_target = Sequential([
            Convolution2D(  # i.e. a Conv2D layer
                batch_input_shape=(None, self.observation_shape[0],
                                   self.observation_shape[1],
                                   self.observation_shape[2]),
                filters=15,  # number of filters (output channel count)
                kernel_size=5,  # kernel height/width; one int applies to both spatial dims
                strides=1,  # sliding step
                padding='same',  # 'same' keeps the spatial (x, y) size unchanged
                #data_format='channels_last',  # channel axis is last for image input
            ),
            Activation('relu'),
            # output (210, 160, 30) per the original author's note
            # Pooling layer 1 (max pooling) output shape (105, 80, 30)
            MaxPooling2D(
                pool_size=2,  # pooling window size
                strides=2,  # downsampling factor
                padding='same',  # Padding method
                #data_format='channels_last',
            ),
            # output (105, 80, 60) per the original author's note
            Convolution2D(
                30,
                5,
                strides=1,
                padding='same',
                #data_format='channels_last'
            ),
            Activation('relu'),
            # (21, 16, 60) per the original author's note
            MaxPooling2D(
                5,
                5,
                'same',
                #data_format='channels_first'
            ),
            # 21 * 16 * 60 = 20160
            Flatten(),
            # LSTM(
            #     units=1024,
            #     return_sequences=True,  # True: output at all steps. False: output as last step.
            #     stateful=True,          # True: the final state of batch1 is feed into the initial state of batch2
            # ),
            Dense(512),
            Activation('relu'),
            Dense(self.n_actions),
        ])
Пример #25
0
####   了解輸出feature map尺寸變化原理
'''

# %%
from tensorflow.keras import backend
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Convolution2D
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

# Demonstrate how stride and padding affect the output feature-map size,
# using a 13x13x1 input and 32 kernels of size (3, 3) in every case.
# NOTE(review): the original comments claimed a (6, 6) kernel and "same"
# padding; the code actually uses (3, 3) kernels, and where no padding
# argument is given the Conv2D default is 'valid'.
## Default ('valid') padding, strides=(1, 1)
classifier = Sequential()
inputs = Input(shape=(13, 13, 1))
x = Convolution2D(32, (3, 3), input_shape=(13, 13, 1))(inputs)
model = Model(inputs=inputs, outputs=x)
model.summary()
## Default ('valid') padding, strides=(2, 2)
classifier = Sequential()
inputs = Input(shape=(13, 13, 1))
x = Convolution2D(32, (3, 3), strides=(2, 2))(inputs)
model = Model(inputs=inputs, outputs=x)
model.summary()
## Explicit valid padding, strides=(1, 1)
classifier = Sequential()
inputs = Input(shape=(13, 13, 1))
x = Convolution2D(32, (3, 3), strides=(1, 1), padding="valid")(inputs)
model = Model(inputs=inputs, outputs=x)
model.summary()
## Valid padding, strides=(2, 2)
Пример #26
0
def get_vgg_face():
    """Build the VGG-Face network, load its pretrained weights from
    ./vgg_face_weights.h5, and return a Model that outputs the 2622-d
    descriptor layer (the layer just before the final softmax)."""
    # VGG-16 trunk: channel count per conv layer, with 'M' marking a
    # 2x2/2 max-pool; every conv is preceded by 1-pixel zero padding.
    trunk = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']

    model = Sequential()
    is_first_layer = True
    for spec in trunk:
        if spec == 'M':
            model.add(MaxPooling2D((2, 2), strides=(2, 2)))
            continue
        if is_first_layer:
            model.add(ZeroPadding2D((1, 1), input_shape=(224, 224, 3)))
            is_first_layer = False
        else:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(spec, (3, 3), activation="relu"))

    # Fully-convolutional classifier head ending in a 2622-way softmax.
    model.add(Convolution2D(4096, (7, 7), activation="relu"))
    model.add(Dropout(0.5))
    model.add(Convolution2D(4096, (1, 1), activation="relu"))
    model.add(Dropout(0.5))
    model.add(Convolution2D(2622, (1, 1)))
    model.add(Flatten())
    model.add(Activation("softmax"))

    model.load_weights("./vgg_face_weights.h5")
    # Expose the penultimate layer (pre-softmax descriptor) as output.
    return Model(inputs=model.layers[0].input, outputs=model.layers[-2].output)
Пример #27
0
def densenet_model(growth_rate=32,
                   nb_filter=64,
                   nb_layers=(6, 12, 24, 16),
                   reduction=0.0,
                   dropout_rate=0.0,
                   weight_decay=1e-4,
                   classes=16,
                   shape=(32, 32, 3),
                   batch_size=32,
                   with_se_layers=True):
    """Build a DenseNet-style classifier (DenseNet-121 layout by default),
    optionally inserting squeeze-and-excitation (SE) blocks after each
    dense/transition block.

    Args:
        growth_rate: channels added per layer inside a dense block.
        nb_filter: channels of the initial 7x7 stem convolution.
        nb_layers: layers per dense block; DenseNet-121 is (6, 12, 24, 16).
            (Default is a tuple rather than a list to avoid a shared
            mutable default; callers may still pass lists.)
        reduction: transition-block compression (compression = 1 - reduction).
        dropout_rate, weight_decay: regularisation forwarded to the blocks.
        classes: size of the final softmax.
        shape: input image shape.
        batch_size: static batch size attached to the stem padding layer.
        with_se_layers: when True, add SE blocks after dense/transition blocks.

    Returns:
        A Model mapping `shape` images to `classes` softmax probabilities.
    """
    # compute compression factor
    compression = 1.0 - reduction

    nb_dense_block = len(nb_layers)

    img_input = Input(shape=shape, name='data')

    # Stem: 7x7 conv with stride 2 (positional args: kernel_size=7, strides=2),
    # then BN/ReLU and a 3x3/2 max pool.
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding',
                      batch_size=batch_size)(img_input)
    x = Convolution2D(nb_filter, 7, 2, name='conv1', use_bias=False)(x)
    x = BatchNormalization(name='conv1_bn')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # All but the last dense block, each followed by a transition block.
    # `stage` is initialised here so `final_stage` below is defined even
    # when there is only one dense block and the loop never runs (the
    # original code raised NameError in that case).
    stage = 1
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx + 2
        x, nb_filter = dense_block(x,
                                   stage,
                                   nb_layers[block_idx],
                                   nb_filter,
                                   growth_rate,
                                   dropout_rate=dropout_rate,
                                   weight_decay=weight_decay)

        if with_se_layers:
            x = se_block(x, stage, 'dense', nb_filter)

        # Add transition_block
        x = transition_block(x,
                             stage,
                             nb_filter,
                             compression=compression,
                             dropout_rate=dropout_rate,
                             weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

        if with_se_layers:
            x = se_block(x, stage, 'transition', nb_filter)

    # Final dense block has no transition afterwards.
    final_stage = stage + 1
    x, nb_filter = dense_block(x,
                               final_stage,
                               nb_layers[-1],
                               nb_filter,
                               growth_rate,
                               dropout_rate=dropout_rate,
                               weight_decay=weight_decay)

    if with_se_layers:
        x = se_block(x, final_stage, 'dense', nb_filter)

    # Classification head: BN/ReLU, global average pool, dense softmax.
    x = BatchNormalization(name='conv_final_blk_bn')(x)
    x = Activation('relu', name='relu_final_blk')(x)
    x = GlobalAveragePooling2D(name='pool_final')(x)
    x = Dense(classes, name='fc6')(x)
    output = Activation('softmax', name='prob')(x)

    return Model(inputs=img_input, outputs=output)
img_cols = 28
nb_filters = 32
batch_size = 128
nb_epoch = 2
nb_classes = 10

pool_size = (2, 2)
# convolution kernel size
kernel_size = (3, 3)

# MNIST-style convnet: a flat (img_rows * img_cols) vector is reshaped to
# (img_rows, img_cols, 1), then passed through stacked 3x3 convolutions
# with max pooling, ending in two dense layers (head continues below).
model = Sequential()
model.add(
    Reshape((img_rows, img_cols, 1),
            input_shape=[img_rows * img_cols],
            name="input"))
model.add(Convolution2D(nb_filters, kernel_size, activation='relu'))

model.add(Convolution2D(nb_filters, kernel_size, activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))

model.add(Convolution2D(2 * nb_filters, kernel_size, activation='relu'))

model.add(Convolution2D(2 * nb_filters, kernel_size, activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))

model.add(Convolution2D(2 * nb_filters, kernel_size, activation='relu'))
model.add(MaxPooling2D(pool_size=pool_size))

model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dense(64, activation='relu'))
Пример #29
0
                                                    target_size=(height,
                                                                 width),
                                                    batch_size=batch_size,
                                                    class_mode='categorical')

# Validation data fed from a directory of class subfolders.
validation_generator = test_datagen.flow_from_directory(
    directory=data_validation,
    target_size=(height, width),
    batch_size=batch_size,
    class_mode='categorical')

# Three-conv CNN classifier over (width, height, 3) images.
cnn = Sequential()
cnn.add(
    Convolution2D(filtersConv1,
                  filterSize1,
                  padding="same",
                  input_shape=(width, height, 3),
                  activation='relu'))
cnn.add(MaxPooling2D(pool_size=poolSize))

cnn.add(Convolution2D(filtersConv2, filterSize2, padding="same"))
cnn.add(MaxPooling2D(pool_size=poolSize))

cnn.add(Convolution2D(filtersConv3, filterSize2, padding="same"))
cnn.add(MaxPooling2D(pool_size=poolSize))

cnn.add(Flatten())
cnn.add(Dense(128, activation='relu'))
cnn.add(Dropout(0.5))
cnn.add(Dense(classes, activation='softmax'))
# Sanity-check dataset shapes/labels and preview one training image.
print(te_img.shape)
print(te_lab.shape)
print(tr_lab)
print(te_lab)
plt.imshow(tr_img[18],cmap='gray')
plt.show()
print(tr_lab[0])

"""Custom Model"""

# Ensure a 4-D (N, 512, 512, 3) tensor for the conv net.
tr_img = tr_img.reshape(-1,512,512,3)

# Define the lesion classifier: six conv/pool stages with dropout after
# the 3rd and 6th (architecture described separately by the author).
from tensorflow.keras.layers import Dropout
lesion_Classifier=Sequential()
lesion_Classifier.add(Convolution2D(8,(3,3),input_shape=(512,512,3),activation='relu'))
lesion_Classifier.add(MaxPooling2D(pool_size=(2,2)))
lesion_Classifier.add(Convolution2D(16,(3,3),activation='relu'))
lesion_Classifier.add(MaxPooling2D(pool_size=(2,2)))
lesion_Classifier.add(Convolution2D(16,(3,3),activation='relu'))
lesion_Classifier.add(MaxPooling2D(pool_size=(2,2)))
lesion_Classifier.add(Dropout(0.3))
lesion_Classifier.add(Convolution2D(32,(3,3),activation='relu'))
lesion_Classifier.add(MaxPooling2D(pool_size=(2,2)))
lesion_Classifier.add(Convolution2D(32,(3,3),activation='relu'))
lesion_Classifier.add(MaxPooling2D(pool_size=(2,2)))
lesion_Classifier.add(Convolution2D(32,(3,3),activation='relu'))
lesion_Classifier.add(MaxPooling2D(pool_size=(2,2)))
lesion_Classifier.add(Dropout(0.3))
lesion_Classifier.add(Convolution2D(64,(3,3),activation='relu'))
lesion_Classifier.add(MaxPooling2D(pool_size=(2,2)))