def get_unet():
    """Build a 5-level U-Net for 4-channel 1024x1024 inputs.

    Returns a Keras model producing a single-channel map with a linear
    final activation (no sigmoid/softmax; pair with an appropriate loss
    at compile time).

    Fixes vs. the previous revision: the first decoder stage normalised
    the bottleneck tensor (`conv6`) instead of its own convolutions, and
    the next stage upsampled the encoder tensor `bn10` instead of the
    decoder output — which left the 32*feats bottleneck and the first
    decoder block entirely disconnected from the output graph.
    """
    concat_axis = 3  # channels-last
    inputs = layers.Input(shape=(1024, 1024, 4))

    feats = 16

    # ---------------- Encoder ----------------
    bn0 = BatchNormalization(axis=3)(inputs)
    conv1 = layers.Conv2D(feats, (3, 3),
                          activation='relu',
                          padding='same',
                          name='conv1_1')(bn0)
    bn1 = BatchNormalization(axis=3)(conv1)
    conv1 = layers.Conv2D(feats, (3, 3), activation='relu',
                          padding='same')(bn1)
    bn2 = BatchNormalization(axis=3)(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(bn2)

    conv2 = layers.Conv2D(2 * feats, (3, 3), activation='relu',
                          padding='same')(pool1)
    bn3 = BatchNormalization(axis=3)(conv2)
    conv2 = layers.Conv2D(2 * feats, (3, 3), activation='relu',
                          padding='same')(bn3)
    bn4 = BatchNormalization(axis=3)(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(bn4)

    conv3 = layers.Conv2D(4 * feats, (3, 3), activation='relu',
                          padding='same')(pool2)
    bn5 = BatchNormalization(axis=3)(conv3)
    conv3 = layers.Conv2D(4 * feats, (3, 3), activation='relu',
                          padding='same')(bn5)
    bn6 = BatchNormalization(axis=3)(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(bn6)

    conv4 = layers.Conv2D(8 * feats, (3, 3), activation='relu',
                          padding='same')(pool3)
    bn7 = BatchNormalization(axis=3)(conv4)
    conv4 = layers.Conv2D(8 * feats, (3, 3), activation='relu',
                          padding='same')(bn7)
    bn8 = BatchNormalization(axis=3)(conv4)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(bn8)

    conv5 = layers.Conv2D(16 * feats, (3, 3), activation='relu',
                          padding='same')(pool4)
    bn9 = BatchNormalization(axis=3)(conv5)
    conv5 = layers.Conv2D(16 * feats, (3, 3), activation='relu',
                          padding='same')(bn9)
    bn10 = BatchNormalization(axis=3)(conv5)
    pool5 = layers.MaxPooling2D(pool_size=(2, 2))(bn10)

    # ---------------- Bottleneck ----------------
    conv6 = layers.Conv2D(32 * feats, (3, 3), activation='relu',
                          padding='same')(pool5)
    bn11 = BatchNormalization(axis=3)(conv6)
    conv6 = layers.Conv2D(32 * feats, (3, 3), activation='relu',
                          padding='same')(bn11)
    bn12 = BatchNormalization(axis=3)(conv6)

    # ------- Decoder (skip connections from the encoder) -------
    up5 = layers.UpSampling2D(size=(2, 2))(bn12)
    cat5 = layers.concatenate([up5, conv5], axis=concat_axis)
    dec5 = layers.Conv2D(16 * feats, (3, 3), activation='relu',
                         padding='same')(cat5)
    bn13 = BatchNormalization(axis=3)(dec5)   # FIX: was BatchNormalization(conv6)
    dec5 = layers.Conv2D(16 * feats, (3, 3), activation='relu',
                         padding='same')(bn13)  # FIX: was fed bn12
    bn14 = BatchNormalization(axis=3)(dec5)   # FIX: was BatchNormalization(conv6)

    up4 = layers.UpSampling2D(size=(2, 2))(bn14)  # FIX: was bn10 (bypassed bottleneck)
    cat4 = layers.concatenate([up4, conv4], axis=concat_axis)
    dec4 = layers.Conv2D(8 * feats, (3, 3), activation='relu',
                         padding='same')(cat4)
    bn15 = BatchNormalization(axis=3)(dec4)
    dec4 = layers.Conv2D(8 * feats, (3, 3), activation='relu',
                         padding='same')(bn15)
    bn16 = BatchNormalization(axis=3)(dec4)

    up3 = layers.UpSampling2D(size=(2, 2))(bn16)
    cat3 = layers.concatenate([up3, conv3], axis=concat_axis)
    dec3 = layers.Conv2D(4 * feats, (3, 3), activation='relu',
                         padding='same')(cat3)
    bn17 = BatchNormalization(axis=3)(dec3)
    dec3 = layers.Conv2D(4 * feats, (3, 3), activation='relu',
                         padding='same')(bn17)
    bn18 = BatchNormalization(axis=3)(dec3)

    up2 = layers.UpSampling2D(size=(2, 2))(bn18)
    cat2 = layers.concatenate([up2, conv2], axis=concat_axis)
    dec2 = layers.Conv2D(2 * feats, (3, 3), activation='relu',
                         padding='same')(cat2)
    bn19 = BatchNormalization(axis=3)(dec2)
    dec2 = layers.Conv2D(2 * feats, (3, 3), activation='relu',
                         padding='same')(bn19)
    bn20 = BatchNormalization(axis=3)(dec2)

    up1 = layers.UpSampling2D(size=(2, 2))(bn20)
    cat1 = layers.concatenate([up1, conv1], axis=concat_axis)
    dec1 = layers.Conv2D(feats, (3, 3), activation='relu',
                         padding='same')(cat1)
    bn21 = BatchNormalization(axis=3)(dec1)
    dec1 = layers.Conv2D(feats, (3, 3), activation='relu',
                         padding='same')(bn21)
    bn22 = BatchNormalization(axis=3)(dec1)

    # 1x1 projection to a single channel, linear activation.
    conv10 = layers.Conv2D(1, (1, 1))(bn22)

    model = models.Model(inputs=inputs, outputs=conv10)

    return model
# Overlap input preprocessing with model execution on the validation set.
# NOTE(review): this chunk is a fragment — `val_ds`, `train_ds`, `AUTOTUNE`,
# `img_height`, `img_width`, `Sequential`, `keras` and `layers` must be
# defined earlier in the file.
val_ds = val_ds.prefetch(buffer_size=AUTOTUNE)

# Scale uint8 pixels to [0, 1]. ("nomalization" is a pre-existing typo in
# the identifier; kept as-is since later, unseen code may reference it.)
nomalization_layer = layers.experimental.preprocessing.Rescaling(1./255)
nomalized_ds = train_ds.map(lambda x, y: (nomalization_layer(x), y))
image_batch, labels_batch = next(iter(nomalized_ds))
first_img = image_batch[0]

num_classes = 5

# Classifier with in-model rescaling and train-time augmentation.
# NOTE(review): the model already rescales by 1/255 — feeding it the
# pre-scaled `nomalized_ds` above would rescale twice; confirm which
# dataset fit() receives. The input_shape on the RandomFlip layer is
# redundant (only the first layer's input_shape is used).
model = Sequential([
    layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
    layers.experimental.preprocessing.RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
    layers.experimental.preprocessing.RandomRotation(0.1),
    layers.experimental.preprocessing.RandomZoom(0.1),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(128, activation='relu'),
    layers.Dense(32, kernel_regularizer=keras.regularizers.l2(0.001), activation='relu'),
    layers.Dense(16, activation='relu'),
    layers.Dense(num_classes)  # logits; pair with a from_logits=True loss
])

model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
# ---- Example #3 (separator line from the scraped source; original "Пример #3", score 0) ----
    # NOTE(review): orphaned body of a CIFAR-style `unpickle(file)` helper —
    # the `def` line was lost when this snippet was extracted. Loads a
    # pickled batch whose keys are byte strings (encoding='bytes').
    # `dict` shadows the builtin, and `pickle.load` must only be used on
    # trusted files.
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
        return dict


# Load the first CIFAR-10 training batch and prepare (x_train, y_train).
train1 = unpickle('./data_batch_1')
# CIFAR-10 stores each image as a 3072-byte row laid out channel-first
# (1024 R, then 1024 G, then 1024 B). Reshaping straight to
# (10000, 32, 32, 3) interleaves channels with pixel positions and yields
# scrambled images — reshape channel-first, then transpose to channels-last.
x_train = train1[b'data'].reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1)
y_train = train1[b'labels']
y_train = np.array(y_train, dtype='uint8')
y_train = np.expand_dims(y_train, axis=1)  # shape (10000, 1)
x_train = x_train / 255.0  # scale pixels to [0, 1] (float64)

# Baseline CIFAR-10 classifier: two 5x5 conv stages and a small dense head.
inputs = tf.keras.Input(shape=(32, 32, 3))
x = inputs
x = layers.Conv2D(16, 5, padding='same', activation='relu')(x)
x = layers.MaxPooling2D(pool_size=2, strides=2)(x)
x = layers.Conv2D(16, 5, padding='same', activation='relu')(x)
x = layers.Flatten()(x)
x = layers.Dense(16)(x)
x = layers.Dense(10, activation='softmax')(x)

outputs = x
model4_1 = tf.keras.Model(inputs, outputs)
model4_1.summary()
''' 모델 조정 '''
# ^ Korean: "model tuning" — second, adjusted variant of the model above.
# NOTE(review): this snippet is truncated — the dense head and Model
# construction for `x2` never appear in this chunk.
inputs = tf.keras.Input(shape=(32, 32, 3))
x2 = inputs
x2 = layers.Conv2D(32, 3, activation='relu', padding='valid')(x2)
x2 = layers.MaxPooling2D(2, 2)(x2)
x2 = layers.Conv2D(12, 3, activation='relu', padding='valid')(x2)
x2 = layers.Flatten()(x2)
def vgg_encoder(input_shape=(224, 224, 3)):
    """VGG16 convolutional encoder (no dense head).

    Builds the five conv blocks of VGG16 and returns the input tensor
    together with the output of each block's pooling stage, suitable as
    multi-scale skip features.
    """
    image_input = layers.Input(shape=input_shape)

    # (number of 3x3 conv layers, filters) for VGG16 blocks 1-5.
    block_specs = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]

    x = image_input
    feature_maps = []
    for block_idx, (n_convs, filters) in enumerate(block_specs, start=1):
        for conv_idx in range(1, n_convs + 1):
            x = layers.Conv2D(filters, (3, 3),
                              activation='relu',
                              padding='same',
                              name='block{}_conv{}'.format(block_idx, conv_idx))(x)
        x = layers.MaxPooling2D((2, 2), strides=(2, 2),
                                name='block{}_pool'.format(block_idx))(x)
        feature_maps.append(x)

    return image_input, feature_maps
# ---- Example #5 (separator line from the scraped source; original "Пример #5", score 0) ----
# Preview the first 25 training images with their class names.
# NOTE(review): relies on `train_images`, `train_labels`, `test_images`,
# `test_labels`, `class_names`, `plt`, `models` and `layers` defined
# earlier in the file.
for i in range(25):
    plt.subplot(5, 5, i + 1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    plt.imshow(train_images[i])
    plt.xlabel(class_names[train_labels[i][0]])
plt.show()

# Scale pixel values to [0, 1] (done after plotting; imshow handled the
# original uint8 values directly).
train_images = train_images / 255.0
test_images = test_images / 255.0

# Small CIFAR-10 CNN: three conv stages, then a dense classifier with a
# softmax output matched to the sparse categorical cross-entropy below.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

model.fit(train_images, train_labels, epochs=10)

# Final held-out evaluation.
test_loss, test_acc = model.evaluate(test_images, test_labels)
    def __init__(self, input_shape, embed_size=256):
        """Build a VGG16-style backbone ending in an `embed_size`-wide
        un-activated Dense layer (softmax deliberately omitted).

        :param input_shape: e.g. [32, 32, 3]
        :param embed_size: width of the final Dense layer
        """
        super(VGG16, self).__init__()

        weight_decay = 0.000
        self.num_classes = embed_size

        model = models.Sequential()

        def add_conv(filters, dropout=None, first=False):
            # One conv unit: 3x3 conv (L2-regularised), ReLU, batch norm,
            # optional dropout. `first` attaches input_shape to the conv.
            kwargs = {'padding': 'same',
                      'kernel_regularizer': regularizers.l2(weight_decay)}
            if first:
                kwargs['input_shape'] = input_shape
            model.add(layers.Conv2D(filters, (3, 3), **kwargs))
            model.add(layers.Activation('relu'))
            model.add(layers.BatchNormalization())
            if dropout is not None:
                model.add(layers.Dropout(dropout))

        # Block 1
        add_conv(64, dropout=0.3, first=True)
        add_conv(64)
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        # Block 2
        add_conv(128, dropout=0.4)
        add_conv(128)
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        # Block 3
        add_conv(256, dropout=0.4)
        add_conv(256, dropout=0.4)
        add_conv(256)
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        # Block 4
        add_conv(512, dropout=0.4)
        add_conv(512, dropout=0.4)
        add_conv(512)
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))

        # Block 5
        add_conv(512, dropout=0.4)
        add_conv(512, dropout=0.4)
        add_conv(512)
        model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        model.add(layers.Dropout(0.5))

        # Embedding head.
        model.add(layers.Flatten())
        model.add(layers.Dense(512, kernel_regularizer=regularizers.l2(weight_decay)))
        model.add(layers.Activation('relu'))
        model.add(layers.BatchNormalization())

        model.add(layers.Dropout(0.5))
        model.add(layers.Dense(self.num_classes))  # logits / embedding

        self.model = model
# ---- Example #7 (separator line from the scraped source; original "Пример #7", score 0) ----
# Build a small CNN with the functional API.
# NOTE(review): `dense` below is created but never used in this chunk, and
# `CenterCrop` / `Rescaling` are used without a visible import — presumably
# imported from keras preprocessing layers earlier in the file; confirm.
dense = keras.layers.Dense(units=16)
# Inputs are RGB images of arbitrary spatial size.
inputs = keras.Input(shape=(None, None, 3))

from tensorflow.keras import layers
# Center-crop images to 101 x 200 to fit sample size.
x = CenterCrop(height=101, width=200)(inputs)
# Rescale pixel values to [0, 1].
x = Rescaling(scale=1. / 255)(x)
# Two conv stages with max pooling in between.
x = layers.Conv2D(filters=32,
                  kernel_size=(3, 3),
                  padding='SAME',
                  activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32,
                  kernel_size=(3, 3),
                  padding='SAME',
                  activation='relu')(x)
# Global average pooling gives flat feature vectors regardless of input size.
x = layers.GlobalAveragePooling2D()(x)
# Small dense trunk before the classifier.
x = layers.Dense(20, activation='relu')(x)
# Softmax classifier head.
num_classes = 10
outputs = layers.Dense(num_classes, activation='softmax')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()
# Compile and keep metrics (the compile call is truncated in this chunk).
model.compile(optimizer='adam',
# ---- Example #8 (separator line from the scraped source; original "Пример #8", score 0) ----
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import datasets


# Load MNIST. NOTE(review): the arrays are loaded but never used within
# this chunk — training presumably happens elsewhere.
(train_x, train_y), (test_x, test_y) = datasets.mnist.load_data()


# Two conv blocks (conv-relu x2, pool, dropout) for 28x28x1 inputs.
inputs = layers.Input((28, 28, 1))
net = layers.Conv2D(32, (3, 3), padding='SAME')(inputs)
net = layers.Activation('relu')(net)
net = layers.Conv2D(32, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.MaxPooling2D(pool_size=(2, 2))(net)
net = layers.Dropout(0.25)(net)

net = layers.Conv2D(64, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.Conv2D(64, (3, 3), padding='SAME')(net)
net = layers.Activation('relu')(net)
net = layers.MaxPooling2D(pool_size=(2, 2))(net)
net = layers.Dropout(0.25)(net)

# Dense classifier head with a softmax output.
net = layers.Flatten()(net)
net = layers.Dense(512)(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(0.5)(net)
net = layers.Dense(10)(net)  # num_classes
net = layers.Activation('softmax')(net)

model = tf.keras.Model(inputs=inputs, outputs=net, name='Basic_CNN')
def build_BiFPN(features, num_channels, id, freeze_bn=False):
    """One BiFPN (bidirectional feature pyramid) fusion stage over P3-P5.

    :param features: for id == 0, backbone maps (_, _, C3, C4, C5);
        otherwise the (P3, P4, P5) outputs of the previous stage.
    :param num_channels: channel width used for every pyramid level.
    :param id: stage index — selects backbone projection vs. pass-through
        and tags layer names.
    :param freeze_bn: forwarded to ConvBlock / DepthwiseConvBlock.
    :return: fused (P3_out, P4_out, P5_out).

    NOTE(review): the P6/P7 levels are commented out throughout, so this
    variant fuses only three levels; `id` shadows the builtin of the same
    name (pre-existing).
    """
    if id == 0:
        # First stage: project backbone features to num_channels with 1x1 convs.
        _, _, C3, C4, C5 = features
        P3_in = ConvBlock(num_channels,
                          kernel_size=1,
                          strides=1,
                          freeze_bn=freeze_bn,
                          name='BiFPN_{}_P3'.format(id))(C3)
        P4_in = ConvBlock(num_channels,
                          kernel_size=1,
                          strides=1,
                          freeze_bn=freeze_bn,
                          name='BiFPN_{}_P4'.format(id))(C4)
        P5_in = ConvBlock(num_channels,
                          kernel_size=1,
                          strides=1,
                          freeze_bn=freeze_bn,
                          name='BiFPN_{}_P5'.format(id))(C5)
        # P6_in = ConvBlock(num_channels, kernel_size=3, strides=2, freeze_bn=freeze_bn, name='BiFPN_{}_P6'.format(id))(
        #     C5)
        # P7_in = ConvBlock(num_channels, kernel_size=3, strides=2, freeze_bn=freeze_bn, name='BiFPN_{}_P7'.format(id))(
        #     P6_in)
    else:
        # Later stages: re-project the previous stage's pyramid outputs.
        P3_in, P4_in, P5_in = features
        P3_in = ConvBlock(num_channels,
                          kernel_size=1,
                          strides=1,
                          freeze_bn=freeze_bn,
                          name='BiFPN_{}_P3'.format(id))(P3_in)
        P4_in = ConvBlock(num_channels,
                          kernel_size=1,
                          strides=1,
                          freeze_bn=freeze_bn,
                          name='BiFPN_{}_P4'.format(id))(P4_in)
        P5_in = ConvBlock(num_channels,
                          kernel_size=1,
                          strides=1,
                          freeze_bn=freeze_bn,
                          name='BiFPN_{}_P5'.format(id))(P5_in)
        # P6_in = ConvBlock(num_channels, kernel_size=1, strides=1, freeze_bn=freeze_bn, name='BiFPN_{}_P6'.format(id))(
        #     P6_in)
        # P7_in = ConvBlock(num_channels, kernel_size=1, strides=1, freeze_bn=freeze_bn, name='BiFPN_{}_P7'.format(id))(
        #     P7_in)

    # Top-down path: upsample the coarser level, add, refine.
    # P7_U = layers.UpSampling2D()(P7_in)
    # P6_td = layers.Add()([P7_U, P6_in])
    # P6_td = DepthwiseConvBlock(kernel_size=3, strides=1, freeze_bn=freeze_bn, name='BiFPN_{}_U_P6'.format(id))(P6_td)
    # P6_U = layers.UpSampling2D()(P6_in)
    # P5_td = layers.Add()([P6_U, P5_in])
    # P5_td = DepthwiseConvBlock(kernel_size=3, strides=1, freeze_bn=freeze_bn, name='BiFPN_{}_U_P5'.format(id))(P5_td)
    P5_U = layers.UpSampling2D()(P5_in)
    P4_td = layers.Add()([P5_U, P4_in])
    P4_td = DepthwiseConvBlock(kernel_size=3,
                               strides=1,
                               freeze_bn=freeze_bn,
                               name='BiFPN_{}_U_P4'.format(id))(P4_td)
    P4_U = layers.UpSampling2D()(P4_td)
    P3_out = layers.Add()([P4_U, P3_in])
    P3_out = DepthwiseConvBlock(kernel_size=3,
                                strides=1,
                                freeze_bn=freeze_bn,
                                name='BiFPN_{}_U_P3'.format(id))(P3_out)
    # Bottom-up path: downsample the finer level, add (including the
    # top-down intermediate and the input), refine.
    P3_D = layers.MaxPooling2D(strides=(2, 2))(P3_out)
    P4_out = layers.Add()([P3_D, P4_td, P4_in])
    P4_out = DepthwiseConvBlock(kernel_size=3,
                                strides=1,
                                freeze_bn=freeze_bn,
                                name='BiFPN_{}_D_P4'.format(id))(P4_out)
    P4_D = layers.MaxPooling2D(strides=(2, 2))(P4_out)
    P5_out = layers.Add()([P4_D, P5_in])
    P5_out = DepthwiseConvBlock(kernel_size=3,
                                strides=1,
                                freeze_bn=freeze_bn,
                                name='BiFPN_{}_D_P5'.format(id))(P5_out)
    # P5_D = layers.MaxPooling2D(strides=(2, 2))(P5_out)
    # P6_out = layers.Add()([P5_D, P6_in])
    # P6_out = DepthwiseConvBlock(kernel_size=3, strides=1, freeze_bn=freeze_bn, name='BiFPN_{}_D_P6'.format(id))(P6_out)
    # P6_D = layers.MaxPooling2D(strides=(2, 2))(P6_out)
    # P7_out = layers.Add()([P6_D, P7_in])
    # P7_out = DepthwiseConvBlock(kernel_size=3, strides=1, freeze_bn=freeze_bn, name='BiFPN_{}_D_P7'.format(id))(P7_out)

    return P3_out, P4_out, P5_out  #, P6_out , P7_out
# ---- Example #10 (separator line from the scraped source; original "Пример #10", score 0) ----
def encoder_block_h2(input_tensor, num_filters):
    """Run conv_block_h2, then 2x2 max-pool; return (pooled, pre-pool)."""
    features = conv_block_h2(input_tensor, num_filters)
    downsampled = layers.MaxPooling2D((2, 2), strides=(2, 2))(features)
    return downsampled, features
# ---- Example #11 (separator line from the scraped source; original "Пример #11", score 0) ----
def run(l, epochs=5):
    """Train a small two-conv CNN and persist its model and metric histories.

    :param l: pair [n, i] — n is the conv filter count, i a repetition
        index; both only tag the output file names.
    :param epochs: number of training epochs.
    :return: total parameter count of the trained model.

    Relies on module-level `train_images`, `train_labels`, `test_images`
    and `test_labels` being defined before this is called, and on the
    `data/...` output directories existing.
    """
    n, i = l  # plain tuple unpacking (was `[n, i] = l`); also dropped the
    # unused `loss_list` / `acc_list` accumulators.

    model = models.Sequential()
    model.add(
        layers.Conv2D(n, (3, 3),
                      activation='relu',
                      input_shape=train_images[0].shape,
                      padding="same"))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(
        layers.Conv2D(n, (3, 3),
                      activation='relu',
                      input_shape=train_images[0].shape))
    model.add(layers.MaxPooling2D((2, 2)))

    model.add(layers.Flatten())
    model.add(layers.Dense(32, activation='relu'))
    model.add(layers.Dense(10))  # logits; loss below uses from_logits=True

    def scheduler(epoch):
        # Constant 0.01 for two epochs, then cosine decay toward 0.
        # NOTE(review): divides by (epochs - 2) — would fail for epochs == 2,
        # but the scheduler is deliberately unused (see below).
        if epoch < 2:
            return 0.01
        else:
            return 0.01 * np.cos(np.pi / 2 * (epoch - 2) / (epochs - 2))

    # Deliberately NOT passed to fit(); kept in the code in case the reader
    # wishes to enable learning-rate scheduling.
    callback = tf.keras.callbacks.LearningRateScheduler(scheduler)

    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['accuracy'])

    history = model.fit(train_images,
                        train_labels,
                        epochs=epochs,
                        validation_data=(test_images, test_labels),
                        verbose=0)

    test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
    train_loss, train_acc = model.evaluate(train_images,
                                           train_labels,
                                           verbose=2)
    N = model.count_params()

    tag = str(n) + '_' + str(i)  # common suffix for every output file

    model.save('data/models/model' + tag)

    np.savetxt('data/loss_history/train_loss_hist' + tag,
               history.history['loss'])
    np.savetxt('data/acc_history/train_acc_hist' + tag,
               history.history['accuracy'])
    np.savetxt('data/loss_history/test_loss_hist' + tag,
               history.history['val_loss'])
    np.savetxt('data/acc_history/test_acc_hist' + tag,
               history.history['val_accuracy'])

    np.savetxt('data/losses/train_loss' + tag, [train_loss])
    np.savetxt('data/accuracies/train_acc' + tag, [train_acc])
    np.savetxt('data/losses/test_loss' + tag, [test_loss])
    np.savetxt('data/accuracies/test_acc' + tag, [test_acc])

    return N
# ---- Example #12 (separator line from the scraped source; original "Пример #12", score 0) ----
    # NOTE(review): tail of a conv-block helper (MobileNet-style width
    # multiplier `alpha`); the `def` line was lost during extraction.
    # `np.floor(...)` yields a float, but Keras expects an integer filter
    # count — presumably int(num_filters * alpha) was intended; confirm.
    x = layers.Conv2D(np.floor(num_filters * alpha),
                      kernel_size=kernel_size,
                      strides=strides,
                      use_bias=False,
                      padding='same')(x)
    x = layers.BatchNormalization(momentum=0.9997)(x)
    x = layers.Activation('relu')(x)
    return x


# Dispatch on `base_model` to build the requested architecture.
# NOTE(review): `base_model`, `keras`, `layers` and `res_net_block` must be
# defined earlier in the file; this chunk is a fragment.
if base_model == "FLNet":
    # Instantiate ResNet model (8 residual blocks over a small conv stem;
    # the checkpoint name calls it resnet8 despite the "FLNet" tag).
    inputs = keras.Input(shape=(32, 32, 3))
    x = layers.Conv2D(32, 3, activation='relu')(inputs)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.MaxPooling2D(3)(x)
    num_res_net_blocks = 8
    for i in range(num_res_net_blocks):
        x = res_net_block(x, 64, 3)
    x = layers.Conv2D(64, 3, activation='relu')(x)
    x = layers.GlobalAveragePooling2D()(x)
    x = layers.Dense(256, activation='relu')(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(10, activation='softmax')(x)
    model = keras.Model(inputs, outputs)

    # Save the freshly initialised weights so runs can restart identically.
    model.save_weights("./chkpts/init_resnet8.ckpt")

if base_model == "lenet":
    # Instantiate LeNet model (truncated in this chunk — only the input
    # layer is visible).
    inputs = keras.Input(shape=(32, 32, 3), name="input")
def tf_keras_model_fn(features, labels, mode, params):
    """Estimator `model_fn` wrapping a small Keras CNN for MNIST.

    :param features: batch of flattened images, shape (batch, 784).
    :param labels: one-hot label vectors (argmax is taken for metrics).
    :param mode: tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}.
    :param params: unused here; required by the Estimator interface.
    :return: tf.estimator.EstimatorSpec for the requested mode.
    """
    model = tf.keras.Sequential([
        layers.Reshape((28, 28, 1), input_shape=(784, )),
        layers.Conv2D(64,
                      3,
                      padding='same',
                      data_format='channels_last',
                      activation='relu'),
        layers.MaxPooling2D(pool_size=3,
                            strides=2,
                            padding='same',
                            data_format='channels_last'),
        layers.Conv2D(128,
                      3,
                      padding='same',
                      data_format='channels_last',
                      activation='relu'),
        layers.MaxPooling2D(pool_size=3,
                            strides=2,
                            padding='same',
                            data_format='channels_last'),
        layers.Conv2D(256,
                      3,
                      padding='same',
                      data_format='channels_last',
                      activation='relu'),
        layers.MaxPooling2D(pool_size=3,
                            strides=2,
                            padding='same',
                            data_format='channels_last'),
        layers.Flatten(),
        layers.Dense(1024, activation='relu'),
        layers.Dense(10)  # softmax is applied inside the cross-entropy loss, so no softmax activation here
    ])

    if mode == tf.estimator.ModeKeys.PREDICT:
        output = model(features, training=False)
        predicted_classes = tf.argmax(output, axis=1)
        return tf.estimator.EstimatorSpec(mode, predictions=predicted_classes)

    output = model(features, training=True)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels,
                                                               logits=output)
    loss = tf.reduce_mean(cross_entropy)

    if mode == tf.estimator.ModeKeys.TRAIN:
        # The choice of optimization algorithm matters.
        optimizer = tf.train.AdamOptimizer(1e-3)
        train_op = optimizer.minimize(loss,
                                      global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=loss,
                                          train_op=train_op)

    if mode == tf.estimator.ModeKeys.EVAL:
        # Define the accuracy metric.
        predicted_classes = tf.argmax(output, axis=1)
        accuracy = tf.metrics.accuracy(labels=tf.argmax(labels, axis=1),
                                       predictions=predicted_classes)
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss, eval_metric_ops={'accuracy': accuracy})
# ---- Example #14 (separator line from the scraped source; original "Пример #14", score 0) ----
def Unet():
    """Build a two-input U-Net.

    Inputs:
        ref_input: (1024, 1024, 2) reference imagery.
        gpm_input: (512, 512, 1) auxiliary GPM field, concatenated onto the
            encoder right after the first pooling stage, where spatial sizes
            match (512x512).

    Returns:
        tf.keras.Model mapping [ref_input, gpm_input] to a single-channel
        (1024, 1024, 1) map (ReLU-activated output, so non-negative).
    """
    concat_axis = 3  # channels-last
    ref_input = layers.Input(shape=(1024, 1024, 2))
    gpm_input = layers.Input(shape=(512, 512, 1))

    feats = 16

    # ---- Encoder stage 1: 1024 -> 512 ----
    bn0 = layers.BatchNormalization(axis=3)(ref_input)
    conv1 = layers.Conv2D(feats, (3, 3),
                          activation='relu',
                          padding='same',
                          name='conv1_1')(bn0)
    bn1 = layers.BatchNormalization(axis=3)(conv1)
    conv1 = layers.Conv2D(feats, (3, 3), activation='relu',
                          padding='same')(bn1)
    bn2 = layers.BatchNormalization(axis=3)(conv1)
    pool1 = layers.MaxPooling2D(pool_size=(2, 2))(bn2)

    # Fuse the auxiliary GPM channel at the matching 512x512 resolution.
    gpmadd = layers.concatenate([gpm_input, pool1], axis=concat_axis)
    gpmaddn = layers.BatchNormalization(axis=3)(gpmadd)

    # ---- Encoder stage 2: 512 -> 256 ----
    conv2 = layers.Conv2D(2 * feats, (3, 3), activation='relu',
                          padding='same')(gpmaddn)
    bn3 = layers.BatchNormalization(axis=3)(conv2)
    conv2 = layers.Conv2D(2 * feats, (3, 3), activation='relu',
                          padding='same')(bn3)
    bn4 = layers.BatchNormalization(axis=3)(conv2)
    pool2 = layers.MaxPooling2D(pool_size=(2, 2))(bn4)

    # ---- Encoder stage 3: 256 -> 128 ----
    conv3 = layers.Conv2D(4 * feats, (3, 3), activation='relu',
                          padding='same')(pool2)
    bn5 = layers.BatchNormalization(axis=3)(conv3)
    conv3 = layers.Conv2D(4 * feats, (3, 3), activation='relu',
                          padding='same')(bn5)
    bn6 = layers.BatchNormalization(axis=3)(conv3)
    pool3 = layers.MaxPooling2D(pool_size=(2, 2))(bn6)

    # ---- Encoder stage 4: 128 -> 64 ----
    conv4 = layers.Conv2D(8 * feats, (3, 3), activation='relu',
                          padding='same')(pool3)
    bn7 = layers.BatchNormalization(axis=3)(conv4)
    conv4 = layers.Conv2D(8 * feats, (3, 3), activation='relu',
                          padding='same')(bn7)
    bn8 = layers.BatchNormalization(axis=3)(conv4)
    pool4 = layers.MaxPooling2D(pool_size=(2, 2))(bn8)

    # ---- Encoder stage 5: 64 -> 32 ----
    conv5 = layers.Conv2D(16 * feats, (3, 3),
                          activation='relu',
                          padding='same')(pool4)
    bn9 = layers.BatchNormalization(axis=3)(conv5)
    conv5 = layers.Conv2D(16 * feats, (3, 3),
                          activation='relu',
                          padding='same')(bn9)
    bn10 = layers.BatchNormalization(axis=3)(conv5)
    pool5 = layers.MaxPooling2D(pool_size=(2, 2))(bn10)

    # ---- Bottleneck at 32x32 ----
    conv6 = layers.Conv2D(32 * feats, (3, 3),
                          activation='relu',
                          padding='same')(pool5)
    bn11 = layers.BatchNormalization(axis=3)(conv6)
    conv6 = layers.Conv2D(32 * feats, (3, 3),
                          activation='relu',
                          padding='same')(bn11)
    bn12 = layers.BatchNormalization(axis=3)(conv6)

    # ---- Decoder stage 1: 32 -> 64, skip from conv5 ----
    up_conv6 = layers.UpSampling2D(size=(2, 2))(bn12)
    up7 = layers.concatenate([up_conv6, conv5], axis=concat_axis)
    conv7 = layers.Conv2D(16 * feats, (3, 3),
                          activation='relu',
                          padding='same')(up7)
    # BUGFIX: these layers previously read stale tensors (conv6/bn12), and the
    # next upsampling read bn10, which left this entire decoder stage
    # disconnected from the graph (dead layers, skip connection unused).
    bn13 = layers.BatchNormalization(axis=3)(conv7)
    conv7 = layers.Conv2D(16 * feats, (3, 3),
                          activation='relu',
                          padding='same')(bn13)
    bn14 = layers.BatchNormalization(axis=3)(conv7)

    # ---- Decoder stage 2: 64 -> 128, skip from conv4 ----
    up_conv5 = layers.UpSampling2D(size=(2, 2))(bn14)  # BUGFIX: was bn10
    up6 = layers.concatenate([up_conv5, conv4], axis=concat_axis)
    conv6 = layers.Conv2D(8 * feats, (3, 3), activation='relu',
                          padding='same')(up6)
    bn15 = layers.BatchNormalization(axis=3)(conv6)
    conv6 = layers.Conv2D(8 * feats, (3, 3), activation='relu',
                          padding='same')(bn15)
    bn16 = layers.BatchNormalization(axis=3)(conv6)

    # ---- Decoder stage 3: 128 -> 256, skip from conv3 ----
    up_conv6 = layers.UpSampling2D(size=(2, 2))(bn16)
    up7 = layers.concatenate([up_conv6, conv3], axis=concat_axis)
    conv7 = layers.Conv2D(4 * feats, (3, 3), activation='relu',
                          padding='same')(up7)
    bn13 = layers.BatchNormalization(axis=3)(conv7)
    conv7 = layers.Conv2D(4 * feats, (3, 3), activation='relu',
                          padding='same')(bn13)
    bn14 = layers.BatchNormalization(axis=3)(conv7)

    # ---- Decoder stage 4: 256 -> 512, skip from conv2 ----
    up_conv7 = layers.UpSampling2D(size=(2, 2))(bn14)
    up8 = layers.concatenate([up_conv7, conv2], axis=concat_axis)
    conv8 = layers.Conv2D(2 * feats, (3, 3), activation='relu',
                          padding='same')(up8)
    bn15 = layers.BatchNormalization(axis=3)(conv8)
    conv8 = layers.Conv2D(2 * feats, (3, 3), activation='relu',
                          padding='same')(bn15)
    bn16 = layers.BatchNormalization(axis=3)(conv8)

    # ---- Decoder stage 5: 512 -> 1024, skip from conv1 ----
    up_conv8 = layers.UpSampling2D(size=(2, 2))(bn16)
    up9 = layers.concatenate([up_conv8, conv1], axis=concat_axis)
    conv9 = layers.Conv2D(feats, (3, 3), activation='relu',
                          padding='same')(up9)
    bn17 = layers.BatchNormalization(axis=3)(conv9)
    conv9 = layers.Conv2D(feats, (3, 3), activation='relu',
                          padding='same')(bn17)
    bn18 = layers.BatchNormalization(axis=3)(conv9)

    # 1x1 projection to a single non-negative output channel.
    conv10 = layers.Conv2D(1, (1, 1), activation='relu')(bn18)

    model = tf.keras.models.Model(inputs=[ref_input, gpm_input],
                                  outputs=conv10)

    return model
Пример #15
0
# Cast image data to float for the network; no pixel rescaling in this span.
X_train = X_train.astype('float64')

#%%
# Hold out the first `numVal` samples as a validation split.
numVal = 10000
X_val = X_train[0:numVal, :]
Y_val = Y_train[0:numVal:]  # NOTE(review): trailing ':' is a no-op; same as [0:numVal]

X_train_partial = X_train[numVal:, :]
Y_train_partial = Y_train[numVal:]
"""**Creating the CNN**"""

# Stacked-conv CNN over 32x32x3 images ending in a 10-way softmax.
network = models.Sequential()
network.add(
    layers.Conv2D(64, (3, 3), activation='relu', input_shape=(32, 32, 3)))
network.add(layers.Conv2D(64, (3, 3), activation='relu'))
network.add(layers.MaxPooling2D((2, 2)))
network.add(layers.Conv2D(64, (3, 3), activation='relu'))
# NOTE(review): Dense applied to a 4-D feature map acts per-pixel on the last
# axis here — unusual mid-CNN; presumably intentional, confirm.
network.add(layers.Dense(64, activation='relu'))
network.add(layers.MaxPooling2D((2, 2)))
network.add(layers.MaxPooling2D((2, 2)))
network.add(layers.Conv2D(64, (2, 2), activation='relu'))
network.add(layers.Conv2D(64, (1, 1), activation='relu'))
network.add(layers.Dense(64, activation='relu'))
network.add(layers.MaxPooling2D((1, 1)))  # NOTE(review): 1x1 pool is a no-op

network.add(layers.Flatten())
network.add(layers.Dense(64, activation='relu'))
network.add(layers.Dense(10, activation='softmax'))
network.compile(optimizer='Adam',
                loss='categorical_crossentropy',
# specify parameters
modelname = 'CNN_sum_K-32-32-64-128_KS-37-37-37-37_MP-12-22-22-32_DO-2-2-2-2-2_AD'
time_sound = 750  # input dimension 1 (time)
nfreqs = 99  # input dimension 2 (frequencies)

#------------------------------------------------------------------------------
# Define model architecture
#------------------------------------------------------------------------------
# Two parallel spectrogram branches (left/right audio channel) with shared
# architecture but separate weights, merged by element-wise addition.
# CNN 1 - left channel
in1 = layers.Input(
    shape=(time_sound, nfreqs,
           1))  # define input (rows, columns, channels (only one in my case))
model_l_conv1 = layers.Conv2D(32, (3, 7), activation='relu', padding='same')(
    in1)  # define first layer and input to the layer
# (1, 2) pooling halves only the frequency axis, preserving time resolution.
model_l_conv1_mp = layers.MaxPooling2D(pool_size=(1, 2))(model_l_conv1)
model_l_conv1_mp_do = layers.Dropout(0.2)(model_l_conv1_mp)

# CNN 1 - right channel
in2 = layers.Input(shape=(time_sound, nfreqs, 1))  # define input
model_r_conv1 = layers.Conv2D(32, (3, 7), activation='relu', padding='same')(
    in2)  # define first layer and input to the layer
model_r_conv1_mp = layers.MaxPooling2D(pool_size=(1, 2))(model_r_conv1)
model_r_conv1_mp_do = layers.Dropout(0.2)(model_r_conv1_mp)

# CNN 2 - merged
# Add() sums the two branches element-wise (the "sum" in the model name).
model_final_merge = layers.Add()([model_l_conv1_mp_do, model_r_conv1_mp_do])
model_final_conv1 = layers.Conv2D(32, (3, 7),
                                  activation='relu',
                                  padding='same')(model_final_merge)
model_final_conv1_mp = layers.MaxPooling2D(pool_size=(2, 2))(model_final_conv1)
Пример #17
0
def resnet_graph(input_image, architecture, stage5=False, train_bn=True):
    """Build a ResNet graph.
        architecture: Can be resnet50 or resnet101
        stage5: Boolean. If False, stage5 of the network is not created
        train_bn: Boolean. Train or freeze Batch Norm layers

    Returns the per-stage outputs [C1, C2, C3, C4, C5] (C5 is None when
    stage5 is False).
    """
    assert architecture in ["resnet50", "resnet101"]

    # Stage 1: 7x7 stem convolution + 3x3 max-pool.
    x = KL.ZeroPadding2D((3, 3))(input_image)
    x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)
    x = BatchNorm(name='bn_conv1')(x, training=train_bn)
    x = KL.Activation('relu')(x)
    C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)

    # Stage 2: one conv block (stride 1) followed by two identity blocks.
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a',
                   strides=(1, 1), train_bn=train_bn)
    for block_id in ('b', 'c'):
        x = identity_block(x, 3, [64, 64, 256], stage=2, block=block_id,
                           train_bn=train_bn)
    C2 = x

    # Stage 3: conv block plus three identity blocks.
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a',
                   train_bn=train_bn)
    for block_id in ('b', 'c', 'd'):
        x = identity_block(x, 3, [128, 128, 512], stage=3, block=block_id,
                           train_bn=train_bn)
    C3 = x

    # Stage 4: conv block plus 5 (resnet50) or 22 (resnet101) identity blocks,
    # labelled 'b', 'c', ... via chr(98 + i).
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a',
                   train_bn=train_bn)
    block_count = {"resnet50": 5, "resnet101": 22}[architecture]
    for i in range(block_count):
        x = identity_block(x, 3, [256, 256, 1024], stage=4,
                           block=chr(98 + i), train_bn=train_bn)
    C4 = x

    # Stage 5 (optional): conv block plus two identity blocks.
    if stage5:
        x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a',
                       train_bn=train_bn)
        for block_id in ('b', 'c'):
            x = identity_block(x, 3, [512, 512, 2048], stage=5,
                               block=block_id, train_bn=train_bn)
        C5 = x
    else:
        C5 = None

    return [C1, C2, C3, C4, C5]
def generator_model():
    """Build the image-to-image generator.

    Input: (IMAGE_HEIGHT, IMAGE_WIDTH, CHANNEL + 2) — presumably the image
    plus two auxiliary channels (e.g. a mask or condition map); TODO confirm
    against the data pipeline.
    Output: a 3-channel tanh-activated image at the input resolution.

    Structure: a strided-conv encoder with two skip connections, a stack of
    dilated convolutions (rates 2/4/8/16) at quarter resolution to enlarge
    the receptive field without further downsampling, and a transposed-conv
    decoder that adds the skips back in.

    (An earlier 9x9/5x5 encoder-decoder variant that lived here as a dead
    triple-quoted string has been removed.)
    """
    inputs = tf.keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, CHANNEL + 2))
    x = inputs

    # ---- Encoder ----
    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    res1 = x  # full-resolution skip

    x = layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    res2 = x  # half-resolution skip

    x = layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    # ---- Dilated convolutions: grow receptive field at fixed resolution ----
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same', dilation_rate=2)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', dilation_rate=4)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', dilation_rate=8)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same', dilation_rate=16)(x)
    x = layers.LeakyReLU()(x)

    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    # ---- Decoder ----
    # NOTE(review): the stride-1 'same' MaxPooling after each transposed conv
    # keeps the spatial size (it is a local-max smoothing, not a
    # downsampling) — presumably intentional; confirm.
    x = layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.add([x, res2])

    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.add([x, res1])

    x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(3, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.Activation('tanh')(x)
    # Identity Lambda — a functional no-op; presumably kept so the output
    # tensor gets its own layer. Harmless, left in place.
    outputs = Lambda(lambda x: x)(x)

    gen_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return gen_model
Пример #19
0
def LSTM_Unet():
    """U-Net-style segmentation network with a ReNet (bidirectional LSTM)
    bottleneck, producing a per-pixel softmax over NUM_CLASSES classes for
    496x496 single-channel inputs."""

    INPUT_SIZE = (496, 496, 1)

    KERNEL = 3
    NUM_CLASSES = 9
    POOL_SIZE = (2, 2)

    CONV_PARAMS = {
        'activation': 'relu',
        'padding': 'same',
        'kernel_initializer': 'he_normal'
    }

    net_input = layers.Input(INPUT_SIZE)

    # Contracting path: two 3x3 convs then a 2x2 max-pool at each level.
    # The pre-pool feature map of each level is saved as the skip connection.
    skips = []
    x = net_input
    for n_filters in (32, 64, 128, 256):
        x = layers.Conv2D(n_filters, KERNEL, **CONV_PARAMS)(x)
        x = layers.Conv2D(n_filters, KERNEL, **CONV_PARAMS)(x)
        skips.append(x)
        x = layers.MaxPooling2D(POOL_SIZE)(x)

    # ReNet bidirectional-LSTM cell at the bottleneck.
    x = ReNetCell(x)

    # Expanding path: upsample + 2x2 conv, concatenate the matching skip,
    # then two 3x3 convs per level (mirror of the contracting path).
    for n_filters, skip in zip((256, 128, 64, 32), reversed(skips)):
        upsampled = layers.UpSampling2D(size=POOL_SIZE)(x)
        up = layers.Conv2D(n_filters, 2, **CONV_PARAMS)(upsampled)
        x = layers.Concatenate(axis=3)([up, skip])
        x = layers.Conv2D(n_filters, KERNEL, **CONV_PARAMS)(x)
        x = layers.Conv2D(n_filters, KERNEL, **CONV_PARAMS)(x)

    # 1x1 projection to class logits, then per-pixel softmax.
    logits = layers.Conv2D(
        NUM_CLASSES,
        1,
        activation=None,
        padding='same',
        kernel_initializer='he_normal',
    )(x)
    probabilities = layers.Softmax(axis=-1)(logits)

    return models.Model(inputs=net_input, outputs=probabilities)
Пример #20
0
    def buildMyModelV4(self, shape, n_cls):
        """Build a triplet network whose embedding concatenates taps from
        several depths of one shared Sequential backbone.

        The backbone is grown layer by layer; at four points the current
        ``model.output`` is captured (layer05/layer04/layer03/layer02) so that
        separate functional models can expose those intermediate activations.
        Anchor/positive/negative inputs are then run through every tap model
        plus the final embedding, and all fifteen outputs are concatenated
        into a single tensor (for a multi-level triplet loss).

        Args:
            shape: input sample shape shared by the backbone and all three
                triplet inputs.
            n_cls: NOTE(review) currently unused — confirm whether it was
                meant to size an output layer.

        Returns:
            self, with self.model, self.input, self.output and self.e_len set.
        """
        model = Sequential()
        model.add(layers.Conv2D(256, (3, 3), padding='same',
                                input_shape=shape))
        # kernel_regularizer=l2(0.01)))
        # model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.MaxPooling2D())
        model.add(layers.Dropout(0.25))
        # Tap 1: 128-d sigmoid projection of the current backbone output.
        layer05 = layers.Dense(128, activation='sigmoid')(layers.Flatten()(
            model.output))
        model.add(layers.Conv2D(128, (3, 3), padding='same'))
        # kernel_regularizer=l2(0.01)))
        # model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.MaxPooling2D())
        model.add(layers.Dropout(0.25))
        # Tap 2: same pattern one block deeper.
        layer04 = layers.Dense(128, activation='sigmoid')(layers.Flatten()(
            model.output))
        model.add(layers.Conv2D(128, (3, 3), padding='same'))
        # kernel_regularizer=l2(0.01)))
        # model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.Dropout(0.25))
        model.add(layers.Conv2D(64, (3, 3), padding='same'))
        # kernel_regularizer=l2(0.01)))
        # model.add(layers.BatchNormalization())
        model.add(layers.Activation('relu'))
        model.add(layers.Dropout(0.25))
        model.add(layers.Flatten())
        model.add(layers.Dense(512, activation='sigmoid'))
        #    kernel_regularizer=l2(0.01)))
        model.add(layers.Dropout(0.25))
        # Taps 3 and 4: raw backbone outputs after the 512- and 256-unit
        # dense layers (no extra projection).
        layer03 = model.output
        model.add(layers.Dense(256, activation='sigmoid'))
        #    kernel_regularizer=l2(0.01)))
        model.add(layers.Dropout(0.25))
        layer02 = model.output
        model.add(layers.Dense(128, activation='sigmoid'))
        #    kernel_regularizer=l2(0.01)))

        # Segment lengths of the concatenated embedding, in tap order.
        self.e_len = [128, 128, 512, 256, 128]
        self.input = model.input
        self.output = model.output

        # Triplet inputs: anchor, positive, negative.
        input_a = layers.Input(shape=shape)
        input_p = layers.Input(shape=shape)
        input_n = layers.Input(shape=shape)

        # One functional model per tap, all sharing the backbone's weights.
        layer05_model = Model(inputs=model.input, outputs=layer05)
        layer05_a = layer05_model(input_a)
        layer05_p = layer05_model(input_p)
        layer05_n = layer05_model(input_n)

        layer04_model = Model(inputs=model.input, outputs=layer04)
        layer04_a = layer04_model(input_a)
        layer04_p = layer04_model(input_p)
        layer04_n = layer04_model(input_n)

        layer03_model = Model(inputs=model.input, outputs=layer03)
        layer03_a = layer03_model(input_a)
        layer03_p = layer03_model(input_p)
        layer03_n = layer03_model(input_n)

        layer02_model = Model(inputs=model.input, outputs=layer02)
        layer02_a = layer02_model(input_a)
        layer02_p = layer02_model(input_p)
        layer02_n = layer02_model(input_n)

        embed_model = Model(inputs=model.input, outputs=model.output)
        embed_a = embed_model(input_a)
        embed_p = embed_model(input_p)
        embed_n = embed_model(input_n)

        # All taps for all three inputs, concatenated for the loss function.
        concat = layers.Concatenate()([
            layer05_a, layer05_p, layer05_n, layer04_a, layer04_p, layer04_n,
            layer03_a, layer03_p, layer03_n, layer02_a, layer02_p, layer02_n,
            embed_a, embed_p, embed_n
        ])

        self.model = Model(inputs=[input_a, input_p, input_n], outputs=concat)
        return self
Пример #21
0
                               test_labels) = util.cifar10_load_data(dirname)

class_names = [
    'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
    'ship', 'truck'
]

# LeNet-5-style CNN for 32x32x3 CIFAR-10 images, with dropout after each conv.
model = models.Sequential()
model.add(
    layers.Conv2D(filters=6,
                  kernel_size=(5, 5),
                  strides=1,
                  activation='relu',
                  input_shape=(32, 32, 3)))
model.add(layers.Dropout(rate=0.25))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(
    layers.Conv2D(filters=16, kernel_size=(5, 5), strides=1,
                  activation='relu'))
model.add(layers.Dropout(rate=0.25))
model.add(layers.MaxPooling2D(pool_size=(2, 2), strides=2))
model.add(layers.Flatten())
model.add(layers.Dense(120, activation='relu'))
model.add(layers.Dense(84, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))  # emits probabilities
model.summary()

# BUGFIX: the final layer already applies softmax, so the loss receives
# probabilities, not logits; from_logits=True would apply softmax a second
# time and yield a systematically wrong (flattened) loss and gradient.
model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    metrics=['accuracy'])
Пример #22
0
def build_model(nx: Optional[int] = None,
                ny: Optional[int] = None,
                channels: int = 3,
                layer_depth: int = 3,
                filters_root: int = 64,
                kernel_size: int = 3,
                pool_size: int = 2,
                dropout_rate: float = 0.5,
                padding: str = "same",
                activation: Union[str, Callable] = "relu",
                norm_layer: str = 'instance') -> Model:
    """
    Constructs a U-Net model

    :param nx: (Optional) image size on x-axis
    :param ny: (Optional) image size on y-axis
    :param channels: number of channels of the input tensors
    :param layer_depth: total depth of unet (must be >= 1)
    :param filters_root: number of filters in top unet layer
    :param kernel_size: size of convolutional layers
    :param pool_size: size of maxplool layers
    :param dropout_rate: rate of dropout
    :param padding: padding to be used in convolutions
    :param activation: activation to be used
    :param norm_layer: normalization layer kind passed to ConvBlock
        (e.g. 'instance')

    :return: A TF Keras model
    """

    inputs = Input(shape=(nx, ny, channels), name="inputs")

    x = inputs
    contracting_layers = {}

    conv_params = dict(filters_root=filters_root,
                       kernel_size=kernel_size,
                       dropout_rate=dropout_rate,
                       padding=padding,
                       activation=activation,
                       norm_layer=norm_layer)

    # Contracting path: conv block, remember output for the skip, then pool.
    # BUGFIX: indices are now computed explicitly instead of reusing the loop
    # variable after the loop, which raised NameError when layer_depth == 1.
    bottom_idx = layer_depth - 1  # index of the bottleneck block
    for layer_idx in range(bottom_idx):
        x = ConvBlock(layer_idx, **conv_params)(x)
        contracting_layers[layer_idx] = x
        x = layers.MaxPooling2D((pool_size, pool_size))(x)

    # Bottleneck block at the bottom of the U.
    x = ConvBlock(bottom_idx, **conv_params)(x)

    # Expanding path: upconv, crop-concat the matching skip, conv block.
    for layer_idx in range(bottom_idx - 1, -1, -1):
        x = UpconvBlock(layer_idx, filters_root, kernel_size, pool_size,
                        padding, activation)(x)
        x = CropConcatBlock()(x, contracting_layers[layer_idx])
        x = ConvBlock(layer_idx, **conv_params)(x)

    # 1x1 projection back to the input channel count.
    x = layers.Conv2D(filters=channels,
                      kernel_size=(1, 1),
                      kernel_initializer=_get_kernel_initializer(
                          filters_root, kernel_size),
                      strides=1,
                      padding=padding)(x)

    outputs = layers.Activation("tanh", name="outputs")(x)
    model = Model(inputs, outputs, name="unet")

    return model
Пример #23
0
batch_size = 32

# Create model: single conv block + batch norm + max-pool, then two dense
# layers and an explicit softmax output for 28x28x1 MNIST digits.
model = models.Sequential()
model.add(layers.InputLayer(input_shape=(28, 28, 1)))  # input layer
model.add(
    layers.Conv2D(  # conv layer
        filters=32,
        strides=(1, 1),
        kernel_size=[3, 3],
        padding="same",
        activation='relu',
        kernel_initializer=tf.contrib.layers.xavier_initializer(
            uniform=False)))
model.add(layers.BatchNormalization())  # batch norm
model.add(layers.MaxPooling2D((2, 2)))  # max pooling
model.add(layers.Flatten())  # flatten
model.add(layers.Dense(784, activation='relu'))  # fully-connected 784
model.add(layers.Dense(10))  # fully-connected 10 (logits)
model.add(layers.Softmax())  # softmax output: model emits probabilities

# Compile model w/ Adam optimizer + cross entropy loss.
# BUGFIX: the model ends in an explicit Softmax layer, so the loss must take
# probabilities; from_logits=True here would apply softmax twice and produce
# a systematically wrong loss and gradient.
model.compile(optimizer=tf.keras.optimizers.Adam(lr=learning_rate),
              loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              metrics=['accuracy'])

# ## 2.2 Model Training {-}


# callback to test after each epoch
class TestCallback(tf.keras.callbacks.Callback):
Пример #24
0
# Cache and prefetch the validation set to hide input latency.
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

# NOTE(review): this layer is unused below — the model embeds its own
# Rescaling layer; presumably left over from the commented experiment.
normalization_layer = layers.experimental.preprocessing.Rescaling(1./255)

# normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
# image_batch, labels_batch = next(iter(normalized_ds))
# first_image = image_batch[0]
# # Notice the pixels values are now in `[0,1]`.
# print(np.min(first_image), np.max(first_image))

num_classes = 10

# Three conv blocks with (2, 4) pooling (halves height, quarters width),
# then a dense head emitting raw class logits (no softmax).
model = Sequential([
  layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
  layers.Conv2D(16, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(pool_size=(2,4)),
  layers.Conv2D(32, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(pool_size=(2,4)),
  layers.Conv2D(64, 3, padding='same', activation='relu'),
  layers.MaxPooling2D(pool_size=(2,4)),
  layers.Flatten(),
  layers.Dense(64, activation='relu'),
  layers.Dense(num_classes)
])



# from_logits=True is correct here: the final Dense has no activation.
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
Пример #25
0
    def build_train(self):
        """Prepare train/val/test spectrogram datasets and train the
        speech-command CNN, unless a saved model already exists on disk.

        Side effects: sets ``self.test_ds``; when no ``speech.model`` is
        present, builds/trains a model, plots the loss history, and saves
        the model to ``speech.model``.
        """
        AUTOTUNE = self.AUTOTUNE
        spectrogram_ds = self.spectrogram_ds
        commands = self.commands

        # repeat the training set preprocessing on the validation and test sets.
        train_ds = spectrogram_ds
        val_ds = self.preprocess_dataset(self.val_files)
        test_ds = self.preprocess_dataset(self.test_files)

        # Batch the training and validation sets for model training.
        batch_size = 64
        train_ds = train_ds.batch(batch_size)
        val_ds = val_ds.batch(batch_size)

        # Add dataset cache() and prefetch() operations to reduce read latency while training the model.
        train_ds = train_ds.cache().prefetch(AUTOTUNE)
        val_ds = val_ds.cache().prefetch(AUTOTUNE)

        self.test_ds = test_ds

        # Only train when there is no previously saved model.
        if not os.path.exists("speech.model"):
            # Peek at one example to discover the spectrogram input shape.
            for spectrogram, _ in spectrogram_ds.take(1):
                input_shape = spectrogram.shape
            print('Input shape:', input_shape)
            num_labels = len(commands)
            # Fit the normalization layer's statistics on the spectrograms.
            norm_layer = preprocessing.Normalization()
            norm_layer.adapt(spectrogram_ds.map(lambda x, _: x))
            # Small CNN over 32x32-resized, normalized spectrograms,
            # emitting raw logits (loss below uses from_logits=True).
            model = models.Sequential([
                layers.Input(shape=input_shape),
                preprocessing.Resizing(32, 32),
                norm_layer,
                layers.Conv2D(32, 3, activation='relu'),
                layers.Conv2D(64, 3, activation='relu'),
                layers.MaxPooling2D(),
                layers.Dropout(0.25),
                layers.Flatten(),
                layers.Dense(128, activation='relu'),
                layers.Dropout(0.5),
                layers.Dense(num_labels),
            ])

            model.summary()
            model.compile(
                optimizer=tf.keras.optimizers.Adam(),
                loss=tf.keras.losses.SparseCategoricalCrossentropy(
                    from_logits=True),
                metrics=['accuracy'],
            )

            EPOCHS = 10
            # Early stopping halts training after 2 epochs without improvement.
            history = model.fit(
                train_ds,
                validation_data=val_ds,
                epochs=EPOCHS,
                callbacks=tf.keras.callbacks.EarlyStopping(verbose=1,
                                                           patience=2),
            )

            # Plot train/validation loss curves.
            metrics = history.history
            fig4 = plt.figure()
            plt.plot(history.epoch, metrics['loss'], metrics['val_loss'])
            plt.legend(['loss', 'val_loss'])

            model.save('speech.model')
# Normalize the FER pixel data and shape the labels as a column vector.
faces = normalizeFacesData(FER_DATA)
faces = np.asarray(faces)
emotions = FER_DATA['emotion'].to_numpy()
emotions = np.expand_dims(emotions, -1)
#-------------------------------------------------------------
#KERAS MODEL
#-------------------------------------------------------------
# VGG-style stack over 48x48 grayscale faces: three blocks of three 3x3
# convs (32/64/128 filters) each followed by 2x2 max-pooling.
model = keras.Sequential()
model.add(
    layers.Conv2D(32, (3, 3),
                  padding='same',
                  activation='relu',
                  input_shape=(48, 48, 1)))
model.add(layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(layers.Conv2D(32, (3, 3), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))

model.add(layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(layers.Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))

model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2, 2)))

model.add(layers.Flatten()
          )  # this converts our 3D feature maps to 1D feature vectors
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(64, activation='relu'))
# NOTE(review): no classification head appears in this span — presumably the
# final emotion output layer and compile() follow elsewhere; confirm.
Пример #27
0
def Trainingresult_1():  #Q5.4
    """Train a LeNet-style CNN on CIFAR-10, save it, and plot accuracy/loss.

    Side effects: sets the module-level ``class_name`` list, saves the trained
    model to ``my_model.h5``, and writes the training curves to ``5/5_4.png``.
    """
    global class_name

    # Unlike mnist.load_data(), cifar10.load_data() always fetches the
    # CIFAR-10 dataset from its download page.
    (train_images, train_labels), (test_images, test_labels) = \
        datasets.cifar10.load_data()

    class_name = [
        'airplane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
        'ship', 'truck'
    ]

    # preprocessing: scale pixel values into [0, 1]
    train_x = train_images.astype('float32') / 255.0
    test_x = test_images.astype('float32') / 255.0

    # hyper-parameters
    lr = 0.001
    batch = 100
    n_epochs = 50

    # LeNet-5-style architecture: two conv/pool stages, then 120-84-10 dense head.
    net = models.Sequential([
        layers.Conv2D(filters=6,
                      kernel_size=(5, 5),
                      input_shape=(32, 32, 3),
                      activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(filters=16, kernel_size=(5, 5), activation='relu'),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dense(120, activation='relu'),
        layers.Dense(84, activation='relu'),
        layers.Dense(10, activation='softmax'),
    ])

    net.compile(
        optimizer=tf.optimizers.Adam(learning_rate=lr),
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])

    # NOTE: the test split doubles as the validation set here.
    history = net.fit(x=train_x,
                      y=train_labels,
                      validation_data=(test_x, test_labels),
                      epochs=n_epochs,
                      batch_size=batch)

    # persist the trained model as an HDF5 file
    net.save("my_model.h5")

    # accuracy (top) and loss (bottom) training curves
    plt.subplot(2, 1, 1)
    plt.plot(history.history['accuracy'], '#3A5FCD', label='accuracy')
    plt.xlabel('epoch')
    plt.ylabel('Accuracy')
    plt.legend(loc='upper right')
    plt.subplot(2, 1, 2)
    plt.plot(history.history['loss'], '#3A5FCD', label='loss')
    plt.xlabel('epoch')
    plt.ylabel('Loss')
    plt.legend(loc='upper right')
    plt.savefig('5/5_4.png')
    plt.show()

import tensorflow as tf
from tensorflow.python.ops import summary_ops_v2
from tensorflow import keras
from tensorflow.keras import datasets, layers, models, optimizers, metrics




# Small MNIST CNN: reshape flat 28x28 inputs to NHWC, two conv/pool stages,
# then a 32-unit dense layer with dropout and a 10-way logit output.
model = tf.keras.Sequential()
model.add(layers.Reshape(target_shape=[28, 28, 1], input_shape=(28, 28,)))
model.add(layers.Conv2D(2, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((2, 2), (2, 2), padding='same'))
model.add(layers.Conv2D(4, 5, padding='same', activation=tf.nn.relu))
model.add(layers.MaxPooling2D((2, 2), (2, 2), padding='same'))
model.add(layers.Flatten())
model.add(layers.Dense(32, activation=tf.nn.relu))
model.add(layers.Dropout(rate=0.4))
model.add(layers.Dense(10))

# loss over raw logits, running accuracy metric, and SGD with momentum
compute_loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
compute_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
optimizer = optimizers.SGD(learning_rate=0.01, momentum=0.5)


def mnist_datasets():
    (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
    # Numpy defaults to dtype=float64; TF defaults to float32. Stick with float32.
Пример #29
0
def ResNet50_mod(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000,
             **kwargs):
    """Instantiates the ResNet50 architecture.
    Optionally loads weights pre-trained on ImageNet.
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: shape tuple for the input layer, used when
            `input_tensor` is None. For ImageNet top weights this
            has to be `(224, 224, 3)` (channels-last assumed).
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`:
            `None` (4D tensor output), `avg` (global average pooling,
            2D output), or `max` (global max pooling).
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    """
    # NOTE(review): the reference implementation's `weights`/`classes`
    # validation and _obtain_input_shape() normalisation were removed in this
    # modified copy; `input_shape` is used exactly as given, and channels-last
    # data format is assumed throughout.
    bn_axis = -1  # BatchNormalization over the channel axis (channels-last)

    # Honour a caller-supplied tensor. Previously `input_tensor` was ignored
    # and a fresh Input was always created, which made the built graph
    # disconnected from `input_tensor` and broke models.Model() below.
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        img_input = input_tensor

    # Stem: 7x7/2 convolution + 3x3/2 max-pooling.
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
    x = layers.Conv2D(64, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Stage 2: 3 bottleneck blocks, 256 output channels, no downsampling.
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    # Stage 3: 4 blocks, 512 output channels, stride-2 entry.
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    # Stage 4: 6 blocks, 1024 output channels.
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    # Stage 5: 3 blocks, 2048 output channels.
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    # Classification head or optional feature-extraction pooling.
    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)
        else:
            warnings.warn('The output shape of `ResNet50(include_top=False)` '
                          'has been changed since Keras 2.2.0.')

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = models.Model(inputs, x, name='resnet50')

    # Load weights: cached download of official ImageNet weights, or a
    # caller-supplied weights file path.
    if weights == 'imagenet':
        if include_top:
            weights_path = data_utils.get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
        else:
            weights_path = data_utils.get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='a268eb855778b3df3c7506639542a6af')
        model.load_weights(weights_path)

    elif weights is not None:
        model.load_weights(weights)

    return model
Пример #30
0
# CNN for IMG_WIDTH x IMG_HEIGHT x IMG_DEPTH images: in-model rescaling and
# light rotation augmentation, then three conv/BN/pool stages (32 -> 64 -> 128
# same-padded 3x3 ReLU filters).
model = models.Sequential()
model.add(
    layers.experimental.preprocessing.Rescaling(1. / 255,
                                                input_shape=(IMG_WIDTH,
                                                             IMG_HEIGHT,
                                                             IMG_DEPTH)))
# Fix: the original line ended with a stray trailing comma, which wrapped the
# (None) result of model.add() in a throwaway 1-tuple expression.
model.add(layers.experimental.preprocessing.RandomRotation(10 / 360))

# Three identical conv/BN/pool stages with doubling filter counts.
for n_filters in (32, 64, 128):
    model.add(
        layers.Conv2D(filters=n_filters,
                      kernel_size=(3, 3),
                      padding="same",
                      activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.MaxPooling2D((2, 2), strides=2, padding="valid"))