def output_block_network(inputs):
    """Final output head: a 1x1 convolution down to a single channel
    followed by a sigmoid, producing a [0, 1] grey-scale mask."""
    mask = Conv2D(filters=1, kernel_size=(1, 1), padding="same")(inputs)
    mask = Activation("sigmoid")(mask)
    return mask
Пример #2
0
def Conv(filters=16, kernel_size=(3,3), activation='relu', input_shape=None):
    """Factory for a same-padded Conv2D layer.

    Parameters mirror Conv2D's. ``input_shape`` is forwarded only when
    supplied, since tf.keras expects it solely on a model's first layer.
    Returns an (unbuilt) Conv2D layer instance.
    """
    # Build the argument set once instead of duplicating the Conv2D call
    # in both branches (the original if/else repeated every argument).
    kwargs = dict(filters=filters, kernel_size=kernel_size,
                  padding='Same', activation=activation)
    if input_shape:
        # Passing input_shape=None explicitly is not the same as
        # omitting it, so only add the key when a shape was given.
        kwargs['input_shape'] = input_shape
    return Conv2D(**kwargs)
Пример #3
0
# NOTE(review): fragment -- `datagen2`, `test`, `batch_size`, `x`, `y`,
# `x2` and `y2` are defined outside this excerpt.
test_set = datagen2.flow(test, batch_size=batch_size, shuffle=False)

# 80/20 train/validation split with a fixed seed for reproducibility.
x_train, x_val, y_train, y_val = train_test_split(x,
                                                  y,
                                                  train_size=0.8,
                                                  random_state=23)

# Same split (same seed) for the second dataset pair.
x2_train, x2_val, y2_train, y2_val = train_test_split(x2,
                                                      y2,
                                                      train_size=0.8,
                                                      random_state=23)

# (translated from Korean) At the academy, apply /255. to x and test;
# at home, do NOT apply /255.

# Conv -> BatchNorm -> ReLU -> MaxPool feature extractor over a
# 128x128 single-channel input, widening 128 -> 256 -> 512 channels.
input1 = Input(shape=(128, 128, 1))
a = Conv2D(128, 3, padding='same')(input1)
a = BatchNormalization()(a)
a = Activation('relu')(a)
a = MaxPooling2D(3, padding='same')(a)
a = Conv2D(256, 3, padding='same')(a)
a = BatchNormalization()(a)
a = Activation('relu')(a)
a = MaxPooling2D(3, padding='same')(a)
a = Conv2D(512, 3, padding='same')(a)
a = BatchNormalization()(a)
a = Activation('relu')(a)
a = MaxPooling2D(3, padding='same')(a)
a = Flatten()(a)
# Dense head begins here; the excerpt ends before the output layer.
a = Dense(128)(a)
a = BatchNormalization()(a)
a = Activation('relu')(a)
Пример #4
0
                                            input_shape=(image_height,
                                                         image_width, 3),
                                            pooling="avg")

# Transfer-learning head on top of a pretrained backbone.
# NOTE(review): `RESNET`, `image_height` and `image_width` are defined
# outside this excerpt.
model = tf.keras.Sequential()

# Projection: map the single-channel input to 3 channels so the
# RGB-pretrained ResNet can consume it.
model.add(
    Conv2D(3, (3, 3),
           input_shape=(image_height, image_width, 1),
           padding="same"))

model.add(RESNET)

# Fully-connected head.  Bug fix: the original passed an Activation
# *layer instance* as Dense's positional `activation` argument
# (Dense(512, Activation("relu"))); the intended, idiomatic and
# numerically equivalent form is the keyword string below.
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.50))
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.50))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.50))
model.add(Dense(64, activation="relu"))
model.add(Dropout(0.50))
# Single linear output unit (regression / binary logit).
model.add(Dense(1))
Пример #5
0
# NOTE(review): fragment -- `image_generator`, `train_dir`, `test_dir`,
# `tfd` (tensorflow_probability distributions) and `tfpl`
# (tensorflow_probability layers) are defined outside this excerpt.
train_generator, test_generator = image_generator(train_dir, test_dir)

# KL divergence scaled by 1/3457 -- presumably the training-set size,
# as used for minibatch-weighted variational inference; TODO confirm.
divergence_fn = lambda q,p,_:tfd.kl_divergence(q,p)/3457

# Bayesian CNN: a reparameterized (weight-uncertain) first conv layer
# and final dense layer, with ordinary conv/pool layers in between.
model_bayes = Sequential([
    
    # Variational conv layer over 75x75x3 inputs; priors are standard
    # normals, posteriors are learnable mean-field normals.
    tfpl.Convolution2DReparameterization(input_shape=(75,75,3), filters=8, kernel_size=16, activation='relu',
                                           kernel_prior_fn = tfpl.default_multivariate_normal_fn,
                                           kernel_posterior_fn=tfpl.default_mean_field_normal_fn(is_singular=False),
                                           kernel_divergence_fn = divergence_fn,
                                           bias_prior_fn = tfpl.default_multivariate_normal_fn,
                                           bias_posterior_fn=tfpl.default_mean_field_normal_fn(is_singular=False),
                                           bias_divergence_fn = divergence_fn),
    MaxPooling2D(2,2),
    Conv2D(32, (3,3), activation='relu'),
    MaxPooling2D(2,2),
    Conv2D(64, (3,3), activation='relu'),
    MaxPooling2D(2,2),
    Conv2D(64, (3,3), activation='relu'),
    MaxPooling2D(2,2),
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.2),
    # Variational output layer sized for a 5-way one-hot categorical.
    tfpl.DenseReparameterization(units=tfpl.OneHotCategorical.params_size(5), activation=None,
                                    kernel_prior_fn = tfpl.default_multivariate_normal_fn,
                                    kernel_posterior_fn=tfpl.default_mean_field_normal_fn(is_singular=False),
                                    kernel_divergence_fn = divergence_fn,
                                    bias_prior_fn = tfpl.default_multivariate_normal_fn,
                                    bias_posterior_fn=tfpl.default_mean_field_normal_fn(is_singular=False),
                                    bias_divergence_fn = divergence_fn
# NOTE(review): excerpt truncated here -- the closing brackets of
# DenseReparameterization and Sequential are missing from the source.
Пример #6
0
def DarknetConv2D(*args, **kwargs):
    """Wrapper to set Darknet parameters for Convolution2D."""
    # Darknet downsampling convs (stride 2) use 'valid' padding; all
    # others use 'same'.  Caller-supplied kwargs override these defaults.
    downsampling = kwargs.get('strides') == (2, 2)
    conv_kwargs = {
        'kernel_regularizer': l2(5e-4),
        'padding': 'valid' if downsampling else 'same',
    }
    conv_kwargs.update(kwargs)
    return Conv2D(*args, **conv_kwargs)
Пример #7
0
def MobileNet(input_shape=[224, 224, 3],
              depth_multiplier=1,
              dropout=1e-3,
              classes=1000):
    """MobileNet v1 classifier built from depthwise-separable conv blocks.

    Stem conv halves 224x224x3 to 112x112x32, then thirteen depthwise
    blocks progressively widen channels to 1024 while downsampling to
    7x7, followed by global pooling and a 1x1 conv softmax head.
    """
    img_input = Input(shape=input_shape)

    # Stem: 224,224,3 -> 112,112,32.
    x = _conv_block(img_input, 32, strides=(2, 2))

    # (filters, downsample?) for depthwise blocks 1..13; a True entry
    # halves the spatial resolution via strides=(2, 2).
    block_specs = [
        (64, False),                    # 112,112,64
        (128, True), (128, False),      # 56,56,128
        (256, True), (256, False),      # 28,28,256
        (512, True),                    # 14,14,512
        (512, False), (512, False), (512, False), (512, False), (512, False),
        (1024, True), (1024, False),    # 7,7,1024
    ]
    for block_id, (filters, downsample) in enumerate(block_specs, start=1):
        if downsample:
            x = _depthwise_conv_block(x, filters, depth_multiplier,
                                      strides=(2, 2), block_id=block_id)
        else:
            x = _depthwise_conv_block(x, filters, depth_multiplier,
                                      block_id=block_id)

    # Head: 7,7,1024 -> 1,1,1024 -> class probabilities.
    x = GlobalAveragePooling2D()(x)
    x = Reshape((1, 1, 1024), name='reshape_1')(x)
    x = Dropout(dropout, name='dropout')(x)
    x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
    x = Activation('softmax', name='act_softmax')(x)
    x = Reshape((classes, ), name='reshape_2')(x)

    return Model(img_input, x, name='mobilenet_1_0_224_tf')
Пример #8
0
def unet(pretrained=False, base=4):
    """Build (or load from disk) a U-Net segmentation model.

    Args:
        pretrained: if True, try to load `models/<model_name>.model`
            first; falls through to building a fresh model on failure.
        base: log2 of the channel count in the first stage (stages use
            2**base .. 2**(base+4) filters).

    Returns:
        A compiled tf.keras Model producing per-pixel class scores.

    Raises:
        ValueError: if the module-level `n_classes` is < 1 (the original
            silently left `loss` undefined and crashed later).
    """

    if pretrained:
        path = os.path.join('models', model_name + '.model')
        if os.path.exists(path):
            model = load_model(path, custom_objects={'dice': dice})
            model.summary()
            return model
        else:
            print('Failed to load existing model at: {}'.format(path))

    if n_classes == 1:
        loss = 'binary_crossentropy'
        final_act = 'sigmoid'
    elif n_classes > 1:
        loss = 'categorical_crossentropy'
        final_act = 'softmax'
    else:
        raise ValueError('n_classes must be >= 1, got {}'.format(n_classes))

    def _double_conv(tensor, filters, drop_rate):
        # One U-Net stage: two 3x3 ELU convs with dropout in between.
        tensor = Conv2D(filters, (3, 3),
                        activation='elu',
                        kernel_initializer='he_normal',
                        padding='same')(tensor)
        tensor = Dropout(drop_rate)(tensor)
        tensor = Conv2D(filters, (3, 3),
                        activation='elu',
                        kernel_initializer='he_normal',
                        padding='same')(tensor)
        return tensor

    b = base
    i = Input((imshape[0], imshape[1], imshape[2]))
    s = Lambda(lambda x: preprocess_input(x))(i)

    # Contracting path: channels double while resolution halves.
    c1 = _double_conv(s, 2**b, 0.1)
    p1 = MaxPooling2D((2, 2))(c1)
    c2 = _double_conv(p1, 2**(b + 1), 0.1)
    p2 = MaxPooling2D((2, 2))(c2)
    c3 = _double_conv(p2, 2**(b + 2), 0.2)
    p3 = MaxPooling2D((2, 2))(c3)
    c4 = _double_conv(p3, 2**(b + 3), 0.2)
    p4 = MaxPooling2D(pool_size=(2, 2))(c4)

    # Bottleneck.
    c5 = _double_conv(p4, 2**(b + 4), 0.3)

    # Expanding path: transpose-conv upsampling with skip concatenation.
    u6 = Conv2DTranspose(2**(b + 3), (2, 2), strides=(2, 2),
                         padding='same')(c5)
    c6 = _double_conv(concatenate([u6, c4]), 2**(b + 3), 0.2)
    u7 = Conv2DTranspose(2**(b + 2), (2, 2), strides=(2, 2),
                         padding='same')(c6)
    c7 = _double_conv(concatenate([u7, c3]), 2**(b + 2), 0.2)
    u8 = Conv2DTranspose(2**(b + 1), (2, 2), strides=(2, 2),
                         padding='same')(c7)
    c8 = _double_conv(concatenate([u8, c2]), 2**(b + 1), 0.1)
    u9 = Conv2DTranspose(2**b, (2, 2), strides=(2, 2), padding='same')(c8)
    c9 = _double_conv(concatenate([u9, c1], axis=3), 2**b, 0.1)

    # Per-pixel classifier.
    o = Conv2D(n_classes, (1, 1), activation=final_act)(c9)

    model = Model(inputs=i, outputs=o, name=model_name)
    model.compile(optimizer=Adam(1e-4), loss=loss, metrics=[dice])
    model.summary()

    return model

########## MAIN ##########

if TRAIN:
    ## Load training data
    train_file = "./input/digit_recognizer/train.csv"
    raw_data = pd.read_csv(train_file)
    x, y = data_prep(raw_data)

    ## Specify model: a small CNN for single-channel digit images.
    model = Sequential([
        # Input layer: 20 3x3 filters with ReLU over the raw image.
        Conv2D(20,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=(img_rows, img_cols, 1)),
        # Hidden layers with dropout for regularisation.
        Dropout(0.3),
        Conv2D(20, kernel_size=(3, 3), activation='relu'),
        Dropout(0.3),
        # Collapse the feature maps into a 1D vector.
        Flatten(),
        # A dense layer between Flatten and the output usually helps.
        Dense(128, activation='relu'),
        # Output layer: one probability per digit class.
        Dense(num_classes, activation='softmax'),
    ])
Пример #10
0
# print(X[1])

# END FIRST PART OF PROJ

# Unique timestamped run name for the TensorBoard log directory.
NAME = "Cats-vs-dogs-cnn-64x2-{}".format(int(time.time()))

tensorboard = TensorBoard(log_dir='logs/{}'.format(NAME))

# Load the pre-pickled feature/label arrays produced earlier in the project.
X = pickle.load(open("X.pickle", "rb"))
y = pickle.load(open("y.pickle", "rb"))

# Scale pixel values to [0, 1].
X = X / 255.0

# Two Conv -> ReLU -> MaxPool stages, then a single sigmoid unit for
# binary (cat vs dog) classification.
model = Sequential()
model.add(Conv2D(64, (3, 3), input_shape=X.shape[1:]))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Flatten())
# model.add(Dense(64))

model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss="binary_crossentropy",
              optimizer="adam",
# NOTE(review): excerpt truncated mid-call -- the remaining compile()
# arguments are missing from the source.
Пример #11
0
def fcn_8(pretrained=False, base=4):
    """Build (or load from disk) an FCN-8s segmentation model.

    A VGG-style encoder (five conv blocks) feeds two wide 'fully
    convolutional' layers; predictions from pool3/pool4 are fused back
    in (the classic FCN-8 skip connections) before an 8x transpose-conv
    upsample restores input resolution.

    Args:
        pretrained: if True, try `models/<model_name>.model` first.
        base: log2 of the first block's filter count.

    Returns:
        A compiled tf.keras Model.

    Raises:
        ValueError: if the module-level `n_classes` is < 1 (the original
            silently left `loss` undefined and crashed later).
    """

    if pretrained:
        path = os.path.join('models', model_name + '.model')
        if os.path.exists(path):
            model = load_model(path, custom_objects={'dice': dice})
            return model
        else:
            print('Failed to load existing model at: {}'.format(path))

    if n_classes == 1:
        loss = 'binary_crossentropy'
        final_act = 'sigmoid'
    elif n_classes > 1:
        loss = 'categorical_crossentropy'
        final_act = 'softmax'
    else:
        raise ValueError('n_classes must be >= 1, got {}'.format(n_classes))

    def _vgg_block(tensor, filters, n_convs, block_id):
        # n_convs 3x3 ELU convs followed by a 2x2 max-pool, using the
        # standard VGG layer names (blockN_convM / blockN_pool).
        for conv_id in range(1, n_convs + 1):
            tensor = Conv2D(filters, (3, 3),
                            activation='elu',
                            padding='same',
                            name='block{}_conv{}'.format(block_id,
                                                         conv_id))(tensor)
        return MaxPooling2D((2, 2), strides=(2, 2),
                            name='block{}_pool'.format(block_id))(tensor)

    b = base
    i = Input(shape=imshape)
    s = Lambda(lambda x: preprocess_input(x))(i)

    # Encoder.  (The original also bound unused f1/f2 aliases for the
    # first two pool outputs; they were dead and are dropped here.)
    x = _vgg_block(s, 2**b, 2, 1)
    x = _vgg_block(x, 2**(b + 1), 2, 2)
    pool3 = _vgg_block(x, 2**(b + 2), 3, 3)
    pool4 = _vgg_block(pool3, 2**(b + 3), 3, 4)
    pool5 = _vgg_block(pool4, 2**(b + 3), 3, 5)

    # Wide "fully convolutional" layers replacing VGG's dense head.
    conv6 = Conv2D(2048, (7, 7), activation='elu', padding='same',
                   name="conv6")(pool5)
    conv6 = Dropout(0.5)(conv6)
    conv7 = Conv2D(2048, (1, 1), activation='elu', padding='same',
                   name="conv7")(conv6)
    conv7 = Dropout(0.5)(conv7)

    # FCN-8 skip fusion: upsample 2x and add pool4 predictions...
    pool4_n = Conv2D(n_classes, (1, 1), activation='elu',
                     padding='same')(pool4)
    u2 = Conv2DTranspose(n_classes,
                         kernel_size=(2, 2),
                         strides=(2, 2),
                         padding='same')(conv7)
    u2_skip = Add()([pool4_n, u2])

    # ...then upsample 2x again and add pool3 predictions.
    pool3_n = Conv2D(n_classes, (1, 1), activation='elu',
                     padding='same')(pool3)
    u4 = Conv2DTranspose(n_classes,
                         kernel_size=(2, 2),
                         strides=(2, 2),
                         padding='same')(u2_skip)
    u4_skip = Add()([pool3_n, u4])

    # Final 8x upsample back to input resolution with the output activation.
    o = Conv2DTranspose(n_classes,
                        kernel_size=(8, 8),
                        strides=(8, 8),
                        padding='same',
                        activation=final_act)(u4_skip)

    model = Model(inputs=i, outputs=o, name=model_name)
    model.compile(optimizer=Adam(1e-4), loss=loss, metrics=[dice])
    model.summary()

    return model
Пример #12
0
# MNIST image dimensions.
width = 28
height = 28

(x_train,y_train), (x_test,y_test) = mnist.load_data()

# Add a channel axis and scale pixel values to [0, 1].
x_train = x_train.reshape(60000, width, height, 1).astype('float32')/255.0
x_test = x_test.reshape(10000, width, height, 1).astype('float32')/255.0

# One-hot encode the digit labels.
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

inputs = Input(shape=[28,28,1])

# VGG-style stack: explicit (1,1) zero padding before each 3x3 'valid'
# conv, which is equivalent to a same-padded conv.
#Layer_1
H1_Zero = ZeroPadding2D(padding=(1,1))(inputs)
H1_Conv = Conv2D(64,kernel_size=3,strides=1,activation='relu')(H1_Zero)

#Layer_2
H2_Zero = ZeroPadding2D(padding=(1,1))(H1_Conv)
H2_Conv = Conv2D(64,kernel_size=3,strides=1,activation='relu')(H2_Zero)
H2_Pool = MaxPool2D(pool_size=(2,2),strides=2)(H2_Conv)

#Layer_3
H3_Zero1 = ZeroPadding2D(padding=(1,1))(H2_Pool)
H3_Conv1 = Conv2D(128,kernel_size=3,strides=1,activation='relu')(H3_Zero1)
H3_Zero2 = ZeroPadding2D(padding=(1,1))(H3_Conv1)
H3_Conv2 = Conv2D(128,kernel_size=3,strides=1,activation='relu')(H3_Zero2)
H3_Pool = MaxPool2D(pool_size=(2,2),strides=2)(H3_Conv2)

#Layer_4
H4_Zero1 = ZeroPadding2D(padding=(1,1))(H3_Pool)
# NOTE(review): excerpt ends here -- the rest of layer 4 onward is
# missing from the source.
# NOTE(review): fragment -- `X_piece`, `Y_piece`, `start` and `timer`
# are defined outside this excerpt.
# Add a trailing channel axis so the square images fit a Conv2D input.
X_piece = X_piece.reshape(
    (X_piece.shape[0], X_piece.shape[1], X_piece.shape[2], 1))
X_piece = X_piece.astype(float)
Y_piece = np.asarray(Y_piece)
Y_piece = Y_piece.astype(int)

# Elapsed preprocessing time.
print(timer() - start)
'''We have one model to determine wether a square is empty and another model to determine the piece type on not empty squares
We achieved a precision of 100% on train set and 99.99% on test set'''

from tensorflow.keras.layers import Dense, Input, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.models import Sequential, Model

# First classifier: is a 32x32 board square empty or occupied (2 classes)?
model_empty = Sequential()
model_empty.add(Input(shape=(32, 32, 1)))
model_empty.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_empty.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_empty.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
model_empty.add(MaxPooling2D((2, 2)))
model_empty.add(Flatten())
model_empty.add(Dense(128, activation='relu'))
model_empty.add(Dense(2, activation='softmax'))

adam = tf.keras.optimizers.Adam(learning_rate=0.001)
model_empty.compile(optimizer=adam,
                    loss='sparse_categorical_crossentropy',
                    metrics=['accuracy'])

# Second classifier: which piece type occupies a non-empty square.
model_piece = Sequential()
model_piece.add(Input(shape=(32, 32, 1)))
model_piece.add(Conv2D(filters=5, kernel_size=(3, 3), activation='relu'))
# NOTE(review): excerpt truncated -- the rest of model_piece is missing.
    def __init__(self, args):
        """CIFAR-style sequential CNN: three Conv-BN stages (32/64/128
        filters) with pooling and increasing dropout, a softmax head,
        and Adam with an exponentially decaying learning rate.

        Args:
            args: namespace providing `epochs` (for the decay schedule)
                and `logdir` (for the TensorBoard callback).
        """
        # Zero-arg super is equivalent and idiomatic on Python 3
        # (the original used the legacy super(Network, self) form).
        super().__init__()
        num_classes = 10
        weight_decay = 1e-4

        def _conv_bn(filters, **conv_kwargs):
            # 3x3 same-padded conv with L2 weight decay, ReLU, then BN.
            self.add(
                Conv2D(filters, (3, 3),
                       padding='same',
                       kernel_regularizer=regularizers.l2(weight_decay),
                       **conv_kwargs))
            self.add(Activation('relu'))
            self.add(BatchNormalization())

        # Stage 1: 32 filters; first layer declares the input shape.
        _conv_bn(32, input_shape=(32, 32, 3))
        _conv_bn(32)
        self.add(MaxPooling2D(pool_size=(2, 2)))
        self.add(Dropout(0.2))

        # Stage 2: 64 filters.
        _conv_bn(64)
        _conv_bn(64)
        self.add(MaxPooling2D(pool_size=(2, 2)))
        self.add(Dropout(0.3))

        # Stage 3: 128 filters.
        _conv_bn(128)
        _conv_bn(128)
        self.add(MaxPooling2D(pool_size=(2, 2)))
        self.add(Dropout(0.4))

        self.add(Flatten())
        self.add(Dense(num_classes, activation='softmax'))

        # LR decays from 0.01 toward 0.0001 over the training run.
        schedule = tf.keras.optimizers.schedules.ExponentialDecay(
            initial_learning_rate=0.01,
            decay_steps=args.epochs * 45000 / 500,
            decay_rate=0.0001 / 0.01)
        self.compile(
            optimizer=tf.keras.optimizers.Adam(clipnorm=1.0,
                                               clipvalue=0.5,
                                               learning_rate=schedule),
            loss=tf.keras.losses.SparseCategoricalCrossentropy(),
            metrics=[
                tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")
            ])

        # NOTE(review): on_train_end is replaced with a no-op --
        # presumably to keep the TensorBoard writer usable across
        # repeated fit() calls; confirm before relying on it.
        self.tb_callback = tf.keras.callbacks.TensorBoard(args.logdir,
                                                          update_freq=1000,
                                                          profile_batch=1)
        self.tb_callback.on_train_end = lambda *_: None
Пример #15
0
 def get_custom_model(self,n):
     """Small VGG-like CNN over 224x224x3 inputs: five conv stages
     (each ending in BatchNorm + MaxPool) followed by two 512-unit
     dense layers and an `n`-way softmax output."""
     inp = Input(shape=(224, 224, 3), name='Input_1')

     # (conv filter counts, pool-layer name) for each stage; only the
     # last two pooling layers carry explicit names.
     stages = [
         ([4, 4], None),
         ([8, 8], None),
         ([16, 16, 16], None),
         ([32, 32, 16], 'MaxPooling2D_4'),
         ([32, 32, 16], 'MaxPooling2D_5'),
     ]

     t = inp
     for filter_counts, pool_name in stages:
         for f in filter_counts:
             t = Conv2D(f, kernel_size=3, padding='same',
                        activation='relu')(t)
         t = BatchNormalization()(t)
         t = (MaxPooling2D(name=pool_name)(t) if pool_name
              else MaxPooling2D()(t))

     t = Flatten()(t)
     t = Dense(512, activation='relu')(t)
     t = Dense(512, activation='relu')(t)
     out = Dense(n, activation='softmax')(t)

     return Model([inp], [out])
Пример #16
0
def VGG16_SEGNET(n_classes,
                 input_height=224,
                 input_width=224,
                 input_depth=3,
                 vgg_level=-1):
    """SegNet-style decoder on a VGG16 encoder.

    Builds the full VGG16 backbone (weights loaded from the module-level
    `VGG_Weights_path`), takes the pooling output selected by
    `vgg_level` (index into the five block outputs), and decodes it with
    UpSampling/ZeroPad/Conv/BatchNorm stages down to a per-pixel softmax
    over `n_classes`.

    The dense classification head is built only so the weight file's
    layer order/names line up for `load_weights`; it is not part of the
    returned model.
    """

    def _encoder_block(tensor, filters, n_convs, block_id):
        # n_convs 3x3 ReLU convs + 2x2 max-pool with canonical VGG layer
        # names -- the names must match the pretrained weight file.
        for conv_id in range(1, n_convs + 1):
            tensor = Conv2D(filters, (3, 3),
                            activation='relu',
                            padding='same',
                            name='block{}_conv{}'.format(block_id, conv_id),
                            data_format='channels_last')(tensor)
        return MaxPooling2D((2, 2),
                            strides=(2, 2),
                            name='block{}_pool'.format(block_id),
                            data_format='channels_last')(tensor)

    def _decoder_block(tensor, filters, upsample=True):
        # Optional 2x upsample, then explicit pad + 'valid' conv (net
        # effect of a same-padded conv) followed by batch norm.
        if upsample:
            tensor = UpSampling2D((2, 2), data_format='channels_last')(tensor)
        tensor = ZeroPadding2D((1, 1), data_format='channels_last')(tensor)
        tensor = Conv2D(filters, (3, 3),
                        activation='relu',
                        padding='valid',
                        data_format='channels_last')(tensor)
        return BatchNormalization()(tensor)

    img_input = Input(shape=(input_height, input_width, input_depth))

    # VGG16 encoder; keep every pooling output as a candidate level.
    levels = []
    x = img_input
    for block_id, (filters, n_convs) in enumerate(
            [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)], start=1):
        x = _encoder_block(x, filters, n_convs, block_id)
        levels.append(x)

    # Head needed only so load_weights sees the expected layer set.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1')(x)
    x = Dense(4096, activation='relu', name='fc2')(x)
    x = Dense(1000, activation='softmax', name='predictions')(x)

    vgg = Model(img_input, x)
    vgg.load_weights(VGG_Weights_path)

    o = levels[vgg_level]

    # Decoder: the first stage has no upsampling, matching the original
    # (where the first UpSampling2D call was commented out).
    for filters, upsample in [(512, False), (512, True), (256, True),
                              (128, True), (64, True)]:
        o = _decoder_block(o, filters, upsample)

    # Per-pixel class scores + softmax.
    o = Conv2D(n_classes, (3, 3), padding='same',
               data_format='channels_last')(o)
    o = (Activation('softmax'))(o)

    return Model(img_input, o)
def main(session_name, epochs, batch_size, optimizer, loss, metrics):
    """Train a small CNN that classifies cats vs. dogs.

    Downloads Google's ``cats_and_dogs_filtered`` dataset, builds a
    three-block convolutional binary classifier and trains it with the
    supplied hyper-parameters.

    BUGFIX: previously the ``epochs`` and ``batch_size`` arguments were
    clobbered by hard-coded values (15 / 128) and ``optimizer``/``loss``/
    ``metrics`` were ignored in favor of a hard-coded compile; the caller's
    values are now honored, and the training history is returned.

    Args:
        session_name: Name of this training session (only referenced by the
            Kafka callbacks that are currently disabled; kept for interface
            compatibility).
        epochs: Number of training epochs.
        batch_size: Mini-batch size used by both data generators.
        optimizer: Optimizer passed to ``model.compile``.
        loss: Loss passed to ``model.compile``. NOTE(review): the final Dense
            layer has no activation, so the loss should be configured with
            ``from_logits=True`` (e.g. ``BinaryCrossentropy(from_logits=True)``).
        metrics: List of metrics passed to ``model.compile``.

    Returns:
        The ``History`` object produced by ``model.fit``.
    """
    _URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'

    # Download and unpack the dataset into the Keras cache directory.
    path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)

    PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
    train_dir = os.path.join(PATH, 'train')
    validation_dir = os.path.join(PATH, 'validation')
    train_cats_dir = os.path.join(train_dir, 'cats')  # directory with our training cat pictures
    train_dogs_dir = os.path.join(train_dir, 'dogs')  # directory with our training dog pictures
    validation_cats_dir = os.path.join(validation_dir, 'cats')  # directory with our validation cat pictures
    validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # directory with our validation dog pictures

    # Count images so steps_per_epoch / validation_steps cover the data once.
    num_cats_tr = len(os.listdir(train_cats_dir))
    num_dogs_tr = len(os.listdir(train_dogs_dir))

    num_cats_val = len(os.listdir(validation_cats_dir))
    num_dogs_val = len(os.listdir(validation_dogs_dir))

    total_train = num_cats_tr + num_dogs_tr
    total_val = num_cats_val + num_dogs_val

    print('total training cat images:', num_cats_tr)
    print('total training dog images:', num_dogs_tr)

    print('total validation cat images:', num_cats_val)
    print('total validation dog images:', num_dogs_val)
    print("--")
    print("Total training images:", total_train)
    print("Total validation images:", total_val)

    IMG_HEIGHT = 150
    IMG_WIDTH = 150

    # Pixel values rescaled from [0, 255] to [0, 1] by both generators.
    train_image_generator = ImageDataGenerator(rescale=1. / 255)  # Generator for our training data
    validation_image_generator = ImageDataGenerator(rescale=1. / 255)  # Generator for our validation data
    train_data_gen = train_image_generator.flow_from_directory(batch_size=batch_size,
                                                               directory=train_dir,
                                                               shuffle=True,
                                                               target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                               class_mode='binary')
    val_data_gen = validation_image_generator.flow_from_directory(batch_size=batch_size,
                                                                  directory=validation_dir,
                                                                  target_size=(IMG_HEIGHT, IMG_WIDTH),
                                                                  class_mode='binary')

    # Three conv/pool blocks followed by a dense head; the final layer emits
    # a single logit (no activation), hence the from_logits note above.
    model = Sequential([
        Conv2D(16, 3, padding='same', activation='relu', input_shape=(IMG_HEIGHT, IMG_WIDTH, 3)),
        MaxPooling2D(),
        Conv2D(32, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Conv2D(64, 3, padding='same', activation='relu'),
        MaxPooling2D(),
        Flatten(),
        Dense(512, activation='relu'),
        Dense(1)
    ])
    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=metrics)
    model.summary()

    history = model.fit(
        train_data_gen,
        steps_per_epoch=total_train // batch_size,
        epochs=epochs,
        validation_data=val_data_gen,
        validation_steps=total_val // batch_size
    )
    return history
Пример #18
0
    def __init__(self, num_class, image_size):
        """Build and compile a U-Net segmentation model.

        Encoder: five double-conv blocks (32 -> 512 filters), each with
        BatchNorm and Dropout, downsampled by 2x2 max-pooling. Decoder: four
        Conv2DTranspose upsampling steps, each concatenated with the matching
        encoder block (skip connection). Head: 1x1 conv with sigmoid giving
        ``num_class`` output channels.

        The compiled model is stored on ``self.model``.

        Args:
            num_class: Number of output mask channels (classes).
            image_size: Height/width of the square RGB input image. Must be
                divisible by 16 for the four pool/upsample stages to line up.
        """
        # super(UNet,self).__init__()
        inputs = Input(shape=[image_size, image_size, 3])

        # ---------- Encoder ----------
        c1 = Conv2D(32, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(inputs)
        c1 = BatchNormalization()(c1)
        c1 = Dropout(0.1)(c1)
        c1 = Conv2D(32, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c1)
        c1 = BatchNormalization()(c1)
        p1 = MaxPooling2D((2, 2))(c1)

        c2 = Conv2D(64, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(p1)
        c2 = BatchNormalization()(c2)
        c2 = Dropout(0.1)(c2)
        c2 = Conv2D(64, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c2)
        c2 = BatchNormalization()(c2)
        p2 = MaxPooling2D((2, 2))(c2)

        c3 = Conv2D(128, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(p2)
        c3 = BatchNormalization()(c3)
        c3 = Dropout(0.2)(c3)
        c3 = Conv2D(128, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c3)
        c3 = BatchNormalization()(c3)
        p3 = MaxPooling2D((2, 2))(c3)

        c4 = Conv2D(256, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(p3)
        c4 = BatchNormalization()(c4)
        c4 = Dropout(0.2)(c4)
        c4 = Conv2D(256, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c4)
        c4 = BatchNormalization()(c4)
        p4 = MaxPooling2D(pool_size=(2, 2))(c4)

        # ---------- Bottleneck ----------
        c5 = Conv2D(512, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(p4)
        c5 = BatchNormalization()(c5)
        c5 = Dropout(0.3)(c5)
        c5 = Conv2D(512, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c5)
        c5 = BatchNormalization()(c5)

        # ---------- Decoder (transposed convs + skip connections) ----------
        u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same')(c5)
        u6 = concatenate([u6, c4])
        c6 = Conv2D(256, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(u6)
        c6 = BatchNormalization()(c6)
        c6 = Dropout(0.2)(c6)
        c6 = Conv2D(256, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c6)
        c6 = BatchNormalization()(c6)

        u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same')(c6)
        u7 = concatenate([u7, c3])
        c7 = Conv2D(128, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(u7)
        c7 = BatchNormalization()(c7)
        c7 = Dropout(0.2)(c7)
        c7 = Conv2D(128, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c7)
        c7 = BatchNormalization()(c7)

        u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same')(c7)
        u8 = concatenate([u8, c2])
        c8 = Conv2D(64, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(u8)
        c8 = BatchNormalization()(c8)
        c8 = Dropout(0.1)(c8)
        c8 = Conv2D(64, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c8)
        c8 = BatchNormalization()(c8)

        u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same')(c8)
        u9 = concatenate([u9, c1], axis=3)
        c9 = Conv2D(32, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(u9)
        c9 = BatchNormalization()(c9)
        c9 = Dropout(0.1)(c9)
        c9 = Conv2D(32, (3, 3),
                    activation='relu',
                    kernel_initializer='he_normal',
                    padding='same')(c9)
        c9 = BatchNormalization()(c9)

        # 1x1 conv head: per-pixel, per-class sigmoid probabilities.
        c10 = Conv2D(num_class, 1, activation='sigmoid')(c9)
        model = Model(inputs=inputs, outputs=c10)
        # BUGFIX: `lr` is a deprecated alias removed in newer Keras releases;
        # use the canonical `learning_rate` keyword instead.
        model.compile(optimizer=Adam(learning_rate=1e-4),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])
        self.model = model
def YOLOv3Net(cfgfile, model_size, num_classes):
    """Construct a YOLO v3 model from a Darknet configuration file.

    Parameters:
        cfgfile: Name/path of the Darknet ``.cfg`` configuration file.
        model_size: Input size of the model (height, width, depth).
        num_classes: Number of object classes.

    Returns:
        model: A Keras ``Model`` mapping the input image to the concatenated
        per-scale YOLO predictions ``[box_centers, box_shapes, confidence,
        class_probs]`` along the last axis.
    """

    # Load the list of layer blocks from the network configuration file.
    blocks = parse_cfg(cfgfile)

    outputs = {}          # layer index -> layer output tensor
    output_filters = []   # channel count produced by each layer, in order
    filters = []
    out_pred = []
    scale = 0             # flag: have we emitted the first detection scale yet?

    inputs = input_image = Input(shape=model_size)
    inputs = inputs / 255.0  # normalize pixel values to [0, 1]

    # Walk the blocks (skipping the leading [net] block) and build the
    # corresponding TensorFlow layers.
    for i, block in enumerate(blocks[1:]):
        # Convolutional block
        if (block["type"] == "convolutional"):

            # NOTE(review): `activation` is read from the cfg but not used —
            # LeakyReLU is only applied on the batch_normalize path below.
            activation = block["activation"]
            filters = int(block["filters"])
            kernel_size = int(block["size"])
            strides = int(block["stride"])

            # Darknet pads top/left only for strided convs ('valid' padding).
            if strides > 1:
                inputs = ZeroPadding2D(((1, 0), (1, 0)))(inputs)

            inputs = Conv2D(filters,
                            kernel_size,
                            strides=strides,
                            padding='valid' if strides > 1 else 'same',
                            name='conv_' + str(i),
                            use_bias=False if
                            ("batch_normalize" in block) else True)(inputs)

            if "batch_normalize" in block:
                inputs = BatchNormalization(name='bnorm_' + str(i))(inputs)
                inputs = LeakyReLU(alpha=0.1, name='leaky_' + str(i))(inputs)

        # Upsampling block
        elif (block["type"] == "upsample"):
            stride = int(block["stride"])
            inputs = UpSampling2D(stride)(inputs)

        # [route] block: re-use (and possibly concatenate) earlier outputs
        elif (block["type"] == "route"):
            block["layers"] = block["layers"].split(',')
            start = int(block["layers"][0])

            if len(block["layers"]) > 1:
                # `end` is made relative to i; since output_filters has
                # exactly i entries here, the negative index
                # output_filters[end] resolves to the absolute layer
                # output_filters[layers[1]] — intentional, if obscure.
                end = int(block["layers"][1]) - i
                filters = output_filters[i + start] + output_filters[end]
                inputs = tf.concat([outputs[i + start], outputs[i + end]],
                                   axis=-1)
            else:
                filters = output_filters[i + start]
                inputs = outputs[i + start]

        # [shortcut] block: residual addition with an earlier layer
        elif block["type"] == "shortcut":
            from_ = int(block["from"])
            inputs = outputs[i - 1] + outputs[i + from_]

        # YOLO detection layer (decodes raw conv output into boxes)
        elif block["type"] == "yolo":

            mask = block["mask"].split(",")
            mask = [int(x) for x in mask]
            anchors = block["anchors"].split(",")
            anchors = [int(a) for a in anchors]
            anchors = [(anchors[i], anchors[i + 1])
                       for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in mask]  # keep this scale's anchors
            n_anchors = len(anchors)

            out_shape = inputs.get_shape().as_list()

            # Flatten the grid: one row per (cell, anchor) prediction.
            inputs = tf.reshape(inputs, [-1, n_anchors * out_shape[1] * out_shape[2], \
           5 + num_classes])

            box_centers = inputs[:, :, 0:2]
            box_shapes = inputs[:, :, 2:4]
            confidence = inputs[:, :, 4:5]
            classes = inputs[:, :, 5:num_classes + 5]

            # Sigmoid squashes center offsets, objectness and class scores.
            box_centers = tf.sigmoid(box_centers)
            confidence = tf.sigmoid(confidence)
            classes = tf.sigmoid(classes)

            # Box sizes: exp(prediction) scaled by the anchor dimensions.
            anchors = tf.tile(anchors, [out_shape[1] * out_shape[2], 1])
            box_shapes = tf.exp(box_shapes) * tf.cast(anchors,
                                                      dtype=tf.float32)

            # Build the (cx, cy) grid-cell coordinates for every anchor.
            x = tf.range(out_shape[1], dtype=tf.float32)
            y = tf.range(out_shape[2], dtype=tf.float32)

            cx, cy = tf.meshgrid(x, y)
            cx = tf.reshape(cx, (-1, 1))
            cy = tf.reshape(cy, (-1, 1))
            cxy = tf.concat([cx, cy], axis=-1)
            cxy = tf.tile(cxy, [1, n_anchors])
            cxy = tf.reshape(cxy, [1, -1, 2])

            # Convert cell-relative centers to input-image pixel coordinates.
            strides = (input_image.shape[1] // out_shape[1], \
                       input_image.shape[2] // out_shape[2])
            box_centers = (box_centers + cxy) * strides

            prediction = tf.concat(
                [box_centers, box_shapes, confidence, classes], axis=-1)

            # Accumulate predictions across the three detection scales.
            if scale:
                out_pred = tf.concat([out_pred, prediction], axis=1)
            else:
                out_pred = prediction
                scale = 1

        outputs[i] = inputs
        output_filters.append(filters)

    # The input image is the entry point; the concatenated YOLO predictions
    # are the model output.
    model = Model(input_image, out_pred)
    # Print all layers and parameters to the console
    # model.summary()
    return model
Пример #20
0
# convolution kernel size
num_conv = 3

# Pick the input-tensor layout matching the backend's data format.
# NOTE(review): img_chns/img_rows/img_cols, D2 and filters are presumably
# defined earlier in the original script — confirm against the full source.
if backend.image_data_format() == 'channels_first':  # or 'channels_last'
    original_img_size = (img_chns, img_rows, img_cols)  # e.g. 1, 28, 28
else:
    original_img_size = (img_rows, img_cols, img_chns)  # e.g. 28, 28, 1

# In[]: Building the architecture
# X is the image input; Y, Y_mu, Y_lsgms are auxiliary D2-dimensional inputs
# (by their names, a label code with its mean and log-sigma — TODO confirm).
X = Input(shape=original_img_size)
Y = Input(shape=(D2, ))
Y_mu = Input(shape=(D2, ))
Y_lsgms = Input(shape=(D2, ))
# Encoder: stack of convolutions; conv_2 halves spatial resolution (stride 2).
conv_1 = Conv2D(img_chns,
                kernel_size=(2, 2),
                padding='same',
                activation='relu',
                name='en_conv_1')(X)
conv_2 = Conv2D(filters,
                kernel_size=(2, 2),
                padding='same',
                activation='relu',
                strides=(2, 2),
                name='en_conv_2')(conv_1)
conv_3 = Conv2D(filters,
                kernel_size=num_conv,
                padding='same',
                activation='relu',
                strides=1,
                name='en_conv_3')(conv_2)
conv_4 = Conv2D(filters,
Пример #21
0


# In[8]:


# Load the content and style images as float32 arrays.
# NOTE(review): imread, content_path, style_path and image_size are defined
# earlier in the original script — confirm against the full source.
content = imread(content_path).astype('float32')
style = imread(style_path).astype('float32')


# In[9]:


# Image transformation network: a small conv stack (Conv2D + BatchNorm)
# operating on image_size x image_size RGB inputs. The deeper layers are
# commented out, leaving only two 3x3 conv blocks active.
image_transformation_network = tf.keras.Sequential()

image_transformation_network.add(Conv2D(filters=8, kernel_size=3, padding='same', input_shape=(image_size, image_size, 3), activation='relu'))
image_transformation_network.add(BatchNormalization())
image_transformation_network.add(Conv2D(filters=16, kernel_size=3, padding='same', activation='relu'))
image_transformation_network.add(BatchNormalization())
#image_transformation_network.add(Conv2D(filters=64, kernel_size=3, padding='same', activation='relu'))
#image_transformation_network.add(BatchNormalization())
#image_transformation_network.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
#image_transformation_network.add(BatchNormalization())
#image_transformation_network.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
#image_transformation_network.add(BatchNormalization())
#image_transformation_network.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
#image_transformation_network.add(BatchNormalization())
#image_transformation_network.add(Conv2D(filters=128, kernel_size=3, padding='same', activation='relu'))
#image_transformation_network.add(BatchNormalization())
Пример #22
0
                                            class_mode='binary')

classifier.fit_generator(training_set,
                        steps_per_epoch=14000,
                        epochs=50,
                        validation_data=test_set,
                        validation_steps=3000)
'''
#===============================================================================================
# Training hyper-parameters.
target_size = (256, 256)
batch_size = 32
epochs = 90
# NOTE(review): input_shape is computed here but the first Conv2D below
# hard-codes (256, 256, 3) instead of using it — harmless while they match.
input_shape = target_size + (3, )

# Six-class image classifier: four conv/pool blocks followed by two
# dropout-regularized dense layers and a softmax head.
classifier = Sequential()
classifier.add(Conv2D(32, (3, 3), input_shape=(256, 256, 3),
                      activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(64, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(128, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Conv2D(256, (3, 3), activation='relu'))
classifier.add(MaxPooling2D(pool_size=(2, 2)))
classifier.add(Flatten())
classifier.add(Dropout(0.5))
classifier.add(Dense(units=512, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=512, activation='relu'))
classifier.add(Dropout(0.5))
classifier.add(Dense(units=6, activation='softmax'))
classifier.compile(optimizer='adam',
Пример #23
0
    def build(self, input_shape):
        """Instantiate the layers of a modified-Xception backbone.

        Creates the pre-conv stem plus the entry/middle/exit flows built
        from ``XceptionBlock`` (a project-local layer). The spatial-size
        comments (e.g. 512,512,3 -> 256,256,32) describe the expected shape
        progression for a 512x512 input. Layers are stored on ``self`` for
        use by ``call``, which is outside this view.
        """
        # Channel axis for BatchNormalization depends on the data format.
        if tf.keras.backend.image_data_format() == 'channels_last':
            bn_axis = -1
        else:
            bn_axis = 1

        # pre
        # 512,512,3 -> 256,256,32 -> 256,256,64
        # Two conv/BN/activation stems; the first downsamples with stride 2.
        self.pre_layers = []
        pre_filters = [32, 64]
        pre_strides = [2, 1]
        for i in range(len(pre_filters)):
            conv_sequential = tf.keras.Sequential([
                Conv2D(filters=pre_filters[i],
                       kernel_size=(3, 3),
                       padding='same',
                       kernel_regularizer=self.kernel_regularizer,
                       kernel_initializer=self.kernel_initializer,
                       strides=(pre_strides[i], pre_strides[i]),
                       use_bias=False),
                BatchNormalization(axis=bn_axis,
                                   momentum=self.batchnorm_momentum,
                                   epsilon=self.batchnorm_epsilon),
                Activation(self.activation)
            ])
            self.pre_layers.append(conv_sequential)

        # entry flow
        # 256,256,64 -> 128,128,128
        self.entry_layer1 = XceptionBlock(depth_list=[128, 128, 128],
                                          skip_connection_type='conv',
                                          stride=2,
                                          depth_activation=False)

        # 128,128,128 -> 64,64,256
        # skip = 128,128,256  (return_skip exposes the low-level feature map,
        # presumably for a decoder skip connection — TODO confirm in call()).
        self.entry_layer2 = XceptionBlock(depth_list=[256, 256, 256],
                                          skip_connection_type='conv',
                                          stride=2,
                                          depth_activation=False,
                                          return_skip=True)

        # 64,64,256 -> 64,64,728
        self.entry_layer3 = XceptionBlock(depth_list=[728, 728, 728],
                                          skip_connection_type='conv',
                                          stride=1,
                                          depth_activation=False)

        # middle flow
        # 64,64,728 -> 64,64,728 : 16 residual ('sum') blocks at stride 1.
        self.middle_layers = []
        for i in range(16):
            self.middle_layers.append(
                XceptionBlock(depth_list=[728, 728, 728],
                              skip_connection_type='sum',
                              stride=1,
                              rate=1,
                              depth_activation=False))

        # exit flow
        # 64,64,728 -> 64,64,1024
        self.exit_layer1 = XceptionBlock(depth_list=[728, 1024, 1024],
                                         skip_connection_type='conv',
                                         stride=1,
                                         rate=1,
                                         depth_activation=False)
        # 64,64,1024 -> 64,64,2048 (dilated: rate=2, no skip connection)
        self.exit_layer2 = XceptionBlock(depth_list=[1536, 1536, 2048],
                                         skip_connection_type='none',
                                         stride=1,
                                         rate=2,
                                         depth_activation=True)
Пример #24
0
def build_ATN(architecture=1, input_shape=[28, 28, 1], num_classes=10):
    """Build a transformation network (ATN, per the name) in one of several
    architectures.

    Architectures 0 and 1 are conditional dense networks taking two inputs
    (an image and a one-hot target vector) and returning an image-shaped
    sigmoid output. Architectures -1, -2 and 2 are unconditional Sequential
    models mapping an image to an image.

    Args:
        architecture: One of {0, 1, -1, -2, 2}; selects the network layout.
        input_shape: Shape of the image input (default MNIST 28x28x1).
        num_classes: Length of the one-hot target input (architectures 0/1).

    Returns:
        cnn: The constructed (uncompiled) Keras model.

    Raises:
        ValueError: If ``architecture`` is not one of the supported values.
    """
    if architecture == 0:
        # Dense net conditioned on the target class: flatten image, append
        # the one-hot target, one hidden layer of 2048 units.
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        #target_int = Lambda(lambda x:K.argmax(x,axis=-1))(target)
        x1 = Flatten()(image)
        #x2 = Embedding(10,20,input_length=1)(target_int)
        #x2 = Lambda(lambda x: K.squeeze(x, -2))(x2)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(2048,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        cnn = Model(inputs=[image, target], outputs=x)
    elif architecture == 1:
        # Like architecture 0 but with two hidden layers of 1024 units.
        image = Input(shape=input_shape)
        target = Input(shape=(num_classes, ))
        x1 = Flatten()(image)
        x = Concatenate(axis=-1)([x1, target])
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(1024,
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
        x = Activation('relu')(x)
        x = Dense(np.prod(input_shape),
                  activation='sigmoid',
                  bias_initializer='zeros')(x)
        x = Reshape(input_shape)(x)
        cnn = Model(inputs=[image, target], outputs=x)
    elif architecture == -1:
        # Unconditional dense autoencoder-style net.
        cnn = Sequential()
        cnn.add(Flatten(input_shape=input_shape))
        cnn.add(
            Dense(2048,
                  activation='relu',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Dropout(0.25))
        cnn.add(
            Dense(np.prod(input_shape),
                  activation='sigmoid',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Reshape(input_shape))
    elif architecture == -2:
        # Unconditional conv net with L2 regularization, dense bottleneck.
        cnn = Sequential()
        cnn.add(
            Conv2D(
                64,
                kernel_size=(3, 3),
                activation='relu',
                kernel_initializer='glorot_normal',
                bias_initializer='zeros',  #Constant(-0.5),
                kernel_regularizer=l2(0.005),
                input_shape=input_shape,
                padding='same'))
        cnn.add(
            Conv2D(128,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros',
                   kernel_regularizer=l2(0.005),
                   padding='same'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))

        cnn.add(
            Conv2D(256,
                   kernel_size=(3, 3),
                   activation='relu',
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros',
                   kernel_regularizer=l2(0.005),
                   padding='same'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))
        cnn.add(Flatten())
        cnn.add(
            Dense(2048,
                  activation='relu',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros',
                  kernel_regularizer=l2(0.05)))
        cnn.add(Dropout(0.25))
        cnn.add(
            Dense(np.prod(input_shape),
                  activation='sigmoid',
                  kernel_initializer='glorot_normal',
                  bias_initializer='zeros'))
        cnn.add(Reshape(input_shape))
    elif architecture == 2:
        # Fully convolutional encoder/decoder (downsample then upsample).
        cnn = Sequential()
        cnn.add(
            Conv2D(256,
                   kernel_size=(3, 3),
                   activation='relu',
                   input_shape=input_shape,
                   padding='same',
                   use_bias=True,
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros'))
        cnn.add(MaxPooling2D(pool_size=(2, 2)))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2D(512,
                   kernel_size=(3, 3),
                   activation='relu',
                   padding='same',
                   use_bias=True,
                   kernel_initializer='glorot_normal',
                   bias_initializer='zeros'))
        #cnn.add(MaxPooling2D(pool_size=(2, 2)))
        #cnn.add(Dropout(0.5))
        #cnn.add(Conv2D(512, kernel_size=(3, 3),activation='relu',padding='same',
        #          use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros'))
        #cnn.add(UpSampling2D(data_format='channels_last'))
        #cnn.add(Dropout(0.5))
        #cnn.add(Conv2DTranspose(256, kernel_size=(3,3), padding='same', activation='relu',
        #          use_bias=True, kernel_initializer='glorot_normal', bias_initializer='zeros'))
        cnn.add(UpSampling2D(data_format='channels_last'))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2DTranspose(256,
                            kernel_size=(3, 3),
                            padding='same',
                            activation='relu',
                            use_bias=True,
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros'))
        cnn.add(Dropout(0.5))
        cnn.add(
            Conv2DTranspose(1,
                            kernel_size=(3, 3),
                            padding='same',
                            activation='sigmoid',
                            use_bias=True,
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros'))
    else:
        # BUGFIX: previously an unsupported value fell through every branch
        # and crashed at `return cnn` with an opaque UnboundLocalError.
        raise ValueError('unknown architecture: {}'.format(architecture))
    return cnn
def DarknetConv2D(*args, **kwargs):
    """Conv2D wrapper applying the Darknet padding convention.

    Defaults ``padding`` to ``'valid'`` when ``strides == (2, 2)`` and to
    ``'same'`` otherwise; any keyword arguments supplied by the caller
    (including an explicit ``padding``) take precedence over the default.
    All positional and keyword arguments are forwarded to ``Conv2D``.
    """
    if kwargs.get('strides') == (2, 2):
        conv_kwargs = {'padding': 'valid'}
    else:
        conv_kwargs = {'padding': 'same'}
    # Caller-supplied kwargs override the computed padding default.
    conv_kwargs.update(kwargs)
    return Conv2D(*args, **conv_kwargs)
Пример #26
0
def G_resnet(n=1,
             block_strides=[1, 2, 2, 1 / 2, 1 / 2],
             input_shape=[32, 32, 3],
             num_classes=10,
             num_filters=16,
             inner_loop_concat=False):
    """Build a class-conditioned residual generator ('model_G').

    The network repeatedly mixes the target one-hot vector into the feature
    maps via ``OuterProduct2D`` (project-local layer), runs a stack of
    resnet blocks whose strides follow ``block_strides`` (fractional strides
    select transposed convolutions, i.e. upsampling), adds the result back
    onto the input image, and clips to [0, 1] via ``clip01``.

    Args:
        n: Number of resnet block pairs per stride stage.
        block_strides: Per-stage stride; values < 1 mean transposed convs.
        input_shape: Image input shape (CIFAR-10 by default).
        num_classes: Length of the one-hot target input.
        num_filters: Base filter count, scaled by each stage's stride.
        inner_loop_concat: If True, mix the target in after every block.

    Returns:
        A two-input Keras ``Model`` ([image, target] -> perturbed image).
    """
    image = Input(shape=input_shape)
    target = Input(shape=(num_classes, ))
    subtract_pixel_mean = True
    x = image

    #subract the pixel mean of the training set in the first layer
    # NOTE(review): loads a precomputed CIFAR-10 mean from disk — this file
    # must exist at 'saved_models/cifar10_input_mean.npy' at build time.
    if subtract_pixel_mean:
        x_train_mean = np.load('saved_models/cifar10_input_mean.npy')
        constant_init = Constant(value=totuple(x_train_mean))
        x = Subtract_Mean(bias_initializer=constant_init)(x)
    #  x = Concatenate(axis=-1)([target[:,i]*x for i in range(num_classes)])

    # Condition the features on the target class.
    x = OuterProduct2D()([x, target])

    print('Building Generator: {} layer resnet'.format(2 * n *
                                                       len(block_strides) + 2))

    x = resnet_block(inputs=x, kernel_size=5, num_filters=num_filters)
    # Instantiate convolutional base (stack of blocks).
    for i, m in enumerate(block_strides):
        for j in range(n):
            strides = 1
            is_first_layer_but_not_first_block = j == 0 and i > 0
            transposed = False
            if is_first_layer_but_not_first_block:
                # First block of a stage applies the stage stride; a
                # fractional stride m becomes a transposed conv of 1/m.
                strides = m
                if m < 1:
                    transposed = True
                    strides = int(1 / m)
                num_filters = int(m * num_filters)
            y = resnet_block(inputs=x,
                             num_filters=num_filters,
                             strides=strides,
                             transposed=transposed)
            if inner_loop_concat:
                y = OuterProduct2D()([y, target])
            y = resnet_block(inputs=y,
                             num_filters=num_filters,
                             activation=None)
            if is_first_layer_but_not_first_block:
                # 1x1 projection so the residual shortcut matches y's shape.
                x = resnet_block(inputs=x,
                                 num_filters=num_filters,
                                 kernel_size=1,
                                 strides=strides,
                                 transposed=transposed,
                                 activation=None)
            x = tf.keras.layers.add([x, y])
            x = Activation('relu')(x)
    x = OuterProduct2D()([x, target])
    x = resnet_block(inputs=x,
                     kernel_size=5,
                     num_filters=int(num_filters / 2),
                     activation=None)
    x = OuterProduct2D()([x, target])
    x = Conv2D(3,
               kernel_size=1,
               padding='same',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)

    #image_predist = Activation(invsigmoid)(image)
    #image_predist = Shift_Scale(w=4,b=-0.5)(image)
    # Residual output: perturbation added to the original image, clipped.
    x = Add()([x, image])
    output = Activation(clip01)(x)
    model = Model(inputs=[image, target], outputs=output, name='model_G')
    return model
Пример #27
0
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# Enable memory growth on all GPUs so TensorFlow allocates VRAM on demand
# instead of grabbing it all up front.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Memory growth must be set before GPUs are initialized.
        print(e)

# Functional-API classifier head: two Conv/BN/LeakyReLU/pool blocks followed
# by a dropout-regularized dense layer (graph continues past this chunk).
input_layer = Input((150, 150, 3))

x = Conv2D(filters=16, kernel_size=3, padding='same')(input_layer)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = MaxPooling2D(pool_size=2)(x)

x = Conv2D(filters=32, kernel_size=3, padding='same')(x)
x = BatchNormalization()(x)
x = LeakyReLU()(x)
x = MaxPooling2D(pool_size=2)(x)
x = Dropout(rate=0.4)(x)

x = Flatten()(x)

x = Dense(512)(x)
x = Dropout(rate=0.4)(x)
x = BatchNormalization()(x)
Пример #28
0
def stem(x, filters, kernel_size=3, padding='same', strides=1):
    """Apply the network stem convolution to `x`.

    Parameters:
        x: input tensor.
        filters: number of convolution filters.
        kernel_size: convolution kernel size (default 3).
        padding: convolution padding mode (default 'same').
        strides: convolution strides (default 1).

    Returns:
        The convolved output tensor.
    """
    conv = Conv2D(filters, kernel_size, padding=padding, strides=strides)(x)
    # Bug fix: the original computed `conv` but fell through, implicitly
    # returning None -- a layer-builder must hand back the output tensor.
    return conv
# Scale pixel values into [0, 1].
X = X/255.0

# Hyper-parameter grid for the architecture search (single combination here).
dense_layers = [0]
layer_sizes = [64]
conv_layers = [3]

for dense_layer in dense_layers:
    for layer_size in layer_sizes:
        for conv_layer in conv_layers:
            # Unique run name: <convs>-conv-<nodes>-nodes-<denses>-dense-<unix time>.
            NAME = "{}-conv-{}-nodes-{}-dense-{}".format(conv_layer, layer_size, dense_layer, int(time.time()))
            print(NAME)

            # Assemble the layer list first, then feed it into Sequential.
            # The first conv stage carries input_shape; the rest infer it.
            stack = [
                Conv2D(layer_size, (3, 3), input_shape=X.shape[1:]),
                Activation('relu'),
                MaxPooling2D(pool_size=(2, 2)),
            ]
            for _ in range(conv_layer - 1):
                stack += [
                    Conv2D(layer_size, (3, 3)),
                    Activation('relu'),
                    MaxPooling2D(pool_size=(2, 2)),
                ]

            stack.append(Flatten())

            for _ in range(dense_layer):
                stack += [Dense(layer_size), Activation('relu')]

            # Single-unit output (binary-classification style head).
            stack.append(Dense(1))

            model = Sequential()
            for layer in stack:
                model.add(layer)
Пример #30
0
# 3x3 convolution filter (looks like a vertical-edge detector -- negative
# left column, positive right columns; used elsewhere, not in this snippet).
myfil2 = np.array([[-2, 1, 1], [-2, 1, 1], [-2, 1, 1]], dtype=float)

# Single grayscale image pulled from the training set (id_img defined elsewhere).
x_img = x_train[id_img, :, :, 0]  #28x28

img_h = 28
img_w = 28
# Output buffers matching the source image (presumably for manual filtering
# done elsewhere -- TODO confirm against surrounding code).
out_img1 = np.zeros_like(x_img)
out_img2 = np.zeros_like(x_img)

# Checkpoint location used below to restore previously-trained weights.
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "my_ckpt")

# Minimal CNN classifier: one 3x3 conv (8 filters) -> flatten -> 10-way softmax.
model = Sequential()
model.add(
    Conv2D(8, (3, 3),
           padding='same',
           input_shape=(28, 28, 1),
           activation='relu'))
model.add(Flatten())
model.add(Dense(10, activation='softmax'))

# Assumes y_train/y_test are one-hot encoded (categorical cross-entropy) --
# TODO confirm against the data-preparation code.
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
print(model.summary())
startTime = time.time()
print(np.shape(x_train))
#history=model.fit(x_train,y_train, batch_size=1000, epochs=20, verbose=1, validation_data=(x_test, y_test))
#model.save_weights(checkpoint_prefix)
# Training is commented out above; weights are restored from the latest
# checkpoint instead (fails if no checkpoint exists in checkpoint_dir).
model.load_weights(tf.train.latest_checkpoint(checkpoint_dir))

# Evaluate restored model on the held-out test set: [loss, accuracy].
score = model.evaluate(x_test, y_test, verbose=0)