def block(inputs,
          activation_fn=get_swish,
          drop_rate=0.,
          name='',
          filters_in=32,
          filters_out=16,
          kernel_size=3,
          strides=1,
          expand_ratio=1,
          se_ratio=0.,
          id_skip=True):
    """EfficientNet mobile inverted bottleneck block (MBConv).

    Pipeline: 1x1 expand conv -> depthwise conv -> squeeze-and-excitation
    -> 1x1 project conv, with an optional identity skip connection and
    stochastic-depth dropout on the residual branch.

    Args:
        inputs: Input feature tensor.
        activation_fn: Activation applied after the expand and depthwise
            convolutions (the default `get_swish` is defined elsewhere in
            this file; presumably a swish implementation — confirm there).
        drop_rate: Dropout rate for stochastic depth on the residual path.
        name: Prefix for every layer name in this block.
        filters_in: Number of input channels.
        filters_out: Number of output channels after projection.
        kernel_size: Depthwise convolution kernel size.
        strides: Depthwise convolution stride; 2 halves the spatial size.
        expand_ratio: Channel multiplier for the expand phase.
        se_ratio: Squeeze-excitation ratio in (0, 1]; 0 disables SE.
        id_skip: Whether to add the residual connection when shapes match.

    Returns:
        The block's output tensor.
    """
    filters = filters_in * expand_ratio
    #-------------------------------------------------#
    #   Inverted residual structure.
    #   Part 1: raise the channel count with a 1x1 conv.
    #-------------------------------------------------#
    if expand_ratio != 1:
        x = layers.Conv2D(filters,
                          1,
                          padding='same',
                          use_bias=False,
                          kernel_initializer=CONV_KERNEL_INITIALIZER,
                          name=name + 'expand_conv')(inputs)
        x = layers.BatchNormalization(name=name + 'expand_bn')(x)
        x = layers.Activation(activation_fn,
                              name=name + 'expand_activation')(x)
    else:
        # No expansion needed; operate directly on the input.
        x = inputs

    #------------------------------------------------------#
    #   When stride is 2, pad explicitly so the depthwise
    #   conv downsamples the feature map cleanly.
    #   Part 2: depthwise conv over each channel.
    #------------------------------------------------------#
    if strides == 2:
        x = layers.ZeroPadding2D(padding=correct_pad(x, kernel_size),
                                 name=name + 'dwconv_pad')(x)
        conv_pad = 'valid'
    else:
        conv_pad = 'same'
    x = layers.DepthwiseConv2D(kernel_size,
                               strides=strides,
                               padding=conv_pad,
                               use_bias=False,
                               depthwise_initializer=CONV_KERNEL_INITIALIZER,
                               name=name + 'dwconv')(x)
    x = layers.BatchNormalization(name=name + 'bn')(x)
    x = layers.Activation(activation_fn, name=name + 'activation')(x)

    #------------------------------------------------------#
    #   After the depthwise conv, apply channel attention
    #   (squeeze-and-excitation) to its output.
    #------------------------------------------------------#
    if 0 < se_ratio <= 1:
        # SE bottleneck width is a fraction of the *input* filters.
        filters_se = max(1, int(filters_in * se_ratio))
        se = layers.GlobalAveragePooling2D(name=name + 'se_squeeze')(x)
        se = layers.Reshape((1, 1, filters), name=name + 'se_reshape')(se)
        #------------------------------------------------------#
        #   Squeeze then re-expand the channels; the final
        #   sigmoid maps the gate values into [0, 1].
        #------------------------------------------------------#
        se = layers.Conv2D(filters_se,
                           1,
                           padding='same',
                           activation=activation_fn,
                           kernel_initializer=CONV_KERNEL_INITIALIZER,
                           name=name + 'se_reduce')(se)
        se = layers.Conv2D(filters,
                           1,
                           padding='same',
                           activation='sigmoid',
                           kernel_initializer=CONV_KERNEL_INITIALIZER,
                           name=name + 'se_expand')(se)
        x = layers.multiply([x, se], name=name + 'se_excite')

    #------------------------------------------------------#
    #   Part 3: reduce the channel count with a 1x1 conv.
    #------------------------------------------------------#
    x = layers.Conv2D(filters_out,
                      1,
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name=name + 'project_conv')(x)
    x = layers.BatchNormalization(name=name + 'project_bn')(x)

    #------------------------------------------------------#
    #   Part 4: add the residual connection when the skip
    #   conditions are satisfied (same shape, stride 1).
    #------------------------------------------------------#
    if (id_skip is True and strides == 1 and filters_in == filters_out):
        if drop_rate > 0:
            # Stochastic depth: drop the whole residual branch per-sample.
            x = layers.Dropout(drop_rate,
                               noise_shape=(None, 1, 1, 1),
                               name=name + 'drop')(x)
        x = layers.add([x, inputs], name=name + 'add')

    return x
Example #2
0
def CNN3D(input_shape):
    """Define base 3D CNN implementation.

    Implement a 3D CNN for two-way classification following the architecture
    of Basu et al., with a max-pooling layer added after each conv layer.

    Args:
        input_shape: Shape of one input volume, without the batch axis.

    Returns:
        An uncompiled Keras Model ending in a 2-way softmax.
    """
    img_input = layers.Input(shape=input_shape)

    # Four identical stages: Conv3D -> BatchNorm -> MaxPool -> Dropout.
    # Layer names (conv1..conv4, bn1..bn4, pool1..pool4) match the original
    # hand-unrolled version so weight loading by name still works.
    x = img_input
    for i in range(1, 5):
        x = layers.Conv3D(11, (3, 3, 3),
                          activation='relu',
                          name='conv%d' % i,
                          strides=(1, 1, 1),
                          padding="valid")(x)
        x = layers.BatchNormalization(axis=-1, name='bn%d' % i)(x)
        x = layers.MaxPooling3D(pool_size=(2, 2, 2), padding='valid',
                                name='pool%d' % i)(x)
        x = layers.Dropout(0.2)(x)

    x = layers.Flatten()(x)

    x = layers.Dense(4096, name='fc0', activation='relu')(x)
    x = layers.Dense(4096, name='fc1', activation='relu')(x)
    # NOTE: the original stacked a second Activation('relu') after fc1's
    # built-in ReLU; relu is idempotent, so that redundant weightless layer
    # was removed without changing the model's outputs.
    x = layers.Dense(2, activation='softmax', name='fc3')(x)

    model = models.Model(img_input, x)
    return model
# Scale pixel intensities to [0, 1] (x / xt: train / test images defined
# earlier in the script).
x1 = x / 255.0
xt1 = xt / 255.0

# One-hot encode the integer class labels.
y1 = to_categorical(y)
yt1 = to_categorical(yt)

model = models.Sequential()
# Block 1: conv -> pool -> batch-norm -> light dropout.
model.add(layers.Conv2D(
    filters=16,
    kernel_size=4,
    padding='same',
    strides=1,
    activation='relu',
    input_shape=(32, 32, 3)))
model.add(layers.MaxPool2D(pool_size=2, padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.05))

# Block 2: wider conv with stronger pooling. (The original also passed
# input_shape here; Keras ignores it on any Sequential layer after the
# first, so the redundant, misleading argument was dropped.)
model.add(layers.Conv2D(
    filters=32,
    kernel_size=4,
    padding='same',
    strides=1,
    activation='relu'))
model.add(layers.MaxPool2D(pool_size=4, padding='same'))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.05))

model.add(layers.Flatten())
model.add(layers.Dense(500, activation='relu'))
def process(filename):
    """Train and evaluate a fingerprint-based binary classifier from SMILES.

    Reads a CSV whose first column is a SMILES string and second column a
    0/1 label, computes Morgan fingerprints, trains a small Keras dense
    network, saves it to 'classifier.h5', and prints a confusion matrix.

    Args:
        filename: Path to the input CSV file.
    """
    # Context manager guarantees the file handle is closed (the original
    # left it open).
    with open(filename, "r") as handle:
        dataset = [line.rstrip().split(",") for line in handle]
    mols = [Chem.MolFromSmiles(line[0]) for line in dataset]

    # BUG FIX: the original loop incremented its counter only inside the
    # `is not None` branch, so it always produced [0..n_valid-1] (i.e. the
    # *first* rows) instead of the positions of the parseable molecules.
    indexy = [u for u, m in enumerate(mols) if m is not None]

    goodmols = [mols[k] for k in indexy]

    Y = [line[1] for line in dataset]
    goody = [Y[k] for k in indexy]
    trainmols, testmols, trainy, testy = train_test_split(goodmols,
                                                          goody,
                                                          test_size=0.1,
                                                          random_state=90)

    # NOTE(review): training fingerprints are built from *all* good
    # molecules rather than `trainmols`, so test molecules also appear in
    # the training set — this looks like data leakage; confirm intent.
    trainfps = [
        AllChem.GetMorganFingerprintAsBitVect(m, 2) for m in goodmols
        if m is not None
    ]
    testfps = [
        AllChem.GetMorganFingerprintAsBitVect(m, 2) for m in testmols
        if m is not None
    ]

    # Same counter fix as above: record the positions of valid fingerprints.
    indexy = [u for u, fp in enumerate(trainfps) if fp is not None]

    newy = array([int(goody[k]) for k in indexy])
    print(len(newy))

    # Convert RDKit bit vectors into dense numpy arrays.
    np_fptrain = []
    for fp in trainfps:
        arr = np.zeros((1, ))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fptrain.append(arr)

    np_fptest = []
    for fp in testfps:
        arr = np.zeros((1, ))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fptest.append(arr)

    a = csr_matrix(np_fptrain, dtype=np.int8).toarray()
    b = csr_matrix(np_fptest, dtype=np.int8).toarray()
    a = abscaler.fit_transform(a)
    # NOTE(review): re-fitting the scaler on the test set leaks test-set
    # statistics; this should probably be `abscaler.transform(b)`.
    b = abscaler.fit_transform(b)

    print(len(a))

    # Local imports preserved from the original script (several duplicate
    # module-level imports or are unused in this function).
    import matplotlib.pyplot as plt
    from sklearn.metrics import mean_squared_error, r2_score
    from sklearn import datasets
    from sklearn.preprocessing import StandardScaler
    from sklearn.model_selection import cross_validate
    from keras.models import Sequential
    from keras import layers
    from keras.optimizers import SGD
    from keras.callbacks import ReduceLROnPlateau
    from keras import regularizers as WeightRegularizer
    from sklearn.metrics import roc_auc_score, roc_curve
    from matplotlib import pyplot as plt

    # Small fully-connected classifier over 2048-bit fingerprints.
    model = Sequential()
    model.add(layers.Dense(236, input_dim=2048, activation='relu'))
    model.add(layers.BatchNormalization())
    model.add(layers.Dropout(.2))
    model.add(layers.Dense(236, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))

    sgd = SGD(lr=.01, momentum=.8, decay=0.0, nesterov=False)
    model.compile(loss='binary_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    # `nb_epoch` is the legacy Keras 1 spelling of `epochs`; kept as-is for
    # compatibility with the environment this script targets.
    model.fit(a, newy, nb_epoch=7, batch_size=3)

    model.save('classifier.h5')
    from sklearn.metrics import confusion_matrix
    from keras.models import load_model
    model = load_model('classifier.h5')

    # Threshold the sigmoid outputs at 0.5 to get hard class predictions.
    predy = model.predict(b)
    predy[predy > 0.5] = 1
    predy[predy <= 0.5] = 0
    # NOTE(review): dropping the last test label papers over a length
    # mismatch caused by the leakage noted above; revisit once the
    # train/test fingerprinting is corrected.
    testy = np.delete(testy, -1)
    testb = [int(h) for h in testy]
    print(confusion_matrix(testb, predy))
Example #5
0
def ResNet50(classes: int = 1000, input_shape: Tuple[int, int] = (224, 224)):
    """ Instantiates the ResNet50 architecture.

    # Arguments
        - classes: The number of classes to predict.
        - input_shape: The size of the inputs (x, y).

    # Returns:
        A Keras model instance.
    """
    # The channel axis is appended to the user-supplied spatial shape.
    img_input = layers.Input(shape=(*input_shape, 3))

    # BatchNorm axis depends on whether channels are last or first.
    if backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    # 1: stem — 7x7/2 conv with explicit padding, BN, ReLU, 3x3/2 max-pool.
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
    x = layers.Conv2D(64, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(0.00005),
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis,
                                  momentum=0.9,
                                  epsilon=1e-5,
                                  name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # 2: stage 2 — one projection block then two identity blocks (256 ch).
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    # 3: stage 3 — 512 channels, four blocks.
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    # 4: stage 4 — 1024 channels, six blocks.
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    # 5: stage 5 — 2048 channels, three blocks.
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    # Classification head: global average pool + softmax dense layer.
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    x = layers.Dense(classes,
                     activation='softmax',
                     kernel_regularizer=l2(0.00005),
                     name='fc1000')(x)

    inputs = img_input

    # Create model.
    model = models.Model(inputs, x, name='resnet50')

    return model
Example #6
0
def generator(d=128, input_shape=(64, 64)):
    """DCGAN generator: maps a 100-dim latent vector to an RGB image.

    Dense projection to a 4x4 feature map, then four stride-2 transposed
    convolutions that double the spatial size each time, ending in a tanh
    output with 3 channels.

    Args:
        d: Base channel width; the deepest layer uses d * 8 filters.
        input_shape: Target (height, width) of the generated image.
            (The original used a mutable list default, fixed to a tuple.)

    Returns:
        A Keras Model from latent vectors to images in [-1, 1].
    """
    conv_options = {
        'kernel_initializer': initializers.normal(mean=0.0, stddev=0.02),
    }
    batchnor_options = {
        'gamma_initializer': initializers.normal(mean=0.1, stddev=0.02),
        'beta_initializer': initializers.constant(0),
        'momentum': 0.9
    }

    inputs = layers.Input([
        100,
    ])

    #----------------------------------------------#
    #   Halve the target spatial size four times to
    #   get the seed feature-map size (64 -> 4).
    #----------------------------------------------#
    s_h, s_w = input_shape[0], input_shape[1]
    for _ in range(4):
        s_h, s_w = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)

    #----------------------------------------------#
    #   100, -> s_h * s_w * d * 8 (e.g. 8192,)
    #----------------------------------------------#
    x = layers.Dense(s_h * s_w * d * 8, **conv_options)(inputs)

    #----------------------------------------------#
    #   Reshape to the seed map, e.g. 4, 4, 512.
    #----------------------------------------------#
    x = layers.Reshape([s_h, s_w, d * 8])(x)
    x = layers.BatchNormalization(**batchnor_options)(x)
    x = layers.Activation("relu")(x)

    #----------------------------------------------#
    #   Three upsampling stages, each doubling the
    #   spatial size and halving the channel count:
    #   4,4,512 -> 8,8,256 -> 16,16,128 -> 32,32,64
    #----------------------------------------------#
    for depth in (d * 4, d * 2, d):
        x = layers.Conv2DTranspose(filters=depth,
                                   kernel_size=4,
                                   strides=2,
                                   padding="same",
                                   **conv_options)(x)
        x = layers.BatchNormalization(**batchnor_options)(x)
        x = layers.Activation("relu")(x)

    #----------------------------------------------#
    #   Final stage: 32, 32, 64 -> 64, 64, 3, tanh.
    #----------------------------------------------#
    x = layers.Conv2DTranspose(filters=3,
                               kernel_size=4,
                               strides=2,
                               padding="same",
                               **conv_options)(x)
    x = layers.Activation("tanh")(x)

    model = Model(inputs, x)
    return model
Example #7
0
    def build_model(self):
        """Build the actor network mapping states to bounded actions.

        The network is a stack of regularized Dense -> BatchNorm -> ReLU ->
        Dropout blocks, a sigmoid output in [0, 1], and a Lambda layer that
        rescales it into [action_low, action_low + action_range]. Also wires
        up `self.train_fn`, which applies externally supplied action-value
        gradients as the policy-gradient loss.
        """
        # Input layer for the state vector.
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Hidden stack: widths 32 -> 64 -> 128 -> 64, each block L1/L2
        # regularized, batch-normalized, ReLU-activated, and dropped out.
        net = states
        for units in (32, 64, 128, 64):
            net = layers.Dense(units=units,
                               use_bias=False,
                               kernel_regularizer=regularizers.l2(0.01),
                               activity_regularizer=regularizers.l1(0.01))(net)
            net = layers.BatchNormalization()(net)
            net = layers.Activation('relu')(net)
            net = layers.Dropout(0.5)(net)

        # Output layer: one sigmoid unit per action dimension.
        raw_actions = layers.Dense(units=self.action_size,
                                   activation='sigmoid',
                                   name='raw_actions')(net)

        # Rescale [0, 1] sigmoid outputs into the environment's action range.
        actions = layers.Lambda(lambda x:
                                (x * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        # Keras model exposing the full state -> action mapping.
        self.model = models.Model(inputs=states, outputs=actions)

        # Policy-gradient loss built from externally supplied Q-value
        # gradients with respect to the actions.
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Incorporate any additional losses here (e.g. from regularizers)

        # Optimizer and a callable training function over the model weights.
        optimizer = optimizers.Adam()
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
Example #8
0
def common_representation(x1, x2):
    """Fuse two feature maps into one shared representation.

    Concatenates the inputs along the channel axis, then applies a 3x3
    convolution, batch normalization, and a ReLU activation.
    """
    merged = layers.concatenate([x1, x2], axis=3)
    conv = layers.Conv2D(128, 3, padding='same',
                         kernel_initializer='he_normal')(merged)
    normed = layers.BatchNormalization(axis=3)(conv)
    return layers.Activation('relu')(normed)
Example #9
0
def my_model(encoder = 'VGG', input_size = (256, 256, 1), k_shot =1, learning_rate = 1e-4, learning_rate2 = 1e-4, no_weight = False):
    """Build a k-shot segmentation model plus an auxiliary surrogate model.

    Args:
        encoder: Backbone identifier; only 'VGG' is implemented here.
        input_size: (height, width, channels) of support/query images.
        k_shot: Number of support samples per episode.
        learning_rate: Adam learning rate for the segmentation model.
        learning_rate2: Adam learning rate for the surrogate model.
        no_weight: Forwarded to EM.vgg_encoder (presumably disables
            pretrained weights — confirm in that module).

    Returns:
        (seg_model, Surrogate_model): two compiled Keras models that share
        the same encoder.
    """
    # Get the encoder
    if encoder == 'VGG':
       encoder = EM.vgg_encoder(input_size = input_size, no_weight = no_weight)
    else:
       # NOTE(review): this branch only prints — `encoder` remains a string,
       # so everything below would fail for any value other than 'VGG'.
       print('Encoder is not defined yet')

    S_input  = layers.Input(input_size)
    # NOTE(review): Q_input is never connected to any model below; it looks
    # like a leftover — confirm before removing.
    Q_input  = layers.Input(input_size)
    ## Encode support and query sample
    s_encoded = encoder(S_input)

    ## Auxiliary task: reconstruct a 3-channel sigmoid output from the
    ## support encoding (trained via Surrogate_model below).
    x1  = layers.Conv2D(128, 3, padding = 'same', kernel_initializer = 'he_normal')(s_encoded)
    x1  = layers.BatchNormalization(axis=3)(x1)
    x1  = layers.Activation('relu')(x1)
    x1  = layers.Conv2D(64, 3, padding = 'same', kernel_initializer = 'he_normal')(x1)
    x1  = layers.Conv2D(3,  3, padding = 'same', kernel_initializer = 'he_normal')(x1)
    xa  = layers.Activation('sigmoid')(x1)

    ###################################### K-shot learning #####################################
    ## K shot: support images carry an extra leading k_shot axis; the support
    ## mask is at 1/4 of the input resolution.
    S_input2  = layers.Input((k_shot, input_size[0], input_size[1], input_size[2]))
    Q_input2  = layers.Input(input_size)
    S_mask2   = layers.Input((k_shot, int(input_size[0]/4), int(input_size[1]/4), 1))

    # Wrap the shared encoder so it runs over each of the k support samples.
    kshot_encoder = keras.models.Sequential()
    kshot_encoder.add(layers.TimeDistributed(encoder, input_shape=(k_shot, input_size[0], input_size[1], input_size[2])))

    s_encoded = kshot_encoder(S_input2)
    q_encoded = encoder(Q_input2)
    s_encoded = layers.TimeDistributed(layers.Conv2D(128, (3, 3), activation='relu', padding='same'))(s_encoded)
    q_encoded = layers.Conv2D(128, (3, 3), activation='relu', padding='same')(q_encoded)

    ## Global Representation: mask-weighted pooling over the support set
    ## (GlobalAveragePooling2D_r is defined elsewhere in this file).
    s_encoded  = GlobalAveragePooling2D_r(S_mask2)(s_encoded)

    ## Common Representation of Support and Query sample
    Bi_rep  = common_representation(s_encoded, q_encoded)

    ## Decode to query segment: three conv blocks with two 2x upsamplings,
    ## ending in a single-channel sigmoid mask.
    x = layers.Conv2D(128, 3, padding = 'same', kernel_initializer = 'he_normal')(Bi_rep)
    x = layers.BatchNormalization(axis=3)(x)
    x = layers.Activation('relu')(x)
    x = layers.UpSampling2D(size=(2, 2))(x)
    x = layers.Conv2D(128, 3, padding = 'same', kernel_initializer = 'he_normal')(x)
    x = layers.BatchNormalization(axis=3)(x)
    x = layers.Activation('relu')(x)
    x = layers.UpSampling2D(size=(2, 2))(x)
    x = layers.Conv2D(128, 3, padding = 'same', kernel_initializer = 'he_normal')(x)
    x = layers.BatchNormalization(axis=3)(x)
    x = layers.Activation('relu')(x)
    x = layers.Conv2D(64, 3,  activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(x)
    x = layers.Conv2D(2, 3,   activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(x)
    final = layers.Conv2D(1, 1,   activation = 'sigmoid')(x)

    seg_model = Model(inputs=[S_input2, S_mask2, Q_input2], outputs = final)

    # NOTE(review): `lr` is the legacy Keras optimizer argument name
    # (renamed to `learning_rate` in later versions).
    seg_model.compile(optimizer = keras.optimizers.Adam(lr = learning_rate), loss = 'binary_crossentropy', metrics = ['accuracy'])

    Surrogate_model = Model(inputs=[S_input], outputs = xa)
    Surrogate_model.compile(loss="binary_crossentropy", optimizer=keras.optimizers.Adam(lr = learning_rate2))

    return seg_model, Surrogate_model
    
    
    
    
Example #10
0
def EfficientNet(input_shape,
                 block_args_list: List[BlockArgs],
                 width_coefficient: float,
                 depth_coefficient: float,
                 include_top=True,
                 weights=None,
                 input_tensor=None,
                 pooling=None,
                 classes=1000,
                 dropout_rate=0.,
                 drop_connect_rate=0.,
                 batch_norm_momentum=0.99,
                 batch_norm_epsilon=1e-3,
                 depth_divisor=8,
                 min_depth=None,
                 data_format=None,
                 default_size=None,
                 **kwargs):
    """
    Builder model for EfficientNets.

    # Arguments:
        input_shape: Optional shape tuple, the input shape
            depends on the configuration, with a minimum
            decided by the number of stride 2 operations.
            When None is provided, it defaults to 224.
            Considered the "Resolution" parameter from
            the paper (inherently Resolution coefficient).
        block_args_list: Optional List of BlockArgs, each
            of which detail the arguments of the MBConvBlock.
            If left as None, it defaults to the blocks
            from the paper.
        width_coefficient: Determines the number of channels
            available per layer. Compound Coefficient that
            needs to be found using grid search on a base
            configuration model.
        depth_coefficient: Determines the number of layers
            available to the model. Compound Coefficient that
            needs to be found using grid search on a base
            configuration model.
        include_top: Whether to include the fully-connected
            layer at the top of the network.
        weights: `None` (random initialization) or
            `imagenet` (ImageNet weights)
        input_tensor: Optional Keras tensor (i.e. output of
            `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: Optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.
        dropout_rate: Float, percentage of random dropout.
        drop_connect_rate: Float, percentage of random droped
            connections.
        batch_norm_momentum: Float, default batch normalization
            momentum. Obtained from the paper.
        batch_norm_epsilon: Float, default batch normalization
            epsilon. Obtained from the paper.
        depth_divisor: Optional. Used when rounding off the coefficient
             scaled channels and depth of the layers.
        min_depth: Optional. Minimum depth value in order to
            avoid blocks with 0 layers.
        data_format: "channels_first" or "channels_last". If left
            as None, defaults to the value set in ~/.keras.
        default_size: Specifies the default image size of the model

    # Raises:
        - ValueError: If weights are not in 'imagenet' or None.
        - ValueError: If weights are 'imagenet' and `classes` is
            not 1000.

    # Returns:
        A Keras Model.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top` '
            'as true, `classes` should be 1000')

    if data_format is None:
        data_format = K.image_data_format()

    if data_format == 'channels_first':
        channel_axis = 1
    else:
        channel_axis = -1

    if default_size is None:
        default_size = 224

    if block_args_list is None:
        block_args_list = get_default_block_list()

    # count number of strides to compute min size
    stride_count = 1
    for block_args in block_args_list:
        if block_args.strides is not None and block_args.strides[0] > 1:
            stride_count += 1

    min_size = int(2**stride_count)

    # Determine proper input shape and default size.
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=default_size,
                                      min_size=min_size,
                                      data_format=data_format,
                                      require_flatten=include_top,
                                      weights=weights)

    # Stem part
    if input_tensor is None:
        inputs = layers.Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            inputs = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            inputs = input_tensor

    x = inputs
    x = layers.Conv2D(filters=round_filters(32, width_coefficient,
                                            depth_divisor, min_depth),
                      kernel_size=[3, 3],
                      strides=[2, 2],
                      kernel_initializer=EfficientNetConvInitializer(),
                      padding='same',
                      use_bias=False)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)
    x = Swish()(x)

    num_blocks = sum([block_args.num_repeat for block_args in block_args_list])
    drop_connect_rate_per_block = drop_connect_rate / float(num_blocks)

    # Blocks part
    for block_idx, block_args in enumerate(block_args_list):
        assert block_args.num_repeat > 0

        # Update block input and output filters based on depth multiplier.
        block_args.input_filters = round_filters(block_args.input_filters,
                                                 width_coefficient,
                                                 depth_divisor, min_depth)
        block_args.output_filters = round_filters(block_args.output_filters,
                                                  width_coefficient,
                                                  depth_divisor, min_depth)
        block_args.num_repeat = round_repeats(block_args.num_repeat,
                                              depth_coefficient)

        # The first block needs to take care of stride and filter size increase.
        x = MBConvBlock(block_args.input_filters, block_args.output_filters,
                        block_args.kernel_size, block_args.strides,
                        block_args.expand_ratio, block_args.se_ratio,
                        block_args.identity_skip,
                        drop_connect_rate_per_block * block_idx,
                        batch_norm_momentum, batch_norm_epsilon,
                        data_format)(x)

        if block_args.num_repeat > 1:
            block_args.input_filters = block_args.output_filters
            block_args.strides = [1, 1]

        for _ in range(block_args.num_repeat - 1):
            x = MBConvBlock(block_args.input_filters,
                            block_args.output_filters, block_args.kernel_size,
                            block_args.strides, block_args.expand_ratio,
                            block_args.se_ratio, block_args.identity_skip,
                            drop_connect_rate_per_block * block_idx,
                            batch_norm_momentum, batch_norm_epsilon,
                            data_format)(x)

    # Head part
    x = layers.Conv2D(filters=round_filters(1280, width_coefficient,
                                            depth_coefficient, min_depth),
                      kernel_size=[1, 1],
                      strides=[1, 1],
                      kernel_initializer=EfficientNetConvInitializer(),
                      padding='same',
                      use_bias=False)(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  momentum=batch_norm_momentum,
                                  epsilon=batch_norm_epsilon)(x)
    x = Swish()(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(data_format=data_format)(x)

        if dropout_rate > 0:
            x = layers.Dropout(dropout_rate)(x)

        x = layers.Dense(classes,
                         kernel_initializer=EfficientNetDenseInitializer())(x)
        x = layers.Activation('softmax')(x)

    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    outputs = x

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)

    model = Model(inputs, outputs)

    # Load weights
    if weights == 'imagenet':
        if default_size == 224:
            if include_top:
                weights_path = get_file(
                    'efficientnet-b0.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b0.h5",
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'efficientnet-b0_notop.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b0_notop.h5",
                    cache_subdir='models')
            model.load_weights(weights_path)

        elif default_size == 240:
            if include_top:
                weights_path = get_file(
                    'efficientnet-b1.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b1.h5",
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'efficientnet-b1_notop.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b1_notop.h5",
                    cache_subdir='models')
            model.load_weights(weights_path)

        elif default_size == 260:
            if include_top:
                weights_path = get_file(
                    'efficientnet-b2.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b2.h5",
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'efficientnet-b2_notop.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b2_notop.h5",
                    cache_subdir='models')
            model.load_weights(weights_path)

        elif default_size == 300:
            if include_top:
                weights_path = get_file(
                    'efficientnet-b3.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b3.h5",
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'efficientnet-b3_notop.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b3_notop.h5",
                    cache_subdir='models')
            model.load_weights(weights_path)

        elif default_size == 380:
            if include_top:
                weights_path = get_file(
                    'efficientnet-b4.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b4.h5",
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'efficientnet-b4_notop.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b4_notop.h5",
                    cache_subdir='models')
            model.load_weights(weights_path)

        elif default_size == 456:
            if include_top:
                weights_path = get_file(
                    'efficientnet-b5.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b5.h5",
                    cache_subdir='models')
            else:
                weights_path = get_file(
                    'efficientnet-b5_notop.h5',
                    "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b5_notop.h5",
                    cache_subdir='models')
            model.load_weights(weights_path)

        # elif default_size == 528:
        #     if include_top:
        #         weights_path = get_file(
        #             'efficientnet-b6.h5',
        #             "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b6.h5",
        #             cache_subdir='models')
        #     else:
        #         weights_path = get_file(
        #             'efficientnet-b6_notop.h5',
        #             "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b6_notop.h5",
        #             cache_subdir='models')
        #     model.load_weights(weights_path)
        #
        # elif default_size == 600:
        #     if include_top:
        #         weights_path = get_file(
        #             'efficientnet-b7.h5',
        #             "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b7.h5",
        #             cache_subdir='models')
        #     else:
        #         weights_path = get_file(
        #             'efficientnet-b7_notop.h5',
        #             "https://github.com/titu1994/keras-efficientnets/releases/download/v0.1/efficientnet-b7_notop.h5",
        #             cache_subdir='models')
        #     model.load_weights(weights_path)

        else:
            raise ValueError(
                'ImageNet weights can only be loaded with EfficientNetB0-7')

    elif weights is not None:
        model.load_weights(weights)

    return model
Example #11
0
    def build_model(self):
        """Build the actor (policy) network mapping states -> actions.

        Side effects: sets ``self.model`` (the Keras policy network) and
        ``self.train_fn`` (a K.function applying one optimizer step from
        externally supplied action-value gradients).
        """
        # State input.
        states = layers.Input(shape=(self.state_size, ), name='states')

        # Three L2-regularized hidden blocks: Dense -> BatchNorm -> ReLU -> Dropout.
        x = states
        for units in (64, 128, 256):
            x = layers.Dense(units=units,
                             kernel_regularizer=regularizers.l2(0.01),
                             activity_regularizer=regularizers.l2(0.01))(x)
            x = layers.BatchNormalization()(x)
            x = layers.Activation(activation='relu')(x)
            x = layers.Dropout(0.3)(x)

        # Sigmoid head emits each action dimension in [0, 1].
        raw_actions = layers.Dense(units=self.action_size,
                                   activity_regularizer=regularizers.l2(0.01),
                                   activation='sigmoid',
                                   name='raw_actions')(x)

        # Rescale [0, 1] to the environment's action range.
        actions = layers.Lambda(lambda raw:
                                (raw * self.action_range) + self.action_low,
                                name='actions')(raw_actions)

        self.model = models.Model(inputs=states, outputs=actions)

        # Policy-gradient loss: Q-value gradients w.r.t. actions are fed in
        # from the critic at train time.
        action_gradients = layers.Input(shape=(self.action_size, ))
        loss = K.mean(-action_gradients * actions)

        # Wrap one Adam update step as a callable training function.
        optimizer = optimizers.Adam()
        updates_op = optimizer.get_updates(params=self.model.trainable_weights,
                                           loss=loss)
        self.train_fn = K.function(
            inputs=[self.model.input, action_gradients,
                    K.learning_phase()],
            outputs=[],
            updates=updates_op)
def conv_model():
    '''
    Model for convolution neural network.
    @Output: output_model, the cnn model with input and output
    '''
    # Grayscale input of IMAGE_SIZE x IMAGE_SIZE.
    img_input = layers.Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 1))

    # Entry block: two plain convolutions, then downsample.
    x = layers.Conv2D(64, 3, activation='relu', padding='same')(img_input)
    x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
    x = layers.MaxPooling2D()(x)

    # Three separable-conv stages, each:
    # Dropout -> SepConv x2 -> BatchNorm -> MaxPool.
    for drop_rate, n_filters in ((0.2, 128), (0.3, 128), (0.5, 256)):
        x = layers.Dropout(drop_rate)(x)
        x = layers.SeparableConv2D(n_filters, 3, activation='relu',
                                   padding='same')(x)
        x = layers.SeparableConv2D(n_filters, 3, activation='relu',
                                   padding='same')(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPooling2D()(x)
    x = layers.Dropout(0.5)(x)

    # Classifier head: three regularized dense blocks.
    x = layers.Flatten()(x)
    for units in (512, 512, 64):
        x = layers.Dense(units, activation='relu')(x)
        x = layers.BatchNormalization()(x)
        x = layers.Dropout(0.5)(x)

    # Two-way softmax output.
    x = layers.Dense(2, activation='softmax')(x)

    output_model = Model(img_input, x)

    return output_model
Example #13
0
def Conv2DClassifierIn1(x_train, y_train, x_test, y_test):
    """Hyperas objective: build, train and score a 2-D CNN classifier.

    NOTE: the ``{{choice(...)}}`` / ``{{uniform(...)}}`` markers below are
    hyperas template syntax, not valid Python. Hyperas re-parses this
    function's literal source and substitutes sampled values before
    execution, so this body must not be reformatted.

    Returns a hyperopt result dict: negated best validation accuracy as
    'loss', STATUS_OK, and the trained model.
    """
    summary = True
    verbose = 1

    # setHyperParams------------------------------------------------------------------------------------------------
    batch_size = {{choice([32, 64, 128, 256])}}
    epoch = {{choice([25, 50, 75, 100, 125, 150, 175, 200])}}

    # Network depth: number of conv stages actually built below.
    conv_block = {{choice(['two', 'three', 'four'])}}

    conv1_num = {{choice([8, 16, 32, 64])}}
    conv2_num = {{choice([16, 32, 64, 128])}}
    conv3_num = {{choice([32, 64, 128])}}
    conv4_num = {{choice([32, 64, 128, 256])}}

    dense1_num = {{choice([128, 256, 512])}}
    dense2_num = {{choice([64, 128, 256])}}

    l1_regular_rate = {{uniform(0.00001, 1)}}
    l2_regular_rate = {{uniform(0.000001, 1)}}
    drop1_num = {{uniform(0.1, 1)}}
    drop2_num = {{uniform(0.0001, 1)}}

    activator = {{choice(['elu', 'relu', 'tanh'])}}
    optimizer = {{choice(['adam', 'rmsprop', 'SGD'])}}

    # Fixed (non-searched) hyperparameters.
    #---------------------------------------------------------------------------------------------------------------
    kernel_size = (3, 3)
    pool_size = (2, 2)
    initializer = 'random_uniform'
    # NOTE(review): loss is binary_crossentropy while the head is a softmax
    # over len(np.unique(y_train)) classes — confirm labels are one-hot
    # with exactly two columns.
    padding_style = 'same'
    loss_type = 'binary_crossentropy'
    metrics = ['accuracy']
    my_callback = None
    # early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    # checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
    #                                verbose=1,
    #                                save_best_only=True)
    # my_callback = callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2,
    #                                           patience=5, min_lr=0.0001)

    # build --------------------------------------------------------------------------------------------------------
    # Stage 1: two convs + pool (always present).
    input_layer = Input(shape=x_train.shape[1:])
    conv = layers.Conv2D(conv1_num,
                         kernel_size,
                         padding=padding_style,
                         kernel_initializer=initializer,
                         activation=activator)(input_layer)
    conv = layers.Conv2D(conv1_num,
                         kernel_size,
                         padding=padding_style,
                         kernel_initializer=initializer,
                         activation=activator)(conv)
    pool = layers.MaxPooling2D(pool_size, padding=padding_style)(conv)
    # Additional stages, each: conv x2 -> BatchNorm -> pool. The branches
    # repeat the earlier stages verbatim so hyperas can sample depth.
    if conv_block == 'two':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)
    elif conv_block == 'three':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)
    elif conv_block == 'four':
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv2_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv3_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

        conv = layers.Conv2D(conv4_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(pool)
        conv = layers.Conv2D(conv4_num,
                             kernel_size,
                             padding=padding_style,
                             kernel_initializer=initializer,
                             activation=activator)(conv)
        BatchNorm = layers.BatchNormalization(axis=-1)(conv)
        pool = layers.MaxPooling2D(pool_size, padding=padding_style)(BatchNorm)

    # Classifier head: Dropout -> Dense -> BatchNorm -> Dropout -> Dense.
    flat = layers.Flatten()(pool)
    drop = layers.Dropout(drop1_num)(flat)

    dense = layers.Dense(dense1_num,
                         activation=activator,
                         kernel_regularizer=regularizers.l1_l2(
                             l1=l1_regular_rate, l2=l2_regular_rate))(drop)
    BatchNorm = layers.BatchNormalization(axis=-1)(dense)
    drop = layers.Dropout(drop2_num)(BatchNorm)

    dense = layers.Dense(dense2_num,
                         activation=activator,
                         kernel_regularizer=regularizers.l1_l2(
                             l1=l1_regular_rate, l2=l2_regular_rate))(drop)

    output_layer = layers.Dense(len(np.unique(y_train)),
                                activation='softmax')(dense)

    model = models.Model(inputs=input_layer, outputs=output_layer)

    if summary:
        model.summary()

# train(self):
    # Balance classes by weighting the loss.
    # NOTE(review): positional arguments to compute_class_weight are
    # deprecated in newer scikit-learn — confirm the pinned version.
    class_weights = class_weight.compute_class_weight('balanced',
                                                      np.unique(y_train),
                                                      y_train.reshape(-1))
    class_weights_dict = dict(enumerate(class_weights))
    model.compile(
        optimizer=optimizer,
        loss=loss_type,
        metrics=metrics  # accuracy
    )

    result = model.fit(x=x_train,
                       y=y_train,
                       batch_size=batch_size,
                       epochs=epoch,
                       verbose=verbose,
                       callbacks=my_callback,
                       validation_data=(x_test, y_test),
                       shuffle=True,
                       class_weight=class_weights_dict)

    # Hyperopt minimizes 'loss', so return the negated best val accuracy.
    validation_acc = np.amax(result.history['val_acc'])
    print('Best validation acc of epoch:', validation_acc)
    return {'loss': -validation_acc, 'status': STATUS_OK, 'model': model}
def EfficientNet(width_coefficient,
                 depth_coefficient,
                 default_size,
                 dropout_rate=0.2,
                 drop_connect_rate=0.2,
                 depth_divisor=8,
                 activation_fn=tf.nn.swish,
                 blocks_args=DEFAULT_BLOCKS_ARGS,
                 model_name='efficientnet',
                 weights='imagenet',
                 input_tensor=None,
                 input_shape=None,
                 pooling=None,
                 classes=1000,
                 **kwargs):
    """Build an EfficientNet classifier.

    width_coefficient / depth_coefficient scale channel counts and block
    repeats per variant; default_size is the variant's native input size.
    Layer names ('stem_*', 'block{i}{a}_*', 'top_*', 'probs') must match
    the pretrained weight files loaded below, so they must not change.
    Returns a compiled-for-inference Keras Model with softmax output.
    """

    img_input = layers.Input(tensor=input_tensor, shape=input_shape)

    #-------------------------------------------------#
    #   Round channel counts so they stay divisible by
    #   depth_divisor after width scaling.
    #-------------------------------------------------#
    def round_filters(filters, divisor=depth_divisor):
        """Round number of filters based on depth multiplier."""
        filters *= width_coefficient
        new_filters = max(divisor,
                          int(filters + divisor / 2) // divisor * divisor)
        # Make sure that round down does not go down by more than 10%.
        if new_filters < 0.9 * filters:
            new_filters += divisor
        return int(new_filters)

    #-------------------------------------------------#
    #   Scale a stage's repeat count by depth_coefficient.
    #-------------------------------------------------#
    def round_repeats(repeats):
        return int(math.ceil(depth_coefficient * repeats))

    #-------------------------------------------------#
    #   Stem: zero-pad + strided 3x3 conv + BN + activation.
    #-------------------------------------------------#
    x = img_input
    x = layers.ZeroPadding2D(padding=correct_pad(x, 3),
                             name='stem_conv_pad')(x)
    x = layers.Conv2D(round_filters(32),
                      3,
                      strides=2,
                      padding='valid',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='stem_conv')(x)
    x = layers.BatchNormalization(name='stem_bn')(x)
    x = layers.Activation(activation_fn, name='stem_activation')(x)

    # Deep-copy so the in-place mutations below don't corrupt the shared
    # DEFAULT_BLOCKS_ARGS between calls.
    blocks_args = deepcopy(blocks_args)
    #-------------------------------------------------#
    #   Total number of MBConv blocks, used to scale the
    #   per-block drop-connect rate linearly with depth.
    #-------------------------------------------------#
    b = 0
    blocks = float(sum(args['repeats'] for args in blocks_args))
    #------------------------------------------------------------------------------#
    #   Loop over the 7 stages; each stage repeats its MBConv
    #   block round_repeats(args['repeats']) times.
    #------------------------------------------------------------------------------#
    for (i, args) in enumerate(blocks_args):
        assert args['repeats'] > 0
        #-------------------------------------------------#
        #   Apply width scaling to this stage's channel counts.
        #-------------------------------------------------#
        args['filters_in'] = round_filters(args['filters_in'])
        args['filters_out'] = round_filters(args['filters_out'])

        for j in range(round_repeats(args.pop('repeats'))):
            # Only the first block of a stage downsamples / changes channels.
            if j > 0:
                args['strides'] = 1
                args['filters_in'] = args['filters_out']
            x = block(x,
                      activation_fn,
                      drop_connect_rate * b / blocks,
                      name='block{}{}_'.format(i + 1, chr(j + 97)),
                      **args)
            b += 1

    #-------------------------------------------------#
    #   Top: 1x1 conv adjusting the channel count.
    #-------------------------------------------------#
    x = layers.Conv2D(round_filters(1280),
                      1,
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name='top_conv')(x)
    x = layers.BatchNormalization(name='top_bn')(x)
    x = layers.Activation(activation_fn, name='top_activation')(x)

    #-------------------------------------------------#
    #   Global average pooling in place of a large dense layer.
    #-------------------------------------------------#
    x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    if dropout_rate > 0:
        x = layers.Dropout(dropout_rate, name='top_dropout')(x)

    x = layers.Dense(classes,
                     activation='softmax',
                     kernel_initializer=DENSE_KERNEL_INITIALIZER,
                     name='probs')(x)

    inputs = img_input
    model = Model(inputs, x, name=model_name)

    #-------------------------------------------------#
    #   Load pretrained weights (by file hash) or a user path.
    #-------------------------------------------------#
    if weights == 'imagenet':
        file_suff = '_weights_tf_dim_ordering_tf_kernels_autoaugment.h5'
        file_hash = WEIGHTS_HASHES[model_name[-2:]][0]
        file_name = model_name + file_suff
        weights_path = get_file(file_name,
                                BASE_WEIGHTS_PATH + file_name,
                                cache_subdir='models',
                                file_hash=file_hash)
        model.load_weights(weights_path)
    elif weights is not None:
        model.load_weights(weights)

    return model
Example #15
0
# Correlation features extracted from the second batch's input.
# NOTE(review): assumes input1..input4 / PCAseries1..2 / Data1..Data4 are
# defined earlier in this script — confirm against the preceding section.
Cor4=Lambda(choose_corrlation,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)

# Relabel (cluster-label) slices for each of the four inputs.
Relabel1=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input1)
Relabel2=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input2)
Relabel3=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries1.shape[0],'cor_number':PCAseries2.shape[0]})(input3)
Relabel4=Lambda(choose_relabel,arguments={'info_number':info_number,'x_samplenumber':PCAseries2.shape[0],'cor_number':PCAseries1.shape[0]})(input4)

# Concatenate the data slices belonging to the same batch.
x_concat1=layers.concatenate([Data1,Data3])#batch1
x_concat2=layers.concatenate([Data2,Data4])#batch2

# Per-input dense projection followed by batch normalization.
x1=layers.Dense(layer,activation=activation)(Data1)
x2=layers.Dense(layer,activation=activation)(Data2)
x3=layers.Dense(layer,activation=activation)(Data3)
x4=layers.Dense(layer,activation=activation)(Data4)

x1=layers.BatchNormalization()(x1)
x2=layers.BatchNormalization()(x2)
x3=layers.BatchNormalization()(x3)
x4=layers.BatchNormalization()(x4)

# Mid-level mixing: pairwise concatenations projected again.
x1_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x2_mid1=layers.Dense(layer,activation=activation)(layers.concatenate([x1,x2]))
x1_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))
x2_mid2=layers.Dense(layer,activation=activation)(layers.concatenate([x3,x4]))

x1_mid1=layers.BatchNormalization()(x1_mid1)
x2_mid1=layers.BatchNormalization()(x2_mid1)
x1_mid2=layers.BatchNormalization()(x1_mid2)
x2_mid2=layers.BatchNormalization()(x2_mid2)

# Shared classification layer sized to the number of recluster labels.
# NOTE(review): 'relu' activation on a classification head is unusual —
# confirm a softmax/sigmoid is applied downstream.
layer_classify=layers.Dense(max(recluster1.max(),recluster2.max())+1,activation='relu')
        #  -------------------------------------------------- Define the CapsNet
        #  (fragment of a larger builder method; enclosing `def` not shown here)

        """
        A Capsule Network on MNIST.
        :param input_shape: data shape, 3d, [width, height, channels]
        :param n_class: number of classes
        :param num_routing: number of routing iterations
        :return: Two Keras Models, the first one used for training, and the second one for evaluation.
                `eval_model` can also be used for training.
        """
        x = layers.Input(shape=input_shape)

        # Layer 1: Just a conventional Conv2D layer
        # (actually two Conv3D blocks: conv -> pool -> BN -> spatial dropout).
        # NOTE(review): beta_init/gamma_init/weights are Keras 1 kwargs on a
        # Keras 2 layers.BatchNormalization — confirm the installed version
        # accepts them.
        conv1t = layers.Conv3D(filters=32, kernel_size=(3,5,5), strides=1, padding='valid', activation='relu', name='conv1t')(x)
        conv1tMP = layers.MaxPooling3D(pool_size=(1, 2, 3), strides=None, padding='valid', data_format=None)(conv1t)
        conv1tNorm = layers.BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None)(conv1tMP)
        conv1tDO = layers.SpatialDropout3D(0.3)(conv1tNorm)
        conv1 = layers.Conv3D(filters=32, kernel_size=(3,5,5), strides=1, padding='valid', activation='relu', name='conv1')(conv1tDO)
        conv1MP = layers.MaxPooling3D(pool_size=(1, 2, 2), strides=None, padding='valid', data_format=None)(conv1)
        conv1Norm = layers.BatchNormalization(epsilon=0.001, axis=-1, momentum=0.99, weights=None, beta_init='zero', gamma_init='one', gamma_regularizer=None, beta_regularizer=None)(conv1MP)
        conv1DO = layers.SpatialDropout3D(0.5)(conv1Norm)
        # Layer 2: Conv2D layer with `squash` activation, then reshape to [None, num_capsule, dim_capsule]
        primarycaps = PrimaryCap(conv1DO, dim_capsule=8, n_channels=16, kernel_size=(3,3,3), strides=1, padding='valid')

        # Layer 3: Capsule layer. Routing algorithm works here.
        digitcaps = CapsuleLayer(num_capsule=n_class, dim_capsule=4, num_routing=num_routing,
                                 name='digitcaps')(primarycaps)

        # Layer 4: This is an auxiliary layer to replace each capsule with its length. Just to match the true label's shape.
        # If using tensorflow, this will not be necessary. :)
        out_caps = Length(name='c_net')(digitcaps)
Example #17
0
    pickle.dump(train_x_word,open("word2id.pkl","wb"))


# Model - 1d cnn
# Architecture: embedding -> (conv + max-pool) x2 -> conv -> flatten ->
# dropout -> batch-norm -> dense -> dropout -> dense -> dropout -> softmax

model = models.Sequential()
# Token-id embedding; vocabulary size UNK_WORD + 1, 100-dim, sequences of 20.
model.add(layers.Embedding(UNK_WORD + 1, 100, input_length=20))
model.add(layers.Convolution1D(256, 3, padding='same'))
model.add(layers.MaxPool1D(3,3,padding='same'))
model.add(layers.Convolution1D(128, 3, padding='same'))
model.add(layers.MaxPool1D(3,3,padding='same'))
model.add(layers.Convolution1D(64, 3, padding='same'))
model.add(layers.Flatten())
model.add(layers.Dropout(0.5))
model.add(layers.BatchNormalization()) # batch normalization layer
model.add(layers.Dense(512,activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(512,activation='relu'))
model.add(layers.Dropout(0.5))
# 300-way softmax output, matching num_classes below.
model.add(layers.Dense(300,activation='softmax'))

model.compile(loss='categorical_crossentropy',
            optimizer='adam',
            metrics=['accuracy'])

print("To categorical")
# One-hot encode the integer labels into 300 classes.
tmp_y = np.asarray(data['b_numerical']).astype(int)
y = []
for i in range(len(tmp_y)):
    y.append(to_categorical(tmp_y[i],num_classes=300)[0])
Example #18
0
                                         batch_size=train_batch_size,
                                         target_size=(299, 299),
                                         shuffle=True)
# Test-set generator reading 299x299 images from disk.
test_data = datagen.flow_from_directory(r'G:\lijiawei\test',
                                        batch_size=test_batch_size,
                                        target_size=(299, 299),
                                        shuffle=True)

classes = 4
img_input = layers.Input(shape=(299, 299, 3))

# Entry flow, block 1: two strided/plain convs with BN + ReLU.
# NOTE(review): layer naming ('block1_*', 'block2_*') matches the Xception
# entry flow — presumably rebuilt to load Xception weights; confirm.
x = layers.Conv2D(32, (3, 3),
                  strides=(2, 2),
                  use_bias=False,
                  name='block1_conv1')(img_input)
x = layers.BatchNormalization(name='block1_conv1_bn')(x)
x = layers.Activation('relu', name='block1_conv1_act')(x)
x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
x = layers.BatchNormalization(name='block1_conv2_bn')(x)
x = layers.Activation('relu', name='block1_conv2_act')(x)

# 1x1 strided projection forming the residual shortcut for block 2.
residual = layers.Conv2D(128, (1, 1),
                         strides=(2, 2),
                         padding='same',
                         use_bias=False)(x)
residual = layers.BatchNormalization()(residual)

x = layers.SeparableConv2D(128, (3, 3),
                           padding='same',
                           use_bias=False,
                           name='block2_sepconv1')(x)
Example #19
0
def discriminator(d=128, input_shape=[64, 64]):
    """DCGAN-style discriminator: four strided 4x4 convs, then a sigmoid score.

    # Arguments
        d: base channel count; blocks use d, 2d, 4d, 8d filters.
        input_shape: spatial size [H, W] of the RGB input.

    # Returns
        A Keras Model mapping an image to a single real/fake probability.
    """
    conv_options = {
        'kernel_initializer': initializers.normal(mean=0., stddev=0.02),
    }
    batchnor_options = {
        'gamma_initializer': initializers.normal(mean=0.1, stddev=0.02),
        'beta_initializer': initializers.constant(0),
        'momentum': 0.9
    }

    inputs = layers.Input([input_shape[0], input_shape[1], 3])

    # Progressively halve spatial size while growing channels:
    # 64,64,3 -> 32,32,d -> 16,16,2d -> 8,8,4d -> 4,4,8d
    x = inputs
    for idx, mult in enumerate((1, 2, 4, 8)):
        x = layers.Conv2D(filters=mult * d,
                          kernel_size=4,
                          strides=2,
                          padding="same",
                          **conv_options)(x)
        # The very first block has no batch normalization.
        if idx > 0:
            x = layers.BatchNormalization(**batchnor_options)(x)
        x = layers.LeakyReLU(0.2)(x)

    x = layers.Flatten()(x)

    # 4*4*8d features -> single probability
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(1, **conv_options)(x)
    x = layers.Activation("sigmoid")(x)

    return Model(inputs, x)
Example #20
0
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2)):
    """A ResNet bottleneck block with a conv layer at the shortcut.

    # Arguments
        input_tensor: input tensor
        kernel_size: kernel size of the middle conv layer at the main path
        filters: list of integers, the filters of the 3 conv layers
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
        strides: strides for the first conv layer (and the shortcut projection)

    # Returns
        Output tensor for the block.

    Note that from stage 3,
    the first conv layer at main path is with strides=(2, 2)
    And the shortcut should have strides=(2, 2) as well
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    # Batch-norm over channels: last axis for channels_last, first otherwise.
    bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # Main path: 1x1 (strided) -> kxk -> 1x1, batch norm after every conv.
    out = layers.Conv2D(nb_filter1, (1, 1),
                        strides=strides,
                        kernel_initializer='he_normal',
                        name=conv_name_base + '2a')(input_tensor)
    out = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(out)
    out = layers.Activation('relu')(out)

    out = layers.Conv2D(nb_filter2,
                        kernel_size,
                        padding='same',
                        kernel_initializer='he_normal',
                        name=conv_name_base + '2b')(out)
    out = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(out)
    out = layers.Activation('relu')(out)

    out = layers.Conv2D(nb_filter3, (1, 1),
                        kernel_initializer='he_normal',
                        name=conv_name_base + '2c')(out)
    out = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(out)

    # Shortcut path: strided 1x1 projection so shapes match for the add.
    shortcut = layers.Conv2D(nb_filter3, (1, 1),
                             strides=strides,
                             kernel_initializer='he_normal',
                             name=conv_name_base + '1')(input_tensor)
    shortcut = layers.BatchNormalization(axis=bn_axis,
                                         name=bn_name_base + '1')(shortcut)

    out = layers.add([out, shortcut])
    return layers.Activation('relu')(out)
def mobilenet_v2_ssdlite(input_image, sub):
    """MobileNetV2-based SSDLite backbone with an auxiliary subtraction branch.

    The `sub` tensor goes through its own stem plus `dw_sub_block` stages and
    is fused (element-wise added) into the main MobileNetV2 trunk at stages
    3, 4 and 5.

    # Arguments
        input_image: main RGB input tensor (comments suggest 300x300x3 —
            TODO confirm against the caller).
        sub: subtraction-stage input tensor of the same spatial size.

    # Returns
        links: list of 6 feature maps at decreasing scales for the SSD heads.
    """
    alpha = 1.0

    first_block_filters = _make_divisible(32 * alpha, 8)

    # Subtraction-branch stem: 300*300*3 -> 150*150*16
    sub = KL.ZeroPadding2D(padding=correct_pad(K, sub, 3),
                         name='sub_stage1_block1_pad')(sub)
    sub = KL.Conv2D(first_block_filters,
                  kernel_size=3, strides=(2, 2),
                  padding='valid', use_bias=False,
                  name='sub_stage1_block1_conv')(sub)
    sub = KL.BatchNormalization(
        epsilon=1e-3, momentum=0.999, name='sub_stage1_block1_bn')(sub)
    sub = KL.ReLU(6., name='sub_stage1_block1_relu')(sub)

    # 150*150*16 -> 75*75*24; further downsampling of `sub` happens lazily,
    # right before each fusion point below.
    sub = dw_sub_block(sub, filters=24, alpha=alpha, stride=2, stage=2, block_id=1)

    # Main trunk, stage1: strided 3x3 stem conv then one expansion-1 block.
    x = KL.ZeroPadding2D(padding=correct_pad(K, input_image, 3),
                         name='bbn_stage1_block1_pad')(input_image)
    x = KL.Conv2D(first_block_filters,
                  kernel_size=3, strides=(2, 2),
                  padding='valid', use_bias=False,
                  name='bbn_stage1_block1_conv')(x)
    x = KL.BatchNormalization(
        epsilon=1e-3, momentum=0.999, name='bbn_stage1_block1_bn')(x)
    x = KL.ReLU(6., name='bbn_stage1_block1_relu')(x)
    x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
                                 expansion=1, stage=1, block_id=2, expand=False)

    # stage2
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
                            expansion=6, stage=2, block_id=1)
    x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
                                 expansion=6, stage=2, block_id=2)

    # stage3
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
                            expansion=6, stage=3, block_id=1)

    # Fuse subtraction branch at 38*38*32.
    sub = dw_sub_block(sub, filters=32, alpha=alpha, stride=2, stage=3, block_id=1)
    x = KL.Add(name='38_38_32stage3_add')([x, sub])

    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                            expansion=6, stage=3, block_id=2)
    x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
                                 expansion=6, stage=3, block_id=3)

    # stage4
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=2,
                            expansion=6, stage=4, block_id=1)
    # Fuse subtraction branch at 19*19*64.
    sub = dw_sub_block(sub, filters=64, alpha=alpha, stride=2, stage=4, block_id=1)
    x = KL.Add(name='19_19_64stage4_add')([x, sub])

    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
                            expansion=6, stage=4, block_id=2)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
                            expansion=6, stage=4, block_id=3)
    x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1,
                            expansion=6, stage=4, block_id=4)

    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
                            expansion=6, stage=4, block_id=5)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
                            expansion=6, stage=4, block_id=6)
    x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1,
                                 expansion=6, stage=4, block_id=7)

    # stage5: keep a side output (link1) from the first block for the heads.
    x, link1 = _inverted_res_block(x, filters=160, alpha=alpha, stride=2,
                            expansion=6, stage=5, block_id=1, output2=True)
    # Fuse subtraction branch at 10*10*160.
    # BUG FIX: was `dw_sub_b lock(...)` — a stray space inside the identifier
    # made this line a SyntaxError.
    sub = dw_sub_block(sub, filters=160, alpha=alpha, stride=2, stage=5, block_id=1)
    x = KL.Add(name='10_10_160stage5_add')([x, sub])

    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
                            expansion=6, stage=5, block_id=2)
    x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1,
                            expansion=6, stage=5, block_id=3)

    x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1,
                                 expansion=6, stage=5, block_id=4)

    # 1x1 expansion to 1280 channels (standard MobileNetV2 head) -> link2.
    x = KL.Conv2D(1280, kernel_size=1, padding='same', use_bias=False, activation=None, name='ssd_2_conv')(x)
    x = KL.BatchNormalization(epsilon=1e-3, momentum=0.999, name='ssd_2_conv_bn')(x)
    link2 = x = KL.ReLU(6., name='ssd_2_conv_relu')(x)

    # Extra SSD feature maps at progressively smaller scales.
    link3 = x = _followed_down_sample_block(x, 256, 512, 3)

    link4 = x = _followed_down_sample_block(x, 128, 256, 4)

    link5 = x = _followed_down_sample_block(x, 128, 256, 5)

    link6 = x = _followed_down_sample_block(x, 64, 128, 6)

    # return 6 feature maps with different scales
    links = [link1, link2, link3, link4, link5, link6]

    return links
Example #22
0
def ResNet(input_shape, **kwargs):
    """Instantiates the ResNet architecture.

    # Arguments
        input_shape: shape tuple of the input images, e.g. `(224, 224, 3)`
            with `channels_last` data format (3 channels; width and height
            should be no smaller than 197).
        **kwargs: configuration values; must provide `res_kernel_size` and
            `res_stage{2..5}_block`. Entries set to None are ignored.

    # Returns
        A Keras model instance.

    # ResNet configuration space:
        kernel_size: 3,5
        stage2_block: [1,3]
        stage3_block: [1,11]
        stage4_block: [1,47]
        stage5_block: [1,4]
    """
    # Drop only entries explicitly set to None. BUG FIX: the previous
    # truthiness test (`if kwargs[k]`) also discarded legitimate falsy
    # values such as 0, causing a later KeyError.
    kwargs = {k: v for k, v in kwargs.items() if v is not None}

    kernel_size = kwargs['res_kernel_size']
    stages = 4

    img_input = layers.Input(shape=input_shape)
    filters = 64

    # stage 1: 7x7/2 conv stem followed by 3x3/2 max-pool.
    x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(img_input)
    x = layers.Conv2D(filters, (7, 7),
                      strides=(2, 2),
                      padding='valid',
                      kernel_initializer='he_normal',
                      name='conv1')(x)
    x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)

    # stages 2-5: one conv_block (projection shortcut) then N identity blocks,
    # doubling the filter count each stage.
    for stage in range(2, stages + 2):
        if stage == 2:
            # Stage 2 keeps stride 1: the max-pool above already downsampled.
            x = conv_block(x,
                           kernel_size, [filters, filters, filters * 4],
                           stage=stage,
                           block='_0_',
                           strides=(1, 1))
        else:
            x = conv_block(x,
                           kernel_size, [filters, filters, filters * 4],
                           stage=stage,
                           block='_0_')
        for i in range(kwargs['res_stage' + str(stage) + '_block']):
            x = identity_block(x,
                               3, [filters, filters, filters * 4],
                               stage=stage,
                               block="_" + str(i + 1) + "_")
        filters *= 2

    x = layers.GlobalAveragePooling2D()(x)
    # Create model.
    model = Model(img_input, x, name='resnet')
    return model
    model = expr.load_weight_and_training_config_and_state()
    expr.printdebug("Checkpoint found. Resuming model at %s" %
                    expr.dir_lasttime)
else:
    ###############################
    # Architecture of the network #
    ###############################
    # First channel: image

    x_inputs = L.Input(shape=SHAPE)
    x = x_inputs  #inputs is used by the line "Model(inputs, ... )" below

    conv11 = L.Conv2D(32, (8, 8), strides=4, padding='valid')
    x = conv11(x)
    x = L.Activation('relu')(x)
    x = L.BatchNormalization()(x)
    x = L.Dropout(dropout)(x)

    conv12 = L.Conv2D(64, (4, 4), strides=2, padding='valid')
    x = conv12(x)
    x = L.Activation('relu')(x)
    x = L.BatchNormalization()(x)
    x = L.Dropout(dropout)(x)

    conv13 = L.Conv2D(64, (3, 3), strides=1, padding='valid')
    x = conv13(x)
    x = L.Activation('relu')(x)
    x = L.BatchNormalization()(x)
    x = L.Dropout(dropout)(x)

    deconv11 = L.Conv2DTranspose(64, (3, 3), strides=1, padding='valid')
Example #24
0
def create_CNN(window_shape):
    """Build a small binary-classification CNN and load pre-trained weights.

    # Arguments
        window_shape: channels-last input window shape (H, W, C) —
            presumably; TODO confirm against the caller.

    # Returns
        A compiled Sequential model with weights loaded from
        'Keras_Models_Saved\\Model_12'.
    """
    K.set_image_dim_ordering('tf')  # channels-last ordering (legacy Keras API)
    # Very small learning rate; the model is loaded from saved weights below.
    adam = optimizers.Adam(lr=0.0000001)  #  , decay=0.000001)
    cnn_model = Sequential()
    print(window_shape)
    cnn_model.add(
        layers.ZeroPadding2D(padding=2,
                             input_shape=window_shape,
                             data_format='channels_last'))
    # Two conv/BN/pool stages, then a small dense classifier head.
    cnn_model.add(layers.Conv2D(18, (5, 5), strides=(1, 1), activation='relu'))
    cnn_model.add(layers.BatchNormalization())
    cnn_model.add(layers.MaxPooling2D(pool_size=(2, 2), padding='valid'))
    cnn_model.add(layers.ZeroPadding2D(padding=(1, 1), data_format=None))
    cnn_model.add(layers.Conv2D(36, (3, 3), strides=(1, 1), activation='relu'))
    cnn_model.add(layers.BatchNormalization())
    cnn_model.add(layers.MaxPooling2D(pool_size=(2, 2), padding='valid'))
    cnn_model.add(layers.Flatten())
    cnn_model.add(layers.Dense(20, activation='relu'))
    cnn_model.add(layers.Dropout(0.5))
    cnn_model.add(layers.Dense(1, activation='sigmoid'))
    cnn_model.summary()
    cnn_model.compile(optimizer=adam, loss='binary_crossentropy')
    # Raw string: in the original non-raw literal, '\M' is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Python).
    # The byte value of the path is unchanged.
    cnn_model.load_weights(r'Keras_Models_Saved\Model_12')
    print(cnn_model.get_weights())
    return cnn_model
Example #25
0
    def add_common_layers(y):
        """Apply the shared post-conv stack: batch norm, then leaky ReLU."""
        for common in (layers.BatchNormalization(), layers.LeakyReLU()):
            y = common(y)
        return y
Example #26
0
def run_fte_bte_exp(data_x, data_y, which_task, model, ntrees=30, shift=0):
    """Run a forward/backward transfer-efficiency experiment for one shift.

    # Arguments
        data_x, data_y: dataset samples and labels.
        which_task: task index forwarded to fte_bte_experiment.
        model: learner type, "odin" (neural network) or "odif" (forest).
        ntrees: number of trees for the forest learner.
        shift: cross-validation shift forwarded to cross_val_data.

    # Returns
        List with one result DataFrame per slot.

    # Raises
        ValueError: if `model` is neither "odin" nor "odif".
    """
    df_total = []

    # Rotates the batch of training samples that are used from each class
    # in each task.
    for slot in range(1):
        train_x, train_y, test_x, test_y = cross_val_data(
            data_x, data_y, shift, slot)

        if model == "odif":
            # Forests take flat feature vectors: collapse H*W*C per sample.
            train_x = train_x.reshape(
                train_x.shape[0],
                train_x.shape[1] * train_x.shape[2] * train_x.shape[3])
            test_x = test_x.reshape(
                test_x.shape[0],
                test_x.shape[1] * test_x.shape[2] * test_x.shape[3])

        if model == "odin":
            # clear GPU memory before each run, to avoid OOM error
            clear_session()

            default_transformer_class = NeuralClassificationTransformer

            # Five-conv CNN encoder followed by two 2000-unit dense layers.
            network = keras.Sequential()
            network.add(
                layers.Conv2D(
                    filters=16,
                    kernel_size=(3, 3),
                    activation="relu",
                    input_shape=np.shape(data_x)[1:],
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=32,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=64,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            network.add(
                layers.Conv2D(
                    filters=128,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))
            network.add(layers.BatchNormalization())
            # NOTE(review): 254 filters looks like a typo for 256; kept as-is
            # to preserve behavior / weight compatibility — confirm upstream.
            network.add(
                layers.Conv2D(
                    filters=254,
                    kernel_size=(3, 3),
                    strides=2,
                    padding="same",
                    activation="relu",
                ))

            network.add(layers.Flatten())
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(2000, activation="relu"))
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(2000, activation="relu"))
            network.add(layers.BatchNormalization())
            network.add(layers.Dense(units=4, activation="softmax"))

            default_transformer_kwargs = {
                "network": network,
                "euclidean_layer_idx": -2,
                "loss": "categorical_crossentropy",
                "optimizer": Adam(3e-4),
                "fit_kwargs": {
                    "epochs": 100,
                    "callbacks":
                    [EarlyStopping(patience=5, monitor="val_loss")],
                    "verbose": False,
                    "validation_split": 0.33,
                    "batch_size": 32,
                },
            }
            default_voter_class = KNNClassificationVoter
            default_voter_kwargs = {"k": int(np.log2(360))}
            default_decider_class = SimpleArgmaxAverage

            p_learner = ProgressiveLearner(
                default_transformer_class=default_transformer_class,
                default_transformer_kwargs=default_transformer_kwargs,
                default_voter_class=default_voter_class,
                default_voter_kwargs=default_voter_kwargs,
                default_decider_class=default_decider_class,
            )
        elif model == "odif":
            p_learner = LifelongClassificationForest()
        else:
            # Fail fast with a clear message. Previously an unknown model
            # fell through to a confusing NameError on `p_learner` below.
            raise ValueError(
                "unknown model %r (expected 'odin' or 'odif')" % (model,))

        df = fte_bte_experiment(
            train_x,
            train_y,
            test_x,
            test_y,
            ntrees,
            shift,
            slot,
            model,
            p_learner,
            which_task,
            acorn=12345,
        )

        df_total.append(df)

    return df_total
 def conv_bonnet(ip):
     """Stem: 16-filter 5x5 'same' conv with ReLU, followed by batch norm."""
     conv_out = layers.Conv2D(16, (5, 5), activation="relu", padding="same")(ip)
     return layers.BatchNormalization()(conv_out)
    x_col="Überschrift der Spalte mit den Dateinamen",
    batch_size=1,
    shuffle=False,
    class_mode=None,
    target_size=(200, 200))

input_1 = Input(shape=(200, 200, 3), dtype='float32', name='egal')

# This is module with image preprocessing utilities

# Separable-conv feature extractor ending in one sigmoid unit (binary
# classification).
# NOTE(review): input_shape=(150, 150, 3) on the first layer is presumably
# ignored since the layer is called on an existing 200x200x3 tensor — verify.
x = layers.SeparableConv2D(32, (3, 3),
                           activation='relu',
                           input_shape=(150, 150, 3))(input_1)
x = layers.MaxPooling2D((3, 3))(x)
x = layers.SeparableConv2D(64, (3, 3), activation='relu', padding='same')(x)
x = layers.BatchNormalization()(x)
z = layers.SeparableConv2D(64, (3, 3), activation='relu')(x)
z = layers.MaxPooling2D((2, 2))(z)
z = layers.SeparableConv2D(128, (3, 3), activation='relu')(z)
z = layers.BatchNormalization()(z)
z = layers.MaxPooling2D((2, 2))(z)
z = layers.SeparableConv2D(256, (3, 3), activation='relu', padding='same')(z)
z = layers.BatchNormalization()(z)
z = layers.MaxPooling2D((2, 2))(z)
z = layers.SeparableConv2D(512, (3, 3), activation='relu')(z)
z = layers.BatchNormalization()(z)
z = layers.Flatten()(z)
z = layers.Dense(512, activation='relu')(z)
z = layers.Dense(64, activation='relu')(z)
model_output_1 = layers.Dense(1, activation='sigmoid')(z)
model = Model(input_1, model_output_1)
Example #29
0
    def build_model(self):
        """Build a critic (value) network that maps (state, action) pairs -> Q-values."""
        # Define input layers
        states = layers.Input(shape=(self.state_size, ), name='states')
        actions = layers.Input(shape=(self.action_size, ), name='actions')

        # State pathway: 32 -> 64 -> 128 units; each Dense is followed by
        # BN + ReLU + Dropout.
        net_states = layers.Dense(
            units=32,
            use_bias=False,
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Activation('relu')(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        # BUG FIX: the 64- and 128-unit Dense layers below were applied to
        # `states` instead of `net_states`, silently discarding the earlier
        # layers' output (the action pathway chains correctly).
        net_states = layers.Dense(
            units=64,
            use_bias=False,
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Activation('relu')(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        net_states = layers.Dense(
            units=128,
            use_bias=False,
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_states)
        net_states = layers.BatchNormalization()(net_states)
        net_states = layers.Activation('relu')(net_states)
        net_states = layers.Dropout(0.5)(net_states)

        # Action pathway: mirrors the state pathway (32 -> 64 -> 128 units).
        net_actions = layers.Dense(
            units=32,
            use_bias=False,
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Activation('relu')(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        net_actions = layers.Dense(
            units=64,
            use_bias=False,
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Activation('relu')(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        net_actions = layers.Dense(
            units=128,
            use_bias=False,
            kernel_regularizer=regularizers.l2(0.01),
            activity_regularizer=regularizers.l1(0.01))(net_actions)
        net_actions = layers.BatchNormalization()(net_actions)
        net_actions = layers.Activation('relu')(net_actions)
        net_actions = layers.Dropout(0.5)(net_actions)

        # Combine state and action pathways
        net = layers.Add()([net_states, net_actions])
        net = layers.Activation('relu')(net)

        # Add final output layer to produce action values (Q values)
        Q_values = layers.Dense(units=1, name='q_values')(net)

        # Create Keras model
        self.model = models.Model(inputs=[states, actions], outputs=Q_values)

        # Define optimizer and compile model for training with built-in loss function
        optimizer = optimizers.Adam()
        self.model.compile(optimizer=optimizer, loss='mse')

        # Compute action gradients (derivative of Q values w.r.t. to actions)
        action_gradients = K.gradients(Q_values, actions)

        # Define an additional function to fetch action gradients (to be used by actor model)
        self.get_action_gradients = K.function(
            inputs=[*self.model.input, K.learning_phase()],
            outputs=action_gradients)
def batchActivate(x):
    """Batch-normalize the input tensor, then apply a ReLU activation."""
    normalized = layers.BatchNormalization()(x)
    return layers.Activation('relu')(normalized)