Code Example #1
File: NetModel_conc.py Project: Azurequeen/python
def ClassNet():
    inputs = Input(config["input_shape"])
    conv1 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(inputs)

    l = spacial_red(conv1, 64)
    #l=res(l,128,128)

    l = spacial_red(l, 128)
    #l=res(l,256,256)

    l = spacial_red(l, 256)
    #l=res(l,512,512)

    l = f_red(l, 512)
    #l=res(l,256,256)

    l = f_red(l, 256)

    l = Conv3D(128, (6, 6, 6), activation='relu', padding='valid')(l)
    l = Conv3D(2, (1, 1, 1))(l)
    l = core.Reshape((2, 1))(l)
    l = core.Permute((2, 1))(l)
    act = Activation('softmax')(l)
    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model
Code Example #2
File: NetModel.py Project: Azurequeen/python
def ClassNet_MultiScale():
    inputs = Input((1, 48, 48, 48))
    #noise = GaussianNoise(stddev=0.01, input_shape=(1, 48, 48, 48))(inputs)
    ch1 = inputs  #add([inputs, noise])
    ch2 = Cropping3D(((8, 8), (8, 8), (8, 8)))(inputs)
    ch3 = Cropping3D(((16, 16), (16, 16), (16, 16)))(inputs)

    #ch2 = UpSampling3D(size=(2, 2, 2))(ch2)
    #ch3 = UpSampling3D(size=(4, 4, 4))(ch3)

    ch1 = ConvNet48(ch1)
    ch2 = ConvNet32(ch2)
    ch3 = ConvNet16(ch3)
    #ch3 = ConvNet12(ch3)

    #fusion = add([ch1, ch2, ch3])
    fusion = concatenate([ch1, ch2, ch3], axis=1)
    fusion = Dense(2)(fusion)  #Conv3D(2, (1, 1, 1), padding='same', activation='relu')(fusion)
    fusion = core.Reshape((2, 1))(fusion)
    #a = core.Reshape((6, 1))(fusion)
    a = core.Permute((2, 1))(fusion)
    act = Activation('softmax')(a)
    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    return model
Code Example #3
def Unet(nClasses,
         optimizer=None,
         input_width=256,
         input_height=256,
         nChannels=4):
    inputs = Input((nChannels, input_height, input_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2],
                mode='concat',
                concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv4)

    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1],
                mode='concat',
                concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    conv6 = Convolution2D(nClasses,
                          1,
                          1,
                          activation='relu',
                          border_mode='same')(conv5)
    conv6 = core.Reshape((nClasses, input_height * input_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy",
                      optimizer=optimizer,
                      metrics=['accuracy'])

    return model
Code Example #4
def getShallowUnet(patch_height, patch_width, n_ch):
    #
    inputs = Input((patch_height, patch_width, n_ch))
    #
    conv1 = Conv2D(32, (3, 3), padding="same", activation="relu")(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), padding="same", activation="relu")(conv1)
    print(conv1.shape)
    #
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    print(pool1.shape)
    #
    conv2 = Conv2D(64, (3, 3), padding="same", activation="relu")(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), padding="same", activation="relu")(conv2)
    print(conv2.shape)
    #
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    print(pool2.shape)
    #
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)
    print(conv3.shape)
    #
    up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=-1)
    print(up1.shape)
    #
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv4)
    print(conv4.shape)
    #
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=-1)
    print(up2.shape)
    #
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv5)
    print(conv5.shape)
    #
    conv6 = Conv2D(2, (1, 1), activation='relu', padding='same')(conv5)
    print(conv6.shape)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    print(conv6.shape)
    conv6 = core.Permute((2, 1))(conv6)
    print(conv6.shape)
    #
    conv7 = core.Activation('softmax')(conv6)
    print(conv7.shape)

    model = Model(inputs=inputs, outputs=conv7)

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.summary()

    return model
Code Example #5
def get_dilated_bn_unet(n_ch, patch_height, patch_width, dilaterate=3):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    conv1 = Conv2d_BN(inputs, 32, (3, 3))
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2d_BN(conv1, 32, (3, 3))
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2d_BN(pool1, 64, (3, 3))
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2d_BN(conv2, 64, (3, 3))
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2d_BN(pool2, 128, (3, 3))
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2d_BN(conv3, 128, (3, 3))

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2d_BN(up1, 64, (3, 3))
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2d_BN(conv4, 64, (3, 3))
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2d_BN(up2, 32, (3, 3))
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2d_BN(conv5, 32, (3, 3))
    #
    conv6 = Conv2d_BN(conv5, 2, (1, 1))
    print(conv6)
    print(patch_height, patch_width)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)
    # scheduler = LearningRateScheduler(mlr.lr_scheduler)
    sgd = SGD(lr=0.01, decay=2e-5, momentum=0.8, nesterov=False)
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
    # adam=optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-07)
    # model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # 1. Objective functions:
    # (1) mean_squared_error / mse: mean squared error, a common objective; formula: ((y_pred - y_true) ** 2).mean()
    # (2) mean_absolute_error / mae: mean absolute error; formula: (|y_pred - y_true|).mean()
    # (3) mean_absolute_percentage_error / mape: formula: (|(y_true - y_pred) / clip(|y_true|, epsilon, infinite)|).mean(axis=-1) * 100;
    #     differs from mae in that each difference is divided by the true value clipped to [epsilon, infinite] before averaging.
    # (4) mean_squared_logarithmic_error / msle: formula: ((log(clip(y_pred, epsilon, infinite) + 1) - log(clip(y_true, epsilon, infinite) + 1)) ** 2).mean(axis=-1);
    #     clips predictions and targets, takes logs, then averages the squared difference.
    # (5) squared_hinge: formula: (max(1 - y_true * y_pred, 0) ** 2).mean(axis=-1)
    # (6) hinge: formula: (max(1 - y_true * y_pred, 0)).mean(axis=-1)
    # (7) binary_crossentropy: the familiar logistic loss, i.e. cross-entropy for binary classification
    # (8) categorical_crossentropy: cross-entropy for multi-class classification
    #
    # 2. Metrics:
    # (1) binary_accuracy: mean accuracy over all predictions, for binary problems
    # (2) categorical_accuracy: mean accuracy over all predictions, for multi-class problems
    # (3) sparse_categorical_accuracy: same as categorical_accuracy, useful with sparse (integer) targets
    # (4) top_k_categorical_accuracy: top-k accuracy; a prediction counts as correct if the target class is among the k highest-scoring classes
    # (5) sparse_top_k_categorical_accuracy: same as top_k_categorical_accuracy, for sparse targets
    return model
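The loss formulas listed in the comments above can be checked by hand. A minimal numpy sketch (not from any of the projects in these examples) verifying mean_squared_error and hinge against the stated formulas:

import numpy as np

y_true = np.array([1.0, -1.0, 1.0])
y_pred = np.array([0.8, -0.3, 0.2])

# mean_squared_error: ((y_pred - y_true) ** 2).mean()
mse = ((y_pred - y_true) ** 2).mean()
# hinge: (max(1 - y_true * y_pred, 0)).mean()
hinge = np.maximum(1.0 - y_true * y_pred, 0.0).mean()

print(mse)    # 0.39
print(hinge)  # 0.5666...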
Code Example #6
def get_gnet(n_ch,patch_height,patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    up1 = UpSampling2D(size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv2)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool1)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv3)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool2)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #
    conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool3)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv5)
    #
    up2 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv6)
    #
    up3 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up3)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv7)
    #
    up4 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up4)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv8)
    #
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv8)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool4)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    #
    conv10 = Convolution2D(2, 1, 1, activation='relu', border_mode='same')(conv9)
    conv10 = core.Reshape((2,patch_height*patch_width))(conv10)
    conv10 = core.Permute((2,1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(input=inputs, output=conv10)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])

    return model
Code Example #7
    def build_model_8x96x96(self, label_num):
        inputs = Input((self.img_frames, self.img_rows, self.img_cols, 5))

        # conv_1 = self.Residual3d_x3(inputs, 16, (1,3,3))
        # pool = self.conv3d_as_pool(conv_1,32,(1,3,3),(1,2,2))
        conv_1 = self.Residual3d_x3(inputs, 16, (3, 3, 3))
        pool = self.conv3d_as_pool(conv_1, 32, (3, 3, 3), (2, 2, 2))
        print("conv1 shape:", pool.shape)

        conv_2 = self.Residual3d_x3(pool, 32, (3, 3, 3))
        pool = self.conv3d_as_pool(conv_2, 64, (3, 3, 3), (2, 2, 2))
        print("conv2 shape:", pool.shape)

        conv_3 = self.Residual3d_x3(pool, 64, (1, 3, 3))
        pool = self.conv3d_as_pool(conv_3, 128, (1, 3, 3), (2, 2, 2))
        print("conv3 shape:", pool.shape)

        # conv_4 = self.Residual3d_x3(pool, 128, (1,5,5))
        # pool = self.conv3d_as_pool(conv_4,256,(1,5,5),(1,3,3))
        conv_4 = self.Residual3d_x3(pool, 128, (1, 3, 3))
        pool = self.conv3d_as_pool(conv_4, 256, (1, 3, 3), (1, 2, 2))
        print("conv4 shape:", pool.shape)

        conv_5 = self.Residual3d_x3(pool, 256, (1, 3, 3))
        pool = self.conv3d_as_pool(conv_5, 512, (1, 3, 3), (1, 2, 2))
        print("conv5 shape:", pool.shape)

        bottom = self.Residual3d_x3(pool, 512, (1, 3, 3))
        print("bottom shape:", bottom.shape)

        # deconv_4 = self.deconv3d_x3(conv_4, bottom, 128, (1,3,3), (1, 2, 2))
        deconv_5 = self.deconv3d_x3(conv_5, bottom, 256, (1, 3, 3), (1, 2, 2))
        deconv_4 = self.deconv3d_x3(conv_4, deconv_5, 128, (1, 3, 3),
                                    (1, 2, 2))
        deconv_3 = self.deconv3d_x3(conv_3, deconv_4, 64, (1, 3, 3), (2, 2, 2))
        deconv_2 = self.deconv3d_x3(conv_2, deconv_3, 32, (3, 3, 3), (2, 2, 2))
        deconv_1 = self.deconv3d_x3(conv_1, deconv_2, 16, (3, 3, 3), (2, 2, 2))

        output_layer = Conv3D(label_num,
                              1,
                              activation='relu',
                              padding='same',
                              kernel_initializer='he_normal')(deconv_1)
        output_layer = core.Reshape(
            (label_num,
             self.img_rows * self.img_cols * self.img_frames))(output_layer)
        output_layer = core.Permute((2, 1))(output_layer)
        output_layer = core.Activation('softmax')(output_layer)
        print("output_layer shape:", output_layer.shape)
        model = Model(inputs=inputs, outputs=output_layer)

        # initialize the optimizer
        opt = optimizers.Adam(lr=1e-4)
        self.lossfn = MyDefineLoss.wrapped_partial(
            MyDefineLoss.combine_wieght_ce_dice_loss,
            weights=self.class_weights)
        model.compile(opt, loss=self.lossfn, metrics=['accuracy'])
        return model
Code Example #8
def getUNet(input_shape, nb_classes):
    (n_ch, patch_height, patch_width) = input_shape
    inputs = Input(input_shape)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2],
                mode='concat',
                concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv4)
    #
    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1],
                mode='concat',
                concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv5)
    #
    conv6 = Convolution2D(nb_classes,
                          1,
                          1,
                          activation='relu',
                          border_mode='same')(conv5)
    conv6 = core.Reshape((nb_classes, patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Code Example #9
File: get_NN_Model.py Project: zcxiong/retina-CDNet
def get_CDNet(n_ch, patch_height, patch_width):
    #512
    inputs = Input((n_ch, patch_height, patch_width))
    seg1 = get_segBlock(inputs, "seg1", 32)
    #256
    pool1 = MaxPooling2D(pool_size=(2, 2), name="pool1")(seg1)
    seg2 = get_segBlock(pool1, "seg2", 48)
    #128
    pool2 = MaxPooling2D(pool_size=(2, 2), name="pool2")(seg2)
    seg3 = get_segBlock(pool2, "seg3", 64)
    #64
    pool3 = MaxPooling2D(pool_size=(2, 2), name="pool3")(seg3)
    seg4 = get_segBlock(pool3, "seg4", 80)
    #32
    pool4 = MaxPooling2D(pool_size=(2, 2), name="pool4")(seg4)
    seg5 = get_segBlock(pool4, "seg5", 96)
    conca1 = concatenate([pool4, seg5], axis=1, name="conca1")
    #64
    up1 = UpSampling2D(size=(2, 2))(conca1)
    seg6 = get_segBlock(up1, "seg6", 80)
    conca2 = concatenate([pool3, seg6], axis=1, name="conca2")
    #128
    up2 = UpSampling2D(size=(2, 2))(conca2)
    seg7 = get_segBlock(up2, "seg7", 64)
    conca3 = concatenate([pool2, seg7], axis=1, name="conca3")

    #256
    up3 = UpSampling2D(size=(2, 2))(conca3)
    seg8 = get_segBlock(up3, "seg8", 48)
    conca4 = concatenate([pool1, seg8], axis=1, name="conca4")

    #512
    up4 = UpSampling2D(size=(2, 2))(conca4)
    seg9 = get_segBlock(up4, "seg9", 32)
    conca5 = concatenate([seg1, seg9], axis=1, name="conca5")

    #
    conv10 = Conv2D(2, (1, 1),
                    padding="valid",
                    activation="relu",
                    use_bias=True,
                    name="conv10")(conca5)
    RS = core.Reshape((2, patch_height * patch_width))(conv10)
    PM = core.Permute((2, 1))(RS)
    ############
    ACT = core.Activation('softmax')(PM)
    model = Model(inputs=inputs, outputs=ACT)
    #model.load_weights('./'+name_experiment+'/'+name_experiment +'_best_weights.h5', by_name=True)

    sgd1 = SGD(lr=init_lr, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd1,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Code Example #10
File: NetModel.py Project: Azurequeen/python
def ClassNet_48():
    inputs = Input((1, 48, 48, 48))
    ch1 = ConvNet48(inputs)
    ch1 = Dense(256)(ch1)
    ch1 = Dense(2)(ch1)
    a = core.Reshape((2, 1))(ch1)
    a = core.Permute((2, 1))(a)
    act = Activation('softmax')(a)
    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    return model
Code Example #11
def build_unet(input_shape, nClasses):

    inputs = Input(input_shape)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu',
                          border_mode='same')(conv3)

    up1 = Concatenate()([UpSampling2D(size=(2, 2))(conv3), conv2])
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu',
                          border_mode='same')(conv4)

    up2 = Concatenate()([UpSampling2D(size=(2, 2))(conv4), conv1])
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu',
                          border_mode='same')(conv5)

    conv6 = Convolution2D(nClasses,
                          1,
                          1,
                          activation='relu',
                          border_mode='same')(conv5)
    conv6 = core.Reshape((nClasses, input_shape[0] * input_shape[1]))(conv6)
    conv6 = core.Permute((2, 1))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)

    #if optimizer is not None:
    #    model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Code Example #12
def segnet(nClasses, optimizer=None, input_height=96, input_width=64):
    kernel = 3
    filter_size = 64
    pad = 1
    pool_size = 2

    inputs = Input((input_height, input_width, 1))

    # encoder
    x = ZeroPadding2D(padding=(pad, pad))(inputs)
    x = Conv2D(filter_size, (kernel, kernel), padding='valid')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2D(128, (kernel, kernel), padding='valid')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2D(256, (kernel, kernel), padding='valid')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(pool_size, pool_size))(x)

    # decoder
    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2D(256, (kernel, kernel), padding='valid')(x)

    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2D(128, (kernel, kernel), padding='valid')(x)

    x = UpSampling2D(size=(pool_size, pool_size))(x)
    x = ZeroPadding2D(padding=(pad, pad))(x)
    x = Conv2D(128, (kernel, kernel), padding='valid')(x)

    x = Conv2D(nClasses, (1, 1), padding='valid')(x)

    x = core.Reshape((nClasses, input_height * input_width))(x)

    x = core.Permute((2, 1))(x)
    x = Activation('softmax')(x)

    model = Model(inputs=inputs, outputs=x)
    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Code Example #13
File: NetModel_conc.py Project: Azurequeen/python
def ClassNet_FlexWindow():
    inputs = Input((1, 48, 48, 48))
    ch1 = inputs

    ch1 = ConvNet48(ch1)

    fusion = Conv3D(2, (1, 1, 1), padding='same', activation='relu')(ch1)

    a = core.Reshape((2, 1))(fusion)
    a = core.Permute((2, 1))(a)
    act = Activation('softmax')(a)
    model = Model(inputs=inputs, outputs=act)
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])
    # earlier variant: loss='mean_squared_error', metrics=['accuracy']
    return model
Code Example #14
def get_resnet18_unet(n_ch, patch_height, patch_width, dilaterate=3,
                      filters=[64, 128, 256, 512], blocks=[2, 2, 2, 2]):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    x = Conv2d_BN(inputs, 32, (3, 3))
    lay = []
    for stage, block in enumerate(blocks):
        for i in range(block):
            x = resnet_unit(x, filters[stage])
            if i == block - 1 and stage == len(filters) - 1:
                lay.append(x)  # deepest stage: keep features, no pooling
            elif i == block - 1:
                lay.append(x)
                x = MaxPooling2D((2, 2))(x)

    print(lay)
    up1 = UpSampling2D(size=(2, 2))(lay[3])
    up1 = concatenate([lay[2], up1], axis=1)
    conv1 = Conv2d_BN(up1, 128, (3, 3))
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2d_BN(conv1, 128, (3, 3))

    up2 = UpSampling2D(size=(2, 2))(conv1)
    up2 = concatenate([lay[1], up2], axis=1)
    conv2 = Conv2d_BN(up2, 64, (3, 3))
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2d_BN(conv2, 64, (3, 3))

    up3 = UpSampling2D(size=(2, 2))(conv2)
    up3 = concatenate([lay[0], up3], axis=1)
    conv3 = Conv2d_BN(up3, 32, (3, 3))
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2d_BN(conv3, 32, (3, 3))

    conv4 = Conv2d_BN(conv3, 2, (1, 1))


    print(conv4)
    conv4 = core.Reshape((2, patch_height * patch_width))(conv4)
    conv4 = core.Permute((2, 1))(conv4)
    ############
    conv5 = core.Activation('softmax')(conv4)

    model = Model(inputs=inputs, outputs=conv5)
    # scheduler = LearningRateScheduler(mlr.lr_scheduler)
    sgd = SGD(lr=0.01, decay=2e-5, momentum=0.8, nesterov=False)
    model.compile(optimizer=sgd, loss='binary_crossentropy', metrics=['accuracy'])
    return model
Code Example #15
File: Unet.py Project: zhaokaithu/ECG_UNet
def Unet(nClasses, optimizer=None, input_length=1800, nChannels=1):
    inputs = Input((input_length, nChannels))
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(inputs)
    conv1 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv1)
    pool1 = MaxPooling1D(pool_size=2)(conv1)

    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv2)
    pool2 = MaxPooling1D(pool_size=2)(conv2)
    
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool2)
    conv3 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv3)
    pool3 = MaxPooling1D(pool_size=2)(conv3)

    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(pool3)
    conv4 = Dropout(0.5)(conv4)
    conv4 = Conv1D(128, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv4)

    up1 = Conv1D(64, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv4))
    merge1 = concatenate([up1, conv3], axis=-1)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge1)
    conv5 = Conv1D(64, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv5)
    
    up2 = Conv1D(32, 2, activation='relu', padding='same', kernel_initializer = 'he_normal')(UpSampling1D(size=2)(conv5))
    merge2 = concatenate([up2, conv2], axis=-1)
    conv6 = Conv1D(32, 32, activation='relu', padding='same', kernel_initializer = 'he_normal')(merge2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Conv1D(32, 32, activation='relu', padding='same')(conv6)
    
    up3 = Conv1D(16, 2, activation='relu', padding='same', kernel_initializer='he_normal')(UpSampling1D(size=2)(conv6))
    merge3 = concatenate([up3, conv1], axis=-1)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(merge3)
    conv7 = Conv1D(16, 32, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    
    conv8 = Conv1D(nClasses, 1, activation='relu', padding='same', kernel_initializer='he_normal')(conv7)
    conv8 = core.Reshape((nClasses, input_length))(conv8)
    conv8 = core.Permute((2, 1))(conv8)

    conv9 = core.Activation('softmax')(conv8)

    model = Model(inputs=inputs, outputs=conv9)
    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Code Example #16
def unet(n_ch,
         patch_height,
         patch_width,
         category_num,
         act='selu',
         loss_weight=None,
         sample_weight_mode=None,
         GPU_num=1,
         net_name='unet16',
         fine_tune=1,
         pretrain_model=''):
    nets = {
        'unet': unet_backbone,
    }
    learn_rates = {
        'unet': 1e-4,
    }
    net = nets[net_name]
    learn_rate = learn_rates[net_name]
    inputs = Input((n_ch, patch_height, patch_width))
    output = net(inputs, act)
    conv = Conv2D(category_num, (1, 1),
                  activation=act,
                  padding='same',
                  data_format='channels_first')(output)
    conv = core.Reshape((category_num, patch_height * patch_width))(conv)
    conv = core.Permute((2, 1))(conv)
    ############
    conv = core.Activation('softmax')(conv)
    model = Model(inputs=inputs, outputs=conv)
    if fine_tune == 1:
        model.load_weights(pretrain_model, by_name=True)
        for layer in model.layers[10:]:
            layer.trainable = False
    if GPU_num > 1:
        model = make_parallel(model, GPU_num)
    adam = Adam(lr=learn_rate)
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'],
                  loss_weights=loss_weight,
                  sample_weight_mode=sample_weight_mode)
    return model
Code Example #17
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same', data_format='channels_first')(conv5)
    #
    conv6 = Conv2D(2, (1, 1), activation='relu', padding='same', data_format='channels_first')(conv5)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)

    # print(conv6.shape)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)
    # print(conv7.shape)
    model = Model(inputs=inputs, outputs=conv7)
    sgd = SGD(lr=0.001, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy',metrics=['accuracy'])
    # model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    return model
Code Example #18
File: ghn_org.py Project: HaonanGu/retina_g
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input((n_ch,patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)#'valid'
    conv1 = Dropout(0.3)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)

    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), padding='same')(pool1) #,activation='relu', padding='same')(pool1)
    conv2 = normalization.BatchNormalization(epsilon=2e-05, axis=1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv2)
    conv2 = Activation('relu')(conv2)
    #conv2 = Dropout(0.3)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)#,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), padding='same')(pool2)   #, activation='relu', padding='same')(pool2)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,axis=1, momentum=0.9, weights=None, beta_initializer='zero', gamma_initializer='one')(conv3)
    conv3 = Activation('relu')(conv3)
    #conv3 = Dropout(0.3)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)#,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv4 = Dropout(0.3)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv4)
    #
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(up2)
    conv5 = Dropout(0.3)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv5)

    conv6 = Conv2D(2, (1, 1), activation='relu',padding='same')(conv5)
    #conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

    conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    act = Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=act)
    return model
Code Example #19
def get_unet(n_ch,patch_height,patch_width):
    inputs = Input(shape=(n_ch,patch_height,patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2,up1],axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1,up2], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv5)
    #
    conv6 = Conv2D(3, (1, 1), activation='relu',padding='same',data_format='channels_first')(conv5)
    conv6 = core.Reshape((3,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
    model.compile(optimizer=adam,loss='categorical_crossentropy',metrics=['categorical_accuracy'],sample_weight_mode="temporal")

    return model
Code Example #20
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv5)
    #
    conv6 = Conv2D(2, (1, 1), activation='relu', padding='same')(conv5)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    # `context` is a keras-mxnet extension; the commented variant ran on three GPUs:
    #model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'], context=['gpu(0)','gpu(1)','gpu(2)'])
    model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'], context=['gpu(0)'])

    return model
Code Example #21
def Unet(nClasses, optimizer=None, input_width=64, input_height=96, nChannels=1):
    inputs = Input((input_height, input_width, nChannels))
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), activation='relu', padding='same')(conv3)

    up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=-1)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv4)

    up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=-1)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), activation='relu', padding='same')(conv5)

    conv6 = Conv2D(nClasses, (1, 1), activation='relu', padding='same')(conv5)
    conv6 = core.Reshape((nClasses, input_height * input_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)

    conv7 = core.Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=conv7)

    if optimizer is not None:
        model.compile(loss="categorical_crossentropy", optimizer=optimizer, metrics=['accuracy'])

    return model
Code Example #22
def create_nn_architecture(n_ch, patch_height, patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)

    up1 = merge([UpSampling2D(size=(2, 2))(conv3), conv2], mode='concat', concat_axis=1)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    #
    up2 = merge([UpSampling2D(size=(2, 2))(conv4), conv1], mode='concat', concat_axis=1)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv5)
    #
    conv6 = Convolution2D(2, 1, 1, activation='relu',border_mode='same')(conv5)
    conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
    conv6 = core.Permute((2,1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)

    model = Model(input=inputs, output=conv7)
    model.compile(optimizer='sgd', loss='categorical_crossentropy',metrics=['accuracy'])

    return model
Code Example #23
File: unet.py Project: ylHe/DRIVE_Segmentation
def unet_model(n_ch,patch_height,patch_width):
   inputs = Input(shape=(n_ch,patch_height,patch_width))
   conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(inputs)
   conv1 = Dropout(0.2)(conv1)
   conv1 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv1)
   pool1 = MaxPooling2D((2, 2))(conv1)
   
   conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool1)
   conv2 = Dropout(0.2)(conv2)
   conv2 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv2)
   pool2 = MaxPooling2D((2, 2))(conv2)
   
   conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(pool2)
   conv3 = Dropout(0.2)(conv3)
   conv3 = Conv2D(128, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv3)
   
   up1 = UpSampling2D(size=(2, 2))(conv3)
   up1 = concatenate([conv2,up1],axis=1)
   conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(up1)
   conv4 = Dropout(0.2)(conv4)
   conv4 = Conv2D(64, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv4)

   up2 = UpSampling2D(size=(2, 2))(conv4)
   up2 = concatenate([conv1,up2], axis=1)
   conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(up2)
   conv5 = Dropout(0.2)(conv5)
   conv5 = Conv2D(32, (3, 3), activation='relu', padding='same',data_format='channels_first')(conv5)

   conv6 = Conv2D(2, (1, 1), activation='relu',padding='same',data_format='channels_first')(conv5)
   conv6 = core.Reshape((2,patch_height*patch_width))(conv6)
   conv6 = core.Permute((2,1))(conv6)
   
   conv7 = core.Activation('softmax')(conv6)
   
   model = Model(inputs=inputs, outputs=conv7)
   return model
Code Example #24
def R_Unet(n_ch, patch_height, patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (1, 1), activation=None, padding='same')(inputs)
    conv1 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='zero',
                                             gamma_initializer='one')(conv1)
    conv1 = Activation('relu')(conv1)

    conv1 = DenseBlock(conv1, 32)  #48
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = DenseBlock(pool1, 64)  #24
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = DenseBlock(pool2, 64)  #12
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = DenseBlock(pool3, 64)  # 12

    up1 = Conv2DTranspose(64, (3, 3),
                          strides=2,
                          activation='relu',
                          padding='same')(conv4)
    up1 = concatenate([up1, conv3], axis=1)

    conv5 = DenseBlock(up1, 64)

    up2 = Conv2DTranspose(64, (3, 3),
                          strides=2,
                          activation='relu',
                          padding='same')(conv5)
    up2 = concatenate([up2, conv2], axis=1)

    conv6 = DenseBlock(up2, 64)

    up3 = Conv2DTranspose(64, (3, 3),
                          strides=2,
                          activation='relu',
                          padding='same')(conv6)
    up3 = concatenate([up3, conv1], axis=1)

    conv7 = DenseBlock(up3, 32)

    conv8 = Conv2D(num_lesion_class + 1, (1, 1),
                   activation='relu',
                   padding='same')(conv7)
    # conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

    # for tensorflow
    # conv6 = core.Reshape((patch_height*patch_width,num_lesion_class+1))(conv6)
    # for theano
    conv8 = core.Reshape(
        ((num_lesion_class + 1), patch_height * patch_width))(conv8)
    conv8 = core.Permute((2, 1))(conv8)
    ############
    act = Activation('softmax')(conv8)

    model = Model(inputs=inputs, outputs=act)
    return model
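The "for tensorflow / for theano" comments above reflect the channel ordering: with channels_first (Theano-style) tensors the head reshapes to (classes, pixels) and then permutes, while with channels_last (TensorFlow-style) tensors the pixel axes already lead and a direct reshape suffices. A minimal numpy sketch (not from the original project) of the two orderings:

import numpy as np

num_classes, h, w = 3, 4, 4

# channels_first (Theano-style): reshape to (classes, pixels), then permute
x_cf = np.zeros((1, num_classes, h, w))
head_cf = x_cf.reshape(1, num_classes, h * w).transpose(0, 2, 1)

# channels_last (TensorFlow-style): reshape straight to (pixels, classes)
x_cl = np.zeros((1, h, w, num_classes))
head_cl = x_cl.reshape(1, h * w, num_classes)

print(head_cf.shape, head_cl.shape)  # (1, 16, 3) (1, 16, 3)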
Code Example #25
def unet_model_MultiScale():
    inputs = Input(config["input_shape"])
    conv1 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(inputs)
    conv1 = Conv3D(32, (3, 3, 3), padding='same')(conv1)
    conv1 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='zero',
                                             gamma_initializer='one')(conv1)
    conv1 = core.Activation('relu')(conv1)
    pool1 = MaxPooling3D(pool_size=config["pool_size"])(conv1)

    conv2 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(pool1)
    conv2 = Conv3D(64, (3, 3, 3), padding='same')(conv2)
    conv2 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='zero',
                                             gamma_initializer='one')(conv2)
    conv2 = core.Activation('relu')(conv2)

    pool2_1 = MaxPooling3D(pool_size=config["pool_size"])(conv2)
    conv3_1 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(pool2_1)
    conv3_1 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(conv3_1)

    pool2_2 = MaxPooling3D(pool_size=(4, 4, 4))(conv2)
    conv3_2 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(pool2_2)
    conv3_2 = Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same')(conv3_2)

    fuse = concatenate(
        [UpSampling3D(size=config["pool_size"])(conv3_2), conv3_1], axis=1)
    conv3_f = Conv3D(128, (3, 3, 3), activation='relu', padding='same')(fuse)

    up4 = concatenate([UpSampling3D(size=config["pool_size"])(conv3_f), conv2],
                      axis=1)
    conv4 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(up4)
    conv4 = Conv3D(64, (3, 3, 3), activation='relu', padding='same')(conv4)

    up5 = concatenate([UpSampling3D(size=config["pool_size"])(conv4), conv1],
                      axis=1)
    conv5 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(up5)
    conv5 = Conv3D(32, (3, 3, 3), activation='relu', padding='valid')(conv5)

    conv6 = Conv3D(config["n_labels"], (1, 1, 1))(conv5)
    conv6 = core.Reshape((1, out_w * out_w * out_w))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    act = Activation('sigmoid')(conv6)
    model = Model(inputs=inputs, outputs=act)

    #model.compile(optimizer=Adam(lr=config["initial_learning_rate"]), loss='categorical_crossentropy',metrics=['fbeta_score'])
    model.compile(optimizer=Adam(lr=config["initial_learning_rate"]),
                  loss=dice_coef_loss,
                  metrics=[dice_coef])
    return model
Code Example #26
File: my_model.py Project: sdcjimmy/od-segmentation
def get_unet_seg(n_ch, img_rows=480, img_cols=480):
    inputs = Input((n_ch, img_rows, img_cols))
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv1_1")(inputs)
    conv1 = BatchNormalization(axis=1, name="conv1_2")(conv1)
    conv1 = Dropout(0.5, name="conv1_3")(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv1_4")(conv1)
    conv1 = BatchNormalization(axis=1, name="conv1_5")(conv1)
    conv1.trainable = False  # no effect: set on a tensor, not a Layer (same for the assignments below); see the sketch after this example
    pool1 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv1_6")(conv1)
    pool1.trainable = False

    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv2_1")(pool1)
    conv2 = BatchNormalization(axis=1, name="conv2_2")(conv2)
    conv2 = Dropout(0.5, name="conv2_3")(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv2_4")(conv2)
    conv2 = BatchNormalization(axis=1, name="conv2_5")(conv2)
    conv2.trainable = False
    pool2 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv2_6")(conv2)
    pool2.trainable = False

    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv3_1")(pool2)
    conv3 = BatchNormalization(axis=1, name="conv3_2")(conv3)
    conv3 = Dropout(0.5, name="conv3_3")(conv3)
    conv3 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv3_4")(conv3)
    conv3 = BatchNormalization(axis=1, name="conv3_5")(conv3)
    conv3.trainable = False
    pool3 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv3_6")(conv3)
    pool3.trainable = False

    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv4_1")(pool3)
    conv4 = BatchNormalization(axis=1, name="conv4_2")(conv4)
    conv4 = Dropout(0.5, name="conv4_3")(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv4_4")(conv4)
    conv4 = BatchNormalization(axis=1, name="conv4_5")(conv4)
    conv4.trainable = False
    pool4 = MaxPooling2D((2, 2), data_format='channels_first',
                         name="conv4_6")(conv4)
    pool4.trainable = False

    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv5_1")(pool4)
    conv5 = BatchNormalization(axis=1, name="conv5_2")(conv5)
    conv5 = Dropout(0.5, name="conv5_3")(conv5)
    conv5 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first',
                   name="conv5_4")(conv5)
    conv5 = BatchNormalization(axis=1, name="conv5_5")(conv5)
    conv5.trainable = False

    up1 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv5)
    up1 = concatenate([conv4, up1], axis=1)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv6 = BatchNormalization(axis=1)(conv6)
    conv6 = Dropout(0.3)(conv6)
    conv6 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv6)
    conv6 = BatchNormalization(axis=1)(conv6)

    up2 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv6)
    up2 = concatenate([conv3, up2], axis=1)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv7 = BatchNormalization(axis=1)(conv7)
    conv7 = Dropout(0.3)(conv7)
    conv7 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv7)
    conv7 = BatchNormalization(axis=1)(conv7)

    up3 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv7)
    up3 = concatenate([conv2, up3], axis=1)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up3)
    conv8 = BatchNormalization(axis=1)(conv8)
    conv8 = Dropout(0.3)(conv8)
    conv8 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv8)
    conv8 = BatchNormalization(axis=1)(conv8)

    up4 = UpSampling2D(size=(2, 2), data_format='channels_first')(conv8)
    up4 = concatenate([conv1, up4], axis=1)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up4)
    conv9 = BatchNormalization(axis=1)(conv9)
    conv9 = Dropout(0.3)(conv9)
    conv9 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv9)
    conv9 = BatchNormalization(axis=1)(conv9)

    conv10 = Conv2D(2, (1, 1),
                    activation='relu',
                    padding='same',
                    data_format='channels_first')(conv9)
    conv10 = BatchNormalization(axis=1)(conv10)
    conv10 = core.Reshape((2, img_rows * img_cols))(conv10)
    conv10 = core.Permute((2, 1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(inputs=inputs, outputs=conv10)

    adaGrad = Adagrad(lr=1e-7, epsilon=1e-7, decay=1e-6)  # defined but unused; compile below uses plain 'sgd'
    model.compile(optimizer='sgd',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
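Since `trainable` must be set on Layer objects rather than on their output tensors, the freezing attempted above never takes effect. A minimal sketch of how the encoder could actually be frozen, assuming the `get_unet_seg` function and its "convN_*" layer names from the example above (the prefix tuple is introduced here for illustration):

model = get_unet_seg(n_ch=3)
encoder_prefixes = ('conv1_', 'conv2_', 'conv3_', 'conv4_', 'conv5_')
for layer in model.layers:
    if layer.name.startswith(encoder_prefixes):
        layer.trainable = False  # freeze every named encoder layer
# recompile so the new trainable flags take effect
model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])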
Code Example #27
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input(shape=(n_ch, patch_height, patch_width))
    # data_format: either "channels_first" or "channels_last", giving the position of the channel axis.
    # For a 128x128 RGB image, "channels_first" organizes the data as (3, 128, 128) and "channels_last" as (128, 128, 3).
    # The default is the value set in ~/.keras/keras.json; if it was never set, it is "channels_last".
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv1)
    pool1 = MaxPooling2D((2, 2))(conv1)
    #
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv2)
    pool2 = MaxPooling2D((2, 2))(conv2)
    #
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(pool2)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv3)

    up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([conv2, up1], axis=1)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up1)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv4)
    #
    up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([conv1, up2], axis=1)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(up2)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    #
    # What a 1x1 convolution is for, roughly two things:
    # 1. cross-channel interaction and information fusion; 2. reducing or
    # expanding the number of channels (see the short sketch after this example).
    conv6 = Conv2D(2, (1, 1),
                   activation='relu',
                   padding='same',
                   data_format='channels_first')(conv5)
    # output shape: (batch_size, 2, patch_height*patch_width)
    conv6 = core.Reshape((2, patch_height * patch_width))(conv6)
    # output shape: (batch_size, patch_height*patch_width, 2),
    # e.g. (n_patches, 2304, 2) for 48x48 patches
    conv6 = core.Permute((2, 1))(conv6)
    ############
    conv7 = core.Activation('softmax')(conv6)
    model = Model(inputs=inputs, outputs=conv7)
    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer=Adam(lr=0.001),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
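As a quick illustration of the 1x1-convolution remark in the comments above, a
minimal sketch (channels_last for portability; the names and sizes are made up
for the example): a (1, 1) kernel mixes information across channels and changes
the channel count without touching the spatial grid.

import numpy as np
from keras.layers import Input, Conv2D
from keras.models import Model

x = Input((32, 32, 64))                # 64 input channels
y = Conv2D(2, (1, 1))(x)               # cross-channel mixing, 64 -> 2 channels
m = Model(inputs=x, outputs=y)
print(m.predict(np.zeros((1, 32, 32, 64))).shape)  # -> (1, 32, 32, 2)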
Code example #28
class LayerCorrectnessTest(keras_parameterized.TestCase):
    def setUp(self):
        super(LayerCorrectnessTest, self).setUp()
        # Set two virtual CPUs to test MirroredStrategy with multiple devices
        cpus = tf.config.list_physical_devices('CPU')
        tf.config.set_logical_device_configuration(cpus[0], [
            tf.config.LogicalDeviceConfiguration(),
            tf.config.LogicalDeviceConfiguration(),
        ])

    def _create_model_from_layer(self, layer, input_shapes):
        inputs = [layers.Input(batch_input_shape=s) for s in input_shapes]
        if len(inputs) == 1:
            inputs = inputs[0]
        y = layer(inputs)
        model = models.Model(inputs, y)
        model.compile('sgd', 'mse')
        return model

    @parameterized.named_parameters(
        ('LeakyReLU', advanced_activations.LeakyReLU, (2, 2)),
        ('PReLU', advanced_activations.PReLU, (2, 2)),
        ('ELU', advanced_activations.ELU, (2, 2)),
        ('ThresholdedReLU', advanced_activations.ThresholdedReLU, (2, 2)),
        ('Softmax', advanced_activations.Softmax, (2, 2)),
        ('ReLU', advanced_activations.ReLU, (2, 2)),
        ('Conv1D', lambda: convolutional.Conv1D(2, 2), (2, 2, 1)),
        ('Conv2D', lambda: convolutional.Conv2D(2, 2), (2, 2, 2, 1)),
        ('Conv3D', lambda: convolutional.Conv3D(2, 2), (2, 2, 2, 2, 1)),
        ('Conv2DTranspose', lambda: convolutional.Conv2DTranspose(2, 2),
         (2, 2, 2, 2)),
        ('SeparableConv2D', lambda: convolutional.SeparableConv2D(2, 2),
         (2, 2, 2, 1)),
        ('DepthwiseConv2D', lambda: convolutional.DepthwiseConv2D(2, 2),
         (2, 2, 2, 1)),
        ('UpSampling2D', convolutional.UpSampling2D, (2, 2, 2, 1)),
        ('ZeroPadding2D', convolutional.ZeroPadding2D, (2, 2, 2, 1)),
        ('Cropping2D', convolutional.Cropping2D, (2, 3, 3, 1)),
        ('ConvLSTM2D',
         lambda: convolutional_recurrent.ConvLSTM2D(4, kernel_size=(2, 2)),
         (4, 4, 4, 4, 4)),
        ('Dense', lambda: core.Dense(2), (2, 2)),
        ('Dropout', lambda: core.Dropout(0.5), (2, 2)),
        ('SpatialDropout2D', lambda: core.SpatialDropout2D(0.5), (2, 2, 2, 2)),
        ('Activation', lambda: core.Activation('sigmoid'), (2, 2)),
        ('Reshape', lambda: core.Reshape((1, 4, 1)), (2, 2, 2)),
        ('Permute', lambda: core.Permute((2, 1)), (2, 2, 2)),
        ('Attention', dense_attention.Attention, [(2, 2, 3), (2, 3, 3),
                                                  (2, 3, 3)]),
        ('AdditiveAttention', dense_attention.AdditiveAttention, [(2, 2, 3),
                                                                  (2, 3, 3),
                                                                  (2, 3, 3)]),
        ('Embedding', lambda: embeddings.Embedding(4, 4),
         (2, 4), 2e-3, 2e-3, np.random.randint(4, size=(2, 4))),
        ('LocallyConnected1D', lambda: local.LocallyConnected1D(2, 2),
         (2, 2, 1)),
        ('LocallyConnected2D', lambda: local.LocallyConnected2D(2, 2),
         (2, 2, 2, 1)),
        ('Add', merge.Add, [(2, 2), (2, 2)]),
        ('Subtract', merge.Subtract, [(2, 2), (2, 2)]),
        ('Multiply', merge.Multiply, [(2, 2), (2, 2)]),
        ('Average', merge.Average, [(2, 2), (2, 2)]),
        ('Maximum', merge.Maximum, [(2, 2), (2, 2)]),
        ('Minimum', merge.Minimum, [(2, 2), (2, 2)]),
        ('Concatenate', merge.Concatenate, [(2, 2), (2, 2)]),
        ('Dot', lambda: merge.Dot(1), [(2, 2), (2, 2)]),
        ('GaussianNoise', lambda: noise.GaussianNoise(0.5), (2, 2)),
        ('GaussianDropout', lambda: noise.GaussianDropout(0.5), (2, 2)),
        ('AlphaDropout', lambda: noise.AlphaDropout(0.5), (2, 2)),
        ('BatchNormalization', normalization_v2.BatchNormalization,
         (2, 2), 1e-2, 1e-2),
        ('LayerNormalization', normalization.LayerNormalization, (2, 2)),
        ('LayerNormalizationUnfused',
         lambda: normalization.LayerNormalization(axis=1), (2, 2, 2)),
        ('MaxPooling2D', pooling.MaxPooling2D, (2, 2, 2, 1)),
        ('AveragePooling2D', pooling.AveragePooling2D, (2, 2, 2, 1)),
        ('GlobalMaxPooling2D', pooling.GlobalMaxPooling2D, (2, 2, 2, 1)),
        ('GlobalAveragePooling2D', pooling.GlobalAveragePooling2D,
         (2, 2, 2, 1)),
        ('SimpleRNN', lambda: recurrent.SimpleRNN(units=4),
         (4, 4, 4), 1e-2, 1e-2),
        ('GRU', lambda: recurrent.GRU(units=4), (4, 4, 4)),
        ('LSTM', lambda: recurrent.LSTM(units=4), (4, 4, 4)),
        ('GRUV2', lambda: recurrent_v2.GRU(units=4), (4, 4, 4)),
        ('LSTMV2', lambda: recurrent_v2.LSTM(units=4), (4, 4, 4)),
        ('TimeDistributed', lambda: wrappers.TimeDistributed(core.Dense(2)),
         (2, 2, 2)),
        ('Bidirectional',
         lambda: wrappers.Bidirectional(recurrent.SimpleRNN(units=4)),
         (2, 2, 2)),
        ('AttentionLayerCausal',
         lambda: dense_attention.Attention(causal=True), [(2, 2, 3), (2, 3, 3),
                                                          (2, 3, 3)]),
        ('AdditiveAttentionLayerCausal',
         lambda: dense_attention.AdditiveAttention(causal=True), [(2, 3, 4),
                                                                  (2, 3, 4),
                                                                  (2, 3, 4)]),
    )
    def test_layer(self,
                   f32_layer_fn,
                   input_shape,
                   rtol=2e-3,
                   atol=2e-3,
                   input_data=None):
        """Tests a layer by comparing the float32 and mixed precision weights.

    A float32 layer, a mixed precision layer, and a distributed mixed precision
    layer are run. The three layers are identical other than their dtypes and
    distribution strategies. The outputs after predict() and weights after fit()
    are asserted to be close.

    Args:
      f32_layer_fn: A function returning a float32 layer. The other two layers
        will automatically be created from this
      input_shape: The shape of the input to the layer, including the batch
        dimension. Or a list of shapes if the layer takes multiple inputs.
      rtol: The relative tolerance to be asserted.
      atol: The absolute tolerance to be asserted.
      input_data: A Numpy array with the data of the input. If None, input data
        will be randomly generated
    """

        if f32_layer_fn == convolutional.ZeroPadding2D and \
           tf.test.is_built_with_rocm():
            return
        if isinstance(input_shape[0], int):
            input_shapes = [input_shape]
        else:
            input_shapes = input_shape
        strategy = create_mirrored_strategy()
        f32_layer = f32_layer_fn()

        # Create the layers
        assert f32_layer.dtype == f32_layer._compute_dtype == 'float32'
        config = f32_layer.get_config()
        config['dtype'] = policy.Policy('mixed_float16')
        mp_layer = f32_layer.__class__.from_config(config)
        distributed_mp_layer = f32_layer.__class__.from_config(config)

        # Compute per_replica_input_shapes for the distributed model
        global_batch_size = input_shapes[0][0]
        assert global_batch_size % strategy.num_replicas_in_sync == 0, (
            'The number of replicas, %d, does not divide the global batch size of '
            '%d' % (strategy.num_replicas_in_sync, global_batch_size))
        per_replica_batch_size = (global_batch_size //
                                  strategy.num_replicas_in_sync)
        per_replica_input_shapes = [(per_replica_batch_size, ) + s[1:]
                                    for s in input_shapes]

        # Create the models
        f32_model = self._create_model_from_layer(f32_layer, input_shapes)
        mp_model = self._create_model_from_layer(mp_layer, input_shapes)
        with strategy.scope():
            distributed_mp_model = self._create_model_from_layer(
                distributed_mp_layer, per_replica_input_shapes)

        # Set all model weights to the same values
        f32_weights = f32_model.get_weights()
        mp_model.set_weights(f32_weights)
        distributed_mp_model.set_weights(f32_weights)

        # Generate input data
        if input_data is None:
            # Cast inputs to float16 to avoid measuring error from having f16 layers
            # cast to float16.
            input_data = [
                np.random.normal(size=s).astype('float16')
                for s in input_shapes
            ]
            if len(input_data) == 1:
                input_data = input_data[0]

        # Assert all models have close outputs.
        f32_output = f32_model.predict(input_data)
        mp_output = mp_model.predict(input_data)
        self.assertAllClose(mp_output, f32_output, rtol=rtol, atol=atol)
        self.assertAllClose(distributed_mp_model.predict(input_data),
                            f32_output,
                            rtol=rtol,
                            atol=atol)

        # Run fit() on models
        output = np.random.normal(
            size=f32_model.outputs[0].shape).astype('float16')
        for model in f32_model, mp_model, distributed_mp_model:
            model.fit(input_data, output, batch_size=global_batch_size)

        # Assert all models have close weights
        f32_weights = f32_model.get_weights()
        self.assertAllClose(mp_model.get_weights(),
                            f32_weights,
                            rtol=rtol,
                            atol=atol)
        self.assertAllClose(distributed_mp_model.get_weights(),
                            f32_weights,
                            rtol=rtol,
                            atol=atol)
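The heart of the test above is cloning a float32 layer into a mixed-precision
copy through its config. A minimal standalone sketch of that trick (assuming
TF 2.4+, where tf.keras.mixed_precision.Policy is the stable API):

import tensorflow as tf

f32_layer = tf.keras.layers.Dense(2)
f32_layer.build((None, 4))
assert f32_layer.dtype == 'float32'

config = f32_layer.get_config()
config['dtype'] = tf.keras.mixed_precision.Policy('mixed_float16')
mp_layer = tf.keras.layers.Dense.from_config(config)
# mp_layer now computes in float16 while keeping float32 variables,
# which is what test_layer compares against the float32 original.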
Code example #29
def get_unet(n_ch, patch_height, patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Conv2D(32, (3, 3), padding='same')(inputs)  #'valid'
    conv1 = LeakyReLU(alpha=0.3)(conv1)
    conv1 = Dropout(0.2)(conv1)
    conv1 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv1)
    conv1 = Conv2D(32, (3, 3), dilation_rate=2, padding='same')(conv1)
    conv1 = LeakyReLU(alpha=0.3)(conv1)
    conv1 = Conv2D(32, (3, 3), dilation_rate=4, padding='same')(conv1)
    conv1 = LeakyReLU(alpha=0.3)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    #pool1 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(pool1)
    conv2 = Conv2D(64, (3, 3), padding='same')(
        pool1)  #,activation='relu', padding='same')(pool1)
    conv2 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv2)
    conv2 = LeakyReLU(alpha=0.3)(conv2)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Conv2D(64, (3, 3), dilation_rate=2, padding='same')(conv2)
    conv2 = LeakyReLU(alpha=0.3)(conv2)
    conv2 = Conv2D(64, (3, 3), dilation_rate=4, padding='same')(
        conv2)  #,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv2)
    conv2 = LeakyReLU(alpha=0.3)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    #crop = Cropping2D(cropping=((int(3 * patch_height / 8), int(3 * patch_height / 8)), (int(3 * patch_width / 8), int(3 * patch_width / 8))))(conv1)
    #conv3 = concatenate([crop,pool2], axis=1)
    conv3 = Conv2D(128, (3, 3), padding='same')(
        pool2)  #, activation='relu', padding='same')(conv3)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv3)
    conv3 = LeakyReLU(alpha=0.3)(conv3)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Conv2D(128, (3, 3), dilation_rate=2, padding='same')(
        conv3)  #,W_regularizer=l2(0.01), b_regularizer=l2(0.01))(conv3)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv3)
    conv3 = LeakyReLU(alpha=0.3)(conv3)

    conv3 = Conv2D(128, (3, 3), dilation_rate=4, padding='same')(conv3)
    conv3 = normalization.BatchNormalization(epsilon=2e-05,
                                             axis=1,
                                             momentum=0.9,
                                             weights=None,
                                             beta_initializer='RandomNormal',
                                             gamma_initializer='one')(conv3)
    conv3 = LeakyReLU(alpha=0.3)(conv3)

    #up1 = UpSampling2D(size=(2, 2))(conv3)
    up1 = concatenate([UpSampling2D(size=(2, 2))(conv3), conv2], axis=1)
    conv4 = Conv2D(64, (3, 3), padding='same')(up1)
    conv4 = LeakyReLU(alpha=0.3)(conv4)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Conv2D(64, (3, 3), padding='same')(conv4)
    conv4 = LeakyReLU(alpha=0.3)(conv4)
    #conv4 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv4)
    #
    #up2 = UpSampling2D(size=(2, 2))(conv4)
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv4), conv1], axis=1)
    conv5 = Conv2D(32, (3, 3), padding='same')(up2)
    conv5 = LeakyReLU(alpha=0.3)(conv5)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Conv2D(32, (3, 3), padding='same')(conv5)
    conv5 = LeakyReLU(alpha=0.3)(conv5)

    # num_lesion_class is assumed to be a module-level constant; the +1 output
    # channel is presumably a background class.
    conv6 = Conv2D(num_lesion_class + 1, (1, 1), padding='same')(conv5)
    conv6 = LeakyReLU(alpha=0.3)(conv6)
    #conv6 = normalization.BatchNormalization(epsilon=1e-06, mode=1, axis=-1, momentum=0.9, weights=None, beta_init='zero', gamma_init='one')(conv6)

    #for tensorflow
    #conv6 = core.Reshape((patch_height*patch_width,num_lesion_class+1))(conv6)
    #for theano
    conv6 = core.Reshape(
        ((num_lesion_class + 1), patch_height * patch_width))(conv6)
    conv6 = core.Permute((2, 1))(conv6)
    ############
    act = Activation('softmax')(conv6)

    model = Model(inputs=inputs, outputs=act)
    return model
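Note the dilation_rate=1, 2, 4 progression in the encoder above: stacked
dilated 3x3 convolutions grow the receptive field without pooling. Three 3x3
convs with dilations 1, 2 and 4 cover 1 + 2*(1+2+4) = 15 pixels per axis,
versus 7 for three plain 3x3 convs, while padding='same' keeps the spatial
size fixed. A small sketch (channels_last, made-up sizes):

from keras.layers import Input, Conv2D
from keras.models import Model

x = Input((64, 64, 1))
y = Conv2D(8, (3, 3), padding='same')(x)                   # dilation 1
y = Conv2D(8, (3, 3), dilation_rate=2, padding='same')(y)  # dilation 2
y = Conv2D(8, (3, 3), dilation_rate=4, padding='same')(y)  # dilation 4
print(Model(inputs=x, outputs=y).output_shape)             # -> (None, 64, 64, 8)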
Code example #30
File: fuck_net.py  Project: qq191513/mySeg
def get_gnet(n_ch,patch_height,patch_width):
    inputs = Input((n_ch, patch_height, patch_width))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Dropout(0.2)(conv1)
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    up1 = UpSampling2D(size=(2, 2))(conv1)
    #
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up1)
    conv2 = Dropout(0.2)(conv2)
    conv2 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv2)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv2)
    #
    conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool1)
    conv3 = Dropout(0.2)(conv3)
    conv3 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv3)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv3)
    #
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool2)
    conv4 = Dropout(0.2)(conv4)
    conv4 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv4)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv4)
    #
    conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool3)
    conv5 = Dropout(0.2)(conv5)
    conv5 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv5)
    #
    # Concatenate on the channel axis (axis=1 for channels_first); the original
    # used axis=3, which cannot work here since conv5/conv4 differ in channels.
    up2 = concatenate([UpSampling2D(size=(2, 2))(conv5), conv4], axis=1)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up2)
    conv6 = Dropout(0.2)(conv6)
    conv6 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv6)
    #
    up3 = concatenate([UpSampling2D(size=(2, 2))(conv6), conv3], axis=1)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up3)
    conv7 = Dropout(0.2)(conv7)
    conv7 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv7)
    #
    up4 = concatenate([UpSampling2D(size=(2, 2))(conv7), conv2], axis=1)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(up4)
    conv8 = Dropout(0.2)(conv8)
    conv8 = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(conv8)
    #
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv8)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(pool4)
    conv9 = Dropout(0.2)(conv9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)
    #
    conv10 = Convolution2D(2, 1, 1, activation='relu', border_mode='same')(conv9)
    conv10 = core.Reshape((2,patch_height*patch_width))(conv10)
    conv10 = core.Permute((2,1))(conv10)
    ############
    conv10 = core.Activation('softmax')(conv10)

    model = Model(inputs=inputs, outputs=conv10)

    # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
    model.compile(optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
    train_gen = imageSegmentationGenerator(cfg.train_images, cfg.train_annotations, cfg.train_batch_size,
                                           cfg.n_classes, cfg.input_height, cfg.input_width, cfg.output_height,
                                           cfg.output_width)
    # The original unpacked the generator as `x, y` and passed `validation_data=5`,
    # which is not a valid value (Keras expects a generator or an (x_val, y_val)
    # tuple there), so it is dropped; steps_per_epoch/max_queue_size/workers imply
    # generator-based training, hence fit_generator.
    model.fit_generator(
        train_gen,
        steps_per_epoch=int(cfg.train_data_number / cfg.train_batch_size),
        max_queue_size=8, workers=4, epochs=cfg.epochs
    )
    # return model
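For reference, the fit_generator call above assumes imageSegmentationGenerator
yields an endless stream of (images, one-hot masks) batches. A hypothetical
stand-in that satisfies this contract (the name and shapes are illustrative,
not taken from qq191513/mySeg):

import numpy as np

def dummy_segmentation_generator(batch_size, n_classes, h, w, n_ch=1):
    # Yields (x, y) forever: x is a channels_first image batch; y matches the
    # (batch, h*w, n_classes) layout produced by the Reshape/Permute tail above.
    while True:
        x = np.random.rand(batch_size, n_ch, h, w).astype('float32')
        labels = np.random.randint(n_classes, size=(batch_size, h * w))
        y = np.eye(n_classes, dtype='float32')[labels]
        yield x, y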