Example #1
def generator(latent_size=200, gflag=0, gf=8, gx=5, gy=5, gz=5):

    latent = Input(shape=(latent_size, ))

    x = Dense(64 * 7 * 7)(latent)
    x = Reshape((7, 7, 8, 8))(x)
    x = Conv3D(64, 6, 6, 8, border_mode='same', init='he_uniform')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = UpSampling3D(size=(2, 2, 2))(x)

    x = ZeroPadding3D((2, 2, 0))(x)
    x = Conv3D(6, 6, 5, 8, init='he_uniform')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = UpSampling3D(size=(2, 2, 3))(x)

    if gflag == 1:
        x = Conv3D(gf, gx, gy, gz, init='he_uniform', border_mode='same')(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)

    x = ZeroPadding3D((1, 0, 3))(x)
    x = Conv3D(6, 3, 3, 8, init='he_uniform')(x)
    x = LeakyReLU()(x)
    x = Conv3D(1, 2, 2, 2, bias=False, init='glorot_normal')(x)
    x = Activation('relu')(x)

    loc = Model(latent, x)
    loc.summary()
    fake_image = loc(latent)
    Model(input=[latent], output=fake_image)
    return Model(input=[latent], output=fake_image)
Example #2
def build(input_channels, n_classes, patch_size=5, dilation=1):

    patch_size = patch_size
    input_shape = (1, input_channels, patch_size, patch_size)
    dilation = (dilation, 1, 1)
    model_name = "HamidaEtAl"
    model = Sequential()
    model.add(Conv2D(20, (3, 3), strides=(1, 1), dilation_rate=dilation, padding="same", input_shape=input_shape))
    model.add(Activation("relu"))
    model.add(ZeroPadding3D(padding=(1, 0, 0)))
    model.add(Conv2D(20, (3, 1), dilation_rate=dilation, strides=(2, 1)))

    model.add(ZeroPadding3D(padding=(1, 0, 0)))
    model.add(Conv2D(35, (3, 3), dilation_rate=dilation, strides=(1, 1)))
    model.add(Activation("relu"))
    model.add(ZeroPadding3D(padding=(1, 0, 0)))
    model.add(Conv2D(35, (3, 1), dilation_rate=dilation, activation="relu", strides=(2, 1)))

    model.add(ZeroPadding3D(padding=(1, 0, 0)))
    model.add(Conv2D(35, (3, 1), dilation_rate=dilation, activation="relu", strides=(1, 1)))

    model.add(ZeroPadding3D(padding=(1, 0, 0)))
    model.add(Conv2D(35, (2, 1), dilation_rate=dilation, strides=(2, 1)))
    model.add(Flatten())
    model.add(Dense(n_classes, activation='softmax'))
    return model, model_name
Example #3
def generator(latent_size=200, return_intermediate=False):

    loc = Sequential([
        Dense(64 * 7 * 7, input_dim=latent_size),
        Reshape((7, 7, 8, 8)),
        Conv3D(64, 6, 6, 8, border_mode='same', init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),
        ZeroPadding3D((2, 2, 0)),
        Conv3D(6, 6, 5, 8, init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 3)),
        ZeroPadding3D((1, 0, 3)),
        Conv3D(6, 3, 3, 8, init='he_uniform'),
        LeakyReLU(),
        Conv3D(1, 2, 2, 2, bias=False, init='glorot_normal'),
        Activation('relu')
    ])

    latent = Input(shape=(latent_size, ))

    image_class = Input(shape=(1, ), dtype='float32')
    emb = Flatten()(Embedding(500,
                              latent_size,
                              input_length=1,
                              init='glorot_normal')(image_class))

    h = merge([latent, emb], mode='mul')

    fake_image = loc(h)

    Model(input=[latent, image_class], output=fake_image).summary()
    return Model(input=[latent, image_class], output=fake_image)
Example #4
def _pad_or_crop_to_shape_3D(x, in_shape, tgt_shape):
    '''
    in_shape and tgt_shape are 3-element array-likes giving the spatial dimensions
    '''
    im_diff = np.asarray(in_shape[:3]) - np.asarray(tgt_shape[:3])
    print(im_diff)
    if im_diff[0] < 0:
        pad_amt = (int(np.ceil(abs(im_diff[0]) / 2.0)),
                   int(np.floor(abs(im_diff[0]) / 2.0)))
        x = ZeroPadding3D((pad_amt, (0, 0), (0, 0)))(x)
    if im_diff[1] < 0:
        pad_amt = (int(np.ceil(abs(im_diff[1]) / 2.0)),
                   int(np.floor(abs(im_diff[1]) / 2.0)))
        x = ZeroPadding3D(((0, 0), pad_amt, (0, 0)))(x)
    if im_diff[2] < 0:
        pad_amt = (int(np.ceil(abs(im_diff[2]) / 2.0)),
                   int(np.floor(abs(im_diff[2]) / 2.0)))
        x = ZeroPadding3D(((0, 0), (0, 0), pad_amt))(x)

    if im_diff[0] > 0:
        crop_amt = (int(np.ceil(im_diff[0] / 2.0)),
                    int(np.floor(im_diff[0] / 2.0)))
        x = Cropping3D((crop_amt, (0, 0), (0, 0)))(x)
    if im_diff[1] > 0:
        crop_amt = (int(np.ceil(im_diff[1] / 2.0)),
                    int(np.floor(im_diff[1] / 2.0)))
        x = Cropping3D(((0, 0), crop_amt, (0, 0)))(x)
    if im_diff[2] > 0:
        crop_amt = (int(np.ceil(im_diff[2] / 2.0)),
                    int(np.floor(im_diff[2] / 2.0)))
        x = Cropping3D(((0, 0), (0, 0), crop_amt))(x)
    return x
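A minimal usage sketch (not from the source) for the helper above: pad a (30, 30, 30) volume up to a (32, 32, 32) target. It assumes the helper's own dependencies (numpy as np, ZeroPadding3D, Cropping3D) are already imported, e.g. from tensorflow.keras.layers.

from tensorflow.keras.layers import Input

vol = Input(shape=(30, 30, 30, 1))          # channels-last 3D volume
# im_diff = [-2, -2, -2], so every spatial axis gets ZeroPadding3D with (1, 1)
out = _pad_or_crop_to_shape_3D(vol, in_shape=(30, 30, 30), tgt_shape=(32, 32, 32))
print(out.shape)                            # (None, 32, 32, 32, 1)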
Example #5
def generator(latent_size=1024, return_intermediate=False):

    loc = Sequential([
        Dense(64 * 7 * 7, input_dim=latent_size),
        Reshape((7, 7, 8, 8)),

        Conv3D(64, 6, 6, 8, border_mode='same', init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),

        ZeroPadding3D((2, 2, 0)),
        Conv3D(6, 6, 5, 8, init='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 3)),

        ZeroPadding3D((1, 0, 3)),
        Conv3D(6, 3, 3, 8, init='he_uniform'),
        LeakyReLU(),
        Conv3D(1, 2, 2, 2, bias=False, init='glorot_normal'),
        Activation('relu')
    ])
   
    latent = Input(shape=(latent_size, ))
     
    fake_image = loc(latent)

    Model(input=[latent], output=fake_image).summary()
    return Model(input=[latent], output=fake_image)
Example #6
def gen_model(input_shape=(6, 10, 10, 10)):
    model = Sequential()
    inp = Input(input_shape)
    model.add(
        Convolution3D(8, 3, 3, 3, border_mode='same', input_shape=input_shape))
    X = Convolution3D(8, 3, 3, 3, border_mode='same')(inp)
    model.add(Activation('relu'))
    X = Activation('relu')(X)
    model.add(ZeroPadding3D())
    X = ZeroPadding3D()(X)
    model.add(Convolution3D(8, 3, 3, 3))
    X = Convolution3D(8, 3, 3, 3)(X)
    model.add(Activation('relu'))
    X = Activation('relu')(X)
    model.add(ZeroPadding3D())
    X = ZeroPadding3D()(X)

    model.add(Convolution3D(1, 3, 3, 3))
    X = Convolution3D(1, 3, 3, 3)(X)
    model.add(Activation('relu'))
    X = Activation('relu')(X)
    model.add(Flatten())
    X = Flatten()(X)
    model.add(Dense(1024))
    Z = X
    X = Dense(1024)(X)
    model.add(Activation('relu'))
    X = Activation('relu')(X)
    model.add(Dense(1024))
    X = Dense(1024)(X)
    model.add(Activation('relu'))
    X = Activation('relu')(X)
    model.add(Dense(3))
    X1 = Dense(3)(X)
    model.add(Activation('softmax'))
    X1 = Activation('softmax', name='Class Act')(X1)
    model.add(Dense(400))
    X2 = Dense(400)(X)
    model.add(Activation('relu'))
    X2 = Activation('relu')(X2)
    model.add(Dense(4))
    X2 = Dense(4)(X2)
    model.add(Activation('relu'))

    X2 = Activation('relu', name='Box Act')(X2)
    model = Model(inp, [X1, X2])
    convModel = Model(inp, Z)
    sgd = SGD(lr=2e-30, decay=1e-4, momentum=0.9, nesterov=True)

    adm = Adam(lr=1e-4)
    model.compile(optimizer=sgd,
                  loss=['binary_crossentropy', 'mse']
                  #, metrics = ['accuracy']
                  )
    convModel.compile(optimizer=sgd, loss='mse'
                      #, metrics = ['accuracy']
                      )

    return model, convModel
Example #7
def LearnReg(input):
    conv1 = createConvLayer(input, F, k_size, norm_stride, D_conv=3)
    conv2 = createConvLayer(conv1, F, k_size, norm_stride, D_conv=3)
    conv3 = ZeroPadding3D(padding=(1, 1, 1), data_format=data_format)(conv2)
    conv3 = createConvLayer(conv3,
                            2 * F,
                            k_size,
                            ds_stride,
                            padding='valid',
                            D_conv=3)
    conv4 = createConvLayer(conv3, 2 * F, k_size, norm_stride, D_conv=3)
    conv5 = createConvLayer(conv4, 2 * F, k_size, norm_stride, D_conv=3)
    conv6 = ZeroPadding3D(padding=(1, 1, 1), data_format=data_format)(conv5)
    conv6 = createConvLayer(conv6,
                            2 * F,
                            k_size,
                            ds_stride,
                            padding='valid',
                            D_conv=3)
    conv7 = createConvLayer(conv6, 2 * F, k_size, norm_stride, D_conv=3)
    conv8 = createConvLayer(conv7, 2 * F, k_size, norm_stride, D_conv=3)
    conv9 = ZeroPadding3D(padding=(1, 1, 1), data_format=data_format)(conv8)
    conv9 = createConvLayer(conv9,
                            2 * F,
                            k_size,
                            ds_stride,
                            padding='valid',
                            D_conv=3)
    conv10 = createConvLayer(conv9, 2 * F, k_size, norm_stride, D_conv=3)
    conv11 = createConvLayer(conv10, 2 * F, k_size, norm_stride, D_conv=3)
    conv12 = ZeroPadding3D(padding=(1, 1, 1), data_format=data_format)(conv11)
    conv12 = createConvLayer(conv12,
                             4 * F,
                             k_size,
                             ds_stride,
                             padding='valid',
                             D_conv=3)
    conv13 = createConvLayer(conv12, 4 * F, k_size, norm_stride, D_conv=3)
    conv14 = createConvLayer(conv13, 4 * F, k_size, norm_stride, D_conv=3)
    deconv15, output_shape15 = createDeconvLayer(conv14, 2 * F, k_size,
                                                 ds_stride)
    add16 = add([deconv15, conv11])
    deconv17, output_shape17 = createDeconvLayer(add16, 2 * F, k_size,
                                                 ds_stride)
    add18 = add([deconv17, conv8])
    deconv19, output_shape19 = createDeconvLayer(add18, 2 * F, k_size,
                                                 ds_stride)
    add20 = add([deconv19, conv5])
    deconv21, output_shape21 = createDeconvLayer(add20, F, k_size, ds_stride)
    add22 = add([deconv21, conv2])
    deconv23, output_shape23 = createDeconvLayer(add22,
                                                 1,
                                                 k_size,
                                                 ds_stride,
                                                 batch_norm=False,
                                                 act_func=False)
    return deconv23
Example #8
def discriminator(fixed_bn = False, discr_drop_out=0.2):

    image = Input(shape=(25, 25, 25, 1), name='image')

    bnm = 2 if fixed_bn else 0
    f = (5, 5, 5)
    x = _Conv3D(32, 5, 5, 5, border_mode='same',
                name='disc_c1')(image)
    x = LeakyReLU()(x)
    x = Dropout(discr_drop_out)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = _Conv3D(8, 5, 5, 5, border_mode='valid',
                name='disc_c2')(x)
    x = LeakyReLU()(x)
    x = _BatchNormalization(name='disc_bn1',
                           mode=bnm,
    )(x)
    x = Dropout(discr_drop_out)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = _Conv3D(8, 5, 5, 5, border_mode='valid',
                name='disc_c3')(x)
    x = LeakyReLU()(x)
    x = _BatchNormalization(name='disc_bn2',
                           #momentum = 0.00001
                           mode=bnm,
    )(x)
    x = Dropout(discr_drop_out)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = _Conv3D(8, 5, 5, 5, border_mode='valid',
                name='disc_c4')(x)
    x = LeakyReLU()(x)
    x = _BatchNormalization(name='disc_bn3',
                           mode=bnm,
    )(x)
    x = Dropout(discr_drop_out)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)

    dnn = _Model(input=image, output=h, name='dnn')

    dnn_out = dnn(image)

    fake = _Dense(1, activation='sigmoid', name='classification')(dnn_out)
    aux = _Dense(1, activation='linear', name='energy')(dnn_out)
    ecal = Lambda(lambda x: K.sum(x, axis=(1, 2, 3)), name='sum_cell')(image)

    return _Model(output=[fake, aux, ecal], input=image, name='discriminator_model')
Example #9
    def _masked_conv(self,
                     x,
                     filter_size,
                     stack_name,
                     layer_idx,
                     mask_type='B'):

        if stack_name == 'vertical':
            # e.g. (8, 8, 8) now becomes (9, 10, 10)
            res = ZeroPadding3D(padding=((filter_size[0] // 2,
                                          0), (filter_size[1] // 2,
                                               filter_size[1] // 2),
                                         (filter_size[2] // 2,
                                          filter_size[1] // 2)))(x)
            # back to (8, 8, 8) by using rectangular kernel (2, 3, 3)
            res = Convolution3D(filters=2 * self.nb_filters,
                                kernel_size=(filter_size[0] // 2 + 1,
                                             filter_size[1], filter_size[2]),
                                padding='valid')(res)

        elif stack_name == 'depth':
            # e.g. (8, 8, 8) now becomes (8, 9, 10)
            res = ZeroPadding3D(padding=((0, 0), (filter_size[2] // 2, 0),
                                         (filter_size[2] // 2,
                                          filter_size[1] // 2)))(x)
            # back to (8, 8, 8) by using kernel (1, 2, 3)
            res = Convolution3D(filters=1,
                                kernel_size=(1, filter_size[1] // 2 + 1,
                                             filter_size[2]),
                                padding='valid')(res)

        elif stack_name == 'horizontal':
            # e.g. turn (8, 8, 8) into (8, 8, 9)
            res = ZeroPadding3D(padding=((0, 0), (0, 0), (filter_size[2] // 2,
                                                          0)))(x)
            # mask type A zeros out the center weights as well
            # so (8, 8, 9) will remain (8, 8, 9) -- have to crop it later on in Gated CNN
            if mask_type == 'A':
                res = Convolution3D(filters=2 * self.nb_filters,
                                    kernel_size=(1, 1, filter_size[2] // 2),
                                    name='h_conv_' + str(layer_idx))(res)
            # don't zero out centre so (8, 8, 9) will become (8, 8, 8)
            else:
                res = Convolution3D(filters=2 * self.nb_filters,
                                    kernel_size=(1, 1,
                                                 filter_size[2] // 2 + 1),
                                    name='h_conv_' + str(layer_idx))(res)

        return res
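A small, self-contained shape check (an illustration, not part of the original class) for the 'vertical' branch above, assuming filter_size=(3, 3, 3), an 8x8x8 channels-last input, and tf.keras layers:

from tensorflow.keras.layers import Input, ZeroPadding3D, Convolution3D

x = Input(shape=(8, 8, 8, 1))
# padding ((1, 0), (1, 1), (1, 1)): (8, 8, 8) -> (9, 10, 10)
padded = ZeroPadding3D(padding=((1, 0), (1, 1), (1, 1)))(x)
# 'valid' convolution with the rectangular kernel (2, 3, 3): back to (8, 8, 8)
out = Convolution3D(filters=2, kernel_size=(2, 3, 3), padding='valid')(padded)
print(padded.shape, out.shape)   # (None, 9, 10, 10, 1) (None, 8, 8, 8, 2)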
Example #10
def build(input_channels, n_classes, n_planes=2, patch_size=5):
    model_name = "LiEtAl"
    input_x = Input(shape=(1, input_channels, patch_size, patch_size))
    x = ZeroPadding3D(padding=(1, 0, 0))(input_x)
    x = Conv3D(n_planes, kernel_size=(7, 3, 3), padding="same")(x)
    x = Activation("relu")(x)

    x = ZeroPadding3D(padding=(1, 0, 0))(x)
    x = Conv3D(2 * n_planes, kernel_size=(3, 3, 3), padding="same")(x)
    x = Activation("relu")(x)

    x = Flatten()(x)
    x = Dense(n_classes, activation='softmax')(x)
    model = Model(input_x, x)
    return model, model_name
Example #11
def generator(latent_size=200, return_intermediate=False, with_bn=True):
    latent = Input(shape=(latent_size, ))

    bnm = 0

    x = _Dense(64 * 8 * 8, init='glorot_normal', name='gen_dense1')(latent)
    x = Reshape((8, 8, 8, 8))(x)
    x = _Conv3D(64,
                6,
                6,
                8,
                border_mode='same',
                init='he_uniform',
                name='gen_c1')(x)
    x = LeakyReLU()(x)
    if with_bn:
        x = _BatchNormalization(name='gen_bn1', mode=bnm)(x)
    x = UpSampling3D(size=(2, 2, 2))(x)
    x = ZeroPadding3D((0, 0, 2))(x)
    x = _Conv3D(6,
                1,
                1,
                10,
                border_mode='valid',
                init='he_uniform',
                name='gen_c2')(x)
    x = LeakyReLU()(x)
    if with_bn:
        x = _BatchNormalization(name='gen_bn2', mode=bnm)(x)

    x = UpSampling3D(size=(1, 1, 5))(x)
    x = ZeroPadding3D((1, 1, 0))(x)

    x = _Conv3D(1,
                3,
                3,
                1,
                bias=False,
                border_mode='valid',
                init='glorot_normal',
                name='gen_c3')(x)

    x = Activation('relu')(x)

    loc = _Model(input=latent, output=x)
    fake_image = loc(latent)
    _Model(input=[latent], output=fake_image)
    return _Model(input=[latent], output=fake_image, name='generator_model')
Example #12
    def get_colearning_block(encoder_outputs, reg_factor, index):
        modality_axis = 1

        stacked_features = concatenate(encoder_outputs, CHANNEL_AXIS)
        colearning_input = []

        # prepare encoder output for colearning
        for encoder_block in encoder_outputs:
            block_output = ZeroPadding3D(padding=1)(encoder_block)
            block_output = expand_dims(axis=modality_axis)(block_output)
            colearning_input.append(block_output)

        fusion_map = concatenate(colearning_input, modality_axis)
        fusion_map = Permute(
            (5, 1, 4, 3, 2))(fusion_map)  #(b,l,w,h,d,c)=>(b,c,l,d,h,w)
        fusion_map = conv4d_wrapper(
            num_filters=stacked_features._keras_shape[CHANNEL_AXIS],
            kernel_size=(
                2, 3, 3, 3
            ),  #experimental conv4d only operates on a channels-first basis
            reg_factor=reg_factor,
            padding='valid',
            index=index)(fusion_map)
        fusion_map = Permute(
            (2, 5, 4, 3, 1))(fusion_map)  #(b,c,l,d,h,w)=>(b,l,w,h,d,c)
        fusion_map = squeeze(modality_axis)(fusion_map)

        colearning_output = Multiply()([stacked_features, fusion_map])

        return colearning_output
Example #13
def discriminator():

    image = Input(shape=(25, 25, 25, 1))

    x = Conv3D(32, 5, 5, 5, border_mode='same')(image)
    x = LeakyReLU()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)  #added
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)
    
    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(16, 3, 3, 3, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)
    
    dnn = Model(image, h)
    dnn.summary()
    image = Input(shape=(25, 25, 25, 1))

    dnn_out = dnn(image)


    fake = Dense(1, activation='sigmoid', name='generation')(dnn_out)
    aux = Dense(1, activation='linear', name='auxiliary')(dnn_out)
    ecal = Lambda(lambda x: K.sum(x, axis=(1, 2, 3)))(image)
    Model(input=image, output=[fake, aux, ecal]).summary()
    return Model(input=image, output=[fake, aux, ecal])
Example #14
def _shortcut3d(input, residual, deconv=False):
    """3D shortcut to match input and residual and merges them with "sum"."""
    if deconv:
        stride_dim1 = math.floor(residual._keras_shape[DIM1_AXIS] /
                                 input._keras_shape[DIM1_AXIS])
        stride_dim2 = math.floor(residual._keras_shape[DIM2_AXIS] /
                                 input._keras_shape[DIM2_AXIS])
        stride_dim3 = math.floor(residual._keras_shape[DIM3_AXIS] /
                                 input._keras_shape[DIM3_AXIS])

        padding_dim1 = (
            residual._keras_shape[DIM1_AXIS]) % input._keras_shape[DIM1_AXIS]
        padding_dim2 = (
            residual._keras_shape[DIM2_AXIS]) % input._keras_shape[DIM2_AXIS]
        padding_dim3 = (
            residual._keras_shape[DIM3_AXIS]) % input._keras_shape[DIM3_AXIS]
        padding_dim1 = (math.floor(0.5 * padding_dim1),
                        math.ceil(0.5 * padding_dim1))
        padding_dim2 = (math.floor(0.5 * padding_dim2),
                        math.ceil(0.5 * padding_dim2))
        padding_dim3 = (math.floor(0.5 * padding_dim3),
                        math.ceil(0.5 * padding_dim3))

    else:
        stride_dim1 = math.ceil(input._keras_shape[DIM1_AXIS] /
                                residual._keras_shape[DIM1_AXIS])
        stride_dim2 = math.ceil(input._keras_shape[DIM2_AXIS] /
                                residual._keras_shape[DIM2_AXIS])
        stride_dim3 = math.ceil(input._keras_shape[DIM3_AXIS] /
                                residual._keras_shape[DIM3_AXIS])

    equal_channels = residual._keras_shape[CHANNEL_AXIS] == input._keras_shape[
        CHANNEL_AXIS]

    shortcut = input
    if stride_dim1 > 1 or stride_dim2 > 1 or stride_dim3 > 1 \
            or not equal_channels:

        if deconv:  #output = stride_dim * input_dim + padding_dim
            shortcut = Conv3DTranspose(
                filters=residual._keras_shape[CHANNEL_AXIS],
                kernel_size=(1, 1, 1),
                strides=(stride_dim1, stride_dim2, stride_dim3),
                kernel_initializer="he_normal",
                padding="valid",
                kernel_regularizer=l2(1e-4))(input)

            shortcut = ZeroPadding3D(padding=(padding_dim1, padding_dim2,
                                              padding_dim3))(shortcut)

        else:
            shortcut = Conv3D(filters=residual._keras_shape[CHANNEL_AXIS],
                              kernel_size=(1, 1, 1),
                              strides=(stride_dim1, stride_dim2, stride_dim3),
                              kernel_initializer="he_normal",
                              padding="valid",
                              kernel_regularizer=l2(1e-4))(input)

    return add([shortcut, residual])
Example #15
def c3d_model(summary=False):
    """ Return the Keras model of the network
    """
    model = Sequential()
    # 1st layer group
    model.add(Convolution3D(64, 3, 3, 3, activation='relu', 
                            border_mode='same', name='conv1',
                            subsample=(1, 1, 1), 
                            input_shape=(16,120,120,3)))
   # model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), 
                           border_mode='same', name='pool1'))
    # 2nd layer group
    model.add(Convolution3D(128, 3, 3, 3, activation='relu', 
                            border_mode='same', name='conv2',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), 
                           border_mode='same', name='pool2'))
    # 3rd layer group
    model.add(Convolution3D(256, 3, 3, 3, activation='relu', 
                            border_mode='same', name='conv3a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(256, 3, 3, 3, activation='relu', 
                            border_mode='same', name='conv3b',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), 
                           border_mode='same', name='pool3'))
    # 4th layer group
    model.add(Convolution3D(512, 3, 3, 3, activation='relu', 
                            border_mode='same', name='conv4a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(512, 3, 3, 3, activation='relu', 
                            border_mode='same', name='conv4b',
                            subsample=(1, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), 
                           border_mode='same', name='pool4'))
    # 5th layer group
    model.add(Convolution3D(512, 3, 3, 3, activation='relu', 
                            border_mode='same', name='conv5a',
                            subsample=(1, 1, 1)))
    model.add(Convolution3D(512, 3, 3, 3, activation='relu', 
                            border_mode='same', name='conv5b',
                            subsample=(1, 1, 1)))
    model.add(ZeroPadding3D(padding=(0, 1, 1)))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), 
                           border_mode='valid', name='pool5'))
    model.add(BatchNormalization())
    model.add(Flatten())
    # FC layers group
    model.add(Dense(4096,activation='relu', name='fc6'))
    model.add(Dropout(.5))
    model.add(BatchNormalization())
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5))
   
    model.add(Dense(14, activation='softmax', name='fc9'))
    if summary:
        print(model.summary())
    return model
Example #16
def discriminator(keras_dformat='channels_last'):

    if keras_dformat == 'channels_last':
        dshape = (25, 25, 25, 1)
        daxis = (1, 2, 3)
    else:
        dshape = (1, 25, 25, 25)
        daxis = (2, 3, 4)

    image = Input(shape=dshape)

    x = Conv3D(32, (5, 5, 5), data_format=keras_dformat, padding='same')(image)
    x = LeakyReLU()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, (5, 5, 5), data_format=keras_dformat, padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, (5, 5, 5), data_format=keras_dformat, padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(8, (5, 5, 5), data_format=keras_dformat, padding='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)

    dnn = Model(image, h)

    dnn_out = dnn(image)

    fake = Dense(1, activation='sigmoid', name='generation')(dnn_out)
    aux = Dense(1, activation='linear', name='auxiliary')(dnn_out)
    ecal = Lambda(lambda x: K.sum(x, axis=daxis))(image)
    Model(input=image, output=[fake, aux, ecal]).summary()
    return Model(input=image, output=[fake, aux, ecal])
Example #17
def pad_to_fit(inputs, n_layers=4):
  width = int_shape(inputs)[1]
  height = int_shape(inputs)[2]
  depth = int_shape(inputs)[3]

  w_pad, h_pad, d_pad = calc_fit_pad(width, height, depth, n_layers)

  x = ZeroPadding3D((w_pad, h_pad, d_pad))(inputs)
  return x
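calc_fit_pad and int_shape are not shown in the source; int_shape is presumably keras.backend.int_shape. One plausible sketch of calc_fit_pad (an assumption about its intent, not the original implementation) pads each side of a spatial dimension so the padded size is a multiple of 2**n_layers and therefore survives n_layers rounds of stride-2 downsampling:

def calc_fit_pad(width, height, depth, n_layers):
    # Hypothetical helper: per-side padding that rounds each dimension up to a
    # multiple of 2**n_layers (ZeroPadding3D pads both sides by the value given).
    div = 2 ** n_layers
    def side_pad(dim):
        total = (div - dim % div) % div      # total padding still needed
        return (total + 1) // 2              # split across the two sides
    return side_pad(width), side_pad(height), side_pad(depth)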
Example #18
def generator(latent_size=200,
              return_intermediate=False,
              keras_dformat='channels_last'):

    if keras_dformat == 'channels_last':
        dim = (7, 7, 8, 8)
    else:
        dim = (8, 7, 7, 8)

    loc = Sequential([
        Dense(64 * 7 * 7, input_dim=latent_size),
        Reshape(dim),
        Conv3D(64, (6, 6, 8),
               data_format=keras_dformat,
               padding='same',
               kernel_initializer='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 2)),
        ZeroPadding3D((2, 2, 0)),
        Conv3D(6, (6, 5, 8),
               data_format=keras_dformat,
               kernel_initializer='he_uniform'),
        LeakyReLU(),
        BatchNormalization(),
        UpSampling3D(size=(2, 2, 3)),
        ZeroPadding3D((1, 0, 3)),
        Conv3D(6, (3, 3, 8),
               data_format=keras_dformat,
               kernel_initializer='he_uniform'),
        LeakyReLU(),
        Conv3D(1, (2, 2, 2),
               data_format=keras_dformat,
               use_bias=False,
               kernel_initializer='glorot_normal'),
        Activation('relu')
    ])

    latent = Input(shape=(latent_size, ))

    fake_image = loc(latent)

    Model(input=[latent], output=fake_image).summary()
    return Model(input=[latent], output=fake_image)
Example #19
def discriminator(dflag=0, df=8, dx=5, dy=5, dz=5, dp=0.2):

    image = Input(shape=(25, 25, 25, 1))
    x = image
    if dflag == 1:
        x = Conv3D(df, dx, dy, dz, border_mode='same')(x)
        x = LeakyReLU()(x)
        x = BatchNormalization()(x)
        x = Dropout(dp)(x)

    x = Conv3D(32, 5, 5, 5, border_mode='same')(x)  # chain from x so the optional dflag block is not discarded
    x = LeakyReLU()(x)
    x = Dropout(dp)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dp)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dp)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(dp)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)

    dnn = Model(image, h)

    dnn_out = dnn(image)

    fake = Dense(1, activation='sigmoid', name='generation')(dnn_out)
    aux = Dense(1, activation='linear', name='auxiliary')(dnn_out)
    ecal = Lambda(lambda x: K.sum(x, axis=(1, 2, 3)))(image)
    Model(input=image, output=[fake, aux, ecal])
    return Model(input=image, output=[fake, aux, ecal])
Example #20
File: c3d.py  Project: Primus19/vid_anomaly
def C3D(weights='sports1M'):
    """Instantiates a C3D Kerasl model

    Keyword arguments:
    weights -- weights to load into model. (default is sports1M)

    Returns:
    A Keras model.

    """

    if weights not in {'sports1M', None}:
        raise ValueError('weights should either be sports1M or None')

    if K.image_data_format() == 'channels_last':
        shape = (16,112,112,3)
    else:
        shape = (3,16,112,112)

    model = Sequential()
    model.add(Conv3D(64, 3, activation='relu', padding='same', name='conv1', input_shape=shape))
    model.add(MaxPooling3D(pool_size=(1,2,2), strides=(1,2,2), padding='same', name='pool1'))

    model.add(Conv3D(128, 3, activation='relu', padding='same', name='conv2'))
    model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool2'))

    model.add(Conv3D(256, 3, activation='relu', padding='same', name='conv3a'))
    model.add(Conv3D(256, 3, activation='relu', padding='same', name='conv3b'))
    model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool3'))

    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv4a'))
    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv4b'))
    model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool4'))

    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv5a'))
    model.add(Conv3D(512, 3, activation='relu', padding='same', name='conv5b'))
    model.add(ZeroPadding3D(padding=(0,1,1)))
    model.add(MaxPooling3D(pool_size=(2,2,2), strides=(2,2,2), padding='valid', name='pool5'))

    model.add(Flatten())

    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(0.5))
    model.add(Dense(487, activation='softmax', name='fc8'))

    if weights == 'sports1M':
        weights_path = get_file('sports1M_weights_tf.h5',
                                WEIGHTS_PATH,
                                cache_subdir='models',
                                md5_hash='b7a93b2f9156ccbebe3ca24b41fc5402')

        model.load_weights(weights_path)

    return model
Example #21
def resnetttt(in_shape):
    inp = Input(shape=in_shape)
    out = ZeroPadding3D((3, 3, 3))(inp)
    out = Convolution3D(64, 5, strides=1)(out)
    print("input shape:", out.shape)
    out = BatchNormalization()(out)
    out = Activation('relu')(out)
    out = MaxPooling3D((3, 3, 3), strides=(2, 2, 2))(out)
    print("shape:", out.shape)

    out = conv_block(out, [64, 64, 256])  #[32, 32, 64]
    out = identity_block(out, [64, 64, 256])
    out = identity_block(out, [64, 64, 256])
    print("stage 1 shape:", out.shape)

    out = conv_block(out, [128, 128, 512])  #[64, 64, 64]
    out = identity_block(out, [128, 128, 512])
    out = identity_block(out, [128, 128, 512])
    out = identity_block(out, [128, 128, 512])
    print("stage 2 shape:", out.shape)

    out = conv_block(out, [256, 256, 1024])
    out = se_identity_block(out, [256, 256, 1024])
    out = se_identity_block(out, [256, 256, 1024])
    out = se_identity_block(out, [256, 256, 1024])
    out = se_identity_block(out, [256, 256, 1024])
    out = se_identity_block(out, [256, 256, 1024])
    print("stage 3 shape:", out.shape)

    out = conv_block(out, [512, 512, 2048])
    out = se_identity_block(out, [512, 512, 2048])
    out = se_identity_block(out, [512, 512, 2048])
    print("stage 4 shape:", out.shape)

    out = GlobalAveragePooling3D(data_format='channels_last')(out)
    print("pooling shape:", out.shape)

    # out = Dense(1000, activation='softmax')(out)
    # out = Flatten()(out)
    # out = Dense(500, activation='relu')(out)
    # out = Dropout(rate=0.25)(out)

    out = Dense(150, activation='relu')(out)  #250
    out = Dropout(rate=0.3)(out)
    print("dense shape:", out.shape)

    out = Dense(2, activation='softmax')(out)
    print("dense shape:", out.shape)

    model = Model(inp, out)

    model.compile(optimizer=SGD(lr=1e-6),
                  loss='categorical_crossentropy',
                  metrics=['acc'])
    return model
Example #22
    def __init__(self, frame_count, image_channels=3, image_height=50, image_width=100, max_string=32, output_size=28):
        input_shape = self.get_input_shape(frame_count, image_channels, image_height, image_width)
        self.input_layer = Input(shape=input_shape, dtype='float32', name='input')

        self.zero_1 = ZeroPadding3D(padding=(1, 2, 2), name='zero_1')(self.input_layer)
        self.conv_1 = Conv3D(32, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_1')(self.zero_1)
        self.batc_1 = BatchNormalization(name='batc_1')(self.conv_1)
        self.actv_1 = Activation('relu', name='actv_1')(self.batc_1)
        self.pool_1 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_1')(self.actv_1)
        self.drop_1 = SpatialDropout3D(0.5, name='drop_1')(self.pool_1)

        self.zero_2 = ZeroPadding3D(padding=(1, 2, 2), name='zero_2')(self.drop_1)
        self.conv_2 = Conv3D(64, (3, 5, 5), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_2')(self.zero_2)
        self.batc_2 = BatchNormalization(name='batc_2')(self.conv_2)
        self.actv_2 = Activation('relu', name='actv_2')(self.batc_2)
        self.pool_2 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_2')(self.actv_2)
        self.drop_2 = SpatialDropout3D(0.5, name='drop_2')(self.pool_2)

        self.zero_3 = ZeroPadding3D(padding=(1, 1, 1), name='zero_3')(self.drop_2)
        self.conv_3 = Conv3D(96, (3, 3, 3), strides=(1, 2, 2), kernel_initializer='he_normal', name='conv_3')(self.zero_3)
        self.batc_3 = BatchNormalization(name='batc_3')(self.conv_3)
        self.actv_3 = Activation('relu', name='actv_3')(self.batc_3)
        self.pool_3 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), name='pool_3')(self.actv_3)
        self.drop_3 = SpatialDropout3D(0.5, name='drop_3')(self.pool_3)

        self.res = TimeDistributed(Flatten())(self.drop_3)

        self.gru_1 = Bidirectional(GRU(256, return_sequences=True, activation=None, kernel_initializer='Orthogonal', name='gru_1'), merge_mode='concat')(self.res)
        self.gru_1_actv = Activation('relu', name='gru_1_actv')(self.gru_1)
        self.gru_2 = Bidirectional(GRU(256, return_sequences=True, activation=None, kernel_initializer='Orthogonal', name='gru_2'), merge_mode='concat')(self.gru_1_actv)
        self.gru_2_actv = Activation('relu', name='gru_2_actv')(self.gru_2)

        self.dense_1 = Dense(output_size, kernel_initializer='he_normal', name='dense_1')(self.gru_2_actv)
        self.y_pred  = Activation('softmax', name='softmax')(self.dense_1)

        self.input_labels = Input(shape=[max_string], dtype='float32', name='labels')
        self.input_length = Input(shape=[1], dtype='int64', name='input_length')
        self.label_length = Input(shape=[1], dtype='int64', name='label_length')

        self.loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([self.y_pred, self.input_labels, self.input_length, self.label_length])

        self.model = Model(inputs=[self.input_layer, self.input_labels, self.input_length, self.label_length], outputs=self.loss_out)
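ctc_lambda_func is referenced but not defined in this snippet. The usual Keras CTC wrapper looks like the sketch below (an assumption about the missing helper, not necessarily the author's exact code); note that the Lambda layer above passes [y_pred, labels, input_length, label_length] in that order.

from tensorflow.keras import backend as K

def ctc_lambda_func(args):
    # Unpack in the order the Lambda layer supplies its inputs.
    y_pred, labels, input_length, label_length = args
    # ctc_batch_cost expects (y_true, y_pred, input_length, label_length).
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)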
Example #23
def resnet(shape, classes, is_regression=False):
    inpt = Input(shape=shape)
    x = ZeroPadding3D((1, 1, 1), data_format='channels_first')(inpt)

    # conv1
    x = Conv3d_BN(x,
                  nb_filter=16,
                  kernel_size=(6, 6, 6),
                  strides=1,
                  padding='valid')
    x = MaxPooling3D(pool_size=(3, 3, 3),
                     strides=2,
                     data_format='channels_first')(x)

    # conv2_x
    x = identity_Block(x,
                       nb_filter=32,
                       kernel_size=(2, 2, 2),
                       strides=1,
                       with_conv_shortcut=True)
    x = identity_Block(x, nb_filter=32, kernel_size=(2, 2, 2))
    #     x = identity_Block(x, nb_filter=64, kernel_size=(3, 3, 3))

    # conv3_x
    x = identity_Block(x,
                       nb_filter=64,
                       kernel_size=(2, 2, 2),
                       strides=1,
                       with_conv_shortcut=True)
    x = identity_Block(x, nb_filter=64, kernel_size=(2, 2, 2))
    #     x = identity_Block(x, nb_filter=128, kernel_size=(3, 3, 3))
    #     x = identity_Block(x, nb_filter=128, kernel_size=(3, 3, 3))

    #     # conv4_x
    #     x = identity_Block(x, nb_filter=256, kernel_size=(3, 3, 3), strides=2, with_conv_shortcut=True)
    #     x = identity_Block(x, nb_filter=256, kernel_size=(3, 3, 3))
    #     x = identity_Block(x, nb_filter=256, kernel_size=(3, 3, 3))

    x = AveragePooling3D(pool_size=(2, 2, 2))(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.2)(x)
    x = Dense(256, activation='relu')(x)
    x = Dropout(0.2)(x)
    if is_regression:
        x = Dense(classes)(x)
    else:
        x = Dense(classes, activation='softmax')(x)

    model = Model(inputs=inpt, outputs=x)

    return model
Example #24
def video_model_a(input_dim, output_dim):
    model = Sequential()
    model.add(SpatialDropout3D(0.2, input_shape=(5, input_dim, input_dim, 3)))
    model.add(ZeroPadding3D((1, 1, 1)))
    model.add(Convolution3D(64, 3, 3, 3, activation='relu'))
    model.add(ZeroPadding3D((1, 1, 1)))
    model.add(Convolution3D(64, 3, 3, 3, activation='relu'))
    model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

    model.add(ZeroPadding3D((1, 1, 1)))
    model.add(Convolution3D(128, 3, 3, 3, activation='relu'))
    model.add(ZeroPadding3D((1, 1, 1)))
    model.add(Convolution3D(128, 3, 3, 3, activation='relu'))
    model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

    # not enough memory to run! :(
    # model.add(ZeroPadding3D((1, 1, 1)))
    # model.add(Convolution3D(256, 3, 3, 3, activation='relu'))
    # model.add(ZeroPadding3D((1, 1, 1)))
    # model.add(Convolution3D(256, 3, 3, 3, activation='relu'))
    # model.add(ZeroPadding3D((1, 1, 1)))
    # model.add(Convolution3D(256, 3, 3, 3, activation='relu'))
    # model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))
    #
    # model.add(ZeroPadding3D((1, 1, 1)))
    # model.add(Convolution3D(512, 3, 3, 3, activation='relu'))
    # model.add(ZeroPadding3D((1, 1, 1)))
    # model.add(Convolution3D(512, 3, 3, 3, activation='relu'))
    # model.add(ZeroPadding3D((1, 1, 1)))
    # model.add(Convolution3D(512, 3, 3, 3, activation='relu'))
    # model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(output_dim=output_dim, activation='linear'))

    return model
Example #25
def discriminator():

    image = Input(shape=(25, 25, 25, 1))

    x = Conv3D(32, 5, 5, 5, border_mode='same')(image)
    x = LeakyReLU()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((2, 2, 2))(x)
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = ZeroPadding3D((1, 1, 1))(x)
    x = Conv3D(8, 5, 5, 5, border_mode='valid')(x)
    x = LeakyReLU()(x)
    x = BatchNormalization()(x)
    x = Dropout(0.2)(x)

    x = AveragePooling3D((2, 2, 2))(x)
    h = Flatten()(x)

    dnn = Model(image, h)

    image = Input(shape=(25, 25, 25, 1))

    dnn_out = dnn(image)

    fake = Dense(1, activation='sigmoid', name='generation')(dnn_out)
    aux = Dense(1, activation='relu', name='auxiliary')(dnn_out)

    Model(input=image, output=[fake, aux]).summary()
    return Model(input=image, output=[fake, aux])
Example #26
def get_model(summary=False, backend='tf'):
    """ Return the Keras model of the network
    """
    model = Sequential()
    if backend == 'tf':
        input_shape=(16, 112, 112, 3) # l, h, w, c
    else:
        input_shape=(3, 16, 112, 112) # c, l, h, w
    model.add(Conv3D(64, (3, 3, 3), activation='relu',
                     padding='same', name='conv1',
                     input_shape=input_shape))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                           padding='valid', name='pool1'))
    # 2nd layer group
    model.add(Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same', name='conv2'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool2'))
    # 3rd layer group
    model.add(Conv3D(256, (3, 3, 3), activation='relu',
                     padding='same', name='conv3a'))
    model.add(Conv3D(256, (3, 3, 3), activation='relu',
                     padding='same', name='conv3b'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool3'))
    # 4th layer group
    model.add(Conv3D(512, (3, 3, 3), activation='relu',
                     padding='same', name='conv4a'))
    model.add(Conv3D(512, (3, 3, 3), activation='relu',
                     padding='same', name='conv4b'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool4'))
    # 5th layer group
    model.add(Conv3D(512, (3, 3, 3), activation='relu',
                     padding='same', name='conv5a'))
    model.add(Conv3D(512, (3, 3, 3), activation='relu',
                     padding='same', name='conv5b'))
    model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad5'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool5'))
    model.add(Flatten())
    # FC layers group
    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5))
    model.add(Dense(487, activation='softmax', name='fc8'))

    if summary:
        print(model.summary())

    return model
Example #27
def residual_zero_padding_block(x0, filters, first_unit=False,
                                down_sample=False):
    residual_kwargs = {
        'kernel_size': (3, 3),
        'padding': 'same',
        'use_bias': False,
        'kernel_initializer': he_normal(),
        'kernel_regularizer': l2(5e-4)
    }
    skip_kwargs = {
        'kernel_size': (1, 1),
        'padding': 'valid',
        'use_bias': False,
        'kernel_initializer': he_normal(),
        'kernel_regularizer': l2(5e-4)
    }

    if first_unit:
        x0 = BatchNormalization(momentum=.9)(x0)
        x0 = Activation('relu')(x0)
        if down_sample:
            residual_kwargs['strides'] = (2, 2)
            skip_kwargs['strides'] = (2, 2)
        x1 = Conv2D(filters, **residual_kwargs)(x0)
        x1 = BatchNormalization(momentum=.9)(x1)
        x1 = Activation('relu')(x1)
        residual_kwargs['strides'] = (1, 1)
        x1 = Conv2D(filters, **residual_kwargs)(x1)
        x0_img_shape = x0.shape.as_list()[1:-1]
        x1_img_shape = x1.shape.as_list()[1:-1]
        x0_filters = x0.shape.as_list()[-1]
        x1_filters = x1.shape.as_list()[-1]
        if x0_img_shape != x1_img_shape:
            x0 = Conv2D(x0_filters, **skip_kwargs)(x0)
        if x0_filters != x1_filters:
            target_shape = (x1_img_shape[0], x1_img_shape[1], x0_filters, 1)
            x0 = Reshape(target_shape)(x0)
            zero_padding_size = x1_filters - x0_filters
            x0 = ZeroPadding3D(((0, 0), (0, 0), (0, zero_padding_size)))(x0)
            target_shape = (x1_img_shape[0], x1_img_shape[1], x1_filters)
            x0 = Reshape(target_shape)(x0)
    else:
        x1 = BatchNormalization(momentum=.9)(x0)
        x1 = Activation('relu')(x1)
        x1 = Conv2D(filters, **residual_kwargs)(x1)
        x1 = BatchNormalization(momentum=.9)(x1)
        x1 = Activation('relu')(x1)
        x1 = Conv2D(filters, **residual_kwargs)(x1)
    x0 = Add()([x0, x1])
    return x0
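The Reshape / ZeroPadding3D / Reshape sequence above zero-pads the channel dimension of the identity shortcut when the residual branch has more filters. A minimal standalone sketch of that trick (illustrative shapes, assuming tf.keras):

from tensorflow.keras.layers import Input, Reshape, ZeroPadding3D

x = Input(shape=(8, 8, 16))                       # 8x8 feature map with 16 channels
y = Reshape((8, 8, 16, 1))(x)                     # treat channels as a third spatial axis
y = ZeroPadding3D(((0, 0), (0, 0), (0, 16)))(y)   # append 16 zero channels -> 32
y = Reshape((8, 8, 32))(y)                        # back to a 2D feature map with 32 channels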
Example #28
def get_model(nb_classes=21):
    input_shape=(16, 112, 112, 3) # l, h, w, c
    X_input = Input(input_shape)

    
    #1st layer group 
    x = Convolution3D(64, (3, 3, 3), activation='relu',
                            padding='same', name='conv1',
                            input_shape=input_shape)(X_input)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                           padding='valid', name='pool1')(x)
    # 2nd layer group
    x = Convolution3D(128, (3, 3, 3), activation='relu',
                            padding='same',  name='conv2')(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool2')(x)
    # 3rd layer group
    x = Convolution3D(256, (3, 3, 3), activation='relu',
                            padding='same',  name='conv3a')(x)
    x = Convolution3D(256, (3, 3, 3), activation='relu',
                            padding='same',  name='conv3b')(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool3')(x)
    # 4th layer group
    x = Convolution3D(512, (3, 3, 3), activation='relu',
                            padding='same',  name='conv4a')(x)
    x = Convolution3D(512, (3, 3, 3), activation='relu',
                            padding='same',  name='conv4b')(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool4')(x)
    # 5th layer group
    x = Convolution3D(512, (3, 3, 3), activation='relu',
                            padding='same',  name='conv5a')(x)
    x = Convolution3D(512, (3, 3, 3), activation='relu',
                            padding='same',  name='conv5b')(x)
    x = ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad5')(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool5')(x)
    x = Flatten(name='flatten_1')(x)
    # FC layers group
    x = Dense(4096, name='fc6')(x)
    x = Activation('relu', name='fc6_relu')(x)
    x = Dropout(.5)(x)
    x = Dense(4096, name='fc7')(x)
    x = Activation('relu', name='fc7_relu')(x)
    x = Dropout(.5)(x)
    x = Dense(nb_classes, activation='softmax', name='fc8')(x)

    model = Model(X_input, x)
    return model
Example #29
def create_model_functional():
    """ Creates model object with the functional API:
     https://keras.io/models/model/
     """
    inputs = Input(shape=(16, 112, 112, 3,))

    conv1 = Conv3D(64, (3, 3, 3), activation='relu',
                   padding='same', name='conv1')(inputs)
    pool1 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                         padding='valid', name='pool1')(conv1)

    conv2 = Conv3D(128, (3, 3, 3), activation='relu',
                   padding='same', name='conv2')(pool1)
    pool2 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                         padding='valid', name='pool2')(conv2)

    conv3a = Conv3D(256, (3, 3, 3), activation='relu',
                    padding='same', name='conv3a')(pool2)
    conv3b = Conv3D(256, (3, 3, 3), activation='relu',
                    padding='same', name='conv3b')(conv3a)
    pool3 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                         padding='valid', name='pool3')(conv3b)

    conv4a = Conv3D(512, (3, 3, 3), activation='relu',
                    padding='same', name='conv4a')(pool3)
    conv4b = Conv3D(512, (3, 3, 3), activation='relu',
                    padding='same', name='conv4b')(conv4a)
    pool4 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                         padding='valid', name='pool4')(conv4b)

    conv5a = Conv3D(512, (3, 3, 3), activation='relu',
                    padding='same', name='conv5a')(pool4)
    conv5b = Conv3D(512, (3, 3, 3), activation='relu',
                    padding='same', name='conv5b')(conv5a)
    zeropad5 = ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)),
                             name='zeropad5')(conv5b)
    pool5 = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                         padding='valid', name='pool5')(zeropad5)

    flattened = Flatten()(pool5)
    fc6 = Dense(4096, activation='relu', name='fc6')(flattened)
    dropout1 = Dropout(rate=0.5)(fc6)

    fc7 = Dense(4096, activation='relu', name='fc7')(dropout1)
    dropout2 = Dropout(rate=0.5)(fc7)

    predictions = Dense(487, activation='softmax', name='fc8')(dropout2)

    return Model(inputs=inputs, outputs=predictions)
Example #30
def create_model_sequential():
    """ Creates model object with the sequential API:
    https://keras.io/models/sequential/
    """

    model = Sequential()
    input_shape = (16, 112, 112, 3)

    model.add(Conv3D(64, (3, 3, 3), activation='relu',
                     padding='same', name='conv1',
                     input_shape=input_shape))
    model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),
                           padding='valid', name='pool1'))
    # 2nd layer group
    model.add(Conv3D(128, (3, 3, 3), activation='relu',
                     padding='same', name='conv2'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool2'))
    # 3rd layer group
    model.add(Conv3D(256, (3, 3, 3), activation='relu',
                     padding='same', name='conv3a'))
    model.add(Conv3D(256, (3, 3, 3), activation='relu',
                     padding='same', name='conv3b'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool3'))
    # 4th layer group
    model.add(Conv3D(512, (3, 3, 3), activation='relu',
                     padding='same', name='conv4a'))
    model.add(Conv3D(512, (3, 3, 3), activation='relu',
                     padding='same', name='conv4b'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool4'))
    # 5th layer group
    model.add(Conv3D(512, (3, 3, 3), activation='relu',
                     padding='same', name='conv5a'))
    model.add(Conv3D(512, (3, 3, 3), activation='relu',
                     padding='same', name='conv5b'))
    model.add(ZeroPadding3D(padding=((0, 0), (0, 1), (0, 1)), name='zeropad5'))
    model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),
                           padding='valid', name='pool5'))
    model.add(Flatten())
    # FC layers group
    model.add(Dense(4096, activation='relu', name='fc6'))
    model.add(Dropout(.5))
    model.add(Dense(4096, activation='relu', name='fc7'))
    model.add(Dropout(.5))
    model.add(Dense(487, activation='softmax', name='fc8'))

    return model