Пример #1
0
 def _pooling_function(self, inputs, pool_size, strides, padding,
                       data_format):
     """Average-pool a complex-valued tensor.

     The input is split into real and imaginary parts, each part is run
     through its own ``KL.AveragePooling3D`` layer, and the two pooled
     results are recombined into a complex tensor.

     NOTE(review): ``data_format`` is accepted but not forwarded to the
     pooling layers — confirm whether that is intentional.
     """
     real_part, imag_part = complex_to_real_imag(inputs)
     pooled_real = KL.AveragePooling3D(pool_size, strides, padding)(real_part)
     pooled_imag = KL.AveragePooling3D(pool_size, strides, padding)(imag_part)
     return real_imag_to_complex(pooled_real, pooled_imag)
Пример #2
0
def neck(input_tensor):
    """Network head: ReLU, 3D average pooling (layer defaults), flatten,
    then 20% dropout.
    """
    activated = layers.Activation('relu')(input_tensor)
    pooled = layers.AveragePooling3D()(activated)
    flattened = layers.Flatten()(pooled)
    return layers.Dropout(0.2)(flattened)
Пример #3
0
def AvePooling3D(pool_size=(2, 2, 2),
                 strides=None,
                 padding='valid',
                 data_format='channels_first'):
    """Factory for ``kl.AveragePooling3D`` with channels-first defaults.

    All arguments are forwarded unchanged to the Keras layer.
    """
    return kl.AveragePooling3D(
        pool_size,
        strides=strides,
        padding=padding,
        data_format=data_format,
    )
Пример #4
0
def D3GenerateModel(n_filter=16, number_of_class=1, input_shape=(16,144,144,1),activation_last='softmax', metrics=['mse', 'acc', dice_coef, recall_at_thresholds, precision_at_thresholds], loss='categorical_crossentropy', dropout=0.05, init='glorot_uniform', two_output=False, lr=1e-3):
    """Build and compile a 3D CNN classifier.

    Pipeline: two 5x5x5 conv blocks with max-pooling, two 3x3x3 conv blocks
    with average-pool/up-sample, three multi-kernel Conv3D branches that are
    concatenated, a strided Conv3D downsample, then Reshape -> Conv2D ->
    Conv1D -> Softmax -> Flatten.

    :param n_filter: base number of convolution filters.
    :param input_shape: shape of the input volume fed to ``layers.Input``.
    :param metrics: metrics passed to ``model.compile``. NOTE(review): the
        default list references project metrics (``dice_coef`` etc.) that
        must be importable when this module is loaded.
    :param loss: loss passed to ``model.compile``.
    :param init: kernel initializer used by every conv layer.
    :param lr: learning rate for the Yogi optimizer. BUGFIX: previously
        ``lr`` was an undefined name, so building the optimizer raised
        ``NameError``; it is now an explicit, backward-compatible parameter.
    :param number_of_class, activation_last, dropout, two_output: accepted
        for interface compatibility but currently unused in the body.
    :return: a compiled Keras ``Model``.
    """
    filter_size = n_filter
    input_x = layers.Input(shape=input_shape, name='Input_layer', dtype='float32')
    # level 1: two 5x5x5 convs then spatial downsample
    x = layers.Conv3D(filters=filter_size, kernel_size=(5,5,5), strides=(1,1,1), kernel_initializer=init, padding='same')(input_x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.Conv3D(filters=filter_size, kernel_size=(5,5,5), strides=(1,1,1),
                      padding='same', kernel_initializer=init)(x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.MaxPooling3D(pool_size=(2,2,2), padding='same')(x)
    # level 2: two 3x3x3 convs, then avg-pool/up-sample in-plane only
    conv_list = []
    counter = 0
    x = layers.Conv3D(filters=filter_size*2, kernel_size=(3,3,3), strides=(1,1,1),
                      padding='same', kernel_initializer=init)(x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.Conv3D(filters=filter_size*2, kernel_size=(3,3,3), strides=(1,1,1),
                      padding='same', kernel_initializer=init)(x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.AveragePooling3D(pool_size=(1,2,2), padding='same')(x)
    x = layers.UpSampling3D(size=(1,2,2))(x)
    # three sequential branches with different kernel mixes; each branch's
    # output is collected and the three are concatenated
    for index, kernel_sizes in enumerate([
                                [(1,3,3), (3,3,1)],
                                [(3,3,3), (3,1,3)],
                                [(3,3,1), (3,3,3), (1,3,3)]
                                ]):
        for kernel_size in kernel_sizes:
            x = layers.Conv3D(filters=(filter_size*4), kernel_size=kernel_size, kernel_initializer=init, strides=(1,1,1), padding='same', name='Conv3D_%s' % (counter))(x)
            x = layers.BatchNormalization()(x)
            x = cyclical_learning_rate.SineReLU()(x)
            counter = counter + 1
        conv_list.append(x)
    x = layers.concatenate(conv_list)
    x = layers.Conv3D(filters=filter_size*8, kernel_size=(3,3,3), strides=(2,2,2), kernel_initializer=init,
                      padding='same')(x)
    x = layers.BatchNormalization()(x)
    x = cyclical_learning_rate.SineReLU()(x)
    # collapse the volume into a 2D map, then 1D, ending in per-class scores
    x = layers.Reshape(target_shape=[4, -1, filter_size*8])(x)
    x = layers.Conv2D(filters=filter_size*8, kernel_size=(1,1296), kernel_initializer=init, strides=(1,1296))(x)
    x = layers.BatchNormalization()(x)
    x = cyclical_learning_rate.SineReLU()(x)
    x = layers.Reshape(target_shape=[filter_size*8, -1])(x)
    x = layers.Conv1D(filters=2, kernel_size=filter_size*8, strides=filter_size*8, kernel_initializer=init)(x)
    x = layers.Softmax()(x)
    y = layers.Flatten()(x)
    model = Model(inputs=input_x, outputs=y)
    import yogi
    optimizer = yogi.Yogi(lr=lr)
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
Пример #5
0
def add_activation_and_max_pooling(adjustable, model, use_batch_norm, batch_norm_name, first_layer=False):
    """Append pooling + activation (+ optional batch norm) to a model.

    :param adjustable:      settings object; ``video_head_type`` selects the
                            3D (Sequential) or 2D (functional) path,
                            ``pooling_type`` selects avg vs max pooling, and
                            ``pooling_size`` holds per-position pool sizes
    :param model:           the model (or tensor, for ``cnn_lstm``) to add to
    :param use_batch_norm:  whether to append BatchNormalization (3D path only)
    :param batch_norm_name: name given to the BatchNormalization layer
    :param first_layer:     True selects ``pooling_size[0]``, else ``pooling_size[1]``
    :return:                the extended model (3D path), the activation tensor
                            (cnn_lstm path), or None for any other head type
    """
    # Both branches only differ in which pooling_size entry they use.
    idx = 0 if first_layer else 1

    if adjustable.video_head_type == '3d_convolution':
        size = adjustable.pooling_size[idx]
        if adjustable.pooling_type == 'avg_pooling':
            model.add(layers.AveragePooling3D(pool_size=(size[0], size[1], size[2])))
        else:  # max_pooling
            model.add(layers.MaxPool3D(pool_size=(size[0], size[1], size[2])))

        model.add(layers.Activation(adjustable.activation_function))

        if use_batch_norm:
            model.add(layers.BatchNormalization(name=batch_norm_name,
                                                trainable=adjustable.trainable_bn))
        return model

    elif adjustable.video_head_type == 'cnn_lstm':
        size = adjustable.pooling_size[idx]
        if adjustable.pooling_type == 'avg_pooling':
            pooling = layers.AveragePooling2D(pool_size=(size[0], size[1]))(model)
        else:  # max_pooling
            pooling = layers.MaxPool2D(pool_size=(size[0], size[1]))(model)

        return layers.Activation(adjustable.activation_function)(pooling)
Пример #6
0
def transition_block3D(x, reduction, name):
    """A 3D DenseNet transition block: BN -> ReLU -> 1x1x1 conv that
    compresses the channel count by ``reduction`` -> stride-2 average pool.

    # Arguments
        x: input tensor (channel axis is 4, i.e. channels-last 5D input).
        reduction: float, compression rate at transition layers.
        name: string, block label used to name each layer.

    # Returns
        output tensor for the block.
    """
    channel_axis = 4
    y = layers.BatchNormalization(axis=channel_axis, epsilon=1.001e-5,
                                  name=name + '_bn')(x)
    y = layers.Activation('relu', name=name + '_relu')(y)
    n_channels = backend.int_shape(y)[channel_axis]
    y = layers.Conv3D(int(n_channels * reduction), 1, use_bias=False,
                      name=name + '_conv')(y)
    return layers.AveragePooling3D(2, strides=2, name=name + '_pool')(y)
Пример #7
0
def build_model(config,args,print_summary=True):
   """Build a multi-tower residual 3D CNN classifier.

   The input volume is split along axis 2 into slabs of 4 planes; each slab
   runs through its own small residual stack, the tower outputs are
   concatenated along that axis, a second residual stack plus average
   pooling follows, and a dense softmax produces the class scores.

   :param config: dict; reads ``config['data_handling']['image_shape']``
       and ``config['data_handling']['classes']``.
   :param args: namespace with ``horovod`` / ``ml_comm`` flags — used only
       to decide which rank prints the model summary.
   :param print_summary: if True, print ``model.summary()`` (rank 0 only
       when running distributed).
   :return: an uncompiled Keras ``Model``.
   """
   image_shape = config['data_handling']['image_shape']
   input_image = kl.Input(shape=tuple([1] + image_shape))
   logger.debug('input image = %s',input_image)

   outputs = []
   for i in range(0,image_shape[0],4):

      logger.debug('i = %s',i)
      # Slice out a 4-plane slab. BUGFIX: `i=i` binds the loop value as a
      # default argument so the lambda is not late-bound — otherwise every
      # Lambda re-invoked after the loop would see the final value of `i`.
      subimg = kl.Lambda(lambda x,i=i: x[:,:,i:i+4,:,:])(input_image)

      num_filters = 64
      # (removed dead `x = subimg`: it was immediately overwritten below)
      x = conv_layer(subimg,num_filters=num_filters)
      # Instantiate the per-tower stack of residual units
      for stack in range(2):
         for res_block in range(2):
            strides = (1,1,1)
            if stack > 0 and res_block == 0:  # first layer but not first stack
               strides = (2,2,2)  # downsample
            y = conv_layer(inputs=x,
                          num_filters=num_filters,
                          strides=strides)
            y = conv_layer(inputs=y,
                          num_filters=num_filters,
                          activation=None)
            if stack > 0 and res_block == 0:  # first layer but not first stack
               # linear projection residual shortcut connection to match
               # changed dims
               x = conv_layer(x,
                              num_filters=num_filters,
                              kernel_size=1,
                              strides=strides,
                              activation=None,
                              batch_normalization=False)
            x = kl.add([x, y])
            x = kl.Activation('relu')(x)
         num_filters *= 2

      outputs.append(x)

   # undo the final doubling so the merged stack starts at the tower width
   num_filters = int(num_filters/2)
   logger.debug('filters = %s',num_filters)
   x = kl.Concatenate(axis=2)(outputs)
   logger.debug('concat: %s',x)

   # Instantiate the stack of residual units over the merged towers
   for stack in range(2):
      logger.debug('stack: %s',stack)
      for res_block in range(3):
         logger.debug('res_block: %s',res_block)
         strides = (1,1,1)
         if stack > 0 and res_block == 0:  # first layer but not first stack
            strides = (2,2,2)  # downsample
         logger.debug('x: %s',x)
         y = conv_layer(x,
                       num_filters=num_filters,
                       strides=strides)
         logger.debug('y: %s',y)
         y = conv_layer(y,
                       num_filters=num_filters,
                       activation=None)
         logger.debug('y: %s',y)
         if stack > 0 and res_block == 0:  # first layer but not first stack
            # linear projection residual shortcut connection to match
            # changed dims
            x = conv_layer(x,
                           num_filters=num_filters,
                           kernel_size=1,
                           strides=strides,
                           activation=None,
                           batch_normalization=False)
         x = kl.add([x, y])
         x = kl.Activation('relu')(x)
      num_filters *= 2

   logger.debug('out = %s',x)

   x = kl.AveragePooling3D(pool_size=(1,1,2))(x)

   y = kl.Flatten()(x)

   outputs = kl.Dense(len(config['data_handling']['classes']),activation='softmax',kernel_initializer='he_normal')(y)

   model = Model(input_image,outputs)

   line_length = 150
   positions = [.2, .45, .77, 1.]
   if print_summary:
      if args.horovod:
         import horovod.keras as hvd
         if hvd.rank() == 0:
            model.summary(line_length=line_length,positions=positions)
      elif args.ml_comm:
         import ml_comm as mc
         if mc.get_rank() == 0:
            model.summary(line_length=line_length,positions=positions)
      else:
         model.summary(line_length=line_length,positions=positions)

   return model
        i += 1
        yield x_train, y_train  # tuple 类型
        # 15个批次后重新遍历数据,此循环即死循环
        if i == 465 // batch_size:
            i = 0


# In[46]:

import keras
from keras import Sequential, layers
from sklearn import model_selection, metrics

# 3D CNN: three Conv3D + AveragePooling3D stages interleaved with batch
# norm, then a dense head emitting 2 linear outputs trained with MSE.
# BUGFIX: the file imports `Sequential` (from keras import Sequential, ...)
# but never imported the `keras` module itself, so `keras.Sequential`
# raised NameError — use the imported name directly.
model1 = Sequential([
    layers.BatchNormalization(input_shape=(32, 32, 32, 1)),
    layers.Conv3D(32, (3, 3, 3), activation='relu'),
    layers.AveragePooling3D(pool_size=(2, 2, 2)),
    layers.BatchNormalization(),
    layers.Conv3D(64, (3, 3, 3), activation='relu'),
    layers.AveragePooling3D(pool_size=(2, 2, 2)),
    layers.BatchNormalization(),
    layers.Conv3D(128, (3, 3, 3), activation='relu'),
    layers.AveragePooling3D(pool_size=(2, 2, 2)),
    layers.Dropout(0.2),
    layers.Flatten(),
    layers.Dense(1024, activation='relu'),
    layers.Dense(256, activation='relu'),
    layers.Dense(2, ),  # linear pair of outputs (regression targets)
])
model1.compile(optimizer=keras.optimizers.Adadelta(),
               loss='mean_squared_error',
               metrics=['mean_squared_error'])