def train_model(airport):
    """Train (or resume training of) a 3D-CNN rain classifier for one airport.

    Builds sliding 8-step windows of three ERA "z" (geopotential) fields at
    500/850/1000 hPa, one-hot encodes the binary rain labels, then either
    loads a previously saved model or builds+compiles a fresh one, fits it,
    and saves it as 'model_3d_<airport>.h5'.
    """
    # Import data: the same parameter "z" at three pressure levels.
    params = ["z", "z", "z"]
    levels = [500, 850, 1000]
    in1_var = get_era_full(params[0], levels[0])
    in2_var = get_era_full(params[1], levels[1])
    in3_var = get_era_full(params[2], levels[2])
    # Stack the three fields along a new trailing channel axis
    # -> (time, 80, 120, 3).
    x = np.concatenate(
        (np.expand_dims(in1_var, axis=3), np.expand_dims(
            in2_var, axis=3), np.expand_dims(in3_var, axis=3)),
        axis=3)
    # Sliding windows of 8 consecutive time steps.
    # NOTE(review): 13141 is hard-coded — presumably len(x) - 7 for this
    # dataset; confirm against the actual ERA archive length.
    X = np.zeros((13141, 80, 120, 8, 3))
    for i in range(13141):
        # rollaxis moves the 8-step time axis into position 2
        # -> each sample is (80, 120, 8, 3).
        X[i, :, :, :, :] = np.rollaxis(x[i:i + 8, :, :, :], 0, 3)
    # Labels start at offset 7 so each window is paired with its last step.
    Y = get_rains(airport)[7:]
    # One-hot encode the binary labels into shape (n, 2).
    b = np.zeros((Y.shape[0], 2))
    b[np.arange(Y.shape[0]), Y] = 1
    model = None
    if os.path.isfile('model_3d_{}.h5'.format(airport)):
        # Resume from the previously saved model (includes optimizer state).
        model = load_model('model_3d_{}.h5'.format(airport))
    else:
        model = Sequential()
        model.add(
            Convolution3D(128, (3, 3, 3),
                          padding='same',
                          activation='relu',
                          name='block1_conv1',
                          input_shape=(80, 120, 8, 3)))
        model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))
        model.add(
            Convolution3D(256, (3, 3, 3),
                          padding='same',
                          activation='relu',
                          name='block2_conv1'))
        model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))
        model.add(Flatten())
        model.add(Dense(2, activation='softmax', name='final_fully_connected'))
        adagrad = Adagrad(lr=0.0002)
        model.compile(loss='categorical_crossentropy',
                      optimizer=adagrad,
                      metrics=['accuracy'])
    # Per-airport CSV training log; training always runs (fresh or resumed).
    csv_logger = CSVLogger('{}.log'.format(airport))
    model.fit(X, b, batch_size=20, epochs=100, verbose=1,
              validation_split=0.2, callbacks=[csv_logger])
    model.save('model_3d_{}.h5'.format(airport))
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2, 2), trainable=True):
    """ResNet 'conv block' for 3D inputs: a 1-k-1 bottleneck branch plus a
    strided 1x1x1 projection shortcut, merged by addition.

    FIX: the default `strides` was the 2-tuple (2, 2); Convolution3D requires
    three stride values, so building the block with the default crashed at
    graph-construction time (the sibling conv_block_td already uses
    (2, 2, 2)). Callers passing `strides` explicitly are unaffected.

    filters -- (nb_filter1, nb_filter2, nb_filter3) bottleneck widths.
    stage/block -- used only to build unique layer names.
    """
    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 4  # channels-last 5D tensors
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    x = Convolution3D(nb_filter1, (1, 1, 1), strides=strides,
                      name=conv_name_base + '2a',
                      trainable=trainable)(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filter2, (kernel_size, kernel_size, kernel_size),
                      padding='same', name=conv_name_base + '2b',
                      trainable=trainable)(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filter3, (1, 1, 1), name=conv_name_base + '2c',
                      trainable=trainable)(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
    # Projection shortcut matches the branch's stride and channel count.
    shortcut = Convolution3D(nb_filter3, (1, 1, 1), strides=strides,
                             name=conv_name_base + '1',
                             trainable=trainable)(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis,
                                  name=bn_name_base + '1')(shortcut)
    x = Add()([x, shortcut])
    x = Activation('relu')(x)
    return x
def create_cnn_network(input_dim, no_conv_filt, dense_n):
    '''Base network to be shared (eq. to feature extraction).

    Two 3D conv blocks (half-width then full-width filters) with dropout and
    batch norm, then a dense_n-unit feature layer. Uses the Keras 1 API
    (border_mode/dim_ordering, BatchNormalization(mode=2)).

    NOTE(review): another create_cnn_network with a different signature
    appears later in this file; the definition executed last shadows this
    one at import time — confirm which is intended.
    '''
    seq = Sequential()
    kern_size = 3
    # conv layers
    # 1
    seq.add(Convolution3D(int(no_conv_filt/2), kern_size, kern_size, kern_size,
                          input_shape=input_dim, border_mode='valid',
                          dim_ordering='th', activation='relu'))
    seq.add(Dropout(.1))
    seq.add(BatchNormalization(mode=2))
    # 2 (input_shape here is redundant after the first layer but harmless)
    seq.add(Convolution3D(no_conv_filt, kern_size, kern_size, kern_size,
                          input_shape=input_dim, border_mode='valid',
                          dim_ordering='th', activation='relu'))
    seq.add(Dropout(.1))
    seq.add(BatchNormalization(mode=2))
    # dense layers
    seq.add(Flatten())
    seq.add(Dense(dense_n, activation='relu'))
    seq.add(BatchNormalization(mode=2))
    # seq.add(Dense(50, activation='relu'))
    # seq.add(BatchNormalization(mode=2))
    return seq
def identity_block_td(input_tensor, kernel_size, filters, stage, block, trainable=True):
    """Time-distributed ResNet identity block: a 1-k-1 bottleneck of 3D convs,
    each wrapped in TimeDistributed, added back onto the block input (so the
    branch must preserve shape — filters[2] must equal the input channels).

    filters -- (nb_filter1, nb_filter2, nb_filter3) bottleneck widths.
    stage/block -- used only to build unique layer names.
    """
    # identity block time distributed
    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 4  # channels axis of the wrapped (non-time) 5D tensor
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    x = TimeDistributed(Convolution3D(nb_filter1, (1, 1, 1),
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(BatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(
        Convolution3D(nb_filter2, (kernel_size, kernel_size, kernel_size),
                      trainable=trainable, kernel_initializer='normal',
                      padding='same'),
        name=conv_name_base + '2b')(x)
    x = TimeDistributed(BatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(Convolution3D(nb_filter3, (1, 1, 1),
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(BatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2c')(x)
    # Residual connection: branch output + unmodified block input.
    x = Add()([x, input_tensor])
    x = Activation('relu')(x)
    return x
def identity_block(input_tensor, kernel_size, filters, bn_axis=4, trainable=True):
    """ResNet identity block (Keras 1 API: positional kernel dims,
    border_mode, merge). The 1-k-1 bottleneck branch is summed with the
    unmodified input, so filters[2] must equal the input channel count.
    """
    x = Convolution3D(filters[0], 1, 1, 1, trainable=trainable)(input_tensor)
    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)
    x = Convolution3D(filters[1], kernel_size, kernel_size, kernel_size,
                      border_mode='same', trainable=trainable)(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)
    x = Convolution3D(filters[2], 1, 1, 1, trainable=trainable)(x)
    x = BatchNormalization(axis=bn_axis)(x)
    # Keras 1 merge: element-wise sum with the identity shortcut.
    x = merge([x, input_tensor], mode='sum')
    x = Activation('relu')(x)
    return x
def model_26(GPUS = 1, box_size = 9):
    """ one dense: 1000

    3D CNN over (box_size, box_size, box_size, 20) one-hot residue boxes:
    four conv+BN+ReLU stages (50/100/150/200 filters), one 1000-unit dense
    hidden layer, and a softmax over the 20 amino-acid classes. Wrapped
    with multi_gpu_model when GPUS >= 2.
    (The original docstring said 3000 and the inline comments said 32/300
    nodes — corrected to match the actual layer widths below.)
    """
    model = Sequential()
    # 50 output filters; kernel_size is the moving window; the input shape
    # is fixed by box_size with 20 one-hot channels.
    model.add(Convolution3D(50, kernel_size = (3, 3, 3), strides = (1, 1, 1),
                            input_shape = (box_size, box_size, box_size, 20)))
    model.add(BatchNormalization())
    model.add(Activation(activation = 'relu'))
    model.add(Convolution3D(100, (2, 2, 2)))
    model.add(BatchNormalization())
    model.add(Activation(activation = 'relu'))
    model.add(Convolution3D(150, (2, 2, 2)))
    model.add(BatchNormalization())
    model.add(Activation(activation = 'relu'))
    model.add(Convolution3D(200, (2, 2, 2)))
    model.add(BatchNormalization())
    model.add(Activation(activation = 'relu'))
    model.add(Flatten())  # now our layers have been combined to one
    model.add(Dense(1000))  # 1000 nodes in the last hidden layer
    model.add(BatchNormalization())
    model.add(Activation(activation = 'relu'))
    model.add(Dense(20, activation = 'softmax'))  # output layer has 20 possible classes (amino acids 0 - 19)
    if GPUS >= 2:
        model = multi_gpu_model(model, gpus=GPUS)
    return model
def classifier(input_shape, kernel_size, pool_size):
    """Three-stage 3D conv classifier (Keras 1 API) ending in a 2-way softmax.

    FIX: the second stage used Convolution2D while passing all three kernel
    dimensions — a 2D layer cannot sit inside this 3D stack and its third
    positional argument would be misinterpreted; it is now Convolution3D,
    consistent with the neighbouring stages.

    kernel_size -- 3-sequence of per-axis kernel extents.
    pool_size -- pooling window passed to every MaxPooling3D.
    """
    model = Sequential()
    model.add(Convolution3D(16, kernel_size[0], kernel_size[1], kernel_size[2],
                            border_mode='valid', input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=pool_size))
    model.add(Convolution3D(32, kernel_size[0], kernel_size[1], kernel_size[2]))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=pool_size))
    model.add(Convolution3D(64, kernel_size[0], kernel_size[1], kernel_size[2]))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=pool_size))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))
    return model
def residual_block(x, nb_filters=16, subsample_factor=1):
    """Pre-activation 3D residual block (Keras 1 API) with a parameter-free
    shortcut: max-pooling when subsampling and zero-padded channels when the
    filter count grows (wide-ResNet style shortcut).
    """
    prev_nb_channels = K.int_shape(x)[4]  # channels-last 5D input
    if subsample_factor > 1:
        subsample = (subsample_factor, subsample_factor, subsample_factor)
        # shortcut: subsample + zero-pad channel dim
        shortcut = MaxPooling3D(pool_size=subsample)(x)
    else:
        subsample = (1, 1, 1)
        # shortcut: identity
        shortcut = x
    if nb_filters > prev_nb_channels:
        # zero_pad_channels is a module-level helper that pads the channel
        # axis so the element-wise sum below is shape-compatible.
        shortcut = Lambda(zero_pad_channels,
                          arguments={'pad': nb_filters - prev_nb_channels})(shortcut)
    # Pre-activation ordering: BN -> ReLU -> conv, twice.
    y = BatchNormalization(axis=4)(x)
    y = Activation('relu')(y)
    y = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample,
                      init='he_normal', border_mode='same')(y)
    y = BatchNormalization(axis=4)(y)
    y = Activation('relu')(y)
    y = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1),
                      init='he_normal', border_mode='same')(y)
    out = merge([y, shortcut], mode='sum')
    return out
def up_conv_block(self, input_tensor, filters, stage, block, stride=(1, 1, 1), size=(2, 2, 2), padding='same', layer=None):
    """Upsampling residual block: UpSampling3D followed by a 1-3-1 bottleneck
    branch, summed with an upsampled 1x1x1-projected shortcut. When `layer`
    is None each conv is followed by batch normalization; otherwise BN is
    skipped.

    FIX: `layer == None` comparisons replaced with the idiomatic
    `layer is None` (identity test — same outcome for None, no reliance on
    __eq__).
    NOTE(review): the shortcut conv uses l2(1e-5) while every other conv
    uses l2(1e-4) — looks accidental, but changing it would alter training,
    so it is left as-is.
    """
    filters1, filters2, filters3 = filters
    shortcut = input_tensor
    if K.image_data_format() == 'channels_last':
        bn_axis = 4
    else:
        bn_axis = 1
    up_conv_name_base = 'up' + str(stage) + block + '_branch'
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    x = UpSampling3D(size, name=up_conv_name_base + '2a')(input_tensor)
    x = Convolution3D(filters1, kernel_size=1, strides=stride,
                      name=conv_name_base + '2a',
                      kernel_regularizer=l2(1e-4))(x)
    if layer is None:
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = Convolution3D(filters2, kernel_size=3, padding=padding,
                      name=conv_name_base + '2b',
                      kernel_regularizer=l2(1e-4))(x)
    if layer is None:
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    x = Convolution3D(filters3, kernel_size=1, name=conv_name_base + '2c',
                      kernel_regularizer=l2(1e-4))(x)
    if layer is None:
        x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
    # Shortcut path: upsample the block input and project to filters3.
    shortcut = UpSampling3D(size, name=up_conv_name_base + '1')(input_tensor)
    shortcut = Convolution3D(filters3, kernel_size=1, strides=stride,
                             padding=padding, name=conv_name_base + '1',
                             kernel_regularizer=l2(1e-5))(shortcut)
    if layer is None:
        shortcut = BatchNormalization(axis=bn_axis,
                                      name=bn_name_base + '1')(shortcut)
    x = Add()([x, shortcut])
    x = Activation('relu')(x)
    return x
def my_model():
    '''Creation of the model Sequential and return a model for the training

    Channels-first 3D CNN over (14, 32, 32, 32) grids: conv + dropout, two
    conv/maxpool stages, dropout, then Dense(128) -> Dense(3).
    NOTE(review): the 3-way output uses 'sigmoid', giving independent
    per-class scores rather than a distribution — confirm this matches the
    loss used at training time.
    '''
    model = Sequential()
    # Conv layer 1
    model.add(
        Convolution3D(
            input_shape=(14, 32, 32, 32),
            filters=64,
            kernel_size=6,
            data_format='channels_first',
        ))
    model.add(LeakyReLU(alpha=0.1))
    # Dropout 1
    model.add(Dropout(0.2))
    # Conv layer 2
    model.add(
        Convolution3D(
            filters=64,
            kernel_size=3,
            padding='valid',
            data_format='channels_first',
        ))
    model.add(LeakyReLU(alpha=0.1))
    # Maxpooling 1
    model.add(
        MaxPooling3D(pool_size=(2, 2, 2),
                     strides=None,
                     padding='valid',
                     data_format='channels_first'))
    # conv Layer 3 (original comment mislabelled this as "2")
    model.add(
        Convolution3D(
            filters=64,
            kernel_size=1,
            padding='valid',
            data_format='channels_first',
        ))
    model.add(LeakyReLU(alpha=0.1))
    # Maxpooling 2
    model.add(
        MaxPooling3D(
            pool_size=(2, 2, 2),
            strides=None,
            padding='valid',  # Padding method
            data_format='channels_first'))
    # Dropout 2
    model.add(Dropout(0.4))
    # FC 1
    model.add(Flatten())
    model.add(Dense(128))
    # model.add(LeakyReLU(alpha=0.1))
    # Dropout 3
    # Fully connected layer 2 to shape (3) for 3 classes
    model.add(Dense(3))
    model.add(Activation('sigmoid'))
    model.summary()
    return model
def qc_model():
    """Small 3D conv QC classifier (Keras 1 API) over (1, 160, 256, 224)
    volumes ending in a 2-way softmax, compiled with plain SGD.

    FIX: the Flatten() between the conv stack and the dense head was
    commented out, leaving Dense applied directly to a 5D conv output —
    which Keras rejects at build time; it is restored.
    """
    # data_dim = 160*256*224
    nb_classes = 2
    model = Sequential()
    model.add(Convolution3D(7, 3, 3, 3, activation='relu',
                            input_shape=(1, 160, 256, 224)))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    # model.add(SpatialDropout2D(0.5))
    model.add(Convolution3D(8, 3, 3, 3, activation='relu'))
    model.add(BatchNormalization())
    # model.add(MaxPooling2D(pool_size=(3, 3)))
    # model.add(SpatialDropout2D(0.5))
    model.add(Convolution3D(12, 3, 3, 3, activation='relu'))
    model.add(BatchNormalization())
    model.add(MaxPooling3D(pool_size=(2, 2, 2)))
    # model.add(SpatialDropout2D(0.2))
    model.add(Flatten())
    model.add(Dense(10, init='uniform', activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(10, init='uniform', activation='relu'))
    model.add(Dense(nb_classes, init='uniform'))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd',
                  metrics=["accuracy"])
    return model
def classifier_3d(self):
    """Build and compile a three-stage 3D conv classifier over
    self.input_shape: conv/relu/pool stages of 16, 32 and 64 filters, then a
    512 -> 128 -> 2 dense head with dropout and a softmax output, compiled
    with Adam and categorical cross-entropy."""
    net = Sequential()
    # First stage carries the input shape; the rest infer it.
    net.add(Convolution3D(16, 3, 3, 3, input_shape=self.input_shape))
    net.add(Activation('relu'))
    net.add(MaxPooling3D(pool_size=(2, 2, 2)))
    for depth in (32, 64):
        net.add(Convolution3D(depth, 3, 3, 3))
        net.add(Activation('relu'))
        net.add(MaxPooling3D(pool_size=(2, 2, 2)))
    net.add(Dropout(0.25))
    net.add(Flatten())
    # Dense head with per-layer dropout.
    for width in (512, 128):
        net.add(Dense(width))
        net.add(Activation('relu'))
        net.add(Dropout(0.5))
    net.add(Dense(2))
    net.add(Activation('softmax'))
    net.compile(optimizer='adam', loss='categorical_crossentropy',
                metrics=['accuracy'])
    return net
def arch_c(out=2, shape=(1, 26, 40, 40)):
    """Arch-C: three Conv3D+BN+ReLU blocks and a dense head over
    'th'-ordered (channels-first) patches.

    FIX: the `out` parameter was accepted but ignored — the classifier head
    was hard-coded to Dense(2). It now uses `out`; the default value keeps
    the previous behavior for existing callers.
    """
    # Determine proper input shape
    if K.image_dim_ordering() != 'th':
        print('Wrong dim ordering: should be TH')
    patch_input = Input(shape=shape)
    x = Dropout(.2)(patch_input)
    bn_axis = 1  # channel axis for 'th' ordering
    # shape: 1, 26, 40, 40
    x = Convolution3D(64, 3, 5, 5, border_mode='same')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)
    x = MaxPooling3D((2, 2, 2))(x)
    # shape: 64, 13, 20, 20
    x = Convolution3D(64, 3, 5, 5, border_mode='same')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)
    # shape: 64, 13, 20, 20
    x = Convolution3D(64, 3, 5, 5, border_mode='same')(x)
    x = BatchNormalization(axis=bn_axis)(x)
    x = Activation('relu')(x)
    x = Flatten()(x)
    x = Dense(250, activation='relu')(x)
    x = Dense(out, activation='softmax')(x)
    return Model(patch_input, x)
def mnist_conv():
    """Small two-conv 3D network with a softmax head (Keras 1 API).

    NOTE(review): relies on module-level globals `input_shape` (first layer)
    and `Y_test` (output width) — confirm both are defined before calling.
    """
    nb_filters = 32
    kernel_size = (3, 3, 3)
    pool_size = (2, 2, 2)
    model = Sequential()
    model.add(
        Convolution3D(nb_filters, kernel_size[0], kernel_size[1],
                      kernel_size[2], border_mode='valid',
                      input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(
        Convolution3D(nb_filters, kernel_size[0], kernel_size[1],
                      kernel_size[2]))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=pool_size))
    model.add(Dropout(0.05))
    model.add(Flatten())
    model.add(Dense(32))
    model.add(Activation('relu'))
    model.add(Dropout(0.05))
    # Output width taken from the one-hot test labels.
    model.add(Dense(Y_test.shape[1]))
    model.add(Activation('softmax'))
    return model
def conv_block_api(kernel_size, filters, bn_axis=4):
    """Return (main_branch_layers, shortcut_layers) for a functional-API 3D
    bottleneck block. The main branch is three conv+BN+ReLU triples
    (1-k-1 kernels, stride 1, 'same' padding); the shortcut is a 1x1x1
    projection to filters[2] plus batch norm."""
    main_branch = []
    for n_filters, extent in zip(filters, (1, kernel_size, 1)):
        main_branch.append(
            Convolution3D(n_filters, extent, extent, extent,
                          subsample=(1, 1, 1), border_mode='same'))
        main_branch.append(BatchNormalization(axis=bn_axis))
        main_branch.append(Activation('relu'))
    shortcut_branch = [
        Convolution3D(filters[2], 1, 1, 1, subsample=(1, 1, 1)),
        BatchNormalization(axis=bn_axis)
    ]
    return main_branch, shortcut_branch
def sumModelSimple(Inputs, nclasses, nregressions, dropoutRate=0.05, momentum=0.6):
    """Simple two-head model: a 3D conv stack over Inputs[1] whose flattened
    features are concatenated with the global features Inputs[0], then fed
    to an ID softmax ('ID_pred') and a scalar energy regression
    ('pre_Epred').

    FIX: a local variable named `globals` shadowed the builtin of the same
    name; renamed to `global_features` (pure rename, no behavior change).
    NOTE(review): `nregressions` is accepted but unused — kept for signature
    compatibility with sibling model factories.
    """
    x = Inputs[1]
    global_features = Inputs[0]
    x = BatchNormalization(momentum=momentum)(x)
    x = GaussianDropout(dropoutRate)(x)
    x = Convolution3D(12, kernel_size=(3, 3, 3), strides=(1, 1, 1),
                      padding='valid', activation='relu',
                      kernel_initializer='lecun_uniform', name='conv3D_0a')(x)
    x = Convolution3D(12, kernel_size=(3, 3, 3), strides=(2, 2, 2),
                      padding='valid', activation='relu',
                      kernel_initializer='lecun_uniform', name='conv3D_0')(x)
    x = Convolution3D(16, kernel_size=(5, 5, 5), strides=(2, 2, 2),
                      padding='same', activation='relu',
                      kernel_initializer='lecun_uniform', name='conv3D_1')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = Convolution3D(4, kernel_size=(3, 3, 5), strides=(2, 2, 3),
                      padding='same', activation='relu',
                      kernel_initializer='lecun_uniform', name='conv3D_2')(x)
    x = BatchNormalization(momentum=momentum)(x)
    x = Flatten()(x)
    # Merge conv features with the global (per-event) features.
    x = Concatenate()([global_features, x])
    x = Dense(128, activation='relu', kernel_initializer='lecun_uniform')(x)
    predictID = Dense(nclasses, activation='softmax',
                      kernel_initializer='lecun_uniform', name='ID_pred')(x)
    predictE = Dense(1, activation='linear',
                     kernel_initializer='lecun_uniform', name='pre_Epred')(x)
    predictions = [predictID, predictE]
    model = Model(inputs=Inputs, outputs=predictions)
    return model
def create_cnn_network(input_dim):
    '''Base network to be shared (eq. to feature extraction).

    Two small 3D convs (5 then 10 filters) with dropout, then a 50-unit
    dense feature layer (Keras 1 API, 'th' dim ordering).
    NOTE(review): this redefines create_cnn_network with a different
    signature from the earlier version in this file — whichever definition
    runs last wins at import time; confirm which is intended.
    '''
    seq = Sequential()
    # conv layers
    kern_size = 3
    seq.add(Convolution3D(5, kern_size, kern_size, kern_size,
                          input_shape=input_dim, border_mode='valid',
                          dim_ordering='th', activation='relu'))
    seq.add(Dropout(.2))
    # seq.add(BatchNormalization(mode=2))
    kern_size = 3
    seq.add(Convolution3D(10, kern_size, kern_size, kern_size,
                          border_mode='valid', dim_ordering='th',
                          activation='relu'))
    seq.add(Dropout(.2))
    # seq.add(BatchNormalization(mode=2))
    # seq.add(MaxPooling3D((2, 2, 2), dim_ordering='th'))
    # dense layers
    seq.add(Flatten())
    seq.add(Dense(50, activation='relu'))
    seq.add(Dropout(.2))
    # seq.add(BatchNormalization(mode=2))
    return seq
def test_unet_from_layers(box_size, i, o):
    """Pytest case: a UNet can be wrapped around explicit input/output
    layers, and malformed inputs/outputs raise informative ValueErrors.

    i / o are the input and output channel counts (parametrized upstream).
    """
    inputs = Input([box_size] * 3 + [i])
    conv1 = Convolution3D(filters=3, kernel_size=1, activation='elu',
                          padding='same')(inputs)
    outputs = Convolution3D(filters=o, kernel_size=1, activation='sigmoid',
                            padding='same')(conv1)
    model = UNet(inputs=inputs, outputs=outputs, box_size=box_size,
                 input_channels=i, output_channels=o)
    # A freshly built model exposes data_handle but has no data bound yet.
    assert hasattr(model, 'data_handle')
    assert model.data_handle is None
    # Slicing a tensor drops a dimension -> triggers the 5D validation.
    with pytest.raises(ValueError, match='input should be 5D'):
        UNet(inputs=inputs[0], outputs=inputs)
    with pytest.raises(ValueError, match='output should be 5D'):
        UNet(inputs=inputs, outputs=outputs[1])
    # Concatenating along axis 1 changes the spatial shape -> mismatch.
    with pytest.raises(ValueError, match='input and output shapes do not match'):
        UNet(inputs=inputs, outputs=concatenate([outputs, outputs], 1))
def initModelRegAux(imageSize, numberOfAuxInputs):
    """Regression model with auxiliary scalar inputs: two VGG-style 3D
    conv/conv/pool stages over the single-channel image volume, whose
    flattened 64-unit features are concatenated with the auxiliary vector
    before a linear output; compiled with mean-absolute-error and Adam."""
    image_in = Input(shape=imageSize + [1])
    aux_in = Input(shape=(numberOfAuxInputs, ))
    features = image_in
    # Two stages of (conv, conv, pool) with doubling filter counts.
    for n_filt in (32, 64):
        for _ in range(2):
            features = Convolution3D(filters=n_filt, kernel_size=(3, 3, 3),
                                     strides=(1, 1, 1),
                                     activation='relu')(features)
        features = MaxPooling3D((2, 2, 2))(features)
    flat_features = Flatten()(features)
    dense_features = Dense(64, activation='relu')(flat_features)
    merged = concatenate([dense_features, aux_in])
    prediction = Dense(1, activation='linear')(merged)
    model = Model(inputs=[image_in, aux_in], outputs=prediction)
    model.compile(loss='mean_absolute_error', optimizer='adam')
    return model
def build_model(self):
    """Assemble the 3D-CNN classifier: four identical conv/pool stages of
    8 filters, then two dropout-regularized dense layers and a softmax over
    the configured class count; loads pretrained weights when a weights
    path is set."""
    net = Sequential()
    # First stage carries the configured input shape.
    net.add(Convolution3D(8, (3, 3, 3), input_shape=self.__input_dim))
    net.add(MaxPooling3D())
    # Three more identical conv/pool stages.
    for _ in range(3):
        net.add(Convolution3D(8, (3, 3, 3)))
        net.add(MaxPooling3D())
    net.add(Flatten())
    net.add(Dense(1024, activation='relu', name='dense1'))
    net.add(Dropout(0.5, name='dropout1'))
    net.add(Dense(512, activation='relu', name='dense2'))
    net.add(Dropout(0.5, name='dropout2'))
    net.add(Dense(self.__no_classes, activation='softmax'))
    if self.weights_path:
        net.load_weights(self.weights_path)
    return net
def res_block(input_tensor, nb_filters=16, block=0, subsample_factor=1):
    """Pre-activation 3D residual block (Keras 1 API) with a 1x1x1
    projection shortcut when subsampling.

    NOTE(review): `block` is accepted but unused (layers are unnamed) —
    kept for signature compatibility with callers.
    NOTE(review): with subsample_factor == 1 the identity shortcut requires
    nb_filters to equal the input channel count for the sum to be valid.
    """
    subsample = (subsample_factor, subsample_factor, subsample_factor)
    # Pre-activation ordering: BN -> ReLU -> conv, twice.
    x = BatchNormalization(axis=4)(input_tensor)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=subsample,
                      border_mode='same')(x)
    x = BatchNormalization(axis=4)(x)
    x = Activation('relu')(x)
    x = Convolution3D(nb_filters, 3, 3, 3, subsample=(1, 1, 1),
                      border_mode='same')(x)
    if subsample_factor > 1:
        # Strided 1x1x1 conv projects the shortcut to the new resolution.
        shortcut = Convolution3D(nb_filters, 1, 1, 1, subsample=subsample,
                                 border_mode='same')(input_tensor)
    else:
        shortcut = input_tensor
    x = merge([x, shortcut], mode='sum')
    return x
def conv_block_td(input_tensor, kernel_size, filters, stage, block, input_shape, strides=(2, 2, 2), trainable=True):
    """Time-distributed ResNet conv block: a strided 1-k-1 bottleneck branch
    plus a strided 1x1x1 projection shortcut, every conv wrapped in
    TimeDistributed.

    FIX: in the '2c' conv, `trainable` was passed to the TimeDistributed
    wrapper instead of to the wrapped Convolution3D — unlike every other
    conv in this block and in identity_block_td; it is now applied to the
    conv layer itself so weight freezing behaves consistently.
    """
    # conv block time distributed
    nb_filter1, nb_filter2, nb_filter3 = filters
    bn_axis = 4  # channels axis of the wrapped (non-time) 5D tensor
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    x = TimeDistributed(
        Convolution3D(nb_filter1, (1, 1, 1), strides=strides,
                      trainable=trainable, kernel_initializer='normal'),
        input_shape=input_shape, name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(BatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(
        Convolution3D(nb_filter2, (kernel_size, kernel_size, kernel_size),
                      padding='same', trainable=trainable,
                      kernel_initializer='normal'),
        name=conv_name_base + '2b')(x)
    x = TimeDistributed(BatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    x = TimeDistributed(Convolution3D(nb_filter3, (1, 1, 1),
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(BatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2c')(x)
    # Projection shortcut matches the branch's stride and channel count.
    shortcut = TimeDistributed(
        Convolution3D(nb_filter3, (1, 1, 1), strides=strides,
                      trainable=trainable, kernel_initializer='normal'),
        name=conv_name_base + '1')(input_tensor)
    shortcut = TimeDistributed(BatchNormalization(axis=bn_axis),
                               name=bn_name_base + '1')(shortcut)
    x = Add()([x, shortcut])
    x = Activation('relu')(x)
    return x
def get_model(self):
    """Build a configurable 3D U-Net from the instance's hyper-parameters.

    Encoder: num_layers levels of repeated Conv3D (+optional dropout/BN),
    with skip connections saved and max-pooling applied on all but the
    deepest level. Decoder: upsample, concatenate the matching skip
    connection, then repeated Conv3D (+optional dropout/BN). A final 1x1x1
    conv produces num_classes_out maps.

    NOTE(review): uses the legacy Keras 1 `merge(..., mode='concat')` and
    `Model(input=..., output=...)` APIs — incompatible with Keras 2+.
    NOTE(review): the decoder reuses num_convols_downlayers for its conv
    count — presumably intentional (symmetric U-Net); confirm an
    'uplayers' count was not intended.
    """
    inputlayer = Input((self.size_image[0], self.size_image[1],
                        self.size_image[2], self.num_channels_in))
    hiddenlayer_next = inputlayer
    list_hiddenlayer_skipconn = []
    # ENCODING LAYERS
    for i in range(self.num_layers):
        for j in range(self.num_convols_downlayers):
            hiddenlayer_next = Convolution3D(
                self.num_featmaps_layers[i],
                self.size_convolfilter_downlayers[i],
                activation=self.type_activate_hidden,
                padding=self.type_padding_convol)(hiddenlayer_next)
        #endfor
        if self.isuse_dropout and self.where_dropout_downlayers[i]:
            hiddenlayer_next = Dropout(self.dropout_rate)(hiddenlayer_next)
        if self.isuse_batchnormalize and self.where_batchnormalize_downlayers[
                i]:
            hiddenlayer_next = BatchNormalization()(hiddenlayer_next)
        # All levels except the deepest save a skip connection and pool.
        if i != self.num_layers - 1:
            list_hiddenlayer_skipconn.append(hiddenlayer_next)
            hiddenlayer_next = MaxPooling3D(
                pool_size=self.size_pooling_downlayers[i])(
                    hiddenlayer_next)
    #endfor
    # DECODING LAYERS (walk the levels back up, deepest-but-one to top)
    for i in range(self.num_layers - 2, -1, -1):
        hiddenlayer_next = UpSampling3D(
            size=self.size_upsample_uplayers[i])(hiddenlayer_next)
        # Channel-wise concat with the saved skip connection of this level.
        hiddenlayer_next = merge(
            [hiddenlayer_next, list_hiddenlayer_skipconn[i]],
            mode='concat', concat_axis=-1)
        for j in range(self.num_convols_downlayers):
            hiddenlayer_next = Convolution3D(
                self.num_featmaps_layers[i],
                self.size_convolfilter_uplayers[i],
                activation=self.type_activate_hidden,
                padding=self.type_padding_convol)(hiddenlayer_next)
        #endfor
        if self.isuse_dropout and self.where_dropout_uplayers[i]:
            hiddenlayer_next = Dropout(self.dropout_rate)(hiddenlayer_next)
        if self.isuse_batchnormalize and self.where_batchnormalize_uplayers[
                i]:
            hiddenlayer_next = BatchNormalization()(hiddenlayer_next)
    #endfor
    # 1x1x1 conv maps features to the output class maps.
    outputlayer = Convolution3D(
        self.num_classes_out, (1, 1, 1),
        activation=self.type_activate_output)(hiddenlayer_next)
    out_model = Model(input=inputlayer, output=outputlayer)
    return out_model
def create_model(input_size=[2900, 15, 15]):
    """Spatio-temporal conv net over (1, t, h, w) sequences (Keras 1 API),
    alternating spatial (1x3x3) and temporal (kx1x1) conv/pool stages, a
    global temporal pool, then a 1000-unit dense layer and 2-way softmax,
    compiled with SGD.

    FIX: `t / 4`, `h / 4` and `w / 4` used true division, which yields
    floats under Python 3 and breaks MaxPooling3D/Reshape; replaced with
    floor division (identical results for the integer sizes used here).
    NOTE(review): the mutable default `input_size=[2900, 15, 15]` is only
    read, never mutated, so it is kept for interface compatibility.
    """
    t, h, w = input_size
    input_sequence = Input(shape=(1, t, h, w))  # (channels,frames,height,width)
    # conv1: spatial convolution (3 x 3), spatial pooling (2 x 2)
    conv_1 = Convolution3D(50, 1, 3, 3, activation='relu', border_mode='same')
    conv1 = conv_1(input_sequence)
    bn1 = BatchNormalization(axis=1)(conv1)
    pool_1 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2))
    pool1 = pool_1(bn1)
    # output size: t, h/2, w/2
    # conv2: temporal convolution (kernel 5), temporal pooling (2)
    conv_2 = Convolution3D(50, 5, 1, 1, activation='relu', border_mode='same')
    conv2 = conv_2(pool1)
    bn2 = BatchNormalization(axis=1)(conv2)
    pool_2 = MaxPooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1))
    pool2 = pool_2(bn2)
    # output size: t/2, h/2, w/2
    drop3 = Dropout(0.5)(pool2)
    # conv3: spatial convolution (3 x 3), spatial pooling (2 x 2)
    conv_3 = Convolution3D(50, 1, 3, 3, activation='relu', border_mode='same')
    conv3 = conv_3(drop3)
    bn3 = BatchNormalization(axis=1)(conv3)
    pool_3 = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2))
    pool3 = pool_3(bn3)
    # output size: t/2, h/4, w/4
    # conv4: temporal convolution (4), temporal pooling (2)
    conv_4 = Convolution3D(50, 4, 1, 1, activation='relu', border_mode='same')
    conv4 = conv_4(pool3)
    bn4 = BatchNormalization(axis=1)(conv4)
    pool_4 = MaxPooling3D(pool_size=(2, 1, 1), strides=(2, 1, 1))
    pool4 = pool_4(bn4)
    # output size: t/4, h/4, w/4
    # Collapse the remaining temporal axis with one global temporal pool.
    pool_5 = MaxPooling3D(pool_size=(t // 4, 1, 1), strides=(t // 4, 1, 1))
    pool5 = pool_5(pool4)
    # output size: 1, h/4, w/4
    drop5 = Dropout(0.5)(pool5)
    # fully connected layers
    reshape6 = Reshape((50 * (h // 4) * (w // 4), ))(drop5)
    fc_6 = Dense(1000, activation='relu')
    fc6 = fc_6(reshape6)
    fc_7 = Dense(2, activation='softmax')
    fc7 = fc_7(fc6)
    model = Model(input=input_sequence, output=fc7)
    sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
def create_graph(cell_size=27, load_file=None, l2_lambda=0.01):
    """Deep 3D conv regressor over (cell_size, cell_size, cell_size, 4)
    grids with L2 regularization on every layer, compiled with Nadam and
    the module-level `rmsle` loss; optionally loads weights from
    `load_file`.

    FIX: removed a leftover debug `exit()` (after the summary print) that
    terminated the whole process and made weight loading, compilation and
    the return statement unreachable.
    """
    model = Sequential()
    model.add(
        Convolution3D(8, (7, 7, 7),
                      input_shape=(cell_size, cell_size, cell_size, 4),
                      activation='relu',
                      kernel_regularizer=K.regularizers.l2(l2_lambda)))
    # Four more conv stages with doubling filter counts and 5^3 kernels.
    for n_filters in (16, 32, 64, 128):
        model.add(
            Convolution3D(n_filters, (5, 5, 5),
                          activation='relu',
                          kernel_regularizer=K.regularizers.l2(l2_lambda)))
    model.add(MaxPooling3D((2, 2, 2), strides=(2, 2, 2)))
    model.add(Flatten())
    # Halving-width dense stack down to the 2-unit output.
    for width in (512, 256, 128, 64, 32):
        model.add(
            Dense(width,
                  activation='relu',
                  kernel_regularizer=K.regularizers.l2(l2_lambda)))
    # NOTE(review): 'relu' on the output clamps predictions to >= 0 —
    # presumably intentional for rmsle targets; confirm.
    model.add(
        Dense(2,
              activation='relu',
              kernel_regularizer=K.regularizers.l2(l2_lambda)))
    print(model.summary())
    if load_file is not None:
        model.load_weights(load_file)
    optim = Nadam()
    model.compile(optimizer=optim, loss=rmsle)
    return model
def get_net(input_shape=(64, 64, 64, 1), load_weight_path=None, features=False) -> Model:
    """C3D-style 3D CNN classifier over 64x64x64 single-channel cubes.

    Returns a compiled Model whose output 'out_class' is a 5-way softmax;
    when `features` is True, returns instead a model that emits the
    64-channel 'last_64' feature map. Relies on module globals USE_DROPOUT
    and LEARN_RATE.
    """
    inputs = Input(shape=input_shape, name="input_1")
    x = inputs
    # Halve the depth axis up front to reduce compute.
    x = AveragePooling3D(strides=(2, 1, 1), pool_size=(2, 1, 1),
                         padding="same")(x)
    x = Convolution3D(64, (3, 3, 3), activation='relu', strides=(1, 1, 1),
                      padding='same', name='conv1', )(x)
    x = MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2), padding='valid',
                     name='pool1')(x)
    x = Convolution3D(128, (3, 3, 3), activation='relu', padding='same',
                      name='conv2', strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid',
                     name='pool2')(x)
    if USE_DROPOUT:
        x = Dropout(rate=0.3)(x)
    x = Convolution3D(256, (3, 3, 3), activation='relu', padding='same',
                      name='conv3a', strides=(1, 1, 1))(x)
    x = Convolution3D(256, (3, 3, 3), activation='relu', padding='same',
                      name='conv3b', strides=(1, 1, 1))(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid',
                     name='pool3')(x)
    if USE_DROPOUT:
        x = Dropout(rate=0.4)(x)
    x = Convolution3D(512, (3, 3, 3), activation='relu', padding='same',
                      name='conv4a', strides=(1, 1, 1))(x)
    x = Convolution3D(512, (3, 3, 3), activation='relu', padding='same',
                      name='conv4b', strides=(1, 1, 1),)(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid',
                     name='pool4')(x)
    if USE_DROPOUT:
        x = Dropout(rate=0.5)(x)
    # Newly added section: one extra conv/conv/pool stage.
    x = Convolution3D(512, (3, 3, 3), activation='relu', padding='same',
                      name='conv5a', strides=(1, 1, 1))(x)
    x = Convolution3D(512, (3, 3, 3), activation='relu', padding='same',
                      name='conv5b', strides=(1, 1, 1), )(x)
    x = MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2), padding='valid',
                     name='pool5')(x)
    last64 = Convolution3D(64, (2, 2, 2), activation="relu",
                           name="last_64")(x)
    out_class = Convolution3D(5, (1, 1, 1), activation="softmax",
                              name="out_class_last")(last64)
    out_class = Flatten(name="out_class")(out_class)
    # 1*1*1*1: a single voxel, i.e. one value per class map.
    # out_malignancy = Convolution3D(1, (1, 1, 1), activation=None,
    #                                name="out_malignancy_last")(last64)
    # out_malignancy = Flatten(name="out_malignancy")(out_malignancy)
    # Define a model with one input and one output.
    model = Model(inputs=inputs, outputs=out_class)
    if load_weight_path is not None:
        model.load_weights(load_weight_path, by_name=False)
    # Compile: loss function, optimizer and evaluation metrics.
    # optimizer: SGD with momentum + Nesterov at the module-level LEARN_RATE.
    # loss: categorical cross-entropy for the 5-way softmax output (the
    #       original comments described an older two-output variant with
    #       binary_crossentropy + mean_absolute_error).
    # metrics: cross-entropy and categorical accuracy.
    model.compile(optimizer=SGD(lr=LEARN_RATE, momentum=0.9, nesterov=True),
                  loss=categorical_crossentropy,
                  metrics=[categorical_crossentropy, categorical_accuracy])
    if features:
        # NOTE(review): legacy Keras 1 `input=`/`output=` kwargs here,
        # unlike the `inputs=`/`outputs=` call above — fails on Keras 2+.
        model = Model(input=inputs, output=[last64])
    # Print the model summary.
    model.summary(line_length=140)
    return model
def rpn(base_layers, num_anchors):
    """Region-proposal head: a shared 3x3x3 conv over the backbone features,
    then per-anchor objectness scores (sigmoid) and 4 box-regression values
    (linear). Returns [class_output, regression_output, base_layers]."""
    shared = Convolution3D(512, (3, 3, 3), padding='same', activation='relu',
                           kernel_initializer='normal',
                           name='rpn_conv1')(base_layers)
    # One objectness score per anchor.
    cls_branch = Convolution3D(num_anchors, (1, 1, 1), activation='sigmoid',
                               kernel_initializer='uniform',
                               name='rpn_out_class')(shared)
    # Four regression targets per anchor.
    regr_branch = Convolution3D(num_anchors * 4, (1, 1, 1),
                                activation='linear',
                                kernel_initializer='zero',
                                name='rpn_out_regress')(shared)
    return [cls_branch, regr_branch, base_layers]
def make_disc_model_body(input_shape, n_disc_filters):
    """Build the discriminator body: encode a blurry/clear image pair with a
    shared conv encoder, jointly convolve the stacked pair, and emit a
    sigmoid realness score.

    FIXES:
    - The Dense(50) + LeakyReLU head was computed but discarded — the final
      Dense read from `encoded_pair` again; the chain is now connected.
    - Both Dense layers were named 'disc_output', which Keras rejects as a
      duplicate layer name; the hidden one is now 'disc_hidden'.
    NOTE(review): each iteration of the `for i in range(2)` loop re-reads
    `disc_merged`, so the first iteration's layers are discarded — looks
    unintended, but fixing it would change the trained architecture, so it
    is left as-is.
    """
    def make_disc_encoder(input_shape, n_disc_filters):
        # Shared per-image encoder, applied to both inputs.
        output = Sequential(name='disc_layer_stack')
        output.add(
            Convolution2D(n_disc_filters[0], 1, 1, border_mode='same',
                          input_shape=input_shape))
        output.add(LeakyReLU(0.2))
        for n_filters_in_block in n_disc_filters:
            output.add(
                Convolution2D(n_filters_in_block, 3, 3, subsample=(2, 2),
                              border_mode='same'))
            output.add(LeakyReLU(0.2))
            output.add(Convolution2D(n_filters_in_block, 1, 1))
            output.add(LeakyReLU(0.2))
        # Add an extra dimension on front of this output, to allow convolving images together
        curr_output_shape = output.output_shape
        new_shape = (1, curr_output_shape[1], curr_output_shape[2],
                     curr_output_shape[3])
        output.add(Reshape(new_shape))
        return output

    disc_encoder = make_disc_encoder(input_shape, n_disc_filters)
    blurry_img = Input(shape=input_shape)
    blurry_branch = disc_encoder(blurry_img)
    clear_img = Input(shape=input_shape)
    clear_branch = disc_encoder(clear_img)
    # SHould probably concat on axis 0 and convolve the images
    disc_merged = merge([blurry_branch, clear_branch], mode='concat',
                        concat_axis=1)
    for i in range(2):
        encoded_pair = Convolution3D(32, 1, 1, 1,
                                     border_mode='same')(disc_merged)
        encoded_pair = LeakyReLU(0.2)(encoded_pair)
        encoded_pair = Convolution3D(32, 1, 3, 3,
                                     border_mode='same')(encoded_pair)
        encoded_pair = LeakyReLU(0.2)(encoded_pair)
        encoded_pair = MaxPooling3D(pool_size=(1, 2, 2))(encoded_pair)
    encoded_pair = Flatten()(encoded_pair)
    disc_output = Dense(50, name='disc_hidden')(encoded_pair)
    disc_output = LeakyReLU(0.2)(disc_output)
    disc_output = Dense(1, activation='sigmoid',
                        name='disc_output')(disc_output)
    disc_model_body = Model(input=[blurry_img, clear_img],
                            output=disc_output, name='disc_model_body')
    return disc_model_body
def srcnn3(input_shape=(33,33,110,1)):
    """3D SRCNN-style network: feature extraction (9x9x7), two 1x1x1
    mapping layers, and a 5x5x3 reconstruction conv, compiled with Adam
    and MSE.

    FIX: `print '82'` was Python-2-only statement syntax and breaks parsing
    under Python 3; parenthesized (identical output for a single argument
    on both versions).
    """
    print('82')  # debug marker left by the original author
    model = Sequential()
    model.add(Convolution3D(64, 9, 9, 7, input_shape=input_shape,
                            activation='relu'))
    model.add(Convolution3D(32, 1, 1, 1, activation='relu'))
    model.add(Convolution3D(9, 1, 1, 1, activation='relu'))
    # Linear reconstruction layer (no activation).
    model.add(Convolution3D(1, 5, 5, 3))
    model.compile(Adam(lr=0.00005), 'mse')
    return model
def AddConvolutionalLayer(model, filter_size, kernel_size, padding, activation, normalization, input_shape=None):
    """Append a Convolution3D layer, an activation, and (optionally) batch
    normalization to `model` in place.

    FIX: `not input_shape == None` replaced with the idiomatic
    `input_shape is not None` — an identity test with the same outcome for
    None that also avoids surprises when input_shape is an object whose
    __eq__ does not return a plain bool.

    activation -- 'LeakyReLU' selects LeakyReLU(alpha=0.001); any other
    string is passed to Activation().
    """
    if input_shape is not None:
        # First layer of the model: must carry the input shape.
        model.add(Convolution3D(filter_size, kernel_size, padding=padding,
                                input_shape=input_shape))
    else:
        model.add(Convolution3D(filter_size, kernel_size, padding=padding))
    # add activation layer
    if activation == 'LeakyReLU':
        model.add(LeakyReLU(alpha=0.001))
    else:
        model.add(Activation(activation))
    # add normalization after activation
    if normalization:
        model.add(BatchNormalization())