def _Recognizer(x):
    # Shared convolutional feature extractor; Padding*/Conv*/Pooling* and
    # Flatten are shared layer instances (see the layer definitions below).
    x = Padding1(x)
    x = Conv1(x)
    x = Pooling1(x)
    x = Padding2(x)
    x = Conv2(x)
    x = Pooling2(x)
    x = Padding3(x)
    x = Conv3(x)
    x = Pooling3(x)
    x = Padding4(x)
    x = Conv4(x)
    x = Pooling4(x)
    x = Flatten(x)
    return x
def get_model(input_shape, intermediate_dim, latent_dim):
    # VAE model = encoder + decoder
    # build encoder model
    inputs = Input(shape=input_shape, name='encoder_input')
    x = Conv2D(64, (2, 2), activation='relu')(inputs)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = Conv2D(64, (3, 3), activation='relu')(x)
    x = Flatten()(x)
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)

    # use the reparameterization trick to push the sampling out as input
    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])

    # instantiate encoder model
    encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
    encoder.summary()
    # plot_model(encoder, to_file='vae_mlp_encoder.png', show_shapes=True)

    # build decoder model
    latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
    x = Dense(intermediate_dim, activation='relu')(latent_inputs)
    x = Dense(64 * 28 * 28, activation='relu')(x)
    x = Reshape((28, 28, 64))(x)
    x = Conv2DTranspose(64, (3, 3), activation='sigmoid', padding='same')(x)
    x = Conv2DTranspose(64, (3, 3), activation='sigmoid', padding='same')(x)
    outputs = Conv2D(image_channels, (2, 2), padding='same')(x)

    # instantiate decoder model
    decoder = Model(latent_inputs, outputs, name='decoder')
    decoder.summary()
    # plot_model(decoder, to_file='vae_mlp_decoder.png', show_shapes=True)

    # instantiate VAE model
    outputs = decoder(encoder(inputs)[2])
    vae = Model(inputs, outputs, name='vae_mlp')
    return vae, encoder, decoder, inputs, z_mean, z_log_var, latent_inputs, outputs
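
# Hypothetical usage sketch (not part of the original file): training the VAE
# returned by get_model() with the usual reconstruction + KL objective.
# Assumptions: the module-level `image_channels` equals 1, inputs are 28x28x1
# images scaled to [0, 1], and `x_train` is supplied by the caller; an MSE
# reconstruction term is used because the decoder's last layer is linear.
def _vae_training_sketch(x_train):
    from keras import backend as K  # may already be imported at module level

    vae, encoder, decoder, inputs, z_mean, z_log_var, _, outputs = get_model(
        input_shape=(28, 28, 1), intermediate_dim=256, latent_dim=2)

    # Per-sample reconstruction error, summed over pixels.
    reconstruction_loss = K.sum(
        K.square(K.batch_flatten(inputs) - K.batch_flatten(outputs)), axis=-1)
    # Closed-form KL divergence between q(z|x) and the unit Gaussian prior.
    kl_loss = -0.5 * K.sum(
        1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)

    vae.add_loss(K.mean(reconstruction_loss + kl_loss))
    vae.compile(optimizer='adam')
    vae.fit(x_train, epochs=30, batch_size=128)
    return vae, encoder, decoder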
def VGG19(include_top=True,
          weights='imagenet',
          input_tensor=None,
          input_shape=None,
          pooling=None,
          classes=1000):
    """Instantiates the VGG19 architecture.

    Optionally loads weights pre-trained on ImageNet. Note that when using
    TensorFlow, for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both TensorFlow and Theano.
    The data format convention used by the model is the one specified in your
    Keras config file.

    Arguments:
        include_top: whether to include the 3 fully-connected layers
            at the top of the network.
        weights: one of `None` (random initialization), 'imagenet'
            (pre-training on ImageNet), or the path to the weights file
            to be loaded.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified if
            `include_top` is False (otherwise the input shape has to be
            `(224, 224, 3)` with `channels_last` data format or
            `(3, 224, 224)` with `channels_first` data format). It should
            have exactly 3 input channels, and width and height should be
            no smaller than 48. E.g. `(200, 200, 3)` would be one valid value.
        pooling: optional pooling mode for feature extraction when
            `include_top` is `False`.
            - `None` means that the output of the model will be the 4D tensor
              output of the last convolutional layer.
            - `avg` means that global average pooling will be applied to the
              output of the last convolutional layer, and thus the output of
              the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: optional number of classes to classify images into, only to
            be specified if `include_top` is True, and if no `weights`
            argument is specified.

    Returns:
        A Keras model instance.

    Raises:
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
""" if not (weights in {'imagenet', None} or os.path.exists(weights)): raise ValueError('The `weights` argument should be either ' '`None` (random initialization), `imagenet` ' '(pre-training on ImageNet), ' 'or the path to the weights file to be loaded.') if weights == 'imagenet' and include_top and classes != 1000: raise ValueError('If using `weights` as imagenet with `include_top`' ' as true, `classes` should be 1000') # Determine proper input shape input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=48, data_format=K.image_data_format(), require_flatten=include_top, weights=weights) if input_tensor is None: img_input = Input(shape=input_shape) else: if not K.is_keras_tensor(input_tensor): img_input = Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor # Block 1 x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input) x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) # Block 2 x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x) x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) # Block 3 x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) # Block 4 x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) # Block 5 x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x) if include_top: # Classification block x = Flatten(name='flatten')(x) x = Dense(4096, activation='relu', name='fc1')(x) x = Dense(4096, activation='relu', name='fc2')(x) x = Dense(classes, activation='softmax', name='predictions')(x) else: if pooling == 'avg': x = GlobalAveragePooling2D()(x) elif pooling == 'max': x = GlobalMaxPooling2D()(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = get_source_inputs(input_tensor) else: inputs = img_input # Create model. 
    model = Model(inputs, x, name='vgg19')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'vgg19_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='cbe5617147190e668d6c5d5026f83318')
        else:
            weights_path = get_file(
                'vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='253f8cb515780f3b799900260a226db6')
        model.load_weights(weights_path)

        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if include_top:
                maxpool = model.get_layer(name='block5_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')
    elif weights is not None:
        model.load_weights(weights)

    return model
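
# Hypothetical usage sketch (not part of the original file): classifying a
# single image with the VGG19 model defined above. It relies on the standard
# `preprocess_input`/`decode_predictions` helpers from
# `keras.applications.vgg19`; the file name 'elephant.jpg' is a placeholder.
def _vgg19_predict_sketch():
    import numpy as np
    from keras.preprocessing import image
    from keras.applications.vgg19 import preprocess_input, decode_predictions

    model = VGG19(weights='imagenet', include_top=True)

    img = image.load_img('elephant.jpg', target_size=(224, 224))
    x = np.expand_dims(image.img_to_array(img), axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    print(decode_predictions(preds, top=3)[0])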
def ResNet50(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000):
    """Instantiates the ResNet50 architecture.

    Optionally loads weights pre-trained on ImageNet. Note that when using
    TensorFlow, for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both TensorFlow and Theano.
    The data format convention used by the model is the one specified in your
    Keras config file.

    # Arguments
        include_top: whether to include the fully-connected layer
            at the top of the network.
        weights: one of `None` (random initialization)
            or 'imagenet' (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified if
            `include_top` is False (otherwise the input shape has to be
            `(224, 224, 3)` with `channels_last` data format or
            `(3, 224, 224)` with `channels_first` data format). It should
            have exactly 3 input channels, and width and height should be
            no smaller than 197. E.g. `(200, 200, 3)` would be one valid value.
        pooling: optional pooling mode for feature extraction when
            `include_top` is `False`.
            - `None` means that the output of the model will be the 4D tensor
              output of the last convolutional layer.
            - `avg` means that global average pooling will be applied to the
              output of the last convolutional layer, and thus the output of
              the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: optional number of classes to classify images into, only to
            be specified if `include_top` is True, and if no `weights`
            argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
""" if weights not in {'imagenet', None}: raise ValueError('The `weights` argument should be either ' '`None` (random initialization) or `imagenet` ' '(pre-training on ImageNet).') if weights == 'imagenet' and include_top and classes != 1000: raise ValueError('If using `weights` as imagenet with `include_top`' ' as true, `classes` should be 1000') # Determine proper input shape input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=48, data_format=K.image_data_format(), require_flatten=include_top, weights=weights) if input_tensor is None: img_input = Input(shape=input_shape) else: if not K.is_keras_tensor(input_tensor): img_input = Input(tensor=input_tensor, shape=input_shape) else: img_input = input_tensor if K.image_data_format() == 'channels_last': bn_axis = 3 else: bn_axis = 1 x = Conv2D( #64, (7, 7), strides=(2, 2), padding='same', name='conv1')(img_input) 64, (7, 7), strides=(1, 1), padding='same', name='conv1')(img_input) x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x) x = Activation('relu')(x) x = MaxPooling2D((3, 3), strides=(2, 2))(x) x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1)) x = identity_block(x, 3, [64, 64, 256], stage=2, block='b') x = identity_block(x, 3, [64, 64, 256], stage=2, block='c') x = conv_block(x, 3, [128, 128, 512], stage=3, block='a') x = identity_block(x, 3, [128, 128, 512], stage=3, block='b') x = identity_block(x, 3, [128, 128, 512], stage=3, block='c') x = identity_block(x, 3, [128, 128, 512], stage=3, block='d') x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e') x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f') x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a') x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b') x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c') #x = AveragePooling2D((7, 7), name='avg_pool')(x) #x = AveragePooling2D((2, 2), name='avg_pool')(x) x = AveragePooling2D((4, 4), name='avg_pool')(x) if include_top: x = Flatten()(x) x = Dense(classes, activation='softmax', name='fc1000')(x) else: if pooling == 'avg': x = GlobalAveragePooling2D()(x) elif pooling == 'max': x = GlobalMaxPooling2D()(x) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_tensor is not None: inputs = get_source_inputs(input_tensor) else: inputs = img_input # Create model. 
    model = Model(inputs, x, name='resnet50')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models',
                md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
        else:
            weights_path = get_file(
                'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                md5_hash='a268eb855778b3df3c7506639542a6af')
        model.load_weights(weights_path)

        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)
            if include_top:
                maxpool = model.get_layer(name='avg_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1000')
                layer_utils.convert_dense_weights_data_format(
                    dense, shape, 'channels_first')

        if K.image_data_format() == 'channels_first' and K.backend() == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    return model
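
# Hypothetical usage sketch (not part of the original file): using the
# modified ResNet50 above as a feature extractor. This file changes the stem
# stride and the final average pooling for smaller inputs, so the 64x64 input
# size and random weights below are illustrative assumptions only.
def _resnet50_features_sketch():
    import numpy as np

    model = ResNet50(weights=None, include_top=False,
                     input_shape=(64, 64, 3), pooling='avg')
    dummy = np.random.rand(1, 64, 64, 3).astype('float32')
    features = model.predict(dummy)
    print('feature vector shape:', features.shape)  # e.g. (1, 2048)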
def get_model(nb_classes=10, add_peer=True):
    # VGG19-style network with BatchNorm after every conv/dense layer,
    # sized for 32x32x3 inputs. Note: `add_peer` is currently unused.
    model = Sequential()
    model.add(Conv2D(64, (3, 3), padding='same', input_shape=(32, 32, 3), name='img'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3), padding='same', name='block1_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))

    model.add(Conv2D(128, (3, 3), padding='same', name='block2_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(128, (3, 3), padding='same', name='block2_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))

    model.add(Conv2D(256, (3, 3), padding='same', name='block3_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', name='block3_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', name='block3_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(256, (3, 3), padding='same', name='block3_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))

    model.add(Conv2D(512, (3, 3), padding='same', name='block4_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', name='block4_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', name='block4_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', name='block4_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))

    model.add(Conv2D(512, (3, 3), padding='same', name='block5_conv1'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', name='block5_conv2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', name='block5_conv3'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Conv2D(512, (3, 3), padding='same', name='block5_conv4'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Flatten())

    model.add(Dense(4096))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(4096, name='fc2'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(nb_classes))
    model.add(BatchNormalization())
    model.add(Activation('softmax'))
    return model
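
# Hypothetical usage sketch (not part of the original file): training the
# model above on CIFAR-10 with the standard keras.datasets loader. The Adam
# optimizer, batch size, and epoch count are illustrative assumptions.
def _cifar10_training_sketch():
    from keras.datasets import cifar10
    from keras.utils import to_categorical

    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.astype('float32') / 255.0
    x_test = x_test.astype('float32') / 255.0
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)

    model = get_model(nb_classes=10)
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit(x_train, y_train, batch_size=128, epochs=10,
              validation_data=(x_test, y_test))
    return model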
def VGG16(include_top=True,
          weights='imagenet',
          input_tensor=None,
          input_shape=None,
          pooling=None,
          classes=1000):
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        img_input = Input(tensor=input_tensor, shape=input_shape)

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same',
               name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same',
               name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same',
               name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same',
               name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, activation='relu', name='fc1')(x)
        x = Dropout(0.5)(x)
        x = Dense(4096, activation='relu', name='fc2')(x)
        x = Dropout(0.5)(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vgg16')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH,
                cache_subdir='models')
        else:
            weights_path = get_file(
                'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP,
                cache_subdir='models')
        model.load_weights(weights_path)

    # Truncate and replace softmax layer for transfer learning.
    # A functional `Model` has no `.add()`, so the new head is attached with
    # the functional API: drop the old softmax layer and feed its
    # predecessor's output into a fresh 5089-way softmax.
    x = model.layers[-2].output
    predictions = Dense(5089, activation='softmax', name='predictions')(x)
    model = Model(model.input, predictions, name='vgg16')

    # Learning rate is changed to 0.001
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Backend/data-format conversion is intentionally disabled below.
    """
    if K.backend() == 'theano':
        layer_utils.convert_all_kernels_in_model(model)
    if K.image_data_format() == 'channels_first':
        if include_top:
            maxpool = model.get_layer(name='block5_pool')
            shape = maxpool.output_shape[1:]
            dense = model.get_layer(name='fc1')
            layer_utils.convert_dense_weights_data_format(dense, shape,
                                                          'channels_first')
    """
    return model
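
# Hypothetical usage sketch (not part of the original file): fine-tuning the
# re-headed VGG16 above. Freezing the pre-trained layers and the optimizer
# settings here are illustrative assumptions; `x_train`/`y_train` must be
# preprocessed 224x224 images and one-hot labels over 5089 classes.
def _vgg16_finetune_sketch(x_train, y_train):
    model = VGG16(weights='imagenet', include_top=True)

    # Train only the new softmax head; keep the pre-trained layers fixed,
    # then recompile so the trainability change takes effect.
    for layer in model.layers[:-1]:
        layer.trainable = False
    model.compile(optimizer=SGD(lr=1e-3, momentum=0.9, nesterov=True),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train, y_train, batch_size=32, epochs=5, validation_split=0.1)
    return model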
# Shared layer instances reused by the functional builders such as
# `_Recognizer`. The `Conv`/`Pooling`/`Padding` aliases and the earlier
# Padding1/Conv1/Pooling1/Padding2 instances are presumably defined before
# this point (this fragment starts mid-way through the definitions).
Conv2 = Conv(64, (5, 5), activation='relu', kernel_initializer='glorot_normal')
Pooling2 = Pooling((3, 3))
Padding3 = Padding((1, 1))
Conv3 = Conv(128, (3, 3), activation='relu', kernel_initializer='glorot_normal')
Pooling3 = Pooling((2, 2))
Padding4 = Padding((1, 1))
Conv4 = Conv(256, (3, 3), activation='relu', kernel_initializer='glorot_normal')
Pooling4 = Pooling((2, 2))
Flatten = Flatten()  # rebinds the name to a shared Flatten instance

# MLP-Encoder
Hidden1 = Dense(128, activation='softplus', kernel_initializer='glorot_normal')
Hidden2 = Dense(128, activation='softplus', kernel_initializer='glorot_normal')
Latent = Dense(latent_node, name='latent', kernel_initializer='glorot_normal')

# Categorizer
Categorize = Dense(12, activation='softmax', kernel_initializer='glorot_normal')

# MLP-Decoder
Unhidden2 = Dense(128, activation='softplus', kernel_initializer='glorot_normal')
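
# Hypothetical wiring sketch (not part of the original file): one plausible
# way the shared layers above and `_Recognizer` could be assembled into a
# 12-way categorizer. Assumptions: `Conv`/`Pooling`/`Padding` alias
# Conv2D/MaxPooling2D/ZeroPadding2D, the missing Padding1/Conv1/Pooling1/
# Padding2 instances are defined like those above, `Input`/`Model` are
# imported from keras, and the 64x64 grayscale input size is a placeholder.
def _build_categorizer_sketch(input_shape=(64, 64, 1)):
    inputs = Input(shape=input_shape, name='image')
    features = _Recognizer(inputs)   # shared conv feature extractor
    h = Hidden1(features)            # shared MLP-encoder layers
    h = Hidden2(h)
    z = Latent(h)                    # latent code
    y = Categorize(z)                # 12-way softmax head
    return Model(inputs, y, name='categorizer')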
X = pickle.load(open("X"+str(files)+"bw.pickle", "rb")) y = pickle.load(open('y'+str(files)+'bw.pickle', 'rb')) X = X/255.0 model = Sequential() # filter = 20, kernel = 3, strides = 1, input_shape = (X.shape[1:]) model.add(Conv2D(32, 3, strides = 2, activation = 'relu', input_shape = (20, 20, 1))) model.add(MaxPooling2D(pool_size=(1,1))) # strides = x # filter = 20, kernel = 3, strides = 1 model.add(Conv2D(32, 3, strides = 2, activation = 'relu')) model.add(MaxPooling2D(pool_size=(1,1))) # strides = x model.add(Flatten()) model.add(Dense(2, activation = 'relu')) model.compile(loss='sparse_categorical_crossentropy', metrics=['accuracy', 'mae', 'mse'], optimizer='sgd') for l_r in range (1, 4, 1): l_r /= 10 for mom in range (1, 4, 1): mom /= 10 optimizers.SGD(lr = l_r, momentum = mom) history = model.fit(X, y, batch_size=5, epochs=5, validation_split=0.1)