from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam


def build_model_combine_features(self, load_weight=False):
    # CNN branch: feature extractor for the 11x11x3 image patch.
    image_input = Input(shape=(11, 11, 3), name='image_input')
    x = Conv2D(filters=16, kernel_size=5, strides=1, padding='valid',
               activation='relu', name='Conv1')(image_input)
    x = Conv2D(filters=24, kernel_size=3, strides=1, padding='same',
               activation='relu', name='Conv2')(x)
    x = Conv2D(filters=32, kernel_size=3, strides=1, padding='same',
               activation='relu', name='Conv3')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(filters=64, kernel_size=3, strides=1, padding='same',
               activation='relu', name='Conv4')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=2)(x)
    x = Conv2D(filters=96, kernel_size=3, strides=1, padding='same',
               activation='relu', name='Conv5')(x)
    cnn_features = Flatten()(x)

    # Location branch: small dense transform of the 2-D location vector.
    location_input = Input(shape=(2,), name='location_input')
    location_features = Dense(2, activation='relu')(location_input)

    # Concatenate is a layer, not a model: merge the two feature tensors,
    # then stack the classification head with the functional API.
    merged = Concatenate()([location_features, cnn_features])
    merged = Dense(500, activation='relu')(merged)
    output = Dense(2, activation='softmax')(merged)

    model = Model(inputs=[image_input, location_input], outputs=output)
    model.compile(optimizer=Adam(learning_rate=self.lr),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    if load_weight:
        print("Loading weights...")
        model.load_weights(WEIGHT_DIR + "")  # weight file name left unspecified in the original
        print("Weights loaded.")
    return model
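# Hypothetical usage sketch for the two-input model above (not from the
# original code): the instance `net`, the batch size, and the random data are
# assumptions for illustration. Inputs must be passed in the same order as in
# the Model definition: [image patches, location vectors].
import numpy as np

images = np.random.rand(8, 11, 11, 3).astype('float32')
locations = np.random.rand(8, 2).astype('float32')
labels = np.random.randint(0, 2, size=(8,))
one_hot = np.eye(2)[labels]  # categorical_crossentropy expects one-hot targets

model = net.build_model_combine_features()
model.fit([images, locations], one_hot, epochs=1, batch_size=4)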
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Flatten, Dense, Concatenate
from tensorflow.keras.models import Model
# LRN is assumed to be a custom Local Response Normalization layer defined elsewhere in the project.


def build_base_network(self, X):
    # Avoid shadowing the built-in `input`; the shape follows the original X.T.shape.
    image_input = Input(shape=X.T.shape, name='image_input')

    # Deep branch: VGG16 feature extractor plus a 4096-unit projection
    # (the original marked this Dense as optional / "can be removed").
    vgg = VGG16(weights='imagenet', include_top=False)
    deep = vgg(image_input)
    deep = Flatten()(deep)
    deep = Dense(4096)(deep)

    # Shallow branch 1.
    s1 = MaxPooling2D(pool_size=4, strides=8, name='subsample_1')(image_input)
    s1 = Conv2D(96, kernel_size=8, strides=4, activation='relu', name='conv1')(s1)
    s1 = LRN(name='conv1_norm')(s1)
    s1 = MaxPooling2D(pool_size=7, strides=4, padding='valid', name='pool1')(s1)
    s1 = Flatten()(s1)

    # Shallow branch 2.
    s2 = MaxPooling2D(pool_size=8, strides=8, name='subsample_2')(image_input)
    s2 = Conv2D(96, kernel_size=8, strides=4, activation='relu', name='conv2')(s2)
    s2 = LRN(name='conv2_norm')(s2)
    s2 = MaxPooling2D(pool_size=3, strides=2, name='pool2')(s2)
    s2 = Flatten()(s2)

    # Concatenate is a layer: it merges tensors, not Sequential models.
    shallow = Concatenate(name='shallow_concat')([s1, s2])

    # Original TODO: combine the VGG16 and shallow outputs; a plain
    # concatenation is used here as the simplest completion.
    combined = Concatenate(name='deep_shallow_concat')([deep, shallow])
    combined = Dense(4096)(combined)
    combined = LRN(name='shallow_norm', alpha=8191, n=8191, beta=0.5)(combined)

    model = Model(inputs=image_input, outputs=combined)
    return model
from tensorflow.keras.layers import Concatenate, Dropout, LeakyReLU, Dense
from tensorflow.keras.models import Model


def Discriminator(d):
    # Discriminator_x and Discriminator_z are assumed to return Keras Models.
    dx = Discriminator_x(d)
    dz = Discriminator_z()

    # Concatenate is a layer: merge the two sub-discriminators' output
    # tensors, then stack the joint head with the functional API.
    combined = Concatenate()([dx.output, dz.output])
    combined = Dropout(0.2)(combined)
    combined = LeakyReLU(alpha=0.1)(combined)
    score = Dense(units=1, activation='linear')(combined)

    # The joint discriminator takes both sub-networks' inputs (x and z).
    return Model(inputs=[dx.input, dz.input], outputs=score)
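# Hypothetical usage sketch (not from the original code): `d` and the training
# setup below are assumptions. With a linear output, a least-squares
# (LSGAN-style) loss is one common choice; swap in whatever loss the
# surrounding GAN training loop actually uses.
disc = Discriminator(d)
disc.compile(optimizer='adam', loss='mse')
disc.summary()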
import numpy as np
from tensorflow.keras.layers import Input, Dense, Concatenate
from tensorflow.keras.models import Model

x = np.array([1, 2, 3])
y = np.array([1, 2, 3])

# Two single-unit branches built with the functional API; Concatenate is a
# layer that merges tensors, so Sequential models cannot be passed to it.
first_input = Input(shape=(1,))
first = Dense(1, activation='sigmoid')(first_input)
second_input = Input(shape=(1,))
second = Dense(1, activation='sigmoid')(second_input)

merged = Concatenate()([first, second])
merged = Dense(2, activation='relu')(merged)
output = Dense(1, activation='linear')(merged)

result = Model(inputs=[first_input, second_input], outputs=output)
result.summary()

# ada_grad = Adagrad(learning_rate=0.1, epsilon=1e-08)  # alternative optimizer
# result.compile(optimizer=ada_grad, loss=_loss_tensor, metrics=['accuracy'])  # _loss_tensor: custom loss defined elsewhere
result.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
result.fit([x, x], y, epochs=100, batch_size=1)
loss, acc = result.evaluate([x, x], y, batch_size=1)
print("acc : ", acc)