# Imports assumed (Keras 1.x-style Sequential API, matching the calls below).
from keras.models import Sequential
from keras.layers import Embedding, Convolution1D, Flatten, Dense

def _build_model(sequence_length):
    model = Sequential()
    # Vocabulary of 20 tokens, each embedded into a 10-dimensional vector.
    model.add(Embedding(20, 10, input_length=sequence_length))
    # 4 filters of width 3 over the embedded sequence.
    model.add(Convolution1D(4, 3))
    model.add(Flatten())
    # 5-way classification head.
    model.add(Dense(5, activation="softmax"))
    return model
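A quick way to sanity-check the builder is to instantiate and compile it. A minimal sketch, assuming a hypothetical sequence length of 30 and standard Keras 1.x compile arguments (neither is from the source):

model = _build_model(30)  # 30 is a hypothetical sequence length
model.compile(loss="categorical_crossentropy", optimizer="adam",
              metrics=["accuracy"])
model.summary()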
# Imports assumed (Keras 1.x API: Convolution2D with subsample/border_mode).
from keras.models import Sequential
from keras.layers import Convolution2D, Activation, Flatten, Dense

def _build_model(self):
    print("Now we build the model")
    model = Sequential()
    # Input is an 80*80*4 stack of frames (rows * cols * channels).
    model.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same',
                            input_shape=(IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dense(2))  # 1 x 2 output: one probability per action.
    model.add(Activation('softmax'))
    return model
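Before training, the network still has to be compiled. A minimal sketch, assuming an Adam optimizer and a crossentropy loss to match the softmax head (the learning rate and loss choice are assumptions, not from the source):

from keras.optimizers import Adam

model = self._build_model()  # called inside the agent class
# lr=1e-4 is a placeholder; tune for the actual task.
model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4))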
# Imports assumed (Analytics Zoo / BigDL packages matching the calls below).
from zoo.feature.common import ChainedPreprocessing
from zoo.feature.image import (RowToImageFeature, ImageResize, ImageCenterCrop,
                               ImageChannelNormalize, ImageMatToTensor,
                               ImageFeatureToTensor)
from zoo.pipeline.api.net import Net
from zoo.pipeline.api.keras.layers import Input, Flatten, Dense
from zoo.pipeline.api.keras.models import Model
from zoo.pipeline.nnframes import NNClassifier
from bigdl.nn.criterion import CrossEntropyCriterion
from pyspark.ml import Pipeline

# The head of this chain is truncated in the source; RowToImageFeature() is a
# reconstruction from the standard Analytics Zoo transfer-learning example.
transformer = ChainedPreprocessing([
    RowToImageFeature(),
    ImageResize(256, 256),
    ImageCenterCrop(224, 224),
    ImageChannelNormalize(123.0, 117.0, 104.0),
    ImageMatToTensor(),
    ImageFeatureToTensor()
])

full_model = Net.load_bigdl(model_path)
# Create a new model by removing the layers after pool5/drop_7x7_s1.
model = full_model.new_graph(["pool5/drop_7x7_s1"])
# Freeze the layers from the input up to pool4/3x3_s2 inclusive.
model.freeze_up_to(["pool4/3x3_s2"])

inputNode = Input(name="input", shape=(3, 224, 224))
inception = model.to_keras()(inputNode)
flatten = Flatten()(inception)
logits = Dense(2)(flatten)
lrModel = Model(inputNode, logits)

classifier = NNClassifier(lrModel, CrossEntropyCriterion(), transformer) \
    .setLearningRate(0.003).setBatchSize(40).setMaxEpoch(1).setFeaturesCol("image")

pipeline = Pipeline(stages=[classifier])
catdogModel = pipeline.fit(trainingDF)
predictionDF = catdogModel.transform(validationDF).cache()
predictionDF.show()

correct = predictionDF.filter("label=prediction").count()
overall = predictionDF.count()
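The excerpt stops after counting matches; the obvious next step is to report accuracy from these two counts (the print format is illustrative, the arithmetic follows directly from the counts above):

accuracy = correct / float(overall)
print("Validation accuracy = %g (%d/%d)" % (accuracy, correct, overall))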
convolve_net.add(Convolution2D(
    nb_filter=LAYER_2_NUM_CHANNEL,     # Channels: 8 -> 2.
    nb_row=CONVOLVE_2_KERNEL_SIZE,     # Size: 12 - 5 + 1 = 8.
    nb_col=CONVOLVE_2_KERNEL_SIZE,
    activation="relu",
    W_regularizer=L2Regularizer(args.penalty_rate)))
convolve_net.add(AveragePooling2D(
    pool_size=(POOLING_2_WINDOW_SIZE,  # Size: 8 / 2 = 4.
               POOLING_2_WINDOW_SIZE),
    strides=(POOLING_2_STRIDE_SIZE, POOLING_2_STRIDE_SIZE)))
convolve_net.add(BatchNormalization())
convolve_net.add(Flatten())            # Size: 4 * 4 * 2 -> 32.
convolve_net.add(Dense(
    output_dim=FC_LINEAR_DIMENSION,    # Size: 32 -> 64.
    activation="sigmoid",
    W_regularizer=L2Regularizer(args.penalty_rate)))
convolve_net.add(Dropout(args.dropout_rate))

# BigDL does not support parameter sharing directly, so as a workaround the
# same convolve_net is applied to both inputs through TimeDistributed.
both_feature = TimeDistributed(layer=convolve_net,
                               input_shape=input_shape)(both_input)
encode_left = both_feature.index_select(1, 0)
encode_right = both_feature.index_select(1, 1)
distance = autograd.abs(encode_left - encode_right)
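The excerpt ends at the elementwise L1 distance between the two encodings. In a typical Siamese setup this distance is mapped to a match probability by one final dense layer; a minimal sketch under that assumption (the sigmoid head and the model wiring are not in the source):

# Hypothetical similarity head: squash the 64-dim distance to one probability.
predict = Dense(output_dim=1, activation="sigmoid",
                W_regularizer=L2Regularizer(args.penalty_rate))(distance)
siamese_model = Model(both_input, predict)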