def create_model(input_shape, num_class, k):
    fgc_base = MobileNet(input_shape=input_shape,
                         include_top=False,
                         weights='imagenet',
                         alpha=1.)
    fgc_base.trainable = True
    # fgc_base.summary()
    feature2 = fgc_base.get_layer("conv_pw_11_relu").output
    fc_model = Model(fgc_base.inputs[0], [fgc_base.output, feature2])
    fc_model.summary()

    input_tensor = Input(shape=input_shape)
    features = fc_model(input_tensor)

    # Object-level branch on the final MobileNet feature map
    fc_obj = GlobalMaxPool2D()(features[0])
    fc_obj = Dropout(0.7)(fc_obj)
    fc_obj = Dense(num_class, activation="softmax")(fc_obj)

    # Part-level branch on the intermediate "conv_pw_11_relu" feature map
    fc_part = Conv2D(filters=num_class * k,
                     kernel_size=(1, 1),
                     activation="relu")(features[1])
    fc_part = GlobalMaxPool2D()(fc_part)
    fc_part = Dropout(0.5)(fc_part)

    # Cross-channel pooling branch: average every k part filters down to one score per class
    fc_ccp = Lambda(lambda tmp: tf.expand_dims(tmp, axis=-1))(fc_part)
    fc_ccp = AvgPool1D(pool_size=k)(fc_ccp)
    fc_ccp = Lambda(lambda tmp: tf.squeeze(tmp, [-1]))(fc_ccp)
    fc_ccp = Activation(activation="softmax")(fc_ccp)

    fc_part = Dense(num_class, activation="softmax")(fc_part)

    output = Concatenate(axis=-1)([fc_obj, fc_part, fc_ccp])
    return Model(input_tensor, output)
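# --- Usage sketch (not part of the original code) ---------------------------------
# create_model returns a single output that concatenates three softmax vectors
# (object branch, part branch, cross-channel branch), each num_class wide, so a plain
# categorical_crossentropy on the raw output will not work. example_split_loss below is
# a hypothetical stand-in for the ctm_loss used in a later snippet; it assumes the
# labels fed to fit() are the one-hot class vector tiled three times, e.g.
# np.tile(one_hot, 3), so y_true and y_pred have the same width.
import tensorflow as tf
from keras.losses import categorical_crossentropy
from keras.optimizers import SGD

NUM_CLASS, K_PARTS = 12, 10

def example_split_loss(y_true, y_pred):
    # both y_true and y_pred are [obj | part | ccp] concatenations, each NUM_CLASS wide
    obj_t, part_t, ccp_t = tf.split(y_true, 3, axis=-1)
    obj_p, part_p, ccp_p = tf.split(y_pred, 3, axis=-1)
    return (categorical_crossentropy(obj_t, obj_p) +
            categorical_crossentropy(part_t, part_p) +
            categorical_crossentropy(ccp_t, ccp_p))

example_model = create_model(input_shape=(224, 224, 3), num_class=NUM_CLASS, k=K_PARTS)
example_model.compile(optimizer=SGD(lr=1e-3, momentum=0.9), loss=example_split_loss)
# -----------------------------------------------------------------------------------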
def build_model():
    input_tensor = Input(shape=(224, 224, 3))
    base_model = MobileNet(include_top=False,
                           weights='imagenet',
                           input_tensor=input_tensor,
                           input_shape=(224, 224, 3),
                           pooling='avg')

    base_model.trainable = True
    for layer in base_model.layers:
        layer.trainable = True  # set trainable to False here to freeze the layers instead

    op = Dense(224, activation='relu')(base_model.output)
    op = Dropout(.25)(op)
    ##
    # softmax turns the raw scores into a probability for every possible class.
    #
    # The predicted class is the index with the highest probability; for example, if
    # 'Coat' is the most likely class, the output is close to a one-hot vector such as
    # [0, 0, 0, 0, 1, 0, 0, 0, 0, 0], with the largest value at the index for 'Coat'.
    ##
    # One unit per class; Dense(1) with softmax would always output 1.0.
    output_tensor = Dense(10, activation='softmax')(op)

    model = Model(inputs=input_tensor, outputs=output_tensor)
    return model
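# --- Hypothetical training sketch for build_model (not from the original source) ---
# Assumes `train_images` is an array of 224x224 RGB images and `train_labels` holds
# one-hot vectors for the 10 clothing classes referenced in the comment above.
model = build_model()
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # pairs with the softmax output
              metrics=['accuracy'])
model.fit(train_images, train_labels, batch_size=32, epochs=5, validation_split=0.1)
# ------------------------------------------------------------------------------------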
def __init__(self, gpu_id=5):
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    num_class = 12
    BATCH_SIZE = 32
    k = 10

    fgc_base = MobileNet(input_shape=(224, 224, 3),
                         include_top=False,
                         weights=None,
                         alpha=1.)
    fgc_base.trainable = True
    # fgc_base.summary()
    feature2 = fgc_base.get_layer("conv_pw_11_relu").output
    fc_model = Model(fgc_base.inputs[0], [fgc_base.output, feature2])
    # fc_model.summary()

    input_tensor = Input(shape=(224, 224, 3))
    input_tensor_bn = BatchNormalization()(input_tensor)
    features = fc_model(input_tensor_bn)

    fc_obj = GlobalMaxPool2D()(features[0])
    fc_obj = Dropout(0.7)(fc_obj)
    fc_obj = Dense(12, activation="softmax")(fc_obj)

    fc_part = Conv2D(filters=num_class * k,
                     kernel_size=(1, 1),
                     activation="relu")(features[1])
    fc_part = GlobalMaxPool2D()(fc_part)
    fc_part = Dropout(0.5)(fc_part)

    fc_ccp = Lambda(lambda tmp: tf.expand_dims(tmp, axis=-1))(fc_part)
    fc_ccp = AvgPool1D(pool_size=k)(fc_ccp)
    fc_ccp = Lambda(lambda tmp: tf.squeeze(tmp, [-1]))(fc_ccp)
    fc_ccp = Activation(activation="softmax")(fc_ccp)

    fc_part = Dense(12, activation="softmax")(fc_part)

    output = Concatenate(axis=-1)([fc_obj, fc_part, fc_ccp])
    self.dfb_cnn = Model(input_tensor, output)

    lr = 0.001
    clip_value = 0.01
    self.dfb_cnn.compile(optimizer=SGD(lr=lr,
                                       momentum=0.9,
                                       decay=1e-5,
                                       nesterov=True,
                                       clipvalue=clip_value),
                         loss=ctm_loss,
                         metrics=[ctm_acc1, ctm_acck])

    path_prefix = "./datasets/model/escale/focal_loss_2_0.25/"
    # path_prefix = "./datasets/focal_loss_2_0.25/"
    self.dfb_cnn.load_weights(filepath=path_prefix + "weights.h5",
                              by_name=True,  # skip_mismatch is only honoured when loading by name
                              skip_mismatch=True)
    ######
def MobileNetmodel(no_classes, shape):
    base_model = MobileNet(include_top=False, weights='imagenet', input_shape=shape)
    # Freeze the base_model
    base_model.trainable = False

    inputs = Input(shape=shape)
    x = base_model(inputs, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    # x = Dropout(0.2)(x)
    predictions = Dense(no_classes, activation='softmax', name='predictions')(x)

    model = Model(inputs, outputs=predictions)
    return model
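# --- Sketch of a typical two-phase use of MobileNetmodel (an assumption, not part of
# the original): first train only the new head on the frozen base, then unfreeze and
# fine-tune at a lower learning rate. `train_gen` / `val_gen` are placeholder generators.
from keras.optimizers import Adam

model = MobileNetmodel(no_classes=5, shape=(224, 224, 3))
model.compile(optimizer=Adam(lr=1e-3), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_gen, validation_data=val_gen, epochs=5)

# Fine-tune: unfreeze the base and recompile so the change takes effect.
model.layers[1].trainable = True  # the MobileNet base is wrapped as the model's second layer
model.compile(optimizer=Adam(lr=1e-5), loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(train_gen, validation_data=val_gen, epochs=5)
# -------------------------------------------------------------------------------------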
def _load_mobilenet():
    global feature_extraction_model, mobilenet_activations

    # Feature extraction module
    feature_extraction_model = MobileNet(input_shape=(EMBEDDING_IMAGE_SIZE, EMBEDDING_IMAGE_SIZE, 3),
                                         alpha=1.0,
                                         depth_multiplier=1,
                                         include_top=True,
                                         weights='imagenet')

    # Set it up so that we can do inference on MobileNet without training it by mistake
    feature_extraction_model.graph = tf.get_default_graph()
    feature_extraction_model.trainable = False

    # Get the pre-softmax activations from MobileNet
    mobilenet_activations = Model(feature_extraction_model.input,
                                  feature_extraction_model.layers[-3].output)
    mobilenet_activations.trainable = False
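# --- Hypothetical usage of the globals set by _load_mobilenet (names such as
# EMBEDDING_IMAGE_SIZE come from the surrounding module and are assumed here):
# run a preprocessed image batch through mobilenet_activations to obtain the
# pre-softmax activations that serve as the image embedding.
import numpy as np
from keras.applications.mobilenet import preprocess_input

_load_mobilenet()
batch = np.random.randint(0, 256, (1, EMBEDDING_IMAGE_SIZE, EMBEDDING_IMAGE_SIZE, 3)).astype("float32")
embedding = mobilenet_activations.predict(preprocess_input(batch))
print(embedding.shape)  # the pre-softmax activations used as the embedding
# -------------------------------------------------------------------------------------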
from keras.models import Model
from keras.applications.mobilenet import MobileNet
from keras.layers import SeparableConv2D, Dropout, Dense, MaxPooling2D, Flatten, GlobalAveragePooling2D, Input
from keras.preprocessing.image import ImageDataGenerator

# model
base_model = MobileNet(alpha=0.25, depth_multiplier=1, dropout=1e-4,
                       weights='imagenet', include_top=False)
base_model.trainable = False

x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dropout(0.3)(x)
x = Dense(64, activation='relu')(x)
x = Dropout(0.1)(x)
predictions = Dense(2, activation='softmax')(x)

model = Model(inputs=base_model.input, outputs=predictions)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# load the data
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    # horizontal_flip=True,
    # vertical_flip=True,
def create_posenet_mobilenet_v1(weights_path=None, trainable=True):
    # create the base pre-trained model
    if K.image_data_format() == 'channels_first':
        image_input = Input(shape=(None, 3, 224, 224))
    else:
        image_input = Input(shape=(None, 224, 224, 3))

    alpha = 1.0
    dropout = 1e-3
    if K.image_data_format() == 'channels_first':
        input_shape = (3, 224, 224)
    else:
        input_shape = (224, 224, 3)

    base_model = MobileNet(input_shape=input_shape,
                           alpha=alpha,
                           dropout=dropout,
                           weights='imagenet',
                           include_top=False)
    base_model.trainable = False

    # add top layer
    model_output = TimeDistributed(base_model)(image_input)
    if K.image_data_format() == 'channels_first':
        shape = (int(1024 * alpha), 1, 1)
    else:
        shape = (1, 1, int(1024 * alpha))
    model_output = TimeDistributed(GlobalAveragePooling2D())(model_output)
    model_output = TimeDistributed(Reshape(shape, name='reshape_1'))(model_output)
    model_output = TimeDistributed(Dropout(dropout, name='dropout'))(model_output)

    conv_pose_xyz = TimeDistributed(
        Conv2D(1024, (1, 1), padding='same', name='conv_pose_xyz'))(model_output)
    conv_pose_xyz_flat = TimeDistributed(Flatten())(conv_pose_xyz)
    lstm_xyz = LSTM(256, return_sequences=True, name='lstm_xyz')(conv_pose_xyz_flat)
    lstm_dense_xyz = TimeDistributed(Dense(128, activation='relu'),
                                     name='lstm_dense_xyz')(lstm_xyz)
    lstm_pose_xyz = TimeDistributed(Dense(3), name='lstm_pose_xyz')(lstm_dense_xyz)

    conv_pose_wpqr = TimeDistributed(
        Conv2D(1024, (1, 1), padding='same', name='conv_pose_wpqr'))(model_output)
    conv_pose_wpqr_flat = TimeDistributed(Flatten())(conv_pose_wpqr)
    lstm_wpqr = LSTM(256, return_sequences=True, name='lstm_wpqr')(conv_pose_wpqr_flat)
    lstm_dense_wpqr = TimeDistributed(Dense(128, activation='relu'),
                                      name='lstm_dense_wpqr')(lstm_wpqr)
    lstm_pose_wpqr = TimeDistributed(Dense(4), name='lstm_pose_wpqr')(lstm_dense_wpqr)

    posenet = Model(inputs=image_input, outputs=[lstm_pose_xyz, lstm_pose_wpqr])

    if weights_path:
        print("start load image network weights")
        posenet.load_weights(weights_path, by_name=True)
        print("finish load image network weights")

    if not trainable:
        for layer in posenet.layers:
            layer.trainable = False

    return posenet
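# --- Hypothetical compile sketch for create_posenet_mobilenet_v1 (not from the original
# source): the model has two outputs, position (xyz) and orientation quaternion (wpqr),
# so each gets its own loss; the 500x weight on the rotation term is an assumption
# borrowed from common PoseNet-style setups.
from keras.optimizers import Adam

posenet = create_posenet_mobilenet_v1(weights_path=None, trainable=True)
posenet.compile(optimizer=Adam(lr=1e-4),
                loss={'lstm_pose_xyz': 'mse', 'lstm_pose_wpqr': 'mse'},
                loss_weights={'lstm_pose_xyz': 1.0, 'lstm_pose_wpqr': 500.0})
# x_seq: (batch, timesteps, 224, 224, 3); targets: (batch, timesteps, 3) and (batch, timesteps, 4)
# posenet.fit(x_seq, {'lstm_pose_xyz': y_xyz, 'lstm_pose_wpqr': y_wpqr}, batch_size=2)
# -------------------------------------------------------------------------------------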
def create_model_functional(X, Y0size=576, freeze_fac=0.75, quick_setup=False):
    """
    Accepts a 'large' grayscale image (or set of images) and runs a convnet & pooling
    to shrink it in a learnable way; this produces a smaller set of 3 'color' images
    for different features.

    This newer model incorporates sigmoid activations for existence / noobj predictions,
    and uses an "InterleaveColumns" layer to reorder the columns and produce output
    compatible with the previous model.
    """
    print("Using functional API model, cf.basemodel =", cf.basemodel)

    # First part of the model: take the large grayscale image, shrink it, and
    # add 3 conv operations to produce something akin to color channels to feed into the
    # 'standard' models below:
    kernel_size = (3, 3)  # convolution kernel size
    pool_size = (2, 2)    # size of pooling area for max pooling
    nfilters = 3          # adding 3 'color' channels
    print("X[0].shape = ", X[0].shape)
    inputs = Input(shape=X[0].shape)
    x = inputs
    x = Conv2D(nfilters, kernel_size, strides=(1, 1), padding='same', use_bias=False)(x)  # give us 3 colors
    x = AveragePooling2D(pool_size=pool_size)(x)  # shrink image

    # ResNet block:
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    # x = Activation('Mish')(x)
    x = Conv2D(nfilters, kernel_size, strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(alpha=0.1)(x)
    # x = Activation('Mish')(x)
    x = Conv2D(nfilters, kernel_size, strides=(1, 1), padding='same', use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Add()([x, AveragePooling2D(pool_size=pool_size)(inputs)])  # residual skip connection on shrunk input
    # x = Add()([x, id])  # residual skip connection on shrunk input
    x = Dropout(0.1)(x)
    # Finished with first part of the model
    print("After initial convolutions & pooling, x.shape = ", x.shape)

    # 'PreFab'/standard CNN middle section
    # weights = 'imagenet'  # None or 'imagenet'. Note: if you ever get "You are trying to load a model with __layers___",
    # you need to add by_name=True in the load_weights call for your PreFab CNN
    weights = None
    weights = "imagenet"
    base_model_class = str_to_class(cf.basemodel)
    if cf.basemodel == 'MobileNet':
        # with CustomObjectScope({'relu6': ReLU(6.), 'DepthwiseConv2D': DepthwiseConv2D}):  # newer keras
        with CustomObjectScope({'relu6': keras.applications.mobilenet.relu6,
                                'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D,
                                'custom_loss': custom_loss,
                                'tf': tf}):  # older keras
            input_shape = x.shape[1:4]
            print("input_shape = ", input_shape)
            print("input_shape[-1] = ", input_shape[-1])
            base_model = MobileNet(input_shape=input_shape,
                                   weights=weights,
                                   include_top=False,
                                   input_tensor=x,
                                   dropout=0.6)  # Works well, VERY FAST! Needs img size 224x224
    else:
        base_model_class = getattr(sys.modules[__name__], cf.basemodel)  # use basemodel string to load Keras model
        # Note: this requires modifying keras file applications/<model_name>.py
        # to add ", by_name=True" in the load_weights() line(s)
        base_model = base_model_class(weights=None, include_top=False, input_tensor=x)

    num_layers = len(base_model.layers)
    freeze_layers = int(num_layers * freeze_fac)
    print("Freezing ", freeze_layers, "/", num_layers, " layers of base_model")
    if (freeze_fac == 1.0):
        base_model.trainable = False
    else:
        base_model.trainable = True

    # set the first N layers of the base model (e.g., up to the last conv block)
    # to non-trainable (weights will not be updated)
    # for fine-tuning, "freeze" most of the pre-trained model, and then unfreeze it later
    if (freeze_layers > 0):
        for i in range(freeze_layers):
            base_model.layers[i].trainable = False

    print("base_model.output_shape =", base_model.output_shape)

    # Finally we stack a 'flat' output on top
    x = Flatten(input_shape=base_model.output_shape[1:])(base_model.output)

    if cf.model_type == 'compound':
        # note: this is rarely used; instead we use either SelectiveSigmoid or just alter the loss function
        if (Y0size % cf.vars_per_pred != 0):
            raise ValueError("Y0size (=" + str(Y0size) + ") must be a multiple of cf.vars_per_pred (=" +
                             str(cf.vars_per_pred) + ")")
        n_preds = int(Y0size / cf.vars_per_pred)
        sigout = Dense(n_preds, activation='sigmoid', name='SigmoidOutput')(x)
        denseout = Dense(Y0size - n_preds, name='DenseOutput')(x)
        top = Concatenate()([sigout, denseout])
        top = InterleaveColumns(start_index=cf.ind_noobj, name='FinalOutput')(top)
    else:
        top = Dense(Y0size, name='FinalOutput')(x)

    '''
    # We'll handle the 'same' loss function option in the loss routine itself
    if cf.loss_type != 'same':  # cf.model_type == 'ss':
        print("**Adding SelectiveSigmoid**")
        top = SelectiveSigmoid(name='ReallyFinalOutput')(top)  # make the existence variables sigmoids
    '''

    model = Model(inputs=inputs, outputs=top)
    if quick_setup:
        return model

    model = add_regularization(model)
    print("After adding l2 regularization, model.losses =", model.losses)

    # One more time just for good measure: freeze the base model
    # set the first N layers of the base model (e.g., up to the last conv block)
    # to non-trainable (weights will not be updated)
    # for fine-tuning, "freeze" most of the pre-trained model, and then unfreeze it later
    num_layers = len(base_model.layers)
    freeze_layers = int(num_layers * freeze_fac)
    if (freeze_layers > 0):
        print("again: Freezing ", freeze_layers, "/", num_layers, " layers of base_model")
        for i in range(freeze_layers):
            base_model.layers[i].trainable = False

    # show how many trainable parameters there are
    trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
    non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
    print('create_model_functional: Total params: {:,}'.format(trainable_count + non_trainable_count))
    print('create_model_functional: Trainable params: {:,}'.format(trainable_count))
    print('create_model_functional: Non-trainable params: {:,}'.format(non_trainable_count))

    return model
def train(self, pipeline):
    """
    This function trains the model.
    """
    print("Training " + self.backbone)
    if self.backbone == "inception":
        if self.use_pretrained_weights:
            backbone = InceptionV3(weights="imagenet",
                                   include_top=False,
                                   input_shape=(self.size, self.size, 3))
            backbone.trainable = False
        else:
            backbone = InceptionV3(include_top=False,
                                   input_shape=(self.size, self.size, 3))
            backbone.trainable = True
    elif self.backbone == "VGG16":
        if self.use_pretrained_weights:
            backbone = VGG16(weights="imagenet",
                             include_top=False,
                             input_shape=(self.size, self.size, 3))
            backbone.trainable = False
        else:
            backbone = VGG16(include_top=False,
                             input_shape=(self.size, self.size, 3))
            backbone.trainable = True
    elif self.backbone == "MobileNet":
        if self.use_pretrained_weights:
            backbone = MobileNet(weights="imagenet",
                                 include_top=False,
                                 input_shape=(self.size, self.size, 3))
            backbone.trainable = False
        else:
            backbone = MobileNet(include_top=False,
                                 input_shape=(self.size, self.size, 3))
            backbone.trainable = True
    else:
        raise NotImplementedError("Do not support this kind of backbone.")

    model = Sequential([
        backbone,
        GlobalAveragePooling2D(),
        Dropout(0.1),
        Dense(1024, activation='relu'),
        Dense(6, activation='softmax')
    ])

    """
    This part can be modified to change the learning rate schedule.
    We will clean it up in the future; it was written in a hurry before a deadline.
    """
    initial_learning_rate = 0.001
    lr_schedule = keras.experimental.CosineDecay(initial_learning_rate,
                                                 decay_steps=1000,
                                                 alpha=0.0,
                                                 name=None)
    opt = keras.optimizers.RMSprop(learning_rate=lr_schedule)
    # opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.1, amsgrad=False)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'])

    checkpoint = ModelCheckpoint(self.save_dir,
                                 monitor='val_accuracy',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks_list = [checkpoint]

    steps_per_epoch = pipeline["train_pipeline"].n // self.bs
    validation_steps = pipeline["val_pipeline"].n // self.bs
    history = model.fit_generator(generator=pipeline["train_pipeline"],
                                  epochs=self.num_epoch,
                                  steps_per_epoch=steps_per_epoch,
                                  validation_data=pipeline["val_pipeline"],
                                  validation_steps=validation_steps,
                                  callbacks=callbacks_list)

    # Here the log is saved, and the model is also saved (via the checkpoint callback).
    with open(self.backbone + self.logname + 'train_log.txt', 'wb') as log:
        pickle.dump(history.history, log)
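# --- Sketch of the `pipeline` dict that train() expects (an assumption inferred from how
# it is indexed above): directory iterators keyed "train_pipeline" / "val_pipeline".
# Paths, image size, and batch size are placeholders.
from keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(rescale=1. / 255)
pipeline = {
    "train_pipeline": datagen.flow_from_directory("data/train", target_size=(224, 224),
                                                  batch_size=32, class_mode="categorical"),
    "val_pipeline": datagen.flow_from_directory("data/val", target_size=(224, 224),
                                                batch_size=32, class_mode="categorical"),
}
# trainer.train(pipeline)  # `trainer` is an instance of the class that defines train()
# -------------------------------------------------------------------------------------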
print('--> Starting evaluation...')

from keras.preprocessing.image import ImageDataGenerator
from keras import metrics


def in_top_k(y_true, y_pred):
    return metrics.top_k_categorical_accuracy(y_true, y_pred, k=5)


val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
validation_generator = val_datagen.flow_from_directory('./imagenet-data/validation',
                                                       target_size=(224, 224),
                                                       batch_size=10,
                                                       class_mode='categorical',
                                                       shuffle=False)

model.trainable = False
model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy', in_top_k])

results = model.evaluate(validation_generator,
                         steps=5000,
                         workers=1,
                         max_queue_size=1)

print('--> Results for ' + sys.argv[1])
print(model.metrics_names)
print(results)

#########################################
    np.asarray(Image.fromarray(X_test[i]).resize((224, 224)))
    for i in range(0, len(X_test))
])
X_test_new = X_test_new.astype("float32")
# Preprocess the test data
test_input = preprocess_input(X_test_new)

# Define the model
model = Sequential()
model.add(mobilenet_model)
model.add(Dropout(0.5))
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.5))
model.add(Dense(10, activation="softmax"))
model.summary()  # show a summary of the model
# Freeze the pretrained base model
mobilenet_model.trainable = False
# Compile the model
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# Train the model
history = model.fit(train_input, Y_train,
                    validation_data=(test_input, Y_test),
                    epochs=17,
                    batch_size=32,
                    verbose=2)
# Evaluate the model
print("\nTesting ...")
loss, accuracy = model.evaluate(test_input, Y_test)
print("Accuracy on the test set = {:.2f}".format(accuracy))
valid_batches = ImageDataGenerator().flow_from_directory(
    validation_dir,
    target_size=(img_width, img_height),
    classes=[
        'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
        'N', 'O', 'P', 'R', 'S', 'T', 'U', 'V', 'Y', 'Z',
        'del', 'nothing', 'space'
    ],
    batch_size=validBatchSize)

base_model = MobileNet(
    weights='imagenet',
    include_top=False,
    input_shape=IMG_SHAPE,
    pooling='avg',
)
base_model.trainable = True

model = Sequential()
model.add(base_model)
model.add(Dropout(0.25))
model.add(Dense(26, activation='softmax'))
model.summary()

stepsPerEpoch = numpy.ceil(train_batches.n / 4000)
validationSteps = numpy.ceil(valid_batches.n / 2000)

model.compile(Adam(lr=.0001),
              loss='categorical_crossentropy',
              metrics=['accuracy'])

checkDir = 'checkpoints'
checkpoint = ModelCheckpoint(
def create_posenet_mobilenet_v1(num_beacon, image_beacon_weights_path=None, trainable=True):
    # create the base pre-trained model
    if K.image_data_format() == 'channels_first':
        image_input = Input(shape=(None, 3, 224, 224), name='input_1')
    else:
        image_input = Input(shape=(None, 224, 224, 3), name='input_1')

    alpha = 1.0
    dropout = 1e-3
    if K.image_data_format() == 'channels_first':
        input_shape = (3, 224, 224)
    else:
        input_shape = (224, 224, 3)

    base_model = MobileNet(input_shape=input_shape,
                           alpha=alpha,
                           dropout=dropout,
                           weights='imagenet',
                           include_top=False)
    base_model.trainable = False

    # add top layer
    model_output = TimeDistributed(base_model)(image_input)
    if K.image_data_format() == 'channels_first':
        shape = (int(1024 * alpha), 1, 1)
    else:
        shape = (1, 1, int(1024 * alpha))
    model_output = TimeDistributed(GlobalAveragePooling2D())(model_output)
    model_output = TimeDistributed(Reshape(shape, name='reshape_1'))(model_output)
    model_output = TimeDistributed(Dropout(dropout, name='dropout'))(model_output)

    image_conv_pose_xyz = TimeDistributed(
        Conv2D(1024, (1, 1), padding='same', name='conv_pose_xyz'))(model_output)
    image_conv_pose_xyz_flat = TimeDistributed(Flatten())(image_conv_pose_xyz)

    image_conv_pose_wpqr = TimeDistributed(
        Conv2D(1024, (1, 1), padding='same', name='conv_pose_wpqr'))(model_output)
    image_conv_pose_wpqr_flat = TimeDistributed(Flatten())(image_conv_pose_wpqr)

    # beacon subnet 1
    # NOTE: the .trainable assignments below are applied to output tensors rather than
    # layer objects, so Keras ignores them; freeze the layers themselves for any effect.
    beacon_input = Input(shape=(None, num_beacon, 1, 1), name='input_2')
    beacon_icp1_out1 = TimeDistributed(
        Conv2D(16, (1, 1), padding='same', activation='relu',
               name='beacon_icp1_out1'))(beacon_input)
    beacon_icp1_out1.trainable = False

    # beacon subnet 2
    beacon_icp4_out1 = TimeDistributed(
        Conv2D(16, (1, 1), padding='same', activation='relu',
               name='beacon_icp4_out1'))(beacon_icp1_out1)
    beacon_icp4_out1.trainable = False

    # beacon subnet 3
    beacon_icp7_out1 = TimeDistributed(
        Conv2D(16, (1, 1), padding='same', activation='relu',
               name='beacon_icp7_out1'))(beacon_icp4_out1)
    beacon_icp7_out1.trainable = False

    beacon_cls3_fc1_flat = TimeDistributed(Flatten())(beacon_icp7_out1)
    beacon_cls3_fc1_flat.trainable = False
    beacon_cls3_fc1_pose = TimeDistributed(
        Dense(2048, activation='relu',
              name='beacon_cls3_fc1_pose'))(beacon_cls3_fc1_flat)
    beacon_cls3_fc1_pose.trainable = False

    # image, beacon classify 3
    image_beacon_cls3_fc1_pose_xyz = concatenate(
        [image_conv_pose_xyz_flat, beacon_cls3_fc1_pose],
        name='image_beacon_cls3_fc1_pose_xyz')
    image_beacon_lstm_xyz = LSTM(
        256, return_sequences=True,
        name='image_beacon_lstm_xyz')(image_beacon_cls3_fc1_pose_xyz)
    image_beacon_lstm_dense_xyz = TimeDistributed(
        Dense(128, activation='relu'),
        name='image_beacon_lstm_dense_xyz')(image_beacon_lstm_xyz)
    image_beacon_lstm_pose_xyz = TimeDistributed(
        Dense(3), name='image_beacon_lstm_pose_xyz')(image_beacon_lstm_dense_xyz)

    image_beacon_cls3_fc1_pose_wpqr = concatenate(
        [image_conv_pose_wpqr_flat, beacon_cls3_fc1_pose],
        name='image_beacon_cls3_fc1_pose_wpqr')
    image_beacon_lstm_wpqr = LSTM(
        256, return_sequences=True,
        name='image_beacon_lstm_wpqr')(image_beacon_cls3_fc1_pose_wpqr)
    image_beacon_lstm_dense_wpqr = TimeDistributed(
        Dense(128, activation='relu'),
        name='image_beacon_lstm_dense_wpqr')(image_beacon_lstm_wpqr)
    image_beacon_lstm_pose_wpqr = TimeDistributed(
        Dense(4), name='image_beacon_lstm_pose_wpqr')(image_beacon_lstm_dense_wpqr)

    image_beacon_posenet = Model(
        inputs=[image_input, beacon_input],
        outputs=[image_beacon_lstm_pose_xyz, image_beacon_lstm_pose_wpqr])

    if image_beacon_weights_path:
        print("start load image beacon network weights")
        image_beacon_weights_path_ext = os.path.splitext(image_beacon_weights_path)[-1]
        if image_beacon_weights_path_ext == ".npy":
            weights_data = np.load(image_beacon_weights_path).item()
            for layer in image_beacon_posenet.layers:
                if layer.name in weights_data.keys():
                    layer_weights = weights_data[layer.name]
                    layer.set_weights((layer_weights['weights'], layer_weights['biases']))
            print("finish load image beacon network weights")
        elif image_beacon_weights_path_ext == ".h5":
            image_beacon_posenet.load_weights(image_beacon_weights_path, by_name=True)
            print("finish load image beacon network weights")
        else:
            print("invalid weight file : " + image_beacon_weights_path)
            sys.exit()

    if not trainable:
        for layer in image_beacon_posenet.layers:
            layer.trainable = False

    return image_beacon_posenet