def _clone_and_build_model(mode, keras_model, custom_objects,
                           features=None, labels=None):
    """Clone and build the given keras_model.

    Args:
      mode: training mode.
      keras_model: an instance of compiled keras model.
      custom_objects: Dictionary for custom objects.
      features: Dict of tensors.
      labels: Dict of tensors, or single tensor instance.

    Returns:
      The newly built model.
    """
    # Set to True during training, False for inference.
    K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)

    # Clone keras model.
    input_tensors = None if features is None else _create_ordered_io(
        keras_model, features)
    if custom_objects:
        with CustomObjectScope(custom_objects):
            model = models.clone_model(keras_model, input_tensors=input_tensors)
    else:
        model = models.clone_model(keras_model, input_tensors=input_tensors)

    # Compile/Build model
    if mode is model_fn_lib.ModeKeys.PREDICT and not model.built:
        model.build()
    else:
        optimizer_config = keras_model.optimizer.get_config()
        optimizer = keras_model.optimizer.__class__.from_config(optimizer_config)
        optimizer.iterations = training_util.get_or_create_global_step()

        # Get list of outputs.
        if labels is None:
            target_tensors = None
        elif isinstance(labels, dict):
            target_tensors = _create_ordered_io(keras_model, labels, is_input=False)
        else:
            target_tensors = [
                _cast_tensor_to_floatx(
                    sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(labels))
            ]

        model.compile(
            optimizer,
            keras_model.loss,
            metrics=keras_model.metrics,
            loss_weights=keras_model.loss_weights,
            sample_weight_mode=keras_model.sample_weight_mode,
            weighted_metrics=keras_model.weighted_metrics,
            target_tensors=target_tensors)

    if isinstance(model, models.Sequential):
        model = model.model
    return model
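# Standalone illustration (assumption): a toy model with a custom activation, showing why
# clone_model is wrapped in CustomObjectScope above. 'my_relu6' and the architecture are
# made up; exact serialization behaviour varies slightly across Keras/TF versions.
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.utils import CustomObjectScope


def my_relu6(x):
    return tf.nn.relu6(x)


inputs = tf.keras.Input(shape=(4,))
outputs = layers.Dense(1)(layers.Dense(8, activation=my_relu6)(inputs))
original = tf.keras.Model(inputs, outputs)

# clone_model rebuilds every layer from its serialized config, so custom objects referenced
# by name must be resolvable; registering them mirrors the `if custom_objects:` branch above.
with CustomObjectScope({'my_relu6': my_relu6}):
    clone = models.clone_model(original)
clone.set_weights(original.get_weights())  # clone_model re-initializes weights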
def convert(in_path, out_path):
    """Convert any Keras model to the frugally-deep model format."""

    assert K.backend() == "tensorflow"
    assert K.floatx() == "float32"
    assert K.image_data_format() == 'channels_last'

    print('loading {}'.format(in_path))
    with CustomObjectScope({'relu6': mobilenet.relu6,
                            'DepthwiseConv2D': mobilenet.DepthwiseConv2D}):
        model = load_model(in_path)

    # Force creation of underlying functional model.
    # see: https://github.com/fchollet/keras/issues/8136
    # Loss and optimizer type do not matter, since we don't train the model.
    model.compile(loss='mse', optimizer='sgd')

    model = convert_sequential_to_model(model)
    test_data = gen_test_data(model)

    json_output = {}
    json_output['architecture'] = json.loads(model.to_json())
    json_output['image_data_format'] = K.image_data_format()
    for depth in range(1, 3, 1):
        json_output['conv2d_valid_offset_depth_' + str(depth)] =\
            check_operation_offset(depth, offset_conv2d_eval, 'valid')
        json_output['conv2d_same_offset_depth_' + str(depth)] =\
            check_operation_offset(depth, offset_conv2d_eval, 'same')
        json_output['separable_conv2d_valid_offset_depth_' + str(depth)] =\
            check_operation_offset(depth, offset_sep_conv2d_eval, 'valid')
        json_output['separable_conv2d_same_offset_depth_' + str(depth)] =\
            check_operation_offset(depth, offset_sep_conv2d_eval, 'same')
    json_output['max_pooling_2d_valid_offset'] =\
        check_operation_offset(1, conv2d_offset_max_pool_eval, 'valid')
    json_output['max_pooling_2d_same_offset'] =\
        check_operation_offset(1, conv2d_offset_max_pool_eval, 'same')
    json_output['average_pooling_2d_valid_offset'] =\
        check_operation_offset(1, conv2d_offset_average_pool_eval, 'valid')
    json_output['average_pooling_2d_same_offset'] =\
        check_operation_offset(1, conv2d_offset_average_pool_eval, 'same')
    json_output['input_shapes'] = get_shapes(test_data['inputs'])
    json_output['output_shapes'] = get_shapes(test_data['outputs'])
    json_output['tests'] = [test_data]
    json_output['trainable_params'] = get_all_weights(model)

    print('writing {}'.format(out_path))
    write_text_file(out_path, json.dumps(
        json_output, allow_nan=False, indent=2, sort_keys=True))
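# Usage sketch (assumption): this is not necessarily the project's actual CLI; it only shows
# that convert() takes a saved Keras .h5 model as input and a destination path for the
# frugally-deep JSON output.
import sys

if __name__ == '__main__':
    in_path, out_path = sys.argv[1], sys.argv[2]  # e.g. keras_model.h5 fdeep_model.json
    convert(in_path, out_path)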
def evaluate_test(model_path, model_type, test_dset, batch_size=64,
                  confusion_mat=False):
    x_test, y_media_test, y_emotion_test = test_dset

    if model_type == "mobile":
        # model = tf.keras.models.load_model(model_path,
        #     custom_objects={'relu6': tf.keras.applications.mobilenet.relu6,
        #                     'DepthwiseConv2D': tf.keras.applications.mobilenet.DepthwiseConv2D})
        from tensorflow.python.keras._impl.keras.utils.generic_utils import CustomObjectScope
        from tensorflow.python.keras._impl.keras.applications import mobilenet
        from tensorflow.python.keras._impl.keras.models import load_model
        with CustomObjectScope({'relu6': mobilenet.relu6,
                                'DepthwiseConv2D': mobilenet.DepthwiseConv2D}):
            model = load_model(model_path)
    else:
        model = tf.keras.models.load_model(model_path)

    results = model.evaluate(
        x_test,
        {'output_media': y_media_test, 'output_emotion': y_emotion_test},
        batch_size=batch_size,
        verbose=True)
    for i in range(len(results)):
        print(model.metrics_names[i])
        print(results[i])

    if confusion_mat:
        y_media_pred, y_emotion_pred = model.predict(x_test, batch_size=batch_size)

        y_media_test_label = np.argmax(y_media_test, axis=1)
        y_emotion_test_label = np.argmax(y_emotion_test, axis=1)
        y_media_pred_label = np.argmax(y_media_pred, axis=1)
        y_emotion_pred_label = np.argmax(y_emotion_pred, axis=1)

        cm_media = sklearn.metrics.confusion_matrix(
            y_media_test_label, y_media_pred_label)
        cm_emotion = sklearn.metrics.confusion_matrix(
            y_emotion_test_label, y_emotion_pred_label)
        print("Confusion matrix for media:")
        print(cm_media)
        print("Confusion matrix for emotion:")
        print(cm_emotion)
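# Usage sketch (assumption): the model path, image shape, and class counts are hypothetical;
# the point is the expected layout of test_dset = (x_test, y_media_test, y_emotion_test)
# with one-hot labels for both heads.
import numpy as np

num_samples, num_media_classes, num_emotion_classes = 32, 5, 7
x_test = np.random.rand(num_samples, 224, 224, 3).astype('float32')
y_media_test = np.eye(num_media_classes)[
    np.random.randint(num_media_classes, size=num_samples)]
y_emotion_test = np.eye(num_emotion_classes)[
    np.random.randint(num_emotion_classes, size=num_samples)]

evaluate_test('checkpoints/mobile_multitask.h5',  # hypothetical path
              model_type='mobile',
              test_dset=(x_test, y_media_test, y_emotion_test),
              batch_size=16,
              confusion_mat=True)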
def load_ensemble(ensemble_folder):
    print("Load models for ensemble...")
    models = []
    from tensorflow.python.keras._impl.keras.utils.generic_utils import CustomObjectScope
    from tensorflow.python.keras._impl.keras.applications import mobilenet
    from tensorflow.python.keras._impl.keras.models import load_model
    with CustomObjectScope({'relu6': mobilenet.relu6,
                            'DepthwiseConv2D': mobilenet.DepthwiseConv2D}):
        # Give each ensemble member a unique name so model names do not clash.
        for i, model_name in enumerate(os.listdir(ensemble_folder), start=1):
            model_path = os.path.join(ensemble_folder, model_name)
            model = load_model(model_path)
            model._base_name = "model_" + str(i)
            model._name = "model_" + str(i)
            models.append(model)
    return models
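# Follow-up sketch (assumption): one common way to use the returned list is to average the
# members' predictions. The folder path and input batch are hypothetical, and this assumes
# each member has a single output head.
import numpy as np

ensemble = load_ensemble('saved_models/ensemble')  # hypothetical folder
x_batch = np.random.rand(8, 224, 224, 3).astype('float32')

# Each member predicts independently; the mean over members is the ensemble prediction.
member_preds = np.stack([m.predict(x_batch) for m in ensemble], axis=0)
ensemble_pred = member_preds.mean(axis=0)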
def _clone_and_build_model(mode, keras_model, custom_objects,
                           features=None, labels=None):
    """Clone and build the given keras_model.

    Args:
      mode: training mode.
      keras_model: an instance of compiled keras model.
      custom_objects: Dictionary for custom objects.
      features: Dict of tensors.
      labels: Dict of tensors, or single tensor instance.

    Returns:
      The newly built model.
    """
    # Set to True during training, False for inference.
    K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)

    # Get list of inputs.
    if features is None:
        input_tensors = None
    else:
        input_tensors = _create_ordered_io(keras_model,
                                           estimator_io=features,
                                           is_input=True)
    # Get list of outputs.
    if labels is None:
        target_tensors = None
    elif isinstance(labels, dict):
        target_tensors = _create_ordered_io(keras_model,
                                            estimator_io=labels,
                                            is_input=False)
    else:
        target_tensors = [_convert_tensor(labels)]

    if keras_model._is_graph_network:
        if custom_objects:
            with CustomObjectScope(custom_objects):
                model = models.clone_model(keras_model,
                                           input_tensors=input_tensors)
        else:
            model = models.clone_model(keras_model, input_tensors=input_tensors)
    else:
        model = keras_model
        _in_place_subclassed_model_reset(model)
        if input_tensors is not None:
            model._set_inputs(input_tensors)

    # Compile/Build model
    if mode is model_fn_lib.ModeKeys.PREDICT:
        if isinstance(model, models.Sequential):
            model.build()
    else:
        if isinstance(keras_model.optimizer, optimizers.TFOptimizer):
            optimizer = keras_model.optimizer
        else:
            optimizer_config = keras_model.optimizer.get_config()
            optimizer = keras_model.optimizer.__class__.from_config(
                optimizer_config)
        optimizer.iterations = training_util.get_or_create_global_step()

        model.compile(
            optimizer,
            keras_model.loss,
            metrics=keras_model.metrics,
            loss_weights=keras_model.loss_weights,
            sample_weight_mode=keras_model.sample_weight_mode,
            weighted_metrics=keras_model.weighted_metrics,
            target_tensors=target_tensors)
    return model
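# Context sketch (assumption): _clone_and_build_model appears to be the internal helper
# behind tf.keras.estimator.model_to_estimator; from user code, the typical entry point
# looks roughly like this. The toy architecture and model_dir are hypothetical.
import tensorflow as tf

keras_model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dense(1),
])
keras_model.compile(optimizer='sgd', loss='mse')

# custom_objects is forwarded to the cloning step so custom layers/activations resolve
# inside the CustomObjectScope branch above.
estimator = tf.keras.estimator.model_to_estimator(
    keras_model=keras_model,
    custom_objects=None,
    model_dir='/tmp/keras_estimator')  # hypothetical directory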