def test_serializing_loss_class():
    orig_loss_class = MSE_MAE_loss(0.3)
    with custom_object_scope({'MSE_MAE_loss': MSE_MAE_loss}):
        serialized = losses.serialize(orig_loss_class)

    with custom_object_scope({'MSE_MAE_loss': MSE_MAE_loss}):
        deserialized = losses.deserialize(serialized)

    assert isinstance(deserialized, MSE_MAE_loss)
    assert deserialized.mse_fraction == 0.3
def test_serializing_model_with_loss_class(tmpdir):
    model_filename = str(tmpdir / 'custom_loss.hdf')

    with custom_object_scope({'MSE_MAE_loss': MSE_MAE_loss}):
        loss = MSE_MAE_loss(0.3)
        inputs = keras.layers.Input((2,))
        outputs = keras.layers.Dense(1, name='model_output')(inputs)
        model = keras.models.Model(inputs, outputs)
        model.compile(optimizer='sgd', loss={'model_output': loss})
        model.fit(np.random.rand(256, 2), np.random.rand(256, 1))
        model.save(model_filename)

    with custom_object_scope({'MSE_MAE_loss': MSE_MAE_loss}):
        loaded_model = keras.models.load_model(model_filename)
        loaded_model.predict(np.random.rand(128, 2))
def test_serializing_model_with_loss_with_custom_object_scope(self, value):
    with generic_utils.custom_object_scope({
        'MyMeanAbsoluteError': MyMeanAbsoluteError,
        'my_mae': my_mae,
        'Bias': testing_utils.Bias,
    }):
        model = _get_multi_io_model()
        model.compile(
            optimizer_v2.gradient_descent.SGD(0.1),
            loss=value,
            run_eagerly=testing_utils.should_run_eagerly())
        history = model.fit([self.x, self.x], [self.y, self.y],
                            batch_size=3,
                            epochs=3,
                            sample_weight=[self.w, self.w])

        # Assert training.
        self.assertAllClose(history.history['loss'], [2., 1.6, 1.2], 1e-3)
        eval_results = model.evaluate([self.x, self.x], [self.y, self.y],
                                      sample_weight=[self.w, self.w])

        if h5py is None:
            return
        model.save(self.model_filename)
        loaded_model = keras.models.load_model(self.model_filename)
        loaded_model.predict([self.x, self.x])
        loaded_eval_results = loaded_model.evaluate(
            [self.x, self.x], [self.y, self.y],
            sample_weight=[self.w, self.w])

        # Assert all evaluation results are the same.
        self.assertAllClose(eval_results, loaded_eval_results, 1e-9)
def __init__(self, mark_model=current_model):
    """Initialization"""
    # A face detector is required for mark detection.
    self.face_detector = FaceDetector()

    self.marks = None

    if mark_model.split(".")[1] == "pb":
        # Get a TensorFlow session ready to do landmark detection.
        # Load a (frozen) TensorFlow model into memory.
        self.cnn_input_size = 64
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(mark_model, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        self.graph = detection_graph
        self.sess = tf.Session(graph=detection_graph)
    else:
        self.cnn_input_size = 64
        # with CustomObjectScope({'tf': tf}):
        with custom_object_scope({
            'smoothL1': smoothL1,
            'relu6': relu6,
            'DepthwiseConv2D': DepthwiseConv2D,
            'mask_weights': mask_weights,
            'tf': tf
        }):
            self.sess = load_model(mark_model)
def test_get_from_module_uses_custom_object():
    get_custom_objects().clear()

    assert (get_from_module("CustomObject", globals(), "test_generic_utils") ==
            CustomObject)
    with pytest.raises(ValueError):
        get_from_module("TestObject", globals(), "test_generic_utils")
    with custom_object_scope({"TestObject": CustomObject}):
        assert (get_from_module("TestObject", globals(), "test_generic_utils") ==
                CustomObject)
def test_one_to_many(self):
    with custom_object_scope({'OneToManyLayer': OneToManyLayer}):
        layer_multi_io_test(
            OneToManyLayer,
            kwargs={'units': 10},
            input_shapes=[(2, 4)],
            expected_output_dtypes=['float32', 'float32'],
            expected_output_shapes=[(None, 10), (None, 10)])
def test_custom_object_scope_adds_objects():
    get_custom_objects().clear()
    assert (len(get_custom_objects()) == 0)
    with custom_object_scope({"Test1": object, "Test2": object},
                             {"Test3": object}):
        assert (len(get_custom_objects()) == 3)
    assert (len(get_custom_objects()) == 0)
def test_serializing_model_with_metric_with_custom_object_scope(self, value):
    def get_instance(x):
        if isinstance(x, str):
            return x
        if isinstance(x, type) and issubclass(x, metrics.Metric):
            return x()
        return x

    metric_input = tf.nest.map_structure(get_instance, value)
    weighted_metric_input = tf.nest.map_structure(get_instance, value)

    with generic_utils.custom_object_scope({
        "MyMeanAbsoluteError": MyMeanAbsoluteError,
        "_my_mae": _my_mae,
        "Bias": test_utils.Bias,
    }):
        model = _get_multi_io_model()
        model.compile(
            optimizer_v2.gradient_descent.SGD(0.1),
            "mae",
            metrics=metric_input,
            weighted_metrics=weighted_metric_input,
            run_eagerly=test_utils.should_run_eagerly(),
        )
        history = model.fit(
            [self.x, self.x],
            [self.y, self.y],
            batch_size=3,
            epochs=3,
            sample_weight=[self.w, self.w],
        )

        # Assert training.
        self.assertAllClose(history.history["loss"], [2.0, 1.6, 1.2], 1e-3)
        eval_results = model.evaluate(
            [self.x, self.x],
            [self.y, self.y],
            sample_weight=[self.w, self.w],
        )

        if h5py is None:
            return
        model.save(self.model_filename)
        loaded_model = keras.models.load_model(self.model_filename)
        loaded_model.predict([self.x, self.x])
        loaded_eval_results = loaded_model.evaluate(
            [self.x, self.x],
            [self.y, self.y],
            sample_weight=[self.w, self.w],
        )

        # Assert all evaluation results are the same.
        self.assertAllClose(eval_results, loaded_eval_results, 1e-9)
def __init__(self, mark_model=current_model):
    """Initialization"""
    self.input_size = 64

    # with CustomObjectScope({'tf': tf}):
    with custom_object_scope({
        'smoothL1': smoothL1,
        'relu6': relu6,
        'DepthwiseConv2D': DepthwiseConv2D,
        'mask_weights': mask_weights,
        'tf': tf
    }):
        self.sess = load_model(mark_model)
def test_custom_objects_scope():
    def custom_fn():
        pass

    class CustomClass(object):
        pass

    with custom_object_scope({'CustomClass': CustomClass,
                              'custom_fn': custom_fn}):
        act = activations.get('custom_fn')
        assert act == custom_fn
        cl = regularizers.get('CustomClass')
        assert cl.__class__ == CustomClass
def test_saving_model_with_custom_object(self):
    with generic_utils.custom_object_scope(), self.cached_session():

        @generic_utils.register_keras_serializable()
        class CustomLoss(losses.MeanSquaredError):
            pass

        model = sequential.Sequential(
            [core.Dense(units=1, input_shape=(1,))])
        model.compile(optimizer='sgd', loss=CustomLoss())
        model.fit(np.zeros([10, 1]), np.zeros([10, 1]))

        temp_dir = self.get_temp_dir()
        filepath = os.path.join(temp_dir, 'saving')
        model.save(filepath)

        # Make sure the model can be correctly loaded back.
        _ = save.load_model(filepath, compile=True)
def test_layer(self):
    with custom_object_scope({'LogitsWithGuidance': LogitsWithGuidance}):
        test_utils.layer_test(
            HierarchicalMultiScaleAttention,
            kwargs={'layer': LogitsWithGuidance(),
                    'scales': ((0.5,), (0.25, 0.5, 2.0)),
                    'filters': 256, 'dropout': 0.},
            input_shape=[2, 128, 128, 3],
            input_dtype='float32',
            expected_output_shape=[None, 128, 128, 2],
            expected_output_dtype='float32'
        )

        mixed_precision.set_global_policy('mixed_float16')
        test_utils.layer_test(
            HierarchicalMultiScaleAttention,
            kwargs={'layer': LogitsWithGuidance(),
                    'scales': ((0.5,), (0.5, 2.0)),
                    'filters': 256, 'dropout': 0.},
            input_shape=[2, 128, 128, 3],
            input_dtype='float16',
            expected_output_shape=[None, 128, 128, 2],
            expected_output_dtype='float16'
        )
       'focus_sigma_reg': None, 'augment': False, 'Epochs': Epochs,
       'batch_size': 512, 'repeats': 1, 'lr_all': 0.1}

sc, hs, model, cb = test_comp(mod, random_sid=0)

callback_records = []
for i in range(len(cb)):
    if isinstance(cb[i], keras_utils.RecordVariable):
        callback_records.append(cb[i].record)

then = datetime.now()
timestr += then.strftime("%Y%m%d-%H%M%S")
filename = 'outputs/Kfocusing/' + dset + '/' + timestr + '_' + mod['neuron'] + '.trained_model.npz'

import keras.utils.generic_utils as gen_util
from Kfocusing import FocusedLayer1D
with gen_util.custom_object_scope({'FocusedLayer1D': FocusedLayer1D}):
    np.savez_compressed(filename, scores=sc, history=hs, model=model)

if mod == 'focused' and save_records:
    # root_to_save = '/home/btek/Dropbox/code/pythoncode/FocusingNeuron/outputs/Kfocusing/vids/'
    file_to_save = filename = 'outputs/Kfocusing/' + dset + '/' + timestr + '_' + mod + '.trained_model_records.npz'
    np.savez_compressed(file_to_save, callbacks=callback_records)
else:
    # cifar-10 focused CNN 256 nhidden, 200 epochs
    pretrained = np.load('outputs/Kfocusing/' + dset + '/20191005-15455320191005-15465520191005-15471020191005-154726_focused.trained_model.npz')

    import keras.utils.generic_utils as gen_util
    from Kfocusing import FocusedLayer1D
    from keras_utils import SGDwithLR
def scope():
    return custom_object_scope({"LayerNormalization": LayerNormalization})
def get_activation(identifier):
    if identifier is None:
        return None
    with custom_object_scope(_get_activations_scope_dict()):
        return activations.get(identifier)
def load_grud_model(file_name):
    from nn_utils import _get_scope_dict

    with custom_object_scope(_get_scope_dict()):
        model = load_model(file_name)
    return model
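For reference, a minimal, self-contained sketch of the pattern the snippets above all rely on: names registered in a custom_object_scope are visible to Keras deserialization, so a model built with a custom object can be reloaded inside the scope. The activation my_swish and the path 'model.h5' are hypothetical, chosen only for this illustration.

# Minimal usage sketch (not taken from the snippets above).
# Assumptions: TensorFlow 2.x; `my_swish` and 'model.h5' are illustrative names.
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import custom_object_scope


def my_swish(x):
    # Hypothetical custom activation baked into the saved model.
    return x * tf.sigmoid(x)


model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation=my_swish, input_shape=(2,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='sgd', loss='mse')
model.fit(np.random.rand(8, 2), np.random.rand(8, 1), verbose=0)
model.save('model.h5')

# Without the scope, load_model raises an unknown-object error for 'my_swish'.
with custom_object_scope({'my_swish': my_swish}):
    reloaded = tf.keras.models.load_model('model.h5')
    reloaded.predict(np.random.rand(4, 2), verbose=0)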