def build_network(self, inputs, phase_train=True, nclass=1001):
  try:
    from official.recommendation import neumf_model  # pylint: disable=g-import-not-at-top
  except ImportError as e:
    if 'neumf_model' not in str(e):
      raise
    raise ImportError('To use the experimental NCF model, you must clone the '
                      'repo https://github.com/tensorflow/models and add '
                      'tensorflow/models to the PYTHONPATH.')
  del nclass
  users, items, _ = inputs
  params = {
      'num_users': _NUM_USERS_20M,
      'num_items': _NUM_ITEMS_20M,
      'model_layers': (256, 256, 128, 64),
      'mf_dim': 64,
      'mf_regularization': 0,
      'mlp_reg_layers': (0, 0, 0, 0),
      'use_tpu': False
  }
  if self.data_type == tf.float32:
    keras_model = neumf_model.construct_model(users, items, params)
    logits = keras_model.output
  else:
    assert self.data_type == tf.float16
    tf.keras.backend.set_floatx('float16')
    # We cannot rely on the variable_scope's fp16 custom getter here, because
    # the NCF model uses keras layers, which ignore variable scopes. So we use
    # a variable_creator_scope instead.
    with tf.variable_creator_scope(_fp16_variable_creator):
      keras_model = neumf_model.construct_model(users, items, params)
    logits = tf.cast(keras_model.output, tf.float32)
  return model.BuildNetworkResult(logits=logits, extra_info=None)
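The function above references `_fp16_variable_creator` without defining it. Below is a minimal sketch of such a creator, assuming the usual pattern of storing variables in float32 for numerical stability while exposing a float16 cast to the Keras layers; the exact helper used by tf_cnn_benchmarks may differ.

# Illustrative sketch only: one plausible `_fp16_variable_creator`.
# Assumption: variables requested as float16 are backed by float32 storage
# and handed back to the layers as a float16 cast.
def _fp16_variable_creator(next_creator, **kwargs):
  """Creates float32 variables and casts them to float16 when requested."""
  dtype = kwargs.get('dtype', None)
  initial_value = kwargs.get('initial_value', None)
  if dtype is None and initial_value is not None and not callable(initial_value):
    dtype = initial_value.dtype
  if dtype == tf.float16:
    if callable(initial_value):
      kwargs['initial_value'] = lambda: tf.cast(initial_value(), tf.float32)
    else:
      kwargs['initial_value'] = tf.cast(initial_value, tf.float32)
    kwargs['dtype'] = tf.float32
    variable = next_creator(**kwargs)
    # Expose the float32 variable to the layers as a float16 tensor.
    return tf.cast(variable, dtype=tf.float16)
  return next_creator(**kwargs)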
def _get_keras_model(params): """Constructs and returns the model.""" batch_size = params['batch_size'] user_input = tf.keras.layers.Input(shape=(), batch_size=batch_size, name=movielens.USER_COLUMN, dtype=rconst.USER_DTYPE) item_input = tf.keras.layers.Input(shape=(), batch_size=batch_size, name=movielens.ITEM_COLUMN, dtype=rconst.ITEM_DTYPE) base_model = neumf_model.construct_model(user_input, item_input, params) base_model_output = base_model.output zeros = tf.keras.layers.Lambda(lambda x: x * 0)(base_model_output) softmax_logits = tf.keras.layers.concatenate([zeros, base_model_output], axis=-1) keras_model = tf.keras.Model(inputs=[user_input, item_input], outputs=softmax_logits) keras_model.summary() return keras_model
def _get_keras_model(params): """Constructs and returns the model.""" batch_size = params['batch_size'] user_input = tf.keras.layers.Input( shape=(), batch_size=batch_size, name=movielens.USER_COLUMN, dtype=rconst.USER_DTYPE) item_input = tf.keras.layers.Input( shape=(), batch_size=batch_size, name=movielens.ITEM_COLUMN, dtype=rconst.ITEM_DTYPE) base_model = neumf_model.construct_model(user_input, item_input, params) base_model_output = base_model.output zeros = tf.keras.layers.Lambda( lambda x: x * 0)(base_model_output) softmax_logits = tf.keras.layers.concatenate( [zeros, base_model_output], axis=-1) keras_model = tf.keras.Model( inputs=[user_input, item_input], outputs=softmax_logits) keras_model.summary() return keras_model
def build_network(self, inputs, phase_train=True, nclass=1001,
                  data_type=tf.float32):
  try:
    from official.recommendation import neumf_model  # pylint: disable=g-import-not-at-top
  except ImportError as e:
    if 'neumf_model' not in str(e):
      raise
    raise ImportError('To use the experimental NCF model, you must clone the '
                      'repo https://github.com/tensorflow/models and add '
                      'tensorflow/models to the PYTHONPATH.')
  del nclass
  if data_type != tf.float32:
    raise ValueError('NCF model only supports float32 for now.')
  users, items = inputs
  params = {
      'num_users': _NUM_USERS_20M,
      'num_items': _NUM_ITEMS_20M,
      'model_layers': (256, 256, 128, 64),
      'mf_dim': 64,
      'mf_regularization': 0,
      'mlp_reg_layers': (0, 0, 0, 0),
  }
  logits = neumf_model.construct_model(users, items, params)
  return model.BuildNetworkResult(logits=logits, extra_info=None)
def build_network(self, images, phase_train=True, nclass=1001,
                  data_type=tf.float32):
  try:
    from official.recommendation import neumf_model  # pylint: disable=g-import-not-at-top
  except ImportError:
    raise ImportError('To use the experimental NCF model, you must clone the '
                      'repo https://github.com/tensorflow/models and add '
                      'tensorflow/models to the PYTHONPATH.')
  del nclass
  if data_type != tf.float32:
    raise ValueError('NCF model only supports float32 for now.')

  batch_size = int(images.shape[0])

  # Create synthetic users and items. tf_cnn_benchmarks only passes images to
  # this function, which we cannot use in the NCF model. We use functions as
  # initializers for XLA compatibility.
  def users_init_val():
    return tf.random_uniform((batch_size,), minval=0, maxval=_NUM_USERS_20M,
                             dtype=tf.int32)
  users = tf.Variable(users_init_val, dtype=tf.int32, trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES],
                      name='synthetic_users')

  def items_init_val():
    return tf.random_uniform((batch_size,), minval=0, maxval=_NUM_ITEMS_20M,
                             dtype=tf.int32)
  items = tf.Variable(items_init_val, dtype=tf.int32, trainable=False,
                      collections=[tf.GraphKeys.LOCAL_VARIABLES],
                      name='synthetic_items')

  params = {
      'num_users': _NUM_USERS_20M,
      'num_items': _NUM_ITEMS_20M,
      'model_layers': (256, 256, 128, 64),
      'mf_dim': 64,
      'mf_regularization': 0,
      'mlp_reg_layers': (0, 0, 0, 0),
  }
  logits = neumf_model.construct_model(users, items, params)
  return model.BuildNetworkResult(logits=logits, extra_info=None)
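As a rough usage sketch of the variant above: the benchmark harness hands this method an image batch whose only relevant property is its batch dimension, since users and items are synthesized internally. The class name `NcfModel` and the dummy image shape below are assumptions for illustration, not taken from the snippet.

# Hypothetical usage sketch; only images.shape[0] matters to build_network.
images = tf.zeros((1024, 224, 224, 3))  # dummy batch, assumed shape
ncf = NcfModel()                        # assumed wrapper class name
result = ncf.build_network(images, phase_train=True, data_type=tf.float32)
logits = result.logits                  # per-example NCF scores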
def _get_keras_model(params): """Constructs and returns the model.""" batch_size = params["batch_size"] user_input = tf.keras.layers.Input(shape=(1, ), name=movielens.USER_COLUMN, dtype=tf.int32) item_input = tf.keras.layers.Input(shape=(1, ), name=movielens.ITEM_COLUMN, dtype=tf.int32) valid_pt_mask_input = tf.keras.layers.Input(shape=(1, ), name=rconst.VALID_POINT_MASK, dtype=tf.bool) dup_mask_input = tf.keras.layers.Input(shape=(1, ), name=rconst.DUPLICATE_MASK, dtype=tf.int32) label_input = tf.keras.layers.Input(shape=(1, ), name=rconst.TRAIN_LABEL_KEY, dtype=tf.bool) base_model = neumf_model.construct_model(user_input, item_input, params) logits = base_model.output zeros = tf.keras.layers.Lambda(lambda x: x * 0)(logits) softmax_logits = tf.keras.layers.concatenate([zeros, logits], axis=-1) # Custom training loop calculates loss and metric as a part of # training/evaluation step function. if not params["keras_use_ctl"]: softmax_logits = MetricLayer( params["match_mlperf"])([softmax_logits, dup_mask_input]) # TODO(b/134744680): Use model.add_loss() instead once the API is well # supported. softmax_logits = LossLayer(batch_size)( [softmax_logits, label_input, valid_pt_mask_input]) keras_model = tf.keras.Model(inputs={ movielens.USER_COLUMN: user_input, movielens.ITEM_COLUMN: item_input, rconst.VALID_POINT_MASK: valid_pt_mask_input, rconst.DUPLICATE_MASK: dup_mask_input, rconst.TRAIN_LABEL_KEY: label_input }, outputs=softmax_logits) keras_model.summary() return keras_model
def _get_keras_model(params): """Constructs and returns the model.""" batch_size = params["batch_size"] # The input layers are of shape (1, batch_size), to match the size of the # input data. The first dimension is needed because the input data are # required to be batched to use distribution strategies, and in this case, it # is designed to be of batch_size 1 for each replica. user_input = tf.keras.layers.Input( shape=(batch_size,), batch_size=params["batches_per_step"], name=movielens.USER_COLUMN, dtype=tf.int32) item_input = tf.keras.layers.Input( shape=(batch_size,), batch_size=params["batches_per_step"], name=movielens.ITEM_COLUMN, dtype=tf.int32) base_model = neumf_model.construct_model( user_input, item_input, params, need_strip=True) base_model_output = base_model.output logits = tf.keras.layers.Lambda( lambda x: tf.expand_dims(x, 0), name="logits")(base_model_output) zeros = tf.keras.layers.Lambda( lambda x: x * 0)(logits) softmax_logits = tf.keras.layers.concatenate( [zeros, logits], axis=-1) keras_model = tf.keras.Model( inputs=[user_input, item_input], outputs=softmax_logits) keras_model.summary() return keras_model
def _get_keras_model(params): """Constructs and returns the model.""" batch_size = params['batch_size'] # The input layers are of shape (1, batch_size), to match the size of the # input data. The first dimension is needed because the input data are # required to be batched to use distribution strategies, and in this case, it # is designed to be of batch_size 1 for each replica. user_input = tf.keras.layers.Input( shape=(batch_size,), batch_size=params["batches_per_step"], name=movielens.USER_COLUMN, dtype=tf.int32) item_input = tf.keras.layers.Input( shape=(batch_size,), batch_size=params["batches_per_step"], name=movielens.ITEM_COLUMN, dtype=tf.int32) base_model = neumf_model.construct_model( user_input, item_input, params, need_strip=True) base_model_output = base_model.output logits = tf.keras.layers.Lambda( lambda x: tf.expand_dims(x, 0), name="logits")(base_model_output) zeros = tf.keras.layers.Lambda( lambda x: x * 0)(logits) softmax_logits = tf.keras.layers.concatenate( [zeros, logits], axis=-1) keras_model = tf.keras.Model( inputs=[user_input, item_input], outputs=softmax_logits) keras_model.summary() return keras_model
def _get_keras_model(params): """Constructs and returns the model.""" batch_size = params["batch_size"] # The input layers are of shape (1, batch_size), to match the size of the # input data. The first dimension is needed because the input data are # required to be batched to use distribution strategies, and in this case, it # is designed to be of batch_size 1 for each replica. user_input = tf.keras.layers.Input( shape=(batch_size,), batch_size=params["batches_per_step"], name=movielens.USER_COLUMN, dtype=tf.int32) item_input = tf.keras.layers.Input( shape=(batch_size,), batch_size=params["batches_per_step"], name=movielens.ITEM_COLUMN, dtype=tf.int32) valid_pt_mask_input = tf.keras.layers.Input( shape=(batch_size,), batch_size=params["batches_per_step"], name=rconst.VALID_POINT_MASK, dtype=tf.bool) dup_mask_input = tf.keras.layers.Input( shape=(batch_size,), batch_size=params["batches_per_step"], name=rconst.DUPLICATE_MASK, dtype=tf.int32) label_input = tf.keras.layers.Input( shape=(batch_size, 1), batch_size=params["batches_per_step"], name=rconst.TRAIN_LABEL_KEY, dtype=tf.bool) base_model = neumf_model.construct_model( user_input, item_input, params, need_strip=True) base_model_output = base_model.output logits = tf.keras.layers.Lambda( lambda x: tf.expand_dims(x, 0), name="logits")(base_model_output) zeros = tf.keras.layers.Lambda( lambda x: x * 0)(logits) softmax_logits = tf.keras.layers.concatenate( [zeros, logits], axis=-1) softmax_logits = MetricLayer(params)([softmax_logits, dup_mask_input]) keras_model = tf.keras.Model( inputs={ movielens.USER_COLUMN: user_input, movielens.ITEM_COLUMN: item_input, rconst.VALID_POINT_MASK: valid_pt_mask_input, rconst.DUPLICATE_MASK: dup_mask_input, rconst.TRAIN_LABEL_KEY: label_input}, outputs=softmax_logits) loss_obj = tf.keras.losses.SparseCategoricalCrossentropy( from_logits=True, reduction="sum") keras_model.add_loss(loss_obj( y_true=label_input, y_pred=softmax_logits, sample_weight=valid_pt_mask_input) * 1.0 / batch_size) keras_model.summary() return keras_model