    def __init__(self,
                 batch_size,
                 gpus,
                 init_value,
                 name_,
                 embedding_type,
                 optimizer_type,
                 max_vocabulary_size_per_gpu,
                 opt_hparams,
                 update_type,
                 atomic_update,
                 scaler,
                 slot_num,
                 max_nnz,
                 max_feature_num,
                 embedding_vec_size,
                 combiner,
                 num_dense_layers,
                 input_buffer_reset=False):
        super(PluginSparseModel, self).__init__()

        self.num_dense_layers = num_dense_layers
        self.input_buffer_reset = input_buffer_reset

        self.batch_size = batch_size
        self.slot_num = slot_num
        self.embedding_vec_size = embedding_vec_size
        self.gpus = gpus

        # init() must be called only once; it creates the resource manager for the embedding plugin.
        hugectr_tf_ops_v2.init(visible_gpus=gpus, seed=0, key_type='int64', value_type='float',
                               batch_size=batch_size, batch_size_eval=len(gpus))

        # create one embedding layer; its embedding_name stays unique even when
        # there is more than one embedding layer.
        self.embedding_name = hugectr_tf_ops_v2.create_embedding(init_value=init_value,
                                name_=name_, embedding_type=embedding_type, optimizer_type=optimizer_type,
                                max_vocabulary_size_per_gpu=max_vocabulary_size_per_gpu, opt_hparams=opt_hparams,
                                update_type=update_type, atomic_update=atomic_update, scaler=scaler, slot_num=slot_num,
                                max_nnz=max_nnz, max_feature_num=max_feature_num, embedding_vec_size=embedding_vec_size,
                                combiner=combiner)

        # define the dense (fully connected) part of this model
        self.dense_layers = []
        for _ in range(self.num_dense_layers - 1):
            self.dense_layers.append(tf.keras.layers.Dense(units=1024, activation='relu'))

        self.out_layer = tf.keras.layers.Dense(units=1, activation='sigmoid', use_bias=True,
                                               kernel_initializer='glorot_normal',
                                               bias_initializer='glorot_normal')
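
For reference, a minimal sketch of how the class above might be instantiated. Only the parameter names come from the snippet; every literal value (GPU list, optimizer settings, slot counts, and so on) is an illustrative assumption, not a value taken from the original project.

# Hypothetical usage sketch -- all argument values below are assumptions for illustration.
model = PluginSparseModel(
    batch_size=16384,
    gpus=[0, 1, 2, 3],
    init_value=False,                       # assumed: let the plugin initialize the table itself
    name_='sparse_embedding',
    embedding_type='distributed',
    optimizer_type='Adam',
    max_vocabulary_size_per_gpu=2 ** 20,
    opt_hparams=[0.001, 0.9, 0.999, 1e-5],  # assumed layout: [lr, beta1, beta2, epsilon]
    update_type='Local',
    atomic_update=True,
    scaler=1.0,
    slot_num=26,
    max_nnz=1,
    max_feature_num=26,
    embedding_vec_size=32,
    combiner='sum',
    num_dense_layers=7,
    input_buffer_reset=False)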
Example #2
            def __init__(self, init_value, name_, embedding_type,
                         optimizer_type, max_vocabulary_size_per_gpu,
                         opt_hparams, update_type, atomic_update, scaler,
                         slot_num, max_nnz, max_feature_num,
                         embedding_vec_size, combiner):
                super(TestModel, self).__init__()

                # input_buffer_reset is only needed for the "distributed" embedding type
                self.input_buffer_reset = (embedding_type == "distributed")

                self.embedding_name = hugectr_tf_ops_v2.create_embedding(
                    init_value=init_value,
                    name_=name_,
                    embedding_type=embedding_type,
                    optimizer_type=optimizer_type,
                    max_vocabulary_size_per_gpu=max_vocabulary_size_per_gpu,
                    opt_hparams=opt_hparams,
                    update_type=update_type,
                    atomic_update=atomic_update,
                    scaler=scaler,
                    slot_num=slot_num,
                    max_nnz=max_nnz,
                    max_feature_num=max_feature_num,
                    embedding_vec_size=embedding_vec_size,
                    combiner=combiner)
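
Unlike PluginSparseModel, this TestModel does not call hugectr_tf_ops_v2.init() itself, so the plugin has to be initialized exactly once before the model is constructed. A hedged sketch of that setup, reusing only the init() signature shown in these snippets; the GPU list and batch sizes are illustrative assumptions:

# Hypothetical setup code -- the values below are assumptions for illustration.
gpus = [0]
hugectr_tf_ops_v2.init(visible_gpus=gpus, seed=0, key_type='int64',
                       value_type='float', batch_size=1024,
                       batch_size_eval=len(gpus))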
    def __init__(self,
                 batch_size,
                 gpus,
                 init_value,
                 name_,
                 embedding_type,
                 optimizer_type,
                 max_vocabulary_size_per_gpu,
                 opt_hparams,
                 update_type,
                 atomic_update,
                 scaler,
                 slot_num,
                 max_nnz,
                 max_feature_num,
                 embedding_vec_size,
                 combiner,
                 num_dense_layers,
                 input_buffer_reset=False):
        super(PluginSparseModel, self).__init__()

        self.num_dense_layers = num_dense_layers
        self.input_buffer_reset = input_buffer_reset

        self.batch_size = batch_size
        self.slot_num = slot_num
        self.embedding_vec_size = embedding_vec_size
        self.gpus = gpus

        # init() must be called only once; it creates the resource manager for the embedding plugin.
        hugectr_tf_ops_v2.init(visible_gpus=gpus,
                               seed=0,
                               key_type='int64',
                               value_type='float',
                               batch_size=batch_size,
                               batch_size_eval=len(gpus))

        # create one embedding layer; its embedding_name stays unique even when there is more than one embedding layer.
        self.embedding_name = hugectr_tf_ops_v2.create_embedding(
            init_value=init_value,
            name_=name_,
            embedding_type=embedding_type,
            optimizer_type=optimizer_type,
            max_vocabulary_size_per_gpu=max_vocabulary_size_per_gpu,
            opt_hparams=opt_hparams,
            update_type=update_type,
            atomic_update=atomic_update,
            scaler=scaler,
            slot_num=slot_num,
            max_nnz=max_nnz,
            max_feature_num=max_feature_num,
            embedding_vec_size=embedding_vec_size,
            combiner=combiner)

        # define other parts of this DNN model
        self.dense_layers = []
        for _ in range(self.num_dense_layers - 1):
            self.dense_layers.append(
                tf.keras.layers.Dense(units=1024, activation='relu'))

        self.out_layer = tf.keras.layers.Dense(
            units=1,
            activation='sigmoid',
            use_bias=True,
            kernel_initializer='glorot_normal',
            bias_initializer='glorot_normal')
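
The snippets above only show the constructors. As a rough sketch (not the original repository's call()/forward code), the dense part built in __init__ could be applied to the embedding output like this, assuming the HugeCTR embedding lookup has already produced a tensor of shape [batch_size, slot_num, embedding_vec_size]:

import tensorflow as tf

def dense_forward(model, embedding_output):
    # Hypothetical forward pass over the dense part of PluginSparseModel only;
    # the embedding lookup that produces `embedding_output` is assumed to happen
    # elsewhere via the HugeCTR plugin and is not shown in this section.
    hidden = tf.reshape(
        embedding_output,
        [model.batch_size, model.slot_num * model.embedding_vec_size])
    for layer in model.dense_layers:    # (num_dense_layers - 1) hidden ReLU layers
        hidden = layer(hidden)
    return model.out_layer(hidden)      # single sigmoid unit -> probability in [0, 1]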