コード例 #1
0
    data_reader_type=hugectr.DataReaderType_t.Norm,
    source=["./data/ml-20m/train_filelist.txt"],
    eval_source="./data/ml-20m/test_filelist.txt",
    check_type=hugectr.Check_t.Non,
    num_workers=10)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global,
                                    beta1=0.25,
                                    beta2=0.5,
                                    epsilon=0.0000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=1,
                  dense_name="dense",
                  data_reader_sparse_param_array=[
                      hugectr.DataReaderSparseParam("data", 1, True, 2)
                  ]))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
        workspace_size_per_gpu_in_mb=49,  # 3 for 1M dataset
        embedding_vec_size=64,
        combiner="sum",
        sparse_embedding_name="mlp_embedding",
        bottom_name="data",
        optimizer=optimizer))
model.add(
    hugectr.DenseLayer(layer_type=hugectr.Layer_t.Reshape,
                       bottom_names=["mlp_embedding"],
コード例 #2
0
ファイル: din_parquet.py プロジェクト: NVIDIA/HugeCTR
    slot_size_array=[
        192403, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 63001, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 801
    ])
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global,
                                    beta1=0.9,
                                    beta2=0.999,
                                    epsilon=0.000000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=0,
                  dense_name="dense",
                  data_reader_sparse_param_array=[
                      hugectr.DataReaderSparseParam("UserID", 1, True, 1),
                      hugectr.DataReaderSparseParam("GoodID", 1, True, 11),
                      hugectr.DataReaderSparseParam("CateID", 1, True, 11)
                  ]))

model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
        workspace_size_per_gpu_in_mb=28,
        embedding_vec_size=18,
        combiner="sum",
        sparse_embedding_name="sparse_embedding_user",
        bottom_name="UserID",
        optimizer=optimizer))
model.add(
コード例 #3
0
                              lr = 0.001,
                              vvgpu = [[0]],
                              repeat_dataset = True)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
                                  source = ["./criteo_data/file_list.txt"],
                                  eval_source = "./criteo_data/file_list_test.txt",
                                  check_type = hugectr.Check_t.Sum)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
                                    update_type = hugectr.Update_t.Global,
                                    beta1 = 0.9,
                                    beta2 = 0.999,
                                    epsilon = 0.0000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
                        dense_dim = 13, dense_name = "dense",
                        data_reader_sparse_param_array = 
                        [hugectr.DataReaderSparseParam("wide_data", 30, True, 1),
                        hugectr.DataReaderSparseParam("deep_data", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, 
                            workspace_size_per_gpu_in_mb = 23,
                            embedding_vec_size = 1,
                            combiner = "sum",
                            sparse_embedding_name = "sparse_embedding2",
                            bottom_name = "wide_data",
                            optimizer = optimizer))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, 
                            workspace_size_per_gpu_in_mb = 358,
                            embedding_vec_size = 16,
                            combiner = "sum",
                            sparse_embedding_name = "sparse_embedding1",
                            bottom_name = "deep_data",
コード例 #4
0
    source=[data_generator_params.source],
    eval_source=data_generator_params.eval_source,
    check_type=data_generator_params.check_type)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global,
                                    beta1=0.9,
                                    beta2=0.999,
                                    epsilon=0.0000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(
        label_dim=data_generator_params.label_dim,
        label_name="label",
        dense_dim=data_generator_params.dense_dim,
        dense_name="dense",
        data_reader_sparse_param_array=
        # the total number of slots should be equal to data_generator_params.num_slot
        [
            hugectr.DataReaderSparseParam("wide_data", 2, True, 1),
            hugectr.DataReaderSparseParam("deep_data", 1, True, 26)
        ]))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
        workspace_size_per_gpu_in_mb=23,
        embedding_vec_size=1,
        combiner="sum",
        sparse_embedding_name="sparse_embedding2",
        bottom_name="wide_data",
        optimizer=optimizer))
model.add(
コード例 #5
0
                                      display=200,
                                      eval_interval=1000,
                                      i64_input_key=False,
                                      use_mixed_precision=False,
                                      repeat_dataset=True)
optimizer = hugectr.optimizer.CreateOptimizer(
    optimizer_type=hugectr.Optimizer_t.Adam, use_mixed_precision=False)
model = hugectr.Model(solver, optimizer)
model.add(
    hugectr.Input(data_reader_type=hugectr.DataReaderType_t.Norm,
                  source="./criteo_data/train/_file_list.txt",
                  eval_source="./criteo_data/val/_file_list.txt",
                  check_type=hugectr.Check_t.Non,
                  label_dim=1,
                  label_name="label",
                  dense_dim=13,
                  dense_name="dense",
                  data_reader_sparse_param_array=[
                      hugectr.DataReaderSparseParam(
                          hugectr.DataReaderSparse_t.Distributed, 30, 1, 26)
                  ],
                  sparse_names=["data1"]))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
        max_vocabulary_size_per_gpu=1447751,
        embedding_vec_size=16,
        combiner=0,
        sparse_embedding_name="sparse_embedding1",
        bottom_name="data1"))
model.add(
コード例 #6
0
def _run_model(slot_sizes, total_cardinality):
    """Build, train, and export a small embeddings-plus-MLP HugeCTR model.

    Args:
        slot_sizes: per-slot cardinalities for the embedding table; its
            length also fixes the number of sparse slots that are read.
        total_cardinality: unused by this function (kept so the caller's
            signature stays compatible).
    """
    # Single-GPU solver, identical batch size for training and evaluation.
    solver = hugectr.CreateSolver(
        vvgpu=[[0]],
        batchsize=2048,
        batchsize_eval=2048,
        max_eval_batches=160,
        i64_input_key=True,
        use_mixed_precision=False,
        repeat_dataset=True,
    )

    # Parquet reader over the preprocessed train/valid file lists.
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Parquet,
        source=[DATA_DIR + "train/_file_list.txt"],
        eval_source=DATA_DIR + "valid/_file_list.txt",
        check_type=hugectr.Check_t.Non,
    )

    adam = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam)
    model = hugectr.Model(solver, reader, adam)

    # One sparse input group with len(slot_sizes) slots; the dataset carries
    # no dense features (dense_dim=0).
    model.add(
        hugectr.Input(
            label_dim=1,
            label_name="label",
            dense_dim=0,
            dense_name="dense",
            data_reader_sparse_param_array=[
                hugectr.DataReaderSparseParam(
                    "data1", len(slot_sizes) + 1, True, len(slot_sizes)
                )
            ],
        )
    )

    # Hash-table embedding distributed over the (single) GPU.
    model.add(
        hugectr.SparseEmbedding(
            embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
            workspace_size_per_gpu_in_mb=107,
            embedding_vec_size=16,
            combiner="sum",
            sparse_embedding_name="sparse_embedding1",
            bottom_name="data1",
            slot_size_array=slot_sizes,
            optimizer=adam,
        )
    )

    # Flatten the per-slot embedding vectors into one 48-wide feature.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["sparse_embedding1"],
            top_names=["reshape1"],
            leading_dim=48,
        )
    )

    # Two hidden InnerProduct + ReLU stages, 128 units each.
    previous = "reshape1"
    for idx, width in ((1, 128), (2, 128)):
        fc_name, relu_name = f"fc{idx}", f"relu{idx}"
        model.add(
            hugectr.DenseLayer(
                layer_type=hugectr.Layer_t.InnerProduct,
                bottom_names=[previous],
                top_names=[fc_name],
                num_output=width,
            )
        )
        model.add(
            hugectr.DenseLayer(
                layer_type=hugectr.Layer_t.ReLU,
                bottom_names=[fc_name],
                top_names=[relu_name],
            )
        )
        previous = relu_name

    # Single-logit head followed by the binary cross-entropy loss.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=[previous],
            top_names=["fc3"],
            num_output=1,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
            bottom_names=["fc3", "label"],
            top_names=["loss"],
        )
    )

    model.compile()
    model.summary()
    model.fit(max_iter=2000, display=100, eval_interval=200, snapshot=1900)
    model.graph_to_json(graph_config_file=NETWORK_FILE)
コード例 #7
0
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.Norm,
    source=["./criteo_data/train/_file_list.txt"],
    eval_source="./criteo_data/val/_file_list.txt",
    check_type=hugectr.Check_t.Non)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global,
                                    beta1=0.9,
                                    beta2=0.999,
                                    epsilon=0.0000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=13,
                  dense_name="dense",
                  data_reader_sparse_param_array=[
                      hugectr.DataReaderSparseParam("data1", 2, False, 26)
                  ]))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
        workspace_size_per_gpu_in_mb=89,
        embedding_vec_size=16,
        combiner="sum",
        sparse_embedding_name="sparse_embedding1",
        bottom_name="data1",
        optimizer=optimizer))
model.add(
    hugectr.DenseLayer(layer_type=hugectr.Layer_t.Reshape,
                       bottom_names=["sparse_embedding1"],
コード例 #8
0
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Local,
                                    beta1=0.9,
                                    beta2=0.999,
                                    epsilon=0.0000001)
model = hugectr.Model(solver, reader, optimizer)
num_gpus = 1
workspace_size_per_gpu_in_mb = (int(40004 * 16 * 4 / 1000000) + 10)
model.add(
    hugectr.Input(
        label_dim=3,
        label_name="label",
        dense_dim=3,
        dense_name="dense",
        data_reader_sparse_param_array=[
            hugectr.DataReaderSparseParam(
                "data1",
                [1, 1, 1, 1],
                False,
                4,
            )
        ],
    ))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.LocalizedSlotSparseEmbeddingHash,
        workspace_size_per_gpu_in_mb=workspace_size_per_gpu_in_mb,
        embedding_vec_size=16,
        combiner="mean",
        sparse_embedding_name="sparse_embedding1",
        bottom_name="data1",
        optimizer=optimizer,
コード例 #9
0
                                      i64_input_key=True,
                                      use_mixed_precision=False,
                                      repeat_dataset=True)
optimizer = hugectr.optimizer.CreateOptimizer(
    optimizer_type=hugectr.Optimizer_t.Adam, use_mixed_precision=False)
model = hugectr.Model(solver, optimizer)
model.add(
    hugectr.Input(data_reader_type=hugectr.DataReaderType_t.Parquet,
                  source="./criteo_data/train/_file_list.txt",
                  eval_source="./criteo_data/val/_file_list.txt",
                  check_type=hugectr.Check_t.Non,
                  label_dim=1,
                  label_name="label",
                  dense_dim=13,
                  dense_name="dense",
                  slot_size_array=[
                      203931, 18598, 14092, 7012, 18977, 4, 6385, 1245, 49,
                      186213, 71328, 67288, 11, 2168, 7338, 61, 4, 932, 15,
                      204515, 141526, 199433, 60919, 9137, 71, 34
                  ],
                  data_reader_sparse_param_array=[
                      hugectr.DataReaderSparseParam(
                          hugectr.DataReaderSparse_t.Distributed, 30, 1, 26)
                  ],
                  sparse_names=["data1"]))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
        max_vocabulary_size_per_gpu=1447751,
        embedding_vec_size=16,
        combiner=0,
        sparse_embedding_name="sparse_embedding1",
コード例 #10
0
                              repeat_dataset=True)
reader = hugectr.DataReaderParams(
    data_reader_type=data_generator_params.format,
    source=[data_generator_params.source],
    eval_source=data_generator_params.eval_source,
    # For parquet, generated dataset doesn't guarantee uniqueness, slot_size_array is still a must
    slot_size_array=data_generator_params.slot_size_array,
    check_type=data_generator_params.check_type)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=data_generator_params.label_dim,
                  label_name="label",
                  dense_dim=data_generator_params.dense_dim,
                  dense_name="dense",
                  data_reader_sparse_param_array=[
                      hugectr.DataReaderSparseParam(
                          "data1", 1, True, data_generator_params.num_slot)
                  ]))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
        workspace_size_per_gpu_in_mb=25,
        embedding_vec_size=16,
        combiner="sum",
        sparse_embedding_name="sparse_embedding1",
        bottom_name="data1",
        optimizer=optimizer))
model.add(
    hugectr.DenseLayer(layer_type=hugectr.Layer_t.Reshape,
                       bottom_names=["sparse_embedding1"],
コード例 #11
0
def train(input_train, input_val, max_iter,
                batchsize, snapshot, num_gpus, eval_interval,
                dense_model_file, sparse_model_files):
    """Continue training a DCN-style (cross + deep) HugeCTR model from saved weights.

    Loads the dense model and sparse embedding files produced by an earlier run
    (``dense_model_file`` / ``sparse_model_files``) and trains for ``max_iter``
    iterations on the Parquet file lists ``input_train`` / ``input_val``.

    NOTE(review): this uses the legacy ``solver_parser_helper`` /
    ``hugectr.optimizer`` API — presumably an older HugeCTR release; confirm
    against the installed version before reuse.
    """

    logging.info(f"GPU Devices: {num_gpus}")

    # Configure and define the HugeCTR model
    # num_epochs=0 + repeat_dataset=True means iteration-based (not epoch-based) training.
    solver = hugectr.solver_parser_helper(num_epochs = 0,
                                        max_iter = max_iter,
                                        max_eval_batches = 100,
                                        batchsize_eval = batchsize,
                                        batchsize = batchsize,
                                        model_file = dense_model_file,
                                        embedding_files = sparse_model_files,
                                        display = 200,
                                        eval_interval = eval_interval,
                                        i64_input_key = True,
                                        use_mixed_precision = False,
                                        repeat_dataset = True,
                                        snapshot = snapshot,
                                        vvgpu = [num_gpus],
                                        use_cuda_graph = False
                                        )

    optimizer = hugectr.optimizer.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
                                        use_mixed_precision = False)
    model = hugectr.Model(solver, optimizer)

    # The slot_size_array are the cardinalities of each categorical feature after NVTabular preprocessing
    model.add(hugectr.Input(data_reader_type = hugectr.DataReaderType_t.Parquet,
                                source = input_train,
                                eval_source = input_val,
                                check_type = hugectr.Check_t.Non,
                                label_dim = 1, label_name = "label",
                                dense_dim = 13, dense_name = "dense",
                                slot_size_array = [18576837, 29428, 15128, 7296, 19902, 4, 6466, 1311, 62, 11700067, 622921, 219557, 11, 2209, 9780, 71, 4, 964, 15, 22022124, 4384510, 15960286, 290588, 10830, 96, 35],
                                data_reader_sparse_param_array =
                                [hugectr.DataReaderSparseParam(hugectr.DataReaderSparse_t.Distributed, 30, 1, 26)],
                                sparse_names = ["data1"]))

    # Sparse Embedding Layer
    model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
                                max_vocabulary_size_per_gpu = 88656602,
                                embedding_vec_size = 16,
                                combiner = 0,
                                sparse_embedding_name = "sparse_embedding1",
                                bottom_name = "data1"))
    # Flatten the embeddings: 26 slots x 16-dim vectors = 416 features.
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
                                bottom_names = ["sparse_embedding1"],
                                top_names = ["reshape1"],
                                leading_dim=416))

    # Concatenate sparse embedding and dense input
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
                                bottom_names = ["reshape1", "dense"], top_names = ["concat1"]))
    # Duplicate the full 429-wide vector (416 embedding + 13 dense) so the
    # cross branch and the deep branch each get their own copy.
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
                                bottom_names = ["concat1"],
                                top_names = ["slice11", "slice12"],
                                ranges=[(0,429),(0,429)]))

    # Cross-network branch (explicit feature crossing, 6 layers).
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross,
                                bottom_names = ["slice11"],
                                top_names = ["multicross1"],
                                num_layers=6))

    # Deep branch: two 1024-unit InnerProduct + ReLU + Dropout(0.5) stages.
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                                bottom_names = ["slice12"],
                                top_names = ["fc1"],
                                num_output=1024))
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                                bottom_names = ["fc1"],
                                top_names = ["relu1"]))
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
                                bottom_names = ["relu1"],
                                top_names = ["dropout1"],
                                dropout_rate=0.5))
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                                bottom_names = ["dropout1"],
                                top_names = ["fc2"],
                                num_output=1024))
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
                                bottom_names = ["fc2"],
                                top_names = ["relu2"]))
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
                                bottom_names = ["relu2"],
                                top_names = ["dropout2"],
                                dropout_rate=0.5))

    # Merge the deep and cross branches, then a single-logit output head.
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
                                bottom_names = ["dropout2", "multicross1"],
                                top_names = ["concat2"]))
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
                                bottom_names = ["concat2"],
                                top_names = ["fc3"],
                                num_output=1))
    model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
                                bottom_names = ["fc3", "label"],
                                top_names = ["loss"]))
    model.compile()
    model.summary()
    model.fit()
コード例 #12
0
def train_hugectr(workflow, devices, out_path):
    """Train a DLRM-style HugeCTR model on an NVTabular-preprocessed dataset.

    Args:
        workflow: fitted NVTabular workflow; its embedding-size table supplies
            the per-slot cardinalities (``slot_size_array``).
        devices: device-id string; parsed below but the solver is hard-coded
            to GPU 0.
        out_path: directory containing ``output/train`` and ``output/valid``
            Parquet file lists.
    """
    # Per-slot cardinalities taken from the NVTabular workflow.
    embeddings = [sizes[0] for sizes in get_embedding_sizes(workflow).values()]
    # NOTE(review): parsed but unused — vvgpu below is fixed to [[0]].
    devices = [[int(d)] for d in list(devices)[0::2]]

    # Iteration-based training with periodic eval and snapshots.
    solver = hugectr.solver_parser_helper(
        vvgpu=[[0]],
        max_iter=10000,
        max_eval_batches=100,
        batchsize_eval=2720,
        batchsize=2720,
        display=1000,
        eval_interval=3200,
        snapshot=3200,
        i64_input_key=True,
        use_mixed_precision=False,
        repeat_dataset=True,
    )
    sgd = hugectr.optimizer.CreateOptimizer(
        optimizer_type=hugectr.Optimizer_t.SGD, use_mixed_precision=False)
    model = hugectr.Model(solver, sgd)

    # Parquet input: 1 label, 13 dense features, 26 categorical slots.
    model.add(
        hugectr.Input(
            data_reader_type=hugectr.DataReaderType_t.Parquet,
            source=out_path + "/output/train/_file_list.txt",
            eval_source=out_path + "/output/valid/_file_list.txt",
            check_type=hugectr.Check_t.Non,
            label_dim=1,
            label_name="label",
            dense_dim=13,
            dense_name="dense",
            slot_size_array=embeddings,
            data_reader_sparse_param_array=[
                hugectr.DataReaderSparseParam(
                    hugectr.DataReaderSparse_t.Localized, 26, 1, 26)
            ],
            sparse_names=["data1"],
        ))
    # 128-wide localized-slot hash embedding.
    model.add(
        hugectr.SparseEmbedding(
            embedding_type=hugectr.Embedding_t.LocalizedSlotSparseEmbeddingHash,
            max_vocabulary_size_per_gpu=15500000,
            embedding_vec_size=128,
            combiner=0,
            sparse_embedding_name="sparse_embedding1",
            bottom_name="data1",
        ))

    def _mlp(bottom, first_index, widths):
        # Append InnerProduct + ReLU pairs fc{i}/relu{i}; return the last
        # ReLU's tensor name.
        top = bottom
        for offset, width in enumerate(widths):
            i = first_index + offset
            model.add(
                hugectr.DenseLayer(
                    layer_type=hugectr.Layer_t.InnerProduct,
                    bottom_names=[top],
                    top_names=[f"fc{i}"],
                    num_output=width,
                ))
            model.add(
                hugectr.DenseLayer(
                    layer_type=hugectr.Layer_t.ReLU,
                    bottom_names=[f"fc{i}"],
                    top_names=[f"relu{i}"],
                ))
            top = f"relu{i}"
        return top

    # Bottom MLP over the dense features.
    bottom_out = _mlp("dense", 1, (512, 256, 128))
    # Pairwise interaction of the bottom-MLP output with the embeddings.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Interaction,
            bottom_names=[bottom_out, "sparse_embedding1"],
            top_names=["interaction1"],
        ))
    # Top MLP, then a single-logit head and binary cross-entropy loss.
    top_out = _mlp("interaction1", 4, (1024, 1024, 512, 256))
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=[top_out],
            top_names=["fc8"],
            num_output=1,
        ))
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
            bottom_names=["fc8", "label"],
            top_names=["loss"],
        ))
    # Run training
    model.compile()
    model.summary()
    model.fit()
コード例 #13
0
ファイル: dlrm.py プロジェクト: Oneflow-Inc/DLPerf
def DLRM(args):
    """Build (without training) a DLRM model for the Criteo Terabyte raw dataset.

    Reads all hyper-parameters (batch size, learning-rate schedule, embedding
    size, GPU layout, data directory) from ``args`` and returns the assembled
    ``hugectr.Model``; the caller is responsible for compile/fit.
    """
    # One GPU list per node: num_nodes x gpu_num_per_node device grid.
    vvgpu = [[g for g in range(args.gpu_num_per_node)]
             for n in range(args.num_nodes)]
    # Solver with warmup + polynomial-decay learning-rate schedule.
    solver = hugectr.CreateSolver(max_eval_batches=args.eval_batchs,
                                  batchsize_eval=args.batch_size,
                                  batchsize=args.batch_size,
                                  lr=args.learning_rate,
                                  warmup_steps=args.warmup_steps,
                                  decay_start=args.decay_start,
                                  decay_steps=args.decay_steps,
                                  decay_power=args.decay_power,
                                  end_lr=args.end_lr,
                                  vvgpu=vvgpu,
                                  repeat_dataset=True)
    # Raw binary reader; sample counts are fixed for this Criteo split.
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Raw,
        source=[f"{args.data_dir}/train_data.bin"],
        eval_source=f"{args.data_dir}/test_data.bin",
        num_samples=36672493,
        eval_num_samples=4584062,
        check_type=hugectr.Check_t.Non)
    optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.SGD,
                                        update_type=hugectr.Update_t.Local,
                                        atomic_update=True)
    model = hugectr.Model(solver, reader, optimizer)
    # Input: 1 label, 13 dense features, 26 one-hot categorical slots.
    model.add(
        hugectr.Input(label_dim=1,
                      label_name="label",
                      dense_dim=13,
                      dense_name="dense",
                      data_reader_sparse_param_array=[
                          hugectr.DataReaderSparseParam("data1", 2, False, 26)
                      ]))
    # One-hot localized-slot embedding; slot_size_array lists the cardinality
    # of each of the 26 categorical features.
    model.add(
        hugectr.SparseEmbedding(
            embedding_type=hugectr.Embedding_t.
            LocalizedSlotSparseEmbeddingOneHot,
            slot_size_array=[
                1460, 583, 10131227, 2202608, 305, 24, 12517, 633, 3, 93145,
                5683, 8351593, 3194, 27, 14992, 5461306, 10, 5652, 2173, 4,
                7046547, 18, 15, 286181, 105, 142572
            ],
            workspace_size_per_gpu_in_mb=args.workspace_size_per_gpu_in_mb,
            embedding_vec_size=args.embedding_vec_size,
            combiner="sum",
            sparse_embedding_name="sparse_embedding1",
            bottom_name="data1",
            optimizer=optimizer))
    # Bottom MLP over the dense features: 512 -> 256 -> 128.
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["dense"],
                           top_names=["fc1"],
                           num_output=512))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc1"],
                           top_names=["relu1"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu1"],
                           top_names=["fc2"],
                           num_output=256))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc2"],
                           top_names=["relu2"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu2"],
                           top_names=["fc3"],
                           num_output=128))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc3"],
                           top_names=["relu3"]))
    # Pairwise interaction of bottom-MLP output and the embeddings.
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.Interaction,
                           bottom_names=["relu3", "sparse_embedding1"],
                           top_names=["interaction1"]))
    # Top MLP: 1024 -> 1024 -> 512 -> 256, then a single-logit head.
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["interaction1"],
                           top_names=["fc4"],
                           num_output=1024))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc4"],
                           top_names=["relu4"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu4"],
                           top_names=["fc5"],
                           num_output=1024))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc5"],
                           top_names=["relu5"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu5"],
                           top_names=["fc6"],
                           num_output=512))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc6"],
                           top_names=["relu6"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu6"],
                           top_names=["fc7"],
                           num_output=256))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc7"],
                           top_names=["relu7"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu7"],
                           top_names=["fc8"],
                           num_output=1))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
                           bottom_names=["fc8", "label"],
                           top_names=["loss"]))
    return model
コード例 #14
0
ファイル: benchmark_train.py プロジェクト: rhdong/HugeCTR
def create_din(solver):
    """Construct the DIN (Deep Interest Network) benchmark model.

    Wires a Parquet data reader and a globally-updated Adam optimizer into a
    hugectr.Model, then stacks the DIN graph: three distributed hash
    embeddings (user / good / category), an attention unit over the 10-step
    item history, and a Dice-activated MLP ending in a binary
    cross-entropy loss.

    Args:
        solver: hugectr solver configuration shared by the benchmark models.

    Returns:
        The assembled hugectr.Model (not yet compiled/fitted).
    """
    # Per-slot cardinalities: only slot 0 (UserID), slot 11 (GoodID) and
    # slot 22 (CateID) carry real vocabularies; the rest are zero padding.
    slot_sizes = [0] * 23
    slot_sizes[0], slot_sizes[11], slot_sizes[22] = 192403, 63001, 801

    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Parquet,
        source=["./din_data/train/_file_list.txt"],
        eval_source="./din_data/valid/_file_list.txt",
        check_type=hugectr.Check_t.Non,
        num_workers=1,
        slot_size_array=slot_sizes,
    )
    optimizer = hugectr.CreateOptimizer(
        optimizer_type=hugectr.Optimizer_t.Adam,
        update_type=hugectr.Update_t.Global,
        beta1=0.9,
        beta2=0.999,
        epsilon=0.000000001,
    )
    model = hugectr.Model(solver, reader, optimizer)
    model.add(
        hugectr.Input(
            label_dim=1,
            label_name="label",
            dense_dim=0,  # DIN consumes no dense features
            dense_name="dense",
            data_reader_sparse_param_array=[
                hugectr.DataReaderSparseParam("UserID", 1, True, 1),
                hugectr.DataReaderSparseParam("GoodID", 1, True, 11),
                hugectr.DataReaderSparseParam("CateID", 1, True, 11),
            ],
        )
    )

    # One 18-dim distributed hash embedding per sparse input; workspace
    # sizes (MB per GPU) are tuned to each table's cardinality.
    embedding_specs = (
        ("sparse_embedding_user", "UserID", 28),
        ("sparse_embedding_good", "GoodID", 24),
        ("sparse_embedding_cate", "CateID", 10),
    )
    for emb_name, bottom, workspace_mb in embedding_specs:
        model.add(
            hugectr.SparseEmbedding(
                embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
                workspace_size_per_gpu_in_mb=workspace_mb,
                embedding_vec_size=18,
                combiner="sum",
                sparse_embedding_name=emb_name,
                bottom_name=bottom,
                optimizer=optimizer,
            )
        )

    # The rest of the graph is a fixed pipeline of dense layers; declare
    # them as kwargs dicts and add them in order.
    layer_specs = [
        # Fuse good+cate embeddings and split out current item vs. history.
        dict(
            layer_type=hugectr.Layer_t.FusedReshapeConcat,
            bottom_names=["sparse_embedding_good", "sparse_embedding_cate"],
            top_names=["FusedReshapeConcat_item_his_em", "FusedReshapeConcat_item"],
        ),
        dict(
            layer_type=hugectr.Layer_t.Slice,
            bottom_names=["FusedReshapeConcat_item"],
            top_names=["item1", "item2"],
            ranges=[(0, 36), (0, 36)],
        ),
        dict(
            layer_type=hugectr.Layer_t.Slice,
            bottom_names=["FusedReshapeConcat_item_his_em"],
            top_names=["item_his1", "item_his2", "item_his3", "item_his4", "item_his5"],
            ranges=[(0, 36), (0, 36), (0, 36), (0, 36), (0, 36)],
        ),
        # Broadcast the current item across the 10 history positions.
        dict(
            layer_type=hugectr.Layer_t.Scale,
            bottom_names=["item1"],
            top_names=["Scale_item"],
            axis=1,
            factor=10,
        ),
        dict(
            layer_type=hugectr.Layer_t.Slice,
            bottom_names=["Scale_item"],
            top_names=["Scale_item1", "Scale_item2", "Scale_item3"],
            ranges=[(0, 36), (0, 36), (0, 36)],
        ),
        # Attention features: item, history, their difference and product.
        dict(
            layer_type=hugectr.Layer_t.Sub,
            bottom_names=["Scale_item1", "item_his1"],
            top_names=["sub_ih"],
        ),
        dict(
            layer_type=hugectr.Layer_t.ElementwiseMultiply,
            bottom_names=["Scale_item2", "item_his2"],
            top_names=["ElementWiseMul_i"],
        ),
        dict(
            layer_type=hugectr.Layer_t.Concat,
            bottom_names=["Scale_item3", "item_his3", "sub_ih", "ElementWiseMul_i"],
            top_names=["concat_i_h"],
        ),
        # Attention MLP producing one score per history position.
        dict(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["concat_i_h"],
            top_names=["fc_att_i2"],
            num_output=40,
        ),
        dict(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["fc_att_i2"],
            top_names=["fc_att_i3"],
            num_output=1,
        ),
        dict(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["fc_att_i3"],
            top_names=["reshape_score"],
            leading_dim=10,
        ),
        dict(
            layer_type=hugectr.Layer_t.Softmax,
            bottom_names=["reshape_score"],
            top_names=["softmax_att_i"],
        ),
        dict(
            layer_type=hugectr.Layer_t.Scale,
            bottom_names=["softmax_att_i"],
            top_names=["Scale_i"],
            axis=0,
            factor=36,
        ),
        dict(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["item_his4"],
            top_names=["reshape_item_his"],
            leading_dim=360,
        ),
        # Weight the history by the attention scores (matmul-equivalent).
        dict(
            layer_type=hugectr.Layer_t.ElementwiseMultiply,
            bottom_names=["Scale_i", "reshape_item_his"],
            top_names=["ElementwiseMul_ih"],
        ),
        dict(
            layer_type=hugectr.Layer_t.ReduceSum,
            bottom_names=["ElementwiseMul_ih"],
            top_names=["reduce_ih"],
            axis=1,
        ),
        # Plain mean-pooled history as an additional feature.
        dict(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["item_his5"],
            top_names=["reshape_his"],
            leading_dim=36,
            time_step=10,
        ),
        dict(
            layer_type=hugectr.Layer_t.ReduceMean,
            bottom_names=["reshape_his"],
            top_names=["reduce_item_his"],
            axis=1,
        ),
        dict(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["reduce_item_his"],
            top_names=["reshape_reduce_item_his"],
            leading_dim=36,
        ),
        dict(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["sparse_embedding_user"],
            top_names=["reshape_user"],
            leading_dim=18,
        ),
        dict(
            layer_type=hugectr.Layer_t.Concat,
            bottom_names=[
                "reshape_user",
                "reshape_reduce_item_his",
                "reduce_ih",
                "item2",
            ],
            top_names=["concat_din_i"],
        ),
        # Final fully-connected net (build_fcn_net) with Dice activations.
        dict(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["concat_din_i"],
            top_names=["fc_din_i1"],
            num_output=200,
        ),
        dict(
            layer_type=hugectr.Layer_t.PReLU_Dice,
            bottom_names=["fc_din_i1"],
            top_names=["dice_1"],
            elu_alpha=0.2,
            eps=1e-8,
        ),
        dict(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["dice_1"],
            top_names=["fc_din_i2"],
            num_output=80,
        ),
        dict(
            layer_type=hugectr.Layer_t.PReLU_Dice,
            bottom_names=["fc_din_i2"],
            top_names=["dice_2"],
            elu_alpha=0.2,
            eps=1e-8,
        ),
        dict(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["dice_2"],
            top_names=["fc3"],
            num_output=1,
        ),
        dict(
            layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
            bottom_names=["fc3", "label"],
            top_names=["loss"],
        ),
    ]
    for spec in layer_specs:
        model.add(hugectr.DenseLayer(**spec))
    return model
Code example #15
0
File: benchmark_train.py  Project: rhdong/HugeCTR
def create_deepfm(solver):
    """Construct the DeepFM benchmark model on the DCN (Criteo) dataset.

    Links the dataset directory given by the ``DCN_DATA_PATH`` environment
    variable to ``./dcn_data`` (a location shared with the DCN benchmark),
    then builds a DeepFM graph: a shared 11-dim embedding sliced into a
    10-dim deep part and a 1-dim first-order part, a 3x400 dropout MLP,
    an FmOrder2 second-order interaction, and a linear term, all summed
    into a binary cross-entropy loss.

    Args:
        solver: hugectr solver configuration shared by the benchmark models.

    Returns:
        The assembled hugectr.Model (not yet compiled/fitted).

    Raises:
        RuntimeError: if ``DCN_DATA_PATH`` is not set.
    """
    dataset_path = os.getenv("DCN_DATA_PATH")
    if not dataset_path:
        # Fail fast with a clear message instead of the opaque TypeError
        # os.symlink would raise on a None source.
        raise RuntimeError(
            "DCN_DATA_PATH environment variable must point to the DCN dataset"
        )
    # The same link is also created by create_dcn(); creating it twice would
    # raise FileExistsError, so skip when anything (even a stale symlink,
    # hence lexists rather than exists) already occupies the path.
    if not os.path.lexists("./dcn_data"):
        os.symlink(dataset_path, "./dcn_data", target_is_directory=True)
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Norm,
        source=["./dcn_data/file_list.txt"],
        eval_source="./dcn_data/file_list_test.txt",
        check_type=hugectr.Check_t.Sum,
    )
    optimizer = hugectr.CreateOptimizer(
        optimizer_type=hugectr.Optimizer_t.SGD,
        update_type=hugectr.Update_t.Local,
        atomic_update=True,
    )
    model = hugectr.Model(solver, reader, optimizer)
    model.add(
        hugectr.Input(
            label_dim=1,
            label_name="label",
            dense_dim=13,  # 13 Criteo integer features
            dense_name="dense",
            data_reader_sparse_param_array=[
                # 26 categorical slots, up to 2 nnz each.
                hugectr.DataReaderSparseParam("data1", 2, False, 26)
            ],
        )
    )
    model.add(
        hugectr.SparseEmbedding(
            embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
            workspace_size_per_gpu_in_mb=61,
            # 11 = 10 (deep embedding) + 1 (FM first-order weight).
            embedding_vec_size=11,
            combiner="sum",
            sparse_embedding_name="sparse_embedding1",
            bottom_name="data1",
            optimizer=optimizer,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["sparse_embedding1"],
            top_names=["reshape1"],
            leading_dim=11,
        )
    )
    # Split each slot's 11-dim vector: first 10 dims feed the deep/FM2
    # part, the last dim is the per-feature linear (first-order) weight.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Slice,
            bottom_names=["reshape1"],
            top_names=["slice11", "slice12"],
            ranges=[(0, 10), (10, 11)],
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["slice11"],
            top_names=["reshape2"],
            leading_dim=260,  # 26 slots * 10 dims
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["slice12"],
            top_names=["reshape3"],
            leading_dim=26,  # 26 slots * 1 linear weight
        )
    )
    # Project the 13 dense features into the same 10-dim / 1-dim spaces.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.WeightMultiply,
            bottom_names=["dense"],
            top_names=["weight_multiply1"],
            weight_dims=[13, 10],
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.WeightMultiply,
            bottom_names=["dense"],
            top_names=["weight_multiply2"],
            weight_dims=[13, 1],
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Concat,
            bottom_names=["reshape2", "weight_multiply1"],
            top_names=["concat1"],
        )
    )
    # Deep component: three 400-unit FC + ReLU + Dropout(0.5) stages.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["concat1"],
            top_names=["fc1"],
            num_output=400,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReLU, bottom_names=["fc1"], top_names=["relu1"]
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Dropout,
            bottom_names=["relu1"],
            top_names=["dropout1"],
            dropout_rate=0.5,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["dropout1"],
            top_names=["fc2"],
            num_output=400,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReLU, bottom_names=["fc2"], top_names=["relu2"]
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Dropout,
            bottom_names=["relu2"],
            top_names=["dropout2"],
            dropout_rate=0.5,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["dropout2"],
            top_names=["fc3"],
            num_output=400,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReLU, bottom_names=["fc3"], top_names=["relu3"]
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Dropout,
            bottom_names=["relu3"],
            top_names=["dropout3"],
            dropout_rate=0.5,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["dropout3"],
            top_names=["fc4"],
            num_output=1,
        )
    )
    # FM second-order interaction over the 10-dim feature vectors.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.FmOrder2,
            bottom_names=["concat1"],
            top_names=["fmorder2"],
            out_dim=10,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReduceSum,
            bottom_names=["fmorder2"],
            top_names=["reducesum1"],
            axis=1,
        )
    )
    # FM first-order (linear) term over sparse and dense weights.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Concat,
            bottom_names=["reshape3", "weight_multiply2"],
            top_names=["concat2"],
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReduceSum,
            bottom_names=["concat2"],
            top_names=["reducesum2"],
            axis=1,
        )
    )
    # Final logit = deep + FM2 + FM1, fed to the BCE loss.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Add,
            bottom_names=["fc4", "reducesum1", "reducesum2"],
            top_names=["add"],
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
            bottom_names=["add", "label"],
            top_names=["loss"],
        )
    )
    return model
Code example #16
0
File: benchmark_train.py  Project: rhdong/HugeCTR
def create_dcn(solver):
    """Construct the DCN (Deep & Cross Network) benchmark model.

    Links the dataset directory given by the ``DCN_DATA_PATH`` environment
    variable to ``./dcn_data`` (a location shared with the DeepFM
    benchmark), then builds a DCN graph: a 16-dim embedding over 26
    categorical slots, a 6-layer cross network and a 2x1024 dropout MLP
    running in parallel, concatenated into a single logit trained with
    binary cross-entropy.

    Args:
        solver: hugectr solver configuration shared by the benchmark models.

    Returns:
        The assembled hugectr.Model (not yet compiled/fitted).

    Raises:
        RuntimeError: if ``DCN_DATA_PATH`` is not set.
    """
    dataset_path = os.getenv("DCN_DATA_PATH")
    if not dataset_path:
        # Fail fast with a clear message instead of the opaque TypeError
        # os.symlink would raise on a None source.
        raise RuntimeError(
            "DCN_DATA_PATH environment variable must point to the DCN dataset"
        )
    # The same link is also created by create_deepfm(); creating it twice
    # would raise FileExistsError, so skip when anything (even a stale
    # symlink, hence lexists rather than exists) already occupies the path.
    if not os.path.lexists("./dcn_data"):
        os.symlink(dataset_path, "./dcn_data", target_is_directory=True)
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Norm,
        source=["./dcn_data/file_list.txt"],
        eval_source="./dcn_data/file_list_test.txt",
        check_type=hugectr.Check_t.Sum,
    )
    optimizer = hugectr.CreateOptimizer(
        optimizer_type=hugectr.Optimizer_t.SGD,
        update_type=hugectr.Update_t.Local,
        atomic_update=True,
    )
    model = hugectr.Model(solver, reader, optimizer)
    model.add(
        hugectr.Input(
            label_dim=1,
            label_name="label",
            dense_dim=13,  # 13 Criteo integer features
            dense_name="dense",
            data_reader_sparse_param_array=[
                # 26 categorical slots, up to 2 nnz each.
                hugectr.DataReaderSparseParam("data1", 2, False, 26)
            ],
        )
    )
    model.add(
        hugectr.SparseEmbedding(
            embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
            workspace_size_per_gpu_in_mb=300,
            embedding_vec_size=16,
            combiner="sum",
            sparse_embedding_name="sparse_embedding1",
            bottom_name="data1",
            optimizer=optimizer,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["sparse_embedding1"],
            top_names=["reshape1"],
            leading_dim=416,  # 26 slots * 16 dims
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Concat,
            bottom_names=["reshape1", "dense"],
            top_names=["concat1"],
        )
    )
    # Cross branch: 6 explicit feature-crossing layers.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.MultiCross,
            bottom_names=["concat1"],
            top_names=["multicross1"],
            num_layers=6,
        )
    )
    # Deep branch: two 1024-unit FC + ReLU + Dropout(0.5) stages.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["concat1"],
            top_names=["fc1"],
            num_output=1024,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReLU, bottom_names=["fc1"], top_names=["relu1"]
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Dropout,
            bottom_names=["relu1"],
            top_names=["dropout1"],
            dropout_rate=0.5,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["dropout1"],
            top_names=["fc2"],
            num_output=1024,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReLU, bottom_names=["fc2"], top_names=["relu2"]
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Dropout,
            bottom_names=["relu2"],
            top_names=["dropout2"],
            dropout_rate=0.5,
        )
    )
    # Merge the deep and cross branches and project to a single logit.
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Concat,
            bottom_names=["dropout2", "multicross1"],
            top_names=["concat2"],
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["concat2"],
            top_names=["fc3"],
            num_output=1,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
            bottom_names=["fc3", "label"],
            top_names=["loss"],
        )
    )
    return model