Example 1
def model_test(json_file):
    solver = hugectr.CreateSolver(max_eval_batches=100,
                                  batchsize_eval=16384,
                                  batchsize=16384,
                                  vvgpu=[[0, 1, 2, 3], [4, 5, 6, 7]],
                                  i64_input_key=False,
                                  use_mixed_precision=False,
                                  repeat_dataset=True,
                                  use_cuda_graph=True)
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Norm,
        source=["./file_list.txt"],
        eval_source="./file_list_test.txt",
        check_type=hugectr.Check_t.Sum)
    optimizer = hugectr.CreateOptimizer(
        optimizer_type=hugectr.Optimizer_t.Adam)
    model = hugectr.Model(solver, reader, optimizer)
    model.construct_from_json(graph_config_file=json_file,
                              include_dense_network=True)
    model.summary()
    model.compile()
    model.fit(max_iter=10000,
              display=200,
              eval_interval=1000,
              snapshot=100000,
              snapshot_prefix="criteo")
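
A hypothetical invocation, assuming hugectr is importable and a compatible graph JSON (for example, one produced by model.graph_to_json) exists at the given path:

model_test("./criteo_model.json")  # path is illustrative, not from the source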
Example 2
def multi_node_test():
    parser = argparse.ArgumentParser()
    parser.add_argument("--benchmark", type=str, required=True)
    parser.add_argument("--batchsize_per_gpu", type=int, required=True)
    parser.add_argument("--node_num", type=int, required=True, default=1)
    parser.add_argument("--gpu_num", type=int, required=True, default=1)
    parser.add_argument("--use_mixed_precision", action='store_true', default=False)
    args = parser.parse_args()

    vvgpu = [[g for g in range(args.gpu_num)] for _ in range(args.node_num)]
    batchsize = args.batchsize_per_gpu * args.node_num * args.gpu_num

    args.i64_input_key = False
    if args.use_mixed_precision:
        args.scaler = 1024
    else:
        args.scaler = 1

    solver = hugectr.CreateSolver(
        max_eval_batches=1,  # we don't evaluate
        batchsize_eval=args.gpu_num * args.node_num,  # we don't evaluate
        batchsize=batchsize,
        vvgpu=vvgpu,
        lr=1e-3,
        i64_input_key=args.i64_input_key,
        use_mixed_precision=args.use_mixed_precision,
        scaler=args.scaler,
    )

    benchmark = args.benchmark.lower()
    if benchmark == "wdl":
        model = create_wdl(solver)
    elif benchmark == "din":
        model = create_din(solver)
    elif benchmark == "dcn":
        model = create_dcn(solver)
    elif benchmark == "deepfm":
        model = create_deepfm(solver)
    else:
        raise ValueError("unsupported benchmark: " + args.benchmark)

    model.compile()
    model.summary()

    model.fit(
        max_iter=2000,
        display=200,
        eval_interval=3000,  # beyond max_iter: we don't evaluate in this benchmark
        snapshot=3000,  # beyond max_iter: we don't snapshot in this benchmark
    )
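
Assuming the create_wdl/create_din/create_dcn/create_deepfm builders are defined and the script is saved as, say, multi_node_test.py (a hypothetical name), a two-node, four-GPU run could be launched as sketched below. The effective global batch size is batchsize_per_gpu * node_num * gpu_num = 2048 * 2 * 4 = 16384, and vvgpu becomes [[0, 1, 2, 3], [0, 1, 2, 3]]:

# Hypothetical entry point and command line:
#   python multi_node_test.py --benchmark wdl --batchsize_per_gpu 2048 \
#       --node_num 2 --gpu_num 4
if __name__ == "__main__":
    multi_node_test()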
Example 3
def single_node_test(args):
    solver = hugectr.CreateSolver(max_eval_batches=args.max_eval_batches,
                                  batchsize_eval=args.batchsize_eval,
                                  batchsize=args.batchsize,
                                  vvgpu=args.vvgpu,
                                  lr=args.learning_rate,
                                  warmup_steps=args.warmup_steps,
                                  decay_start=args.decay_start,
                                  decay_steps=args.decay_steps,
                                  decay_power=args.decay_power,
                                  end_lr=args.end_lr,
                                  i64_input_key=args.i64_input_key,
                                  use_mixed_precision=args.use_mixed_precision,
                                  scaler=args.scaler)
    reader = hugectr.DataReaderParams(data_reader_type=args.data_reader_type,
                                      source=[args.source],
                                      eval_source=args.eval_source,
                                      check_type=args.check_type,
                                      cache_eval_data=args.cache_eval_data,
                                      num_samples=args.num_samples,
                                      eval_num_samples=args.eval_num_samples,
                                      float_label_dense=args.float_label_dense,
                                      num_workers=args.num_workers,
                                      slot_size_array=args.slot_size_array)
    optimizer = hugectr.CreateOptimizer(optimizer_type=args.optimizer_type,
                                        beta1=args.beta1,
                                        beta2=args.beta2,
                                        epsilon=args.epsilon,
                                        update_type=args.update_type,
                                        momentum_factor=args.momentum_factor,
                                        atomic_update=args.atomic_update)
    model = hugectr.Model(solver, reader, optimizer)
    model.construct_from_json(graph_config_file=args.json_file,
                              include_dense_network=True)
    model.compile()
    model.summary()
    if args.auc_check:
        train(model, args.max_iter, args.display, args.max_eval_batches,
              args.eval_interval, args.auc_threshold)
    else:
        model.fit(max_iter=args.max_iter,
                  display=args.display,
                  eval_interval=args.eval_interval,
                  snapshot=args.snapshot)
    return
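
single_node_test reads every solver, reader, and optimizer knob from a single args object. Below is a minimal sketch of such an object built with types.SimpleNamespace; every value and path is an illustrative placeholder, and auc_check=False keeps the flow on model.fit rather than the external train helper used in the AUC branch:

import types

import hugectr

# All values below are hypothetical placeholders, not recommendations.
args = types.SimpleNamespace(
    max_eval_batches=100, batchsize_eval=16384, batchsize=16384,
    vvgpu=[[0]], learning_rate=0.001,
    warmup_steps=1, decay_start=0, decay_steps=1, decay_power=2.0, end_lr=0.0,
    i64_input_key=False, use_mixed_precision=False, scaler=1.0,
    data_reader_type=hugectr.DataReaderType_t.Norm,
    source="./file_list.txt", eval_source="./file_list_test.txt",
    check_type=hugectr.Check_t.Sum, cache_eval_data=0,
    num_samples=0, eval_num_samples=0, float_label_dense=False,
    num_workers=12, slot_size_array=[],
    optimizer_type=hugectr.Optimizer_t.Adam,
    beta1=0.9, beta2=0.999, epsilon=0.0000001,
    update_type=hugectr.Update_t.Global,
    momentum_factor=0.0, atomic_update=True,
    json_file="./model.json", auc_check=False, auc_threshold=0.8025,
    max_iter=10000, display=200, eval_interval=1000, snapshot=100000,
)
single_node_test(args)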
Example 4
def wdl_test(json_file, export_path_prefix):
    solver = hugectr.CreateSolver(max_eval_batches=2048,
                                  batchsize_eval=16384,
                                  batchsize=16384,
                                  vvgpu=[[0, 1, 2, 3, 4, 5, 6, 7]],
                                  lr=0.001,
                                  i64_input_key=False,
                                  use_mixed_precision=True,
                                  scaler=1024,
                                  repeat_dataset=True,
                                  use_cuda_graph=True)
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Norm,
        source=["./file_list.txt"],
        eval_source="./file_list_test.txt",
        check_type=hugectr.Check_t.Sum)
    optimizer = hugectr.CreateOptimizer(
        optimizer_type=hugectr.Optimizer_t.Adam,
        beta1=0.9,
        beta2=0.999,
        epsilon=0.0001)
    model = hugectr.Model(solver, reader, optimizer)
    model.construct_from_json(graph_config_file=json_file,
                              include_dense_network=True)
    model.compile()
    model.summary()
    model.start_data_reading()
    lr_sch = model.get_learning_rate_scheduler()
    for i in range(10000):
        lr = lr_sch.get_next()
        model.set_learning_rate(lr)
        model.train(False)
        if i % 100 == 0:
            loss = model.get_current_loss()
            print("[HUGECTR][INFO] iter: {}; loss: {}".format(i, loss))
        if i % 1000 == 0 and i != 0:
            for _ in range(solver.max_eval_batches):
                model.eval()
                model.export_predictions(
                    export_path_prefix + "prediction" + str(i),
                    export_path_prefix + "label" + str(i))
            metrics = model.get_eval_metrics()
            print("[HUGECTR][INFO] iter: {}, {}".format(i, metrics))
    return
Example 5
def embedding_training_cache_test(json_file, output_dir):
    dataset = [("file_list." + str(i) + ".txt",
                "file_list." + str(i) + ".keyset") for i in range(5)]
    solver = hugectr.CreateSolver(batchsize=16384,
                                  batchsize_eval=16384,
                                  vvgpu=[[0, 1, 2, 3], [4, 5, 6, 7]],
                                  use_mixed_precision=False,
                                  i64_input_key=False,
                                  use_algorithm_search=True,
                                  use_cuda_graph=True,
                                  repeat_dataset=False)
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Norm,
        source=["file_list." + str(i) + ".txt" for i in range(5)],
        keyset=["file_list." + str(i) + ".keyset" for i in range(5)],
        eval_source="./file_list.5.txt",
        check_type=hugectr.Check_t.Sum)
    optimizer = hugectr.CreateOptimizer(
        optimizer_type=hugectr.Optimizer_t.Adam)
    hc_cnfg = hugectr.CreateHMemCache(4, 0.5, 2)
    etc = hugectr.CreateETC(
        ps_types=[hugectr.TrainPSType_t.Staged, hugectr.TrainPSType_t.Cached],
        sparse_models=[
            output_dir + "/wdl_0_sparse_model",
            output_dir + "/wdl_1_sparse_model"
        ],
        local_paths=[output_dir + "_1", output_dir + "_2"],
        hmem_cache_configs=[hc_cnfg])
    model = hugectr.Model(solver, reader, optimizer, etc)
    model.construct_from_json(graph_config_file=json_file,
                              include_dense_network=True)
    model.compile()
    model.summary()
    model.fit(num_epochs=1, eval_interval=200, display=200)
    updated_model = model.get_incremental_model()
    model.save_params_to_files("wdl")
    model.set_source(
        source=["file_list." + str(i) + ".txt" for i in range(6, 9)],
        keyset=["file_list." + str(i) + ".keyset" for i in range(6, 9)],
        eval_source="./file_list.5.txt")
    model.fit(num_epochs=1, eval_interval=200, display=200)
    updated_model = model.get_incremental_model()
    model.save_params_to_files("wdl")
Example 6
import hugectr
from mpi4py import MPI

solver = hugectr.CreateSolver(max_eval_batches=1,
                              batchsize_eval=1024,
                              batchsize=1024,
                              lr=0.01,
                              end_lr=0.0001,
                              warmup_steps=8000,
                              decay_start=48000,
                              decay_steps=24000,
                              vvgpu=[[0]],
                              repeat_dataset=True,
                              i64_input_key=True)
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.Parquet,
    source=["./multi_cross/data/train/_file_list.txt"],
    eval_source="./multi_cross/data/test/_file_list.txt",
    check_type=hugectr.Check_t.Sum,
    slot_size_array=[10001, 10001, 10001, 10001])
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Local,
                                    beta1=0.9,
                                    beta2=0.999,
                                    epsilon=0.0000001)
model = hugectr.Model(solver, reader, optimizer)
num_gpus = 1
workspace_size_per_gpu_in_mb = (int(40004 * 16 * 4 / 1000000) + 10)
model.add(
    hugectr.Input(
        label_dim=3,
Example 7
def embedding_training_cache_test(json_file, output_dir):
    dataset = [("file_list." + str(i) + ".txt",
                "file_list." + str(i) + ".keyset") for i in range(5)]
    solver = hugectr.CreateSolver(batchsize=16384,
                                  batchsize_eval=16384,
                                  vvgpu=[[0]],
                                  use_mixed_precision=False,
                                  i64_input_key=False,
                                  use_algorithm_search=True,
                                  use_cuda_graph=True,
                                  repeat_dataset=False)
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Norm,
        source=["./file_list.0.txt"],
        keyset=["./file_list.0.keyset"],
        eval_source="./file_list.5.txt",
        check_type=hugectr.Check_t.Sum)
    optimizer = hugectr.CreateOptimizer(
        optimizer_type=hugectr.Optimizer_t.Adam)
    etc = hugectr.CreateETC(
        ps_types=[hugectr.TrainPSType_t.Staged, hugectr.TrainPSType_t.Staged],
        sparse_models=[
            output_dir + "/wdl_0_sparse_model",
            output_dir + "/wdl_1_sparse_model"
        ])
    model = hugectr.Model(solver, reader, optimizer, etc)
    model.construct_from_json(graph_config_file=json_file,
                              include_dense_network=True)
    model.compile()
    model.summary()
    lr_sch = model.get_learning_rate_scheduler()
    data_reader_train = model.get_data_reader_train()
    data_reader_eval = model.get_data_reader_eval()
    embedding_training_cache = model.get_embedding_training_cache()
    data_reader_eval.set_source("file_list.5.txt")
    data_reader_eval_flag = True
    iteration = 0
    for file_list, keyset_file in dataset:
        data_reader_train.set_source(file_list)
        data_reader_train_flag = True
        embedding_training_cache.update(keyset_file)
        while True:
            lr = lr_sch.get_next()
            model.set_learning_rate(lr)
            data_reader_train_flag = model.train(False)
            if not data_reader_train_flag:
                break
            if iteration % 100 == 0:
                batches = 0
                while data_reader_eval_flag:
                    if batches >= solver.max_eval_batches:
                        break
                    data_reader_eval_flag = model.eval()
                    batches += 1
                if not data_reader_eval_flag:
                    data_reader_eval.set_source()
                    data_reader_eval_flag = True
                metrics = model.get_eval_metrics()
                print("[HUGECTR][INFO] iter: {}, metrics: {}".format(
                    iteration, metrics))
            iteration += 1
        print("[HUGECTR][INFO] trained with data in {}".format(file_list))
    updated_model = model.get_incremental_model()
    model.save_params_to_files("wdl", iteration)
Example 8
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(max_eval_batches = 300,
                              batchsize_eval = 16384,
                              batchsize = 16384,
                              lr = 0.001,
                              vvgpu = [[0]],
                              repeat_dataset = True)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
                                  source = ["./criteo_data/file_list.txt"],
                                  eval_source = "./criteo_data/file_list_test.txt",
                                  check_type = hugectr.Check_t.Sum)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
                                    update_type = hugectr.Update_t.Global,
                                    beta1 = 0.9,
                                    beta2 = 0.999,
                                    epsilon = 0.0000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
                        dense_dim = 13, dense_name = "dense",
                        data_reader_sparse_param_array = 
                        [hugectr.DataReaderSparseParam("wide_data", 30, True, 1),
                        hugectr.DataReaderSparseParam("deep_data", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, 
                            workspace_size_per_gpu_in_mb = 23,
                            embedding_vec_size = 1,
                            combiner = "sum",
                            sparse_embedding_name = "sparse_embedding2",
                            bottom_name = "wide_data",
                            optimizer = optimizer))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, 
Example 9
def _run_model(slot_sizes, total_cardinality):

    solver = hugectr.CreateSolver(
        vvgpu=[[0]],
        batchsize=2048,
        batchsize_eval=2048,
        max_eval_batches=160,
        i64_input_key=True,
        use_mixed_precision=False,
        repeat_dataset=True,
    )

    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Parquet,
        source=[DATA_DIR + "train/_file_list.txt"],
        eval_source=DATA_DIR + "valid/_file_list.txt",
        check_type=hugectr.Check_t.Non,
    )

    optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam)
    model = hugectr.Model(solver, reader, optimizer)

    model.add(
        hugectr.Input(
            label_dim=1,
            label_name="label",
            dense_dim=0,
            dense_name="dense",
            data_reader_sparse_param_array=[
                hugectr.DataReaderSparseParam("data1", len(slot_sizes) + 1, True, len(slot_sizes))
            ],
        )
    )

    model.add(
        hugectr.SparseEmbedding(
            embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
            workspace_size_per_gpu_in_mb=107,
            embedding_vec_size=16,
            combiner="sum",
            sparse_embedding_name="sparse_embedding1",
            bottom_name="data1",
            slot_size_array=slot_sizes,
            optimizer=optimizer,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.Reshape,
            bottom_names=["sparse_embedding1"],
            top_names=["reshape1"],
            leading_dim=48,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["reshape1"],
            top_names=["fc1"],
            num_output=128,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReLU,
            bottom_names=["fc1"],
            top_names=["relu1"],
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["relu1"],
            top_names=["fc2"],
            num_output=128,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.ReLU,
            bottom_names=["fc2"],
            top_names=["relu2"],
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.InnerProduct,
            bottom_names=["relu2"],
            top_names=["fc3"],
            num_output=1,
        )
    )
    model.add(
        hugectr.DenseLayer(
            layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
            bottom_names=["fc3", "label"],
            top_names=["loss"],
        )
    )
    model.compile()
    model.summary()
    model.fit(max_iter=2000, display=100, eval_interval=200, snapshot=1900)
    model.graph_to_json(graph_config_file=NETWORK_FILE)
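
_run_model depends on the module-level constants DATA_DIR and NETWORK_FILE. A hypothetical driver follows; the three slot sizes are placeholders, chosen so that len(slot_sizes) * embedding_vec_size = 3 * 16 = 48 matches the Reshape layer's leading_dim:

import hugectr

DATA_DIR = "./parquet_data/"         # hypothetical; must hold train/ and valid/ file lists
NETWORK_FILE = "./model_graph.json"  # hypothetical; graph_to_json writes here

slot_sizes = [10000, 5000, 2000]     # 3 slots -> 3 * 16 = 48 = leading_dim
_run_model(slot_sizes, total_cardinality=sum(slot_sizes))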
Example 10
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(model_name="dcn",
                              max_eval_batches=1,
                              batchsize_eval=16384,
                              batchsize=16384,
                              lr=0.001,
                              vvgpu=[[0]],
                              repeat_dataset=True,
                              use_mixed_precision=False,
                              scaler=1.0,
                              use_cuda_graph=True,
                              metrics_spec={hugectr.MetricsType.AUC: 1.0})
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.Norm,
    source=["./dcn_data/file_list.txt"],
    eval_source="./dcn_data/file_list_test.txt",
    check_type=hugectr.Check_t.Sum,
    num_workers=16)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global,
                                    beta1=0.9,
                                    beta2=0.999,
                                    epsilon=0.0001)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=13,
                  dense_name="dense",
                  data_reader_sparse_param_array=[
Example 11
import hugectr

solver = hugectr.CreateSolver(max_eval_batches=100,
                              batchsize_eval=2048,
                              batchsize=2048,
                              vvgpu=[[0]],
                              i64_input_key=False,
                              use_mixed_precision=False,
                              repeat_dataset=True,
                              use_cuda_graph=True)
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.Norm,
    source=["./file_list.txt"],
    eval_source="./file_list_test.txt",
    check_type=hugectr.Check_t.Sum)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=13,
                  dense_name="dense",
                  data_reader_sparse_param_array=[
                      hugectr.DataReaderSparseParam("data1", 1, False, 26)
                  ]))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
        workspace_size_per_gpu_in_mb=107,
        embedding_vec_size=16,
        combiner="sum",
Example 12
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(max_eval_batches=70,
                              batchsize_eval=65536,
                              batchsize=65536,
                              lr=0.5,
                              warmup_steps=300,
                              vvgpu=[[0, 1, 2, 3, 4, 5, 6, 7]],
                              repeat_dataset=True)
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.Raw,
    source=["./train_data.bin"],
    eval_source="./test_data.bin",
    num_samples=36634624,
    eval_num_samples=4584062,
    check_type=hugectr.Check_t.Non)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.SGD,
                                    update_type=hugectr.Update_t.Local,
                                    atomic_update=True)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=13,
                  dense_name="dense",
                  data_reader_sparse_param_array=[
                      hugectr.DataReaderSparseParam("data1", 2, False, 26)
                  ]))
model.add(
    hugectr.SparseEmbedding(
        embedding_type=hugectr.Embedding_t.LocalizedSlotSparseEmbeddingOneHot,
Example 13
def DLRM(args):
    vvgpu = [[g for g in range(args.gpu_num_per_node)]
             for n in range(args.num_nodes)]
    solver = hugectr.CreateSolver(max_eval_batches=args.eval_batchs,
                                  batchsize_eval=args.batch_size,
                                  batchsize=args.batch_size,
                                  lr=args.learning_rate,
                                  warmup_steps=args.warmup_steps,
                                  decay_start=args.decay_start,
                                  decay_steps=args.decay_steps,
                                  decay_power=args.decay_power,
                                  end_lr=args.end_lr,
                                  vvgpu=vvgpu,
                                  repeat_dataset=True)
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Raw,
        source=[f"{args.data_dir}/train_data.bin"],
        eval_source=f"{args.data_dir}/test_data.bin",
        num_samples=36672493,
        eval_num_samples=4584062,
        check_type=hugectr.Check_t.Non)
    optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.SGD,
                                        update_type=hugectr.Update_t.Local,
                                        atomic_update=True)
    model = hugectr.Model(solver, reader, optimizer)
    model.add(
        hugectr.Input(label_dim=1,
                      label_name="label",
                      dense_dim=13,
                      dense_name="dense",
                      data_reader_sparse_param_array=[
                          hugectr.DataReaderSparseParam("data1", 2, False, 26)
                      ]))
    model.add(
        hugectr.SparseEmbedding(
            embedding_type=hugectr.Embedding_t.LocalizedSlotSparseEmbeddingOneHot,
            slot_size_array=[
                1460, 583, 10131227, 2202608, 305, 24, 12517, 633, 3, 93145,
                5683, 8351593, 3194, 27, 14992, 5461306, 10, 5652, 2173, 4,
                7046547, 18, 15, 286181, 105, 142572
            ],
            workspace_size_per_gpu_in_mb=args.workspace_size_per_gpu_in_mb,
            embedding_vec_size=args.embedding_vec_size,
            combiner="sum",
            sparse_embedding_name="sparse_embedding1",
            bottom_name="data1",
            optimizer=optimizer))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["dense"],
                           top_names=["fc1"],
                           num_output=512))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc1"],
                           top_names=["relu1"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu1"],
                           top_names=["fc2"],
                           num_output=256))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc2"],
                           top_names=["relu2"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu2"],
                           top_names=["fc3"],
                           num_output=128))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc3"],
                           top_names=["relu3"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.Interaction,
                           bottom_names=["relu3", "sparse_embedding1"],
                           top_names=["interaction1"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["interaction1"],
                           top_names=["fc4"],
                           num_output=1024))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc4"],
                           top_names=["relu4"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu4"],
                           top_names=["fc5"],
                           num_output=1024))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc5"],
                           top_names=["relu5"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu5"],
                           top_names=["fc6"],
                           num_output=512))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc6"],
                           top_names=["relu6"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu6"],
                           top_names=["fc7"],
                           num_output=256))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.ReLU,
                           bottom_names=["fc7"],
                           top_names=["relu7"]))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.InnerProduct,
                           bottom_names=["relu7"],
                           top_names=["fc8"],
                           num_output=1))
    model.add(
        hugectr.DenseLayer(layer_type=hugectr.Layer_t.BinaryCrossEntropyLoss,
                           bottom_names=["fc8", "label"],
                           top_names=["loss"]))
    return model
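
DLRM(args) only assembles and returns the model; following the pattern of the other examples, the caller still compiles and fits it. A hedged sketch (the fit schedule below is illustrative, not from the source):

model = DLRM(args)
model.compile()
model.summary()
model.fit(max_iter=10000, display=1000, eval_interval=3000, snapshot=2000000)  # illustrative schedule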
Example 14
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(max_eval_batches=1361,
                              batchsize_eval=65536,
                              batchsize=65536,
                              lr=24.0,
                              warmup_steps=8000,
                              decay_start=48000,
                              decay_steps=24000,
                              decay_power=2.0,
                              end_lr=0.0,
                              vvgpu=[[0, 1, 2, 3, 4, 5, 6, 7]],
                              repeat_dataset=True,
                              use_mixed_precision=True,
                              scaler=1024)
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.Raw,
    source=["./train_data.bin"],
    eval_source="./test_data.bin",
    num_samples=4195197692,
    eval_num_samples=89137319,
    check_type=hugectr.Check_t.Non,
    cache_eval_data=1361)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.SGD,
                                    update_type=hugectr.Update_t.Local,
                                    atomic_update=True)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=13,
Example 15
        39884, 39043, 17289, 7420, 20263, 3, 7120, 1543, 39884, 39043, 17289,
        7420, 20263, 3, 7120, 1543, 63, 63, 39884, 39043, 17289, 7420, 20263,
        3, 7120, 1543
    ],
    # For Parquet data, check_type makes no difference
    check_type=hugectr.Check_t.Non,
    dist_type=hugectr.Distribution_t.PowerLaw,
    power_law_type=hugectr.PowerLaw_t.Short)
data_generator = DataGenerator(data_generator_params)
data_generator.generate()

# DCN train
solver = hugectr.CreateSolver(max_eval_batches=1280,
                              batchsize_eval=1024,
                              batchsize=1024,
                              lr=0.001,
                              vvgpu=[[0]],
                              i64_input_key=True,
                              repeat_dataset=True)
reader = hugectr.DataReaderParams(
    data_reader_type=data_generator_params.format,
    source=[data_generator_params.source],
    eval_source=data_generator_params.eval_source,
    # For Parquet, the generated dataset doesn't guarantee key uniqueness, so slot_size_array is still required
    slot_size_array=data_generator_params.slot_size_array,
    check_type=data_generator_params.check_type)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=data_generator_params.label_dim,
Example 16
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(model_name = "wdl",
                              max_eval_batches = 1,
                              batchsize_eval = 16384,
                              batchsize = 16384,
                              lr = 0.001,
                              vvgpu = [[0]],
                              repeat_dataset = True)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
                                  source = ["./wdl_data/file_list.txt"],
                                  eval_source = "./wdl_data/file_list_test.txt",
                                  check_type = hugectr.Check_t.Sum)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
                                    update_type = hugectr.Update_t.Global,
                                    beta1 = 0.9,
                                    beta2 = 0.999,
                                    epsilon = 0.0000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
                        dense_dim = 13, dense_name = "dense",
                        data_reader_sparse_param_array = 
                        [hugectr.DataReaderSparseParam("wide_data", 30, False, 1),
                        hugectr.DataReaderSparseParam("deep_data", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, 
                            workspace_size_per_gpu_in_mb = 23,
                            embedding_vec_size = 1,
                            combiner = "sum",
                            sparse_embedding_name = "sparse_embedding2",
                            bottom_name = "wide_data",
                            optimizer = optimizer))
Example 17
import hugectr
from mpi4py import MPI
# 1. Create Solver, DataReaderParams and Optimizer
solver = hugectr.CreateSolver(model_name = "dlrm",
                              max_eval_batches = 300,
                              batchsize_eval = 16384,
                              batchsize = 16384,
                              lr = 0.001,
                              vvgpu = [[0]],
                              repeat_dataset = True,
                              use_mixed_precision = False)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
                                  source = ["./dcn_data/file_list.txt"],
                                  eval_source = "./dcn_data/file_list_test.txt",
                                  check_type = hugectr.Check_t.Sum)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
                                    update_type = hugectr.Update_t.Global,
                                    beta1 = 0.9,
                                    beta2 = 0.999,
                                    epsilon = 0.0001)
# 2. Initialize the Model instance
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
                        dense_dim = 13, dense_name = "dense",
                        data_reader_sparse_param_array = 
                        [hugectr.DataReaderSparseParam("data1", 1, True, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, 
                            workspace_size_per_gpu_in_mb = 1000,
                            embedding_vec_size = 128,
                            combiner = "sum",
                            sparse_embedding_name = "sparse_embedding1",
Example 18
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(max_eval_batches = 100,
                              batchsize_eval = 27700,
                              batchsize = 175480,
                              lr = 0.0045,
                              vvgpu = [[0]],
                              metrics_spec = {hugectr.MetricsType.HitRate: 0.8,
                                              hugectr.MetricsType.AverageLoss: 0.0,
                                              hugectr.MetricsType.AUC: 1.0},
                              repeat_dataset = False)
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
                                  source = ["./data/ml-20m/train_filelist.txt"],
                                  eval_source = "./data/ml-20m/test_filelist.txt",
                                  check_type = hugectr.Check_t.Non,
                                  num_workers = 10)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
                                    update_type = hugectr.Update_t.Global,
                                    beta1 = 0.25,
                                    beta2 = 0.5,
                                    epsilon = 0.001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
                        dense_dim = 1, dense_name = "dense",
                        data_reader_sparse_param_array = 
                        [hugectr.DataReaderSparseParam("data", 1, True, 2)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash, 
                            workspace_size_per_gpu_in_mb = 20,
                            embedding_vec_size = 16,
                            combiner = "sum",
                            sparse_embedding_name = "gmf_embedding",
Example 19
import hugectr
from mpi4py import MPI
# 1. Create Solver, DataReaderParams and Optimizer
solver = hugectr.CreateSolver(max_eval_batches=51,
                              batchsize_eval=1769472,
                              batchsize=55296,
                              vvgpu=[[0, 1, 2, 3, 4, 5, 6, 7]],
                              repeat_dataset=True,
                              lr=24.0,
                              warmup_steps=2750,
                              decay_start=49315,
                              decay_steps=27772,
                              decay_power=2.0,
                              end_lr=0.0,
                              use_mixed_precision=True,
                              scaler=1024,
                              use_cuda_graph=False,
                              async_mlp_wgrad=True,
                              gen_loss_summary=False,
                              overlap_lr=True,
                              overlap_init_wgrad=True,
                              overlap_ar_a2a=True,
                              use_holistic_cuda_graph=True,
                              use_overlapped_pipeline=True,
                              all_reduce_algo=hugectr.AllReduceAlgo.OneShot,
                              grouped_all_reduce=False,
                              num_iterations_statistics=20,
                              metrics_spec={hugectr.MetricsType.AUC: 0.8025},
                              is_dlrm=True)
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.RawAsync,
    source=["./train_data.bin"],
Example 20
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(max_eval_batches=1,
                              batchsize_eval=4096,
                              batchsize=64,
                              lr=0.001,
                              vvgpu=[[0]],
                              repeat_dataset=True,
                              i64_input_key=True,
                              use_cuda_graph=True)
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.Parquet,
    source=["./din_data/train/_file_list.txt"],
    eval_source="./din_data/valid/_file_list.txt",
    check_type=hugectr.Check_t.Non,
    num_workers=1,
    slot_size_array=[
        192403, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 63001, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0, 801
    ])
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global,
                                    beta1=0.9,
                                    beta2=0.999,
                                    epsilon=0.000000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=0,
                  dense_name="dense",
Example 21
def set_source_raw_test(json_file):
    train_data = "./train_data.bin"
    test_data = "./test_data.bin"
    solver = hugectr.CreateSolver(max_eval_batches=5441,
                                  batchsize_eval=16384,
                                  batchsize=16384,
                                  vvgpu=[[0, 1, 2, 3, 4, 5, 6, 7]],
                                  lr=24.0,
                                  warmup_steps=8000,
                                  decay_start=480000000,
                                  decay_steps=240000000,
                                  decay_power=2.0,
                                  end_lr=0,
                                  i64_input_key=False,
                                  use_mixed_precision=True,
                                  scaler=1024,
                                  repeat_dataset=False,
                                  use_cuda_graph=True)
    reader = hugectr.DataReaderParams(
        data_reader_type=hugectr.DataReaderType_t.Raw,
        source=[train_data],
        eval_source=test_data,
        check_type=hugectr.Check_t.Non,
        num_samples=4195197692,
        eval_num_samples=89137319,
        cache_eval_data=1361)
    optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.SGD,
                                        atomic_update=True)
    model = hugectr.Model(solver, reader, optimizer)
    model.construct_from_json(graph_config_file=json_file,
                              include_dense_network=True)
    model.compile()
    model.summary()
    lr_sch = model.get_learning_rate_scheduler()
    data_reader_train = model.get_data_reader_train()
    data_reader_eval = model.get_data_reader_eval()
    data_reader_eval.set_source(test_data)
    data_reader_eval_flag = True
    iteration = 1
    for cnt in range(2):
        data_reader_train.set_source(train_data)
        data_reader_train_flag = True
        print("[HUGECTR][INFO] round: {}".format(cnt), flush=True)
        while True:
            lr = lr_sch.get_next()
            model.set_learning_rate(lr)
            data_reader_train_flag = model.train(False)
            if not data_reader_train_flag:
                break
            if iteration % 4000 == 0:
                batches = 0
                while data_reader_eval_flag:
                    if batches >= solver.max_eval_batches:
                        break
                    data_reader_eval_flag = model.eval()
                    batches += 1
                if not data_reader_eval_flag:
                    data_reader_eval.set_source()
                    data_reader_eval_flag = True
                metrics = model.get_eval_metrics()
                print("[HUGECTR][INFO] iter: {}, metrics: {}".format(
                    iteration, metrics),
                      flush=True)
            iteration += 1
        print("[HUGECTR][INFO] trained with data in {}".format(train_data),
              flush=True)
Example 22
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(
    max_eval_batches=1000,
    batchsize_eval=2770,  # 1208 for 1M dataset
    batchsize=17548,  # 32205 for 1M dataset
    lr=0.0045,
    vvgpu=[[0]],
    metrics_spec={
        hugectr.MetricsType.HitRate: 0.8,
        hugectr.MetricsType.AverageLoss: 0.0,
        hugectr.MetricsType.AUC: 1.0
    },
    repeat_dataset=True)
reader = hugectr.DataReaderParams(
    data_reader_type=hugectr.DataReaderType_t.Norm,
    source=["./data/ml-20m/train_filelist.txt"],
    eval_source="./data/ml-20m/test_filelist.txt",
    check_type=hugectr.Check_t.Non,
    num_workers=10)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.Adam,
                                    update_type=hugectr.Update_t.Global,
                                    beta1=0.25,
                                    beta2=0.5,
                                    epsilon=0.0000001)
model = hugectr.Model(solver, reader, optimizer)
model.add(
    hugectr.Input(label_dim=1,
                  label_name="label",
                  dense_dim=1,
                  dense_name="dense",
Example 23
        203931, 18598, 14092, 7012, 18977, 4, 6385, 1245, 49, 186213, 71328,
        67288, 11, 2168, 7338, 61, 4, 932, 15, 204515, 141526, 199433, 60919,
        9137, 71, 34
    ],
    nnz_array=[
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
        1, 1
    ])
data_generator = DataGenerator(data_generator_params)
data_generator.generate()

# DLRM train
solver = hugectr.CreateSolver(max_eval_batches=1280,
                              batchsize_eval=1024,
                              batchsize=1024,
                              lr=0.5,
                              warmup_steps=500,
                              vvgpu=[[0]],
                              repeat_dataset=True)
reader = hugectr.DataReaderParams(
    data_reader_type=data_generator_params.format,
    source=[data_generator_params.source],
    eval_source=data_generator_params.eval_source,
    num_samples=data_generator_params.num_samples,
    eval_num_samples=data_generator_params.eval_num_samples,
    check_type=data_generator_params.check_type)
optimizer = hugectr.CreateOptimizer(optimizer_type=hugectr.Optimizer_t.SGD,
                                    update_type=hugectr.Update_t.Local,
                                    atomic_update=True)
model = hugectr.Model(solver, reader, optimizer)
model.add(