Example 1
    # data iterators; num_parts/part_index shard the training file so that
    # each distributed worker reads a disjoint slice of it
    train_data = mx.io.LibSVMIter(data_libsvm=train_data,
                                  data_shape=(num_features, ),
                                  batch_size=batch_size,
                                  num_parts=num_worker,
                                  part_index=rank)
    eval_data = mx.io.LibSVMIter(data_libsvm=val_data,
                                 data_shape=(num_features, ),
                                 batch_size=batch_size)

    # model
    # The positive class weight specifies how much to upweight positive
    # instances in the objective function; it is used to combat the
    # extreme class imbalance in the dataset.
    positive_class_weight = 2
    model = linear_model(num_features, positive_class_weight)

    # module
    mod = mx.mod.Module(symbol=model,
                        data_names=['data'],
                        label_names=['softmax_label'])
    mod.bind(data_shapes=train_data.provide_data,
             label_shapes=train_data.provide_label)
    mod.init_params()
    # rescale_grad averages gradients over the effective global batch
    # (local batch size times the number of workers)
    optim = mx.optimizer.create(optimizer,
                                learning_rate=0.01,
                                rescale_grad=1.0 / batch_size / num_worker)
    # the kvstore (e.g. 'dist_async') synchronizes updates across workers
    mod.init_optimizer(optimizer=optim, kvstore=kv)
    # use negative log-likelihood loss as the metric
    metric = mx.gluon.metric.create(['nll_loss'])
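
Both snippets rely on a `linear_model` helper that this page does not show. The following is a minimal sketch of what such a function could look like; the CSR input, the row_sparse weight of shape `(num_features, 2)`, and the way `positive_class_weight` is folded into the cross-entropy loss are assumptions, not code recovered from the snippets.

    import mxnet as mx

    def linear_model(num_features, positive_class_weight):
        # hypothetical reconstruction of the helper used above
        # CSR input so LibSVMIter batches can be fed directly
        x = mx.symbol.Variable('data', stype='csr')
        label = mx.symbol.Variable('softmax_label')
        # row_sparse weight enables sparse gradient updates
        weight = mx.symbol.Variable('weight', shape=(num_features, 2),
                                    stype='row_sparse',
                                    init=mx.initializer.Normal(sigma=0.01))
        bias = mx.symbol.Variable('bias', shape=(2,))
        logits = mx.symbol.broadcast_add(mx.symbol.sparse.dot(x, weight), bias)
        # negative log-likelihood of the true class for each example
        log_prob = mx.symbol.pick(mx.symbol.log_softmax(logits), label, axis=-1)
        # scale is 1 for negative examples, positive_class_weight for positives
        scale = 1 + (positive_class_weight - 1) * label
        loss = mx.symbol.MakeLoss(-scale * log_prob)
        # expose class probabilities for metrics alongside the loss
        prob = mx.symbol.BlockGrad(mx.symbol.softmax(logits), name='out')
        return mx.symbol.Group([prob, loss])

The row_sparse storage type is what allows a distributed kvstore to push and pull only the weight rows actually touched by each sparse batch, rather than the full (and very wide) weight matrix.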
Example 2
    # download the Avazu click-through-rate dataset in LibSVM format
    get_avazu_data(data_dir, AVAZU['train'], AVAZU['url'])
    get_avazu_data(data_dir, AVAZU['test'], AVAZU['url'])

    # data iterators; num_parts/part_index shard the training file so that
    # each distributed worker reads a disjoint slice of it
    train_data = mx.io.LibSVMIter(data_libsvm=train_data, data_shape=(num_features,),
                                  batch_size=batch_size, num_parts=num_worker,
                                  part_index=rank)
    eval_data = mx.io.LibSVMIter(data_libsvm=val_data, data_shape=(num_features,),
                                 batch_size=batch_size)

    # model
    # The positive class weight specifies how much to upweight positive
    # instances in the objective function; it is used to combat the
    # extreme class imbalance in the dataset.
    positive_class_weight = 2
    model = linear_model(num_features, positive_class_weight)

    # module
    mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['softmax_label'])
    mod.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label)
    mod.init_params()
    # rescale_grad averages gradients over the effective global batch
    # (local batch size times the number of workers)
    optim = mx.optimizer.create(optimizer, learning_rate=0.01,
                                rescale_grad=1.0 / batch_size / num_worker)
    # the kvstore (e.g. 'dist_async') synchronizes updates across workers
    mod.init_optimizer(optimizer=optim, kvstore=kv)
    # use negative log-likelihood loss as the metric
    metric = mx.metric.create(['nll_loss'])

    # callback that logs training speed and metric values every 100 batches
    speedometer = mx.callback.Speedometer(batch_size, 100)

    logging.info('Training started ...')
    for epoch in range(num_epoch):
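        # The epoch body below is a hedged sketch of a typical Module-API
        # training loop; only mod, metric, speedometer, train_data, eval_data
        # and logging come from the snippet itself, the remaining calls are
        # assumptions.
        nbatch = 0
        metric.reset()
        train_data.reset()
        for batch in train_data:
            nbatch += 1
            # forward and backward pass on the current mini-batch
            mod.forward_backward(batch)
            # apply the optimizer update (synchronized through the kvstore)
            mod.update()
            # accumulate nll_loss over the training batches
            mod.update_metric(metric, batch.label)
            # the speedometer logs throughput every 100 batches
            speedometer(mx.model.BatchEndParam(epoch=epoch, nbatch=nbatch,
                                               eval_metric=metric,
                                               locals=locals()))
        # score the held-out set at the end of each epoch
        logging.info('epoch %d, eval: %s', epoch,
                     mod.score(eval_data, 'nll_loss'))
    logging.info('Training completed.')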