Example #1
def main():
    log.set_logger(logger)

    logger.debug("Starting mongo sender")
    prometheus_instrumentation.start_http_server(9902)
    mongo_piper = MongoPiper(MONGO_GROUP_ID)
    mongo_piper.consume_pipe()
Example #2
def main():
    # BASIC SETUP
    logger = set_logger()
    args = NET_parse_args(
        main_description=
        "Training launcher for Neural net classifier on HIGGS benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = build_model(args, -1)
    os.makedirs(model.results_directory, exist_ok=True)
    config = Config()
    config_table = evaluate_config(config)
    config_table.to_csv(
        os.path.join(model.results_directory, 'config_table.csv'))
    # RUN
    if not args.conditional_only:
        eval_table = get_eval_table(args, model.results_directory)
    if not args.estimate_only:
        eval_conditional = get_eval_conditional(args, model.results_directory)
    if not args.estimate_only and not args.conditional_only:
        eval_table = pd.concat([eval_table, eval_conditional], axis=1)
        # EVALUATION
        print_line()
        print_line()
        print(eval_table)
        print_line()
        print_line()
        eval_table.to_csv(
            os.path.join(model.results_directory, 'evaluation.csv'))
    gather_images(model.results_directory)
Example #3
def main():
    # BASIC SETUP
    logger = set_logger()
    args = REG_parse_args(
        main_description="Training launcher for Regressor on S3D2 benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    args.net = AR5R5E(n_in=3, n_out=2, n_extra=2)
    args.optimizer = get_optimizer(args)
    model = get_model(args, Regressor)
    model.set_info(DATA_NAME, BENCHMARK_NAME, -1)
    pb_config = S3D2Config()

    # RUN
    results = [run(args, i_cv) for i_cv in range(N_ITER)]
    results = pd.concat(results, ignore_index=True)
    results.to_csv(os.path.join(model.results_directory, 'results.csv'))
    # EVALUATION
    eval_table = evaluate_estimator(pb_config.INTEREST_PARAM_NAME, results)
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(model.results_directory, 'evaluation.csv'))
    gather_images(model.results_directory)
Example #4
def main():
    # BASIC SETUP
    logger = set_logger()
    args = NET_parse_args(
        main_description=
        "Training launcher for Gradient boosting on S3D2 benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = build_model(args, -1)
    os.makedirs(model.results_directory, exist_ok=True)
    # config = Config()
    # config_table = evaluate_config(config)
    # config_table.to_csv(os.path.join(model.results_directory, 'config_table.csv'))
    # RUN
    evaluation = [run(args, i_cv) for i_cv in range(N_ITER)]
    # EVALUATION
    evaluation = pd.concat(evaluation)
    evaluation.to_csv(os.path.join(model.results_directory, "evaluation.csv"))
    plot_auc(evaluation,
             model_name=model.base_name,
             directory=model.results_directory)
    plot_accuracy(evaluation,
                  model_name=model.base_name,
                  directory=model.results_directory)
    if False:  # Temporarily removed
        gather_images(model.results_directory)
Example #5
def main():
    # BASIC SETUP
    logger = set_logger()
    args = REG_parse_args(
        main_description=
        "Training launcher for Marginal Regressor on HIGGS benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = build_model(args, -1)
    os.makedirs(model.results_directory, exist_ok=True)
    config = Config()
    config_table = evaluate_config(config)
    config_table.to_csv(
        os.path.join(model.results_directory, 'config_table.csv'))
    # RUN
    eval_table = get_eval_table(args, model.results_directory)
    # EVALUATION
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(model.results_directory, 'evaluation.csv'))
    gather_images(model.results_directory)
Example #6
def main():
    # BASIC SETUP
    logger = set_logger()
    args = REG_parse_args(
        main_description=
        "Training launcher for Gradient boosting on S3D2 benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = build_model(args, -1)
    os.makedirs(model.results_directory, exist_ok=True)
    config = Config()
    config_table = evaluate_config(config)
    config_table.to_csv(
        os.path.join(model.results_directory, 'config_table.csv'))
    # RUN
    results = [run(args, i_cv) for i_cv in range(N_ITER)]
    results = pd.concat(results, ignore_index=True)
    results.to_csv(os.path.join(model.results_directory, 'estimations.csv'))
    # EVALUATION
    eval_table = evaluate_estimator(config.INTEREST_PARAM_NAME, results)
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(model.results_directory, 'evaluation.csv'))
    gather_images(model.results_directory)
Example #7
def main():
    logger = set_logger()
    directory = os.path.join(DIRECTORY, "nll_contour")
    os.makedirs(directory, exist_ok=True)
    args = parse_args()

    train_generator, valid_generator, test_generator = get_generators()

    config = Config()
    model = load_some_NN(cuda=args.cuda)
    compute_nll = get_nll_computer(model, config, valid_generator,
                                   test_generator)

    nll = compute_nll(*config.CALIBRATED)
    logger.info(f"calib nll = {nll}")
    nll = compute_nll(*config.TRUE)
    logger.info(f"calib nll = {nll}")

    f = lambda xk: compute_nll(*xk)
    xk = np.array(list(config.TRUE))
    print(xk)
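    # Finite-difference approximation of the NLL gradient at the true parameters,
    # as a sanity check before handing the objective to BFGS below.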
    EPSILON = 1e-8
    epsilon = np.array([EPSILON] * 2)
    grad = approx_fprime(xk, f, epsilon)
    print(grad, grad.dot(grad.T))

    logger.info(f"Running BFGS on the NLL")
    x_0 = np.array(list(config.CALIBRATED))
    print(fmin_bfgs(f, x_0))
Example #8
def main():
    logger = set_logger()
    root_directory = os.path.join(DIRECTORY, "nll_contour")
    os.makedirs(root_directory, exist_ok=True)
    args = parse_args()

    N_CV = 3
    # FIXME: remove lili and STEP to use all iterations!
    STEP = 1
    lili = list(Config().iter_test_config())[::STEP]
    N_ITER = len(lili)
    logger.info(f"{N_CV} cv and {N_ITER} iteractions ({N_ITER*N_CV} loops)")
    data = []
    for i_cv in range(N_CV):
        model = load_some_NN(i_cv=i_cv, cuda=args.cuda)
        model.to_double()
        # model = load_some_GB(i_cv=i_cv)
        for i_iter, config in enumerate(lili):
            i_iter = i_iter * STEP
            values = run_cv_iter(args, i_cv, i_iter, config, model,
                                 root_directory)
            data.append(values)
    data = pd.DataFrame(data)
    fname = os.path.join(root_directory, "data.csv")
    data.to_csv(fname)
Example #9
def supply_mayang_yunying(date):
    mysql_table = 'crm_supply_mayang_yunying_month'
    logger = log.set_logger('mayang_yunying.txt')

    # hive_sql = _sql.get_crm_mayang_yunying_sql(date)
    with open('./loadfile/mayang_yunying_month.txt', 'r',
              encoding='UTF-8') as hive_sql:
        hive_mysql_diff(logger, hive_sql, mysql_table,
                        mayang_yunying_dict.keys(), date, True)
    pass
Example #10
def main():
    # BASIC SETUP
    logger = set_logger()
    args = REG_parse_args(
        main_description="Training launcher for Regressor on S3D2 benchmark")
    logger.info(args)
    flush(logger)

    # Setup model
    logger.info("Setup model")
    model = build_model(args, 0)
    os.makedirs(model.results_directory, exist_ok=True)

    # Setup data
    logger.info("Setup data")
    config = Config()
    config_table = evaluate_config(config)
    config_table.to_csv(
        os.path.join(model.results_directory, 'config_table.csv'))
    seed = SEED + 99999
    train_generator, valid_generator, test_generator = get_generators_torch(
        seed, cuda=args.cuda, GeneratorClass=GeneratorClass)
    train_generator = GeneratorCPU(train_generator)
    train_generator = TrainGenerator(param_generator, train_generator)
    valid_generator = GeneratorCPU(valid_generator)
    test_generator = GeneratorCPU(test_generator)

    i_cv = 0
    result_row = {'i_cv': i_cv}

    # TRAINING / LOADING
    train_or_load_neural_net(model, train_generator, retrain=args.retrain)

    # CHECK TRAINING
    result_row.update(evaluate_neural_net(model, prefix='valid'))
    evaluate_regressor(model, prefix='valid')
    print_line()

    result_table = [
        run_iter(model, result_row, i, test_config, valid_generator,
                 test_generator)
        for i, test_config in enumerate(config.iter_test_config())
    ]
    result_table = pd.DataFrame(result_table)
    result_table.to_csv(os.path.join(model.results_directory, 'results.csv'))

    logger.info('Plot params')
    param_names = [CALIB_PARAM_NAME]
    for name in param_names:
        plot_params(name,
                    result_table,
                    title=model.full_name,
                    directory=model.results_directory)

    logger.info('DONE')
Example #11
def supply_warehose_rank(date):
    '''Out-of-stock items.'''
    mysql_table = 'crm_supply_warehouse_month_top'
    logger = log.set_logger('warehouse_month_top.txt')

    # hive_sql=_sql.get_crm_warehouse_month_top_sql(date)
    with open('./loadfile/warehouse_month.txt', 'r',
              encoding='UTF-8') as hive_sql:
        hive_mysql_diff(logger, hive_sql, mysql_table, warehouse_dict.keys(),
                        date, True)

    pass
Example #12
def main():
    # BASIC SETUP
    logger = set_logger()
    args = GB_parse_args(main_description="Training launcher for Gradient boosting on S3D2 benchmark")
    logger.info(args)
    flush(logger)
    # Config
    config = Config()
    config.TRUE = Parameter(r=0.1, lam=2.7, mu=0.1)

    train_generator = Generator(SEED)
    valid_generator = Generator(SEED+1)
    test_generator  = Generator(SEED+2)
    X_test, y_test, w_test = test_generator.generate(*config.TRUE, n_samples=config.N_TESTING_SAMPLES)

    # for nuisance in p(nuisance | data)
    nuisance_param_sample = [param_generator().nuisance_parameters for _ in range(25)]
    average_list = []
    variance_list = []
    all_results = []
    for nuisance_params in nuisance_param_sample:
        logger.info(f"nuisance_params = {nuisance_params}")
        estimator_values = []
        results = {name : value for name, value in zip(config.TRUE.nuisance_parameters_names, nuisance_params)}
        for i_cv in range(N_ITER):
            clf = build_model(args, i_cv)
            parameters = Parameter(*nuisance_params, config.CALIBRATED.interest_parameters)
            print(parameters)
            n_samples = config.N_TRAINING_SAMPLES
            X_train, y_train, w_train = train_generator.generate(*parameters, n_samples=n_samples)
            logger.info(f"Training {clf.full_name}")
            # TODO: is it OK to provide w_train to the classifier, or is it useless?
            clf.fit(X_train, y_train, w_train)
            compute_summaries = ClassifierSummaryComputer(clf, n_bins=10)
            nll_computer = NLLComputer(compute_summaries, valid_generator, X_test, w_test, config=config)
            compute_nll = lambda mu : nll_computer(*nuisance_params, mu)
            minimizer = get_minimizer(compute_nll)
            results.update(evaluate_minuit(minimizer, [config.TRUE.interest_parameters]))
            all_results.append(results.copy())
            # TODO : Add results to some csv
            estimator_values.append(results['mu'])
        average_list.append(np.mean(estimator_values))
        variance_list.append(np.var(estimator_values))

    logger.info(f"average_list {average_list}")
    logger.info(f"variance_list {variance_list}")
    v_stat = np.mean(variance_list)
    v_syst = np.var(average_list)
    v_total = v_stat + v_syst
    logger.info(f"V_stat = {v_stat}")
    logger.info(f"V_syst = {v_syst}")
    logger.info(f"V_total = {v_total}")
Example #13
def product_rank_month_year(date, month=True):
    '''Top products.'''
    if month:
        logger = log.set_logger('product_rank_month.txt')
        mysql_table = "crm_supply_product_month_top"
    else:
        logger = log.set_logger('product_rank_year.txt')
        mysql_table = "crm_supply_product_year_top"

    hive_sql = get_crm_product_month_year_top_sql(date, month)
    # Take the top N per group
    # N=3
    # hive_sql="select * from ("+hive_sql+") t where rank<="+str(N)

    # hive_file: load the SQL from a file instead
    if month:
        file = 'product_month.txt'
    else:
        file = 'product_rank_year.txt'
    with open('./loadfile/' + file, 'r', encoding='UTF-8') as hive_sql:
        hive_mysql_diff(logger, hive_sql, mysql_table, product_dict.keys(),
                        date, month)
Example #14
def main():
    # BASIC SETUP
    logger = set_logger()
    args = INFERNO_parse_args(
        main_description=
        "Training launcher for Gradient boosting on S3D2 benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = build_model(args, -1)
    os.makedirs(model.results_directory, exist_ok=True)
    config = Config()
    config_table = evaluate_config(config)
    config_table.to_csv(
        os.path.join(model.results_directory, 'config_table.csv'))
    # RUN
    if args.load_run:
        logger.info(f'Loading previous runs [{args.start_cv},{args.end_cv}[')
        directory = model.results_directory
        estimations = load_estimations(directory,
                                       start_cv=args.start_cv,
                                       end_cv=args.end_cv)
        conditional_estimations = load_conditional_estimations(
            directory, start_cv=args.start_cv, end_cv=args.end_cv)
    else:
        logger.info(f'Running runs [{args.start_cv},{args.end_cv}[')
        results = [
            run(args, i_cv) for i_cv in range(args.start_cv, args.end_cv)
        ]
        estimations = [e0 for e0, e1 in results]
        estimations = pd.concat(estimations, ignore_index=True)
        conditional_estimations = [e1 for e0, e1 in results]
        conditional_estimations = pd.concat(conditional_estimations)
    estimations.to_csv(os.path.join(model.results_directory,
                                    'estimations.csv'))
    conditional_estimations.to_csv(
        os.path.join(model.results_directory, 'conditional_estimations.csv'))
    # EVALUATION
    eval_table = evaluate_estimator(config.INTEREST_PARAM_NAME, estimations)
    eval_conditional = evaluate_conditional_estimation(
        conditional_estimations,
        interest_param_name=config.INTEREST_PARAM_NAME)
    eval_table = pd.concat([eval_table, eval_conditional], axis=1)
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(model.results_directory, 'evaluation.csv'))
    gather_images(model.results_directory)
Example #15
 def __init__(
     self,
     model,
     optimizer=None,
     scheduler=None,
     save_dir="",
     save_to_disk=None,
     logger=None,
 ):
     self.model = model
     self.optimizer = optimizer
     self.scheduler = scheduler
     self.save_dir = save_dir
     self.save_to_disk = save_to_disk
     if logger is None:
         logger = set_logger(__name__)
     self.logger = logger
Example #16
def main():
    # BASIC SETUP
    logger = set_logger()
    args = TP_parse_args(main_description="Training launcher for INFERNO on GG benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = build_model(args, -1)
    os.makedirs(model.results_directory, exist_ok=True)
    # RUN
    logger.info(f'Running runs [{args.start_cv},{args.end_cv}[')
    results = [run(args, i_cv) for i_cv in range(args.start_cv, args.end_cv)]
    results = pd.concat(results, ignore_index=True)
    # EVALUATION
    results.to_csv(os.path.join(model.results_directory, 'fisher.csv'))
    print(results)
    print("DONE !")
Example #17
def main():
    logger = set_logger()
    directory = os.path.join(DIRECTORY, "nll_contour")
    os.makedirs(directory, exist_ok=True)
    args = parse_args()
    i_cv = 0
    seed = SEED + i_cv * 5
    train_generator, valid_generator, test_generator = get_generators_torch(seed, cuda=args.cuda)
    train_generator = GeneratorCPU(train_generator)
    valid_generator = GeneratorCPU(valid_generator)
    test_generator = GeneratorCPU(test_generator)

    model = load_some_NN()

    config = Config()
    config_table = evaluate_config(config)
    os.makedirs(os.path.join(directory, model.name), exist_ok=True)
    config_table.to_csv(os.path.join(directory, model.name, 'config_table.csv'))
    for i_iter, test_config in enumerate(config.iter_test_config()):
        do_iter(test_config, model, i_iter, valid_generator, test_generator, directory)
Example #18
def main():
    logger = set_logger()
    logger.info("Hello world !")
    os.makedirs(DIRECTORY, exist_ok=True)
    set_plot_config()
    args = None

    config = Config()
    results = [run(args, i_cv) for i_cv in range(N_ITER)]
    results = pd.concat(results, ignore_index=True)
    results.to_csv(os.path.join(DIRECTORY, 'results.csv'))
    # EVALUATION
    eval_table = evaluate_estimator(config.TRUE.interest_parameters_names, results)
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(DIRECTORY, 'evaluation.csv'))
    gather_images(DIRECTORY)
Example #19
def main():
    # BASIC SETUP
    logger = set_logger()
    args = GB_parse_args(main_description="Training launcher for Gradient boosting on AP1 benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = get_model(args, GradientBoostingModel)
    model.set_info(BENCHMARK_NAME, -1)
    pb_config = AP1Config()
    # RUN
    results = [run(args, i_cv) for i_cv in range(N_ITER)]
    results = pd.concat(results, ignore_index=True)
    results.to_csv(os.path.join(model.directory, 'results.csv'))
    # EVALUATION
    eval_table = evaluate_estimator(pb_config.INTEREST_PARAM_NAME, results)
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(model.directory, 'evaluation.csv'))
    gather_images(model.directory)
Example #20
def main():
    # BASIC SETUP
    logger = set_logger()
    args = INFERNO_parse_args(
        main_description="Training launcher for Regressor on S3D2 benchmark")
    logger.info(args)
    flush(logger)
    # INFO
    model = build_model(args, -1)
    pb_config = Config()
    # RUN
    results = [run(args, i_cv) for i_cv in range(N_ITER)]
    results = pd.concat(results, ignore_index=True)
    results.to_csv(os.path.join(model.directory, 'results.csv'))
    # EVALUATION
    eval_table = evaluate_estimator(pb_config.INTEREST_PARAM_NAME, results)
    print_line()
    print_line()
    print(eval_table)
    print_line()
    print_line()
    eval_table.to_csv(os.path.join(model.directory, 'evaluation.csv'))
    gather_images(model.directory)
Example #21
import datetime
import argparse
import numpy as np
from utils.log import set_logger
from utils.metric_logger import MetricLogger
from utils.checkpoint import Checkpointer
from solver import make_optimizer
from solver import make_scheduler

import net.network as ntk
from net import pool_aggregator as pa
from data.fashion import FashionDataset
from torch.utils.data import DataLoader
from data.dataloader import get_loader

logger = set_logger("trainer")

nets = {
    "alexnet": ntk.AlexNet,
    "vggnet": ntk.VGGNet,
    "resnet": ntk.ResNet,
}

def train(args):
    try:
        model = nets[args.net](args.margin, args.omega, args.use_hardtriplet)
        model.to(args.device)
    except Exception as e:
        logger.error("Initialize {} error: {}".format(args.net, e))
        return
    logger.info("Training {}.".format(args.net))
Example #22
def main():
    # BASIC SETUP
    logger = set_logger()
    args = parse_args()
    logger.info(args)
    flush(logger)

    # SET MODEL
    model = get_model(args)

    # LOAD/GENERATE DATA
    logger.info('Generating data ...')
    pb_config = Config()
    generator = Synthetic3D(seed=config.SEED, n_expected_events=1050)
    generator.N_SIG = pb_config.N_SIG
    generator.N_BKG = pb_config.N_BKG
    D_train = generator.train_sample(pb_config.CALIBRATED_R,
                                     pb_config.CALIBRATED_LAMBDA,
                                     pb_config.CALIBRATED_MU,
                                     n_samples=pb_config.N_TRAINING_SAMPLES)
    D_test = generator.test_sample(pb_config.CALIBRATED_R,
                                   pb_config.CALIBRATED_LAMBDA,
                                   pb_config.CALIBRATED_MU)
    X_train, y_train, w_train = split_data_label_weights(D_train)
    X_test, y_test, w_test = split_data_label_weights(D_test)

    # TRAINING
    model.fit(X_train, y_train, w_train)
    # SAVE MODEL
    i = 99
    model_name = '{}-{}'.format(model.get_name(), i)
    model_path = os.path.join(config.SAVING_DIR, model_name)
    logger.info("Saving in {}".format(model_path))
    os.makedirs(model_path, exist_ok=True)
    model.save(model_path)

    # CHECK TRAINING
    plot_test_distrib(model, model_name, model_path, X_test, y_test)
    plot_summaries(model, model_name, model_path, X_test, y_test, w_test)

    # NLL
    summary_computer = lambda X, w: compute_summaries(model, X, w, n_bins=10)
    D_final = generator.final_sample(pb_config.TRUE_R, pb_config.TRUE_LAMBDA,
                                     pb_config.TRUE_MU)
    X_final, y_final, w_final = split_data_label_weights(D_final)
    compute_nll = Synthetic3DNLL(summary_computer, generator, X_final, w_final)

    # NLL PLOTS
    plot_R_around_min(compute_nll, model_path)
    plot_LAMBDA_around_min(compute_nll, model_path)
    plot_MU_around_min(compute_nll, model_path)

    # MINIMIZE NLL
    minimizer = iminuit.Minuit(
        compute_nll,
        errordef=ERRORDEF_NLL,
        r=pb_config.CALIBRATED_R,
        error_r=pb_config.CALIBRATED_R_ERROR,
        #limit_r=(0, None),
        lam=pb_config.CALIBRATED_LAMBDA,
        error_lam=pb_config.CALIBRATED_LAMBDA_ERROR,
        limit_lam=(0, None),
        mu=pb_config.CALIBRATED_MU,
        error_mu=pb_config.CALIBRATED_MU_ERROR,
        limit_mu=(0, 1),
    )
    minimizer.print_param()
    fmin, param = minimizer.migrad()
    param = minimizer.hesse()
    for name, (value,
               err) in {p['name']: (p['value'], p['error'])
                        for p in param}.items():
        print('{name:3} = {value} ({err})'.format(**locals()))

    print('true_r', pb_config.TRUE_R)
    print('true_lam', pb_config.TRUE_LAMBDA)
    print('true_mu', pb_config.TRUE_MU)

    print(param[2]['value'] * 1050, 'signal events estimated')
    print(param[2]['error'] * 1050, 'error on # estimated sig event')
    print('Done.')
Example #23
Product exposure UV_CTR: product click UV / product exposure UV
Original ordering accounts: all ordering accounts brought in by the application type being queried; ordering accounts are counted by user account id
Original ordering lines:
Original order lines: the order lines brought in by the module being queried
Order-line share: the share of site-wide order lines produced by the module being queried. Formula = original order lines / site-wide order lines
PV order conversion rate:
UV order conversion rate: the share of click UV on recommended products that turns into order lines for the module being queried. Formula = original order lines / original click UV
Maximum exposure position: the deepest exposure position of a product for the module being queried, computed from product exposures and slot order
Average browse position: the average exposure position of products for the module being queried
Maximum click position: the deepest click position of a product for the module being queried
Average click position: the average click depth of products for the module being queried
Per-user click position: product click PV / product click UV
'''
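
# Minimal sketch of two of the formulas described above, using hypothetical
# counts; the function and argument names are illustrative placeholders only,
# not part of this project's code.
def uv_ctr(click_uv, exposure_uv):
    """Product UV_CTR = product click UV / product exposure UV."""
    return click_uv / exposure_uv if exposure_uv else 0.0


def uv_order_conversion_rate(original_order_lines, original_click_uv):
    """UV order conversion rate = original order lines / original click UV."""
    return original_order_lines / original_click_uv if original_click_uv else 0.0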

# Logger setup
reco = log.set_logger('reco.txt')
# Recommendation ClickHouse connection
conn_ck = util.connect_clickhouse(host='10.7.30.177')

def ck_vs_davi(webdata,filters):
    '''Compare Davinci data with ClickHouse (ck) data.'''
    filterdict=dict(filters)
    data={}
    if len(webdata)>0:
        i=0
        for data in webdata:
            if data['商品ID']=='-1':
                data={}
                continue

            if '商品PV点击率' in data and data['商品PV点击率'] is not None:
Example #24
def main():
    # BASIC SETUP
    logger = set_logger()
    args = GB_parse_args(
        main_description=
        "Training launcher for Gradient boosting on S3D2 benchmark")
    logger.info(args)
    flush(logger)
    # Config
    config = Config()
    config.TRUE = Parameter(rescale=0.9, mu=0.1)
    train_generator = Generator(SEED)
    valid_generator = Generator(SEED + 1)
    test_generator = Generator(SEED + 2)
    X_test, y_test, w_test = test_generator.generate(
        *config.TRUE, n_samples=config.N_TESTING_SAMPLES)

    # for nuisance in p(nuisance | data)
    nuisance_param_sample = [
        param_generator().nuisance_parameters for _ in range(25)
    ]
    average_list = []
    variance_list = []
    result_table = []
    for nuisance_params in nuisance_param_sample:
        logger.info(f"nuisance_params = {nuisance_params}")
        estimator_values = []
        for i_cv in range(N_ITER):
            clf = build_model(args, i_cv)
            parameters = Parameter(*nuisance_params,
                                   config.CALIBRATED.interest_parameters)
            print(parameters)
            n_samples = config.N_TRAINING_SAMPLES
            X_train, y_train, w_train = train_generator.generate(
                *parameters, n_samples=n_samples)
            logger.info(f"Training {clf.full_name}")
            clf.fit(X_train, y_train, w_train)
            compute_summaries = ClassifierSummaryComputer(clf, n_bins=10)
            nll_computer = NLLComputer(compute_summaries,
                                       valid_generator,
                                       X_test,
                                       w_test,
                                       config=config)
            compute_nll = lambda mu: nll_computer(*nuisance_params, mu)
            minimizer = get_minimizer(compute_nll)
            results = evaluate_minuit(minimizer,
                                      [config.TRUE.interest_parameters])
            estimator_values.append(results['mu'])
            results['i_cv'] = i_cv
            results.update(params_to_dict(parameters, suffix='true'))
            result_table.append(results.copy())
        average_list.append(np.mean(estimator_values))
        variance_list.append(np.var(estimator_values))

    model = build_model(args, 0)
    model.set_info(DATA_NAME, BENCHMARK_NAME, 0)
    save_directory = model.results_path
    os.makedirs(save_directory, exist_ok=True)
    result_table = pd.DataFrame(result_table)
    result_table.to_csv(os.path.join(save_directory, 'results.csv'))
    logger.info(f"average_list {average_list}")
    logger.info(f"variance_list {variance_list}")
    v_stat = np.mean(variance_list)
    v_syst = np.var(average_list)
    v_total = v_stat + v_syst
    logger.info(f"V_stat = {v_stat}")
    logger.info(f"V_syst = {v_syst}")
    logger.info(f"V_total = {v_total}")
    eval_dict = {"V_stat": v_stat, "V_syst": v_syst, "V_total": v_total}
    eval_path = os.path.join(save_directory, 'info.json')
    with open(eval_path, 'w') as f:
        json.dump(eval_dict, f)
Example #25
from utils import util, log
from kafka import KafkaProducer
import json
import requests, pymysql
from utils.decorate import complog
from utils.util import simplediff
# Logger setup
oper = log.set_logger('operation.txt')
#mysql conn
mysql_cursor = util.connect_mysql(host="10.255.254.225",
                                  port=3308,
                                  user="******",
                                  password="******",
                                  database="ioc_adm")
# ClickHouse connection
ck_client = util.client_ck(host='10.7.30.148',
                           user='******',
                           password='******',
                           database='ioc')

# Kafka cluster addresses
kafka_hosts = [
    '10.255.242.91:9092', '10.255.242.92:9092', '10.255.242.93:9092',
    '10.255.242.94:9092', '10.255.242.95:9092'
]

TableName = 'smart_cust_detail'
CkTableName = 'ioc.user_tag_base_info_all'
mysql_table = 'ioc_adm.smart_cust_detail'

Example #26
        except requests.exceptions.ConnectionError as e:
            error_code = 521
            request_info = "{},{},{},{},{}/{},{},{},{},{},\"{}\",{}".format(
                datetime.now(), ip, token, resource_method, request_path,
                path_param, cookie, accept, content_type, ip,
                random_user_agent, error_code)
            util_methods.write_to_file(dataset_path, request_info, "a")
            logger.error("Connection Error: {}".format(e))
        except requests.exceptions.RequestException:
            logger.exception("Request Failure")


# Program Execution
if __name__ == '__main__':

    logger = log.set_logger('Extreme_Delete')
    try:
        with open(
                os.path.abspath(
                    os.path.join(__file__,
                                 "../../../../../config/api_details.yaml")),
                "r") as config_file:
            config = yaml.load(config_file, Loader=yaml.FullLoader)

        with open(
                os.path.abspath(
                    os.path.join(__file__,
                                 "../../../../../config/attack-tool.yaml")),
                "r") as attack_config_file:
            attack_config = yaml.load(attack_config_file,
                                      Loader=yaml.FullLoader)
Example #27
import torch.nn.functional as F

import sys
import os
from tqdm import tqdm
import time
import argparse
import numpy as np
from utils.log import set_logger
from utils.checkpoint import Checkpointer
from utils.pca import PCAW

import net.network as ntk
from data.dataloader import get_loader

logger = set_logger("extractor")

nets = {
    "alexnet": ntk.AlexNet,
    "vggnet": ntk.VGGNet,
    "resnet": ntk.ResNet,
}

def extract_image_feature(args):
    try:
        model = nets[args.net]()
        model.to(args.device)
    except Exception as e:
        logger.error("Initialize {} error: {}".format(args.net, e))
        return 
    logger.info("Extracting {} feature.".format(args.net))
Example #28
                        accept,
                        content_type,
                        random_ip,
                        random_user_agent,
                        error_code,
                    )
                    util_methods.write_to_file(dataset_path, request_info, "a")
                    logger.error("Connection Error: {}".format(e))
                except requests.exceptions.RequestException:
                    logger.exception("Request Failure")


# Program Execution
if __name__ == '__main__':

    logger = log.set_logger("Stolen_TOKEN")

    # Constants
    STOLEN_TOKEN = 'stolen_token'

    try:
        with open(
                os.path.abspath(
                    os.path.join(
                        __file__,
                        "../../../../traffic-tool/data/runtime_data/scenario_pool.sav"
                    )), "rb") as scenario_file:
            scenario_pool = pickle.load(scenario_file, )

        with open(
                os.path.abspath(
Example #29
def main():
    log.set_logger(logger)
    logger.debug('Solr piper starting')
    prometheus_instrumentation.start_http_server(9901)
    solr_piper = SolrPiper(SOLR_GROUP_ID)
    solr_piper.consume_pipe()
Example #30
#encoding=utf-8
import json
import requests
from utils import util, log
from db.map.report_map import path1_name
from api.service.report import report_api_bussiness_post
from db.dao.report import report_sql
from utils import decorate

# Logger setup
report = log.set_logger('report.txt')


@util.retry(2)
def get_pathlist(level=1, parentid=0):
    api = "http://10.4.32.223:8085/api/v3/reportForm/categoryList?level={}&parentId={}".format(
        level, parentid)
    s = requests.Session()

    req = s.get(url=api)
    apiresult = json.loads(req.content.decode('utf-8'))
    apiresult_list = apiresult['payload']['list']
    apilist = []
    if apiresult_list:
        for ele in apiresult_list:
            apilist.append(ele['path'])
    return apilist


def get_all_path_list():
    categorylist = []