Example #1
def summary_result(result):
    """Log a tab-separated metric summary for every experiment."""
    logger = get_logger(name='summary_result',
                        file_name=log_dir + '/summary_result',
                        verbose=True)
    logger.info('\tTraining set\t\t\t\t\t\t\tTest set\t\t\t\t\t\t')
    logger.info(
        'TPR\tTNR\tAUC\tTPR\tTNR\tAUC\tR1\tR2\tR3\tR4\tR5\tP1\tP2\tP3\tP4\tP5')
    for train_result, test_result, lift_result, param in result:

        response_matrix = np.array(
            lift_result[0])[:, 0:5]  # keep only the first 5 deciles
        profit_matrix = np.array(
            lift_result[1])[:, 0:5]  # keep only the first 5 deciles
        all_metric = np.hstack(
            [train_result, test_result, response_matrix, profit_matrix])
        all_metric = np.vstack([
            all_metric,
            np.mean(all_metric, axis=0),  # append column-wise mean
            np.std(all_metric, axis=0)    # append column-wise std
        ])
        logger.info(param)

        for row in all_metric:
            logger.info('\t'.join(str(y) for y in row))
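Every example on this page calls a get_logger helper that is not shown. A minimal sketch of what such a helper typically looks like (an assumption, not the project's actual implementation):

import logging

def get_logger(name, file_name, verbose=False):
    # Hypothetical helper: a named logger that writes to file_name and,
    # when verbose, also echoes to the console.
    logger = logging.getLogger(name)
    if not logger.handlers:  # avoid stacking duplicate handlers on repeated calls
        logger.setLevel(logging.INFO)
        logger.addHandler(logging.FileHandler(file_name))
        if verbose:
            logger.addHandler(logging.StreamHandler())
    return logger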
Example #2
def run_experiment_parallel():
    # All eight experiments share identical hyperparameters and differ
    # only in the hidden-layer sizes h.
    experiment_list = [
        {'h': h, 'training_epochs': 100, 'pretraining_epochs': 20,
         'finetune_lr': 0.01, 'beta': 1, 'cl': [0.1, 0.1],
         'cost_vec': [1, 3], 'batch_size': 1, 'pretrain_lr': 0.001}
        for h in [[60, 60], [100, 100], [120, 120],
                  [60], [80], [100], [120], [130]]
    ]
    logger = get_logger(name='experiment_settings', file_name=log_dir + '/experiment_setting.txt', verbose=True)
    for e in experiment_list:
        logger.info('experiment_list.append(%s)', e)
    # Pool.map passes a single argument to the worker, so pre-bind log_dir.
    func = partial(run_helper, log_dir)
    p = multiprocessing.Pool()
    result = p.map(func, experiment_list)
    p.close()
    p.join()
    summary_result(result)
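Pool.map hands each worker exactly one element of the iterable, which is why run_helper's extra log_dir argument is pre-bound with functools.partial above. A minimal, self-contained sketch of the pattern (the names are illustrative, not from the project):

import multiprocessing
from functools import partial

def worker(prefix, item):
    # 'prefix' was pre-bound by partial; Pool.map supplies 'item'.
    return '%s-%s' % (prefix, item)

if __name__ == '__main__':
    func = partial(worker, 'run')
    pool = multiprocessing.Pool()
    print(pool.map(func, [1, 2, 3]))  # ['run-1', 'run-2', 'run-3']
    pool.close()
    pool.join()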
Example #3
def csdnn_helper(self, p_dict):
    # Method of a classifier class; returns
    # ((train_result, test_result, p_dict), y_pred_score_test).
    logger = get_logger(name=p_dict['nn'],
                        file_name=p_dict['nn'],
                        verbose=True)
    train_result, test_result, model_path, y_pred_score_test = csdnn_classifier(
        log_dir=self.log_dir, logger=logger, p_dict=p_dict)
    return (train_result, test_result, p_dict), y_pred_score_test
Example #4
def run_helper(log_dir, p_dict, ens_param=None):  # p_dict holds one experiment's settings
    nn = log_dir + '/' + p_dict['exp_name'] + '_' + str(random.random()) + '.txt'
    main_file_list.append(nn)  # record the per-run log for later concatenation
    logger = get_logger(name=nn, file_name=nn, verbose=True)
    train_result, test_result = test_SdA(p_dict=p_dict, logger=logger, ens_param=ens_param)
    return train_result, test_result, p_dict
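The log file name relies on str(random.random()) to stay unique across parallel runs; uuid4 gives a stronger collision guarantee. A sketch of the swap (not the project's code):

import uuid

nn = log_dir + '/' + p_dict['exp_name'] + '_' + uuid.uuid4().hex + '.txt'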
Example #5
File: SAE_LIFT.py  Project: ymcoin/demo
def run_helper(log_dir, p_dict):  # p_dict holds one experiment's settings
    nn = str(random.random()) + '.txt'
    logger = get_logger(name=nn, file_name=log_dir + '/' + nn, verbose=True)

    train_result, test_result, test_lift = test_SdA(
        finetune_lr=p_dict['finetune_lr'],
        pretraining_epochs=p_dict['pretraining_epochs'],
        pretrain_lr=p_dict['pretrain_lr'],
        training_epochs=p_dict['training_epochs'],
        batch_size=p_dict['batch_size'], h=p_dict['h'], cl=p_dict['cl'],
        cost_vec=p_dict['cost_vec'], beta=p_dict['beta'], logger=logger)
    return train_result, test_result, test_lift, p_dict
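Every key of p_dict here matches a keyword parameter of test_SdA, so the call can be collapsed with dictionary unpacking. A sketch, assuming p_dict contains only keys that test_SdA accepts:

train_result, test_result, test_lift = test_SdA(logger=logger, **p_dict)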
Example #6
                s += str(y) + '\t'
            logger.info('%s', s)


if __name__ == '__main__':
    datetim = time.strftime("%d_%m_%Y_%H_%M_%S")
    log_dir = 'log/' + data_dir + '/' + datetim
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    start_time = timeit.default_timer()

    run_experiment_parallel()
    #run_experiment()

    end_time = (timeit.default_timer() - start_time) / 60  # elapsed minutes
    logger = get_logger(name='summary_result',
                        file_name=log_dir + '/summary_result',
                        verbose=True)
    logger.info('Time taken: %f min', end_time)

    # Combine the per-run log files into one summary file.
    time.sleep(5)  # brief pause so all log files are fully written
    file_list = os.listdir(log_dir)
    with open(log_dir + '/' + datetim + '.txt', 'w') as outfile:
        for fname in file_list:
            with open(log_dir + '/' + fname) as infile:
                for line in infile:
                    outfile.write(line)
    print(file_list)
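The loop above opens every directory entry, which would fail on a subdirectory (Example #7 creates tmp_model/ and tmp_data/ inside log_dir). A sketch of a safer variant using shutil.copyfileobj (not the project's code):

import os
import shutil

def combine_logs(out_path, log_dir):
    # Concatenate every regular file in log_dir into out_path.
    with open(out_path, 'w') as outfile:
        for fname in sorted(os.listdir(log_dir)):
            path = os.path.join(log_dir, fname)
            if not os.path.isfile(path):
                continue  # skip subdirectories such as tmp_model/
            with open(path) as infile:
                shutil.copyfileobj(infile, outfile)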
Example #7

if __name__ == '__main__':
    main_file_list = []
    datetim = time.strftime("%d_%m_%Y_%H_%M_%S")
    log_dir = 'log/' + data_dir + '/' + datetim
    for d in (log_dir, log_dir + '/tmp_model', log_dir + '/tmp_data'):
        if not os.path.exists(d):
            os.makedirs(d)
    settings.init(log_dir)
    start_time = timeit.default_timer()
    nn = log_dir + '/summary_result.txt'
    main_summary_logger = get_logger(name=nn, file_name=nn, verbose=False)
    # run_experiment_parallel()
    # run_experiment()
    run_repeat_experiment()
    end_time = (timeit.default_timer() - start_time) / 60  # elapsed minutes
    logger = get_logger(name='summary_result',
                        file_name=log_dir + '/summary_result.txt',
                        verbose=True)
    logger.info('Time taken: %f min', end_time)

    time.sleep(10)  # brief pause so all per-run logs are fully written
    main_file_list.append(log_dir + '/summary_result.txt')
    combine_log(path=log_dir + '/' + datetim + '.txt',
                file_list=main_file_list)
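combine_log itself is not shown in these examples. A minimal sketch consistent with how it is called here (hypothetical, not the project's implementation):

def combine_log(path, file_list):
    # Concatenate every file in file_list into a single log at 'path'.
    with open(path, 'w') as outfile:
        for fname in file_list:
            with open(fname) as infile:
                outfile.write(infile.read())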