Example #1
0
def run_opt_single( conf, iteration, globals ):
    '''
    Evaluate the algorithms for a single optimization iteration and keep
    track of the best configuration per algorithm class across iterations.
        --------
        conf: dict
            Configuration dictionary
        iteration: int
            Index of the current optimization iteration
        globals: dict
            Shared state across iterations. Maps each algorithm class to
            {'key': <best config key>, 'best': <best metric value>} and must
            contain a 'results' list that every iteration's results are
            appended to.
    '''
    print( 'run test opt single' )
    
    algorithms = create_algorithms_dict( conf['algorithms'] )
    # ensure every algorithm class has a tracking slot for its best result
    for k, a in algorithms.items(): 
        aclass = type(a)
        if aclass not in globals:  # idiomatic 'not in' (was 'not aclass in')
            globals[aclass] = { 'key': '', 'best': -1 }
    
    metrics = create_metric_list( conf['metrics'] )
    metric_opt = create_metric( conf['optimize'] )
    # the optimized metric comes first; position [0] is read back below
    metrics = metric_opt + metrics
    evaluation = load_evaluation( conf['evaluation'] )
    
    train_eval = True
    if 'train_eval' in conf['data']:
        train_eval = conf['data']['train_eval']
        
    if 'opts' in conf['data']:
        train, test = dl.load_data_session( conf['data']['folder'], conf['data']['prefix'], train_eval=train_eval, **conf['data']['opts'] )
    else:
        train, test = dl.load_data_session( conf['data']['folder'], conf['data']['prefix'], train_eval=train_eval )
                                    
    for m in metrics:
        m.init( train )
    
    results = {}

    for k, a in algorithms.items():
        eval_algorithm(train, test, k, a, evaluation, metrics, results, conf, iteration=iteration, out=False)
        
    write_results_csv( results, conf, iteration=iteration )
    
    # compare the optimized metric (first entry of each result) to the best so far
    for k, a in algorithms.items(): 
        aclass = type(a)
        current_value = results[k][0][1]
        if globals[aclass]['best'] < current_value: 
            print( 'found new best configuration' )
            print( k )
            print( 'improvement from {} to {}'.format(globals[aclass]['best'], current_value) )
            send_message( 'improvement for {} from {} to {} in test {}'.format(k, globals[aclass]['best'], current_value, iteration ) )
            globals[aclass]['best'] = current_value
            globals[aclass]['key'] = k
    
    globals['results'].append( results )
    
    # free memory explicitly between iterations of the optimization loop
    del algorithms
    del metrics
    del evaluation
    del results
    gc.collect()
def run_single(conf, slice=None):
    '''
    Evaluate the algorithms for a single split
        --------
        conf: dict
            Configuration dictionary
        slice: int
            Optional index for the window slice
    '''
    print('run test single')

    algorithms = create_algorithms_dict(conf['algorithms'])
    metrics = create_metric_list(conf['metrics'])
    evaluation = load_evaluation(conf['evaluation'])

    data_conf = conf['data']
    # expand extra loader options only when configured; an empty expansion
    # is equivalent to omitting the keyword arguments entirely
    extra_opts = data_conf['opts'] if 'opts' in data_conf else {}
    train, test = dl.load_data_session(data_conf['folder'],
                                       data_conf['prefix'],
                                       slice_num=slice,
                                       **extra_opts)

    buys = pd.DataFrame()
    if 'buys' in data_conf and 'file_buys' in data_conf:
        # load buy actions in addition
        buys = dl.load_buys(data_conf['folder'], data_conf['file_buys'])

    for metric in metrics:
        metric.init(train)
        if hasattr(metric, 'set_buys'):
            metric.set_buys(buys, test)

    results = {}

    for key, algorithm in algorithms.items():
        eval_algorithm(train,
                       test,
                       key,
                       algorithm,
                       evaluation,
                       metrics,
                       results,
                       conf,
                       slice=slice,
                       iteration=slice)

    print_results(results)
    write_results_csv(results, conf, iteration=slice)
Example #3
0
def run_single(conf, slice=None):
    '''
    Evaluate the algorithms for a single split
        --------
        conf: dict
            Configuration dictionary
        slice: int
            Optional index for the window slice

        Raises
        --------
        RuntimeError
            If conf['data']['type'] is set to an unsupported value.
    '''
    print('run test single')

    algorithms = create_algorithms_dict(conf['algorithms'])
    metrics = create_metric_list(conf['metrics'])
    evaluation = load_evaluation(conf['evaluation'])

    buys = pd.DataFrame()

    data_type = conf['data'].get('type')
    if data_type == 'hdf':  # hdf5 file
        if 'opts' in conf['data']:
            # ( path, file, sessions_train=None, sessions_test=None, slice_num=None, train_eval=False )
            train, test = dl.load_data_session_hdf(conf['data']['folder'],
                                                   conf['data']['prefix'],
                                                   slice_num=slice,
                                                   **conf['data']['opts'])
        else:
            train, test = dl.load_data_session_hdf(conf['data']['folder'],
                                                   conf['data']['prefix'],
                                                   slice_num=slice)
    elif data_type is None:  # csv file (default)
        if 'opts' in conf['data']:
            train, test = dl.load_data_session(conf['data']['folder'],
                                               conf['data']['prefix'],
                                               slice_num=slice,
                                               **conf['data']['opts'])
        else:
            train, test = dl.load_data_session(conf['data']['folder'],
                                               conf['data']['prefix'],
                                               slice_num=slice)
        if 'buys' in conf['data'] and 'file_buys' in conf['data']:
            buys = dl.load_buys(
                conf['data']['folder'],
                conf['data']['file_buys'])  # load buy actions in addition
    else:
        # previously an unrecognized 'type' fell through silently and the
        # function crashed later with UnboundLocalError on 'train'; fail
        # fast with a clear message instead
        raise RuntimeError('Unknown data type: {}'.format(data_type))

    for m in metrics:
        m.init(train)
        if hasattr(m, 'set_buys'):
            m.set_buys(buys, test)

    results = {}

    for k, a in algorithms.items():
        eval_algorithm(train,
                       test,
                       k,
                       a,
                       evaluation,
                       metrics,
                       results,
                       conf,
                       slice=slice,
                       iteration=slice)

    print_results(results)
    write_results_csv(results, conf, iteration=slice)