Example #1
def main():
    # load the dataset and keep the 'Baseline' entry
    data = load_data()

    data = data['Baseline']

    # interpolate the state at the latest time point in the index
    t = max(data.index)
    state = interp(data, t)

    # draw the interpolated state on the graph
    g = get_graph()
    show(g, state)
Example #2
File: main.py Project: STRCWearlab/GAN_SHL
def mrmr(K=908, log=True, dataset='SHL_ext'):
    import numpy as np
    import pandas as pd
    from sklearn.feature_selection import f_regression

    from sliding_window import get_FX_names

    P = Params(name='mRMR_full_user1', dataset=dataset, FX_sel='all', cross_val='combined', undersampling=True)
    V = ds.load_data(P)
    F = pp.perform_preprocessing(P, V)
    
    # X = np.concatenate([X0 for X0,_ in F])
    # Y = np.concatenate([Y0 for _,Y0 in F])
    X, Y = F[0]
   
    P.log(f"X: {X.shape}, Y: {Y.shape}")
    P.log(', '.join([ f"{int(val)}: {count}" for val,count in zip(*np.unique(Y,return_counts=True))]))
   
    X = pd.DataFrame(X, columns = get_FX_names())
    Y = pd.Series(Y.ravel())
    
    F = pd.Series(f_regression(X, Y)[0], index = X.columns)
    corr = pd.DataFrame(.00001, index = X.columns, columns = X.columns)
    
    # initialize list of selected features and list of excluded features
    selected = []
    not_selected = X.columns.to_list()
    
    # repeat K times
    for i in range(K):
      
        # compute (absolute) correlations between the last selected feature and all the (currently) excluded features
        if i > 0:
            last_selected = selected[-1]
            corr.loc[not_selected, last_selected] = X[not_selected].corrwith(X[last_selected]).abs().clip(.00001)
            
        # compute FCQ score for all the (currently) excluded features (this is Formula 2)
        score = F.loc[not_selected] / corr.loc[not_selected, selected].mean(axis = 1).fillna(.00001)
        
        # find best feature, add it to selected and remove it from not_selected
        best = score.index[score.argmax()]
        
        if log:
            P.log(str(i+1).rjust(3,' ')+f': {best} (Score: {score[best]:.4f})')
        selected.append(best)
        not_selected.remove(best)
        
    # column indices of the selected features
    indeces = [X.columns.get_loc(c) for c in selected]
        
    if log:
        P.log(str(selected))
        P.log(str(indeces))

    return selected, indeces
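The loop above implements the FCQ ("F-test correlation quotient") variant of mRMR: each step selects the feature with the highest F-statistic relevance divided by its mean absolute correlation with the features chosen so far (the "Formula 2" referenced in the comments). A minimal self-contained sketch of the same criterion on toy data (names and data below are illustrative, not from the project):

import numpy as np
import pandas as pd
from sklearn.feature_selection import f_regression

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.normal(size=(100, 5)), columns=[f'f{i}' for i in range(5)])
Y = pd.Series(rng.integers(0, 2, size=100))

relevance = pd.Series(f_regression(X, Y)[0], index=X.columns)
selected, not_selected = [], list(X.columns)
for _ in range(3):
    if selected:
        # redundancy: mean |correlation| with the already-selected features
        red = X[not_selected].apply(lambda c: X[selected].corrwith(c).abs().mean())
        score = relevance[not_selected] / red.clip(1e-5)
    else:
        score = relevance[not_selected]
    best = score.idxmax()
    selected.append(best)
    not_selected.remove(best)
print(selected)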
Example #3
# number of frames per input sequence
seq_length = 20

# iterate through this many frames before training
washout = 10

# fraction of frames to reserve for testing
test = 0.1

# iterate over training data this many times
epochs = 100

print('Building model')
network = RadarNet()

print('Loading input data')
train_inputs, test_inputs = load_data('data', test_rate=test)
print('Generating target outputs')
train_outputs = generate_targets(train_inputs, pred_distance, target_location,
                                 target_size)
test_outputs = generate_targets(test_inputs, pred_distance, target_location,
                                target_size)

train_inputs = train_inputs[0:len(train_outputs)]
test_inputs = test_inputs[0:len(test_outputs)]

print('Data length: train={}, test={}'.format(len(train_outputs),
                                              len(test_outputs)))

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    err_plot = []
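load_data is not shown in this snippet; judging by the test comment above, it reserves the trailing fraction of frames for testing. A minimal sketch of such a split, with split_frames as a hypothetical stand-in for the project's actual loader logic:

def split_frames(frames, test_rate=0.1):
    # keep the leading (1 - test_rate) fraction for training,
    # reserve the trailing fraction for testing
    cut = int(len(frames) * (1 - test_rate))
    return frames[:cut], frames[cut:]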
Example #4
File: report.py Project: numpde/nct1
def main():
    # report the most recent 'Baseline' row
    report(load_data()['Baseline'].iloc[-1])
Example #5
def load_data(params, dataset):
    # load features only; labels are discarded
    X, _ = ds.load_data(params, dataset)
    return X
Example #6
def get_data(params, dataset):
    X, Y = ds.load_data(params, dataset)
    # convert integer labels to one-hot vectors when labels are present
    if Y is not None:
        Y = labels_to_one_hot(params, Y)
    return X, Y
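labels_to_one_hot is defined elsewhere in the project; a minimal sketch of what such a conversion typically does, assuming non-negative integer class labels (the num_classes parameter is an assumption, not the project's actual signature):

import numpy as np

def one_hot(Y, num_classes):
    # e.g. with num_classes=4, label 2 becomes [0., 0., 1., 0.]
    Y = np.asarray(Y, dtype=int).ravel()
    out = np.zeros((Y.size, num_classes))
    out[np.arange(Y.size), Y] = 1.0
    return out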
Example #7
File: main.py Project: STRCWearlab/GAN_SHL
def main():
    import argparse
    from params import DEFAULT_PARAMS as default
    
    parser = argparse.ArgumentParser()
    
    parser.add_argument('-clear', dest='CLEAR', action='store_true')
    parser.set_defaults(CLEAR=False)
    
    parser.add_argument('-test', dest='TEST', action='store_true')
    parser.set_defaults(TEST=False)
    
    parser.add_argument('-run', dest='RUN', action='store_true')
    parser.set_defaults(RUN=False)
    
    parser.add_argument('-eval', dest='EVAL', action='store_true')
    parser.set_defaults(EVAL=False)
    
    # note: -eval_r and -eval_c both set BASE, enabling the baseline runs below
    parser.add_argument('-eval_r', dest='BASE', action='store_true')
    parser.add_argument('-eval_c', dest='BASE', action='store_true')
    parser.set_defaults(BASE=False)
    
    parser.add_argument('-search', dest='SEARCH', action='store_true')
    parser.set_defaults(SEARCH=False)
    
    # note: -search_r and -search_c both set SEARCH_C
    parser.add_argument('-search_r', dest='SEARCH_C', action='store_true')
    parser.add_argument('-search_c', dest='SEARCH_C', action='store_true')
    parser.set_defaults(SEARCH_C=False)
    
    parser.add_argument('-search_gd', dest='SEARCH_GD', action='store_true')
    parser.set_defaults(SEARCH_GD=False)
    
    parser.add_argument('-mrmr', dest='MRMR', action='store_true')
    parser.set_defaults(MRMR=False)
    
    parser.add_argument('-sklearn', dest='SKLEARN', action='store_true')
    parser.set_defaults(SKLEARN=False)

    parser.add_argument('-fx_num', type=int, dest='FX_NUM')
    parser.set_defaults(FX_NUM=None)
    
    parser.add_argument('-cuda', dest='CUDA', action='store_true')
    parser.add_argument('-cpu', dest='CUDA', action='store_false')
    parser.set_defaults(CUDA=default['CUDA'])
    
    # -datapath is accepted as an alias for -data_path
    parser.add_argument('-data_path', type=str, dest='data_path')
    parser.add_argument('-datapath', type=str, dest='data_path')
    parser.set_defaults(data_path=default['data_path'])
    
    parser.add_argument('-dataset', type=str, dest='dataset')
    parser.set_defaults(dataset=default['dataset'])
    
    parser.add_argument('-print', dest='PRINT', action='store_true')
    parser.set_defaults(PRINT=default['print_epoch'])
    
    parser.add_argument('-basic', dest='BASIC', action='store_true')
    parser.add_argument('-no_basic', dest='BASIC', action='store_false')
    parser.set_defaults(BASIC=default['C_basic_train'])
    
    parser.add_argument('-max_evals', type=int, dest='max_evals')
    parser.set_defaults(max_evals=None)
    
    args = parser.parse_args()
    P_args = Params(
        data_path = args.data_path,
        CUDA = args.CUDA,
        print_epoch = args.PRINT,
        C_basic_train = args.BASIC,
        dataset = args.dataset,
        )
    
    P_test = P_args.copy().set_keys(
        name = 'Test',
        dataset = 'Test',

        epochs = 10,
        GD_ratio = 0.5,

        save_step = 1,
        runs = 1,
        
        FX_sel = 'all',
        cross_val = 'user',
        
        sample_no = None,
        undersampling = False,
        oversampling = False,
        
        CB1 = 0.02482259369526197, 
        CLR = 0.00033565485364740803, 
        C_ac_func = 'relu', 
        C_hidden = 92, 
        C_optim = 'AdamW', 
        DB1 = 0.1294935579262613, 
        DLR = 0.010144020667237321, 
        D_ac_func = 'leaky', 
        D_hidden = 317, 
        D_optim = 'AdamW', 
        GB1 = 0.023718651003136713,
        GLR = 0.0005411668775518598, 
        G_ac_func = 'relu', 
        G_hidden = 140, 
        G_optim = 'SGD', 
        batch_size = 110
        ) 
    
    P = P_args.copy().set_keys(
        name = 'eval_user1',

        epochs = 300,
        save_step = 1,
        runs = 10,
        
        FX_sel = 'all',
        R_active = False,
        cross_val = 'user1',
        
        sample_no = None,
        undersampling = False,
        oversampling = False,
        
        User_L = 1,
        User_U = 2,
        User_V = 3,
        
        batch_size = 512,
        FX_num = 150, 
        
        GD_ratio = 0,
        
        RB1 = 0.02621587421913803, 
        RLR = 0.03451171211996072, 
        R_ac_func = 'leaky20', 
        R_hidden = 1294, 
        R_hidden_no = 4, 
        R_optim = 'SGD',
        
        CB1 = 0.8661148142428583, 
        CLR = 8.299645247840653e-05, 
        C_ac_func = 'leaky20', 
        C_hidden = 1790, 
        C_hidden_no = 2, 
        C_optim = 'AdamW', 
        C_tau = 2.833757972503762, 
        
        DB1 = 0.04397295845368007, 
        DLR = 0.0243252689035249, 
        D_ac_func = 'leaky', 
        D_hidden = 113, 
        D_hidden_no = 6, 
        
        GB1 = 0.6201555853224091, 
        GLR = 0.006959406242448824, 
        G_ac_func = 'relu', 
        G_hidden = 318, 
        G_hidden_no = 5,
        
        ) 
    

    if args.CLEAR:
        clear_cache()

    if args.TEST:
        param_space={'GD_ratio': hp.uniform('GD_ratio', 0, 0.9)}
        
        P_test.set_keys(name='Test_CUDA', CUDA = True,)
        evaluate(P_test)
        hyperopt_GAN(P_test.copy(),param_space,eval_step=2,max_evals=5)
        
        P_test.set_keys(name='Test_CPU', CUDA = False, D_fake_step=2)
        evaluate(P_test)
        hyperopt_GAN(P_test.copy(),param_space,eval_step=2,max_evals=5)
        
            
    if args.RUN:
        P_run = P.copy().set_keys(
            runs = 5,
            )
        
        # P_run.set_keys(
        #     cross_val = 'user1', 
        #     save_step = 1, 
        #     epochs = 150, 
        #     GD_ratio = 0,
        #     runs = 10,
        #     sample_no = None,
        #     undersampling = True,
        #     oversampling = False,
        #     )
        
        for step_size in [5,None,10]:
            P_run.set_keys(name='eval_user1_fpos_step_'+str(step_size), cross_val='user1', D_fake_step = step_size, sample_no = 11136, undersampling = False, oversampling = False, )
            evaluate(P_run,P_run.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ),epoch_lst = [50,100,150,200,250])
            
            # P_run.set_keys(
            #     name='eval_LR_C_'+str(step_size),
            #     D_fake_step = step_size,
                
            #     RB1 = 0.8661148142428583,
            #     RLR = 8.299645247840653e-05,
            #     R_ac_func = 'leaky20',
            #     R_hidden = 1790,
            #     R_hidden_no = 2,
            #     R_optim = 'AdamW',
                
            #     CB1 = 0.8661148142428583,
            #     CLR = 8.299645247840653e-05,
            #     C_ac_func = 'leaky20',
            #     C_hidden = 1790,
            #     C_hidden_no = 2,
            #     C_optim = 'AdamW',)
            # evaluate(P_run,P_run.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ),epoch_lst=[50,100])
            
            # P_run.set_keys(
            #     name='eval_LR_R_'+str(step_size),
            #     D_fake_step = step_size,
                
            #     RB1 = 0.02621587421913803,
            #     RLR = 0.03451171211996072,
            #     R_ac_func = 'leaky20',
            #     R_hidden = 1790,
            #     R_hidden_no = 2,
            #     R_optim = 'AdamW',
                
            #     CB1 = 0.02621587421913803,
            #     CLR = 0.03451171211996072,
            #     C_ac_func = 'leaky20',
            #     C_hidden = 1790,
            #     C_hidden_no = 2,
            #     C_optim = 'AdamW',)
            # evaluate(P_run,P_run.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ))
        
        # P_run.set_keys(
        #     name='eval_R',
        #     RB1 = 0.02621587421913803, 
        #     RLR = 0.03451171211996072, 
        #     R_ac_func = 'leaky20', 
        #     R_hidden = 1294, 
        #     R_hidden_no = 4, 
        #     R_optim = 'SGD',
            
        #     CB1 = 0.02621587421913803, 
        #     CLR = 0.03451171211996072, 
        #     C_ac_func = 'leaky20', 
        #     C_hidden = 1294, 
        #     C_hidden_no = 4, 
        #     C_optim = 'SGD',)
        # evaluate(P_run,P_run.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ))
        
        
        
        # P_run.set_keys(name='eval_0_GD', epochs=50, GD_ratio = 0,)
        # evaluate(P_run,P_run.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ))
        
        # P_run.set_keys(name='eval_50_GD', epochs=100, GD_ratio = 0.5,)
        # evaluate(P_run,P_run.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ))
        
        # P_run.set_keys(name='eval_100_GD', epochs=150, GD_ratio = 2/3,)
        # evaluate(P_run,P_run.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ))
        
        
    
    if args.EVAL: 
        
        V = ds.load_data(P)
        
        for sample_no in [512,1024,4096,11136]: 
            P.set_keys(name='eval_combined_'+str(sample_no), cross_val='combined', sample_no = sample_no, undersampling = False, oversampling = False, )
            evaluate(P,P.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ),epoch_lst = [50,100,150,200,250],V=V)
            #sklearn_baseline(P,V)
        
        for sample_no in [512,1024,4096,11136]:        
            
            P.set_keys(name='eval_user1_'+str(sample_no), cross_val='user1', sample_no = sample_no, undersampling = False, oversampling = False, )
            evaluate(P,P.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ),epoch_lst = [50,100,150,200,250],V=V)
            #sklearn_baseline(P,V)

            
        # for cross_val in ['user','none']:
        #     for basic_train in [True,False]:
        #            P.set_keys(
        #                 name = '_'.join(['eval','C',('Complete' if basic_train else 'GAN'),cross_val,'cross']),
        #                 C_basic_train = basic_train,
        #                 cross_val = cross_val,
        #                 )
        #            evaluate(P,P.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ))

    if args.BASE:
        P_base1 = P.copy().set_keys( name='eval_c_normal', dataset='SHL', epochs=1500, sample_no=400, undersampling=False, oversampling=False, )
        P_base2 = P.copy().set_keys( name='eval_c_extended', dataset='SHL_ext', epochs=200, sample_no=None, undersampling=True, oversampling=False, )
        
        pytorch_baseline(P_base1,P_base1.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, )) 
        pytorch_baseline(P_base2,P_base2.copy().set_keys( sample_no = None, undersampling = False, oversampling = False, ))
    

    indeces = None
    if args.MRMR:
        selected, indeces = mrmr(dataset='SHL_ext')
        
    if args.SKLEARN:
        P_sklearn = P.copy().set_keys(
            name = 'eval_sklearn',
            runs = 10,
            )
        sklearn_baseline(P_sklearn)
        
    if args.FX_NUM is not None:
        P_fx_num = P.copy().set_keys( name='fx_num', dataset='SHL_ext', runs=8, sample_no=512, undersampling=False, oversampling=False, )
        plt_FX_num(P_fx_num,max_n=args.FX_NUM,P_val=P_fx_num.copy().set_keys(sample_no=None, undersampling=False, oversampling=False,),indeces=indeces)

    if args.SEARCH:
        hyper_GAN_3_5(P_args)
    
    if args.SEARCH_C:
        hyper_R_1_3(P_args)
    
    if args.SEARCH_GD:
        hyper_GD_1_3(P_args)
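Given the flags defined above, the script is driven from the command line; two illustrative invocations (argument values are examples only):

python main.py -run -cuda -dataset SHL_ext
python main.py -mrmr -fx_num 150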