コード例 #1
0
     # Build the fold-lookup JSON filename for the current dataset size `i`
     # (the loop header building `jsonfiles` is not visible in this excerpt).
     strs = 'lookuptable_5folds_'+str(i)+'.json'
     jsonfiles.append(os.path.join(temp_path,strs))
 #print(jsonfiles)
 # Collect the HDF5 folders that hold the per-size subject data.
 temp_path = os.path.join(os.getcwd(),'files_h5')
 h5folders = []
 for i in (5,10,15):
     strs = 'subjects_'+str(i)
     h5folders.append(os.path.join(temp_path,strs))
 # print(h5folders)
 # Input/output locations for the train/val/test file transfer.
 file_trans_input = os.path.join(os.getcwd(),'dataset','scattered','standard1' )
 file_trans_output = os.path.join(os.getcwd(),'dataset','difsizesSVM')
 valpath = os.path.join(file_trans_output, 'val')
 testpath = os.path.join(file_trans_output, 'test')
 # [path, label] pairs, presumably consumed by reporting code not shown here.
 report1 = [valpath, 'validation']
 report2 = [testpath, 'test']
 # Redirect stdout so all subsequent prints are captured by the log file.
 sys.stdout = logger(filename=os.path.join(os.getcwd(),'log&&materials','Supploopsvm_multipledesignevalutionresults.log'))
 # This loop iterates over datasets of different sizes.
 topCon = {'SVM':[]}
 wholest = time.time()
 # NOTE(review): h5folders above only has entries for (5, 10, 15), so zip()
 # silently drops the flag value 20 here — confirm whether that is intended.
 for jsonfile, h5folder, flag in zip(jsonfiles,h5folders,(5,10,15,20)):
     datalooper = trainvalFormation(file_trans_input, None, 5, 'specified')
     # jsonfile = os.path.join(os.getcwd(),'files_json', 'lookuptable_5folds_10.json')
     # Load the fold assignment from the JSON lookup table.
     datalooper.specifybyloading(path=jsonfile)
     dur = time.time()
     # Materialise the split on disk, then load it as (features, targets)
     # arrays in scikit-learn style for the SVM experiments.
     datalooper.subjects_transfer(file_trans_output)
     features, targets = skstyleDataset(file_trans_output,flag)
     print('dataset formation time: {}s.'.format(round(time.time() - dur, 2)))
     print()
     print()
     print()
     print('#'*80)
コード例 #2
0
    # Source/destination folders for the 5-fold train/val file transfer.
    file_trans_input = os.path.join(os.getcwd(), 'dataset', 'scattered',
                                    'standard1')
    file_trans_output = os.path.join(os.getcwd(), 'dataset', 'constructed2')
    looper = trainvalFormation(file_trans_input, file_trans_output, 5,
                               'specified')
    # Load a previously saved fold specification (default path).
    looper.specifybyloading()

    # NOTE(review): hard-coded absolute path — machine-specific; confirm it is
    # still required alongside the cwd-relative paths below.
    basedatapath = '/home/zhaok14/example/PycharmProjects/setsail/5foldCNNdesign/dataset/constructed2'
    valdatapath = os.path.join(os.getcwd(), 'dataset', 'constructed2', 'val')
    testdatapath = os.path.join(os.getcwd(), 'dataset', 'constructed2', 'test')
    # [path, label] pairs, presumably consumed by reporting code not shown here.
    report1 = [valdatapath, 'validation']
    report2 = [testdatapath, 'test']

    ev = time.time()
    # 1. initialize the dataset
    # Redirect stdout so all subsequent prints are captured by the log file.
    sys.stdout = logger(filename=os.path.join(os.getcwd(), 'log&&materials',
                                              'spec_lstmresults.log'))
    # 2. for every single round of evaluation, we need to train the models.
    print('Note this time lstm is with the upgraded spec feature....')
    for i in (0, 1, 2, 3, 4):  # one iteration per cross-validation fold
        strg = 'NEWCHECKING:ROUND_{}'.format(str(i))
        print()
        print(40 * '-' + strg + 40 * '-')
        print()
        # 2.1 regenerate the fold-specific data on disk and time it.
        dur = time.time()
        looper.loop_files_transfer(i)
        dur = round(time.time() - dur, 2)
        print('file transformation finished. time:{}s'.format(dur))
        # 2.2 build the individual LSTM network for this fold.
        nn = comparativeNetwork()
        nn.CreateLstmModel()
コード例 #3
0
 #print(jsonfiles)
 # Collect the HDF5 folders that hold the per-size subject data.
 temp_path = os.path.join(os.getcwd(), 'files_h5')
 h5folders = []
 for i in (5, 10, 15, 20):
     strs = 'subjects_' + str(i)
     h5folders.append(os.path.join(temp_path, strs))
 # print(h5folders)
 # Input/output locations for the train/val/test file transfer.
 file_trans_input = os.path.join(os.getcwd(), 'dataset', 'scattered',
                                 'standard1')
 file_trans_output = os.path.join(os.getcwd(), 'dataset', 'difsizes')
 valpath = os.path.join(file_trans_output, 'val')
 testpath = os.path.join(file_trans_output, 'test')
 # [path, label] pairs, presumably consumed by reporting code not shown here.
 report1 = [valpath, 'validation']
 report2 = [testpath, 'test']
 # Redirect stdout so all subsequent prints are captured by the log file.
 sys.stdout = logger(
     filename=os.path.join(os.getcwd(), 'log&&materials',
                           'Bigloop_20subjectsdesignevalutionresults.log'))
 # This loop iterates over datasets of different sizes.
 topCon = {'CNN': [], 'LSTM': [], 'FC': []}
 wholest = time.time()
 for jsonfile, h5folder, flag in zip(jsonfiles, h5folders, (5, 10, 15, 20)):
     # Only the 20-subject dataset is evaluated in this run; smaller
     # scales are skipped explicitly.
     if flag != 20:
         print('We just skip the {}-scale dataset..'.format(str(flag)))
         continue
     datalooper = trainvalFormation(file_trans_input, file_trans_output, 5,
                                    'specified')
     # Load the fold assignment from the JSON lookup table.
     datalooper.specifybyloading(path=jsonfile)
     print()
     print()
     print()
     print('#' * 80)
コード例 #4
0
    # Define the file transfer object: source/destination folders for the
    # 5-fold train/val file transfer.
    file_trans_input = os.path.join(os.getcwd(), 'dataset', 'scattered', 'standard1')
    file_trans_output = os.path.join(os.getcwd(), 'dataset', 'constructed')
    looper = trainvalFormation(file_trans_input, file_trans_output, 5, 'specified')
    # Load a previously saved fold specification (default path).
    looper.specifybyloading()

    # NOTE(review): hard-coded absolute path — machine-specific; confirm it is
    # still required alongside the cwd-relative paths below.
    basedatapath = '/home/zhaok14/example/PycharmProjects/setsail/5foldCNNdesign/dataset/constructed'
    valdatapath = os.path.join(os.getcwd(),'dataset','constructed','val')
    testdatapath = os.path.join(os.getcwd(),'dataset','constructed','test')
    # [path, label] pairs, presumably consumed by reporting code not shown here.
    report1 = [valdatapath,'validation']
    report2 = [testdatapath, 'test']

    ev = time.time()
    # 1. initialize the dataset
    # Redirect stdout so all subsequent prints are captured by the log file.
    sys.stdout = logger(filename=os.path.join(os.getcwd(),'log&&materials','Newfeature_multipleensemble_evaluationresults.log'))
    print('This time the corrected features are applied....')
    for i in (0, 1, 2, 3, 4):  # one iteration per cross-validation fold
        print()
        print(40 * '-' + 'NEWCHECKING:ROUND_{}'.format(str(i)) + 40 * '-')
        print()
        # Regenerate the fold-specific train/val data on disk.
        looper.loop_files_transfer(i)
        nn = flexible_average()
        # 2. create individual and ensemble model
        # Directories of the trained regular / residual / inception CNNs
        # for the current fold i.
        regudir = os.path.join(os.getcwd(),'CNNevaluation','regular','regi='+ str(i))
        residir = os.path.join(os.getcwd(), 'CNNevaluation', 'residual','resi='+ str(i))
        incedir = os.path.join(os.getcwd(),'CNNevaluation','inception','inci='+ str(i))
        # Forward the individual models, then build pairwise averaged
        # ensembles of the three architectures.
        nn.ensembleForward(regudir=regudir, residir=residir, incedir=incedir)
        regAresModel, regAresName = nn.average(regudir=True,residir=True)
        regAincModel, regAincName = nn.average(regudir=True,incedir=True)
        resAincModel, resAincName = nn.average(residir=True,incedir=True)
コード例 #5
0
     # Append the fold-lookup JSON path built from `strs` (the loop header
     # building `jsonfiles` is not visible in this excerpt).
     jsonfiles.append(os.path.join(temp_path, strs))
 # print(jsonfiles)
 # Collect the HDF5 folders that hold the per-size subject data.
 temp_path = os.path.join(os.getcwd(), 'files_h5')
 h5folders = []
 for i in (5, 10, 15, 20):
     strs = 'subjects_' + str(i)
     h5folders.append(os.path.join(temp_path, strs))
 # print(h5folders)
 # Input/output locations for the train/val/test file transfer; this variant
 # reads from the 'standard_aligned' source folder.
 file_trans_input = os.path.join(os.getcwd(), 'dataset', 'scattered',
                                 'standard_aligned')
 file_trans_output = os.path.join(os.getcwd(), 'dataset', 'difsizes')
 valpath = os.path.join(file_trans_output, 'val')
 testpath = os.path.join(file_trans_output, 'test')
 # [path, label] pairs, presumably consumed by reporting code not shown here.
 report1 = [valpath, 'validation']
 report2 = [testpath, 'test']
 # Redirect stdout so all subsequent prints are captured by the log file.
 sys.stdout = logger(filename=os.path.join(
     os.getcwd(), 'log&&materials', 'std150_designevalutionresults.log'))
 # This loop iterates over datasets of different sizes.
 topCon = {'CNN': [], 'LSTM': [], 'FC': []}
 wholest = time.time()
 for jsonfile, h5folder, flag in zip(jsonfiles, h5folders, (5, 10, 15, 20)):
     # Only the 20-subject dataset is evaluated in this run; smaller
     # scales are skipped explicitly.
     if flag != 20:
         print('We just skip the {}-scale dataset..'.format(str(flag)))
         continue
     datalooper = trainvalFormation(file_trans_input, file_trans_output, 5,
                                    'specified')
     # Load the fold assignment from the JSON lookup table.
     datalooper.specifybyloading(path=jsonfile)
     print()
     print()
     print()
     print('#' * 80)
     print('The followed data formation jsonfile:{}'.format(