Example #1
def backUp(inputDirs, backUpTo, DataBaseAddress, spreadsheet):

    subjectClassList = []
    for newDirectory in inputDirs:
        subjClass = subj.subject(newDirectory, backUpTo)
        checkFileNumbers(subjClass)
        subjectClassList.append(subjClass)

        executeCopy(subjClass)

        subjDf = saveLog(subjClass)

        dbDf = processDB(DataBaseAddress)

        newDf = pd.concat([dbDf, subjDf]).reset_index()
        newDf = newDf[[
            u'koreanName', u'subjectName', u'subjectInitial', u'group', u'sex',
            u'age', u'DOB', u'scanDate', u'timeline', u'studyname',
            u'patientNumber', u'T1', u'T2', u'REST_LR', u'REST_LR_SBRef',
            u'REST_BLIP_LR', u'REST_BLIP_RL', u'DTI_LR_1000', u'DTI_LR_2000',
            u'DTI_LR_3000', u'DTI_BLIP_LR', u'DTI_BLIP_RL', u'dx',
            u'folderName', u'backUpBy', u'note'
        ]]
        #please confirm here

        newDf['koreanName'] = newDf['koreanName'].str.decode('utf-8')
        newDf['note'] = newDf['note'].str.decode('utf-8')
        newDf.to_excel(DataBaseAddress, 'Sheet1')
        # os.chmod(DataBaseAddress, 0o2770)

        updateSpreadSheet.main(False, DataBaseAddress, spreadsheet)  #False
    print('Completed\n')
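
A hypothetical call to the backUp() variant above, for orientation only: the backup target, subject directory, and database file name appear in the test scripts of Examples #7 and #11, while the spreadsheet key is a made-up placeholder.

# Hypothetical invocation; the spreadsheet key is a placeholder.
backUp(inputDirs=['/Users/kangik/KIM_SE_UK_46676612'],
       backUpTo='/Volumes/promise/nas_BackUp/CCNC_MRI_3T/',
       DataBaseAddress='database.xls',
       spreadsheet='mri_backup_sheet')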
Example #2
def main(entity, entity_code):
    if entity == 'chapters':
        return chapter(entity_code)
    if entity == 'units':
        return unit(entity_code)
    if entity == 'concepts':
        return concept(entity_code)
    if entity == 'subjects':
        return subject(entity_code)
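
A minimal usage sketch for the dispatcher above; the entity codes are made-up values. Note that an unmatched entity falls through and returns None, since there is no default branch.

# Made-up entity codes, purely for illustration.
chapter_obj = main('chapters', 'CH-001')
subject_obj = main('subjects', 'SUBJ-042')
missing = main('books', 'B-007')   # no matching branch, so main() returns None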
Example #3
File: backUp.py Project: kcho/backUp
def backUp(inputDirs, backUpTo,
           DataBaseAddress, spreadsheet):

    subjectClassList = []
    for newDirectory in inputDirs:
        subjClass = subj.subject(newDirectory, backUpTo)
        checkFileNumbers(subj.correct_modality_re_dict, subjClass)
        subjectClassList.append(subjClass)

        executeCopy(subjClass)

        subjDf = saveLog(subjClass)
        print(subjDf)

        dbDf = processDB(DataBaseAddress)

        newDf = pd.concat([dbDf, subjDf]).reset_index()

        # ordering
        newDf = newDf[[u'koreanName',
                       u'subjectName',
                       u'subjectInitial',
                       u'group',
                       u'sex',
                       u'age',
                       u'DOB',
                       u'scanDate',
                       u'timeline',
                       u'studyname',
                       u'patientNumber'] +
                      # modality columns: the keys of correct_modality_re_dict
                      # (T1, T2, REST_LR, ... as listed explicitly in Example #1)
                      list(subj.correct_modality_re_dict.keys()) +
                      [u'dx',
                       u'folderName',
                       u'backUpBy',
                       u'note']]
        #please confirm here

        newDf['koreanName'] = newDf['koreanName'].str.decode('utf-8')
        newDf['note'] = newDf['note'].str.decode('utf-8')
        newDf.to_excel(DataBaseAddress, 'Sheet1', encoding='utf-8')
        # os.chmod(DataBaseAddress, 0o2770)

        updateSpreadSheet.main(False, DataBaseAddress, spreadsheet)#False

    print('Completed\n')
Example #4
def create_and_save_subjects():
    ##### initialize subjects from scores.csv #####
    scores_df = pd.read_csv('../data/scores.csv')

    condition_df = scores_df[23:]
    mean_days = condition_df['days'].mean()
    print(mean_days)

    # replace nan & empty str with -1
    scores_df = scores_df.replace(np.nan, -1)
    scores_df = scores_df.replace(' ', -1)
    print(sum(scores_df['days']))
    # check there is no nan
    assert scores_df.isnull().sum().sum() == 0

    subjects = [
        subject(row.number, row.days, row.gender, row.age, row.afftype,
                row.melanch, row.inpatient, row.edu, row.marriage, row.work,
                row.madrs1, row.madrs2) for row in scores_df.itertuples()
    ]
    # for s in subjects:
    #     print(s)

    # add motor data
    for s in subjects:
        file = '../data/' + s.label + '/' + s.number + '.csv'
        s.add_motor_data(file)

    ### correct & verify number of days in subject.days, values from scores.csv are incorrect ###
    # number of groups
    for s in subjects:
        num_of_group = len(s.motor_data_days)
        num_of_distinct_days = len(set(s.motor_data_df['date']))
        assert num_of_group == num_of_distinct_days
        s.days = num_of_distinct_days
    ### end of correction & verification ###

    # save subjects to file
    save_object(subjects, '../data/subject.pkl')

    return
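
create_and_save_subjects() relies on a save_object() helper that is not shown in this example; a minimal sketch, assuming it is a plain pickle dump:

import pickle

def save_object(obj, path):
    # Assumed implementation: serialize the subject list to disk with pickle,
    # matching the save_object(subjects, '../data/subject.pkl') call above.
    with open(path, 'wb') as f:
        pickle.dump(obj, f)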
Example #5
def backUp(inputDirs, backUpFrom, USBlogFile, backUpTo,
           DataBaseAddress, spreadsheet,
           freesurferOn, motionOn, copyExecuteOn, nasBackupOn):
    # External HDD log
    if USBlogFile:
        logFileInUSB = USBlogFile
    elif inputDirs:
        logFileInUSB = os.path.join(os.getcwd(),"log.xlsx")
    else:
        logFileInUSB = os.path.join(backUpFrom,"log.xlsx")

    logDf = copiedDirectoryCheck(backUpFrom,logFileInUSB)
    newDirectoryList,logDf = newDirectoryGrep(inputDirs, backUpFrom,logDf)
    logDf.to_excel(logFileInUSB,'Sheet1')

    if not newDirectoryList:
        sys.exit('Everything has already been backed up!')

    subjectClassList = []
    for newDirectory in newDirectoryList:
        subjClass = subj.subject(newDirectory, backUpTo)
        checkFileNumbers(subjClass)
        subjectClassList.append(subjClass)

        if copyExecuteOn:
            executeCopy(subjClass)

            subjDf = saveLog(subjClass)

            dbDf = processDB(DataBaseAddress)

            newDf = pd.concat([dbDf, subjDf]).reset_index()
            newDf = newDf[[ u'koreanName',  u'subjectName',   u'subjectInitial',
                            u'group',       u'sex',           u'age',
                            u'DOB',         u'scanDate',      u'timeline',
                            u'studyname',   u'patientNumber', u'T1Number',
                            u'DTINumber',   u'DKINumber',     u'RESTNumber',
                            u'REST2Number', u'folderName',    u'backUpBy',
                            u'note']]

            newDf['koreanName'] = newDf['koreanName'].str.decode('utf-8')
            newDf['note'] = newDf['note'].str.decode('utf-8')
            newDf.to_excel(DataBaseAddress, 'Sheet1')
            #os.chmod(DataBaseAddress, 0o2770)

            updateSpreadSheet.main(False, DataBaseAddress, spreadsheet)


    if motionOn:
        print('Now running motion_extraction')
        for subjectClass in subjectClassList:
            motion_extraction.main(subjectClass.targetDir, True, True, False)

    if nasBackupOn:
        server = '147.47.228.192'
        for subjectClass in subjectClassList:
            copiedDir=os.path.dirname(subjectClass.targetDir)
            server_connect(server, copiedDir)

    if freesurferOn:
        for subjectClass in subjectClassList:
            freesurfer.main(subjectClass.targetDir,
                            os.path.join(subjectClass.targetDir, 'FREESURFER'))
            # copiedDir was otherwise only set inside the nasBackupOn block above;
            # recompute it here so this branch also works when NAS backup is off
            copiedDir = os.path.dirname(subjectClass.targetDir)
            freesurfer_summary.main(copiedDir, None, "ctx_lh_G_cuneus", True, True, True, True)

    print('Completed\n')
Example #6
def backUp(inputDirs, backUpFrom, USBlogFile, backUpTo,
           DataBaseAddress, spreadsheet,
           freesurfer, motion, copyExecute, nasBackup):

    # External HDD log
    if USBlogFile:
        logFileInUSB = USBlogFile
    elif inputDirs:
        logFileInUSB = os.path.join(os.getcwd(),"log.xlsx")
    else:
        logFileInUSB = os.path.join(backUpFrom,"log.xlsx")

    logDf = copiedDirectoryCheck(backUpFrom, logFileInUSB)
    newDirectoryList,logDf = newDirectoryGrep(inputDirs, backUpFrom, logDf)
    logDf.to_excel(logFileInUSB,'Sheet1')

    if not newDirectoryList:
        sys.exit('Everything has already been backed up!')

    subjectClassList = []
    for newDirectory in newDirectoryList:
        subjClass = subj.subject(newDirectory, backUpTo)
        checkFileNumbers(subjClass)
        subjectClassList.append(subjClass)

        if copyExecute:
            executeCopy(subjClass)

            subjDf = saveLog(subjClass)

            dbDf = processDB(DataBaseAddress)

            newDf = pd.concat([dbDf, subjDf]).reset_index()
            newDf = newDf[[ u'koreanName',  u'subjectName',   u'subjectInitial',
                            u'group',       u'sex',           u'age',
                            u'DOB',         u'scanDate',      u'timeline',
                            u'studyname',   u'patientNumber', u'T1Number',
                            u'DTINumber',   u'DKINumber',     u'RESTNumber',
                            u'REST2Number', u'folderName',    u'backUpBy',
                            u'note']]

            newDf['koreanName'] = newDf['koreanName'].str.decode('utf-8')
            newDf['note'] = newDf['note'].str.decode('utf-8')
            newDf.to_excel(DataBaseAddress, 'Sheet1')
            # os.chmod(DataBaseAddress, 0o2770)

            updateSpreadSheet.main(False, DataBaseAddress, spreadsheet)#False

    if motion:
        print('Now running motion_extraction')
        for subjectClass in subjectClassList:
            motionExtraction.main(subjectClass.targetDir, True, True, False)
            bien.dtiFit(os.path.join(subjectClass.targetDir,'DTI'))
    if nasBackup:
        server = '147.47.228.192'
        for subjectClass in subjectClassList:
            copiedDir = os.path.dirname(subjectClass.targetDir)
            server_connect(server, copiedDir)

    if freesurfer:
        for subjectClass in subjectClassList:
            easyFreesurfer.main(subjectClass.targetDir,
                                os.path.join(subjectClass.targetDir, 'FREESURFER'))
            # copiedDir is otherwise only set inside the nasBackup block above;
            # recompute it here so this branch also works when NAS backup is off
            copiedDir = os.path.dirname(subjectClass.targetDir)
            freesurfer_Summary.main(copiedDir, None,                # bienseo: only use freesurfer.
                                    "ctx_lh_G_cuneus", True, True, True, True)
    print('Completed\n')
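
Example #11 below imports argparse, which suggests these boolean switches are normally driven from the command line; a hedged sketch of such a wrapper (flag names and defaults are assumptions, not taken from the project):

import argparse

# Hypothetical CLI wrapper around the backUp() defined above.
parser = argparse.ArgumentParser(description='Back up newly acquired MRI directories')
parser.add_argument('-i', '--inputDirs', nargs='*', default=[])
parser.add_argument('-f', '--backUpFrom', default=None)
parser.add_argument('-l', '--USBlogFile', default=None)
parser.add_argument('-t', '--backUpTo', required=True)
parser.add_argument('-d', '--DataBaseAddress', default='database.xls')
parser.add_argument('-s', '--spreadsheet', default=None)
parser.add_argument('--freesurfer', action='store_true')
parser.add_argument('--motion', action='store_true')
parser.add_argument('--copyExecute', action='store_true')
parser.add_argument('--nasBackup', action='store_true')
args = parser.parse_args()

backUp(args.inputDirs, args.backUpFrom, args.USBlogFile, args.backUpTo,
       args.DataBaseAddress, args.spreadsheet,
       args.freesurfer, args.motion, args.copyExecute, args.nasBackup)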
Example #7
import os
import textwrap
import pickle
import backUp
import pandas as pd
import motion_extraction
import freesurfer
import freesurfer_summary


import subject as subj
if os.path.isfile('subjectPickle'):
    with open('subjectPickle', 'rb') as f:
        subjClass = pickle.load(f)
else:
    with open('subjectPickle', 'wb') as f:
        subjClass = subj.subject('/Users/kangik/KIM_SE_UK_46676612',
                                 '/Volumes/promise/nas_BackUp/CCNC_MRI_3T/')
        pickle.dump(subjClass, f)


# execute copy test
try:
    backUp.executeCopy(subjClass)
except Exception:
    # ignore copy errors during this test run
    pass
print(subjClass.folderName)
subjDf = backUp.saveLog(subjClass)
print(subjDf)
DataBaseAddress = 'database.xls'

dbDf = backUp.processDB(DataBaseAddress)
newDf = pd.concat([dbDf, subjDf])
Example #8
    p2 = []
    p3 = []
    p4 = []
    p5 = []
    cc = 0
    trigger = int

    # count the commas in the parsed sentence
    count_comma = res1.count(',')
    print('The sentence contains %d comma(s)' % count_comma)


    if count_comma == 0:
        subject(res)
        object(res)
        object_by_tree(t)
        attrib(res)
        advclause(res)
        predicat(res)
        special(res)
        nonFinite(res)
        compara(res)


        for j in range(0, len(t)):
            for k in t[j].treepositions()[1:]:
                if type(t[j]) == nltk.tree.Tree and t[j].label() == 'VP':
                    if t[j, 0, 0] in ['am', 'is', 'are']:
                        print('subject-copula-predicative structure', 'simple present tense',
                              'linking verb is: %s' % (t[j, 0, 0]))
Example #9
    def getSubjectbyID(self, sid):
        return subject.subject(sid, self)
Example #10
    def crawl_subject(self):
        import subject
        subj = subject.subject(self.target_ita_url)
        self.subject_dict = subj.publish_subject_dict()
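
crawl_subject() above is a method on a crawler class that stores the board URL on self.target_ita_url; a hypothetical host class and usage, with the class name and URL made up for illustration:

# Hypothetical host class; only the crawl_subject() body comes from the example above.
class ItaBoardCrawler:
    def __init__(self, target_ita_url):
        self.target_ita_url = target_ita_url
        self.subject_dict = {}

    def crawl_subject(self):
        import subject
        subj = subject.subject(self.target_ita_url)
        self.subject_dict = subj.publish_subject_dict()

crawler = ItaBoardCrawler('http://example.com/ita')   # made-up URL
crawler.crawl_subject()
print(crawler.subject_dict)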
Example #11
import os
import argparse
import textwrap
import pickle
import backUp
import pandas as pd
import motion_extraction
import freesurfer
import freesurfer_summary

import subject as subj
if os.path.isfile('subjectPickle'):
    with open('subjectPickle', 'rb') as f:
        subjClass = pickle.load(f)
else:
    with open('subjectPickle', 'wb') as f:
        subjClass = subj.subject('/Users/kangik/KIM_SE_UK_46676612',
                                 '/Volumes/promise/nas_BackUp/CCNC_MRI_3T/')
        pickle.dump(subjClass, f)

# execute copy test
try:
    backUp.executeCopy(subjClass)
except Exception:
    # ignore copy errors during this test run
    pass
print(subjClass.folderName)
subjDf = backUp.saveLog(subjClass)
print(subjDf)
DataBaseAddress = 'database.xls'

dbDf = backUp.processDB(DataBaseAddress)
newDf = pd.concat([dbDf, subjDf])
#print newDf
Example #12
File: main.py Project: rookie0806/ajounice
import login
import subject
import download
import requests
import zippy
import os
if __name__ == '__main__':
    userid = input("id : ")
    userpw = input("pw : ")
    req = requests.Session()
    dirname = os.path.join(os.getcwd(), str(userid))
    req, result = login.login(req, str(userid), str(userpw))
    if not os.path.isdir(dirname):
        os.mkdir(dirname)
    if result:
        print("login success")
        sub_name, sub_num = subject.subject(req)
        for i in range(len(sub_name) - 1):
            download.download(req, dirname, sub_num[i], sub_name[i])
        zippy.makezip(userid, userid)
    else:
        print("login failed")