Code Example #1
def createProjects(files):
    """Create multiple projects at once from csv files"""
    import os
    #NOTE: Python 2 code. 'savepath' (output folder), 'cpath' (csv folder),
    #PDatabase and DBActions are module-level names in the source file and
    #are assumed to be in scope here.

    for filename in files:
        print filename
        name = os.path.splitext(filename)[0]
        #create/open the project database for this csv file
        DB = PDatabase(local=os.path.join(savepath,name))
        #add a record for the wild-type protein
        DB.add('wt')
        #add wt pdb
        stream = DBActions.fetchPDB(name)
        DBActions.addPDBFile(DB, 'wt', pdbdata=stream, pdbname=name, gui=False)
        #mark wt as the reference protein in the project metadata
        DB.meta.refprotein = 'wt'
        DB.meta.info['protein'] = name
        #import data from csv
        DB.importCSV(os.path.join(cpath,filename), namefield='Mutations')
        print 'imported ok'
        #drop the raw PDB column before committing
        DB.deleteField('PDB')
        DB.commit()
        DB.close()
        print 'done'
    return
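
A minimal usage sketch for the helper above, assuming the module-level globals it reads (savepath, cpath) and that each csv file is named after the PDB id it describes; all paths are hypothetical:

# Usage sketch (hypothetical paths). createProjects() reads the globals
# 'savepath' and 'cpath'; fetchPDB(name) implies each csv file is named
# after a PDB id, e.g. '1crn.csv'.
import os

savepath = '/tmp/peat_projects'   # where the new project databases are created
cpath = '/tmp/csv_data'           # where the source csv files live

files = [f for f in os.listdir(cpath) if f.endswith('.csv')]
createProjects(files)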
Code Example #2
File: zodbtest.py Project: shambo001/peat
def importOldProj(datadir, local=None, server=None,
                  project=None, username=None):
    """Import old peat projects"""
    import PEAT_DB.Database as peatDB
    from PEAT_DB.PEAT_dict import PEAT_dict, sub_dict
    import copy
    #NOTE: Python 2 code. PDatabase, PEATRecord, EkinProject and ekintypes
    #are module-level names in the source file (zodbtest.py).
    if local is not None:
        newDB = PDatabase(local=local)
    elif server is not None:
        newDB = PDatabase(server=server, username=username, port=8080,
                          password='******', project=project)
    else:
        #guard: without this, newDB would be unbound below
        raise ValueError('either local or server must be given')

    print newDB
    PT = peatDB.Database(datadir, Tk=False)
    oldDB = PT.DB
    print 'got old peat_db with %s proteins' % len(PT.proteins)

    print PT.DB.keys()
    #copy the special meta sections (userfields, table, etc.) across
    for p in newDB.meta.special:
        if p not in PT.DB.keys():
            continue
        print 'adding', p
        for k in PT.DB[p]:
            newDB.meta[p][k] = copy.deepcopy(PT.DB[p][k])
    #flag the persistent mapping as changed so ZODB stores the update
    newDB.meta._p_changed = 1

    #copy each protein record into a new PEATRecord
    for p in PT.proteins:
        if p in newDB.meta.special:
            continue

        name = oldDB[p]['Name']
        rec = PEATRecord(name=name)
        for col in oldDB[p].keys():
            cdata = oldDB[p][col]
            recdata = {}
            if col == 'name':
                cdata = oldDB[p]['Name']

            #wrap ekin-type fields in an EkinProject, skipping empty ones
            if oldDB['userfields'].has_key(col) and oldDB['userfields'][col]['field_type'] in ekintypes:
                E = EkinProject(data=cdata)
                E.length = len(E.datasets)
                if len(E.datasets) == 0:
                    continue
                cdata = E

            #sub_dict values must be deep-copied into a plain dict
            if type(cdata) == sub_dict:
                for k in cdata.keys():
                    recdata[k] = copy.deepcopy(cdata[k])
            else:
                recdata = cdata
            if cdata != '' and cdata is not None:
                rec.addField(col, data=recdata)
        newDB.add(p, rec)
    print newDB.meta.userfields
    #remove any File-type columns, too hard to import
    #(keys()[:] copies the key list so fields can be deleted while looping)
    for m in newDB.meta.userfields.keys()[:]:
        if newDB.meta.userfields[m]['field_type'] == 'File':
            newDB.deleteField(m)
    newDB.commit(user='******', note='import')
    newDB.close()
    print 'import done'

    return
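
A usage sketch for importOldProj with placeholder paths; the server variant mirrors the function's own keyword arguments:

# Usage sketch (hypothetical paths): convert an old PEAT project
# directory into a new local ZODB-backed project file.
importOldProj('/data/old_peat_project', local='/data/new_project.fs')

# ...or write to a remote project server instead (host and credentials
# below are placeholders):
# importOldProj('/data/old_peat_project', server='localhost',
#               project='myproject', username='admin')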