Beispiel #1
0
    def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,update_datasource_only,which_models):
        """Thread setup for exporting PanDDA models.

        Stores all directory/database settings and prepares the default
        refmac5 keyword dictionary used later during refinement.
        """
        QtCore.QThread.__init__(self)
        # input locations
        self.panddas_directory = panddas_directory
        self.initial_model_directory = initial_model_directory
        # database handle; make sure the schema is up to date
        self.datasource = datasource
        self.db = XChemDB.data_source(self.datasource)
        self.db.create_missing_columns()
        self.db_list = self.db.get_empty_db_dict()
        # logging and available external programs
        self.xce_logfile = xce_logfile
        self.Logfile = XChemLog.updateLog(xce_logfile)
        self.external_software = XChemUtils.external_software(xce_logfile).check()
        # run options
        self.update_datasource_only = update_datasource_only
        self.which_models = which_models
        self.already_exported_models = []

        # default refmac5 keywords; empty string means "keyword not set"
        self.RefmacParams = {
            'HKLIN': '',
            'HKLOUT': '',
            'XYZIN': '',
            'XYZOUT': '',
            'LIBIN': '',
            'LIBOUT': '',
            'TLSIN': '',
            'TLSOUT': '',
            'TLSADD': '',
            'NCYCLES': '10',
            'MATRIX_WEIGHT': 'AUTO',
            'BREF': '    bref ISOT\n',
            'TLS': '',
            'NCS': '',
            'TWIN': '',
        }
Beispiel #2
0
def append_dict_of_gda_barcodes(out_dict,files,xce_logfile):
    """Scan a single gda server logfile and add sampleID -> barcode
    entries to out_dict; the (mutated) dict is also returned."""
    Logfile=XChemLog.updateLog(xce_logfile)

    def scan(line_iterator):
        # a barcode line precedes the snapshot line of the sample it
        # belongs to, so remember it until the sample ID shows up
        barcode=None
        waiting_for_sample=False
        for line in line_iterator:
            if 'BART SampleChanger - getBarcode() returning' in line:
                barcode=line.split()[-1]
                waiting_for_sample=True
            if waiting_for_sample and 'Snapshots will be saved' in line:
                sampleID=line.split()[-1].split('/')[-1]
                out_dict[sampleID]=barcode
                Logfile.insert('found: sample=%s, barcode=%s, file=%s' %(sampleID,barcode,files))
                waiting_for_sample=False

    gda_log=files[files.rfind('/')+1:]
    if gda_log.startswith('gda_server.') and gda_log.endswith('.gz'):
        # rotated, gzipped logfile
        with gzip.open(files,'r') as f:
            scan(f)
    elif gda_log.startswith('gda_server') and gda_log.endswith('log'):
        # current, uncompressed logfile
        scan(open(files))

    return out_dict
Beispiel #3
0
 def __init__(self,initial_model_directory,xce_logfile,datasource):
     """Thread setup: remember project locations and open the data source."""
     QtCore.QThread.__init__(self)
     # logging
     self.xce_logfile = xce_logfile
     self.Logfile = XChemLog.updateLog(xce_logfile)
     # project locations and database handle
     self.initial_model_directory = initial_model_directory
     self.datasource = datasource
     self.db = XChemDB.data_source(self.datasource)
Beispiel #4
0
 def __init__(self, htmlDir, projectDir, database, xce_logfile):
     """Remember output folders, open the database and reset caches."""
     self.Logfile = XChemLog.updateLog(xce_logfile)
     self.db = XChemDB.data_source(database)
     # output locations
     self.htmlDir = htmlDir
     self.projectDir = projectDir
     # per-crystal state, populated lazily later on
     self.db_dict = None
     self.pdb = None
Beispiel #5
0
def print_acedrg_status(xce_logfile,xtal_db_dict):
    """Write a summary of acedrg compound-restraint generation to the logfile.

    xce_logfile  -- path of the XCE logfile
    xtal_db_dict -- dict of crystal -> database-row dict; only the
                    'RefinementCIFStatus' field is inspected
    """
    Logfile=XChemLog.updateLog(xce_logfile)
    Logfile.insert('compound restraints summary:')
    pending=0
    started=0
    running=0
    missing_smiles=0
    failed=0
    success=0
    unknown=0
    for xtal in xtal_db_dict:
        db_dict=xtal_db_dict[xtal]
        status=db_dict['RefinementCIFStatus']
        if 'pending' in status:
            pending+=1
        elif 'started' in status:
            # bug fix: this used to read "status+=1", i.e. it tried to
            # increment the status *string* (TypeError) instead of the
            # 'started' counter
            started+=1
        elif 'running' in status:
            running+=1
        elif 'missing' in status:
            missing_smiles+=1
        elif 'failed' in status:
            failed +=1
        elif 'generated' in status:
            success+=1
        else:
            unknown+=1
    Logfile.insert('restraint generation pending: ...... %s' %str(pending))
    Logfile.insert('restraint generation started: ...... %s' %str(started))
    Logfile.insert('restraint generation running: ...... %s' %str(running))
    Logfile.insert('missing smiles string: ............. %s' %str(missing_smiles))
    Logfile.insert('restraint generation failed: ....... %s' %str(failed))
    Logfile.insert('restraints successfully created: ... %s' %str(success))
    Logfile.insert('unknown status: .................... %s' %str(unknown))
Beispiel #6
0
 def __init__(self,initial_model_directory,pandda_params,xce_logfile,datasource,run_pandda_analyse):
     """Thread setup: cache pandda file conventions, logfile and database."""
     QtCore.QThread.__init__(self)
     # pandda input/output conventions
     self.panddas_directory = pandda_params['out_dir']
     self.pdb_style = pandda_params['pdb_style']
     self.mtz_style = pandda_params['mtz_style']
     # logging, project location and database handle
     self.Logfile = XChemLog.updateLog(xce_logfile)
     self.initial_model_directory = initial_model_directory
     self.db = XChemDB.data_source(datasource)
     self.run_pandda_analyse = run_pandda_analyse
Beispiel #7
0
def change_links_to_selected_data_collection_outcome(sample,data_collection_dict,data_collection_column_three_dict,dataset_outcome_dict,initial_model_directory,data_source_file,xce_logfile):
    """Re-point the <sample>.mtz/.log symlinks at the auto-processing result
    the user selected in the data collection table, then write the selected
    result back to the data source.

    Only logs a warning if the selected auto-processing folder has not been
    copied into the project directory yet.
    """
    Logfile=XChemLog.updateLog(xce_logfile)
    # find out which row was selected in respective data collection table
    selected_processing_result='n/a'
    indexes=data_collection_column_three_dict[sample][0].selectionModel().selectedRows()
    if indexes != []:       # i.e. logfile exists
        for index in sorted(indexes):
            selected_processing_result=index.row()

    for n,entry in enumerate(data_collection_dict[sample]):
        if entry[0]=='logfile':
            # entry[7] is the table row of this processing result; match it
            # against the user's selection
            if entry[7]==selected_processing_result:
                visit=entry[1]
                run=entry[2]
                autoproc=entry[4]
                db_dict=entry[6]
                # NOTE(review): 'outcome' is assigned but never used below
                outcome=dataset_outcome_dict[sample]
                path_to_logfile=db_dict['DataProcessingPathToLogfile']
                path_to_mtzfile=db_dict['DataProcessingPathToMTZfile']
                mtz_filename=db_dict['DataProcessingMTZfileName']
                log_filename=db_dict['DataProcessingLOGfileName']
                # build paths relative to the project directory so the
                # symlinks survive if the project tree is moved or remounted
                relative_path_to_mtzfile='./'+path_to_mtzfile.replace(initial_model_directory,'')
                if relative_path_to_mtzfile.startswith('.//'):
                    relative_path_to_mtzfile=relative_path_to_mtzfile.replace('.//','./')
                relative_path_to_logfile='./'+path_to_logfile.replace(initial_model_directory,'')
                if relative_path_to_logfile.startswith('.//'):
                    relative_path_to_logfile=relative_path_to_logfile.replace('.//','./')


                # first check if folders and files exist
                # since user might do this before data are actually copied over

                if os.path.isdir(os.path.join(initial_model_directory,sample,'autoprocessing',visit+'-'+run+autoproc)):
                    # the user made an explicit choice, so the result is no
                    # longer auto-assigned
                    db_dict['DataProcessingAutoAssigned']='False'
                    Logfile.insert('changing directory to: '+os.path.join(initial_model_directory,sample))
                    os.chdir(os.path.join(initial_model_directory,sample))
                    # first remove old links
                    os.system('/bin/rm '+sample+'.mtz 2> /dev/null')
                    os.system('/bin/rm '+sample+'.log 2> /dev/null')
                    # make new links
#                    Logfile.insert('setting symlink: '+os.path.join(path_to_logfile,log_filename)+' -> '+sample+'.log')
#                    os.symlink(os.path.join(path_to_logfile,log_filename),sample+'.log')
#                    Logfile.insert('setting symlink: '+os.path.join(path_to_mtzfile,mtz_filename)+' -> '+sample+'.mtz')
#                    os.symlink(os.path.join(path_to_mtzfile,mtz_filename),sample+'.mtz')
                    Logfile.insert('setting relative symlink: '+os.path.join(relative_path_to_logfile,log_filename)+' -> '+sample+'.log')
                    os.symlink(os.path.join(relative_path_to_logfile,log_filename),sample+'.log')
                    Logfile.insert('setting relative symlink: '+os.path.join(relative_path_to_mtzfile,mtz_filename)+' -> '+sample+'.mtz')
                    os.symlink(os.path.join(relative_path_to_mtzfile,mtz_filename),sample+'.mtz')

                    # update data source
                    data_source=XChemDB.data_source(data_source_file)
                    data_source.update_insert_data_source(sample,db_dict)

                else:
                    Logfile.insert('please copy data to PROJECT DIRECTORY first!')
Beispiel #8
0
 def __init__(self,pandda_params,xce_logfile,datasource):
     """Store the pandda directory layout and open the database."""
     # directory layout as configured in the pandda settings
     self.data_directory = pandda_params['data_dir']
     self.panddas_directory = pandda_params['out_dir']
     self.min_build_datasets = pandda_params['min_build_datasets']
     self.pdb_style = pandda_params['pdb_style']
     self.mtz_style = pandda_params['mtz_style']
     self.input_dir_structure = pandda_params['pandda_dir_structure']
     # sanity-check state, updated by later checks
     self.problem_found = False
     self.error_code = -1
     # logging and database handle
     self.Logfile = XChemLog.updateLog(xce_logfile)
     self.db = XChemDB.data_source(datasource)
Beispiel #9
0
def get_gda_barcodes(sampleList, gzipped_logs_parsed, gda_log_start_line,
                     beamline, xce_logfile):
    """Parse GDA server logfiles on the beamline for sample pin barcodes.

    sampleList          -- iterable of sample IDs of interest
    gzipped_logs_parsed -- if True, rotated (.gz) logs are skipped
    gda_log_start_line  -- bookmark into the live (uncompressed) logfile
    beamline            -- beamline name; logs live in /dls_sw/<beamline>/logs
    xce_logfile         -- path of the XCE logfile

    Returns (pinDict, gda_log_start_line): pinDict maps sampleID -> barcode,
    and the bookmark is advanced past the newly parsed lines.
    """
    Logfile = XChemLog.updateLog(xce_logfile)
    Logfile.insert('checking GDA logfile in {0!s}'.format(
        os.path.join('/dls_sw', beamline, 'logs')))
    pinDict = {}
    found_barcode_entry = False
    for gdaLogFile in glob.glob(
            os.path.join('/dls_sw', beamline, 'logs', 'gda-server*log*')):
        Logfile.insert('parsing {0!s}'.format(gdaLogFile))
        if gzipped_logs_parsed and gdaLogFile.endswith('.gz'):
            # rotated logs never change, so one pass per visit is enough
            Logfile.insert('{0!s} was already parsed during this visit'.format(
                gdaLogFile))
            continue
        if gdaLogFile.endswith('.gz'):
            with gzip.open(gdaLogFile, 'r') as f:
                for line in f:
                    if 'BART SampleChanger - getBarcode() returning' in line:
                        barcode = line.split()[-1]
                        found_barcode_entry = True
                    if found_barcode_entry:
                        # the barcode belongs to the next sample whose
                        # snapshots get written out
                        if 'Snapshots will be saved' in line:
                            sampleID = line.split()[-1].split('/')[-1]
                            if sampleID in sampleList:
                                pinDict[sampleID] = barcode
                                Logfile.insert(
                                    'found: sample={0!s}, barcode={1!s}, file={2!s}'
                                    .format(sampleID, barcode, gdaLogFile))
                            found_barcode_entry = False
        else:
            # live logfile: only look at lines added since the last call
            n = None
            for n, line in enumerate(
                    open(gdaLogFile).readlines()[gda_log_start_line:]):
                if 'BART SampleChanger - getBarcode() returning' in line:
                    barcode = line.split()[-1]
                    found_barcode_entry = True
                if found_barcode_entry:
                    if 'Snapshots will be saved' in line:
                        sampleID = line.split()[-1].split('/')[-1]
                        if sampleID in sampleList:
                            pinDict[sampleID] = barcode
                            Logfile.insert(
                                'found: sample={0!s}, barcode={1!s}, file={2!s}'
                                .format(sampleID, barcode, gdaLogFile))
                        found_barcode_entry = False
            # bug fix: 'n' was unbound (NameError) when no new lines had been
            # written since the last call; only advance the bookmark when at
            # least one line was read
            if n is not None:
                gda_log_start_line = gda_log_start_line + n - 1

    return pinDict, gda_log_start_line
Beispiel #10
0
 def __init__(self,initial_model_directory,run_dict,protocol,spg,ref,reso_limit,cc_half,xce_logfile,external_software,ccp4_scratch_directory,max_queue_jobs,database):
     """Thread setup for reprocessing runs: store all job parameters."""
     QtCore.QThread.__init__(self)
     # data locations
     self.initial_model_directory = initial_model_directory
     self.run_dict = run_dict
     # processing options
     self.protocol = protocol
     self.spg = spg
     self.ref = ref
     self.reso_limit = reso_limit
     self.cc_half = cc_half
     # logging, available software and cluster settings
     self.xce_logfile = xce_logfile
     self.Logfile = XChemLog.updateLog(xce_logfile)
     self.external_software = external_software
     self.ccp4_scratch_directory = ccp4_scratch_directory
     self.max_queue_jobs = max_queue_jobs
     # database handle
     self.database = database
     self.db = XChemDB.data_source(self.database)
Beispiel #11
0
 def __init__(self,initial_model_directory,run_dict,protocol,spg,ref,reso_limit,cc_half,xce_logfile,external_software,ccp4_scratch_directory,max_queue_jobs,database,overwrite):
     """Thread setup for reprocessing runs, with an overwrite toggle."""
     QtCore.QThread.__init__(self)
     # data locations
     self.initial_model_directory = initial_model_directory
     self.run_dict = run_dict
     # processing options
     self.protocol = protocol
     self.spg = spg
     self.ref = ref
     self.reso_limit = reso_limit
     self.cc_half = cc_half
     # logging, available software and cluster settings
     self.xce_logfile = xce_logfile
     self.Logfile = XChemLog.updateLog(xce_logfile)
     self.external_software = external_software
     self.ccp4_scratch_directory = ccp4_scratch_directory
     self.max_queue_jobs = max_queue_jobs
     # database handle and overwrite behaviour
     self.database = database
     self.db = XChemDB.data_source(self.database)
     self.overwrite = overwrite
Beispiel #12
0
    def __init__(self,project_directory,xtalID,event_map,ligand_pdb,xce_logfile,db_file,resolution):
        """Prepare conversion of a PanDDA event map into structure factors."""
        self.Logfile = XChemLog.updateLog(xce_logfile)

        # bail out early if the event map is missing; nothing else is set up
        self.event_map = event_map
        if not os.path.isfile(self.event_map):
            self.Logfile.insert('cannot find Event map: '+self.event_map)
            self.Logfile.insert('cannot convert event_map to structure factors!')
            return

        self.project_directory = project_directory
        self.xtalID = xtalID
        self.ligand_pdb = ligand_pdb
        # event name: map basename without its .map/.ccp4 extension
        self.event = event_map[event_map.rfind('/')+1:].replace('.map','').replace('.ccp4','')
        self.db = XChemDB.data_source(db_file)
        self.resolution = resolution
Beispiel #13
0
def linkAutoProcessingResult(xtal, dbDict, projectDir, xce_logfile):
    """Re-link <xtal>.mtz and <xtal>.log to the auto-processing result
    described by dbDict.

    The target files are expected under
    <projectDir>/<xtal>/autoprocessing/<visit>-<run><pipeline>/; links are
    only created if the target file exists.
    """
    Logfile = XChemLog.updateLog(xce_logfile)

    # fields identifying the 'autoprocessing/<visit>-<run><pipeline>' folder
    run = dbDict['DataCollectionRun']
    visit = dbDict['DataCollectionVisit']
    autoproc = dbDict['DataProcessingProgram']
    # basenames of the MTZ and LOG files produced by the pipeline
    mtzFileAbs = dbDict['DataProcessingPathToMTZfile']
    mtzfile = mtzFileAbs[mtzFileAbs.rfind('/') + 1:]
    logFileAbs = dbDict['DataProcessingPathToLogfile']
    logfile = logFileAbs[logFileAbs.rfind('/') + 1:]

    # symlinks are created relative to the sample folder
    Logfile.insert('changing directory to ' + os.path.join(projectDir, xtal))
    os.chdir(os.path.join(projectDir, xtal))

    # MTZ file
    Logfile.warning('removing %s.mtz' % xtal)
    os.system('/bin/rm %s.mtz' % xtal)
    # NOTE(review): leftover debugging output (Python 2 print statement)
    print xtal, os.path.join('autoprocessing', visit + '-' + run + autoproc,
                             mtzfile)
    if os.path.isfile(
            os.path.join('autoprocessing', visit + '-' + run + autoproc,
                         mtzfile)):
        os.symlink(
            os.path.join('autoprocessing', visit + '-' + run + autoproc,
                         mtzfile), xtal + '.mtz')
        Logfile.insert(
            'linking MTZ file from different auto-processing pipeline:')
        Logfile.insert('ln -s ' + os.path.join('autoprocessing', visit + '-' +
                                               run + autoproc, mtzfile) + ' ' +
                       xtal + '.mtz')
    # LOG file
    Logfile.warning('removing %s.log' % xtal)
    os.system('/bin/rm %s.log' % xtal)
    if os.path.isfile(
            os.path.join('autoprocessing', visit + '-' + run + autoproc,
                         logfile)):
        os.symlink(
            os.path.join('autoprocessing', visit + '-' + run + autoproc,
                         logfile), xtal + '.log')
        Logfile.insert(
            'linking LOG file from different auto-processing pipeline:')
        Logfile.insert('ln -s ' + os.path.join('autoprocessing', visit + '-' +
                                               run + autoproc, logfile) + ' ' +
                       xtal + '.log')
Beispiel #14
0
def print_cluster_status_message(program,cluster_dict,xce_logfile):
    """Summarise the cluster jobs currently running for *program*."""
    Logfile=XChemLog.updateLog(xce_logfile)
    Logfile.insert('cluster status summary:')
    jobs=cluster_dict[program]
    Logfile.insert('%s %s jobs are running on the cluster' %(len(jobs),program))
    if len(jobs) > 0:
        total_minutes=0
        job_ids=[]
        for n,entry in enumerate(jobs):
            # entry: (job id, ..., runtime in minutes)
            total_minutes += entry[2]
            if entry[0] not in job_ids:
                job_ids.append(entry[0])
        # n is the last index, so n + 1 equals the number of jobs
        average_runtime=round(float(total_minutes)/float(n+1),0)
        Logfile.insert('average run time '+str(average_runtime)+' minutes')
        if job_ids != []:
            Logfile.insert('you can kill them by pasting the following line into a new terminal window:')
            out='qdel '
            for job_id in job_ids:
                out += str(job_id)+' '
            Logfile.insert(out)
Beispiel #15
0
def get_names_of_current_clusters(xce_logfile,panddas_directory):
    """Map every cluster_analysis sub-directory to a list holding (first) the
    path of one representative pdb file and then all dataset names."""
    Logfile=XChemLog.updateLog(xce_logfile)
    Logfile.insert('parsing %s/cluster_analysis' %panddas_directory)
    os.chdir('%s/cluster_analysis' %panddas_directory)
    cluster_dict={}
    for out_dir in sorted(glob.glob('*')):
        if not os.path.isdir(out_dir):
            continue
        cluster_dict[out_dir]=[]
        found_first_pdb=False
        for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
            xtal=folder[folder.rfind('/')+1:]
            if not found_first_pdb:
                # remember one existing pdb as the cluster's reference model
                pdb=os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb')
                if os.path.isfile(pdb):
                    cluster_dict[out_dir].append(pdb)
                    found_first_pdb=True
            cluster_dict[out_dir].append(xtal)
    return cluster_dict
Beispiel #16
0
def print_acedrg_status(xce_logfile, xtal_db_dict):
    """Log a tally of compound restraint (acedrg) generation states."""
    Logfile = XChemLog.updateLog(xce_logfile)
    Logfile.insert('compound restraints summary:')
    # substrings matched against RefinementCIFStatus, in the same order of
    # precedence as the original if/elif cascade; first match wins
    states = ('pending', 'started', 'running', 'missing', 'failed',
              'generated')
    tally = dict((s, 0) for s in states)
    tally['unknown'] = 0
    for xtal in xtal_db_dict:
        status = xtal_db_dict[xtal]['RefinementCIFStatus']
        for s in states:
            if s in status:
                tally[s] += 1
                break
        else:
            tally['unknown'] += 1
    Logfile.insert('restraint generation pending: ...... {0!s}'.format(
        str(tally['pending'])))
    Logfile.insert('restraint generation started: ...... {0!s}'.format(
        str(tally['started'])))
    Logfile.insert('restraint generation running: ...... {0!s}'.format(
        str(tally['running'])))
    Logfile.insert('missing smiles string: ............. {0!s}'.format(
        str(tally['missing'])))
    Logfile.insert('restraint generation failed: ....... {0!s}'.format(
        str(tally['failed'])))
    Logfile.insert('restraints successfully created: ... {0!s}'.format(
        str(tally['generated'])))
    Logfile.insert('unknown status: .................... {0!s}'.format(
        str(tally['unknown'])))
Beispiel #17
0
def append_dict_of_gda_barcodes(out_dict, files, xce_logfile):
    """Parse a single GDA server logfile and add sampleID -> barcode
    entries to out_dict; the (mutated) dict is also returned.

    files is the full path of one logfile; rotated (gzipped) and current
    (plain-text) logs are handled separately.
    """
    Logfile = XChemLog.updateLog(xce_logfile)
    found_barcode_entry = False
    gda_log = files[files.rfind('/') + 1:]
    # bug fix: 'and' binds tighter than 'or', so the original condition
    # "a or b and c" parsed as "a or (b and c)" and sent any file starting
    # with 'gda_server.' into gzip.open even when it was not gzipped;
    # str.startswith accepts a tuple of prefixes, which keeps the intent clear
    if gda_log.startswith(('gda_server.', 'gda-server.')) and \
            gda_log.endswith('.gz'):
        with gzip.open(files, 'r') as f:
            for line in f:
                if 'BART SampleChanger - getBarcode() returning' in line:
                    barcode = line.split()[-1]
                    found_barcode_entry = True
                if found_barcode_entry:
                    # the barcode belongs to the next sample whose snapshots
                    # get written out
                    if 'Snapshots will be saved' in line:
                        sampleID = line.split()[-1].split('/')[-1]
                        out_dict[sampleID] = barcode
                        Logfile.insert(
                            'found: sample={0!s}, barcode={1!s}, file={2!s}'.
                            format(sampleID, barcode, files))
                        found_barcode_entry = False
    elif gda_log.startswith(('gda_server', 'gda-server')) and \
            gda_log.endswith('txt'):
        for line in open(files):
            if 'BART SampleChanger - getBarcode() returning' in line:
                barcode = line.split()[-1]
                found_barcode_entry = True
            if found_barcode_entry:
                if 'Snapshots will be saved' in line:
                    sampleID = line.split()[-1].split('/')[-1]
                    out_dict[sampleID] = barcode
                    Logfile.insert(
                        'found: sample={0!s}, barcode={1!s}, file={2!s}'.
                        format(sampleID, barcode, files))
                    found_barcode_entry = False

    return out_dict
Beispiel #18
0
def print_cluster_status_message(program, cluster_dict, xce_logfile):
    """Summarise the cluster jobs currently running for *program*."""
    Logfile = XChemLog.updateLog(xce_logfile)
    Logfile.insert('cluster status summary:')
    jobs = cluster_dict[program]
    Logfile.insert('{0!s} {1!s} jobs are running on the cluster'.format(
        len(jobs), program))
    if len(jobs) > 0:
        total_minutes = 0
        job_ids = []
        for n, entry in enumerate(jobs):
            # entry: (job id, ..., runtime in minutes)
            total_minutes += entry[2]
            if entry[0] not in job_ids:
                job_ids.append(entry[0])
        # n is the last index, so n + 1 equals the number of jobs
        average_runtime = round(float(total_minutes) / float(n + 1), 0)
        Logfile.insert('average run time ' + str(average_runtime) + ' minutes')
        if job_ids != []:
            Logfile.insert(
                'you can kill them by pasting the following line into a new terminal window:'
            )
            Logfile.insert(
                'qdel ' + ''.join(str(job_id) + ' ' for job_id in job_ids))
Beispiel #19
0
    def __init__(self,pandda_params,xce_logfile,dataset_list,datasource):
        """Thread setup: unpack pandda parameters and open the data source."""
        QtCore.QThread.__init__(self)
        # directories and job submission settings
        self.data_directory = pandda_params['data_dir']
        self.panddas_directory = pandda_params['out_dir']
        self.submit_mode = pandda_params['submit_mode']
        self.nproc = pandda_params['nproc']
        # dataset selection and map generation options
        self.min_build_datasets = pandda_params['min_build_datasets']
        self.pdb_style = pandda_params['pdb_style']
        self.mtz_style = pandda_params['mtz_style']
        self.sort_event = pandda_params['sort_event']
        self.number_of_datasets = pandda_params['N_datasets']
        self.max_new_datasets = pandda_params['max_new_datasets']
        self.grid_spacing = pandda_params['grid_spacing']
        self.filter_pdb = pandda_params['filter_pdb']
        # logging, dataset selection and database handle
        self.Logfile = XChemLog.updateLog(xce_logfile)
        self.dataset_list = dataset_list
        self.datasource = datasource
        self.db = XChemDB.data_source(datasource)
Beispiel #20
0
    def settings(self, xce_object):
        # set XCE version
        xce_object.xce_version = 'v1.4.0'

        # general settings
        xce_object.allowed_unitcell_difference_percent = 12
        xce_object.acceptable_low_resolution_limit_for_data = 3.5
        xce_object.filename_root = '${samplename}'
        xce_object.data_source_set = False
        xce_object.max_queue_jobs = 100

        ## directory settings

        # set current directory and direct log to it
        xce_object.current_directory = os.getcwd()
        xce_object.xce_logfile = os.path.join(xce_object.current_directory,
                                              'xce.log')

        # if in the correct place, set the various directories
        if 'labxchem' in xce_object.current_directory:
            if len(xce_object.current_directory.split('/')
                   ) >= 9 and xce_object.current_directory.split(
                       '/'
                   )[6] == 'processing' and xce_object.current_directory.split(
                       '/')[8] == 'processing':
                xce_object.labxchem_directory = '/' + os.path.join(
                    *xce_object.current_directory.split('/')
                    [1:8])  # need splat operator: *
                xce_object.labxchem_directory_current = '/' + os.path.join(
                    *xce_object.current_directory.split('/')[1:9]
                )  # labxchem_directory_current is where they actually have write permission
            else:
                xce_object.labxchem_directory = '/' + os.path.join(
                    *xce_object.current_directory.split('/')
                    [1:6])  # need splat operator: *
                xce_object.labxchem_directory_current = '/' + os.path.join(
                    *xce_object.current_directory.split('/')
                    [1:7])  # need splat operator: *
#            xce_object.labxchem_directory = '/' + os.path.join(
#                *xce_object.current_directory.split('/')[1:6])  # need splat operator: *
            xce_object.beamline_directory = os.path.join(
                xce_object.labxchem_directory, 'processing', 'beamline')
            if os.path.isdir(
                    os.path.join(xce_object.labxchem_directory, 'processing',
                                 'analysis', 'model_building')):
                xce_object.initial_model_directory = os.path.join(
                    xce_object.labxchem_directory, 'processing', 'analysis',
                    'model_building')
            else:
                xce_object.initial_model_directory = os.path.join(
                    xce_object.labxchem_directory, 'processing', 'analysis',
                    'initial_model')
            xce_object.reference_directory = os.path.join(
                xce_object.labxchem_directory, 'processing', 'reference')
            xce_object.database_directory = os.path.join(
                xce_object.labxchem_directory, 'processing', 'database')
            xce_object.panddas_directory = os.path.join(
                xce_object.labxchem_directory, 'processing', 'analysis',
                'panddas')
            xce_object.datasets_summary_file = os.path.join(
                xce_object.database_directory,
                str(os.getcwd().split('/')[5]) + '_summary.pkl')
            xce_object.data_source_file = ''
            xce_object.html_export_directory = os.path.join(
                xce_object.labxchem_directory, 'processing', 'html')
            xce_object.group_deposit_directory = os.path.join(
                xce_object.labxchem_directory, 'processing',
                'group_deposition')
            if os.path.isfile(
                    os.path.join(xce_object.labxchem_directory, 'processing',
                                 'database', 'soakDBDataFile.sqlite')):
                xce_object.data_source_file = 'soakDBDataFile.sqlite'
                xce_object.database_directory = os.path.join(
                    xce_object.labxchem_directory, 'processing', 'database')
                xce_object.data_source_set = True
                xce_object.db = XChemDB.data_source(
                    os.path.join(xce_object.database_directory,
                                 xce_object.data_source_file))
                xce_object.db.create_missing_columns()

            xce_object.ccp4_scratch_directory = os.path.join(
                xce_object.labxchem_directory, 'processing', 'tmp')

            directory_list = [
                xce_object.beamline_directory,
                os.path.join(xce_object.labxchem_directory, 'processing',
                             'analysis'), xce_object.initial_model_directory,
                xce_object.panddas_directory, xce_object.reference_directory,
                xce_object.database_directory,
                xce_object.ccp4_scratch_directory,
                xce_object.html_export_directory,
                xce_object.group_deposit_directory
            ]

            for directory in directory_list:
                if not os.path.isdir(directory):
                    os.mkdir(directory)

        # otherwise, use the current working directory
        else:
            xce_object.labxchem_directory_current = xce_object.current_directory
            xce_object.beamline_directory = xce_object.current_directory
            xce_object.initial_model_directory = xce_object.current_directory
            xce_object.reference_directory = xce_object.current_directory
            xce_object.database_directory = xce_object.current_directory
            xce_object.data_source_file = ''
            xce_object.ccp4_scratch_directory = os.getenv('CCP4_SCR')
            xce_object.panddas_directory = xce_object.current_directory
            xce_object.datasets_summary_file = ''
            xce_object.group_deposit_directory = xce_object.current_directory

        ## deposition

        xce_object.deposit_dict = {}

        ## internal lists and dictionaries

        xce_object.data_collection_list = []
        xce_object.visit_list = []
        xce_object.target = ''
        xce_object.dataset_outcome_combobox_dict = {}
        xce_object.data_collection_dict = {}
        xce_object.xtal_db_dict = {}
        xce_object.pandda_analyse_input_table_dict = {}
        xce_object.dewar_configuration_dict = {}
        xce_object.data_collection_statistics_dict = {}
        xce_object.initial_model_dimple_dict = {
        }  # contains toggle button if dimple should be run
        xce_object.reference_file_list = []
        xce_object.all_columns_in_data_source = XChemDB.data_source(os.path.join
                                                                    (xce_object.database_directory,
                                                                     xce_object.data_source_file)) \
            .return_column_list()
        xce_object.albula_button_dict = {
        }  # using dials.image_viewer instead of albula, but keep name for dictionary
        xce_object.xtalform_dict = {}

        xce_object.dataset_outcome_dict = {
        }  # contains the dataset outcome buttons
        xce_object.data_collection_table_dict = {
        }  # contains the dataset table
        xce_object.data_collection_image_dict = {}
        xce_object.data_collection_column_three_dict = {}
        xce_object.datasets_summary_dict = {}
        xce_object.diffraction_data_table_dict = {}
        xce_object.refinement_table_dict = {}
        xce_object.main_data_collection_table_exists = False
        xce_object.timer_to_check_for_new_data_collection = QtCore.QTimer()

        xce_object.agamemnon = False
        xce_object.target_list, xce_object.visit_list = XChemMain.get_target_and_visit_list(
            xce_object.beamline_directory, False)

        xce_object.diffraction_data_dict = {}

        ## internal switches and flags

        xce_object.explorer_active = 0
        xce_object.coot_running = 0
        xce_object.progress_bar_start = 0
        xce_object.progress_bar_step = 0
        xce_object.albula = None
        xce_object.albula_subframes = []
        xce_object.show_diffraction_image = None
        xce_object.gdaLogInstructions = [0, False]
        # can be any widget to be displayed in data collection summary tab
        xce_object.data_collection_details_currently_on_display = None

        xce_object.dataset_outcome = [
            "success", "Failed - centring failed", "Failed - no diffraction",
            "Failed - processing", "Failed - loop empty",
            "Failed - loop broken", "Failed - low resolution",
            "Failed - no X-rays", "Failed - unknown"
        ]

        xce_object.refinement_stage = [
            '0 - All Datasets', '1 - Analysis Pending', '2 - PANDDA model',
            '3 - In Refinement', '4 - CompChem ready', '5 - Deposition ready',
            '6 - Deposited'
        ]

        self.set_xce_logfile(xce_object)

        ## external software packages
        xce_object.using_remote_qsub_submission = False
        xce_object.remote_qsub_submission = "/usr/bin/ssh <dls fed ID>@nx.diamond.ac.uk 'module load global/cluster; qsub'"

        xce_object.update_log = XChemLog.updateLog(xce_object.xce_logfile)
        xce_object.update_log.insert('new session started')
        xce_object.diffraction_data_directory = xce_object.current_directory
        xce_object.diffraction_data_search_info = 'n/a'
        xce_object.diffraction_data_reference_mtz = 'ignore'
        xce_object.html_export_directory = os.getcwd()
        xce_object.external_software = XChemUtils.external_software(
            xce_object.xce_logfile).check()

        xce_object.second_cif_file = None

        software_list = ['acedrg', 'phenix.elbow', 'grade']

        for software in software_list:
            if xce_object.external_software[software]:
                xce_object.restraints_program = str(software)
                xce_object.update_log.insert(
                    'will use ' + str(software) +
                    ' for generation of ligand coordinates and'
                    ' restraints')
            else:
                xce_object.restraints_program = ''
                xce_object.update_log.insert(
                    'No program for generation of ligand coordinates and restraints available!'
                )
# Beispiel #21
# 0
 def set_xce_logfile(self, xce_object):
     """Initialise the XCE logfile and attach a fresh update logger.

     Creates (or truncates) the logfile named in xce_object.xce_logfile,
     stamping it with the current XCE version, then binds a new
     XChemLog.updateLog instance to xce_object.update_log.
     """
     # start a new logfile, recording the running XCE version
     log_starter = XChemLog.startLog(xce_object.xce_logfile)
     log_starter.create_logfile(xce_object.xce_version)
     # replace any previous logger with one bound to the fresh logfile
     xce_object.update_log = XChemLog.updateLog(xce_object.xce_logfile)
# Beispiel #22
# 0
    def RunRefmac(self,Serial,RefmacParams,external_software,xce_logfile):
        """Write and launch a refmac5 refinement shell script for this crystal.

        Assembles refmac.csh in <ProjectPath>/<xtalID>/Refine_<Serial> from the
        RefmacParams dictionary (resolving HKLIN/HKLOUT, XYZIN/XYZOUT,
        LIBIN/LIBOUT and TLS file paths along the way), then submits the
        script via qsub when available, otherwise runs it locally in the
        background.

        Parameters:
            Serial: refinement cycle identifier; coerced to str and used in
                folder/file names (Refine_<Serial>, refine_<Serial>.pdb/.mtz).
            RefmacParams: dict of refmac keyword strings (HKLIN, XYZOUT, TLS,
                NCYCLES, MATRIX_WEIGHT, ...); mutated in place here.
            external_software: dict of booleans flagging available programs
                (qsub, phenix.find_tls_groups, giant.create_occupancy_params).
            xce_logfile: path of the XCE logfile used for progress messages.

        Returns None. Also returns None early (without starting anything) if
        a refinement is already in progress or no input MTZ can be found.

        Side effects: changes the current working directory several times,
        creates/removes files in the crystal folder, and touches
        REFINEMENT_IN_PROGRESS as a lock marker.
        """
        Logfile=XChemLog.updateLog(xce_logfile)
        Serial=str(Serial)

        # first check if refinement is ongoing and exit if yes
        if os.path.isfile(os.path.join(self.ProjectPath,self.xtalID,'REFINEMENT_IN_PROGRESS')):
#            coot.info_dialog('*** REFINEMENT IN PROGRESS ***')
            Logfile.insert('cannot start new refinement for %s: *** REFINEMENT IN PROGRESS ***' %self.xtalID)
            return None

        #######################################################
        # HKLIN & HKLOUT
        # prefer the free.mtz; fall back to the pandda input MTZ if present
        if os.path.isfile(os.path.join(self.ProjectPath,self.xtalID,self.xtalID+'.free.mtz')):
            RefmacParams['HKLIN']='HKLIN '+os.path.join(self.ProjectPath,self.xtalID,self.xtalID+'.free.mtz \\\n')
        elif os.path.isfile(os.path.join(self.ProjectPath,self.xtalID,self.xtalID+'-pandda-input.mtz')):
            RefmacParams['HKLIN']='HKLIN '+os.path.join(self.ProjectPath,self.xtalID,self.xtalID+'-pandda-input.mtz \\\n')
        else:
            Logfile.insert('%s: cannot find HKLIN for refinement; aborting...' %self.xtalID)
            return None
        RefmacParams['HKLOUT']='HKLOUT '+os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'refine_'+Serial+'.mtz \\\n')

        #######################################################
        # XYZIN & XYZOUT
        # in.pdb is expected to be written into the Refine_<Serial> folder by the caller
        RefmacParams['XYZIN']='XYZIN '+os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'in.pdb \\\n')
        RefmacParams['XYZOUT']='XYZOUT '+os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'refine_'+Serial+'.pdb \\\n')

        #######################################################
        # LIBIN & LIBOUT
        found_cif=False
        if os.path.isfile(os.path.join(self.ProjectPath,self.xtalID,self.compoundID+'.cif')):
            found_cif=True
            # in cases where multiple liagnds are present (e.g. NOG, ATP) the user for now needs to put
            # the respective dictionary into the xtalID folder
            # need to think of a better mechanism in the future
            additional_cif=False
            additional_cif_file=''
            for file in glob.glob(os.path.join(self.ProjectPath,self.xtalID,'*')):
                if file.endswith('.cif'):
                    if self.compoundID not in file:
                        additional_cif_file=file
#                        additional_cif=True   <- should be true, but need to check this part of the code! 16/11/2016
                        additional_cif=False
            # NOTE(review): additional_cif is always False here (see comment above),
            # so the libcheck merge branch below is currently dead code
            if additional_cif:
                Cmds = (
                    '#!'+os.getenv('SHELL')+'\n'
                    '\n'
                    '$CCP4/bin/libcheck << eof \n'
                    '_Y\n'
                    '_FILE_L '+os.path.join(self.ProjectPath,self.xtalID,self.compoundID+'.cif')+'\n'
                    '_FILE_L2 '+additional_cif_file+'\n'
                    '_FILE_O '+os.path.join(self.ProjectPath,self.xtalID,'combined_cif')+'\n'
                    '_END\n'
                    'eof\n'
                    )
                os.chdir(os.path.join(self.ProjectPath,self.xtalID))
                print Cmds
                os.system(Cmds)
                RefmacParams['LIBIN']='LIBIN '+self.ProjectPath+'/'+self.xtalID+'/combined_cif.lib \\\n'
            else:
                RefmacParams['LIBIN']='LIBIN '+self.ProjectPath+'/'+self.xtalID+'/'+self.compoundID+'.cif \\\n'
            RefmacParams['LIBOUT']='LIBOUT '+self.ProjectPath+'/'+self.xtalID+'/Refine_'+Serial+'/refine_'+Serial+'.cif \\\n'
        if not found_cif:
        # this should actually not be necessary, but the following scenario can happen:
        # if a new data source is created from a file system, but smiles and compoundID where not updated;
        # so the ligand may still be in the structure, but since the compoundID is unknown to the datasource,
        # its restraints won't be read in and refmac will fail
            for file in glob.glob(os.path.join(self.ProjectPath,self.xtalID,'*')):
                if file.endswith('.cif'):
                    RefmacParams['LIBIN']='LIBIN '+file+' \\\n'
                    RefmacParams['LIBOUT']='LIBOUT '+self.ProjectPath+'/'+self.xtalID+'/Refine_'+Serial+'/refine_'+Serial+'.cif \\\n'
                    break

        #######################################################
        # TLSIN & TLSOUT
        findTLS='\n'
        TLSphenix=''
        if RefmacParams['TLS'].startswith('refi'):
            if external_software['phenix.find_tls_groups']:
                # helper script derives TLS groups from in.pdb before refmac runs
                findTLS=os.path.join(os.getenv('XChemExplorer_DIR'),'helpers','phenix_find_TLS_groups.py')+' in.pdb\n'
                RefmacParams['TLSIN']='TLSIN '+self.ProjectPath+'/'+self.xtalID+'/Refine_'+Serial+'/refmac.tls \\\n'
                RefmacParams['TLSOUT']='TLSOUT '+self.ProjectPath+'/'+self.xtalID+'/Refine_'+Serial+'/refine.tls \\\n'
                TLSphenix=' phenix.tls '
            else:
                # TLS requested but no group finder available -> disable TLS refinement
                RefmacParams['TLS']='\n'


#        #######################################################
#        # create folder for new refinement cycle
#        os.mkdir(os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial))
#
#        #######################################################
#        # write PDB file
#        # now take protein pdb file and write it to newly create Refine_<serial> folder
#        # note: the user has to make sure that the ligand file was merged into main file
#        for item in coot_utils_XChem.molecule_number_list():
#            if coot.molecule_name(item).endswith(self.prefix+'.pdb'):
#                coot.write_pdb_file(item,os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'in.pdb'))

        #######################################################
        # PANDDAs stuff
        # only use occupancy refinement if EVENT map is present
        occupancy_refinement=''
        if external_software['giant.create_occupancy_params']:
            os.chdir(os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial))
            cmd = ( '#!'+os.getenv('SHELL')+'\n'
                    'export XChemExplorer_DIR="'+os.getenv('XChemExplorer_DIR')+'"\n'
                    'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','xce.setup-sh')+'\n'
                    'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')+'\n'
                    "giant.create_occupancy_params pdb=in.pdb refmac_occ_out='refmac_refine.params'\n"  )
#            os.system("giant.create_occupancy_params pdb=in.pdb refmac_occ_out='refmac_refine.params'")
            os.system(cmd)
            # quick fix for the moment; need to talk to Nick since this should not be necessary
            try:
                # rewrite the params file in place, replacing 'incomplete' with 'complete'
                params_file = fileinput.input('refmac_refine.params',inplace=True)
                for line in params_file:
                    if 'incomplete' in line:
                        line=line.replace('incomplete','complete')
                        print line,
#                elif 'occupancy refine' in line:
#                    line=line.replace('occupancy refine','occupancy refine ncycle 10\noccupancy refine')
#                    print line,
                    else:
                        print line,
                params_file.close()
            except OSError:
                # this may happen in case giant.create_occupancy_params did not produce a params output file
                # NOTE(review): on Python 2 a missing file raises IOError, which is
                # not a subclass of OSError, so this handler may not fire - verify
                pass

        create_bound_conformation=''
        if os.path.isfile(os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'refmac_refine.params')):
            occupancy_refinement='@refmac_refine.params\n'
            create_bound_conformation="/bin/rm *bound.pdb\ngiant.strip_conformations pdb=refine.pdb suffix='.bound.pdb'\n"

        #######################################################
        # we write 'REFINEMENT_IN_PROGRESS' immediately to avoid unncessary refiment
        os.chdir(os.path.join(self.ProjectPath,self.xtalID))
        os.system('touch REFINEMENT_IN_PROGRESS')

        #######################################################
        # clean up!
        # and remove all files which will be re-created by current refinement cycle
        os.system('/bin/rm refine.pdb refine.mtz validation_summary.txt validate_ligands.txt 2fofc.map fofc.map refine_molprobity.log')

        if external_software['qsub']:
            pbs_line='#PBS -joe -N XCE_refmac\n'
        else:
            pbs_line='\n'

        #######################################################
        # weight
        if str(RefmacParams['MATRIX_WEIGHT']).lower() == 'auto':
            weight='weight AUTO\n'
        else:
            weight='weight matrix '+str(RefmacParams['MATRIX_WEIGHT'])+'\n'

        #######################################################
        # PANDDA validation @ spider plot
        # compare the refined model against the pandda ensemble model, if one exists
        spider_plot=''
        if os.path.isfile(os.path.join(self.ProjectPath,self.xtalID,self.xtalID+'-ensemble-model.pdb')):
            if os.path.isfile(os.path.join(self.ProjectPath,self.xtalID,self.xtalID+'-pandda-input.mtz')):
                pdb_two=os.path.join(self.ProjectPath,self.xtalID,self.xtalID+'-ensemble-model.pdb')
                mtz_two=os.path.join(self.ProjectPath,self.xtalID,self.xtalID+'-pandda-input.mtz')
                pdb_one=os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'refine_'+Serial+'.pdb')
                mtz_one=os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'refine_'+Serial+'.mtz')
                spider_plot='giant.score_model pdb1=%s mtz1=%s pdb2=%s mtz2=%s res_names=LIG,UNL,DRG,FRG\n' %(pdb_one,mtz_one,pdb_two,mtz_two)

        #######################################################
        # PHENIX stuff (if working at DLS)
        module_load=''
        if os.getcwd().startswith('/dls'):
            module_load='module load phenix\n'

        # pick the environment-setup snippet matching the user's login shell
        source =''
        if 'bash' in os.getenv('SHELL'):
            source = (
                'export XChemExplorer_DIR="'+os.getenv('XChemExplorer_DIR')+'"\n'
                '\n'
                'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','xce.setup-sh')+'\n'
                'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')+'\n'   )
        elif 'csh' in os.getenv('SHELL'):
            source = (
                'setenv XChemExplorer_DIR '+os.getenv('XChemExplorer_DIR')+'\n'
                '\n'
                'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','xce.setup-csh')+'\n'
                'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh')+'\n'   )


        # full shell script: run refmac5 with a here-document of keywords, then
        # post-process (molprobity, ligand validation, map generation, symlinks)
        # and finally update the data source and remove the lock file
        refmacCmds = (
            '#!'+os.getenv('SHELL')+'\n'
            +pbs_line+
            '\n'
            +source+
            '\n'
            +module_load+
            'cd '+self.ProjectPath+'/'+self.xtalID+'/Refine_'+Serial+'\n'
            '\n'
            '$CCP4/bin/ccp4-python $XChemExplorer_DIR/helpers/update_status_flag.py %s %s %s %s\n' %(self.datasource,self.xtalID,'RefinementStatus','running') +
            '\n'
            +findTLS+
            'refmac5 '
            +RefmacParams['HKLIN']
            +RefmacParams['HKLOUT']
            +RefmacParams['XYZIN']
            +RefmacParams['XYZOUT']
            +RefmacParams['LIBIN']
            +RefmacParams['LIBOUT']
            +RefmacParams['TLSIN']
            +RefmacParams['TLSOUT']+
            ' << EOF > refmac.log\n'
            'make -\n'
            '    hydrogen ALL -\n'
            '    hout NO -\n'
            '    peptide NO -\n'
            '    cispeptide YES -\n'
            '    ssbridge YES -\n'
            '    symmetry YES -\n'
            '    sugar YES -\n'
            '    connectivity NO -\n'
            '    link NO\n'
            +RefmacParams['NCS']+
            'refi -\n'
            '    type REST -\n'
            '    resi MLKF -\n'
            '    meth CGMAT -\n'
            +RefmacParams['BREF']
            +RefmacParams['TLS']
            +RefmacParams['TWIN']+
            'ncyc '+RefmacParams['NCYCLES']+'\n'
            'scal -\n'
            '    type SIMP -\n'
            '    LSSC -\n'
            '    ANISO -\n'
            '    EXPE\n'
            +weight+
            'solvent YES\n'
            +occupancy_refinement+
            'monitor MEDIUM -\n'
            '    torsion 10.0 -\n'
            '    distance 10.0 -\n'
            '    angle 10.0 -\n'
            '    plane 10.0 -\n'
            '    chiral 10.0 -\n'
            '    bfactor 10.0 -\n'
            '    bsphere 10.0 -\n'
            '    rbond 10.0 -\n'
            '    ncsr 10.0\n'
            'labin  FP=F SIGFP=SIGF FREE=FreeR_flag\n'
            'labout  FC=FC FWT=FWT PHIC=PHIC PHWT=PHWT DELFWT=DELFWT PHDELWT=PHDELWT FOM=FOM\n'
            +RefmacParams['TLSADD']+'\n'
            'DNAME '+self.xtalID+'\n'
            'END\n'
            'EOF\n'
            '\n'
            +spider_plot+
            '\n'
            'phenix.molprobity refine_%s.pdb refine_%s.mtz\n' %(Serial,Serial)+
            '/bin/mv molprobity.out refine_molprobity.log\n'
            'mmtbx.validate_ligands refine_%s.pdb refine_%s.mtz LIG > validate_ligands.txt\n' %(Serial,Serial)+
            'cd '+self.ProjectPath+'/'+self.xtalID+'\n'
            '#ln -s %s/%s/Refine_%s/refine_%s.pdb refine.pdb\n' %(self.ProjectPath,self.xtalID,Serial,Serial)+
            '#ln -s %s/%s/Refine_%s/refine_%s.mtz refine.mtz\n' %(self.ProjectPath,self.xtalID,Serial,Serial)+
            'ln -s ./Refine_%s/refine_%s.pdb refine.pdb\n' %(Serial,Serial)+
            'ln -s ./Refine_%s/refine_%s.mtz refine.mtz\n' %(Serial,Serial)+
            '\n'
            +create_bound_conformation+
            '\n'
            'ln -s Refine_%s/validate_ligands.txt .\n' %Serial+
            'ln -s Refine_%s/refine_molprobity.log .\n' %Serial+
            'mmtbx.validation_summary refine.pdb > validation_summary.txt\n'
            '\n'
            'fft hklin refine.mtz mapout 2fofc.map << EOF\n'
            'labin F1=FWT PHI=PHWT\n'
            'EOF\n'
            '\n'
            'fft hklin refine.mtz mapout fofc.map << EOF\n'
            'labin F1=DELFWT PHI=PHDELWT\n'
            'EOF\n'
             '\n'
            '$CCP4/bin/ccp4-python '+os.path.join(os.getenv('XChemExplorer_DIR'),'helpers','update_data_source_after_refinement.py')+
            ' %s %s %s %s\n' %(self.datasource,self.xtalID,self.ProjectPath,os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial))+
            '\n'
            '/bin/rm %s/%s/REFINEMENT_IN_PROGRESS\n' %(self.ProjectPath,self.xtalID)+
            '\n'
           )

        Logfile.insert('writing refinement shell script to'+os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'refmac.csh'))
        cmd = open(os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial,'refmac.csh'),'w')
        cmd.write(refmacCmds)
        cmd.close()

        os.chdir(os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial))
#        os.system('ssh artemis "cd %s/%s/Refine_%s; qsub refmac.csh"' %(self.ProjectPath,self.xtalID,Serial))
        Logfile.insert('changing directory to %s' %(os.path.join(self.ProjectPath,self.xtalID,'Refine_'+Serial)))
        if external_software['qsub']:
            Logfile.insert('starting refinement on cluster')
            os.system('qsub -P labxchem refmac.csh')
        else:
            os.system('chmod +x refmac.csh')
            Logfile.insert('starting refinement on local machine')
            os.system('./refmac.csh &')
    def __init__(self):
        """Set up the COOT gui for reference model refinement.

        Reads the XChemExplorer settings pickle from the current directory,
        checks for external software, initialises refinement bookkeeping
        attributes and applies a couple of COOT display settings.
        """

        ###########################################################################################
        # read in settings file from XChemExplorer to set the relevant paths
        # use a context manager so the settings file handle is closed again
        # (pickle.load(open(...)) would leak the file descriptor)
        with open(".xce_settings.pkl", "rb") as settings_file:
            self.settings = pickle.load(settings_file)
        remote_qsub_submission = self.settings['remote_qsub']
        self.database_directory = self.settings['database_directory']
        self.xce_logfile = self.settings['xce_logfile']
        self.Logfile = XChemLog.updateLog(self.xce_logfile)
        self.Logfile.insert('starting COOT gui for reference model refinement')
        self.data_source = self.settings['data_source']

        # checking for external software packages
        self.external_software = XChemUtils.external_software(
            self.xce_logfile).check()
        self.external_software['qsub_remote'] = remote_qsub_submission

        # the Folder is kind of a legacy thing because my initial idea was to have separate folders
        # for Data Processing and Refinement
        self.reference_directory = self.settings['reference_directory']
        self.refinementDir = ''
        self.Serial = 0          # current refinement cycle number
        self.Refine = None       # active refinement object, set later

        self.xtalID = ''
        self.compoundID = ''
        self.spider_plot = ''
        self.refinement_folder = ''
        self.pdbFile = ''
        self.mtzFree = ''

        self.pdb_style = 'refine.pdb'
        self.mtz_style = 'refine.mtz'

        # stores imol of currently loaded molecules and maps (-1 = not loaded)
        self.mol_dict = {
            'protein': -1,
            'ligand': -1,
            '2fofc': -1,
            'fofc': -1,
            'event': -1
        }

        self.ground_state_map_List = []
        self.job_running = False

        ###########################################################################################
        # some COOT settings
        coot.set_map_radius(17)
        coot.set_colour_map_rotation_for_map(0)
        #        coot.set_colour_map_rotation_on_read_pdb_flag(21)

        # refinement quality indicators shown in the gui; '-' means not yet
        # determined, the *Color/*TL entries hold traffic-light colours
        self.QualityIndicators = {
            'Rcryst': '-',
            'Rfree': '-',
            'RfreeTL': 'gray',
            'ResolutionHigh': '-',
            'ResolutionColor': 'gray',
            'MolprobityScore': '-',
            'MolprobityScoreColor': 'gray',
            'RamachandranOutliers': '-',
            'RamachandranOutliersColor': 'gray',
            'RamachandranFavored': '-',
            'RamachandranFavoredColor': 'gray',
            'rmsdBonds': '-',
            'rmsdBondsTL': 'gray',
            'rmsdAngles': '-',
            'rmsdAnglesTL': 'gray',
            'MatrixWeight': '-'
        }

        # default refmac parameters
        self.RefmacParams = {
            'HKLIN': '',
            'HKLOUT': '',
            'XYZIN': '',
            'XYZOUT': '',
            'LIBIN': '',
            'LIBOUT': '',
            'TLSIN': '',
            'TLSOUT': '',
            'TLSADD': '',
            'NCYCLES': '10',
            'MATRIX_WEIGHT': 'AUTO',
            'BREF': '    bref ISOT\n',
            'TLS': '',
            'NCS': '',
            'TWIN': ''
        }
# Beispiel #24
# 0
def change_links_to_selected_data_collection_outcome(
        sample, data_collection_dict, data_collection_column_three_dict,
        dataset_outcome_dict, initial_model_directory, data_source_file,
        xce_logfile):
    """Re-point <sample>.mtz/.log symlinks at the user-selected processing result.

    Looks up which row the user selected in the sample's data collection
    table, finds the matching 'logfile' entry in data_collection_dict,
    replaces the <sample>.mtz and <sample>.log symlinks in the sample's
    project folder with relative links to that result, and records the
    choice in the data source.

    Parameters:
        sample: crystal/sample name (also the project sub-folder name).
        data_collection_dict: maps sample -> list of entries; 'logfile'
            entries carry the per-result metadata (see layout note below).
        data_collection_column_three_dict: maps sample -> [table model, ...];
            [0] provides the Qt selection model queried here.
        dataset_outcome_dict: maps sample -> dataset outcome string.
        initial_model_directory: root of the project directory tree.
        data_source_file: path of the SQLite data source to update.
        xce_logfile: path of the XCE logfile for progress messages.

    Returns None. Does nothing except log a warning if the selected
    autoprocessing folder has not been copied to the project directory yet.
    """
    Logfile = XChemLog.updateLog(xce_logfile)
    # find out which row was selected in respective data collection table
    selected_processing_result = 'n/a'
    indexes = data_collection_column_three_dict[sample][0].selectionModel(
    ).selectedRows()
    if indexes != []:  # i.e. logfile exists
        for index in sorted(indexes):
            selected_processing_result = index.row()

    # entry layout appears to be positional: [0]=type tag, [1]=visit,
    # [2]=run, [4]=autoproc pipeline, [6]=db dict, [7]=table row index
    # -- TODO confirm against the code that populates data_collection_dict
    for n, entry in enumerate(data_collection_dict[sample]):
        if entry[0] == 'logfile':
            if entry[7] == selected_processing_result:
                visit = entry[1]
                run = entry[2]
                autoproc = entry[4]
                db_dict = entry[6]
                # NOTE(review): 'outcome' is assigned but not used below
                outcome = dataset_outcome_dict[sample]
                path_to_logfile = db_dict['DataProcessingPathToLogfile']
                path_to_mtzfile = db_dict['DataProcessingPathToMTZfile']
                mtz_filename = db_dict['DataProcessingMTZfileName']
                log_filename = db_dict['DataProcessingLOGfileName']
                #                relative_path_to_mtzfile='./'+path_to_mtzfile.replace(initial_model_directory,'')
                # build paths relative to the sample folder so the symlinks
                # survive moving/renaming the project directory
                relative_path_to_mtzfile = './' + path_to_mtzfile.replace(
                    os.path.join(initial_model_directory, sample), '')
                if relative_path_to_mtzfile.startswith('.//'):
                    relative_path_to_mtzfile = relative_path_to_mtzfile.replace(
                        './/', './')
                relative_path_to_logfile = './' + path_to_logfile.replace(
                    os.path.join(initial_model_directory, sample), '')
                if relative_path_to_logfile.startswith('.//'):
                    relative_path_to_logfile = relative_path_to_logfile.replace(
                        './/', './')

                # first check if folders and files exist
                # since user might do this before data are actually copied over

                if os.path.isdir(
                        os.path.join(initial_model_directory, sample,
                                     'autoprocessing',
                                     visit + '-' + run + autoproc)):
                    db_dict['DataProcessingAutoAssigned'] = 'False'
                    Logfile.insert(
                        'changing directory to: ' +
                        os.path.join(initial_model_directory, sample))
                    os.chdir(os.path.join(initial_model_directory, sample))
                    # first remove old links
                    os.system('/bin/rm ' + sample + '.mtz 2> /dev/null')
                    os.system('/bin/rm ' + sample + '.log 2> /dev/null')
                    # make new links
                    #                    Logfile.insert('setting symlink: '+os.path.join(path_to_logfile,log_filename)+' -> '+sample+'.log')
                    #                    os.symlink(os.path.join(path_to_logfile,log_filename),sample+'.log')
                    #                    Logfile.insert('setting symlink: '+os.path.join(path_to_mtzfile,mtz_filename)+' -> '+sample+'.mtz')
                    #                    os.symlink(os.path.join(path_to_mtzfile,mtz_filename),sample+'.mtz')
                    Logfile.insert(
                        'setting relative symlink: ' +
                        os.path.join(relative_path_to_logfile, log_filename) +
                        ' -> ' + sample + '.log')
                    os.symlink(
                        os.path.join(relative_path_to_logfile, log_filename),
                        sample + '.log')
                    Logfile.insert(
                        'setting relative symlink: ' +
                        os.path.join(relative_path_to_mtzfile, mtz_filename) +
                        ' -> ' + sample + '.mtz')
                    os.symlink(
                        os.path.join(relative_path_to_mtzfile, mtz_filename),
                        sample + '.mtz')

                    # update data source
                    data_source = XChemDB.data_source(data_source_file)
                    data_source.update_insert_data_source(sample, db_dict)

                else:
                    Logfile.insert(
                        'please copy data to PROJECT DIRECTORY first!')