Example #1
def update_pdbs_json():
    '''This function was used to update the pdbs.json file to include chain sequences and types.'''
    pdb_data = {}
    pdb_data_ = json.loads(read_file(os.path.join('..', 'json', 'pdbs.json')))
    for k, v in pdb_data_.items():
        assert(len(k) == 4)
        newk = k.upper()
        pdb = PDB(read_file(os.path.join('..', 'pdbs', newk + '.pdb')))
        # Collect every chain ID that appears in the chain type map, the SEQRES chain order or the ATOM-record sequences
        chain_ids = set(pdb.chain_types.keys()).union(set(pdb.seqres_chain_order)).union(set(pdb.atom_sequences.keys()))
        v['Chains'] = dict.fromkeys(chain_ids)
        for chain_id in chain_ids:
            v['Chains'][chain_id] = dict(
                Sequence = str(pdb.atom_sequences.get(chain_id)),
                Type = pdb.chain_types.get(chain_id),
            )
        pdb_data[newk] = v
    write_file(os.path.join('..', 'json', 'pdbs.json.new'), json.dumps(pdb_data, indent = 4, sort_keys=True))
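These examples call read_file and write_file helpers that are not shown in the snippets. A minimal sketch of what they are assumed to look like, so the examples can run on their own (the bodies below are inferred from how the helpers are used, not the original implementations):

import json
import os

def read_file(path):
    # Assumed helper: return the full contents of a text file as a string.
    with open(path, 'r') as f:
        return f.read()

def write_file(path, contents):
    # Assumed helper: overwrite the file at path with the given string.
    with open(path, 'w') as f:
        f.write(contents)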
Example #2
def update_pdbs_json():
    '''This function was used to update the pdbs.json file to include chain sequences and types.'''
    pdb_data = {}
    pdb_data_ = json.loads(read_file(os.path.join('..', 'json', 'pdbs.json')))
    for k, v in pdb_data_.items():
        assert(len(k) == 4)
        newk = k.upper()
        pdb = PDB(read_file(os.path.join('..', 'pdbs', newk + '.pdb')))
        chain_ids = set(pdb.chain_types.keys()).union(set(pdb.seqres_chain_order)).union(set(pdb.atom_sequences.keys()))
        v['Chains'] = dict.fromkeys(chain_ids)
        for chain_id in chain_ids:
            v['Chains'][chain_id] = dict(
                Sequence = str(pdb.atom_sequences.get(chain_id)),
                Type = pdb.chain_types.get(chain_id),
            )
        pdb_data[newk] = v
    write_file(os.path.join('..', 'json', 'pdbs.json.new'), json.dumps(pdb_data, indent = 4, sort_keys=True))
Example #3
def create_constraints_file(preminimization_log, outfile_path):
    '''This does the work of the convert_to_cst_file.sh script in the Rosetta repository.'''
    constraints = []
    contents = read_file(preminimization_log)
    for line in contents.split('\n'):
        if line.startswith("c-alpha"):
            # Split the c-alpha line on whitespace and pick out the fields used in the AtomPair constraint by position
            line = line.split()
            constraints.append("AtomPair CA %s CA %s HARMONIC %s %s" % (line[5], line[7], line[9], line[12]))
    write_file(outfile_path, '\n'.join(constraints))
    return outfile_path
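Each matching 'c-alpha' line in the preminimization log becomes one Rosetta AtomPair constraint between two C-alpha atoms; in Rosetta's constraint file format the two numbers after HARMONIC are the harmonic function's x0 and standard deviation. A hypothetical call, with placeholder paths rather than ones taken from the original script:

# Hypothetical usage; both paths are placeholders.
cst_path = create_constraints_file(
    os.path.join('job_output', 'preminimization.log'),
    os.path.join('job_output', 'constraints.cst'),
)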
Example #4
def create_constraints_file(preminimization_log, outfile_path):
    '''This does the work of the convert_to_cst_file.sh script in the Rosetta repository.'''
    constraints = []
    contents = read_file(preminimization_log)
    for line in contents.split('\n'):
        if line.startswith("c-alpha"):
            line = line.split()
            constraints.append("AtomPair CA %s CA %s HARMONIC %s %s" %
                               (line[5], line[7], line[9], line[12]))
    write_file(outfile_path, '\n'.join(constraints))
    return outfile_path
Example #5
        if arguments.get('--parallel'):
            valid_options = [int(x) for x in arguments['--parallel'] if x.isdigit()]
            if not valid_options:
                raise Exception('None of the arguments to --parallel are valid. The argument must be an integer between 1 and the number of processors (%d).' % num_system_processors)
            else:
                num_processors = max(valid_options)
        else:
            # If the user has not specified the number of processors and this machine has more than one, let them know that the --parallel or --maxp options may make this setup run faster
            if num_processors == 1 and num_system_processors > 1:
                print('The setup is configured to use one processor but this machine has %d processors. The --parallel or --maxp options may make this setup run faster.' % num_system_processors)
    if not (1 <= num_processors <= num_system_processors):
        raise Exception('The number of processors must be an integer between 1 and %d.' % num_system_processors)

    # Read the dataset from disk
    try:
        dataset = json.loads(read_file(dataset_filepath))
        dataset_cases = dataset['data']
    except Exception as e:
        raise Exception('An error occurred parsing the JSON file: %s.' % str(e))

    # Set the job directory name
    job_name = '%s_%s_ddg_monomer_16' % (time.strftime("%y-%m-%d-%H-%M"), getpass.getuser())
    if arguments.get('--run_identifier'):
        job_name += '_' + arguments['--run_identifier'][0]

    # Set the root output directory
    root_output_directory = 'job_output'
    if arguments.get('--output_directory'):
        root_output_directory = arguments['--output_directory'][0]
    if not os.path.exists(root_output_directory):
        print('Creating directory %s:' % root_output_directory)
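The arguments mapping indexed with option strings such as '--parallel' and '--output_directory', where each value is a list, matches the kind of dictionary a docopt-style command-line parser returns. A made-up illustration of the shape this block expects (the option names come from the code above; the values are placeholders):

# Illustrative only: these values are examples, not the script's defaults.
arguments = {
    '--parallel': ['4'],              # list of strings; non-numeric entries are ignored
    '--run_identifier': ['testrun'],  # optional suffix appended to the job directory name
    '--output_directory': ['job_output'],
}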