Example #1
        for linetokens in lines:
            if len(linetokens) >= 3:
                table.append(
                    dict(Experimental=float(linetokens[0]),
                         Predicted=float(linetokens[1]),
                         ID=str(linetokens[2])))
            elif len(linetokens) == 2:
                table.append(
                    dict(Experimental=float(linetokens[0]),
                         Predicted=float(linetokens[1]),
                         ID=id))
                id += 1
            else:
                raise Exception(
                    'At least two columns (experimental DDG, predicted DDG) are expected.'
                )
        return table
    except Exception as e:
        raise Exception('An exception occurred parsing the CSV/TSV file: %s' %
                        str(e))


if __name__ == '__main__':
    try:
        arguments = docopt.docopt(__doc__.format(**locals()))
    except Exception as e:
        print('Failed while parsing arguments: %s.' % str(e))
        sys.exit(1)

    # Read the input file
    input_filename = arguments['<inputfile>'][0]
    if not os.path.exists(input_filename):
        print('Error: the input file %s does not exist.' % input_filename)
        sys.exit(2)
    analysis_table = read_json(input_filename)
    if not analysis_table:
        analysis_table = parse_csv(input_filename)

    # Set up the output filename
    output_filename = arguments['--output']
Example #2
        for linetokens in lines:
            if len(linetokens) >= 3:
                table.append(dict(Experimental = float(linetokens[0]), Predicted = float(linetokens[1]), ID = str(linetokens[2])))
            elif len(linetokens) == 2:
                table.append(dict(Experimental = float(linetokens[0]), Predicted = float(linetokens[1]), ID = id))
                id += 1
            else:
                raise Exception('At least two columns (experimental DDG, predicted DDG) are expected.')
        return table
    except Exception as e:
        raise Exception('An exception occurred parsing the CSV/TSV file: %s' % str(e))


if __name__ == '__main__':
    try:
        arguments = docopt.docopt(__doc__.format(**locals()))
    except Exception as e:
        print('Failed while parsing arguments: %s.' % str(e))
        sys.exit(1)

    # Read the input file
    input_filename = arguments['<inputfile>'][0]
    if not os.path.exists(input_filename):
        print('Error: the input file %s does not exist.' % input_filename)
        sys.exit(2)
    analysis_table = read_json(input_filename)
    if not analysis_table:
        analysis_table = parse_csv(input_filename)

    # Set up the output filename
    output_filename = arguments['--output']
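
Examples #1 and #2 show the same parsing loop: each tokenized line becomes a row dict keyed by Experimental, Predicted and ID, and two-column records receive an auto-incremented ID. The sketch below is a minimal, self-contained reconstruction of that branch logic for illustration only; the parse_ddg_lines name and the sample values are assumptions, not part of the original script.

def parse_ddg_lines(lines):
    # Hedged sketch: mirrors the visible branch logic of the excerpt above.
    table, next_id = [], 1
    for linetokens in lines:
        if len(linetokens) >= 3:
            table.append(dict(Experimental=float(linetokens[0]),
                              Predicted=float(linetokens[1]),
                              ID=str(linetokens[2])))
        elif len(linetokens) == 2:
            table.append(dict(Experimental=float(linetokens[0]),
                              Predicted=float(linetokens[1]),
                              ID=next_id))
            next_id += 1
        else:
            raise Exception('At least two columns (experimental DDG, predicted DDG) are expected.')
    return table

# A three-column record keeps its ID; a two-column record is assigned ID 1.
rows = parse_ddg_lines([['-1.2', '-0.9', 'mutant_A'], ['0.4', '0.7']])
print(rows)
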
Example #3
                # Create the new PDB file with the loop residue backbone atoms added back in
                write_file(os.path.join(output_directory, '{0}.pdb'.format(pdb_prefix)), new_pdb_content)

                # Create a loop file in Rosetta numbering for protocols which require that format (the loopmodel code currently
                # requires this at the time of writing)
                write_file(os.path.join(output_directory, '{0}.loop'.format(pdb_prefix)), loop_file_content)

            sys.stdout.write('.')
            sys.stdout.flush()
        print('')


if __name__ == '__main__':
    from libraries import docopt
    arguments = docopt.docopt(__doc__)
    output_directory = arguments['<output_directory>']
    e, trc = '', ''

    if True:
        # Set this guard to False to disable the block below
        try:
            os.mkdir(output_directory)
        except Exception as e:
            trc = traceback.format_exc()
        if not os.path.exists(output_directory):
            colortext.error('Error: Could not create the output directory.')
            if e: colortext.error(str(e))
            colortext.warning(trc)

        #create_pruned_structures(output_directory)
        add_missing_residues(output_directory)
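
The guarded os.mkdir call in Example #3 tolerates a pre-existing output directory: the exception and traceback are captured, but an error is reported only if the directory is still missing afterwards. Below is a standard-library-only sketch of that pattern; the ensure_directory name is an assumption, and sys.stderr stands in for the project's colortext helpers.

import os
import sys
import traceback

def ensure_directory(path):
    # Hedged sketch of the create-if-missing pattern used above.
    err, trc = '', ''
    try:
        os.mkdir(path)
    except Exception as e:
        err, trc = str(e), traceback.format_exc()
    # Only treat the failure as fatal if the directory really does not exist
    # (an "already exists" error is harmless and the directory is reused).
    if not os.path.exists(path):
        sys.stderr.write('Error: Could not create the output directory.\n')
        if err:
            sys.stderr.write(err + '\n')
        sys.stderr.write(trc + '\n')
        return False
    return True
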
Example #4
#!/usr/bin/env python2

"""\
Usage: hilbert_demo.py [--steps=STEPS] [--test-run]
"""

import subprocess, numpy as np
from libraries import docopt, utils

args = docopt.docopt(__doc__)
num_particles = 512 if args['--test-run'] else 512  # currently the same particle count for test and full runs
num_steps = int(args['--steps'] or 
    (1 if args['--test-run'] else (20 if utils.running_on_cluster() else 3)))
num_xyz_restraints = np.linspace(10, 50, num=num_steps).astype(int)
num_pair_restraints = np.linspace(150, 500, num=num_steps).astype(int)

params = []
for n_xyz in num_xyz_restraints:
    for n_pair in num_pair_restraints:
        params.append({
            'num_particles': num_particles,
            'num_xyz_restraints': n_xyz,
            'num_pair_restraints': n_pair,
        })

utils.clear_directories('jsons', 'pdbs', 'movies')
utils.submit_job('run_hilbert_demo.py', params)
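
Because the two linspace arrays are crossed in the nested loop, Example #4 builds num_steps ** 2 parameter dicts before submitting. A quick, submission-free check of the grid (assuming --steps=3) is sketched below; it only reproduces the array construction for illustration.

import numpy as np

# Hedged illustration only: no jobs are submitted here.
num_steps = 3
num_xyz_restraints = np.linspace(10, 50, num=num_steps).astype(int)      # [10 30 50]
num_pair_restraints = np.linspace(150, 500, num=num_steps).astype(int)   # [150 325 500]
print(len(num_xyz_restraints) * len(num_pair_restraints))                # 9 parameter sets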