def find_validation_workspaces(name, round=None):
    """
    Find all the workspaces containing validated designs.

    If a specific round is given, only that round is considered; otherwise
    rounds 1, 2, 3, ... are scanned until one doesn't exist.
    """
    rounds_to_scan = [round] if round else itertools.count(1)

    found = []
    for round_number in rounds_to_scan:
        candidate = pipeline.ValidatedDesigns(name, round_number)
        if not candidate.exists():
            break
        found.append(candidate)

    if not found:
        scripting.print_error_and_die('No validated designs found.')

    return found
def find_validation_workspaces(name, rounds=None):
    """
    Find all the workspaces containing validated designs.

    If a round specification string is given, only those rounds are
    considered; otherwise rounds 1, 2, 3, ... are scanned until one
    doesn't exist.
    """
    if rounds is None:
        round_numbers = itertools.count(1)
    else:
        round_numbers = indices_from_str(rounds)

    found = []
    for round_number in round_numbers:
        candidate = pipeline.ValidatedDesigns(name, round_number)
        if not candidate.exists():
            break
        found.append(candidate)

    if not found:
        scripting.print_error_and_die('No validated designs found.')

    return found
def main():
    """
    Queue a cluster job that validates every unclaimed input design in the
    given workspace round.
    """
    args = docopt.docopt(__doc__)
    cluster.require_qsub()

    # Set up the workspace for this round of validation.
    workspace = pipeline.ValidatedDesigns(args['<workspace>'], args['<round>'])
    workspace.check_paths()
    workspace.make_dirs()

    # A test run (or an explicit --clear) starts from a clean slate.
    if args['--clear'] or args['--test-run']:
        workspace.clear_outputs()

    # Work out how many simulations to run: each unclaimed input gets
    # --nstruct jobs.
    inputs = workspace.unclaimed_inputs
    nstruct = int(args['--nstruct']) * len(inputs)

    if not nstruct:
        scripting.print_error_and_die("""\
No unclaimed input files.

If you previously started a round of simulations and then stopped them for
some reason, the problem is probably that all the inputs are still claimed by
those simulations.  Use the '--clear' flag to remove the claims and try
again.""")

    # Give each input a fresh output directory.
    for input_pdb in inputs:
        scripting.clear_directory(workspace.output_subdir(input_pdb))

    # Launch the validation job.
    big_jobs.submit(
            'pip_validate.py', workspace,
            inputs=inputs,
            nstruct=nstruct,
            max_runtime=args['--max-runtime'],
            max_memory=args['--max-memory'],
            test_run=args['--test-run'],
    )
def main():
    """
    Parse the command line, find the matching ``roseasy`` subcommand, and
    run it.

    Subcommands are discovered via the ``roseasy.commands`` entry-point
    group, so other installed packages can contribute commands.  Prefix
    matching is used: a unique prefix of a command name is accepted, an
    ambiguous prefix is reported with the candidates, and an unknown name
    triggers a "did you mean" suggestion.
    """
    from pkg_resources import iter_entry_points, DistributionNotFound

    # Load every PIP command installed on this system.  This is cool because by
    # using ``pkg_resources``, other packages can add commands to PIP!
    entry_points = {}
    for entry_point in iter_entry_points(group='roseasy.commands'):
        entry_points[entry_point.name] = entry_point

    # Read the command the user typed on the command line.
    # NOTE: ``command_table`` looks unused, but it is consumed by the
    # ``__doc__.format(**locals())`` call just below.
    command_table = make_command_table(entry_points)
    arguments = docopt.docopt(
            __doc__.format(**locals()),
            version=__version__,
            options_first=True,
    )
    command_name = arguments['<command>']

    # Find all the commands that match what the user typed.
    matching_entry_points = [
            name for name in entry_points
            if name.startswith(command_name)]

    # If no commands match, print out an error and suggest a command the user
    # might have been trying to type.
    if len(matching_entry_points) == 0:
        scripting.print_error_and_die("""\
Unknown command '{0}'.  Did you mean:

    $ roseasy {1} {2}

""", command_name, did_you_mean(command_name, entry_points),
                ' '.join(arguments['<args>']))

    # If two or more commands match, print all the ambiguous commands and tell
    # the user to be more specific.
    elif len(matching_entry_points) > 1:
        message = "Command '{0}' is ambiguous.  Did you mean:\n\n"
        for matching_entry_point in matching_entry_points:
            # ``{{1}}`` survives this format() call as a literal ``{1}`` so it
            # can be filled in by print_error_and_die below.
            message += "    $ roseasy {0} {{1}}\n".format(matching_entry_point)
        message += '\n'
        scripting.print_error_and_die(message, command_name,
                ' '.join(arguments['<args>']))

    # If a unique command was given, make sure all of its dependencies are
    # installed (because the dependencies for the analysis scripts are not by
    # default).  If there is a problem, suggest how to fix it.  Otherwise, run
    # the command.
    else:
        entry_point = entry_points[matching_entry_points[0]]

        try:
            entry_point.require()
        except DistributionNotFound as error:
            scripting.print_error_and_die("""\
The '{0}' command requires the '{1.req}' package.

The analysis scripts have a number of dependencies that aren't installed by
default, because they would make PIP needlessly hard to install on clusters.
You can install all of these dependencies at once with the following command:

    $ pip install 'roseasy [analysis]'
""".format(command_name, error))

        # Rewrite sys.argv so the subcommand's own docopt parsing sees the
        # resolved (full) command name, then hand over control.
        sys.argv = sys.argv[:1] + matching_entry_points + arguments['<args>']
        entry_point.load()()
def main():
    """
    Build a sequence logo (via weblogo) from validated designs, either for a
    given workspace round or for an arbitrary directory of models.

    With ``--output`` the logo is written to that path (format chosen by
    extension) plus a ``.txt`` sidecar of the raw logo data; without it, a
    temporary PDF is rendered and opened in a viewer ($PDF, default evince).
    """
    args = docopt.docopt(__doc__)
    root = args['<workspace>']
    round_number = args['<round>']
    directory = args['<directory>']

    if directory:
        # Pull the designable positions out of the resfile, then slice those
        # positions (1-indexed) out of every model's sequence.
        models, filters = structures.load(directory)
        resfile = pipeline.load_resfile(directory)
        resis = sorted(int(i) for i in resfile.designable)
        print(resis)
        title = directory
        sequences = [
                ''.join(models['sequence'][i][j - 1] for j in resis)
                for i in models.index
        ]
    else:
        workspace = pipeline.ValidatedDesigns(root, round_number)
        workspace.check_paths()
        title = workspace.focus_dir
        designs = [structures.Design(x) for x in workspace.output_subdirs]
        sequences = [x.resfile_sequence for x in designs]

    sequences = weblogo.seq.SeqList(
            [weblogo.seq.Seq(x) for x in sequences],
            alphabet=weblogo.seq.unambiguous_protein_alphabet,
    )

    logo_data = weblogo.LogoData.from_seqs(sequences)
    logo_options = weblogo.LogoOptions()
    logo_options.title = title
    logo_format = weblogo.LogoFormat(logo_data, logo_options)

    if args['--output']:
        preview = False
        logo_file = open(args['--output'], 'wb')
        # Also dump the raw logo data next to the figure.  Use splitext so
        # this works for any extension: the old ``[:-len('pdf')] + 'txt'``
        # slicing produced names like 'x.jtxt' for 'x.jpeg'.
        txt_path = os.path.splitext(args['--output'])[0] + '.txt'
        with open(txt_path, 'w') as f:
            f.write(str(logo_data))
    else:
        preview = True
        logo_file = tempfile.NamedTemporaryFile(
                'wb', prefix='weblogo_', suffix='.pdf')

    # Pick the weblogo formatter that matches the output file's extension.
    ext = os.path.splitext(logo_file.name)[-1]
    formatters = {
            '.pdf': weblogo.pdf_formatter,
            '.svg': weblogo.svg_formatter,
            '.eps': weblogo.eps_formatter,
            '.png': weblogo.png_formatter,
            '.jpeg': weblogo.jpeg_formatter,
            '.txt': weblogo.txt_formatter,
    }
    if ext not in formatters:
        scripting.print_error_and_die(
                "'{0}' is not a supported file format".format(ext))

    document = formatters[ext](logo_data, logo_format)
    logo_file.write(document)
    logo_file.flush()

    if preview:
        # $PDF can override the viewer; the tuple is (viewer, file).
        pdf = os.environ.get('PDF', 'evince'), logo_file.name
        subprocess.call(pdf)

    # Closing a NamedTemporaryFile also deletes it.
    logo_file.close()
def main():
    """
    Interactively create and populate a new workspace directory.

    For a --remote workspace only the Rosetta path and rsync URL are
    requested; otherwise the user is prompted for every input file and
    script the pipeline needs.

    NOTE(review): this script uses Python-2 syntax (print statements,
    ``raw_input``).
    """
    arguments = docopt.docopt(__doc__)
    workspace = pipeline.Workspace(arguments['<workspace>'])

    # Make a new workspace directory.
    if workspace.incompatible_with_fragments_script:
        scripting.print_error_and_die("""\
Illegal character(s) found in workspace path:

{}

The full path to a workspace must contain only characters that are alphanumeric
or '.' or '_'.  The reason for this ridiculous rule is the fragment generation
script, which will silently fail if the full path to its input file contains
any characters but those.""", workspace.abs_root_dir)

    if workspace.exists():
        if arguments['--overwrite']:
            shutil.rmtree(workspace.root_dir)
        else:
            scripting.print_error_and_die("""\
Design '{0}' already exists.  Use '-o' to overwrite.""", workspace.root_dir)

    workspace.make_dirs()

    # Decide which settings to ask for.
    if arguments['--remote']:
        installers = (
                RosettaDir,
                RsyncUrl,
        )
    else:
        installers = (
                RosettaDir,
                InputPdb,
                LoopsFile,
                Resfile,
                RestraintsFile,
                ScoreFunction,
                BuildScript,
                DesignScript,
                ValidateScript,
                FilterScript,
                SharedDefs,
                FlagsFile,
        )

    # Get the necessary settings from the user and use them to fill in the
    # workspace.
    print "Please provide the following pieces of information:"
    print
    scripting.use_path_completion()

    for installer in installers:
        # If the installer doesn't have a prompt, just install it without
        # asking any questions.
        if installer.prompt is None:
            installer.install(workspace)
            continue

        # Otherwise, print a description of the setting being installed and
        # prompt the user for a value.
        print installer.description
        print

        # Re-prompt on bad input; a keyboard interrupt or EOF aborts the whole
        # setup and removes the half-built workspace directory.
        while True:
            try:
                setting = raw_input(installer.prompt)
                installer.install(workspace, setting)
            except (ValueError, IOError) as problem:
                print problem
                continue
            except (KeyboardInterrupt, EOFError):
                shutil.rmtree(workspace.root_dir)
                scripting.print_error_and_die("\nReceived exit command, no workspace created.")
            else:
                break

        print

    # If we made a link to a remote workspace, immediately try to synchronize
    # with it.  Rsync will say whether or not it succeeded.  Otherwise just
    # print a success message.
    if arguments['--remote']:
        pipeline.fetch_data(workspace.root_dir)
    else:
        print "Setup successful for design '{0}'.".format(workspace.root_dir)
def main():
    """
    Parse the command line, find the matching ``helix`` subcommand, and
    run it.

    Subcommands are discovered via the ``helix.commands`` entry-point group,
    so other installed packages can contribute commands.  Prefix matching is
    used: a unique prefix of a command name is accepted, an ambiguous prefix
    is reported with the candidates, and an unknown name triggers a "did you
    mean" suggestion.
    """
    from pkg_resources import iter_entry_points, DistributionNotFound

    # Load every PIP command installed on this system.  This is cool because by
    # using ``pkg_resources``, other packages can add commands to PIP!
    entry_points = {}
    for entry_point in iter_entry_points(group='helix.commands'):
        entry_points[entry_point.name] = entry_point

    # Read the command the user typed on the command line.
    # NOTE: ``command_table`` looks unused, but it is consumed by the
    # ``__doc__.format(**locals())`` call just below.
    command_table = make_command_table(entry_points)
    arguments = docopt.docopt(
            __doc__.format(**locals()),
            version=__version__,
            options_first=True,
    )
    command_name = arguments['<command>']

    # Find all the commands that match what the user typed.
    matching_entry_points = [
            name for name in entry_points
            if name.startswith(command_name)
    ]

    # If no commands match, print out an error and suggest a command the user
    # might have been trying to type.
    if len(matching_entry_points) == 0:
        scripting.print_error_and_die(
                """\
Unknown command '{0}'.  Did you mean:

    $ helix {1} {2}

""", command_name, did_you_mean(command_name, entry_points),
                ' '.join(arguments['<args>']))

    # If two or more commands match, print all the ambiguous commands and tell
    # the user to be more specific.
    elif len(matching_entry_points) > 1:
        message = "Command '{0}' is ambiguous.  Did you mean:\n\n"
        for matching_entry_point in matching_entry_points:
            # ``{{1}}`` survives this format() call as a literal ``{1}`` so it
            # can be filled in by print_error_and_die below.
            message += "    $ helix {0} {{1}}\n".format(matching_entry_point)
        message += '\n'
        scripting.print_error_and_die(message, command_name,
                ' '.join(arguments['<args>']))

    # If a unique command was given, make sure all of its dependencies are
    # installed (because the dependencies for the analysis scripts are not by
    # default).  If there is a problem, suggest how to fix it.  Otherwise, run
    # the command.
    else:
        entry_point = entry_points[matching_entry_points[0]]

        try:
            entry_point.require()
        except DistributionNotFound as error:
            scripting.print_error_and_die("""\
The '{0}' command requires the '{1.req}' package.  Installation will be
further streamlined in the future.  In the meantime, the helix.yml conda
environment should get you most of the packages (except PyRosetta and klab):

    conda env create --file helix.yml

The latest version of klab can be installed via the following commands:

    git clone https://github.com/kortemme-lab/klab
    cd klab
    python setup.py install

Any other dependencies should be available via conda or pip.
""".format(command_name, error))
            #'$ pip install 'helix [analysis]'

        # Rewrite sys.argv so the subcommand's own docopt parsing sees the
        # resolved (full) command name, then hand over control.
        sys.argv = sys.argv[:1] + matching_entry_points + arguments['<args>']
        entry_point.load()()