def get_pandas_non_summary():
    """Build a dict mapping metric name -> pandas DataFrame of interval data.

    Collects interval data from every trial of the selected experiment and
    concatenates the per-thread DataFrames, keyed by each datum's ``metric``.

    Returns:
        dict: Metric name mapped to a concatenated :any:`pandas.DataFrame`
        whose index levels are trial/rank/context/thread/region.

    DEPRECATED - may not work.
    """
    num_trials = Project.selected().experiment().num_trials
    trials = Project.selected().experiment().trials(xrange(0, num_trials))
    # Fetch raw data for every trial up front.
    trial_data = {}
    for i in xrange(0, num_trials):
        trial_data[i] = trials[i].get_data()
    start = time.time()
    metric_data = {}
    for trial in xrange(0, num_trials):
        thread_data = []
        # Walk the nested per-trial structure; presumably the nesting is
        # (rank, context, thread) -- TODO confirm against Trial.get_data().
        for i in xrange(0, len(trial_data[trial])):
            for j in xrange(0, len(trial_data[trial][i])):
                for k in xrange(0, len(trial_data[trial][i][j])):
                    thread_data.append(trial_data[trial][i][j][k].interval_data())
                    # NOTE(review): concatenating inside the innermost loop
                    # rebuilds the DataFrame on every iteration; only the last
                    # assignment per metric survives -- confirm this is intended.
                    metric_data[trial_data[trial][i][j][k].metric] = pd.concat(thread_data)
                    metric_data[trial_data[trial][i][j][k].metric].index.names = ['trial', 'rank', 'context', 'thread', 'region']
    end = time.time()
    print('Time spent constructing dataframes %s' %(end-start))
    print('\nMetrics included:')
    for m in metric_data.keys():
        print("\t%s"%m)
    return metric_data
def main(self, argv):
    """Program entry point: display data files from trials or explicit paths.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Return code from the data-file viewer.
    """
    args = self._parse_args(argv)
    profile_tools = getattr(args, 'profile_tools', None)
    trace_tools = getattr(args, 'trace_tools', None)
    data_files = []
    trial_numbers = []
    # Positional arguments may be file paths or trial numbers.
    candidates = getattr(args, 'trial_numbers', []) + getattr(args, 'data_files', [])
    for candidate in candidates:
        if os.path.exists(candidate):
            data_files.append(candidate)
            continue
        try:
            trial_numbers.append(int(candidate))
        except ValueError:
            self.parser.error("Invalid trial number: %s" % candidate)
    tau = TauInstallation.get_minimal()
    dataset = {}
    if not data_files and not trial_numbers:
        # Nothing specified: default to the first trial of the selected experiment.
        expr = Project.selected().experiment()
        for fmt, path in expr.trials()[0].get_data_files().iteritems():
            dataset[fmt] = [path]
    elif trial_numbers:
        expr = Project.selected().experiment()
        for trial in expr.trials(trial_numbers):
            for fmt, path in trial.get_data_files().iteritems():
                dataset.setdefault(fmt, []).append(path)
    for path in data_files:
        dataset.setdefault(tau.get_data_format(path), []).append(path)
    return tau.show_data_files(dataset, profile_tools, trace_tools)
def main(self, argv):
    """Program entry point: open trial data files in the requested viewers.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Return code from the data-file viewer.
    """
    args = self._parse_args(argv)
    profile_tools = getattr(args, 'profile_tools', None)
    trace_tools = getattr(args, 'trace_tools', None)
    data_files = []
    trial_numbers = []
    # Each positional argument is either an existing path or a trial number.
    for token in getattr(args, 'trial_numbers', []) + getattr(args, 'data_files', []):
        if os.path.exists(token):
            data_files.append(token)
        else:
            try:
                trial_numbers.append(int(token))
            except ValueError:
                self.parser.error("Invalid trial number: %s" % token)
    tau = TauInstallation.get_minimal()
    dataset = {}
    if not (data_files or trial_numbers):
        # Default: show data from the first trial of the selected experiment.
        first_trial = Project.selected().experiment().trials()[0]
        for fmt, path in first_trial.get_data_files().iteritems():
            dataset[fmt] = [path]
    elif trial_numbers:
        for trial in Project.selected().experiment().trials(trial_numbers):
            for fmt, path in trial.get_data_files().iteritems():
                dataset.setdefault(fmt, []).append(path)
    for path in data_files:
        fmt = tau.get_data_format(path)
        dataset.setdefault(fmt, []).append(path)
    return tau.show_data_files(dataset, profile_tools, trace_tools)
def main(self, argv):
    """Program entry point.

    Strips the --tau-dir / --description options out of ``argv`` so the
    remainder can be split into launcher + application commands, then runs
    the selected experiment.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Return code of the managed run.
    """
    args = self._parse_args(argv)
    if args.tau_dir:
        # Remove the option and its value from argv before launcher parsing.
        # NOTE(review): `in` performs a substring test here (e.g. 'tau' in
        # '--tau-dir' is True), not equality -- confirm this is intended.
        if argv[0] in '--tau-dir':
            del argv[0:2]
        elif argv[2] in '--tau-dir':
            del argv[2:4]
    if args.description:
        description = args.description
        if argv[0] in '--description':
            del argv[0:2]
        elif argv[2] in '--description':
            del argv[2:4]
    else:
        description = None
    launcher_cmd, application_cmds = Trial.parse_launcher_cmd(argv)
    self.logger.debug("Launcher command: %s", launcher_cmd)
    self.logger.debug("Application commands: %s", application_cmds)
    if args.tau_dir:
        # Point project storage at the requested TAU directory before running.
        Project.controller().storage.tau_dir(args.tau_dir)
    # Bug fix: the original had a duplicated managed_run call -- the second
    # return statement was unreachable dead code. A single exit point suffices.
    return Project.selected().experiment().managed_run(
        launcher_cmd, application_cmds, description)
def controller(cls, storage=PROJECT_STORAGE):
    """Return a controller for this model scoped to the selected project.

    Args:
        storage: Storage backend for the controller.

    Returns:
        The model controller bound to the selected project's context.
    """
    selected = Project.selected()
    if selected:
        context = [('project', selected.eid), ('projects', selected.eid)]
    else:
        # No project selected: use a key that can never match so all
        # context-scoped lookups are blocked.
        context = [('project', 'Undefined')]
    return cls.__controller__(cls, storage, context)
def main(self, argv):
    """Program entry point.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Process return code: non-zero if a problem occurred, 0 otherwise
    """
    args = self._parse_args(argv)
    cmd = args.command
    cmd_args = args.options
    logger.set_log_level(getattr(args, 'verbose', getattr(args, 'quiet', logger.LOG_LEVEL)))
    LOGGER.debug('Arguments: %s', args)
    LOGGER.debug('Verbosity level: %s', logger.LOG_LEVEL)
    # First preference: the command is a regular TAU subcommand.
    try:
        return cli.execute_command([cmd], cmd_args)
    except UnknownCommandError:
        pass
    # Otherwise look for a shortcut.
    shortcut = None
    from taucmdr.model.project import Project
    application = Project.selected().experiment().populate()['application']
    uses_python = application.get_or_default('python')
    if not uses_python and build_command.is_compatible(cmd):
        # Compiler-like command; must not trigger for python applications.
        shortcut = ['build']
        cmd_args = [cmd] + cmd_args
    elif trial_create_command.is_compatible(cmd):
        # Executable-like command; should trigger for python applications.
        shortcut = ['trial', 'create']
        cmd_args = [cmd] + cmd_args
    elif 'show'.startswith(cmd):
        shortcut = ['trial', 'show']
    elif 'metrics'.startswith(cmd):
        expr = Project.selected().experiment()
        shortcut = ['target', 'metrics']
        cmd_args.insert(0, expr.populate('target')['name'])
    if shortcut:
        LOGGER.debug('Trying shortcut: %s', shortcut)
        return cli.execute_command(shortcut, cmd_args)
    LOGGER.debug('No shortcut found for %r', cmd)
    # Out of options: route the user through the help subcommand.
    LOGGER.info("Unknown command. Calling `%s help %s` to get advice.", TAUCMDR_SCRIPT, cmd)
    return cli.execute_command(['help'], [cmd])
def _create_record(self, store, data):
    """Create the model record.

    Args:
        store (AbstractStorage): Storage to contain the record.
        data (dict): Record data.

    Returns:
        int: :any:`EXIT_SUCCESS` if successful.

    Raises:
        UniqueAttributeError: A record with the same unique attribute already exists.
    """
    ctrl = self.model.controller(store)
    key_attr = self.model.key_attribute
    key = data[key_attr]
    try:
        ctrl.create(data)
    except UniqueAttributeError:
        self.parser.error("A %s with %s='%s' already exists" % (self.model_name, key_attr, key))
    if ctrl.storage is not PROJECT_STORAGE:
        self.logger.info("Created a new %s-level %s: '%s'.", ctrl.storage.name, self.model_name, key)
        return EXIT_SUCCESS
    # Project-level record: try to attach it to the selected project.
    from taucmdr.cli.commands.project.edit import COMMAND as project_edit_cmd
    try:
        proj = Project.selected()
    except ProjectSelectionError:
        # No project selected: tell the user how to attach the record later.
        self.logger.info("Created a new %s '%s'. Use `%s` to add the new %s to a project.",
                         self.model_name, key, project_edit_cmd, self.model_name)
    else:
        project_edit_cmd.main([proj['name'], '--add', key])
    return EXIT_SUCCESS
def renumber(self, old_trials, new_trials):
    """Renumber trials of the selected experiment.

    Args:
        old_trials (list): Current trial numbers.
        new_trials (list): Desired trial numbers, parallel to ``old_trials``.
    """
    assert len(old_trials) == len(new_trials)
    expr = Project.selected().experiment()
    taken = [trial['number'] for trial in
             Trial.controller(storage=PROJECT_STORAGE).search({'experiment': expr.eid})]
    # Stage 1: park every renamed trial at a temporary number above anything
    # in use so overlapping old/new numbers never collide mid-renumber.
    first_temp = max(max(taken), max(new_trials)) + 1
    for offset, old_num in enumerate(old_trials):
        record = self.one({'number': old_num, 'experiment': expr.eid})
        self.update({'number': first_temp + offset}, record.eid)
    # Stage 2: move each parked trial to its final new number.
    for offset, new_num in enumerate(new_trials):
        record = self.one({'number': first_temp + offset, 'experiment': expr.eid})
        self.update({'number': new_num}, record.eid)
def main(self, argv):
    """Program entry point: select an experiment, creating it if needed.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: :any:`EXIT_SUCCESS` if successful.
    """
    if not argv:
        self.parser.error("too few arguments.")
    args = self._parse_args(argv)
    proj = Project.selected()
    targ, app, meas, expr = self._parse_implicit(args)
    expr = self._parse_explicit_experiment(args, expr)
    if expr:
        # An explicit experiment was named: ignore target/application/measurement.
        targ = app = meas = None
        name = expr['name']
    else:
        targ = self._parse_explicit(args, Target, targ, proj, 'targets')
        app = self._parse_explicit(args, Application, app, proj, 'applications')
        meas = self._parse_explicit(args, Measurement, meas, proj, 'measurements')
        default_name = "%s-%s-%s" % (targ['name'], app['name'], meas['name'])
        name = getattr(args, 'name', default_name)
    try:
        Experiment.select(name)
    except ExperimentSelectionError:
        # Experiment doesn't exist yet: create it, then select it.
        create_args = [name,
                       '--target', targ['name'],
                       '--application', app['name'],
                       '--measurement', meas['name']]
        retval = experiment_create_cmd.main(create_args)
        if retval != EXIT_SUCCESS:
            return retval
        Experiment.select(name)
    self.logger.info("Selected experiment '%s'.", name)
    rebuild_required = Experiment.rebuild_required()
    if rebuild_required:
        self.logger.info(rebuild_required)
    return EXIT_SUCCESS
def test_heap_usage_memory_alloc_profile(self):
    """https://github.com/ParaToolsInc/taucmdr/issues/14"""
    self.reset_project_storage()
    # Enable heap-usage and memory-alloc tracking on the 'profile' measurement.
    stdout, stderr = self.assertCommandReturnValue(
        0, measurement_edit_cmd, ['profile', '--heap-usage', '--memory-alloc'])
    self.assertIn("Updated measurement 'profile'", stdout)
    self.assertFalse(stderr)
    stdout, stderr = self.assertCommandReturnValue(0, select_cmd, ['profile'])
    self.assertIn("Selected experiment 'targ1-app1-profile'", stdout)
    self.assertFalse(stderr)
    meas = Project.selected().experiment().populate('measurement')
    self.assertTrue(meas['heap_usage'])
    self.assertTrue(meas['memory_alloc'])
    # Build and run a trial; memory-tracking env vars must be reported.
    self.assertManagedBuild(0, CC, [], 'matmult.c')
    stdout, stderr = self.assertCommandReturnValue(0, trial_create_cmd, ['./a.out'])
    self.assertIn("Trial 0 produced 1 profile files", stdout)
    self.assertIn("TAU_SHOW_MEMORY_FUNCTIONS=1", stdout)
    self.assertIn("TAU_TRACK_HEAP=1", stdout)
    self.assertFalse(stderr)
    # The trial data should carry the memory attributes and events.
    for expected in (
            "<attribute><name>TAU_SHOW_MEMORY_FUNCTIONS</name><value>on</value></attribute>",
            "<attribute><name>TAU_TRACK_HEAP</name><value>on</value></attribute>",
            "Heap Memory Used (KB) at Entry",
            "Heap Memory Used (KB) at Exit",
            "Heap Allocate",
            "compute_interchange",
            "compute",
            "malloc"):
        self.assertInLastTrialData(expected)
def main(self, argv):
    """Program entry point: select an experiment by name or by its parts.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: :any:`EXIT_SUCCESS` if successful.
    """
    if not argv:
        self.parser.error("too few arguments.")
    args = self._parse_args(argv)
    project = Project.selected()
    targ, app, meas, expr = self._parse_implicit(args)
    expr = self._parse_explicit_experiment(args, expr)
    if expr:
        # Explicit experiment wins; component records are irrelevant.
        targ, app, meas = None, None, None
        name = expr['name']
    else:
        targ = self._parse_explicit(args, Target, targ, project, 'targets')
        app = self._parse_explicit(args, Application, app, project, 'applications')
        meas = self._parse_explicit(args, Measurement, meas, project, 'measurements')
        name = getattr(args, 'name',
                       "%s-%s-%s" % (targ['name'], app['name'], meas['name']))
    try:
        Experiment.select(name)
    except ExperimentSelectionError:
        # No such experiment: create one from the resolved components.
        retval = experiment_create_cmd.main(
            [name, '--target', targ['name'],
             '--application', app['name'],
             '--measurement', meas['name']])
        if retval != EXIT_SUCCESS:
            return retval
        Experiment.select(name)
    self.logger.info("Selected experiment '%s'.", name)
    rebuild_required = Experiment.rebuild_required()
    if rebuild_required:
        self.logger.info(rebuild_required)
    return EXIT_SUCCESS
def main(self, argv):
    """Program entry point: run a command under the selected experiment.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Return code of the managed run.
    """
    self._parse_args(argv)
    launcher_cmd, application_cmds = Trial.parse_launcher_cmd(argv)
    self.logger.debug("Launcher command: %s", launcher_cmd)
    self.logger.debug("Application commands: %s", application_cmds)
    experiment = Project.selected().experiment()
    return experiment.managed_run(launcher_cmd, application_cmds)
def test_heap_usage_memory_alloc_profile(self):
    """https://github.com/ParaToolsInc/taucmdr/issues/14"""
    self.reset_project_storage()
    # Turn on heap-usage and memory-alloc on the 'profile' measurement.
    stdout, stderr = self.assertCommandReturnValue(
        0, measurement_edit_cmd,
        ['profile', '--heap-usage', '--memory-alloc'])
    self.assertIn("Updated measurement 'profile'", stdout)
    self.assertFalse(stderr)
    stdout, stderr = self.assertCommandReturnValue(0, select_cmd, ['profile'])
    self.assertIn("Selected experiment 'targ1-app1-profile'", stdout)
    self.assertFalse(stderr)
    measurement = Project.selected().experiment().populate('measurement')
    self.assertTrue(measurement['heap_usage'])
    self.assertTrue(measurement['memory_alloc'])
    self.assertManagedBuild(0, CC, [], 'matmult.c')
    stdout, stderr = self.assertCommandReturnValue(0, trial_create_cmd, ['./a.out'])
    # The run output must show the memory-tracking environment variables.
    self.assertIn("Trial 0 produced 1 profile files", stdout)
    self.assertIn("TAU_SHOW_MEMORY_FUNCTIONS=1", stdout)
    self.assertIn("TAU_TRACK_HEAP=1", stdout)
    self.assertFalse(stderr)
    # The recorded data must carry the memory attributes and events.
    expected_fragments = [
        "<attribute><name>TAU_SHOW_MEMORY_FUNCTIONS</name><value>on</value></attribute>",
        "<attribute><name>TAU_TRACK_HEAP</name><value>on</value></attribute>",
        "Heap Memory Used (KB) at Entry",
        "Heap Memory Used (KB) at Exit",
        "Heap Allocate",
        "compute_interchange",
        "compute",
        "malloc",
    ]
    for fragment in expected_fragments:
        self.assertInLastTrialData(fragment)
def _create_record(self, store, data):
    """Create the model record.

    Args:
        store (AbstractStorage): Storage to contain the record.
        data (dict): Record data.

    Returns:
        int: :any:`EXIT_SUCCESS` if successful.

    Raises:
        UniqueAttributeError: A record with the same unique attribute already exists.
    """
    ctrl = self.model.controller(store)
    key_attr = self.model.key_attribute
    key = data[key_attr]
    try:
        ctrl.create(data)
    except UniqueAttributeError:
        self.parser.error("A %s with %s='%s' already exists" % (self.model_name, key_attr, key))
    if ctrl.storage is PROJECT_STORAGE:
        # Project-level records are attached to the selected project when possible.
        from taucmdr.cli.commands.project.edit import COMMAND as project_edit_cmd
        try:
            selected_proj = Project.selected()
        except ProjectSelectionError:
            self.logger.info("Created a new %s '%s'. Use `%s` to add the new %s to a project.",
                             self.model_name, key, project_edit_cmd, self.model_name)
        else:
            project_edit_cmd.main([selected_proj['name'], '--add', key])
    else:
        self.logger.info("Created a new %s-level %s: '%s'.",
                         ctrl.storage.name, self.model_name, key)
    return EXIT_SUCCESS
def main(self, argv):
    """Program entry point: run the given command as a new trial.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Return code of the managed run.
    """
    args = self._parse_args(argv)
    description = getattr(args, 'description', None)
    full_cmd = [args.cmd] + args.cmd_args
    launcher_cmd, application_cmds = Trial.parse_launcher_cmd(full_cmd)
    self.logger.debug("Launcher command: %s", launcher_cmd)
    self.logger.debug("Application commands: %s", application_cmds)
    experiment = Project.selected().experiment()
    return experiment.managed_run(launcher_cmd, application_cmds, description)
def is_selected(self):
    """Return True if this record is part of the selected experiment.

    NOTE(review): the original docstring says "target configuration" but the
    comparison is against the experiment's 'application' field -- confirm
    which model this method belongs to.
    """
    from taucmdr.model.project import Project
    try:
        experiment = Project.selected().experiment()
    except (ProjectSelectionError, ExperimentSelectionError):
        # No project or experiment selected: nothing can be selected.
        return False
    return experiment['application'] == self.eid
def _retrieve_records(self, ctrl, keys):
    """Retrieve trial records restricted to the selected experiment.

    Args:
        ctrl: Controller used to fetch records.
        keys (list): Trial numbers as strings, or falsy to fetch all trials.

    Returns:
        list: Records belonging to the selected experiment.
    """
    if keys:
        parsed = []
        for key in keys:
            try:
                parsed.append(int(key))
            except ValueError:
                # Bug fix: the original passed a format string with an
                # unfilled '%s' placeholder, so the bad value never appeared.
                self.parser.error("Invalid trial number '%s'. Trial numbers are positive integers starting from 0." % key)
        keys = parsed
    expr = Project.selected().experiment()
    records = super(TrialListCommand, self)._retrieve_records(ctrl, keys)
    return [rec for rec in records if rec['experiment'] == expr.eid]
def test_newtrial(self):
    """Renumbering a trial must move its data directory and keep profiles intact."""
    self.reset_project_storage()
    self.assertManagedBuild(0, CC, [], 'hello.c')
    self.assertCommandReturnValue(0, CREATE_COMMAND, ['./a.out'])
    self.assertCommandReturnValue(0, EDIT_COMMAND, ['0', '--description', 'desc0'])
    exp = Project.selected().experiment().eid
    trial_ctrl = Trial.controller(storage=PROJECT_STORAGE)
    old_path = trial_ctrl.search({'number': 0, 'experiment': exp})[0].get_data_files()['tau']
    self.assertTrue(os.path.exists(old_path), "Data directory should exist after create")
    old_profile = os.path.join(old_path, "profile.0.0.0")
    self.assertTrue(os.path.exists(old_profile), "Profile should exist after create")
    with open(old_profile, 'r') as fin:
        old_profile_contents = fin.read()
    trials_before = trial_ctrl.count()
    # Renumber trial 0 -> 1 and check the listing reflects it.
    self.assertCommandReturnValue(0, RENUMBER_COMMAND, ['0', '--to', '1'])
    stdout, stderr = self.assertCommandReturnValue(0, LIST_COMMAND, [])
    self.assertIn('./a.out', stdout)
    self.assertIn(' 1 ', stdout)
    self.assertIn('desc0', stdout)
    self.assertNotIn(' 0 ', stdout)
    self.assertIn('Selected experiment:', stdout)
    self.assertFalse(stderr)
    trials_after = trial_ctrl.count()
    self.assertEqual(trials_before, trials_after,
                     "Renumbering should not change number of trials")
    # Old data location must be gone; new location must hold the same profile.
    self.assertFalse(os.path.exists(old_path),
                     "Data directory for old number should not exist after renumber")
    self.assertFalse(os.path.exists(os.path.join(old_path, "profile.0.0.0")),
                     "Profile in old data directory should not exist after renumber")
    new_path = trial_ctrl.search({'number': 1, 'experiment': exp})[0].get_data_files()['tau']
    self.assertTrue(os.path.exists(new_path),
                    "Data directory for new number should exist after renumber")
    new_profile = os.path.join(new_path, "profile.0.0.0")
    self.assertTrue(os.path.exists(new_profile),
                    "Profile in data directory for new number should exist after renumber")
    with open(new_profile, 'r') as fin:
        new_profile_contents = fin.read()
    self.assertEqual(old_profile_contents, new_profile_contents,
                     "Profiles should be identical after renumber")
def _retrieve_records(self, ctrl, keys):
    """Retrieve trial records of the selected experiment, sorted by trial number.

    Args:
        ctrl: Controller used to fetch records.
        keys (list): Trial numbers as strings, or falsy to fetch all trials.

    Returns:
        list: Records belonging to the selected experiment, ascending by 'number'.
    """
    if keys:
        parsed = []
        for key in keys:
            try:
                parsed.append(int(key))
            except ValueError:
                # Bug fix: the original passed a format string with an
                # unfilled '%s' placeholder, so the bad value never appeared.
                self.parser.error("Invalid trial number '%s'. Trial numbers are positive integers starting from 0." % key)
        keys = parsed
    expr = Project.selected().experiment()
    records = super(TrialListCommand, self)._retrieve_records(ctrl, keys)
    recs = [rec for rec in records if rec['experiment'] == expr.eid]
    # Also fixed: the sort lambda's parameter shadowed the 'recs' list.
    return sorted(recs, key=lambda rec: rec['number'])
def test_set_tau_extra_options_none(self):
    """Passing --extra-tau-options=none clears the attribute on the measurement."""
    self.reset_project_storage()
    expr = Project.selected().experiment()
    self.assertFalse('extra-tau-options' in expr.populate('measurement'))
    tau_options = "none"
    self.assertCommandReturnValue(
        0, EDIT_COMMAND, ['profile', '--extra-tau-options=' + tau_options])
    # After 'none' the record must exist but carry no extra_tau_options.
    meas = Measurement.controller(PROJECT_STORAGE).one({'name': 'profile'})
    self.assertIsNotNone(meas)
    self.assertNotIn('extra_tau_options', meas)
def test_set_tau_force_options(self):
    """--force-tau-options stores the given option string on the measurement."""
    self.reset_project_storage()
    expr = Project.selected().experiment()
    self.assertFalse('force-tau-options' in expr.populate('measurement'))
    tau_options = "-optVerbose -optNoCompInst"
    self.assertCommandReturnValue(
        0, EDIT_COMMAND, ['profile', '--force-tau-options=' + tau_options])
    # 'force_tau_options' should now be a single-element list with the options.
    meas = Measurement.controller(PROJECT_STORAGE).one({'name': 'profile'})
    self.assertIsNotNone(meas)
    self.assertListEqual(meas['force_tau_options'], [tau_options])
def parse_launcher_cmd(cls, cmd):
    """Parses a command line to split the launcher command and application commands.

    Args:
        cmd (list): Command line.

    Returns:
        tuple: (Launcher command, possibly empty list of application commands).
    """
    cmd0 = cmd[0]
    launcher_cmd, cmd = cls._separate_launcher_cmd(cmd)
    # Count how many remaining words resolve to executables on PATH.
    num_exes = len([x for x in cmd if util.which(x)])
    assert launcher_cmd or cmd
    LOGGER.debug('Launcher: %s', launcher_cmd)
    LOGGER.debug('Remainder: %s', cmd)
    uses_python = Project.selected().experiment().populate()['application'].get_or_default('python')
    if uses_python:
        # Drop a leading python interpreter from the application command.
        if 'python' in cmd[0]:
            cmd.remove(cmd[0])
    if not launcher_cmd:
        if num_exes > 1:
            LOGGER.warning("Multiple executables were found on the command line but none of them "
                           "were recognized application launchers.  TAU will assume that the application "
                           "executable is '%s' and subsequent executables are arguments to that command. "
                           "If this is incorrect, use '--' to separate '%s' and its arguments "
                           "from the application command, e.g. `mpirun -np 4 -- ./a.out -l hello`", cmd0, cmd0)
        return [], [cmd]
    if not cmd:
        # Launcher only, no application command.
        return launcher_cmd, []
    if num_exes <= 1:
        return launcher_cmd, [cmd]
    elif num_exes > 1:
        colons = [i for i, x in enumerate(cmd) if x == ':']
        if not colons:
            # Recognized launcher with multiple executables but not using ':' syntax.
            LOGGER.warning("Multiple executables were found on the command line.  TAU will assume that "
                           "the application executable is '%s' and subsequent executables are arguments "
                           "to that command.  If this is incorrect, use ':' to separate each application "
                           "executable and its arguments, e.g. `mpirun -np 4 ./foo -l : -np 2 ./bar arg1`. "
                           "Or, use '--' to separate '%s', its arguments, and subsequent executables "
                           "from the application command, e.g. "
                           "`mpirun -np 4 numactl -m 1 -- ./a.out -l hello", cmd0, cmd0)
            return launcher_cmd, [cmd]
        # Split MPMD command on ':'.  Retain ':' as first element of each application command
        colons.append(len(cmd))
        application_cmds = [cmd[:colons[0]]]
        for i, idx in enumerate(colons[:-1]):
            application_cmds.append(cmd[idx:colons[i + 1]])
        return launcher_cmd, application_cmds
def _update_record(self, store, data, key):
    """Update a record belonging to the selected experiment.

    Args:
        store (AbstractStorage): Storage containing the record.
        data (dict): New record data.
        key: Value of the record's key attribute.

    Returns:
        int: :any:`EXIT_SUCCESS` if successful.
    """
    expr = Project.selected().experiment()
    ctrl = self.model.controller(store)
    key_attr = self.model.key_attribute
    selector = {key_attr: key, 'experiment': expr.eid}
    if not ctrl.exists(selector):
        self.parser.error("No %s-level %s with %s='%s'." %
                          (ctrl.storage.name, self.model_name, key_attr, key))
    ctrl.update(data, selector)
    self.logger.info("Updated %s '%s'", self.model_name, key)
    return EXIT_SUCCESS
def test_set_tau_forced_extra_options(self):
    """Setting force options on top of extra options must be rejected."""
    self.reset_project_storage()
    expr = Project.selected().experiment()
    self.assertFalse('extra-tau-options' in expr.populate('measurement'))
    self.assertFalse('forced-tau-options' in expr.populate('measurement'))
    tau_options = "-optKeepFiles"
    self.assertCommandReturnValue(
        0, EDIT_COMMAND, ['profile', '--extra-tau-options=' + tau_options])
    # Combining extra and force options is an incompatible record state.
    with self.assertRaises(IncompatibleRecordError):
        self.assertNotCommandReturnValue(
            0, EDIT_COMMAND, ['profile', '--force-tau-options=' + tau_options])
    meas = Measurement.controller(PROJECT_STORAGE).one({'name': 'profile'})
    self.assertIsNotNone(meas)
def test_export_otf2(self):
    """Exporting an OTF2 trace trial must produce a .tgz archive."""
    self.reset_project_storage(['--mpi', '--trace', 'otf2', '--profile', 'none'])
    expr = Project.selected().experiment()
    meas = expr.populate('measurement')
    self.assertEqual(meas['trace'], 'otf2')
    self.assertEqual(meas['profile'], 'none')
    self.assertManagedBuild(0, MPI_CC, [], 'mpi_hello.c')
    self.assertCommandReturnValue(EXIT_SUCCESS, trial_create_cmd,
                                  ['mpirun', '-np', '4', './a.out'])
    self.assertCommandReturnValue(EXIT_SUCCESS, trial_export_cmd, [])
    # The exported archive is named <experiment>.trial0.tgz.
    self.assertTrue(os.path.exists(expr['name'] + '.trial0.tgz'))
def main(self, argv):
    """Program entry point: export data from the requested trials.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: :any:`EXIT_SUCCESS` if successful.
    """
    args = self._parse_args(argv)
    trial_numbers = []
    for token in getattr(args, 'trial_numbers', []):
        try:
            trial_numbers.append(int(token))
        except ValueError:
            self.parser.error("Invalid trial number: %s" % token)
    expr = Project.selected().experiment()
    for trial in expr.trials(trial_numbers):
        trial.export(args.destination)
    return EXIT_SUCCESS
def test_set_tau_forced_extra_options_none(self):
    """Passing 'none' clears both extra and force TAU options."""
    self.reset_project_storage()
    expr = Project.selected().experiment()
    self.assertFalse('extra-tau-options' in expr.populate('measurement'))
    self.assertFalse('forced-tau-options' in expr.populate('measurement'))
    tau_options = "none"
    for option_flag in ('--extra-tau-options=', '--force-tau-options='):
        self.assertCommandReturnValue(
            0, EDIT_COMMAND, ['profile', option_flag + tau_options])
    # Neither attribute may remain on the record after 'none'.
    meas = Measurement.controller(PROJECT_STORAGE).one({'name': 'profile'})
    self.assertIsNotNone(meas)
    self.assertNotIn('extra_tau_options', meas)
    self.assertNotIn('force_tau_options', meas)
def test_export_slog2(self):
    """Exporting an SLOG2 trace trial must produce a .slog2 file."""
    self.reset_project_storage(['--trace', 'slog2', '--profile', 'none'])
    expr = Project.selected().experiment()
    meas = expr.populate('measurement')
    self.assertEqual(meas['trace'], 'slog2')
    self.assertEqual(meas['profile'], 'none')
    self.assertManagedBuild(0, CC, [], 'hello.c')
    self.assertCommandReturnValue(EXIT_SUCCESS, trial_create_cmd, ['./a.out'])
    self.assertCommandReturnValue(EXIT_SUCCESS, trial_export_cmd, [])
    # The exported file is named <experiment>.trial0.slog2.
    self.assertTrue(os.path.exists(expr['name'] + '.trial0.slog2'))
def is_compatible(cmd):
    """Check if this subcommand can work with the given command.

    Args:
        cmd (str): A command from the command line, e.g. sys.argv[1].

    Returns:
        bool: True if this subcommand is compatible with `cmd`.
    """
    application = Project.selected().experiment().populate()['application']
    # Python applications accept interpreter-like commands directly.
    if application.get_or_default('python') and 'python' in cmd:
        return True
    return bool(util.which(cmd))
def assertInLastTrialData(self, value, data_type='tau'):
    """Assert that `value` appears in the most recent trial's data files.

    Args:
        value (str): Text expected somewhere in the trial data.
        data_type (str): Data format to search; only 'tau' is supported.
    """
    from taucmdr.model.project import Project
    trials = Project.selected().experiment().trials()
    data_files = trials[0].get_data_files()
    if data_type != 'tau':
        raise NotImplementedError
    searched = []
    for profile_file in glob.glob(os.path.join(data_files['tau'], 'profile.*.*.*')):
        with open(profile_file) as fin:
            contents = fin.read()
        if value in contents:
            # Found: the assertion holds.
            return
        searched.append(contents)
    self.fail("'%s' not found in '%s'" % (value, '\n'.join(searched)))
def main(self, argv):
    """Program entry point: rewrite an executable with a binary instrumentation package.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Return code of the managed rewrite.

    Raises:
        ConfigurationError: Zero or multiple instrumentation packages were specified.
    """
    args = self._parse_args(argv)
    # Same selection order as before: maqao, then dyninst, then pebil.
    rewrite_packages = [name for name, flag in (('maqao', args.maqao),
                                                ('dyninst', args.dyninst),
                                                ('pebil', args.pebil)) if flag]
    if not rewrite_packages:
        raise ConfigurationError('Instrumentation package not specified.',
                                 'Specify one of --dyninst, --maqao, or --pebil.')
    if len(rewrite_packages) > 1:
        # Bug fix: corrected typo "paclages" in the user-facing error message.
        raise ConfigurationError('Only one instrumentation package should be specified.')
    expr = Project.selected().experiment()
    return expr.managed_rewrite(rewrite_packages[0], args.executable, args.inst_file)
def main(self, argv):
    """Program entry point.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Process return code: non-zero if a problem occurred, 0 otherwise
    """
    args = self._parse_args(argv)
    cmd = args.command
    cmd_args = args.options
    logger.set_log_level(getattr(args, 'verbose', getattr(args, 'quiet', logger.LOG_LEVEL)))
    LOGGER.debug('Arguments: %s', args)
    LOGGER.debug('Verbosity level: %s', logger.LOG_LEVEL)
    # First try the command as a regular TAU subcommand.
    try:
        return cli.execute_command([cmd], cmd_args)
    except UnknownCommandError:
        pass
    # Fall back to shortcut detection.
    shortcut = None
    if build_command.is_compatible(cmd):
        shortcut = ['build']
        cmd_args = [cmd] + cmd_args
    elif trial_create_command.is_compatible(cmd):
        shortcut = ['trial', 'create']
        cmd_args = [cmd] + cmd_args
    elif 'show'.startswith(cmd):
        shortcut = ['trial', 'show']
    elif 'metrics'.startswith(cmd):
        targ_name = Project.selected().experiment().populate('target')['name']
        shortcut = ['target', 'metrics']
        cmd_args.insert(0, targ_name)
    if shortcut:
        LOGGER.debug('Trying shortcut: %s', shortcut)
        return cli.execute_command(shortcut, cmd_args)
    LOGGER.debug('No shortcut found for %r', cmd)
    # Out of options: route the user through the help subcommand.
    LOGGER.info("Unknown command. Calling `%s help %s` to get advice.", TAUCMDR_SCRIPT, cmd)
    return cli.execute_command(['help'], [cmd])
def dashboard_format(self, records):
    """Format modeled records in dashboard format.

    Args:
        records: Modeled records to format.

    Returns:
        list: Title, drawn table, spacer, subtitle, spacer.
    """
    self.logger.debug("Dashboard format")

    def cell_value(col, populated):
        # Resolve one dashboard cell from a column spec and a populated record.
        if 'value' in col:
            try:
                return populated[col['value']]
            except KeyError:
                return 'N/A'
        if 'yesno' in col:
            return 'Yes' if populated.get(col['yesno'], False) else 'No'
        if 'function' in col:
            return col['function'](populated)
        raise InternalError("Invalid column definition: %s" % col)

    title = util.hline(self.title_fmt % {'model_name': records[0].name.capitalize(),
                                         'storage_path': records[0].storage}, 'cyan')
    expr = Project.selected().experiment()
    subtitle = util.color_text("Selected experiment: ", 'cyan') + expr['name']
    rows = [[col['header'] for col in self.dashboard_columns]]
    for record in records:
        populated = record.populate()
        rows.append([cell_value(col, populated) for col in self.dashboard_columns])
    table = Texttable(logger.LINE_WIDTH)
    table.set_cols_align([col.get('align', 'c') for col in self.dashboard_columns])
    table.add_rows(rows)
    return [title, table.draw(), '', subtitle, '']
def main(self, argv):
    """Program entry point: renumber trials of the selected experiment.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: :any:`EXIT_SUCCESS` if successful.
    """
    args = self._parse_args(argv)
    trial_numbers = []
    for num in getattr(args, 'trial_numbers', []):
        try:
            trial_numbers.append(int(num))
        except ValueError:
            self.parser.error("Invalid trial number: %s" % num)
    new_trial_numbers = []
    for num in getattr(args, 'to', []):
        try:
            new_trial_numbers.append(int(num))
        except ValueError:
            # Bug fix: the original message read "Invalid trial trial number".
            self.parser.error("Invalid new trial number: %s" % num)
    if len(trial_numbers) != len(new_trial_numbers):
        self.parser.error("Invalid number of trials."
                          " Number of old trials ids should be equal to number of new trial ids.")
    self.logger.info("Renumbering " + ', '.join(
        '{} => {}'.format(i, j)
        for (i, j) in itertools.izip(trial_numbers, new_trial_numbers)))
    proj_ctrl = Project.controller()
    trial_ctrl = Trial.controller(proj_ctrl.storage)
    expr = Project.selected().experiment()
    # Refuse any renumbering that would silently overwrite a trial that is
    # not itself being renumbered.
    for new_num in new_trial_numbers:
        if new_num not in trial_numbers \
                and trial_ctrl.exists({'number': new_num, 'experiment': expr.eid}):
            self.parser.error("This renumbering would delete trial %s. If you would like to delete"
                              " this trial use the `trial delete` subcommand." % new_num)
    trial_ctrl.renumber(trial_numbers, new_trial_numbers)
    return EXIT_SUCCESS
def dashboard_format(self, records):
    """Format modeled records in dashboard format.

    Args:
        records: Modeled records to format.

    Returns:
        list: Title, drawn table, spacer, subtitle, spacer.
    """
    self.logger.debug("Dashboard format")
    heading = self.title_fmt % {'model_name': records[0].name.capitalize(),
                                'storage_path': records[0].storage}
    title = util.hline(heading, 'cyan')
    expr = Project.selected().experiment()
    subtitle = util.color_text("Selected experiment: ", 'cyan') + expr['name']
    rows = [[col['header'] for col in self.dashboard_columns]]
    for record in records:
        populated = record.populate()
        row = []
        for col in self.dashboard_columns:
            if 'value' in col:
                # Direct attribute lookup; missing attributes render as N/A.
                try:
                    cell = populated[col['value']]
                except KeyError:
                    cell = 'N/A'
            elif 'yesno' in col:
                cell = 'Yes' if populated.get(col['yesno'], False) else 'No'
            elif 'function' in col:
                cell = col['function'](populated)
            else:
                raise InternalError("Invalid column definition: %s" % col)
            row.append(cell)
        rows.append(row)
    table = Texttable(logger.LINE_WIDTH)
    table.set_cols_align([col.get('align', 'c') for col in self.dashboard_columns])
    table.add_rows(rows)
    return [title, table.draw(), '', subtitle, '']
def main(self, argv):
    """Program entry point: compile a command under the selected experiment.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Return code of the managed build.
    """
    args = self._parse_args(argv)
    experiment = Project.selected().experiment()
    return experiment.managed_build(args.cmd, args.cmd_args)
def main(self, argv):
    """Program entry point: split argv into launcher/application parts and run.

    Args:
        argv (list): Command line arguments.

    Returns:
        int: Return code of the managed run.
    """
    self._parse_args(argv)
    launcher_cmd, application_cmds = Trial.parse_launcher_cmd(argv)
    self.logger.debug("Launcher command: %s", launcher_cmd)
    self.logger.debug("Application commands: %s", application_cmds)
    return Project.selected().experiment().managed_run(launcher_cmd,
                                                       application_cmds)
def _get_papi_installation(self):
    """Return the PAPI installation of the selected experiment's target."""
    target = Project.selected().experiment().populate('target')
    return target.get_installation('papi')