def _perform_interactive(self, expr, trial, cmd, cwd, env, record_output=False):
    begin_time = self._mark_time('BEGIN', expr)
    retval = None
    try:
        self.update({'phase': 'executing', 'begin_time': begin_time}, trial.eid)
        if record_output:
            retval, output, elapsed = trial.execute_command(expr, cmd, cwd, env, record_output)
        else:
            retval, elapsed = trial.execute_command(expr, cmd, cwd, env, record_output)
    except:
        # Remove the partially-recorded trial on any failure, then re-raise.
        self.delete(trial.eid)
        raise
    finally:
        end_time = self._mark_time('END', expr)
    fields = {'end_time': end_time, 'return_code': retval, 'elapsed': elapsed}
    # Account for every file the trial wrote under its prefix directory.
    data_size = 0
    for dir_path, _, file_names in os.walk(trial.prefix):
        for name in file_names:
            data_size += os.path.getsize(os.path.join(dir_path, name))
    fields['data_size'] = data_size
    if record_output:
        fields['output'] = output
    self.update(fields, trial.eid)
    if retval != 0:
        if data_size != 0:
            LOGGER.warning("Program exited with nonzero status code: %s", retval)
        else:
            raise TrialError("Program died without producing performance data.",
                             "Verify that the right input parameters were specified.",
                             "Check the program output for error messages.",
                             "Does the selected application configuration correctly describe this program?",
                             "Does the selected measurement configuration specify the right measurement methods?",
                             "Does the selected target configuration match the runtime environment?")
    LOGGER.info('Experiment: %s', expr['name'])
    LOGGER.info('Command: %s', ' '.join(cmd))
    LOGGER.info('Current working directory: %s', cwd)
    LOGGER.info('Data size: %s (%s bytes)', util.human_size(data_size), data_size)
    LOGGER.info('Elapsed seconds: %s', elapsed)
    if record_output:
        return retval, output
    return retval
def _perform_interactive(self, expr, trial, cmd, cwd, env):
    begin_time = self._mark_time('BEGIN', expr)
    retval = None
    try:
        self.update({'phase': 'executing', 'begin_time': begin_time}, trial.eid)
        retval, elapsed = trial.execute_command(expr, cmd, cwd, env)
    except:
        # Remove the partially-recorded trial on any failure, then re-raise.
        self.delete(trial.eid)
        raise
    finally:
        end_time = self._mark_time('END', expr)
    fields = {'end_time': end_time, 'return_code': retval, 'elapsed': elapsed}
    data_size = 0
    for dir_path, _, file_names in os.walk(trial.prefix):
        for name in file_names:
            data_size += os.path.getsize(os.path.join(dir_path, name))
    fields['data_size'] = data_size
    self.update(fields, trial.eid)
    if retval != 0:
        if data_size != 0:
            LOGGER.warning("Program exited with nonzero status code: %s", retval)
        else:
            raise TrialError("Program died without producing performance data.",
                             "Verify that the right input parameters were specified.",
                             "Check the program output for error messages.",
                             "Does the selected application configuration correctly describe this program?",
                             "Does the selected measurement configuration specify the right measurement methods?",
                             "Does the selected target configuration match the runtime environment?")
    LOGGER.info('Experiment: %s', expr['name'])
    LOGGER.info('Command: %s', ' '.join(cmd))
    LOGGER.info('Current working directory: %s', cwd)
    LOGGER.info('Data size: %s (%s bytes)', util.human_size(data_size), data_size)
    LOGGER.info('Elapsed seconds: %s', elapsed)
    return retval
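# Both variants above inline the same accounting step: walk the trial's prefix
# directory and sum the size of every file the run produced. A minimal
# standalone sketch of that step (directory_size is a hypothetical helper
# written for illustration, not part of taucmdr):
import os

def directory_size(path):
    """Return the total size in bytes of all regular files under `path`."""
    total = 0
    for dir_path, _, file_names in os.walk(path):
        for name in file_names:
            total += os.path.getsize(os.path.join(dir_path, name))
    return total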
"""``trial list`` subcommand."""

from texttable import Texttable
from taucmdr import util, logger
from taucmdr.error import InternalError
from taucmdr.cli.cli_view import ListCommand
from taucmdr.model.project import Project
from taucmdr.model.trial import Trial

DASHBOARD_COLUMNS = [{'header': 'Number', 'value': 'number'},
                     {'header': 'Data Size', 'function': lambda x: util.human_size(x.get('data_size', None))},
                     {'header': 'Command', 'value': 'command'},
                     {'header': 'Description', 'value': 'description'},
                     {'header': 'Status', 'value': 'phase'},
                     {'header': 'Elapsed Seconds', 'value': 'elapsed'}]


class TrialListCommand(ListCommand):
    """``trial list`` subcommand."""

    def _retrieve_records(self, ctrl, keys):
        if keys:
            # Trial keys arrive as strings; report the offending key on bad input.
            numbers = []
            for key in keys:
                try:
                    numbers.append(int(key))
                except ValueError:
                    self.parser.error("Invalid trial number '%s'. Trial numbers are "
                                      "positive integers starting from 0." % key)
            keys = numbers
        expr = Project.selected().experiment()
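# Sketch of how a column spec like DASHBOARD_COLUMNS can drive the Texttable
# import above: each column pulls its cell from a record field ('value') or
# computes it with a callable ('function'). draw_dashboard is a hypothetical
# helper written for illustration, not taucmdr's actual renderer.
def draw_dashboard(records, columns=DASHBOARD_COLUMNS):
    table = Texttable(max_width=0)  # 0 = don't wrap long cells
    table.header([col['header'] for col in columns])
    for record in records:
        table.add_row([record.get(col['value'], '') if 'value' in col
                       else col['function'](record)
                       for col in columns])
    return table.draw()

# Example with an invented trial record:
# print(draw_dashboard([{'number': 0, 'data_size': 20000000, 'command': './a.out',
#                        'description': 'baseline', 'phase': 'completed', 'elapsed': 1.8}]))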
def test_humansize(self):
    self.assertEqual(util.human_size(20000000), '19.1MiB')
    with self.assertRaises(TypeError):
        util.human_size('abc')
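# The test above pins down util.human_size's contract: binary (1024-based)
# units, one decimal place, no space before the unit, and TypeError on
# non-numeric input. A minimal formatter honoring that contract (a sketch,
# not taucmdr's actual implementation):
def human_size_sketch(num_bytes):
    if isinstance(num_bytes, bool) or not isinstance(num_bytes, (int, float)):
        raise TypeError("expected a number of bytes, got %r" % (num_bytes,))
    size = float(num_bytes)
    for unit in ('B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'):
        if abs(size) < 1024.0 or unit == 'PiB':
            return '%.1f%s' % (size, unit)
        size /= 1024.0

assert human_size_sketch(20000000) == '19.1MiB'  # matches the fixture above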
"""``trial list`` subcommand."""

from texttable import Texttable
from taucmdr import util, logger
from taucmdr.error import InternalError
from taucmdr.cli.cli_view import ListCommand
from taucmdr.model.project import Project
from taucmdr.model.trial import Trial

DASHBOARD_COLUMNS = [{'header': 'Number', 'value': 'number'},
                     {'header': 'Data Size', 'function': lambda x: util.human_size(x.get('data_size', None))},
                     {'header': 'Command', 'value': 'command'},
                     {'header': 'Description', 'value': 'description'},
                     {'header': 'Status', 'value': 'phase'},
                     {'header': 'Elapsed Seconds', 'value': 'elapsed'}]


class TrialListCommand(ListCommand):
    """``trial list`` subcommand."""

    def _retrieve_records(self, ctrl, keys, context=True):
        if keys:
            # Trial keys arrive as strings; report the offending key on bad input.
            numbers = []
            for key in keys:
                try:
                    numbers.append(int(key))
                except ValueError:
                    self.parser.error("Invalid trial number '%s'. Trial numbers are "
                                      "positive integers starting from 0." % key)
            keys = numbers
        proj = Project.selected()
def get(self):
    def yn(v):
        return "yes" if v else "no"

    ds = {"targets": [], "applications": [], "measurements": [], "experiments": []}
    pname = self.get_argument("project")
    p = Project.controller().one({'name': pname})

    # targets
    rs = p.populate("targets")
    for r in rs:
        r = r.populate()
        mpicc = MPI_CC.keyword
        shmcc = SHMEM_CC.keyword
        d = {"name": r["name"],
             "host_os": r["host_os"],
             "host_arch": r["host_arch"],
             "host_compilers": r[CC.keyword]["family"],
             "mpi_compilers": r[mpicc]["family"] if mpicc in r else "None",
             "shmem_compilers": r[shmcc]["family"] if shmcc in r else "None"}
        ds["targets"].append(d)

    # applications
    rs = p.populate("applications")
    for r in rs:
        r = r.populate()
        d = {"name": r["name"],
             "linkage": r["linkage"],
             "openmp": yn(r["openmp"]),
             "pthreads": yn(r["pthreads"]),
             "tbb": yn(r["tbb"]),
             "mpi": yn(r["mpi"]),
             "cuda": yn(r["cuda"]),
             "opencl": yn(r["opencl"]),
             "shmem": yn(r["shmem"]),
             "mpc": yn(r["mpc"])}
        ds["applications"].append(d)

    # measurements
    rs = p.populate("measurements")
    for r in rs:
        r = r.populate()
        d = {"name": r["name"],
             "profile": r["profile"],
             "trace": r["trace"],
             "sample": yn(r["sample"]),
             "source_inst": r["source_inst"],
             "compiler_inst": r["compiler_inst"],
             "openmp": r["openmp"],
             "cuda": yn(r["cuda"]),
             "io": yn(r["io"]),
             "mpi": yn(r["mpi"]),
             "shmem": yn(r["shmem"])}
        ds["measurements"].append(d)

    # experiments
    rs = p.populate("experiments")
    for r in rs:
        r = r.populate()
        d = {"name": r["name"],
             "num_trials": str(len(r["trials"])),
             "data_size": util.human_size(sum(int(t.get('data_size', 0)) for t in r["trials"])),
             "target": r["target"]["name"],
             "application": r["application"]["name"],
             "measurement": r["measurement"]["name"],
             "tau_makefile": r["tau_makefile"]}
        ds["experiments"].append(d)

    self.set_header("Access-Control-Allow-Origin", "*")
    self.finish(ds)
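# The handler above follows tornado.web.RequestHandler's contract
# (get_argument, set_header, finish), so wiring it into an app might look like
# this. The handler class name, URL path, and port are assumptions for
# illustration; the excerpt does not show them.
import tornado.ioloop
import tornado.web

def make_app(handler_cls):
    return tornado.web.Application([(r"/dashboard", handler_cls)])

if __name__ == '__main__':
    make_app(ProjectSummaryHandler).listen(8888)  # hypothetical handler class name
    tornado.ioloop.IOLoop.current().start()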
def data_size(expr):
    """Total size of an experiment's trial data, formatted for humans."""
    return util.human_size(sum(int(trial.get('data_size', 0)) for trial in expr['trials']))
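# Quick self-contained check of data_size, reusing the 20000000-byte fixture
# from the human_size test above (trial records invented; int() also tolerates
# stores that return 'data_size' as a string, and missing keys default to 0):
if __name__ == '__main__':
    expr = {'trials': [{'data_size': 10000000}, {'data_size': '10000000'}, {}]}
    assert data_size(expr) == '19.1MiB'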