def determine_project(sumatra_options):
    """Return the Sumatra project referenced by *sumatra_options*.

    When a non-empty 'project_dir' entry is present, the project is loaded
    from that directory; otherwise the current working directory is tried,
    and None is returned if no project can be found there.
    """
    project_dir = 'project_dir' in sumatra_options and sumatra_options['project_dir']
    if project_dir:
        return load_project(project_dir)
    try:
        return load_project()
    except IOError:
        # no Sumatra project in the working directory
        return None
def get_context_data(self, **kwargs):
    """Build template context for the project list view.

    Adds the active-tab label and the project name; when the record store
    has no projects yet, falls back to the on-disk Sumatra project and sets
    a flag so the template shows the setup modal for an empty project.
    """
    context = super(ProjectListView, self).get_context_data(**kwargs)
    projects = self.get_queryset()
    context['active'] = 'List of projects'
    if not projects:
        # Load the on-disk project once (original called load_project() twice).
        project = load_project()
        context['project_name'] = project.name
        if not project.default_executable:
            # empty project: without any records inside
            context['show_modal'] = True
    else:
        context['project_name'] = projects[0]
    return context
def get_context_data(self, **kwargs):
    """Build template context for the project list view.

    Adds the active-tab label and the project name; when the record store
    has no projects yet, falls back to the on-disk Sumatra project and sets
    a flag so the template shows the setup modal for an empty project.
    """
    context = super(ProjectListView, self).get_context_data(**kwargs)
    projects = self.get_queryset()
    context['active'] = 'List of projects'
    if not projects:
        # Load the on-disk project once (original called load_project() twice).
        project = load_project()
        context['project_name'] = project.name
        if not project.default_executable:
            # empty project: without any records inside
            context['show_modal'] = True
    else:
        context['project_name'] = projects[0]
    return context
def load_project(self, path=None):
    """
    Load a Sumatra project.

    Internally calls sumatra.projects.load_project. Currently this can only
    be called once, and raises an exception if called again. The loaded
    project is accessed as `config.project`.

    Parameters
    ---------
    path: str | path-like
        Project directory. Function searches for an '.smt' directory at
        that location (i.e. the '.smt' should not be included in the path).
        Path can be relative; default is to look in the current working
        directory.

    Returns
    -------
    None

    Raises
    ------
    RuntimeError:
        If called more than once.
    """
    # The `project` setter raises if a project was already set, unless the
    # same value is assigned again.
    if isinstance(path, Project):
        # caller handed us an already-constructed Project instance
        self.project = path
    else:
        self.project = load_project(path)
def comment(argv):
    """Add a comment to an existing record."""
    usage = "%(prog)s comment [options] [LABEL] COMMENT"
    description = dedent("""\
        This command is used to describe the outcome of the simulation/analysis.
        If LABEL is omitted, the comment will be added to the most recent experiment.
        If the '-f/--file' option is set, COMMENT should be the name of a file
        containing the comment, otherwise it should be a string of text.
        By default, comments will be appended to any existing comments.
        To overwrite existing comments, use the '-r/--replace flag.""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('label', nargs='?', metavar='LABEL',
                        help="the record to which the comment will be added")
    parser.add_argument('comment',
                        help="a string of text, or the name of a file containing the comment.")
    parser.add_argument('-r', '--replace', action='store_true',
                        help="if this flag is set, any existing comment will be overwritten, otherwise, the new comment will be appended to the end, starting on a new line")
    parser.add_argument('-f', '--file', action='store_true',
                        help="interpret COMMENT as the path to a file containing the comment")
    args = parser.parse_args(argv)

    if args.file:
        # Context manager guarantees the handle is closed even if read()
        # raises (original left the file open on error).
        with open(args.comment, 'r') as f:
            comment = f.read()
    else:
        comment = args.comment

    project = load_project()
    # default to the most recent record when no label is given
    label = args.label or project.most_recent().label
    project.add_comment(label, comment, replace=args.replace)
def list(argv):  # add 'report' and 'log' as aliases
    """List records belonging to the current project."""
    usage = "%(prog)s list [options] [TAGS]"
    description = dedent("""\
        If TAGS (optional) is specified, then only records tagged
        with all the tags in TAGS will be listed.""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('tags', metavar='TAGS', nargs='*')
    parser.add_argument('-l', '--long', action="store_const", const="long",
                        dest="mode", default="short",
                        help="prints full information for each record")
    parser.add_argument('-T', '--table', action="store_const", const="table",
                        dest="mode",
                        help="prints information in tab-separated columns")
    parser.add_argument('-f', '--format', metavar='FMT',
                        choices=['text', 'html', 'latex', 'shell', 'json'],
                        default='text',
                        help="FMT can be 'text' (default), 'html', 'json', 'latex' or 'shell'.")
    parser.add_argument('-r', '--reverse', action="store_true", dest="reverse",
                        default=False,
                        help="list records in reverse order (default: newest first)")
    parser.add_argument('-m', '--main_file',
                        help="filter list of records by main file")
    parser.add_argument('-P', '--parameter_table', action="store_const",
                        const="parameter_table", dest="mode",
                        help="list records with parameter values")
    args = parser.parse_args(argv)

    project = load_project()
    if os.path.exists('.smt'):
        # cache the label list for later use (e.g. shell completion)
        with open('.smt/labels', 'w') as f:
            f.write('\n'.join(project.get_labels()))
    selection = dict(tags=args.tags, mode=args.mode,
                     format=args.format, reverse=args.reverse)
    if args.main_file is not None:
        selection['main_file__startswith'] = args.main_file
    print(project.format_records(**selection))
def repeat(argv):
    """Re-run a previous simulation or analysis."""
    usage = "%(prog)s repeat LABEL"
    description = dedent("""\
        Re-run a previous simulation/analysis under (in theory) identical
        conditions, and check that the results are unchanged.""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('original_label', metavar='LABEL',
                        help='label of record to be repeated')
    parser.add_argument('-l', '--label', metavar='NEW_LABEL',
                        help="specify a label for the new experiment. If no label is specified, one will be generated automatically.")
    args = parser.parse_args(argv)

    project = load_project()
    # repeat() may rewrite the original label (e.g. resolving "last")
    new_label, original_label = project.repeat(args.original_label, args.label)
    diff = project.compare(original_label, new_label)
    if diff:
        formatter = get_diff_formatter()(diff)
        msg = "\n".join([
            "The new record does not match the original. It differs as follows.",
            formatter.format('short'),
            "run smt diff --long %s %s to see the differences in detail." % (original_label, new_label),
        ])
    else:
        msg = "The new record exactly matches the original."
    print(msg)
    # keep the comparison outcome with the new record
    project.add_comment(new_label, msg)
def get_or_simulate(simulation, params_dict=None, pardir='simulation'):
    """Return output data for *simulation*, running it first if needed.

    Looks for an existing Sumatra record matching the main file and the
    merged parameter set; if none is found (or its data is missing), runs
    the simulation and recurses to pick up the fresh record.

    Parameters
    ----------
    simulation : str
        Simulation script name (without the '.py' extension).
    params_dict : dict, optional
        Parameter overrides merged into the stored defaults.
    pardir : str
        Directory containing the simulation scripts.

    Returns
    -------
    numpy.ndarray
        The simulation output data.
    """
    if params_dict is None:
        # avoid the shared-mutable-default-argument pitfall of `={}`
        params_dict = {}
    time.sleep(1)
    project = projects.load_project()
    parameters = get_parameters(simulation)
    parameters.update(params_dict)
    main_file = pardir + '/' + simulation + '.py'
    records = project.find_records(main_file=main_file, parameters=parameters)
    if len(records) == 0:
        run_simulation(main_file, parameters)
        data = get_or_simulate(simulation, parameters.as_dict())
    else:
        print('Get data from %s' % records[0].label)
        try:
            data = get_output_data(records[0])
        except Exception:
            # was a bare `except:` — don't swallow SystemExit/KeyboardInterrupt
            print('Data not found.')
            run_simulation(main_file, parameters)
            data = get_or_simulate(simulation, parameters.as_dict())
    return np.array(data)
def sync(argv):
    """Synchronize two record stores."""
    usage = "%(prog)s sync PATH1 [PATH2]"
    description = dedent("""\
        Synchronize two record stores. If both PATH1 and PATH2 are given, the
        record stores at those locations will be synchronized. If only PATH1 is
        given, and the command is run in a directory containing a Sumatra
        project, only that project's records be synchronized with the store at
        PATH1. Note that PATH1 and PATH2 may be either filesystem paths or URLs.
        """)  # need to say what happens if the sync is incomplete due to label collisions
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('path1')
    parser.add_argument('path2', nargs='?')
    args = parser.parse_args(argv)

    store1 = get_record_store(args.path1)
    if args.path2:
        # two explicit stores: synchronize everything
        store2 = get_record_store(args.path2)
        collisions = store1.sync_all(store2)
    else:
        # one path: synchronize the current project's records only
        project = load_project()
        collisions = store1.sync(project.record_store, project.name)

    if collisions:
        print("Synchronization incomplete: there are two records with the same name for the following: %s" % ", ".join(collisions))
        sys.exit(1)
def migrate(argv):
    """Update the record store after data files have been moved."""
    usage = "%(prog)s migrate [options]"
    description = dedent("""\
        If you have moved your data files to a new location, update the record
        store to reflect the new paths.
        """)
    # might also want to update the repository upstream
    # should we keep a history of such changes?
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('-d', '--datapath', metavar='PATH',
                        help="modify the path to the directory in which your results are stored.")
    parser.add_argument('-i', '--input', metavar='PATH',
                        help="modify the path to the directory in which your input data files are stored.")
    parser.add_argument('-A', '--archive', metavar='PATH',
                        help="modify the directory in which your results are archived.")
    parser.add_argument('-M', '--mirror', metavar='URL',
                        help="modify the URL at which your data files are mirrored.")
    args = parser.parse_args(argv)
    project = load_project()
    # CLI option name -> dotted record-store field it updates
    field_map = {
        "datapath": "datastore.root",
        "input": "input_datastore.root",
        "archive": "datastore.archive",
        "mirror": "datastore.mirror_base_url",
    }
    if not any(vars(args).values()):
        warnings.warn(
            "Command 'smt migrate' had no effect. Please provide at least one "
            "argument. (Run 'smt help migrate' for help.)")
        return
    for option_name, field in field_map.items():
        value = getattr(args, option_name)
        if value:
            project.record_store.update(project.name, field, value)
def load_project(self, path=None):
    """
    Load a Sumatra project.

    Internally calls sumatra.projects.load_project. Currently this can only
    be called once, and raises an exception if called again. The loaded
    project is accessed as `config.project`.

    Parameters
    ---------
    path: str | path-like
        Project directory. Function searches for an '.smt' directory at
        that location (i.e. the '.smt' should not be included in the path).
        Path can be relative; default is to look in the current working
        directory.

    Returns
    -------
    None

    Raises
    ------
    RuntimeError:
        If called more than once.
    """
    # Guard against double loading: the consequences of replacing an
    # already-loaded project are unknown, so fail loudly instead.
    if self._project is not None:
        raise RuntimeError(
            "Only call `load_project` once: I haven't reasoned out what "
            "kinds of bad things would happen if more than one project "
            "were loaded.")
    self._project = load_project(path)
def delete(argv):
    """Delete records or records with a particular tag from a project."""
    usage = "%(prog)s delete [options] LIST"
    description = dedent("""\
        LIST should be a space-separated list of labels for individual records or
        of tags. If it contains tags, you must set the --tag/-t option (see below).
        The special value "last" allows you to delete the most recent
        simulation/analysis. If you want to delete all records, just delete the
        .smt directory and use smt init to create a new, empty project.""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('labels', metavar='LIST', nargs="+",
                        help="a space-separated list of labels for individual records or of tags")
    parser.add_argument('-t', '--tag', action='store_true',
                        help="interpret LIST as containing tags. Records with any of these tags will be deleted.")
    parser.add_argument('-d', '--data', action='store_true',
                        help="also delete any data associated with the record(s).")
    args = parser.parse_args(argv)

    project = load_project()
    if args.tag:
        for tag in args.labels:
            n = project.delete_by_tag(tag, delete_data=args.data)
            print("%s records deleted." % n)
        return
    for label in args.labels:
        # "last" is shorthand for the most recent record's label
        target = project.most_recent().label if label == 'last' else label
        try:
            project.delete_record(target, delete_data=args.data)
        except Exception:
            # could be KeyError or DoesNotExist: should create standard
            # NoSuchRecord or RecordDoesNotExist exception
            warnings.warn("Could not delete record '%s' because it does not exist" % target)
def list(argv):  # add 'report' and 'log' as aliases
    """List records belonging to the current project."""
    usage = "%(prog)s list [options] [TAGS]"
    description = dedent("""\
        If TAGS (optional) is specified, then only records tagged
        with all the tags in TAGS will be listed.""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('tags', metavar='TAGS', nargs='*')
    parser.add_argument('-l', '--long', action="store_const", const="long",
                        dest="mode", default="short",
                        help="prints full information for each record")
    parser.add_argument('-T', '--table', action="store_const", const="table",
                        dest="mode",
                        help="prints information in tab-separated columns")
    parser.add_argument('-f', '--format', metavar='FMT',
                        choices=['text', 'html', 'latex', 'shell', 'json'],
                        default='text',
                        help="FMT can be 'text' (default), 'html', 'json', 'latex' or 'shell'.")
    parser.add_argument('-r', '--reverse', action="store_true", dest="reverse",
                        default=False,
                        help="list records in reverse order (default: newest first)")
    args = parser.parse_args(argv)

    project = load_project()
    if os.path.exists('.smt'):
        # Context manager closes the label cache even if format_records
        # raises (original used bare open/close).
        with open('.smt/labels', 'w') as f:
            f.writelines(project.format_records(tags=None, mode='short',
                                                format='text', reverse=False))
    print(project.format_records(tags=args.tags, mode=args.mode,
                                 format=args.format, reverse=args.reverse))
def diff(argv):
    """Show the differences, if any, between two records."""
    usage = "%(prog)s diff [options] LABEL1 LABEL2"
    description = dedent("Show the differences, if any, between two records.")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('label1')
    parser.add_argument('label2')
    parser.add_argument('-i', '--ignore', action="append",
                        help="a regular expression pattern for filenames to ignore when evaluating differences in output data. To supply multiple patterns, use the -i option multiple times.")
    parser.add_argument('-l', '--long', action="store_const", const="long",
                        dest="mode", default="short",
                        help="prints full information for each record")
    args = parser.parse_args(argv)
    # argparse leaves `ignore` as None when -i was never supplied
    ignore_patterns = args.ignore if args.ignore is not None else []
    project = load_project()
    report = project.show_diff(args.label1, args.label2, mode=args.mode,
                               ignore_filenames=ignore_patterns)
    print(report)
def tag(argv):
    """Tag, or remove a tag, from a record or records."""
    usage = "%(prog)s tag [options] TAG [LIST]"
    description = dedent("""\
        If TAG contains spaces, it must be enclosed in quotes. LIST should be a
        space-separated list of labels for individual records. If it is omitted,
        only the most recent record will be tagged. If the '-r/--remove' option
        is set, the tag will be removed from the records.""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('tag', metavar='TAG', help="tag to add")
    parser.add_argument('labels', metavar='LIST', nargs='*',
                        help="a space-separated list of records to be tagged")
    parser.add_argument('-r', '--remove', action='store_true',
                        help="remove the tag from the record(s), rather than adding it.")
    args = parser.parse_args(argv)

    project = load_project()
    # same call shape for both operations: op(label, tag)
    apply_tag = project.remove_tag if args.remove else project.add_tag
    for label in (args.labels or [project.most_recent().label]):
        apply_tag(label, args.tag)
def register_record(label, reason=None, tag=None):
    """
    Register a simulation to the sumatra project.
    Loads the sumatra project in the current repository

    Parameters
    ----------
    label : str
        Simulation label
    reason : str, optional
        Reason for the simulation run stored in the sumatra database
    tag : str, optional
        Tag for the simulation run stored in the sumatra database
    """
    project = load_project()
    # the custom parameter file lives next to the simulation output
    param_file = os.path.join(data_path, label,
                              '_'.join(('custom_params', label)))
    record = project.new_record(parameters=build_parameters(param_file),
                                main_file='nest_simulation.py',
                                reason=reason,
                                label=label)
    record.duration = 0.  # Add 0 for now and update later
    project.add_record(record)
    project.save()
    if tag is not None:
        project.add_tag(label, tag)
def register_runtime(label):
    """
    Register the duration of simulation run in the sumatra database.
    Loads the runtime automatically from the logfiles in the simulation
    directory.

    Parameters
    ----------
    label : str
        Simulation label
    """
    fp = os.path.join(data_path, label, 'recordings', 'runtime_*')
    files = glob.glob(fp)
    # Group values by key across all runtime files.  setdefault replaces the
    # original special-cased first iteration and avoids a NameError when no
    # runtime files are found (data is now always defined).
    data = {}
    for fn in files:
        with open(fn, 'r') as f:
            d = json.load(f)
        for key, value in d.items():
            data.setdefault(key, []).append(value)
    # reduce each key's samples to their mean, then total them
    for key, value in data.items():
        data[key] = np.mean(value)
    project = load_project()
    record = project.get_record(label)
    record.duration = sum(data.values())
    project.add_record(record)
    project.save()
def export_project(output_file):
    """Serialize the current Sumatra project's state to *output_file* as JSON,
    handling the layout differences between Sumatra 0.1, 0.2 and 0.3."""
    if minor_version == 3:
        # 0.3 already stores the project as JSON: just copy it
        shutil.copy(".smt/project", ".smt/project_export.json")
        return
    elif minor_version == 1:
        prj = projects.load_simulation_project()
    else:
        prj = projects.load_project()
    state = {
        'name': prj.name,
        'on_changed': prj.on_changed,
        'default_main_file': prj.default_main_file,
        'default_executable': None,
        'default_repository': None,
        'default_launch_mode': None,
        'data_store': dict(prj.data_store.get_state(),
                           type=_get_class_path(prj.data_store)),
        'record_store': dict(type=_get_class_path(prj.record_store)),
        'description': getattr(prj, "description", ""),  # not in 0.1
        'data_label': getattr(prj, "data_label", None),  # not in 0.1
        '_most_recent': getattr(prj, "_most_recent", ""),  # not in 0.1
    }
    if prj.default_executable:
        obj = prj.default_executable
        state['default_executable'] = {
            'type': _get_class_path(obj),
            'path': obj.path,
            'version': obj.version
        }
    if prj.default_repository:
        obj = prj.default_repository
        state['default_repository'] = {
            'type': _get_class_path(obj),
            'url': obj.url,
        }
    if prj.default_launch_mode:
        obj = prj.default_launch_mode
        state['default_launch_mode'] = dict(obj.get_state(),
                                            type=_get_class_path(obj))
    if prj.record_store.__class__.__name__[:6] == 'Django':
        state['record_store']['db_file'] = ".smt/records"  # prj.record_store._db_file
    else:
        state['record_store']['shelf_name'] = ".smt/records"  # prj.record_store._shelf_name
    # Context manager replaces the manual open/close pair (the file was
    # left open if json.dump raised).
    with open(output_file, 'w') as f:
        json.dump(state, f, indent=2)
def export(argv):
    """Export a Sumatra project and its records to JSON."""
    usage = "%(prog)s export"
    description = dedent("""\
        Export a Sumatra project and its records to JSON. This is needed
        before running upgrade.""")
    parser = ArgumentParser(usage=usage, description=description)
    # no options: parse_args still handles --help and rejects extras
    parser.parse_args(argv)
    load_project().export()
def _mk_save_folder(self):
    """Create a per-run folder in the project data store and remember its
    (non-normalized) path in self.save_path."""
    if self.args.smt_label != "debug":
        self.time_stamp = self.args.smt_label
    else:
        # debug runs get a generated UTC timestamp instead of a label
        self.time_stamp = time.strftime("%Y%m%d-%H%M%S", time.gmtime()) + "-debug"
    project = load_project()
    target = os.path.join(project.data_store.root, self.time_stamp)
    os.mkdir(os.path.normpath(target))
    self.save_path = target
def info(argv):
    """Print information about the current project."""
    usage = "%(prog)s info"
    description = "Print information about the current project."
    parser = ArgumentParser(usage=usage, description=description)
    args = parser.parse_args(argv)
    try:
        project = load_project()
    except IOError as err:
        # no Sumatra project in the working directory
        print(err)
        sys.exit(1)
    print(project.info())
def settings(request, project):
    '''Handle the web-interface settings POST for one project (Python 2 Django view).

    Only one of the following parameter can be True
    web_settings['saveSettings'] == True: save the settings in .smt/project
    web_settings['web'] == True: send project.web_settings to record_list.html
    web_settings['sumatra'] = True: send some spacific settings to record_list.html
    (they will be used in the popup window for the new record as the default values
    '''
    # gather the posted settings; absent keys fall back to False
    web_settings = {'display_density': request.POST.get('display_density', False),
                    'nb_records_per_page': request.POST.get('nb_records_per_page', False),
                    'table_HideColumns': request.POST.getlist('table_HideColumns[]'),
                    'saveSettings': request.POST.get('saveSettings', False),
                    'web': request.POST.get('web', False),
                    'sumatra': request.POST.get('sumatra', False)}
    nbCols = 14  # total number of columns
    sim_list = models.Record.objects.filter(project__id=project).order_by('-timestamp')
    project_loaded = load_project()
    if web_settings['saveSettings']:
        if len(web_settings['table_HideColumns']) == 0:
            # empty set (all checkboxes are checked)
            project_loaded.web_settings['table_HideColumns'] = []
        try:
            project_loaded.web_settings
        except (AttributeError, KeyError):
            # project doesn't have web_settings yet
            # upgrading of .smt/project: new supplementary settings entries
            project_loaded.web_settings = init_websettings()
        # copy only the truthy posted values into the persisted settings
        for key, item in web_settings.iteritems():  # Python 2 dict iteration
            if item:
                project_loaded.web_settings[key] = item
        project_loaded.save()
        # repetition of code for list_records !!!
        nb_per_page = int(web_settings['nb_records_per_page'])
        paginator = Paginator(sim_list, nb_per_page)
        page_list = paginator.page(1)
        # column widths are derived from how many columns remain visible
        nbCols_actual = nbCols - len(web_settings['table_HideColumns'])
        head_width = '%s%s' % (90.0/nbCols_actual, '%')
        if (nbCols_actual > 10):
            label_width = '150px'
        else:
            label_width = head_width
        dic = {'project_name': project,
               'settings': project_loaded.web_settings,
               'paginator': paginator,
               'object_list': page_list.object_list,
               'page_list': page_list,
               'width': {'head': head_width, 'label': label_width}}
        return render_to_response('content.html', dic)
    elif web_settings['web']:
        # NOTE(review): `project` here is the URL parameter (an id string),
        # not the loaded Project object — this looks like it should be
        # `project_loaded.web_settings`; confirm before changing.
        return HttpResponse(simplejson.dumps(project.web_settings))
    elif web_settings['sumatra']:
        # NOTE(review): same `project` vs `project_loaded` concern as above.
        settings = {'execut': project.default_executable.path,
                    'mfile': project.default_main_file}
        return HttpResponse(simplejson.dumps(settings))
def list(argv):  # add 'report' and 'log' as aliases
    """List records belonging to the current project."""
    usage = "%(prog)s list [options] [TAGS]"
    description = dedent("""\
        If TAGS (optional) is specified, then only records tagged
        with all the tags in TAGS will be listed.""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('tags', metavar='TAGS', nargs='*')
    parser.add_argument('-l', '--long', action="store_const", const="long",
                        dest="mode", default="short",
                        help="prints full information for each record")
    parser.add_argument('-T', '--table', action="store_const", const="table",
                        dest="mode",
                        help="prints information in tab-separated columns")
    parser.add_argument(
        '-f', '--format', metavar='FMT',
        choices=['text', 'html', 'latex', 'shell', 'json'],
        default='text',
        help="FMT can be 'text' (default), 'html', 'json', 'latex' or 'shell'."
    )
    parser.add_argument(
        '-r', '--reverse', action="store_true", dest="reverse", default=False,
        help="list records in reverse order (default: newest first)"
    )
    args = parser.parse_args(argv)

    project = load_project()
    if os.path.exists('.smt'):
        # Context manager closes the label cache even if format_records
        # raises (original used bare open/close).
        with open('.smt/labels', 'w') as f:
            f.writelines(project.format_records(tags=None, mode='short',
                                                format='text', reverse=False))
    print(project.format_records(tags=args.tags, mode=args.mode,
                                 format=args.format, reverse=args.reverse))
def export_records(output_file):
    """Export all records from the record store to *output_file* as JSON,
    handling format differences between Sumatra 0.1, 0.2 and 0.3+."""
    store = load_recordstore()
    if minor_version < 3:
        patch_sumatra()
    # Context manager replaces the manual open/close pair.
    with open(output_file, "w") as f:
        if minor_version == 1:
            # 0.1 stores are not namespaced by project
            json.dump([encode_record(record) for record in store.list(groups=None)],
                      f, indent=2)
        else:
            project_name = projects.load_project().name
            if minor_version == 2:
                json.dump([encode_record(record) for record in store.list(project_name)],
                          f, indent=2)
            else:
                # 0.3+ stores know how to export themselves
                f.write(store.export(project_name))
def sumatra_start(repository, sumatra_db_path, results_path, working_dir, hg_username, sumatra_run_name, parameters): '''Clones the Omics Pipe repository from Bitbucket, creates a Sumatra project, and creates a Sumatra record for the current run''' print "sumatra_db_path is " + sumatra_db_path print type(sumatra_db_path) check_create_dir(sumatra_db_path) os.chdir(sumatra_db_path) repo1 = hgapi.Repo(repository) repo_path = sumatra_db_path +"/omics_pipe" repo= {"url":repo_path, "type":"sumatra.versioncontrol._mercurial.MercurialRepository", "upstream":repository} executable= {"path":"", "version": "", "type":"sumatra.programs.PythonExecutable", "options":"", "name": "Python"} sumatra_launch_mode = {"working_directory": working_dir, "type": "sumatra.launch.SerialLaunchMode"} data_store1 = {"root":results_path, "type": "sumatra.datastore.filesystem.FileSystemDataStore"} database_path = sumatra_db_path + "/records/recordstore.db" record_store1 = {"db_file": database_path, "type": "sumatra.recordstore.django_store.DjangoRecordStore"} input_datastore1 = {"root": results_path, "type": "sumatra.datastore.filesystem.FileSystemDataStore"} while True: try: repo1.hg_clone(url = repository, path=repo_path) with open(repo_path + "/.hg/hgrc", "a") as myfile: myfile.write("[ui]\nusername= "******"Omics pipe repository cloned to : " + repo_path break except hgapi.hgapi.HgException: print "Omics pipe repository already exists." 
break while True: try: Project(sumatra_run_name, default_repository=repo, default_executable=executable, default_launch_mode = sumatra_launch_mode, on_changed='store-diff', data_store=data_store1, record_store=record_store1, input_datastore=input_datastore1) print "Sumatra project created: " + sumatra_run_name + " in directory: " + sumatra_db_path break except Exception: print "Sumatra project already exists, loading project: " + sumatra_run_name break project = load_project(path=sumatra_db_path) print project sumatra_params = build_parameters(parameters) print sumatra_params os.chdir(repo_path) repo_main = "omics_pipe/main.py" record = project.new_record(parameters=sumatra_params, main_file=repo_main) print record return record,project
def smt_run(self, line):
    """IPython line magic: execute a script, optionally under Sumatra control.

    Parses the magic's options from *line*, builds (or reuses) the global
    `parameters`, optionally echoes the script and its parameters, runs the
    script, then either stores duration/output data in a new Sumatra record
    (--record) or saves `data`/`fig` globals to timestamped files (--save).

    NOTE(review): uses Python-2-only execfile/has_key — will not run on
    Python 3 as-is.
    """
    args = parse_argstring(self.smt_run, line)
    global parameters
    if args.flush:
        parameters = build_parameters(args.parameters)
    else:
        # reuse an existing global `parameters` if one is already defined
        parameters = globals().get('parameters', build_parameters(args.parameters))
    global save
    save = args.save
    if args.print:
        # echo the script source followed by the parameter set
        print(12*"-" + " Script " + 12*"-")
        with open(args.main_file, 'r') as f:
            script = f.readlines()
        f.closed  # no-op attribute access; the with-block already closed f
        print(''.join(script), end='')
        print(32*"-", end="\n\n")
        print(10*"-" + " Parameters " + 10*"-")
        print(parameters)
        print(32*"-", end="\n\n")
    if args.record is True:
        global record
        project = load_project()
        record = project.new_record(main_file=os.path.relpath(args.main_file), parameters=parameters)
        print("Record label for this run: '%s'" % record.label)
    start_time = time.time()
    # run the target script with the parameter dict as its local namespace
    execfile(args.main_file, globals(), parameters.as_dict())
    duration = time.time() - start_time
    if args.record is True:
        fname = "%s" % record.label
        # persist any `data`/`fig` globals produced by the script
        if globals().has_key('data'):
            np.savetxt("Data/%s.dat" % fname, data)
        if globals().has_key('fig'):
            fig.savefig("Data/%s.png" % fname)
        record.duration = duration
        record.output_data = record.datastore.find_new_data(record.timestamp)
        project.add_record(record)
        project.save()
        print("Data keys are [%s(%s [%s])" % (record.label, record.version, record.timestamp))
    elif save is True:
        # no Sumatra record: save outputs under a timestamp_scriptname prefix
        fname = "%s_%s" % (time.strftime("%y%m%d-%H%M%S", time.gmtime(start_time)),
                           os.path.splitext(os.path.basename(args.main_file))[0])
        if globals().has_key('data'):
            np.savetxt("%s.dat" % fname, data)  # Save data
        if globals().has_key('fig'):
            fig.savefig("%s.png" % fname)
    print("Duration: %.2fs" % duration)
def inspectFolder(foldername, projectpath='.',
                  timestamp=datetime.datetime(2010, 1, 1, 11, 14, 40, 915039)):
    """Register the contents of *foldername* as a new Sumatra record.

    Temporarily points the project's data store at *foldername*, records
    everything newer than *timestamp* as output data, then restores the
    original data store root.
    """
    project = load_project(projectpath)
    logging.debug('Scan folder %s' % foldername)
    original_root = project.data_store.root
    project.data_store = datastore.FileSystemDataStore(foldername)
    record = project.new_record(
        main_file=sys.argv[0],
        parameters=foldername,
        executable=sumatra.programs.get_executable(sys.executable),
        reason='Scan folder %s' % foldername,
    )
    record.output_data = record.datastore.find_new_data(timestamp)
    project.add_record(record)
    project.save()
    # restore the data store so later records use the original root
    project.data_store = datastore.FileSystemDataStore(original_root)
def export_records(output_file):
    """Export all records from the record store to *output_file* as JSON,
    handling format differences between Sumatra 0.1, 0.2 and 0.3+."""
    store = load_recordstore()
    if minor_version < 3:
        patch_sumatra()
    # Context manager replaces the manual open/close pair.
    with open(output_file, 'w') as f:
        if minor_version == 1:
            # 0.1 stores are not namespaced by project
            json.dump([encode_record(record) for record in store.list(groups=None)],
                      f, indent=2)
        else:
            project_name = projects.load_project().name
            if minor_version == 2:
                json.dump([encode_record(record) for record in store.list(project_name)],
                          f, indent=2)
            else:
                # 0.3+ stores know how to export themselves
                f.write(store.export(project_name))
def start_web_server(arguments=None):
    """Launch the Forrest web interface.

    There must be a Sumatra project in the working directory; its (Django)
    record store backs the web interface. Configures Django settings, then
    starts the development server, optionally opening a browser tab.
    """
    description = dedent("""\
        Launch the Forrest web interface. There must be a Sumatra project in the
        working directory and the record store for that project will be used.""")
    parser = ArgumentParser(description=description)
    parser.add_argument('-a', '--allips', default=False, action="store_true",
                        help="run server on all IPs, not just localhost")
    parser.add_argument('-p', '--port', default="8000",
                        help="run server on this port number")
    parser.add_argument('-n', '--no-browser', default=False, action="store_true",
                        help="do not open browser")
    parser.add_argument('-r', '--read_only', default=False, action="store_true",
                        help="set read-only mode")
    args = parser.parse_args(arguments)
    project = load_project()
    if not isinstance(project.record_store, DjangoRecordStore):
        # should make the web interface independent of the record store, if possible
        print("This project cannot be accessed using the web interface (record store is not of type DjangoRecordStore).")
        sys.exit(1)
    # only needed the project for the check above; release it
    del project
    smt_root_dir = os.path.dirname(sumatra_web)
    # Django must be configured before any management command runs.
    # TEMPLATE_DIRS is searched in order: project's .smt templates, then
    # forrest's own, then Sumatra's.
    db_config.update_settings(
        INSTALLED_APPS=db_config._settings["INSTALLED_APPS"] + ['forrest'] + ['sumatra.web'],
        ROOT_URLCONF='forrest.urls',
        STATIC_URL='/static/',
        TEMPLATE_DIRS=(os.path.join(os.getcwd(), ".smt", "templates"),
                       os.path.join(os.path.dirname(__file__), "templates"),
                       os.path.join(smt_root_dir, "templates"),),
        MIDDLEWARE_CLASSES=tuple(),
        READ_ONLY=args.read_only
    )
    db_config.configure()
    if not args.no_browser:
        # open a browser tab 3 seconds after the server starts
        # NOTE(review): `thread` is the Python 2 module name — confirm the
        # intended runtime (would be `_thread` on Python 3).
        thread.start_new_thread(delayed_new_tab, ("http://127.0.0.1:%s" % args.port, 3))
    if args.allips:
        address = '0.0.0.0'
    else:
        address = '127.0.0.1'
    address += ':' + args.port
    management.call_command('runserver', address, use_reloader=False)
def smt2df(path=None):
    """Load sumatra project and convert to Pandas DataFrame

    Read project from directory passed as the argument and return Project
    object and project records converted to Pandas DataFrame. If no
    argument is given, the project is read from the current directory.

    Parameters
    ----------
    path : str, optional
        Project directory; defaults to the current directory.

    Returns
    -------
    (Project, pandas.DataFrame)
    """
    # Bug fix: `path` was previously ignored and load_project('.') was
    # always used, contradicting the docstring.
    project = load_project(path if path is not None else '.')
    # NOTE(review): the record-store export is hard-coded to the
    # 'benchmark8' project name; presumably this should be project.name —
    # confirm before changing.
    df = pd.read_json(project.record_store.export('benchmark8'),
                      convert_dates=["timestamp"])
    # expand the parameters column into one column per parameter
    df = df.merge(df.parameters.apply(parameters2columns),
                  left_index=True, right_index=True)
    df['nproc'] = df.launch_mode.apply(lambda x: x['parameters']['n'])
    return project, df
def export_project(output_file):
    """Serialize the current Sumatra project's state to *output_file* as JSON,
    handling the layout differences between Sumatra 0.1, 0.2 and 0.3."""
    if minor_version == 3:
        # 0.3 already stores the project as JSON: just copy it
        shutil.copy(".smt/project", ".smt/project_export.json")
        return
    elif minor_version == 1:
        prj = projects.load_simulation_project()
    else:
        prj = projects.load_project()
    state = {
        'name': prj.name,
        'on_changed': prj.on_changed,
        'default_main_file': prj.default_main_file,
        'default_executable': None,
        'default_repository': None,
        'default_launch_mode': None,
        'data_store': dict(prj.data_store.get_state(),
                           type=_get_class_path(prj.data_store)),
        'record_store': dict(type=_get_class_path(prj.record_store)),
        'description': getattr(prj, "description", ""),  # not in 0.1
        'data_label': getattr(prj, "data_label", None),  # not in 0.1
        '_most_recent': getattr(prj, "_most_recent", ""),  # not in 0.1
    }
    if prj.default_executable:
        obj = prj.default_executable
        state['default_executable'] = {
            'type': _get_class_path(obj),
            'path': obj.path,
            'version': obj.version
        }
    if prj.default_repository:
        obj = prj.default_repository
        state['default_repository'] = {
            'type': _get_class_path(obj),
            'url': obj.url,
        }
    if prj.default_launch_mode:
        obj = prj.default_launch_mode
        state['default_launch_mode'] = dict(obj.get_state(),
                                            type=_get_class_path(obj))
    if prj.record_store.__class__.__name__[:6] == 'Django':
        state['record_store']['db_file'] = ".smt/records"  # prj.record_store._db_file
    else:
        state['record_store']['shelf_name'] = ".smt/records"  # prj.record_store._shelf_name
    # Context manager replaces the manual open/close pair (the file was
    # left open if json.dump raised).
    with open(output_file, 'w') as f:
        json.dump(state, f, indent=2)
def upgrade(argv):
    """Upgrade an existing Sumatra project to the current Sumatra version."""
    usage = "%(prog)s upgrade"
    description = dedent("""\
        Upgrade an existing Sumatra project. You must have previously run
        "smt export" or the standalone 'export.py' script.""")
    parser = ArgumentParser(usage=usage, description=description)
    args = parser.parse_args(argv)
    project = load_project()

    if (hasattr(project, 'sumatra_version')
            and project.sumatra_version == sumatra.__version__
            and "dev" not in sumatra.__version__):
        print(
            "No upgrade needed (project was created with an up-to-date version of Sumatra)."
        )
        sys.exit(1)
    if not os.path.exists(".smt/project_export.json"):
        print("Error: project must have been exported (with the original "
              "version of Sumatra) before upgrading.")
        sys.exit(1)

    # backup and remove .smt
    import shutil
    backup_dir = project.backup()
    shutil.rmtree(".smt")
    # upgrade the project data
    os.mkdir(".smt")
    shutil.copy("%s/project_export.json" % backup_dir, ".smt/project")
    project.sumatra_version = sumatra.__version__
    project.save()
    # upgrade the record store
    project.record_store.clear()
    filename = "%s/records_export.json" % backup_dir
    if os.path.exists(filename):
        # Context manager guarantees the export file is closed (original
        # used bare open/close).
        with open(filename) as f:
            project.record_store.import_(project.name, f.read())
    else:
        print("Record file not found")
        sys.exit(1)
    print("Project successfully upgraded to Sumatra version {0}.".format(
        project.sumatra_version))
def getSMTRecords(records=None, tags=None, parameters=None,
                  atol=1e-10, rtol=1e-10, path='./'):
    """Filter Sumatra records by tags and (approximately) by parameter values.

    Parameters
    ----------
    records : list or None
        Records to filter. If falsy, records are fetched from the Sumatra
        project found at `path` (pre-filtered by `tags` in the store query).
    tags : list of str or None
        A record matches only if it carries every tag in this list.
    parameters : dict or None
        Mapping of parameter name -> expected value; a record matches only if
        every listed parameter is numerically close to the record's value.
    atol, rtol : float
        Absolute/relative tolerances passed to ``np.allclose``.
    path : str
        Project directory, used only when `records` is not supplied.

    Returns
    -------
    list
        Records satisfying both the tag and parameter filters.
    """
    # Use None sentinels instead of mutable defaults (tags=[] / parameters={}
    # were shared across calls -- the classic mutable-default pitfall).
    tags = [] if tags is None else tags
    parameters = {} if parameters is None else parameters
    if not records:
        project = load_project(path)
        records = project.record_store.list(project.name, tags=tags)
    records_out = []
    for r in records:
        if not set(tags).issubset(set(r.tags)):
            continue
        # all() over an empty generator is True, matching the original
        # np.all([]) behaviour when no parameters are requested.
        if all(np.allclose(v, r.parameters.as_dict()[k], atol=atol, rtol=rtol)
               for k, v in parameters.items()):
            records_out.append(r)
    return records_out
def upgrade(argv):
    # CLI entry point: upgrade an existing Sumatra project's on-disk format.
    # A prior "smt export" must have produced .smt/project_export.json.
    usage = "%(prog)s upgrade"
    description = dedent("""\
        Upgrade an existing Sumatra project. You must have previously run
        "smt export" or the standalone 'export.py' script.""")
    parser = ArgumentParser(usage=usage, description=description)
    args = parser.parse_args(argv)
    project = load_project()
    # Skip the upgrade when the project was created by this exact release;
    # dev builds are always re-upgraded.
    if (hasattr(project, 'sumatra_version')
            and project.sumatra_version == sumatra.__version__
            and "dev" not in sumatra.__version__):
        print("No upgrade needed (project was created with an up-to-date version of Sumatra).")
        sys.exit(1)
    if not os.path.exists(".smt/project_export.json"):
        print("Error: project must have been exported (with the original "
              "version of Sumatra) before upgrading.")
        sys.exit(1)
    # backup and remove .smt
    import shutil
    backup_dir = project.backup()
    shutil.rmtree(".smt")
    # upgrade the project data: rebuild .smt from the exported JSON
    os.mkdir(".smt")
    shutil.copy("%s/project_export.json" % backup_dir, ".smt/project")
    project.sumatra_version = sumatra.__version__
    project.save()
    # upgrade the record store: clear and re-import from the backup
    project.record_store.clear()
    filename = "%s/records_export.json" % backup_dir
    if os.path.exists(filename):
        f = open(filename)
        project.record_store.import_(project.name, f.read())
        f.close()
    else:
        print("Record file not found")
        sys.exit(1)
    print("Project successfully upgraded to Sumatra version {}.".format(project.sumatra_version))
def diff(argv):
    """CLI command: print the differences, if any, between two records."""
    description = dedent("Show the differences, if any, between two records.")
    parser = ArgumentParser(usage="%(prog)s diff [options] LABEL1 LABEL2",
                            description=description)
    parser.add_argument('label1')
    parser.add_argument('label2')
    parser.add_argument('-i', '--ignore', action="append",
                        help="a regular expression pattern for filenames to ignore when evaluating differences in output data. To supply multiple patterns, use the -i option multiple times.")
    parser.add_argument('-l', '--long', action="store_const", const="long",
                        dest="mode", default="short",
                        help="prints full information for each record")
    args = parser.parse_args(argv)
    # argparse leaves 'append' options as None when never supplied.
    ignore_patterns = args.ignore if args.ignore is not None else []
    project = load_project()
    report = project.show_diff(args.label1, args.label2,
                               mode=args.mode,
                               ignore_filenames=ignore_patterns)
    print(report)
def view(argv):
    """CLI command: display the details of one record, optionally with its script."""
    parser = ArgumentParser(usage="%(prog)s view [options] LABEL",
                            description="View detail of a single record.")
    parser.add_argument('label')
    parser.add_argument('-s', '--script', action='store_true',
                        help="show script content.")
    args = parser.parse_args(argv)
    project = load_project()
    record = project.get_record(args.label)
    if not args.script:
        # Default path: long-form text rendering of the record.
        text_formatter = get_formatter('text')([record], project=project)
        print(text_formatter.format('long'))
        return
    # --script: print the main file name and its content between rules.
    rule = 80 * '-'
    print('Main_File\t :', record.main_file)
    print(rule)
    print(record.script_content)
    print(rule)
def load_project(self, path=None): """ Load a Sumatra project. Internally calls sumatra.projects.load_project. Currently this can only be called once, and raises an exception if called again. The loaded project is accessed as `config.project`. Parameters --------- path: str | path-like Project directory. Function searches for an '.smt' directory at that location (i.e. the '.smt' should not be included in the path) Path can be relative; default is to look in the current working directory. Returns ------- None Raises ------ RuntimeError: If called a more than once. """ # DEVNOTE: If we stored the project as a task attribute when a task # is instantiated, it should be possible to support changing projects. # Probably we would want to do the same with RecordStoreView # If we do this, a replace-(almost)-all "config.project" -> # "task.project" is recommended. # Check with the view – maybe the project was needed there first # And if it wasn't loaded, ensure that both smttask and smttask.view # use the same project import smttask.view if smttask.view.config._project: self.project = smttask.view.config.project else: self.project = load_project(path) smttask.view.config.project = self.project
def tag(argv):
    """Tag, or remove a tag, from a record or records.

    Status tags (e.g. finished, failed) are validated against a closed list.
    """
    usage = "%(prog)s tag [options] TAG [LIST]"
    statuses = ('initialized', 'pre_run', 'running', 'finished', 'failed',
                'killed', 'succeeded', 'crashed')
    formatted_statuses = ", ".join((STATUS_FORMAT % s for s in statuses))
    description = dedent("""\
        If TAG contains spaces, it must be enclosed in quotes. LIST should be a
        space-separated list of labels for individual records. If it is omitted,
        only the most recent record will be tagged. If the '-r/--remove' option
        is set, the tag will be removed from the records.
        TAG can be a status from the mutually exclusive list: %s.
        """ % formatted_statuses)
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('tag', metavar='TAG', help="tag to add")
    parser.add_argument('labels', metavar='LIST', nargs='*',
                        help="a space-separated list of records to be tagged")
    parser.add_argument('-r', '--remove', action='store_true',
                        help="remove the tag from the record(s), rather than adding it.")
    args = parser.parse_args(argv)
    # If the tag looks like a status marker, it must name a known status.
    match = STATUS_PATTERN.match(args.tag)
    if match:
        status = match.group(1).lower()
        if status not in statuses:
            raise ValueError("TAG should be one of %s" % formatted_statuses)
    project = load_project()
    operation = project.remove_tag if args.remove else project.add_tag
    # Default to the most recent record when no labels were given.
    targets = args.labels if args.labels else [project.most_recent().label]
    for record_label in targets:
        operation(record_label, args.tag)
def tag(argv):
    """Tag, or remove a tag, from a record or records."""
    description = dedent("""\
        If TAG contains spaces, it must be enclosed in quotes. LIST should be a
        space-separated list of labels for individual records. If it is omitted,
        only the most recent record will be tagged. If the '-r/--remove' option
        is set, the tag will be removed from the records.""")
    parser = ArgumentParser(usage="%(prog)s tag [options] TAG [LIST]",
                            description=description)
    parser.add_argument('tag', metavar='TAG', help="tag to add")
    parser.add_argument('labels', metavar='LIST', nargs='*',
                        help="a space-separated list of records to be tagged")
    parser.add_argument('-r', '--remove', action='store_true',
                        help="remove the tag from the record(s), rather than adding it.")
    args = parser.parse_args(argv)
    project = load_project()
    # Choose the operation up front; both have the same (label, tag) signature.
    operation = project.remove_tag if args.remove else project.add_tag
    # No labels given -> act on the most recent record only.
    targets = args.labels or [project.most_recent().label]
    for record_label in targets:
        operation(record_label, args.tag)
def export_project(output_file):
    """Serialize the current Sumatra project's configuration to ``output_file`` as JSON.

    For ``minor_version == 3`` the project file is already JSON and is copied;
    ``minor_version == 1`` projects are loaded via ``load_simulation_project``;
    all others via the regular ``load_project``.

    Parameters
    ----------
    output_file : str
        Path of the JSON file to write.
    """
    if minor_version == 3:
        # 0.3 stores the project as JSON already; just copy it.
        shutil.copy(".smt/project", ".smt/project_export.json")
        return
    elif minor_version == 1:
        prj = projects.load_simulation_project()
    else:
        prj = projects.load_project()
    state = {
        "name": prj.name,
        "on_changed": prj.on_changed,
        "default_main_file": prj.default_main_file,
        "default_executable": None,
        "default_repository": None,
        "default_launch_mode": None,
        "data_store": dict(prj.data_store.get_state(),
                           type=_get_class_path(prj.data_store)),
        "record_store": dict(type=_get_class_path(prj.record_store)),
        "description": getattr(prj, "description", ""),  # not in 0.1
        "data_label": getattr(prj, "data_label", None),  # not in 0.1
        "_most_recent": getattr(prj, "_most_recent", ""),  # not in 0.1
    }
    if prj.default_executable:
        obj = prj.default_executable
        state["default_executable"] = {"type": _get_class_path(obj),
                                       "path": obj.path,
                                       "version": obj.version}
    if prj.default_repository:
        obj = prj.default_repository
        state["default_repository"] = {"type": _get_class_path(obj),
                                       "url": obj.url}
    if prj.default_launch_mode:
        obj = prj.default_launch_mode
        state["default_launch_mode"] = dict(obj.get_state(),
                                            type=_get_class_path(obj))
    # Django-backed stores use a db file, others a shelf; both end up at
    # .smt/records after the upgrade.
    if prj.record_store.__class__.__name__[:6] == "Django":
        state["record_store"]["db_file"] = ".smt/records"  # prj.record_store._db_file
    else:
        state["record_store"]["shelf_name"] = ".smt/records"  # prj.record_store._shelf_name
    # with-statement closes the file even if json.dump raises; the original
    # open()/close() pair leaked the handle on error.
    with open(output_file, "w") as f:
        json.dump(state, f, indent=2)
def test__load_project__should_return_Project(self):
    """Creating a Project then calling load_project() should yield an
    equivalent project (same name) read back from .smt/project."""
    created = Project("test_project", record_store=MockRecordStore())
    # Constructing the Project must have written the project file to disk.
    assert os.path.exists(".smt/project")
    loaded = load_project()
    self.assertEqual(created.name, loaded.name)
def configure(argv):
    """Modify the settings for the current project."""
    usage = "%(prog)s configure [options]"
    description = "Modify the settings for the current project."
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('-d', '--datapath', metavar='PATH', help="set the path to the directory in which smt will search for datafiles generated by the simulation or analysis.")
    parser.add_argument('-i', '--input', metavar='PATH', default=None, help="set the path to the directory in which smt will search for input datafiles.")
    parser.add_argument('-l', '--addlabel', choices=['cmdline', 'parameters', None], metavar='OPTION', default=None, help="If this option is set, smt will append the record label either to the command line (option 'cmdline') or to the parameter file (option 'parameters'), and will add the label to the datapath when searching for datafiles. It is up to the user to make use of this label inside their program to ensure files are created in the appropriate location.")
    parser.add_argument('-e', '--executable', metavar='PATH', help="set the path to the executable.")
    parser.add_argument('-r', '--repository', help="the URL of a Subversion or Mercurial repository containing the code. This will be checked out/cloned into the current directory.")
    parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulator normally, e.g. init.hoc.")
    parser.add_argument('-c', '--on-changed', help="may be 'store-diff' or 'error': the action to take if the code in the repository or any of the dependencies has changed.", choices=['store-diff', 'error'])
    parser.add_argument('-g', '--labelgenerator', choices=['timestamp', 'uuid'], metavar='OPTION', help="specify which method Sumatra should use to generate labels (options: timestamp, uuid)")
    parser.add_argument('-t', '--timestamp_format', help="the timestamp format given to strftime")
    parser.add_argument('-L', '--launch_mode', choices=['serial', 'distributed', 'slurm-mpi'], help="how computations should be launched.")
    parser.add_argument('-o', '--launch_mode_options', help="extra options for the given launch mode, to be given in quotes with a leading space, e.g. ' --foo=3'")
    parser.add_argument('-p', '--plain', action='store_true', help="pass arguments to the run command straight through to the program.")
    parser.add_argument('-s', '--store', help="Change the record store to the specified path, URL or URI (must be specified). {0}".format(store_arg_help))
    # Only one archiving/mirroring destination may be chosen at a time.
    datastore = parser.add_mutually_exclusive_group()
    datastore.add_argument('-W', '--webdav', metavar='URL', help="specify a webdav URL (with username@password: if needed) as the archiving location for data")
    datastore.add_argument('-A', '--archive', metavar='PATH', help="specify a directory in which to archive output datafiles. If not specified, or if 'false', datafiles are not archived.")
    datastore.add_argument('-M', '--mirror', metavar='URL', help="specify a URL at which your datafiles will be mirrored.")
    args = parser.parse_args(argv)
    project = load_project()
    if args.store:
        new_store = get_record_store(args.store)
        project.change_record_store(new_store)
    # Archive handling: "true" means use the default location; "false" means
    # switch archiving off. The branch taken depends on whether the current
    # data store already archives.
    if args.archive:
        if args.archive.lower() == "true":
            args.archive = ".smt/archive"
        if hasattr(project.data_store, 'archive_store'):
            # current data store is archiving
            if args.archive.lower() == 'false':
                project.data_store = get_data_store("FileSystemDataStore", {"root": project.data_store.root})
            else:
                project.data_store.archive_store = args.archive
        else:
            # current data store is not archiving
            if args.archive.lower() != 'false':
                project.data_store = get_data_store("ArchivingFileSystemDataStore", {"root": args.datapath, "archive": args.archive})
    if args.webdav:
        # should we care about archive migration??
        project.data_store = get_data_store("DavFsDataStore", {"root": args.datapath, "dav_url": args.webdav})
        project.data_store.archive_store = '.smt/archive'
    if args.datapath:
        project.data_store.root = args.datapath
    if args.input:
        project.input_datastore.root = args.input
    if args.repository:
        repository = get_repository(args.repository)
        repository.checkout()
        project.default_repository = repository
    if args.main:
        project.default_main_file = args.main
    if args.executable:
        # PATH may carry trailing options, e.g. "python -u".
        executable_path, executable_options = parse_executable_str(args.executable)
        project.default_executable = get_executable(executable_path, script_file=args.main or project.default_main_file)
        project.default_executable.options = executable_options
    if args.on_changed:
        project.on_changed = args.on_changed
    if args.addlabel:
        project.data_label = args.addlabel
    if args.labelgenerator:
        project.label_generator = args.labelgenerator
    if args.timestamp_format:
        project.timestamp_format = args.timestamp_format
    if args.launch_mode:
        project.default_launch_mode = get_launch_mode(args.launch_mode)()
    if args.launch_mode_options:
        project.default_launch_mode.options = args.launch_mode_options.strip()
    if args.plain:
        project.allow_command_line_parameters = False
    # Persist all accumulated changes.
    project.save()
# Launcher script: wraps an external R simulation in a Sumatra record.
# NOTE: Python 2 syntax (print statements, ConfigParser module name).
import ConfigParser
from numpy.random import multinomial
from numpy import array
import csv
import time
import sys
from sumatra.projects import load_project
from sumatra.parameters import build_parameters
from os import popen

# First command-line argument is the parameter file for the run.
parameter_file = sys.argv[1]
parameters = build_parameters(parameter_file)
parameters.update({"parameter_file":parameter_file})
project = load_project()
record = project.new_record(parameters=parameters,
                            main_file=__file__,
                            reason="reason for running this simulation")
# Expose the record label to the simulation via its parameters.
parameters.update({"sumatra_label": record.label})
start_time = time.time()
# Hard-coded Windows/MSYS path to Rscript; the label is passed so the R code
# can associate its outputs with this Sumatra record.
cmd = r"/c/program files/R/R-2.15.0/bin/Rscript.exe main.r %s %s" % (parameter_file,record.label)
print "Running command", cmd
fin = popen(cmd)
print fin.read()
fin.close()
record.duration = time.time() - start_time
# Any files written to the datastore since the record's timestamp are
# captured as this run's output data.
record.output_data = record.datastore.find_new_data(record.timestamp)
project.add_record(record)
def test_recorded_task(caplog):
    """Integration test for recorded smttask Tasks: first run computes and
    saves, second run reloads from disk, third run reloads from memory; also
    checks record tags and round-trip deserialization."""
    # OPTIMIZATION/TIMING: Running 3 tasks takes ~30 seconds
    # (everything else in this test takes < 100ms)
    projectroot = Path(__file__).parent / "test_project"
    projectpath = str(projectroot.absolute())
    if str(projectpath) not in sys.path:
        sys.path.insert(0, projectpath)
    # Clear the runtime directory and cd into it
    clean_project(projectroot)
    os.makedirs(projectroot / "data", exist_ok=True)
    os.chdir(projectroot)
    # Define some dummy tasks
    from smttask import Task
    from tasks import Square_x
    tasks = [Square_x(x=x, reason="pytest") for x in (1.1, 2.1, 5)]
    # Pinned digests: these change whenever Square_x's definition changes.
    task_digests = ['7ad6c9eb99', '2eb601a664', '1a247b2f98']
    # Delete any leftover cache
    for task in tasks:
        task.clear()
    # Run the tasks
    with caplog.at_level(logging.DEBUG, logger=tasks[0].logger.name):
        caplog.clear()
        for task in tasks:
            task.run(cache=False)  # cache=False to test reloading from disk below
        assert caplog.records[0].msg == "No previously saved result was found; running task."
    # Assert that the outputs were produced at the expected locations
    assert set(os.listdir(projectroot / "data")) == set(["run_dump", "Square_x"])
    for task, digest in zip(tasks, task_digests):
        assert task.hashed_digest == digest
        assert task.unhashed_digests == {}
        assert task.digest == digest
        # data/Square_x holds a symlink; the real file lives in data/run_dump.
        assert os.path.exists(projectroot / f"data/Square_x/{digest}_.json")
        assert os.path.islink(projectroot / f"data/Square_x/{digest}_.json")
        assert os.path.exists(projectroot / f"data/run_dump/Square_x/{digest}_.json")
        assert os.path.isfile(projectroot / f"data/run_dump/Square_x/{digest}_.json")
    # Run the tasks again
    # They should be reloaded from disk
    with caplog.at_level(logging.DEBUG, logger=tasks[0].logger.name):
        for task in tasks:
            caplog.clear()
            task.run(cache=True)  # cache=True => now saved in memory
            assert caplog.records[0].msg == "Loading result of previous run from disk."
    # Run the tasks a 3rd time
    # They should be reloaded from memory
    with caplog.at_level(logging.DEBUG, logger=tasks[0].logger.name):
        for task in tasks:
            caplog.clear()
            task.run()  # cache=False to test
            assert caplog.records[0].msg == "Loading memoized result."
    # Assert that status tags are saved
    # TODO: Test every possible tag value. Will require tasks which fail after each update of `status`
    project = load_project()
    for label in project.get_labels():
        record = project.get_record(label)
        assert record.tags == {'_finished_'}
    # Test deserialization
    new_task = Task.from_desc(task.desc.json())
    # Task recognizes that it is being constructed with the same arguments,
    # and simply returns the preexisting instance
    assert new_task is task
# NOTE: Python 2 script (print statements).
import datetime


def inspectFolder(foldername, projectpath='.',
                  timestamp=datetime.datetime(2010, 1, 1, 11, 14, 40, 915039)):
    """Create a Sumatra record for the existing contents of `foldername`.

    Temporarily points the project's data store at `foldername`, records all
    files newer than `timestamp`, then restores the original data store root.
    The default timestamp is far in the past -- presumably so every file in
    the folder counts as "new" data; TODO confirm.
    """
    project = load_project(projectpath)
    logging.debug('Scan folder %s' %foldername)
    # Remember the original root so it can be restored afterwards.
    initialRoot = project.data_store.root
    project.data_store = datastore.FileSystemDataStore(foldername)
    record = project.new_record(
        main_file=sys.argv[0],
        parameters=foldername,
        executable=sumatra.programs.get_executable(sys.executable),
        reason='Scan folder %s' %foldername
    )
    record.output_data = record.datastore.find_new_data(timestamp)
    project.add_record(record)
    project.save()
    # Restore the project's original data store root.
    project.data_store = datastore.FileSystemDataStore(initialRoot)


if __name__ == '__main__':
    logging.basicConfig(level='INFO')
    projectpath = '.'
    try:
        project = load_project(projectpath)
    except IOError:
        # No project yet at this path: create one named 'deduplication'.
        logging.warning("Creating sumatra project")
        commands.init(['deduplication'])
        project = load_project(projectpath)
    print project.info()
    inspectFolder(sys.argv[1])
if not params['restart']: phi.setValue(phi + nucleus(x0=0.5 * Lx, y0=0.5 * Ly, r0=params['factor'] * 5)) phi.setValue(1., where=phi > 1.) # ## Setup output # ### Setup ouput storage # In[12]: try: from sumatra.projects import load_project project = load_project(os.getcwd()) record = project.get_record(params["sumatra_label"]) output = record.datastore.root except: # either there's no sumatra, no sumatra project, or no sumatra_label # this will be the case if this script is run directly output = os.getcwd() if parallelComm.procID == 0: print "storing results in {0}".format(output) data = dtr.Treant(output) else: class dummyTreant(object): categories = dict() data = dummyTreant()
def init(argv):
    """Create a new project in the current directory."""
    usage = "%(prog)s init [options] NAME"
    description = "Create a new project called NAME in the current directory."
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('project_name', metavar='NAME', help="a short name for the project; should not contain spaces.")
    parser.add_argument('-d', '--datapath', metavar='PATH', default='./Data', help="set the path to the directory in which smt will search for output datafiles generated by the simulation/analysis. Defaults to %(default)s.")
    parser.add_argument('-i', '--input', metavar='PATH', default='/', help="set the path to the directory relative to which input datafile paths will be given. Defaults to the filesystem root.")
    parser.add_argument('-l', '--addlabel', choices=['cmdline', 'parameters', None], metavar='OPTION', default=None, help="If this option is set, smt will append the record label either to the command line (option 'cmdline') or to the parameter file (option 'parameters'), and will add the label to the datapath when searching for datafiles. It is up to the user to make use of this label inside their program to ensure files are created in the appropriate location.")
    parser.add_argument('-e', '--executable', metavar='PATH', help="set the path to the executable. If this is not set, smt will try to infer the executable from the value of the --main option, if supplied, and will try to find the executable from the PATH environment variable, then by searching various likely locations on the filesystem.")
    parser.add_argument('-r', '--repository', help="the URL of a Subversion or Mercurial repository containing the code. This will be checked out/cloned into the current directory.")
    parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulation or analysis normally, e.g. init.hoc.")
    parser.add_argument('-c', '--on-changed', default='error', help="the action to take if the code in the repository or any of the depdendencies has changed. Defaults to %(default)s")  # need to add list of allowed values
    parser.add_argument('-s', '--store', help="Specify the path, URL or URI to the record store (must be specified). This can either be an existing record store or one to be created. {0} Not using the `--store` argument defaults to a DjangoRecordStore with Sqlite in `.smt/records`".format(store_arg_help))
    parser.add_argument('-g', '--labelgenerator', choices=['timestamp', 'uuid'], default='timestamp', metavar='OPTION', help="specify which method Sumatra should use to generate labels (options: timestamp, uuid)")
    parser.add_argument('-t', '--timestamp_format', help="the timestamp format given to strftime", default=TIMESTAMP_FORMAT)
    parser.add_argument('-L', '--launch_mode', choices=['serial', 'distributed', 'slurm-mpi'], default='serial', help="how computations should be launched. Defaults to %(default)s")
    parser.add_argument('-o', '--launch_mode_options', help="extra options for the given launch mode")
    # Only one archiving/mirroring destination may be chosen at a time.
    datastore = parser.add_mutually_exclusive_group()
    datastore.add_argument('-W', '--webdav', metavar='URL', help="specify a webdav URL (with username@password: if needed) as the archiving location for data")
    datastore.add_argument('-A', '--archive', metavar='PATH', help="specify a directory in which to archive output datafiles. If not specified, or if 'false', datafiles are not archived.")
    datastore.add_argument('-M', '--mirror', metavar='URL', help="specify a URL at which your datafiles will be mirrored.")
    args = parser.parse_args(argv)
    # Refuse to initialize on top of an existing project. parser.error raises
    # SystemExit, which is not an Exception, so it propagates past the
    # except clause; only load_project's failure (no project) is swallowed.
    try:
        project = load_project()
        parser.error("A project already exists in directory '{0}'.".format(project.path))
    except Exception:
        pass
    if not os.path.exists(".smt"):
        os.mkdir(".smt")
    if args.repository:
        repository = get_repository(args.repository)
        repository.checkout()
    else:
        repository = get_working_copy().repository  # if no repository is specified, we assume there is a working copy in the current directory.
    if args.executable:
        # PATH may carry trailing options, e.g. "python -u".
        executable_path, executable_options = parse_executable_str(args.executable)
        executable = get_executable(path=executable_path)
        executable.args = executable_options
    elif args.main:
        try:
            executable = get_executable(script_file=args.main)
        except Exception:  # assume unrecognized extension - really need more specific exception type
            # should warn that extension unrecognized
            executable = None
    else:
        executable = None
    if args.store:
        record_store = get_record_store(args.store)
    else:
        record_store = 'default'
    # Select the output data store: webdav > archive > mirror > plain filesystem.
    if args.webdav:
        # should we care about archive migration??
        output_datastore = get_data_store("DavFsDataStore", {"root": args.datapath, "dav_url": args.webdav})
        args.archive = '.smt/archive'
    elif args.archive and args.archive.lower() != 'false':
        if args.archive.lower() == "true":
            args.archive = ".smt/archive"
        args.archive = os.path.abspath(args.archive)
        output_datastore = get_data_store("ArchivingFileSystemDataStore", {"root": args.datapath, "archive": args.archive})
    elif args.mirror:
        output_datastore = get_data_store("MirroredFileSystemDataStore", {"root": args.datapath, "mirror_base_url": args.mirror})
    else:
        output_datastore = get_data_store("FileSystemDataStore", {"root": args.datapath})
    input_datastore = get_data_store("FileSystemDataStore", {"root": args.input})
    if args.launch_mode_options:
        args.launch_mode_options = args.launch_mode_options.strip()
    launch_mode = get_launch_mode(args.launch_mode)(options=args.launch_mode_options)
    project = Project(name=args.project_name,
                      default_executable=executable,
                      default_repository=repository,
                      default_main_file=args.main,  # what if incompatible with executable?
                      default_launch_mode=launch_mode,
                      data_store=output_datastore,
                      record_store=record_store,
                      on_changed=args.on_changed,
                      data_label=args.addlabel,
                      input_datastore=input_datastore,
                      label_generator=args.labelgenerator,
                      timestamp_format=args.timestamp_format)
    # If the record store already knows this project, write a short-format
    # label listing into .smt/labels.
    if os.path.exists('.smt') and project.record_store.has_project(project.name):
        f = open('.smt/labels', 'w')
        f.writelines(project.format_records(tags=None, mode='short', format='text', reverse=False))
        f.close()
    project.save()
def configure(argv):
    """Modify the settings for the current project."""
    usage = "%(prog)s configure [options]"
    description = "Modify the settings for the current project."
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('-d', '--datapath', metavar='PATH', help="set the path to the directory in which smt will search for datafiles generated by the simulation or analysis.")
    parser.add_argument('-i', '--input', metavar='PATH', default=None, help="set the path to the directory in which smt will search for input datafiles.")
    parser.add_argument('-l', '--addlabel', choices=['cmdline', 'parameters', None], metavar='OPTION', default=None, help="If this option is set, smt will append the record label either to the command line (option 'cmdline') or to the parameter file (option 'parameters'), and will add the label to the datapath when searching for datafiles. It is up to the user to make use of this label inside their program to ensure files are created in the appropriate location.")
    parser.add_argument('-e', '--executable', metavar='PATH', help="set the path to the executable.")
    parser.add_argument('-r', '--repository', help="the URL of a Subversion or Mercurial repository containing the code. This will be checked out/cloned into the current directory.")
    parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulator normally, e.g. init.hoc.")
    parser.add_argument('-c', '--on-changed', help="may be 'store-diff' or 'error': the action to take if the code in the repository or any of the dependencies has changed.", choices=['store-diff', 'error'])
    parser.add_argument('-g', '--labelgenerator', choices=['timestamp', 'uuid'], metavar='OPTION', help="specify which method Sumatra should use to generate labels (options: timestamp, uuid)")
    parser.add_argument('-t', '--timestamp_format', help="the timestamp format given to strftime")
    parser.add_argument('-L', '--launch_mode', choices=['serial', 'distributed', 'slurm-mpi'], help="how computations should be launched.")
    parser.add_argument('-o', '--launch_mode_options', help="extra options for the given launch mode, to be given in quotes with a leading space, e.g. ' --foo=3'")
    # --plain / --no-plain form a tri-state: True, False, or None (untouched).
    parser.add_argument('-p', '--plain', dest='plain', action='store_true', help="pass arguments to the 'run' command straight through to the program. Otherwise arguments of the form name=value can be used to overwrite default parameter values.")
    parser.add_argument('--no-plain', dest='plain', action='store_false', help="arguments to the 'run' command of the form name=value will overwrite default parameter values. This is the opposite of the --plain option.")
    parser.add_argument('-s', '--store', help="Change the record store to the specified path, URL or URI (must be specified). {0}".format(store_arg_help))
    # Only one archiving/mirroring destination may be chosen at a time.
    datastore = parser.add_mutually_exclusive_group()
    datastore.add_argument('-W', '--webdav', metavar='URL', help="specify a webdav URL (with username@password: if needed) as the archiving location for data")
    datastore.add_argument('-A', '--archive', metavar='PATH', help="specify a directory in which to archive output datafiles. If not specified, or if 'false', datafiles are not archived.")
    datastore.add_argument('-M', '--mirror', metavar='URL', help="specify a URL at which your datafiles will be mirrored.")
    parser.add_argument('--add-plugin', help="name of a Python module containing one or more plug-ins.")
    parser.add_argument('--remove-plugin', help="name of a plug-in module to remove from the project.")
    args = parser.parse_args(argv)
    project = load_project()
    if args.store:
        new_store = get_record_store(args.store)
        project.change_record_store(new_store)
    if args.datapath:
        project.data_store.root = args.datapath
    # Archive handling: "true" -> default location; "false" -> disable.
    # The branch depends on whether the current data store already archives.
    if args.archive:
        if args.archive.lower() == "true":
            args.archive = ".smt/archive"
        if hasattr(project.data_store, 'archive_store'):
            # current data store is archiving
            if args.archive.lower() == 'false':
                project.data_store = get_data_store("FileSystemDataStore", {"root": project.data_store.root})
            else:
                project.data_store.archive_store = args.archive
        else:
            # current data store is not archiving
            if args.archive.lower() != 'false':
                project.data_store = get_data_store("ArchivingFileSystemDataStore", {"root": project.data_store.root, "archive": args.archive})
    elif args.mirror:
        project.data_store = get_data_store("MirroredFileSystemDataStore", {"root": project.data_store.root, "mirror_base_url": args.mirror})
    elif args.webdav:
        # should we care about archive migration??
        project.data_store = get_data_store("DavFsDataStore", {"root": project.data_store.root, "dav_url": args.webdav})
        project.data_store.archive_store = '.smt/archive'
    if args.input:
        project.input_datastore.root = args.input
    if args.repository:
        repository = get_repository(args.repository)
        repository.checkout()
        project.default_repository = repository
    if args.main:
        project.default_main_file = args.main
    if args.executable:
        # PATH may carry trailing options, e.g. "python -u".
        executable_path, executable_options = parse_executable_str(args.executable)
        project.default_executable = get_executable(executable_path, script_file=args.main or project.default_main_file)
        project.default_executable.options = executable_options
    if args.on_changed:
        project.on_changed = args.on_changed
    if args.addlabel:
        project.data_label = args.addlabel
    if args.labelgenerator:
        project.label_generator = args.labelgenerator
    if args.timestamp_format:
        project.timestamp_format = args.timestamp_format
    if args.launch_mode:
        project.default_launch_mode = get_launch_mode(args.launch_mode)()
    if args.launch_mode_options:
        project.default_launch_mode.options = args.launch_mode_options.strip()
    # None means neither --plain nor --no-plain was given; leave unchanged.
    if args.plain is not None:
        project.allow_command_line_parameters = not args.plain
    if args.add_plugin:
        project.load_plugins(args.add_plugin)
    if args.remove_plugin:
        project.remove_plugins(args.remove_plugin)
    # Persist all accumulated changes.
    project.save()
# Script preamble: parse CLI arguments and derive output/build/project paths.
# The names defined here (output_dir, output_file, build_path, project_path, ...)
# are used further down the file, so they must keep their names.
from sys import argv
from argparse import ArgumentParser

parser = ArgumentParser()
parser.add_argument("states_file")
parser.add_argument("--no_mpi", action="store_true")
parser.add_argument("--id", nargs='?', default="tmp")
args = parser.parse_args()

output_dir = os.path.abspath("tmp")
if args.id != "tmp":
    try:
        from sumatra.projects import load_project
        # Place output inside the Sumatra project's data store, keyed by run id.
        output_dir = os.path.join(os.path.abspath(load_project().data_store.root), args.id)
    except ImportError:
        # Sumatra not installed: keep writing to the local tmp directory.
        pass
current_path = os.path.dirname(os.path.realpath(__file__))
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
states_file = argv[1]
# Output file keeps the states file's basename, relocated into output_dir.
output_file = os.path.join(output_dir, os.path.split(states_file)[-1])
build_path = os.path.abspath(os.path.join(current_path, "..", "..", "..", "build"))
project_path = os.path.abspath(os.path.join(current_path, "..", ".."))
def run(argv):
    """Run a simulation or analysis.

    Parameters
    ----------
    argv : list of str
        Command-line arguments following the ``run`` sub-command.
        Recognised options are consumed here; everything else is passed
        through to the simulation/analysis script.
    """
    usage = "%(prog)s run [options] [arg1, ...] [param=value, ...]"
    description = dedent("""\
        The list of arguments will be passed on to the simulation/analysis script. It should normally contain at least the name of a parameter file, but can also contain input files, flags, etc. If the parameter file should be in a format that Sumatra understands (see documentation), then the parameters will be stored to allow future searching, comparison, etc. of records. For convenience, it is possible to specify a file with default parameters and then specify those parameters that are different from the default values on the command line with any number of param=value pairs (note no space around the equals sign).""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('-v', '--version', metavar='REV', help="use version REV of the code (if this is not the same as the working copy, it will be checked out of the repository). If this option is not specified, the most recent version in the repository will be used. If there are changes in the working copy, the user will be prompted to commit them first")
    parser.add_argument('-l', '--label', help="specify a label for the experiment. If no label is specified, one will be generated automatically.")
    parser.add_argument('-r', '--reason', help="explain the reason for running this simulation/analysis.")
    parser.add_argument('-e', '--executable', metavar='PATH', help="Use this executable for this run. If not specified, the project's default executable will be used.")
    parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulation/analysis normally, e.g. init.hoc. If not specified, the project's default will be used.")
    parser.add_argument('-n', '--num_processes', metavar='N', type=int, help="run a distributed computation on N processes using MPI. \nIf this option is not used, or if N=0, a normal, serial simulation/analysis is run.")
    parser.add_argument('-t', '--tag', help="tag you want to add to the project")
    parser.add_argument('-D', '--debug', action='store_true', help="print debugging information.")
    parser.add_argument('-i', '--stdin', help="specify the name of a file that should be connected to standard input.")
    parser.add_argument('-o', '--stdout', help="specify the name of a file that should be connected to standard output.")
    # Unrecognised arguments (parameter files, param=value pairs, script
    # flags) are collected in user_args and handed to the script.
    args, user_args = parser.parse_known_args(argv)
    user_args = [str(arg) for arg in user_args]  # unifying types for Py2/Py3
    if args.debug:
        logger.setLevel(logging.DEBUG)
    project = load_project()
    parameters, input_data, script_args = parse_arguments(user_args, project.input_datastore, args.stdin, args.stdout, project.allow_command_line_parameters)
    # parse_arguments may return several parameter sets; only zero or one
    # parameter file is currently supported.
    if len(parameters) == 0:
        parameters = {}
    elif len(parameters) == 1:
        parameters = parameters[0]
    else:
        parser.error("Only a single parameter file allowed.")  # for now
    # Resolve which executable to launch: explicit -e wins, then one
    # inferred from the -m script, then the project default.
    if args.executable:
        executable_path, executable_options = parse_executable_str(args.executable)
        executable = get_executable(path=executable_path)
        executable.options = executable_options
    elif args.main:
        executable = get_executable(script_file=args.main)  # should we take the options from project.default_executable, if they match?
    else:
        executable = 'default'
    if args.num_processes:
        # Only launch modes that expose an 'n' attribute (e.g. MPI-based
        # distributed modes) can honour --num_processes.
        if hasattr(project.default_launch_mode, 'n'):
            project.default_launch_mode.n = args.num_processes
        else:
            parser.error("Your current launch mode does not support using multiple processes.")
    reason = args.reason or ''
    if reason:
        # Strip shell-quoting characters the user may have wrapped the
        # reason string in.
        reason = reason.strip('\'"')
    label = args.label
    try:
        run_label = project.launch(parameters, input_data, script_args, label=label, reason=reason, executable=executable, main_file=args.main or 'default', version=args.version or 'current')
    except (UncommittedModificationsError, MissingInformationError) as err:
        # Launch refused (e.g. dirty working copy): report and exit non-zero.
        print(err)
        sys.exit(1)
    if args.tag:
        project.add_tag(run_label, args.tag)
def run(argv):
    """Run a simulation or analysis.

    Parameters
    ----------
    argv : list of str
        Command-line arguments following the ``run`` sub-command.
        Recognised options are consumed here; everything else is passed
        through to the simulation/analysis script.
    """
    usage = "%(prog)s run [options] [arg1, ...] [param=value, ...]"
    description = dedent("""\
        The list of arguments will be passed on to the simulation/analysis script. It should normally contain at least the name of a parameter file, but can also contain input files, flags, etc. If the parameter file should be in a format that Sumatra understands (see documentation), then the parameters will be stored to allow future searching, comparison, etc. of records. For convenience, it is possible to specify a file with default parameters and then specify those parameters that are different from the default values on the command line with any number of param=value pairs (note no space around the equals sign).""")
    parser = ArgumentParser(usage=usage, description=description)
    parser.add_argument('-v', '--version', metavar='REV', help="use version REV of the code (if this is not the same as the working copy, it will be checked out of the repository). If this option is not specified, the most recent version in the repository will be used. If there are changes in the working copy, the user will be prompted to commit them first")
    parser.add_argument('-l', '--label', help="specify a label for the experiment. If no label is specified, one will be generated automatically.")
    parser.add_argument('-r', '--reason', help="explain the reason for running this simulation/analysis.")
    parser.add_argument('-e', '--executable', metavar='PATH', help="Use this executable for this run. If not specified, the project's default executable will be used.")
    parser.add_argument('-m', '--main', help="the name of the script that would be supplied on the command line if running the simulation/analysis normally, e.g. init.hoc. If not specified, the project's default will be used.")
    parser.add_argument('-n', '--num_processes', metavar='N', type=int, help="run a distributed computation on N processes using MPI. \nIf this option is not used, or if N=0, a normal, serial simulation/analysis is run.")
    parser.add_argument('-t', '--tag', help="tag you want to add to the project")
    parser.add_argument('-D', '--debug', action='store_true', help="print debugging information.")
    parser.add_argument('-i', '--stdin', help="specify the name of a file that should be connected to standard input.")
    parser.add_argument('-o', '--stdout', help="specify the name of a file that should be connected to standard output.")
    args, user_args = parser.parse_known_args(argv)
    # CONSISTENCY FIX: the sibling implementation of run() normalizes the
    # pass-through arguments to str; this copy omitted that step, so
    # unicode/bytes arguments could reach parse_arguments unnormalized
    # under Python 2.
    user_args = [str(arg) for arg in user_args]  # unifying types for Py2/Py3
    if args.debug:
        logger.setLevel(logging.DEBUG)
    project = load_project()
    parameters, input_data, script_args = parse_arguments(user_args, project.input_datastore, args.stdin, args.stdout, project.allow_command_line_parameters)
    # Only zero or one parameter file is currently supported.
    if len(parameters) == 0:
        parameters = {}
    elif len(parameters) == 1:
        parameters = parameters[0]
    else:
        parser.error("Only a single parameter file allowed.")  # for now
    # Resolve which executable to launch: explicit -e wins, then one
    # inferred from the -m script, then the project default.
    if args.executable:
        executable_path, executable_options = parse_executable_str(args.executable)
        executable = get_executable(path=executable_path)
        executable.options = executable_options
    elif args.main:
        executable = get_executable(script_file=args.main)  # should we take the options from project.default_executable, if they match?
    else:
        executable = 'default'
    if args.num_processes:
        # Only launch modes exposing an 'n' attribute can honour --num_processes.
        if hasattr(project.default_launch_mode, 'n'):
            project.default_launch_mode.n = args.num_processes
        else:
            parser.error("Your current launch mode does not support using multiple processes.")
    reason = args.reason or ''
    if reason:
        # Strip shell-quoting characters the user may have wrapped the reason in.
        reason = reason.strip('\'"')
    label = args.label
    try:
        run_label = project.launch(parameters, input_data, script_args, label=label, reason=reason, executable=executable, main_file=args.main or 'default', version=args.version or 'current')
    except (UncommittedModificationsError, MissingInformationError) as err:
        # Launch refused (e.g. dirty working copy): report and exit non-zero.
        print(err)
        sys.exit(1)
    if args.tag:
        project.add_tag(run_label, args.tag)