def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS):
    """Load the config file.

    By default, errors in loading config are handled, and a warning
    printed on screen. For testing, the suppress_errors option is set
    to False, so errors will make tests fail.

    `suppress_errors` default value is to be `None` in which case the
    behavior default to the one of `traitlets.Application`.

    The default value can be set :
       - to `False` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '0', or 'false' (case insensitive).
       - to `True` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '1' or 'true' (case insensitive).
       - to `None` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset.

    Any other value are invalid, and will make IPython exit with a
    non-zero return code.
    """
    self.log.debug("Searching path %s for config files", self.config_file_paths)
    base_config = 'ipython_config.py'
    self.log.debug("Attempting to load config file: %s" % base_config)
    try:
        if suppress_errors is not None:
            # Temporarily override the class-level flag to honor the
            # requested error-suppression behavior.
            old_value = Application.raise_config_file_errors
            Application.raise_config_file_errors = not suppress_errors
        Application.load_config_file(
            self,
            base_config,
            path=self.config_file_paths,
        )
    except ConfigFileNotFound:
        # ignore errors loading parent
        self.log.debug("Config file %s not found", base_config)
    finally:
        # BUG FIX: restore the flag even when a config error propagates
        # (raise_config_file_errors=True), so the temporary override can
        # never leak to other Application instances.
        if suppress_errors is not None:
            Application.raise_config_file_errors = old_value

    for config_file_name in self.config_files:
        if not config_file_name or config_file_name == base_config:
            continue
        # BUG FIX: log the file actually being attempted (the loop variable
        # config_file_name), not the unrelated self.config_file_name trait.
        self.log.debug("Attempting to load config file: %s" % config_file_name)
        try:
            Application.load_config_file(
                self,
                config_file_name,
                path=self.config_file_paths,
            )
        except ConfigFileNotFound:
            # Only warn if the default config file was NOT being used.
            if config_file_name in self.config_file_specified:
                msg = self.log.warning
            else:
                msg = self.log.debug
            msg("Config file not found, skipping: %s", config_file_name)
        except Exception:
            # For testing purposes.
            if not suppress_errors:
                raise
            # BUG FIX: report the failing file, not self.config_file_name.
            self.log.warning("Error loading config file: %s" % config_file_name,
                             exc_info=True)
def test_logger_adapter(caplog, capsys):
    """An Application should accept a LoggerAdapter and use its extra context."""
    base_logger = logging.getLogger("Application")
    wrapped = logging.LoggerAdapter(base_logger, {"key": "adapted"})
    application = Application(log=wrapped, log_level=logging.INFO)
    application.log_format = "%(key)s %(message)s"
    application.log.info("test message")
    captured = capsys.readouterr()
    assert "adapted test message" in captured.err
def test_logger_adapter(self):
    """An Application should accept a LoggerAdapter and use its extra context."""
    base_logger = logging.getLogger("test_logger_adapter")
    wrapped = logging.LoggerAdapter(base_logger, {"key": "adapted"})
    with self.assertLogs(base_logger, logging.INFO) as captured:
        application = Application(log=wrapped, log_level=logging.INFO)
        application.log_format = "%(key)s %(message)s"
        application.log.info("test message")
    assert "adapted test message" in "\n".join(captured.output)
def load_config_file(self, suppress_errors=IPYTHON_SUPPRESS_CONFIG_ERRORS):
    """Load the config file.

    By default, errors in loading config are handled, and a warning
    printed on screen. For testing, the suppress_errors option is set
    to False, so errors will make tests fail.

    `suppress_errors` default value is to be `None` in which case the
    behavior default to the one of `traitlets.Application`.

    The default value can be set :
       - to `False` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '0', or 'false' (case insensitive).
       - to `True` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '1' or 'true' (case insensitive).
       - to `None` by setting 'IPYTHON_SUPPRESS_CONFIG_ERRORS' environment variable to '' (empty string) or leaving it unset.

    Any other value are invalid, and will make IPython exit with a
    non-zero return code.
    """
    # (Docstring typo fixed: "supress_errors" -> "suppress_errors".)
    self.log.debug("Searching path %s for config files", self.config_file_paths)
    base_config = "ipython_config.py"
    self.log.debug("Attempting to load config file: %s" % base_config)
    try:
        if suppress_errors is not None:
            # Temporarily override the class-level flag to honor the
            # requested error-suppression behavior.
            old_value = Application.raise_config_file_errors
            Application.raise_config_file_errors = not suppress_errors
        Application.load_config_file(
            self,
            base_config,
            path=self.config_file_paths,
        )
    except ConfigFileNotFound:
        # ignore errors loading parent
        self.log.debug("Config file %s not found", base_config)
    finally:
        # BUG FIX: restore the flag even when a config error propagates
        # (raise_config_file_errors=True), so the temporary override can
        # never leak to other Application instances.
        if suppress_errors is not None:
            Application.raise_config_file_errors = old_value

    for config_file_name in self.config_files:
        if not config_file_name or config_file_name == base_config:
            continue
        # BUG FIX: log the file actually being attempted (the loop variable
        # config_file_name), not the unrelated self.config_file_name trait.
        self.log.debug("Attempting to load config file: %s" % config_file_name)
        try:
            Application.load_config_file(
                self,
                config_file_name,
                path=self.config_file_paths,
            )
        except ConfigFileNotFound:
            # Only warn if the default config file was NOT being used.
            if config_file_name in self.config_file_specified:
                msg = self.log.warning
            else:
                msg = self.log.debug
            msg("Config file not found, skipping: %s", config_file_name)
        except Exception:
            # For testing purposes.
            if not suppress_errors:
                raise
            # BUG FIX: report the failing file, not self.config_file_name.
            self.log.warning("Error loading config file: %s" % config_file_name,
                             exc_info=True)
def pylab(self, line=''):
    """Load numpy and matplotlib to work interactively.

    This function lets you activate pylab (matplotlib, numpy and
    interactive support) at any point during an IPython session.

    %pylab makes the following imports::

        import numpy
        import matplotlib
        from matplotlib import pylab, mlab, pyplot
        np = numpy
        plt = pyplot

        from IPython.display import display
        from IPython.core.pylabtools import figsize, getfigs

        from pylab import *
        from numpy import *

    If you pass `--no-import-all`, the last two `*` imports will be excluded.

    See the %matplotlib magic for more details about activating matplotlib
    without affecting the interactive namespace.
    """
    args = magic_arguments.parse_argstring(self.pylab, line)
    if args.no_import_all is not None:
        # An explicit command-line flag wins; invert the "no import" sense.
        import_all = not args.no_import_all
    elif Application.initialized():
        # Fall back to the running application's preference, defaulting to
        # True when the app does not define pylab_import_all.
        import_all = getattr(Application.instance(), 'pylab_import_all', True)
    else:
        # Nothing specified and no app running: import everything.
        import_all = True

    gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
    self._show_matplotlib_backend(args.gui, backend)
    print(
        "%pylab is deprecated, use %matplotlib inline and import the required libraries."
    )
    print("Populating the interactive namespace from numpy and matplotlib")
    if clobbered:
        warn(
            "pylab import has clobbered these variables: %s" % clobbered
            + "\n`%matplotlib` prevents importing * from pylab and numpy"
        )
def pylab(self, line=""):
    """Load numpy and matplotlib to work interactively.

    This function lets you activate pylab (matplotlib, numpy and
    interactive support) at any point during an IPython session.

    %pylab makes the following imports::

        import numpy
        import matplotlib
        from matplotlib import pylab, mlab, pyplot
        np = numpy
        plt = pyplot

        from IPython.display import display
        from IPython.core.pylabtools import figsize, getfigs

        from pylab import *
        from numpy import *

    If you pass `--no-import-all`, the last two `*` imports will be excluded.

    See the %matplotlib magic for more details about activating matplotlib
    without affecting the interactive namespace.
    """
    args = magic_arguments.parse_argstring(self.pylab, line)
    if args.no_import_all is not None:
        # An explicit command-line flag wins; invert the "no import" sense.
        import_all = not args.no_import_all
    elif Application.initialized():
        # Fall back to the running application's preference, defaulting to
        # True when the app does not define pylab_import_all.
        import_all = getattr(Application.instance(), "pylab_import_all", True)
    else:
        # Nothing specified and no app running: import everything.
        import_all = True

    gui, backend, clobbered = self.shell.enable_pylab(args.gui, import_all=import_all)
    self._show_matplotlib_backend(args.gui, backend)
    print("Populating the interactive namespace from numpy and matplotlib")
    if clobbered:
        warn(
            "pylab import has clobbered these variables: %s" % clobbered
            + "\n`%matplotlib` prevents importing * from pylab and numpy"
        )
def test_logging_config(tmp_path, capsys):
    """We should be able to configure additional log handlers."""
    log_file = tmp_path / "log_file"
    logging_config = {
        "version": 1,
        "handlers": {
            "file": {
                "class": "logging.FileHandler",
                "level": "DEBUG",
                "filename": str(log_file),
            },
        },
        "loggers": {
            "Application": {
                "level": "DEBUG",
                "handlers": ["console", "file"],
            },
        },
    }
    app = Application(logging_config=logging_config)

    # the default "console" handler + our new "file" handler
    assert len(app.log.handlers) == 2

    # emit one message per level of interest
    app.log.info("info")
    app.log.warning("warn")

    # both messages should have reached the file handler...
    assert log_file.read_text() == "info\nwarn\n"

    # ...while only the warning reaches stderr via the default console handler
    assert capsys.readouterr().err == "[Application] WARNING | warn\n"
def enable_gui(gui, kernel=None):
    """Enable integration with a given GUI"""
    if gui not in loop_map:
        raise ValueError(
            "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys())
        )
    if kernel is None:
        # No kernel given: try to pull one off the running Application.
        if Application.initialized():
            kernel = getattr(Application.instance(), 'kernel', None)
        if kernel is None:
            raise RuntimeError(
                "You didn't specify a kernel,"
                " and no IPython Application with a kernel appears to be running."
            )
    requested_loop = loop_map[gui]
    current_loop = kernel.eventloop
    # Refuse to swap in a second, different eventloop.
    if requested_loop and current_loop is not None and current_loop is not requested_loop:
        raise RuntimeError("Cannot activate multiple GUI eventloops")
    kernel.eventloop = requested_loop
def load_config():
    """Merge every discovered jupyter_notebook_config file into one Config."""
    stem, _ext = os.path.splitext("jupyter_notebook_config.py")
    merged = Config()
    for fragment in Application._load_config_files(stem, path=config_file_paths()):
        merged.merge(fragment)
    return merged
def load_config_file(self, suppress_errors=True):
    """Load the config file.

    By default, errors in loading config are handled, and a warning
    printed on screen. For testing, the suppress_errors option is set
    to False, so errors will make tests fail.

    :param suppress_errors: when True (default), unexpected errors while
        loading a config file are logged as warnings instead of raised.
    """
    self.log.debug("Searching path %s for config files", self.config_file_paths)
    base_config = 'ipython_config.py'
    self.log.debug("Attempting to load config file: %s" % base_config)
    try:
        Application.load_config_file(
            self, base_config,
            path=self.config_file_paths
        )
    except ConfigFileNotFound:
        # ignore errors loading parent
        self.log.debug("Config file %s not found", base_config)

    for config_file_name in self.config_files:
        if not config_file_name or config_file_name == base_config:
            continue
        # BUG FIX: log the file actually being attempted (the loop variable
        # config_file_name), not the unrelated self.config_file_name trait.
        self.log.debug("Attempting to load config file: %s" % config_file_name)
        try:
            Application.load_config_file(
                self, config_file_name,
                path=self.config_file_paths
            )
        except ConfigFileNotFound:
            # Only warn if the default config file was NOT being used.
            if config_file_name in self.config_file_specified:
                msg = self.log.warning
            else:
                msg = self.log.debug
            msg("Config file not found, skipping: %s", config_file_name)
        except Exception:
            # For testing purposes.
            if not suppress_errors:
                raise
            # BUG FIX: report the failing file, not self.config_file_name.
            self.log.warning("Error loading config file: %s" % config_file_name,
                             exc_info=True)
def load_config_file(self, suppress_errors=True):
    """Load the config file.

    By default, errors in loading config are handled, and a warning
    printed on screen. For testing, the suppress_errors option is set
    to False, so errors will make tests fail.

    :param suppress_errors: when True (default), unexpected errors while
        loading a config file are logged as warnings instead of raised.
    """
    self.log.debug("Searching path %s for config files", self.config_file_paths)
    base_config = 'ipython_config.py'
    self.log.debug("Attempting to load config file: %s" % base_config)
    try:
        Application.load_config_file(
            self, base_config,
            path=self.config_file_paths
        )
    except ConfigFileNotFound:
        # ignore errors loading parent
        self.log.debug("Config file %s not found", base_config)

    for config_file_name in self.config_files:
        if not config_file_name or config_file_name == base_config:
            continue
        # BUG FIX: log the file actually being attempted (the loop variable
        # config_file_name), not the unrelated self.config_file_name trait.
        self.log.debug("Attempting to load config file: %s" % config_file_name)
        try:
            Application.load_config_file(
                self, config_file_name,
                path=self.config_file_paths
            )
        except ConfigFileNotFound:
            # Only warn if the default config file was NOT being used.
            if config_file_name in self.config_file_specified:
                # FIX: Logger.warn is a deprecated alias (removed in
                # Python 3.13); use Logger.warning.
                msg = self.log.warning
            else:
                msg = self.log.debug
            msg("Config file not found, skipping: %s", config_file_name)
        except Exception:
            # For testing purposes.
            if not suppress_errors:
                raise
            # FIX: log.warn -> log.warning, and report the failing file
            # rather than self.config_file_name.
            self.log.warning("Error loading config file: %s" % config_file_name,
                             exc_info=True)
def launch_scheduler(in_addr, out_addr, mon_addr, not_addr, reg_addr, config=None,
                     logname='root', log_url=None, loglevel=logging.DEBUG,
                     identity=b'task', in_thread=False):
    """Create and start a TaskScheduler wired to the given ZMQ addresses.

    Binds ROUTER streams for client input (``in_addr``) and engine output
    (``out_addr``), connects PUB/SUB streams for monitoring and
    notifications, and a DEALER stream for registration queries, then
    starts the scheduler (and, when standalone, its IOLoop).

    :param config: plain dict to be re-wrapped into a ``Config``, or None.
    :param log_url: if given, logging is forwarded over ZMQ to this URL.
    :param identity: byte prefix for the in/out socket IDENTITY options.
    :param in_thread: True when running inside the parent process; reuses the
        shared Context/IOLoop and skips starting the loop.
    """
    ZMQStream = zmqstream.ZMQStream

    if config:
        # unwrap dict back into Config
        config = Config(config)

    if in_thread:
        # use instance() to get the same Context/Loop as our parent
        ctx = zmq.Context.instance()
        # NOTE(review): IOLoop.instance() is the legacy accessor; newer
        # tornado/pyzmq code uses IOLoop.current() — confirm target versions.
        loop = ioloop.IOLoop.instance()
    else:
        # in a process, don't use instance()
        # for safety with multiprocessing
        ctx = zmq.Context()
        loop = ioloop.IOLoop()

    # Client-facing ROUTER: unbounded HWM so submissions are never dropped.
    ins = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(ins, 0)
    ins.setsockopt(zmq.IDENTITY, identity + b'_in')
    ins.bind(in_addr)

    # Engine-facing ROUTER, same unbounded HWM.
    outs = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(outs, 0)
    outs.setsockopt(zmq.IDENTITY, identity + b'_out')
    outs.bind(out_addr)

    # PUB stream for publishing monitoring messages.
    mons = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
    util.set_hwm(mons, 0)
    mons.connect(mon_addr)

    # SUB stream subscribed to everything for notifications.
    nots = zmqstream.ZMQStream(ctx.socket(zmq.SUB), loop)
    nots.setsockopt(zmq.SUBSCRIBE, b'')
    nots.connect(not_addr)

    # DEALER stream for registration queries.
    querys = ZMQStream(ctx.socket(zmq.DEALER), loop)
    querys.connect(reg_addr)

    # setup logging.
    if in_thread:
        # share the parent application's logger
        log = Application.instance().log
    else:
        if log_url:
            log = connect_logger(logname, ctx, log_url, root="scheduler",
                                 loglevel=loglevel)
        else:
            log = local_logger(logname, loglevel)

    scheduler = TaskScheduler(client_stream=ins, engine_stream=outs,
                              mon_stream=mons, notifier_stream=nots,
                              query_stream=querys,
                              loop=loop, log=log,
                              config=config)
    scheduler.start()
    if not in_thread:
        # Standalone process: run the event loop until interrupted.
        try:
            loop.start()
        except KeyboardInterrupt:
            scheduler.log.critical("Interrupted, exiting...")
def launch_scheduler(in_addr, out_addr, mon_addr, not_addr, reg_addr, config=None,
                     logname='root', log_url=None, loglevel=logging.DEBUG,
                     identity=b'task', in_thread=False):
    """Create and start a TaskScheduler wired to the given ZMQ addresses.

    Binds ROUTER streams for client input (``in_addr``) and engine output
    (``out_addr``), connects PUB/SUB streams for monitoring and
    notifications, and a DEALER stream for registration queries, then
    starts the scheduler (and, when standalone, its IOLoop).

    :param config: plain dict to be re-wrapped into a ``Config``, or None.
    :param log_url: if given, logging is forwarded over ZMQ to this URL.
    :param identity: byte prefix for the in/out socket IDENTITY options.
    :param in_thread: True when running inside the parent process; reuses the
        shared Context/IOLoop and skips starting the loop.
    """
    ZMQStream = zmqstream.ZMQStream

    if config:
        # unwrap dict back into Config
        config = Config(config)

    if in_thread:
        # use instance() to get the same Context/Loop as our parent
        ctx = zmq.Context.instance()
        loop = ioloop.IOLoop.current()
    else:
        # in a process, don't use instance()
        # for safety with multiprocessing
        ctx = zmq.Context()
        loop = ioloop.IOLoop()

    # Client-facing ROUTER: unbounded HWM so submissions are never dropped.
    ins = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(ins, 0)
    ins.setsockopt(zmq.IDENTITY, identity + b'_in')
    ins.bind(in_addr)

    # Engine-facing ROUTER, same unbounded HWM.
    outs = ZMQStream(ctx.socket(zmq.ROUTER), loop)
    util.set_hwm(outs, 0)
    outs.setsockopt(zmq.IDENTITY, identity + b'_out')
    outs.bind(out_addr)

    # PUB stream for publishing monitoring messages.
    mons = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
    util.set_hwm(mons, 0)
    mons.connect(mon_addr)

    # SUB stream subscribed to everything for notifications.
    nots = zmqstream.ZMQStream(ctx.socket(zmq.SUB), loop)
    nots.setsockopt(zmq.SUBSCRIBE, b'')
    nots.connect(not_addr)

    # DEALER stream for registration queries.
    querys = ZMQStream(ctx.socket(zmq.DEALER), loop)
    querys.connect(reg_addr)

    # setup logging.
    if in_thread:
        # share the parent application's logger
        log = Application.instance().log
    else:
        if log_url:
            log = connect_logger(logname, ctx, log_url, root="scheduler",
                                 loglevel=loglevel)
        else:
            log = local_logger(logname, loglevel)

    scheduler = TaskScheduler(client_stream=ins, engine_stream=outs,
                              mon_stream=mons, notifier_stream=nots,
                              query_stream=querys,
                              loop=loop, log=log,
                              config=config)
    scheduler.start()
    if not in_thread:
        # Standalone process: run the event loop until interrupted.
        try:
            loop.start()
        except KeyboardInterrupt:
            scheduler.log.critical("Interrupted, exiting...")
def test_logging_teardown_on_error(capsys, caplogconfig):
    """Ensure we don't try to open logs in order to close them (See #722).

    If you try to configure logging handlers whilst Python is shutting down
    you may get traceback.
    """
    # An app that never configured logging must not touch logging on teardown.
    app = Application()
    del app
    assert len(caplogconfig) == 0  # logging was not configured
    _out, err = capsys.readouterr()
    assert "Traceback" not in err

    # Sanity check: an app that *did* configure logging does trigger the hook,
    # proving the assertion above is meaningful (no false negatives).
    app = Application()
    app._logging_configured = True  # make it look like logging was configured
    del app
    assert len(caplogconfig) == 1  # logging was configured
def is_interactive(self):
    """Determine if the user requested interactive mode."""
    # The plain interpreter records -i in sys.flags, so trust that first.
    if sys.flags.interactive:
        return True

    # IPython does not set sys.flags for -i; detect an already-imported
    # IPython via the __IPYTHON__ builtin instead.
    if not hasattr(builtins, '__IPYTHON__'):
        return False

    # An IPython is present: consult its application singleton.
    try:
        try:
            # ipython >=3.0
            from traitlets.config.application import Application as App
        except ImportError:
            # ipython <3.0
            from IPython.config.application import Application as App
        return App.initialized() and App.instance().interact
    except (ImportError, AttributeError):
        return False
def load_default_config(ipython_dir=None):
    """Load the default config file from the default ipython_dir.

    This is useful for embedded shells.
    """
    if ipython_dir is None:
        ipython_dir = get_ipython_dir()
    profile_dir = os.path.join(ipython_dir, 'profile_default')
    merged = Config()
    for fragment in Application._load_config_files("ipython_config", path=profile_dir):
        merged.update(fragment)
    return merged
def is_interactive(self):
    """
    Determine if the user requested interactive mode.
    """
    # The plain interpreter records -i in sys.flags, so trust that first.
    if sys.flags.interactive:
        return True

    # IPython does not set sys.flags for -i; detect an already-imported
    # IPython via the __IPYTHON__ builtin (six keeps this py2/py3 portable).
    if '__IPYTHON__' not in dir(six.moves.builtins):
        return False

    # An IPython is present: consult its application singleton.
    try:
        try:
            # ipython >=3.0
            from traitlets.config.application import Application as App
        except ImportError:
            # ipython <3.0
            from IPython.config.application import Application as App
        return App.initialized() and App.instance().interact
    except (ImportError, AttributeError):
        return False
def get_common_scheduler_streams(mon_addr, not_addr, reg_addr, config, logname,
                                 log_url, loglevel, in_thread):
    """Build the ZMQ context, IOLoop, shared streams, and logger used by
    scheduler launchers.

    Connects a PUB stream for monitoring (``mon_addr``), a subscribe-all SUB
    stream for notifications (``not_addr``), and a DEALER stream for
    registration queries (``reg_addr``).

    :param config: plain dict to be re-wrapped into a ``Config``, or falsy.
    :param in_thread: True when running inside the parent process; reuses the
        shared Context/IOLoop instead of creating fresh ones.
    :returns: tuple ``(config, ctx, loop, mons, nots, querys, log)``.
    """
    if config:
        # unwrap dict back into Config
        config = Config(config)

    if in_thread:
        # use instance() to get the same Context/Loop as our parent
        ctx = zmq.Context.instance()
        loop = ioloop.IOLoop.current()
    else:
        # in a process, don't use instance()
        # for safety with multiprocessing
        ctx = zmq.Context()
        loop = ioloop.IOLoop()

    # PUB stream for publishing monitoring messages.
    mons = zmqstream.ZMQStream(ctx.socket(zmq.PUB), loop)
    mons.connect(mon_addr)

    # SUB stream subscribed to everything for notifications.
    nots = zmqstream.ZMQStream(ctx.socket(zmq.SUB), loop)
    nots.setsockopt(zmq.SUBSCRIBE, b'')
    nots.connect(not_addr)

    # DEALER stream for registration queries.
    # NOTE(review): uses the bare name ZMQStream — presumably imported at
    # module level; confirm it matches zmqstream.ZMQStream used above.
    querys = ZMQStream(ctx.socket(zmq.DEALER), loop)
    querys.connect(reg_addr)

    # setup logging.
    if in_thread:
        # share the parent application's logger
        log = Application.instance().log
    else:
        if log_url:
            log = connect_logger(logname, ctx, log_url, root="scheduler",
                                 loglevel=loglevel)
        else:
            log = local_logger(logname, loglevel)
    return config, ctx, loop, mons, nots, querys, log
def get_app():
    """Return the global :class:`Application` singleton."""
    return Application.instance()
def __init__(self, course_dir=None, auto=False) -> 'Course':
    """Initialize a course from a config file.
    :param course_dir: The directory your course. If none, defaults to current working directory.
    :type course_dir: str
    :param auto: Suppress all prompts, automatically answering yes.
    :type auto: bool

    :returns: A Course object for performing operations on an entire course at once.
    :rtype: Course
    """
    # NOTE(review): annotating __init__ with -> 'Course' is unconventional
    # (__init__ returns None); left unchanged here.

    #=======================================#
    #        Working Directory & Git Sync   #
    #=======================================#

    # Set up the working directory. If no course_dir has been specified, then it
    # is assumed that this is the course directory.
    self.working_directory = course_dir if course_dir is not None else os.getcwd()

    repo = Repo(self.working_directory)

    # Before we do ANYTHING, make sure our working directory is clean with no
    # untracked files! Unless we're running a automated job, in which case we
    # don't want to fail for an unexpected reason.
    if (repo.is_dirty() or repo.untracked_files) and (not auto):
        continue_with_dirty = input("""
    Your repository is currently in a dirty state (modifications or
    untracked changes are present). We strongly suggest that you resolve
    these before proceeding. Continue? [y/n]:""")
        # if they didn't say no, exit
        if continue_with_dirty.lower() != 'y':
            sys.exit("Exiting...")

    # PRINT BANNER
    print(AsciiTable([['Initializing Course and Pulling Instructors Repo']]).table)

    # pull the latest copy of the repo
    utils.pull_repo(repo_dir=self.working_directory)

    # Make sure we're running our nbgrader commands within our instructors repo.
    # this will contain our gradebook database, our source directory, and other
    # things.
    config = Config()
    config.CourseDirectory.root = self.working_directory

    #=======================================#
    #              Load Config              #
    #=======================================#

    # Check for an nbgrader config file...
    if not os.path.exists(os.path.join(self.working_directory, 'nbgrader_config.py')):
        # if there isn't one, make sure there's at least a rudaux config file
        if not os.path.exists(os.path.join(self.working_directory, 'rudaux_config.py')):
            sys.exit("""
    You do not have nbgrader_config.py or rudaux_config.py in your current
    directory. We need at least one of these to set up your course
    parameters! You can specify a directory with the course_dir argument
    if you wish.
    """)

    # use the traitlets Application class directly to load nbgrader config file.
    # reference:
    # https://github.com/jupyter/nbgrader/blob/41f52873c690af716c796a6003d861e493d45fea/nbgrader/server_extensions/validate_assignment/handlers.py#L35-L37

    # ._load_config_files() returns a generator, so if the config is missing,
    # the generator will act similarly to an empty array

    # load rudaux_config if it exists, otherwise just bring in nbgrader_config.
    for rudaux_config in Application._load_config_files('rudaux_config', path=self.working_directory):
        config.merge(rudaux_config)

    for nbgrader_config in Application._load_config_files('nbgrader_config', path=self.working_directory):
        config.merge(nbgrader_config)

    #=======================================#
    #           Set Config Params           #
    #=======================================#

    ## NBGRADER PARAMS

    # If the user set the exchange, perform home user expansion if necessary
    if config.get('Exchange', {}).get('root') is not None:
        # perform home user expansion. Should not throw an error, but may
        try:
            # expand home user in-place
            config['Exchange']['root'] = os.path.expanduser(config['Exchange']['root'])
        except:
            # NOTE(review): bare except kept as-is — deliberate best-effort
            # expansion; consider narrowing to Exception.
            pass

    ## CANVAS PARAMS

    # Before we continue, make sure we have all of the necessary parameters.
    self.course_id = config.get('Canvas', {}).get('course_id')
    self.canvas_url = config.get('Canvas', {}).get('canvas_url')
    self.external_tool_name = config.get('Canvas', {}).get('external_tool_name')
    self.external_tool_level = config.get('Canvas', {}).get('external_tool_level')
    # The canvas url should have no trailing slash
    self.canvas_url = re.sub(r"/$", "", self.canvas_url)

    ## GITHUB PARAMS
    self.stu_repo_url = config.get('GitHub', {}).get('stu_repo_url', '')
    self.assignment_release_path = config.get('GitHub', {}).get('assignment_release_path')
    self.ins_repo_url = config.get('GitHub', {}).get('ins_repo_url')
    # subpath not currently supported
    # self.ins_dir_subpath = config.get('GitHub').get('ins_dir_subpath')

    ## JUPYTERHUB PARAMS
    self.hub_url = config.get('JupyterHub', {}).get('hub_url')
    # The hub url should have no trailing slash
    self.hub_url = re.sub(r"/$", "", self.hub_url)
    # Get Storage directory & type
    self.storage_path = config.get('JupyterHub', {}).get('storage_path', )
    self.zfs = config.get('JupyterHub', {}).get('zfs')  # Optional, default is false!
    self.zfs_regex = config.get('JupyterHub', {}).get('zfs_regex')  # default is false!
    self.zfs_datetime_pattern = config.get('JupyterHub', {}).get('zfs_datetime_pattern')  # default is false!
    # Note hub_prefix, not base_url, to avoid any ambiguity
    self.hub_prefix = config.get('JupyterHub', {}).get('base_url')
    # If prefix was set, make sure it has no trailing slash, but a preceding
    # slash
    if self.hub_prefix is not None:
        self.hub_prefix = re.sub(r"/$", "", self.hub_prefix)
        if re.search(r"^/", self.hub_prefix) is None:
            self.hub_prefix = fr"/{self.hub_prefix}"

    ## COURSE PARAMS
    self.grading_image = config.get('Course', {}).get('grading_image')
    self.tmp_dir = config.get('Course', {}).get('tmp_dir')
    assignment_list = config.get('Course', {}).get('assignments')

    self.course_timezone = config.get('Course', {}).get('timezone')
    self.system_timezone = pendulum.now(tz='local').timezone.name

    ## Repurpose the rest of the params for later batches
    ## (Hang onto them in case we need something)
    self._full_config = config

    #=======================================#
    #        Validate URLs (Slightly)       #
    #=======================================#

    urls = {
        'JupyterHub.hub_url': self.hub_url,
        'Canvas.canvas_url': self.canvas_url
    }

    for key, value in urls.items():
        if re.search(r"^https{0,1}", value) is None:
            sys.exit(f"""
    You must specify the scheme (e.g. https://) for all URLs.
    You are missing the scheme in "{key}":
    {value}
    """)
        if re.search(r".git$", value) is not None:
            sys.exit(f"""
    Please do not use .git-appended URLs.
    You have used a .git url in "{key}":
    {value}
    """)

    #=======================================#
    #       Check For Required Params       #
    #=======================================#

    # Finally, before we continue, make sure all of our required parameters were
    # specified in the config file(s)
    required_params = {
        "Canvas.course_id": self.course_id,
        "Canvas.canvas_url": self.canvas_url,
        "GitHub.stu_repo_url": self.stu_repo_url,
        "GitHub.ins_repo_url": self.ins_repo_url,
        "JupyterHub.hub_url": self.hub_url,
        "Course.assignments": assignment_list
    }

    # If any are none...
    if None in required_params.values():
        # Figure out which ones are none and let the user know.
        for key, value in required_params.items():
            if value is None:
                print(f" \"{key}\" is missing.")
        sys.exit('Please make sure you have specified all required parameters in your config file.')

    #=======================================#
    #       Check For Optional Params       #
    #=======================================#

    # Now look for all of our optional parameters. If any are missing, let the
    # user know we'll be using the default.
    optional_params = {
        "assignment_release_path": {
            "value": self.assignment_release_path,
            "default": 'materials',
            "config_name": "GitHub.assignment_release_path"
        },
        # "assignment_source_path": {
        #   "value": self.assignment_source_path,
        #   "default": "source",
        #   "config_name": "c.GitHub.assignment_source_path"
        # },
        "hub_prefix": {
            "value": self.hub_prefix,
            "default": "",
            "config_name": "JupyterHub.base_url"
        },
        "zfs": {
            "value": self.zfs,
            "default": False,
            "config_name": "JupyterHub.zfs"
        },
        "zfs_regex": {
            "value": self.zfs_regex,
            "default": r'\d{4}-\d{2}-\d{2}-\d{4}',
            "config_name": "JupyterHub.zfs_regex"
        },
        "zfs_datetime_pattern": {
            "value": self.zfs_datetime_pattern,
            "default": 'YYYY-MM-DD-HHmm',
            "config_name": "JupyterHub.zfs_datetime_pattern"
        },
        "course_timezone": {
            "value": self.course_timezone,
            "default": 'US/Pacific',
            "config_name": "Course.timezone"
        },
        "grading_image": {
            "value": self.grading_image,
            "default": 'ubcdsci/r-dsci-grading',
            "config_name": "Course.grading_image"
        },
        "tmp_dir": {
            "value": self.tmp_dir,
            "default": os.path.join(Path.home(), 'tmp'),
            "config_name": "Course.tmp_dir"
        },
        "external_tool_name": {
            "value": self.external_tool_name,
            "default": 'Jupyter',
            "config_name": "Canvas.external_tool_name"
        },
        "external_tool_level": {
            "value": self.external_tool_level,
            "default": 'course',
            "config_name": "Canvas.external_tool_level"
        }
    }

    for key, param in optional_params.items():
        if param.get('value') is None:
            setattr(self, key, param.get('default'))
            print(f" \"{param.get('config_name')}\" is missing, using default parameter of \"{getattr(self, key)}\"")

    # Make sure no preceding or trailing slashes in assignment release path
    self.assignment_release_path = re.sub(r"/$", "", self.assignment_release_path)
    self.assignment_release_path = re.sub(r"^/", "", self.assignment_release_path)

    # Since we are using the student repo URL for the Launch URLs
    # (i.e. telling nbgitpuller where to find the notebook),
    # if the user provided an SSH url, we need the https version as well.
    self.stu_launch_url = utils.generate_git_urls(self.stu_repo_url).get('plain_https')

    #! this is cheating a bit, but we can get the repo name this way
    #! Fix me in the future
    self.ins_repo_name = os.path.split(utils.generate_git_urls(self.ins_repo_url).get('plain_https'))[1]
    self.stu_repo_name = os.path.split(self.stu_launch_url)[1]

    #=======================================#
    #           Set Canvas Token            #
    #=======================================#

    canvas_token_name = config.get('Canvas').get('token_name')
    if canvas_token_name is None:
        print("Searching for default Canvas token, CANVAS_TOKEN...")
        canvas_token_name = 'CANVAS_TOKEN'

    self.canvas_token = self._get_token(canvas_token_name)

    #=======================================#
    #        Finalize Setting Params        #
    #=======================================#

    # set up the nbgrader api with our merged config files
    self.nb_api = NbGraderAPI(config=config)

    # assign init params to object
    # self.canvas_token = self._get_token(canvas_token_name)
    # self.course = self._get_course()

    # Set crontab
    # self.cron = CronTab(user=True)
    # We need to use the system crontab because we'll be making ZFS snapshots
    # which requires elevated permissions
    self.cron = CronTab(user=True)

    #=======================================#
    #        Instantiate Assignments        #
    #=======================================#

    # Subclass assignment for this course:
    class CourseAssignment(rudaux.Assignment):
        course = self

    instantiated_assignments = []

    for _assignment in assignment_list:
        assignment = CourseAssignment(
            name=_assignment.get('name'),
            duedate=_assignment.get('duedate'),
            duetime=_assignment.get('duetime', '23:59:59'),  # default is 1 sec to midnight
            points=_assignment.get('points', 0),  # default is zero points
            manual=_assignment.get('manual', False),  # default is no manual grading
        )
        instantiated_assignments.append(assignment)

    self.assignments = instantiated_assignments
def get_app():
    """Return the global :class:`Application` singleton."""
    # Previously returned TerminalIPythonApp.instance(); kept here for history:
    # return TerminalIPythonApp.instance()
    return Application.instance()