def startModelLoadingAsync(self):
    """NOTE: this seems to invoke a few bugs (crashes, sporadic model
    reading errors, etc) so is disabled for now...

    Kicks off asynchronous loads of the monitor and keyboard models,
    storing the results on self.monitorNP / self.keyboardNP and setting
    self.loadingError if either load comes back empty.
    """
    self.monitorNP = None
    self.keyboardNP = None
    self.loadingError = False

    # force the "loading" to take some time after the first run...
    # (LFNoCache bypasses both the RAM and bam caches)
    opts = LoaderOptions()
    opts.setFlags(opts.getFlags() | LoaderOptions.LFNoCache)

    def makeReceiver(attrName):
        # Build a completion callback that records the loaded model on
        # self under attrName, flagging an error when the load failed.
        def receive(model):
            if not model:
                self.loadingError = True
            setattr(self, attrName, model)
        return receive

    self.loader.loadModel("monitor", loaderOptions=opts,
                          callback=makeReceiver("monitorNP"))
    self.loader.loadModel("takeyga_kb", loaderOptions=opts,
                          callback=makeReceiver("keyboardNP"))
def loadModel(self, modelPath, loaderOptions=None, noCache=None,
              allowInstance=False, okMissing=None,
              callback=None, extraArgs=None, priority=None):
    """Attempts to load a model or models from one or more relative
    pathnames.  If the input modelPath is a string (a single model
    pathname), the return value will be a NodePath to the model loaded
    if the load was successful, or None otherwise.  If the input
    modelPath is a list of pathnames, the return value will be a list
    of NodePaths and/or Nones.

    loaderOptions may optionally be passed in to control details about
    the way the model is searched and loaded.  See the LoaderOptions
    class for more.

    The default is to look in the ModelPool (RAM) cache first, and
    return a copy from that if the model can be found there.  If the
    bam cache is enabled (via the model-cache-dir config variable),
    then that will be consulted next, and if both caches fail, the
    file will be loaded from disk.  If noCache is True, then neither
    cache will be consulted or updated.

    If allowInstance is True, a shared instance may be returned from
    the ModelPool.  This is dangerous, since it is easy to accidentally
    modify the shared instance, and invalidate future load attempts of
    the same model.  Normally, you should leave allowInstance set to
    False, which will always return a unique copy.

    If okMissing is True, None is returned if the model is not found
    or cannot be read, and no error message is printed.  Otherwise, an
    IOError is raised if the model is not found or cannot be read
    (similar to attempting to open a nonexistent file).  (If modelPath
    is a list of filenames, then IOError is raised if *any* of the
    models could not be loaded.)

    If callback is not None, then the model load will be performed
    asynchronously.  In this case, loadModel() will initiate a
    background load and return immediately.  The return value will be
    an object that may later be passed to loader.cancelRequest() to
    cancel the asynchronous request.  At some later point, when the
    requested model(s) have finished loading, the callback function
    will be invoked with the n loaded models passed as its parameter
    list.  It is possible that the callback will be invoked
    immediately, even before loadModel() returns.  If you use
    callback, you may also specify a priority, which specifies the
    relative importance over this model over all of the other
    asynchronous load requests (higher numbers are loaded first).

    True asynchronous model loading requires Panda to have been
    compiled with threading support enabled (you can test
    Thread.isThreadingSupported()).  In the absence of threading
    support, the asynchronous interface still exists and still behaves
    exactly as described, except that loadModel() might not return
    immediately.
    """
    assert Loader.notify.debug("Loading model: %s" % (modelPath,))
    if loaderOptions is None:
        loaderOptions = LoaderOptions()
    else:
        # Copy, so the flag twiddling below never mutates the caller's
        # options object.
        loaderOptions = LoaderOptions(loaderOptions)

    if okMissing is not None:
        # Explicit okMissing overrides whatever the options say:
        # okMissing means "don't report errors".
        if okMissing:
            loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFReportErrors)
        else:
            loaderOptions.setFlags(loaderOptions.getFlags() | LoaderOptions.LFReportErrors)
    else:
        # Derive okMissing from the options: errors not reported means
        # a missing model is acceptable.
        okMissing = ((loaderOptions.getFlags() & LoaderOptions.LFReportErrors) == 0)

    if noCache is not None:
        if noCache:
            loaderOptions.setFlags(loaderOptions.getFlags() | LoaderOptions.LFNoCache)
        else:
            loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFNoCache)

    if allowInstance:
        loaderOptions.setFlags(loaderOptions.getFlags() | LoaderOptions.LFAllowInstance)

    if isinstance(modelPath, types.StringTypes) or \
       isinstance(modelPath, Filename):
        # We were given a single model pathname.
        modelList = [modelPath]
        gotList = False
    else:
        # Assume we were given a list of model pathnames.
        modelList = modelPath
        gotList = True

    if callback is None:
        # We got no callback, so it's a synchronous load.
        result = []
        for path in modelList:
            node = self.loader.loadSync(Filename(path), loaderOptions)
            if node is not None:
                result.append(NodePath(node))
            else:
                result.append(None)

        if not okMissing and None in result:
            message = 'Could not load model file(s): %s' % (modelList,)
            raise IOError(message)

        if gotList:
            return result
        return result[0]
    else:
        # We got a callback, so we want an asynchronous (threaded)
        # load.  We'll return immediately, but when all of the
        # requested models have been loaded, we'll invoke the
        # callback (passing it the models on the parameter list).
        if extraArgs is None:
            # Default lazily to avoid a shared mutable default argument.
            extraArgs = []
        cb = Loader.Callback(len(modelList), gotList, callback, extraArgs)
        for i, path in enumerate(modelList):
            request = self.loader.makeAsyncRequest(Filename(path), loaderOptions)
            if priority is not None:
                request.setPriority(priority)
            request.setDoneEvent(self.hook)
            request.setPythonObject((cb, i))
            self.loader.loadAsync(request)
            cb.requests[request] = True
        return cb
def loadModel(self, modelPath, loaderOptions=None, noCache=None,
              allowInstance=False, okMissing=None,
              callback=None, extraArgs=None, priority=None):
    """Attempts to load a model or models from one or more relative
    pathnames.  If the input modelPath is a string (a single model
    pathname), the return value will be a NodePath to the model loaded
    if the load was successful, or None otherwise.  If the input
    modelPath is a list of pathnames, the return value will be a list
    of NodePaths and/or Nones.

    loaderOptions may optionally be passed in to control details about
    the way the model is searched and loaded.  See the LoaderOptions
    class for more.

    The default is to look in the ModelPool (RAM) cache first, and
    return a copy from that if the model can be found there.  If the
    bam cache is enabled (via the model-cache-dir config variable),
    then that will be consulted next, and if both caches fail, the
    file will be loaded from disk.  If noCache is True, then neither
    cache will be consulted or updated.

    If allowInstance is True, a shared instance may be returned from
    the ModelPool.  This is dangerous, since it is easy to accidentally
    modify the shared instance, and invalidate future load attempts of
    the same model.  Normally, you should leave allowInstance set to
    False, which will always return a unique copy.

    If okMissing is True, None is returned if the model is not found
    or cannot be read, and no error message is printed.  Otherwise, an
    IOError is raised if the model is not found or cannot be read
    (similar to attempting to open a nonexistent file).  (If modelPath
    is a list of filenames, then IOError is raised if *any* of the
    models could not be loaded.)

    If callback is not None, then the model load will be performed
    asynchronously.  In this case, loadModel() will initiate a
    background load and return immediately.  The return value will be
    an object that may later be passed to loader.cancelRequest() to
    cancel the asynchronous request.  At some later point, when the
    requested model(s) have finished loading, the callback function
    will be invoked with the n loaded models passed as its parameter
    list.  It is possible that the callback will be invoked
    immediately, even before loadModel() returns.  If you use
    callback, you may also specify a priority, which specifies the
    relative importance over this model over all of the other
    asynchronous load requests (higher numbers are loaded first).

    True asynchronous model loading requires Panda to have been
    compiled with threading support enabled (you can test
    Thread.isThreadingSupported()).  In the absence of threading
    support, the asynchronous interface still exists and still behaves
    exactly as described, except that loadModel() might not return
    immediately.
    """
    assert Loader.notify.debug("Loading model: %s" % (modelPath,))
    if loaderOptions is None:
        loaderOptions = LoaderOptions()
    else:
        # Work on a copy so the caller's LoaderOptions is left untouched
        # by the flag adjustments below.
        loaderOptions = LoaderOptions(loaderOptions)

    if okMissing is not None:
        # An explicit okMissing wins: okMissing <=> errors not reported.
        if okMissing:
            loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFReportErrors)
        else:
            loaderOptions.setFlags(loaderOptions.getFlags() | LoaderOptions.LFReportErrors)
    else:
        # Otherwise infer it from the options' error-reporting flag.
        okMissing = ((loaderOptions.getFlags() & LoaderOptions.LFReportErrors) == 0)

    if noCache is not None:
        if noCache:
            loaderOptions.setFlags(loaderOptions.getFlags() | LoaderOptions.LFNoCache)
        else:
            loaderOptions.setFlags(loaderOptions.getFlags() & ~LoaderOptions.LFNoCache)

    if allowInstance:
        loaderOptions.setFlags(loaderOptions.getFlags() | LoaderOptions.LFAllowInstance)

    if isinstance(modelPath, types.StringTypes) or \
       isinstance(modelPath, Filename):
        # We were given a single model pathname.
        modelList = [modelPath]
        gotList = False
    else:
        # Assume we were given a list of model pathnames.
        modelList = modelPath
        gotList = True

    if callback is None:
        # We got no callback, so it's a synchronous load.
        result = []
        for path in modelList:
            node = self.loader.loadSync(Filename(path), loaderOptions)
            if node is not None:
                result.append(NodePath(node))
            else:
                result.append(None)

        if not okMissing and None in result:
            message = 'Could not load model file(s): %s' % (modelList,)
            raise IOError(message)

        if gotList:
            return result
        return result[0]
    else:
        # We got a callback, so we want an asynchronous (threaded)
        # load.  We'll return immediately, but when all of the
        # requested models have been loaded, we'll invoke the
        # callback (passing it the models on the parameter list).
        if extraArgs is None:
            # Create the default list per call; a mutable default
            # argument would be shared across all calls.
            extraArgs = []
        cb = Loader.Callback(len(modelList), gotList, callback, extraArgs)
        for i, path in enumerate(modelList):
            request = self.loader.makeAsyncRequest(Filename(path), loaderOptions)
            if priority is not None:
                request.setPriority(priority)
            request.setDoneEvent(self.hook)
            request.setPythonObject((cb, i))
            self.loader.loadAsync(request)
            cb.requests[request] = True
        return cb