# -- imports ---------------------------------------------------------------
# The original import block is not part of this excerpt; the imports below
# cover the names used in this file. The project-internal paths (Logger, the
# file readers, the Django models) are assumptions and may differ in the
# actual source tree.
import importlib
from datetime import datetime
from StringIO import StringIO  # Python 2 era code; use io.StringIO on Python 3

import scipy as sp

from .logger import Logger                       # assumed project helper
from .util import read_hdf5_arc, read_gdf_sts    # assumed project readers
from .models import Analysis, Asset, Data        # assumed Django models


def task_run_modules(ana_pk, **kwargs):
    """run all enabled modules for an analysis

    :type ana_pk: Analysis
    :param ana_pk: Analysis entity
    :keyword: any, will be passed to modules as parameters
    :returns: True on success, False on error
    """
    ana = None
    success = None
    logger = Logger.get_logger(StringIO(""))
    try:
        # get analysis
        ana = Analysis.objects.get(pk=ana_pk.pk)
        logger.log_delimiter_line()
        logger.log("processing %s" % ana)
        ana.status = Analysis.STATUS.running
        ana.save()

        # get module list
        mod_list = ana.datafile.dataset.module_set.all()
        assert mod_list, "module list is empty!"

        # get file handles
        logger.log("reading input files")
        rd_file = ana.datafile.rd_file
        gt_file = ana.datafile.gt_file
        st_file = ana.st_file
        rd, sampling_rate = read_hdf5_arc(rd_file.data.path)
        if sampling_rate is not None:
            kwargs.update(sampling_rate=sampling_rate)
        ev_sts = read_gdf_sts(st_file.data.path)
        gt_sts = read_gdf_sts(gt_file.data.path)
        logger.log("done reading input files")

        # apply modules
        _tick_all = datetime.now()
        for mod in mod_list:
            logger.log_delimiter_line()
            logger.log("starting {}".format(mod))
            module_cls = mod.get_module_cls()
            _tick_ = datetime.now()
            module = module_cls(rd, gt_sts, ev_sts, logger, **kwargs)
            module.apply()
            module.save(mod, ana)
            _tock_ = datetime.now()
            logger.log("finished in {}".format(str(_tock_ - _tick_)))
            del module, module_cls, _tick_, _tock_
        _tock_all = datetime.now()
        logger.log_delimiter_line()
        logger.log("finished all modules in {}".format(str(_tock_all - _tick_all)))
    except Exception as ex:
        logger.log_delimiter_line()
        logger.log("ERROR ({}) :: {}".format(ex.__class__.__name__, ex))
        success = False
        # reconstructed from here on: the excerpt breaks off after the line
        # above; the STATUS member names are assumed to mirror the Evaluation
        # variant below
        if ana is not None:
            ana.status = Analysis.STATUS.failure
    else:
        success = True
        ana.status = Analysis.STATUS.success
    finally:
        if ana is not None:
            ana.save()
    return success
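# The module loop in task_run_modules relies only on a small implicit
# contract: construct with (rd, gt_sts, ev_sts, logger, **kwargs), then
# apply(), then save(mod, ana). A minimal sketch of a class honoring that
# contract (a hypothetical illustration, not the project's actual base
# class):
class ExampleModule(object):
    """minimal module satisfying the interface used by task_run_modules"""

    def __init__(self, rd, gt_sts, ev_sts, logger, **kwargs):
        self.rd = rd            # raw data as returned by read_hdf5_arc
        self.gt_sts = gt_sts    # ground truth spike trains, {key: ndarray}
        self.ev_sts = ev_sts    # evaluated spike trains, {key: ndarray}
        self.logger = logger
        self.kwargs = kwargs    # e.g. sampling_rate, when available
        self.result = None

    def apply(self):
        # run the actual computation; here: count spikes per unit
        self.result = dict((k, len(v)) for k, v in self.ev_sts.items())
        self.logger.log("counted %d units" % len(self.result))

    def save(self, mod, ana):
        # persist results against the module/analysis entities; a real
        # module would create Django model rows here
        self.logger.log("saving results for %s / %s" % (mod, ana))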
def task_run_modules(ev, **kwargs):
    """core function to run all modules for an evaluation

    :type ev: Evaluation
    :param ev: Evaluation entity
    :keyword: any, will be passed to modules as parameters
    :returns: True on success, False on error
    """
    success = None
    try:
        ev.status = ev.STATUS.running
        ev.save()
        mod_list = ev.trial.benchmark.module_set.all()
        logger = Logger.get_logger(StringIO())
    except Exception:
        success = False
    else:
        try:
            logger.log_delimiter_line()
            rd_file = ev.trial.rd_file
            gt_file = ev.trial.gt_file
            ev_file = ev.ev_file
            logger.log('processing: %s' % ev)
            logger.log('reading input files')
            rd, sampling_rate = read_hdf5_arc(rd_file.file.path)
            if sampling_rate is not None:
                kwargs.update(sampling_rate=sampling_rate)
            ev_sts = read_gdf_sts(ev_file.file.path)
            gt_sts = read_gdf_sts(gt_file.file.path)
            logger.log('done reading input files')
            logger.log_delimiter_line()

            # modules
            assert len(mod_list), 'Module list is empty!'
            for mod in mod_list:
                logger.log('starting module: %s' % mod)
                module_pkg = importlib.import_module('spike.module.%s' % mod.path)
                _tick_ = datetime.now()
                module = module_pkg.module_cls(rd, gt_sts, ev_sts, logger, **kwargs)
                module.apply()
                module.save(mod, ev)
                _tock_ = datetime.now()
                logger.log('finished: %s' % str(_tock_ - _tick_))
                logger.log_delimiter_line()
                del module, module_pkg
        except Exception as ex:
            logger.log('ERROR: (%s) %s' % (ex.__class__.__name__, str(ex)))
            success = False
            ev.status = ev.STATUS.failure
        else:
            # reconstructed ending (the excerpt breaks off at this `else:`):
            # mirror the failure branch and report success, per the
            # docstring contract
            success = True
            ev.status = ev.STATUS.success
        ev.save()
    return success
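# task_run_modules above resolves module implementations at runtime with
# importlib and expects every `spike.module.<path>` package to expose a
# `module_cls` attribute. The same lookup pattern, demonstrated standalone
# (the helper name is hypothetical; the usage line pulls a class from a
# stdlib module, since the real spike.module packages are not part of this
# excerpt):
def _load_plugin_cls(dotted_path, attr="module_cls"):
    """import `dotted_path` and return the class bound to `attr`"""
    pkg = importlib.import_module(dotted_path)
    return getattr(pkg, attr)

# usage sketch: _load_plugin_cls("collections", attr="OrderedDict")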
def task_validate_spiketrain_file(pk, **kwargs):
    """validate spike train file - that is a text file in gdf format
    (space separated, 2col, [key,time])

    :type pk: int
    :param pk: pk of `Datafile` entity
    :returns: bool -- True if the file validates, False otherwise.
        Processing log, including errors, will be written to the
        `Datafile` entity.
    """
    # init and checks
    valid = False
    logger = Logger.get_logger(StringIO())
    try:
        obj = Asset.objects.get(id=pk)
        assert obj.kind == "st_file"
        df = obj.content_object
    except Exception:
        logger.log("ERROR")
        return valid
    try:
        logger.log("looking at spike train file with pk: %s" % obj.id)
        sts = read_gdf_sts(obj.data.path)
        logger.log("found st_file: %s" % obj.name)
        for st in sts:
            if not isinstance(sts[st], sp.ndarray):
                raise TypeError("spike train %s not ndarray" % st)
            if not sts[st].ndim == 1:
                raise ValueError("spike trains have to be ndim==1")
        # TODO: more checks?
        logger.log("st_file passed all checks")
        valid = True
    except Exception as ex:
        logger.log('ERROR: spike train file check: %s' % str(ex))
    # reconstructed ending (truncated in the excerpt): return the flag per
    # the docstring contract
    return valid
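# Both validators in this file assume read_gdf_sts returns a mapping of unit
# key to a 1-d ndarray of spike times, parsed from the two-column, space
# separated gdf text format named in the docstrings. A minimal reader sketch
# under that assumption (hypothetical, not the project's implementation):
def _read_gdf_sts_sketch(path):
    """parse a gdf file, one `key time` pair per line -> {key: ndarray}"""
    sts = {}
    with open(path, "r") as fp:
        for line in fp:
            if not line.strip():
                continue  # skip blank lines
            key, time = line.split()[:2]
            sts.setdefault(key, []).append(float(time))
    return dict((k, sp.asarray(v)) for k, v in sts.items())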
def task_validate_spiketrain_file(pk):
    """validate a spiketrain file - that is a text file in gdf format
    (space separated, 2col, [key,time])

    :type pk: int
    :param pk: pk for Data entity
    :returns: bool -- True if the Data validates, False otherwise.
        Processing log, including errors, will be written to the Data
        entity.
    """
    # init and checks
    valid = False
    logger = Logger.get_logger(StringIO())
    try:
        df = Data.objects.get(id=pk)
        assert df.kind == 'st_file'
        tr = df.content_object
    except Exception:
        logger.log('ERROR')
        return valid
    try:
        logger.log('looking at spiketrain file with pk: %s' % df.id)
        sts = read_gdf_sts(df.file.path)
        logger.log('found st_file: %s' % df.name)
        for st in sts:
            if not isinstance(sts[st], sp.ndarray):
                raise TypeError('spike train %s not ndarray' % st)
            if not sts[st].ndim == 1:
                raise ValueError('spike trains have to be ndim==1')
        # TODO: more checks?
        logger.log('st_file passed all checks')
        valid = True
    except Exception as ex:
        logger.log('ERROR: spiketrain file check: %s' % str(ex))
    # reconstructed ending (truncated in the excerpt): return the flag per
    # the docstring contract
    return valid
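# A self-contained illustration of the per-train checks above on synthetic
# data (hypothetical helper mirroring the validators' loop, not part of the
# original file):
def _check_sts(sts):
    """apply the ndarray/ndim checks from the validators to a sts mapping"""
    for st in sts:
        if not isinstance(sts[st], sp.ndarray):
            raise TypeError("spike train %s not ndarray" % st)
        if not sts[st].ndim == 1:
            raise ValueError("spike trains have to be ndim==1")
    return True

# e.g. _check_sts({0: sp.array([10.0, 25.5])}) -> True, while a 2-d train
# such as sp.array([[1.0]]) raises ValueError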