def task_validate_rawdata_file(pk):
    """validates a rawdata file - that is an archive holding data to be
    analysed

    :type pk: int
    :param pk: pk for Data entity
    :returns: bool -- True if Data validates, False else. Processing log,
        including errors, will be written to the Data entity.
    """
    # init and checks
    valid = False
    logger = Logger.get_logger(StringIO())
    try:
        df = Data.objects.get(id=pk)
        assert df.kind == 'rd_file'
        # touching content_object verifies the generic relation resolves
        tr = df.content_object
    except Exception as ex:
        # narrowed from a bare except; include the cause so the processing
        # log is actually diagnosable
        logger.log('ERROR: %s' % str(ex))
        return valid

    try:
        logger.log('looking at raw data file with pk: %s' % pk)
        rd, sr = read_hdf5_arc(df.file.path)
        logger.log('found rd_file: %s' % df.name)
        # duration in seconds: samples (axis 0) divided by sampling rate
        len_rd_sec = rd.shape[0] / sr
        logger.log('found data in %d channels, for %d sec' % (
            rd.shape[1], len_rd_sec))
        # TODO: more checks?
        logger.log('rd_file passed all checks')
        valid = True
    except Exception as ex:
        logger.log('ERROR: rawdata file check: %s' % str(ex))
    # honour the documented bool contract (visible original fell through
    # without an explicit return)
    return valid
def task_run_modules(ev, **kwargs): """core function to run all modules for an evaluation :type ev: Evaluation :param ev: Evaluation entity :keyword: any, will be passed to modules as parameters :returns: True on success, False on error """ success = None try: ev.status = ev.STATUS.running ev.save() mod_list = ev.trial.benchmark.module_set.all() logger = Logger.get_logger(StringIO()) except: success = False else: try: logger.log_delimiter_line() rd_file = ev.trial.rd_file gt_file = ev.trial.gt_file ev_file = ev.ev_file logger.log('processing: %s' % ev) logger.log('reading input files') rd, sampling_rate = read_hdf5_arc(rd_file.file.path) if sampling_rate is not None: kwargs.update(sampling_rate=sampling_rate) ev_sts = read_gdf_sts(ev_file.file.path) gt_sts = read_gdf_sts(gt_file.file.path) logger.log('done reading input files') logger.log_delimiter_line() # modules assert len(mod_list), 'Module list is empty!' for mod in mod_list: logger.log('starting module: %s' % mod) module_pkg = importlib.import_module('spike.module.%s' % mod.path) _tick_ = datetime.now() module = module_pkg.module_cls(rd, gt_sts, ev_sts, logger, **kwargs) module.apply() module.save(mod, ev) _tock_ = datetime.now() logger.log('finished: %s' % str(_tock_ - _tick_)) logger.log_delimiter_line() del module, module_pkg except Exception, ex: logger.log('ERROR: (%s) %s' % (ex.__class__.__name__, str(ex))) success = False ev.status = ev.STATUS.failure else:
def task_validate_spiketrain_file(pk):
    """validate a spiketrain file - that is a text file in gdf format
    (space separated, 2col, [key,time])

    :type pk: int
    :param pk: pk for Data entity
    :returns: bool -- True if Data validates, False else. Processing log,
        including errors, will be written to the Data entity.
    """
    # init and checks
    valid = False
    logger = Logger.get_logger(StringIO())
    try:
        df = Data.objects.get(id=pk)
        assert df.kind == 'st_file'
        # touching content_object verifies the generic relation resolves
        tr = df.content_object
    except Exception as ex:
        # narrowed from a bare except; include the cause so the processing
        # log is actually diagnosable
        logger.log('ERROR: %s' % str(ex))
        return valid

    try:
        logger.log('looking at spiketrain file with pk: %s' % df.id)
        sts = read_gdf_sts(df.file.path)
        logger.log('found st_file: %s' % df.name)
        # each unit's spike train must be a flat (1-d) ndarray of times
        for st in sts:
            if not isinstance(sts[st], sp.ndarray):
                raise TypeError('spike train %s not ndarray' % st)
            if not sts[st].ndim == 1:
                raise ValueError('spike trains have to be ndim==1')
        # TODO: more checks?
        logger.log('st_file passed all checks')
        valid = True
    except Exception as ex:
        logger.log('ERROR: spiketrain file check: %s' % str(ex))
    # honour the documented bool contract (visible original fell through
    # without an explicit return)
    return valid