Example #1
0
    def setup(self, app_args):
        """Assemble the node configuration and create the logger.

        Merges commandline arguments, the input info file and the
        declared argument defaults (commandline > file > defaults),
        creates the workdir, and filters the info down to the keys the
        app asked for. Returns a (log, req_info, info) tuple.
        """
        # arguments shared by every node
        basic_args = [
            Argument(Keys.INPUT, KeyHelp.INPUT, default=''),
            Argument(Keys.OUTPUT, KeyHelp.OUTPUT, default=''),
            Argument(Keys.MODULE, KeyHelp.MODULE, default=''),
            Argument(Keys.LOG_LEVEL, KeyHelp.LOG_LEVEL, default="DEBUG"),
        ]

        # Fixme: Prettify WORKDIR creation system
        # WORKDIR: when WORKDIR is requested, splice its related args
        # in right behind it (order of the inserts is preserved)
        for pos, arg in enumerate(app_args):
            if arg.name != Keys.WORKDIR:
                continue
            extras = [
                Argument(Keys.BASEDIR, KeyHelp.BASEDIR, default='.'),
                Argument(Keys.JOB_ID, KeyHelp.JOB_ID, default=''),
                Argument(Keys.SUBJOBLIST, KeyHelp.SUBJOBLIST, default=''),
                Argument(Keys.NAME, KeyHelp.NAME,
                         default=self.__class__.__name__),
            ]
            for offset, extra in enumerate(extras, 1):
                app_args.insert(pos + offset, extra)
            break

        defaults, cliargs = parse_sysargs(basic_args + app_args)

        # construct info with precedence defaults < info file < commandline
        input_source = cliargs.get(Keys.INPUT, None)
        fileinfo = get_handler(input_source).read(input_source)
        info = dicts.merge(cliargs, dicts.merge(fileinfo, defaults))

        # setup logging
        log = Logger.create(info[Keys.LOG_LEVEL])

        # request by malars: show dataset prominent in logger
        if Keys.DATASET_CODE in info:
            dataset = info[Keys.DATASET_CODE]
            if isinstance(dataset, list):
                log.debug("Datasets are %s" % dataset)
            elif Keys.MZXML in info and not isinstance(info[Keys.MZXML], list):
                log.info("Dataset is %s (%s)" % (
                    dataset, os.path.basename(info[Keys.MZXML])))
            else:
                log.info("Dataset is %s" % dataset)

        # WORKDIR: create WORKDIR (only after mk log)
        info = dirs.create_workdir(log, info)

        # hand the app either everything or only the requested keys
        if Keys.ALL_ARGS in info:
            req_info = info
        else:
            req_info = dict((arg.name, info[arg.name])
                            for arg in basic_args + app_args
                            if arg.name in info)
        log.debug("info for app: %s" % req_info)
        return log, req_info, info
Example #2
0
    def main(cls):
        print
        print "this is", os.path.abspath(__file__)
        print
        log = None
        try:
            start = time.time()
            ci = cls()
            app_args = ci.add_args()
            log, req_info, info = ci.setup(app_args)
            ret_info = ci.run(log, req_info)
            info = dicts.merge(info, ret_info, priority='right')
            ci.teardown(log, info)
            log.debug("%s finished sucessfully at %s" % (cls.__name__, time.asctime()))
            log.info("%s finished sucessfully after %ss" % (cls.__name__, int(time.time() - start)))
        except Exception, e:
            msg = cls.__name__ + " failed! " + str(e)
            if isinstance(e, KeyError):
                msg += " key not found in info"
            msg += "\n"
            # feature request cuklinaj: mail when fail, delay between
            if os.environ.get("LSB_JOBID"):
                controlfile = os.getenv("HOME") + "/.last_error_message"
                if not os.path.exists(controlfile) or (time.time() - os.stat(controlfile).st_mtime) > 600:
                    subprocess.call("touch %s; echo \"Failure reason: %s\nTo prevent spam you won't get such warnings for the next 10 minutes\" | mail -s \"Workflow Failed\" %s" % (
                        controlfile, msg, getpass.getuser()), shell=True)
            # if app fails before logger is created use sys.exit for message
            if not log:
                print(msg)
                traceback.print_exc(e)
                sys.exit(msg)

            log.error(traceback.format_exc(e))
            log.error(msg)
            sys.exit(1)
Example #3
0
    def run(self, log, info):
        """Collect the per-engine ini files of every sample and write
        one merged config file per sample."""
        log.debug("All available engines: %s", info['ENGINES'])
        # an engine is used when its RUN<ENGINE> flag is the string 'True'
        used_engines = [engine for engine in info['ENGINES'].split(" ")
                        if info.get('RUN' + engine.upper()) == 'True']
        log.debug("Effectively used engines: %s" % used_engines)

        if not isinstance(info[Keys.DATASET_CODE], list):
            info[Keys.DATASET_CODE] = [info[Keys.DATASET_CODE]]
        sample_count = len(info[Keys.DATASET_CODE])
        log.debug("Number of samples: %d" % sample_count)
        for sample in range(sample_count):
            merged = {}
            for engine in used_engines:
                inipath = "%s.ini_%d" % (engine, sample)
                if not os.path.exists(inipath):
                    raise RuntimeError("Required infofile not found " + inipath)
                log.debug("Found infofile " + inipath)
                engine_config = infohandler.get_handler(inipath).read(inipath)
                merged = dicts.merge(merged, engine_config, priority='append')

            # collapse the list values accumulated by the append-merge
            for key in merged.keys():
                if isinstance(merged[key], list):
                    merged[key] = dicts.unify(merged[key])

            outpath = "%s_%d" % (info[Keys.MERGED], sample)
            infohandler.get_handler(info[Keys.MERGED]).write(merged, outpath)
            log.debug('Wrote outfile ' + outpath)

        return info
Example #4
0
    def run(self, log, info):
        """Merge split info files one subjob level up.

        Globs all files named "<MERGE>_*", groups them by their parent
        subjob chain, append-merges each group into a single config,
        unifies the collected values and writes one "<MERGED>_<i>" file
        per group. Returns info unchanged.
        """
        paths = sorted(glob.glob(info[Keys.MERGE] + "_*"))

        #read in
        config_container = {}
        nofiles = len(paths)
        if nofiles == 0:
            raise RuntimeError("No files to merge found!")
        for path in paths:
            log.debug("Reading " + path)
            config = infohandler.get_handler(path).read(path)

            # sanity check: the last subjob entry carries the expected
            # number of split files in its third SUBJOBSEP field
            lastjob = config[Keys.SUBJOBLIST][-1]
            checksum = int(lastjob.split(Keys.SUBJOBSEP)[2])
            if nofiles != checksum:
                raise RuntimeError("Number of inputfiles %d and checksum %d do not match" % (nofiles, checksum))

            #append the current config to the ones with same parent subjobs
            parentjoblist = config[Keys.SUBJOBLIST][:-1]
            parentjobstr = self.parentjobs_to_str(parentjoblist)

            #remove one level from subjoblist
            config[Keys.SUBJOBLIST] = parentjoblist
            # drop the key entirely when no parent levels remain
            if not config[Keys.SUBJOBLIST]:
                del config[Keys.SUBJOBLIST]
            if parentjobstr in config_container:
                config_container[parentjobstr] = dicts.merge(config_container[parentjobstr], config, priority='append')
            else:
                config_container[parentjobstr] = config

        #unify (only possible after all collected)
        for config in config_container.values():
            for key in config.keys():
                # SUBJOBLIST must stay a list even with a single entry
                if key == Keys.SUBJOBLIST:
                    config[key] = dicts.unify(config[key], unlist_single=False)
                    continue
                if isinstance(config[key], list):
                    config[key] = dicts.unify(config[key])

        #write back
        for i, config in enumerate(config_container.values()):
            path = info[Keys.MERGED] + '_' + str(i)
            log.debug("Writing out " + path)
            infohandler.get_handler(path).write(config, path)

        return info
Example #5
0
    def run(self, log, info):
        """Read every file listed in COLLATE, append-merge them onto a
        copy of info, unify the collected values and return the result."""
        handler = get_handler(info[Keys.COLLATE])
        sources = info[Keys.COLLATE].split(" ")
        del info[Keys.COLLATE]

        #read in
        merged = info.copy()
        for source in sources:
            log.debug('collating file [%s]' % source)
            merged = dicts.merge(merged, handler.read(source),
                                 priority='append')

        #unify the per-key lists accumulated by the append merge
        for key in merged.keys():
            merged[key] = dicts.unify(merged[key])

        #write back
        return merged
Example #6
0
    def run(self, log, info):
        """Combine the info files named in COLLATE with info itself
        into one unified config dict, which is returned."""
        collate_value = info[Keys.COLLATE]
        reader = get_handler(collate_value)
        file_list = collate_value.split(" ")
        del info[Keys.COLLATE]

        #read in
        result = info.copy()
        for entry in file_list:
            log.debug('collating file [%s]' % entry)
            entry_config = reader.read(entry)
            result = dicts.merge(result, entry_config, priority='append')

        #unify every value collected by the append merge
        for key in list(result.keys()):
            result[key] = dicts.unify(result[key])

        #write back
        return result
Example #7
0
    def main(cls):
        print
        print "this is", os.path.abspath(__file__)
        print
        log = None
        try:
            start = time.time()
            ci = cls()
            app_args = ci.add_args()
            log, req_info, info = ci.setup(app_args)
            ret_info = ci.run(log, req_info)
            info = dicts.merge(info, ret_info, priority='right')
            ci.teardown(log, info)
            log.debug("%s finished sucessfully at %s" %
                      (cls.__name__, time.asctime()))
            log.info("%s finished sucessfully after %ss" %
                     (cls.__name__, int(time.time() - start)))
        except Exception, e:
            msg = cls.__name__ + " failed! " + str(e)
            if isinstance(e, KeyError):
                msg += " key not found in info"
            msg += "\n"
            # feature request cuklinaj: mail when fail, delay between
            if os.environ.get("LSB_JOBID"):
                controlfile = os.getenv("HOME") + "/.last_error_message"
                if not os.path.exists(controlfile) or (
                        time.time() - os.stat(controlfile).st_mtime) > 600:
                    subprocess.call(
                        "touch %s; echo \"Failure reason: %s\nTo prevent spam you won't get such warnings for the next 10 minutes\" | mail -s \"Workflow Failed\" %s"
                        % (controlfile, msg, getpass.getuser()),
                        shell=True)
            # if app fails before logger is created use sys.exit for message
            if not log:
                print(msg)
                traceback.print_exc(e)
                sys.exit(msg)

            log.error(traceback.format_exc(e))
            log.error(msg)
            sys.exit(1)
Example #8
0
    def setup(self, app_args):
        """Build the node's info dict and logger.

        Merges commandline args, the input info file and the argument
        defaults (precedence: commandline > file > defaults), creates
        the workdir and returns (log, req_info, info), where req_info
        holds only the requested keys unless ALL_ARGS is set.
        """
        # arguments every node understands
        common = [Argument(Keys.INPUT, KeyHelp.INPUT, default=''),
                  Argument(Keys.OUTPUT, KeyHelp.OUTPUT, default=''),
                  Argument(Keys.MODULE, KeyHelp.MODULE, default=''),
                  Argument(Keys.LOG_LEVEL, KeyHelp.LOG_LEVEL,
                           default="DEBUG")]

        # Fixme: Prettify WORKDIR creation system
        # WORKDIR: if WORKDIR is defined, slice its related args in
        # directly after it
        idx = 0
        while idx < len(app_args):
            if app_args[idx].name == Keys.WORKDIR:
                app_args[idx + 1:idx + 1] = [
                    Argument(Keys.BASEDIR, KeyHelp.BASEDIR, default='.'),
                    Argument(Keys.JOB_ID, KeyHelp.JOB_ID, default=''),
                    Argument(Keys.SUBJOBLIST, KeyHelp.SUBJOBLIST, default=''),
                    Argument(Keys.NAME, KeyHelp.NAME,
                             default=self.__class__.__name__)]
                break
            idx += 1

        defaults, cliargs = parse_sysargs(common + app_args)

        # construct info from defaults < info < commandlineargs
        input_path = cliargs.get(Keys.INPUT, None)
        fileinfo = get_handler(input_path).read(input_path)
        info = dicts.merge(cliargs, dicts.merge(fileinfo, defaults))

        # setup logging
        log = Logger.create(info[Keys.LOG_LEVEL])

        # request by malars: show dataset prominent in logger
        if Keys.DATASET_CODE in info:
            code = info[Keys.DATASET_CODE]
            if isinstance(code, list):
                log.debug("Datasets are %s" % code)
            else:
                if Keys.MZXML in info and not isinstance(
                        info[Keys.MZXML], list):
                    log.info("Dataset is %s (%s)" %
                             (code, os.path.basename(info[Keys.MZXML])))
                else:
                    log.info("Dataset is %s" % code)

        # WORKDIR: create WORKDIR (only after mk log)
        info = dirs.create_workdir(log, info)

        # filter to requested args unless the app asked for everything
        if Keys.ALL_ARGS in info:
            req_info = info
        else:
            wanted = [arg.name for arg in common + app_args]
            req_info = {}
            for key in wanted:
                if key in info:
                    req_info[key] = info[key]
        log.debug("info for app: %s" % req_info)
        return log, req_info, info