Example #1
    def process_2d(
        self,
        extractor,
        glob_pattern="*.mp4",
        timestamp_regex=TIMESTAMP_REGEX,
        display_progress=True,
        draw_viz=True,
    ):
        log = Logger("App")
        vi = VideoIndex.create_from_search(glob_pattern=glob_pattern, timestamp_regex=timestamp_regex)
        log.info("Files to process: {}".format(vi.video_files))
        length = len(vi.video_files)
        processed_videos = []
        files_by_timestamp = list(vi.groupby_iter("timestamp"))
        disable_tqdm = not display_progress
        with tqdm(files_by_timestamp, disable=disable_tqdm) as tqdm_files:
            for timestamp, video_files in tqdm_files:
                pvs = []
                display_timestamp = timestamp.isoformat()
                tqdm_files.set_description(display_timestamp)
                with tqdm(video_files, disable=disable_tqdm) as tqdm_video_file:
                    for video_file in tqdm_video_file:
                        display_filename = "{} : {}".format(video_file.camera, video_file.file)
                        tqdm_video_file.set_description(display_filename)
                        pv = ProcessedVideo.from_video(
                            video_file.file,
                            extractor,
                            read_from_cache=False,
                            timestamp_start=timestamp,
                            draw_viz=draw_viz,
                        )
                        pvs.append(pv)
                processed_videos.append((timestamp, pvs))
        return processed_videos
Example #2
def dispatch(args):
    """
    Create a sample JSON file for a run, and launch it using Popen.
    """
    dispatch_log = Logger('dispatch')
    # validate that programs in setup file exist
    setup_params = json.load(args.setup)
    validate_setupfile(setup_params)

    # validate reference
    validate_reference(args.ref)

    # validate directories
    validate_directory(args.log, dispatch_log)
    validate_directory(args.stats, dispatch_log)
    validate_directory(args.bam_dir, dispatch_log)

    if (args.scythe and args.adapter is None) or (args.adapter is not None and not os.path.isfile(args.adapter)):
        dispatch_log.critical("adapter file for Scythe not specified, or does not exist.")
        sys.exit(1)

    # create sample config JSON file, starting off with global config passed through args
    global_sample_config = dict(reference=args.ref, adapters_file=args.adapter,
                                prior=str(args.prior), error=args.trim_error,
                                stats_dir=args.stats, nthreads=args.threads,
                                mem=args.mem, bam_dir=args.bam_dir)

    # which preprocess steps to use
    global_sample_config["preprocess-steps"] = list()
    for step in PREPROCESSING_STEPS:
        if step in args and args.__getattribute__(step):
            global_sample_config["preprocess-steps"].append(step)

    global_params = dict(global_sample_config.items() + setup_params.items())
    sample_config = "%s_samples.txt" % args.job
    samples = create_sample_config(args.samples, sample_config, global_params)

    # create batch script
    sbatch_params = {"log_dir":args.log, "jobname":args.job, "nthreads":args.threads,
                    "mem":args.mem, "nsamples":len(samples), "sample_dispatch_py":__file__,
                    "sample_config":sample_config, "partition":args.partition}
    batch_script = safe_templater(SLURM_BATCH, sbatch_params)
    batch_file = "%s_batch.sh" % args.job
    with open(batch_file, 'w') as f:
        f.write(batch_script)

    if not args.dry_run:
        # now, start the batch script
        dispatch_log.info("submitting sbatch script '%s'." % batch_file)
        sbatch_cmd = ["sbatch"]

        if args.email is not None:
            sbatch_cmd.extend(["--mail-type", "ALL"])
            sbatch_cmd.extend(["--mail-user", args.email])
        sbatch_cmd.append(batch_file)
        retcode = check_call(sbatch_cmd)
        if retcode != 0:
            dispatch_log.critical("submitting batch script '%s' exited abnormally with return code %d." % (batch_file, retcode))
            sys.exit(retcode)
        dispatch_log.info("submitting sbatch script '%s' complete." % batch_file)
Example #3
def pds_database( log: Logger,
                  filter: Optional[Union[str, Path]] = None,
                  ) -> Tuple[Path, List]:
    """Provide the list of target PDS as a file and a list.

    .. note::
        Depends on the ``master.pds`` configuration option.

    :param log: Job logger.
    :param filter: File containing the target subset; otherwise the whole PDS database is considered.
    """
    # @TODO: PDS-FILTER
    pds_file = Path(TBcore.get_option('master', 'pds'))
    if pds_file.is_file():
        pds_list = [line.strip() for line in open(pds_file).readlines() if len(line.strip()) > 0]
    elif pds_file.is_dir():
        pds_list = [str(x.resolve()) for x in pds_file.glob('*/*.pds')]
    else:
        raise NodeDataError('The provided MASTER database directory/list file cannot be found.')

    # Even if a PDS file already exists, we always create a temporary file so that we can
    # manage different versions of the PDS database in different Nodes.
    f = NamedTemporaryFile(mode='w', delete=False)
    log.info(f'Temporary file for PDS database: {f.name}')
    for x in pds_list:
        f.write(x + '\n')
    f.close()
    pds_file = Path(f.name)
    return pds_file, pds_list
Example #4
class Aiko(commands.AutoShardedBot):
    """
    Aiko's bot class.
    Subclass of discord.ext.commands.AutoShardedBot.

    See discord.ext.commands documentation for parameters
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.config = Config("config.yaml").config
        StreamHandler(sys.stdout).push_application()
        self.log = Logger("Aiko")
        self.osuapi = OsuApi(self.config["osuapi"], connector=AHConnector())
        self.kv = self.loop.run_until_complete(_connect_redis(self))

    async def on_ready(self):
        self.log.info(f"Aiko is ready! {len(self.guilds)} servers")
        await self.change_presence(
            activity=discord.Streaming(
                name=f"osu! // {get_prefix(self.config)}help",
                url="https://twitch.tv/monstercat",
            )
        )

    async def on_message(self, message):
        if message.author.bot:
            return
        ctx = await self.get_context(message)
        await self.invoke(ctx)
Example #5
class actionThread(stopableThread):
    def __init__(self,actList, inputParam=None,*args,**kargs):
        super(actionThread,self).__init__(*args,**kargs)
        self.actList=actList
        self.param = inputParam
        self.log = Logger(u"%s actionThread"%self.name)

    def setParam(self,data):
        self.param = data
    
    def changeActList(self,newList):
        if not isinstance(newList, (tuple, list)):
            self.log.error(u"changeActList:invalid parameter")
            return 
        self.actList = newList

    def process(self):
        """
        Invoke all of the action plugins in actList. Only runs once.
        """
        actManager = absManager.actionManager()
        for i in self.actList:
            self.log.info(u"do action %s:%s" % (i,actManager.id2filename(i)))
            actManager.call(i,self.param)
        self.pause()
        
        
Example #6
    def error(self, id_=None, error_code=None, error_msg=None):
        if isinstance(id_, Exception):
            # XXX: for an unknown reason 'log' is None in this branch,
            # therefore it needs to be instantiated before use
            global log
            if not log:
                log = Logger('IB Broker')
            log.exception(id_)

        if isinstance(error_code, EClientErrors.CodeMsgPair):
            error_msg = error_code.msg()
            error_code = error_code.code()

        if isinstance(error_code, int):
            if error_code in (502, 503, 326):
                # 502: Couldn't connect to TWS.
                # 503: The TWS is out of date and must be upgraded.
                # 326: Unable connect as the client id is already in use.
                self.unrecoverable_error = True

            if error_code < 1000:
                log.error("[{}] {} ({})".format(error_code, error_msg, id_))
            else:
                log.info("[{}] {} ({})".format(error_code, error_msg, id_))
        else:
            log.error("[{}] {} ({})".format(error_code, error_msg, id_))
Example #7
def runner(args):
    """
    Run a sample through an NGS pipeline (as a command) using Popen.
    TODO: logging, timing.
    """
    if args.config is None:
        sample_params = json.loads(sys.stdin.readline().rstrip())
    else:
        # read the first line from the test/debug config file
        sample_params = json.loads(args.config.readline().rstrip())

    sample = sample_params["sample_id"]
    runner_log = Logger("%s logger" % sample)

    cmd = safe_templater(SAMPLE_PROCESS_CMD, sample_params)
    runner_log.info("%s starting preprocessing and alignment of sample." % sample)
    if args.dry_run:
        runner_log.debug("%s command: %s" % (sample, cmd))
        return
    tstart = time.time()
    p = Popen(cmd, shell=True, executable=find_bash())
    p.wait()
    if p.returncode != 0:
        # make this as loud as possible so Slurm can handle it
        runner_log.critical("%s exited abnormally with return code %d." % (sample, p.returncode))
        sys.exit(p.returncode)
    tend = time.time()
    elapsed = tend - tstart
    runner_log.info("%s completed preprocessing and alignment in %s seconds." % (sample, str(round(elapsed, 5))))
Example #8
class CustomLogger(object):
    def __init__(
        self,
        log_level=LogLevel.INFO,
        format_str='[{record.time:%Y-%m-%d %H:%M:%S}] - {record.channel} - {record.level_name} '
        '- {record.message}'):
        self.logger = Logger('WindAdapter')
        set_datetime_format('local')
        StreamHandler(sys.stdout, format_string=format_str).push_application()
        FileHandler('WindAdapter.log', bubble=True,
                    format_string=format_str).push_application()
        self.set_level(log_level)

    def set_level(self, log_level):
        if log_level.lower() == LogLevel.INFO:
            self.logger.level = logbook.INFO
        elif log_level.lower() == LogLevel.WARNING:
            self.logger.level = logbook.WARNING
        elif log_level.lower() == LogLevel.CRITICAL:
            self.logger.level = logbook.CRITICAL
        elif log_level.lower() == LogLevel.NOTSET:
            self.logger.level = logbook.NOTSET

    def info(self, msg):
        self.logger.info(msg)

    def warning(self, msg):
        self.logger.warning(msg)

    def critical(self, msg):
        self.logger.critical(msg)
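
A minimal usage sketch for the CustomLogger above (not from the original source); it assumes the LogLevel constants are the lowercase strings ('info', 'warning', ...) that the comparisons in set_level imply.

# Hypothetical usage; LogLevel.WARNING is assumed to be the string 'warning'.
logger = CustomLogger(log_level=LogLevel.WARNING)
logger.info('suppressed: below the WARNING threshold')
logger.warning('written to stdout and WindAdapter.log')
logger.critical('written to stdout and WindAdapter.log')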
Example #10
def start_parse(debug, file, save):
    """ main function for starting everything"""
    if debug:
        log_level = 'DEBUG'
    else:
        log_level = 'WARNING'

    StreamHandler(stdout, level=log_level).push_application()
    log = Logger('main')
    log.debug('Starting up...')

    games = parse(file)

    log.info("Games: {}", games.count)
    if save:
        log.debug('Saving data to json files')
        l_game = []
        for game in games.items():
            name = game.sid.strftime("%Y-%m-%d_%H%M%S")
            l_game.append(name)
            filename = save + name + ".json"
            with open(filename, 'w') as file:
                file.write(jsonpickle.encode(game))
                log.debug("wrote game info to {file}".format(file=filename))
            filename = save + 'hs2017.json'
        highscore = score_count(games)
        data = {'games': l_game, 'highscore': highscore}
        with open(filename, 'w') as file:
            log.debug("wrote highscore info to {file}".format(file=filename))
            file.write(json.dumps(data))

    log.debug('End of program...')
Example #11
def runner(args):
    """
    Run a sample through an NGS pipeline (as a command) using Popen.
    TODO: logging, timing.
    """
    if args.config is None:
        sample_params = json.loads(sys.stdin.readline().rstrip())
    else:
        # read the first line from the test/debug config file
        sample_params = json.loads(args.config.readline().rstrip())

    sample = sample_params["sample_id"]
    runner_log = Logger("%s logger" % sample)

    cmd = safe_templater(SAMPLE_PROCESS_CMD, sample_params)
    runner_log.info("%s starting preprocessing and alignment of sample." %
                    sample)
    if args.dry_run:
        runner_log.debug("%s command: %s" % (sample, cmd))
        return
    tstart = time.time()
    p = Popen(cmd, shell=True, executable=find_bash())
    p.wait()
    if p.returncode != 0:
        # make this as loud as possible so Slurm can handle it
        runner_log.critical("%s exited abnormally with return code %d." %
                            (sample, p.returncode))
        sys.exit(p.returncode)
    tend = time.time()
    elapsed = tend - tstart
    runner_log.info("%s completed preprocessing and alignment in %s seconds." %
                    (sample, str(round(elapsed, 5))))
Example #12
def runner(args):
    """
    Run a sample through an NGS pipeline (as a command) using Popen.
    TODO: logging, timing.
    """
    if args.config is None:
        sample_params = json.loads(sys.stdin.readline().rstrip())
    else:
        # read the first line from the test/debug config file
        sample_params = json.loads(args.config.readline().rstrip())

    sample = sample_params["sample_id"]
    runner_log = Logger("%s logger" % sample)
    tstart = datetime.datetime.now() # total run time

    # preprocessing and alignment
    aln_cmd = build_sample_aln_command(sample_params)
    run_command_on_sample(aln_cmd, runner_log, sample, desc="preprocessing and alignment")
    if args.dry_run:
        return

    sort_cmd = safe_templater(SORT_STEPS['samtools-sort'], sample_params)
    run_command_on_sample(sort_cmd, runner_log, sample, desc="sorting BAM file")
    tend = datetime.datetime.now()
    elapsed = tend - tstart
    runner_log.info("%s all processing completed in: %s." % (sample, str(elapsed)))
Example #13
class Router(Thread):
    """Thread waiting for a request by another Driver and responding to
    it with the chunk asked for.
    """
    def __init__(self, name, redis, get_chunk):
        super(Router, self).__init__()

        self.name = name
        self.redis = redis
        self.get_chunk = get_chunk
        self.router = None

        self.logger = Logger("{} - Router".format(self.name))
        self.logger.info("Started")

        self.context = zmq.Context.instance()

    def run(self):
        self.router = self.context.socket(zmq.ROUTER)
        port = self.router.bind_to_random_port('tcp://*')
        self.redis.set('drivers:{}:router'.format(self.name), port)

        while True:
            msg = self.router.recv_multipart()
            self._respond_to(*msg)

    def _respond_to(self, identity, filename, offset, size):
        """Calls the `get_chunk` handler defined by the Driver to get
        the chunk and send it to the addressee.
        """
        self.logger.debug("Getting chunk of size {} from offset {} in '{}'",
                          size, offset, filename)
        chunk = self.get_chunk(filename, int(offset), int(size))
        self.router.send_multipart((identity, chunk))
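
A minimal sketch of the requesting side of this protocol (not part of the original Driver code); it assumes the same Redis key layout ('drivers:<name>:router') and uses a DEALER socket, so the frames arrive at the ROUTER exactly as (identity, filename, offset, size).

import redis
import zmq


def request_chunk(driver_name, filename, offset, size, host='localhost'):
    """Hypothetical peer: ask <driver_name>'s Router thread for a file chunk."""
    # Look up the port the Router bound to at random.
    port = int(redis.Redis().get('drivers:{}:router'.format(driver_name)))

    dealer = zmq.Context.instance().socket(zmq.DEALER)
    dealer.connect('tcp://{}:{}'.format(host, port))
    # The Router expects three frames: filename, offset and size, all as bytes.
    dealer.send_multipart([filename.encode(),
                           str(offset).encode(),
                           str(size).encode()])
    chunk, = dealer.recv_multipart()
    dealer.close()
    return chunk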
Example #14
def checkpoint_out( log: Logger, filename: Union[Path, str], data: Dict ):
    """Dump checkpoint.
    """
    filename = Path(filename)

    log.info(f'CHECKPOINT: Creating at {filename}\n')
    with filename.open('w') as fd:
        json.dump(data, fd, cls=GeneralEncoder)
Example #15
def write_rating_to_bcolz():
    table_name = 'rating'
    logger = Logger(table_name)
    logger.info('Preparing data......')
    df = rating_table()
    attr_dict = {}
    ndays = 0
    write_dataframe(df, table_name, ndays, attr_dict)
Example #16
def test2():
    log = Logger('Logbook-test-2')
    log.critical("critical")
    log.error("error")
    log.warn("warn")
    log.notice("notice")
    log.info("test")
    log.debug("debug")
Example #17
def write_static_info_to_bcolz():
    table_name = 'infoes'
    logger = Logger(table_name)
    logger.info('Preparing data......')
    ndays = 0
    df, attr_dict = static_info_table()
    # df['asof_date'] = pd.to_datetime(df['asof_date'].values).tz_localize(None)
    write_dataframe(df, table_name, ndays, attr_dict)
Example #18
def dispatch(args):
    """
    Create a sample JSON file for a run, and launch it using Popen.
    """
    dispatch_log = Logger("dispatch")
    # validate that programs in setup file exist
    setup_params = json.load(args.setup)
    validate_setupfile(setup_params)

    # validate reference
    validate_reference(args.ref)

    # validate directories
    validate_directory(args.log, dispatch_log)
    validate_directory(args.stats, dispatch_log)
    validate_directory(args.bam_dir, dispatch_log)

    # create sample config JSON file, starting off with global config passed through args
    global_sample_config = dict(
        reference=args.ref,
        adapters_file=args.adapter,
        prior=str(args.prior),
        error=args.trim_error,
        stats_dir=args.stats,
        nthreads=args.threads,
        mem=args.mem,
        bam_dir=args.bam_dir,
    )
    global_params = dict(global_sample_config.items() + setup_params.items())
    sample_config = "%s_samples.txt" % args.job
    samples = create_sample_config(args.samples, sample_config, global_params)

    # create batch script
    sbatch_params = {
        "log_dir": args.log,
        "jobname": args.job,
        "nthreads": args.threads,
        "mem": args.mem,
        "nsamples": len(samples),
        "sample_dispatch_py": __file__,
        "sample_config": sample_config,
        "partition": args.partition,
    }
    batch_script = safe_templater(SLURM_BATCH, sbatch_params)
    batch_file = "%s_batch.sh" % args.job
    with open(batch_file, "w") as f:
        f.write(batch_script)

    if not args.dry_run:
        # now, start the batch script
        dispatch_log.info("submitting sbatch script '%s'." % batch_file)
        retcode = check_call(["sbatch", batch_file])
        if retcode != 0:
            dispatch_log.critical(
                "submitting batch script '%s' exited abnormally with return code %d." % (batch_file, retcode)
            )
            sys.exit(retcode)
        dispatch_log.critical("submitting sbatch script '%s' complete." % batch_file)
Example #19
def dispatch(args):
    """
    Create a sample JSON file for a run, and launch it using Popen.
    """
    dispatch_log = Logger('dispatch')
    # validate that programs in setup file exist
    setup_params = json.load(args.setup)
    validate_setupfile(setup_params)

    # validate reference
    validate_reference(args.ref)

    # validate directories
    validate_directory(args.log, dispatch_log)
    validate_directory(args.stats, dispatch_log)
    validate_directory(args.bam_dir, dispatch_log)

    # create sample config JSON file, starting off with global config passed through args
    global_sample_config = dict(reference=args.ref,
                                adapters_file=args.adapter,
                                prior=str(args.prior),
                                error=args.trim_error,
                                stats_dir=args.stats,
                                nthreads=args.threads,
                                mem=args.mem,
                                bam_dir=args.bam_dir)
    global_params = dict(global_sample_config.items() + setup_params.items())
    sample_config = "%s_samples.txt" % args.job
    samples = create_sample_config(args.samples, sample_config, global_params)

    # create batch script
    sbatch_params = {
        "log_dir": args.log,
        "jobname": args.job,
        "nthreads": args.threads,
        "mem": args.mem,
        "nsamples": len(samples),
        "sample_dispatch_py": __file__,
        "sample_config": sample_config,
        "partition": args.partition
    }
    batch_script = safe_templater(SLURM_BATCH, sbatch_params)
    batch_file = "%s_batch.sh" % args.job
    with open(batch_file, 'w') as f:
        f.write(batch_script)

    if not args.dry_run:
        # now, start the batch script
        dispatch_log.info("submitting sbatch script '%s'." % batch_file)
        retcode = check_call(["sbatch", batch_file])
        if retcode != 0:
            dispatch_log.critical(
                "submitting batch script '%s' exited abnormally with return code %d."
                % (batch_file, retcode))
            sys.exit(retcode)
        dispatch_log.critical("submitting sbatch script '%s' complete." %
                              batch_file)
Example #20
def top_words(sorted_set):
  log = Logger("Top Words")
  top = []

  while True:
    if len(sorted_set) > 0:
      test = list(reversed(sorted_set[-10:]))
      if any(item not in top for item in test):
        top = test

        log.info('#1-10 of {}: {}'.format(len(sorted_set), ', '.join(top)))
    time.sleep(0.01)
Example #21
class Bot(commands.Bot):
    def __init__(self, config):
        super(Bot, self).__init__(command_prefix=self.COMMAND_PREFIX)
        self.config = config
        self.logger = Logger(self.__class__.__name__.lower())
        self.event(self.on_ready)
        self.add_cog(self.COMMANDS(self))

    def run(self):
        super(Bot, self).run(self.config.token)

    async def on_ready(self):
        self.logger.info(f'Logged in: {self.user.name} {self.user.id}')
Example #22
    def load_plugins(self) -> None:
        """Dynamically loads all plugins from the plugins directory.

        New plugins can be added by creating new classes in the `plugins` module.
        """
        self.plugins = {}
        importlib.import_module("plugins")
        modules = []
        plugin_files = os.listdir(os.path.join(os.path.dirname(__file__), "plugins"))
        if len(plugin_files) == 0:
            print("NOTE: No plugin files found.")

        for plugin in plugin_files:
            if plugin.startswith("__") or not plugin.endswith(".py"):
                # Skip files like __init__.py and .gitignore
                continue

            module_name = "plugins." + plugin.rsplit(".")[0]
            modules.append(importlib.import_module(module_name, package="plugins"))

        for module in modules:
            if module.__name__ in sys.modules:
                importlib.reload(module)

            clsmembers = inspect.getmembers(
                module,
                lambda member: inspect.isclass(member)
                and member.__module__ == module.__name__,
            )

            for name, cls in clsmembers:
                if not issubclass(cls, BasePlugin):
                    # We only want plugins that derive from BasePlugin
                    CORE_LOG.warn(
                        f"Skipping {name} as it doesn't derive from the BasePlugin"
                    )
                    continue
                CORE_LOG.info(f"Loading plugin {name} ...")

                # Create logger for each plugin
                plugin_logger = Logger(f"olive.plugin.{name}")
                plugin_logger.info(f"{name}'s logger is working hard!")
                logger_group.add_logger(plugin_logger)

                # Generate standard config
                config = PluginConfig(plugin_logger)

                # Instantiate the plugin!
                self.plugins[name] = cls(config)

        CORE_LOG.info("Loaded plugins")
Example #23
def write_dataframe(df, table_name, ndays, attr_dict):
    """以bcolz格式写入数据框"""
    logger = Logger(table_name)
    # 列名称及类型标准化
    out = _normalize_ad_ts_sid(df, ndays=ndays)
    # 转换为bcolz格式并存储
    rootdir = bcolz_table_path(table_name)
    if os.path.exists(rootdir):
        rmtree(rootdir)
    odo(out, rootdir)
    logger.info('数据存储路径:{}'.format(rootdir))
    # 设置属性
    ct = bcolz.open(rootdir)
    for k, v in attr_dict.items():
        ct.attrs[k] = v
Example #24
class timer(object):
  """Decorator that mesures the time it takes to run a function."""

  __instances = {}

  def __init__(self, f):
    self.__f = f
    self.log = Logger(f.func_name)

  def __call__(self, *args, **kwargs):
    self.__start = time.time()
    result = self.__f(*args, **kwargs)
    value = time.time() - self.__start
    self.log.info('elapsed time: {0:.2f}ms'.format(value * 1000))
    return result
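
A small usage sketch for the timer decorator above (illustrative only); note the decorator reads f.func_name, so it targets Python 2 (Python 3 would need f.__name__ instead).

# Hypothetical usage of the timer decorator defined above.
import sys
import time

from logbook import StreamHandler

StreamHandler(sys.stdout).push_application()


@timer
def slow_add(a, b):
  time.sleep(0.05)
  return a + b

slow_add(1, 2)  # logs the elapsed time under the 'slow_add' channel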
Example #25
def sketchXZ(log: Logger, cases: List[Case],
             **kwargs) -> Tuple[plt.Figure, List[plt.Axes]]:
    """
    """
    grid = _calculate_grid(cases, **kwargs)

    if TBcore.get_option('system', 'verbose'):
        sys.stdout.write('Generating an image grid of: {0}x{1}\n'.format(
            grid[0], grid[1]))

    fsize = (kwargs.pop('width',
                        7.5 * grid[1]), kwargs.pop('hight', 7.5 * grid[0]))
    fig = plt.figure(figsize=fsize)
    axs = []
    ylim, xlim = [0, 0], [0, 0]
    for i, case in enumerate(cases):
        position = (int(i / grid[1]), i % grid[1])
        title = '{0}_{1:03d}'.format(case['configuration.name'], i + 1)
        log.info(
            f'Showing {title}-{case.architecture_str} in position: {position[0]}x{position[1]}\n'
        )

        ax = plt.subplot2grid(grid, position, fig=fig)
        axs.append(ax)
        plot_case_sketch(case, ax, kwargs.pop('connections', False),
                         kwargs.pop('beta_fill', 'red'),
                         kwargs.pop('beta_edge', 'black'),
                         kwargs.pop('alpha_fill', 'blue'),
                         kwargs.pop('alpha_edge', 'black'),
                         kwargs.pop('connection_edge', None))
        ax.set_title(title)
        cy = ax.get_ylim()
        cx = ax.get_xlim()
        ylim = [
            ylim[0] if cy[0] < ylim[0] else cy[0],
            ylim[1] if cy[1] > ylim[1] else cy[1]
        ]
        xlim = [
            xlim[0] if cx[0] > xlim[0] else cx[0],
            xlim[1] if cx[1] < xlim[1] else cx[1]
        ]

    for ax in axs:
        ax.set_ylim(ylim[0], ylim[1])
        ax.set_xlim(xlim[0], xlim[1])

    return fig, axs
Example #26
class PyPLogger(object):
    def __init__(self, clazz):
        logbook.set_datetime_format("local")
        self.serverName = clazz.__name__[clazz.__name__.rfind('.') + 1:]
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        self.log_file = TimedRotatingFileHandler(os.path.join(
            log_dir, '%s.log' % self.serverName),
                                                 date_format='%Y-%m-%d',
                                                 bubble=True,
                                                 encoding='utf-8')
        self.log_std = ColorizedStderrHandler(bubble=True)
        #         self.log_std = StderrHandler(bubble=True)

        self.log = Logger(self.serverName)
        self.__init_logger()
        self.__setting()

    def log_type(self, record, handler):
        #         log = "[{date}]-[{level}]-[" + self.serverName + "] - {msg}".format(
        log = "[" + self.serverName + "]" + "-[{date}]-[{level}] - {msg}".format(
            date=record.time,
            level=record.level_name,
            #             filename = os.path.split(record.filename)[-1],
            #             func_name = record.func_name,
            #             lineno = record.lineno,
            msg=record.message)
        return log

    def __init_logger(self):
        logbook.set_datetime_format("local")
        self.log.handlers = []
        self.log.handlers.append(self.log_file)
        self.log.handlers.append(self.log_std)

    def __setting(self):
        self.log_std.formatter = self.log_type
        self.log_file.formatter = self.log_type

    def info(self, *args, **kwargs):
        self.log.info(*args, **kwargs)

    def warn(self, *args, **kwargs):
        self.log.warn(*args, **kwargs)

    def error(self, *args, **kwargs):
        self.log.error(*args, **kwargs)
Example #27
class Plugin(object):
    def __init__(self, site, name=None):
        self.name = name or remove_suffix('plugin', self.__class__.__name__)
        self.log = Logger(self.__class__.__name__.lower())

        self.log.debug('{} initialized'.format(self.name))
        self.base_dir = Path(inspect.getfile(self.__class__)).parent

        # initialize templates
        template_path = self.base_dir / 'templates'
        if template_path.exists():
            self.jinja_env = jinja2.Environment(
                loader=jinja2.FileSystemLoader(str(template_path)),
                extensions=['jinja2.ext.with_']
            )

        # load possible default configuration
        self.register(site)

    @property
    def DEFAULTS_FILE(self):
        return self.base_dir / 'defaults.cfg'

    def register(self, site):
        pass

    def enable_app(self, app):
        pass

    def render_template(self, template_name, **kwargs):
        if not hasattr(self, 'jinja_env'):
            raise RuntimeError('Plugin {} has no template path'.format(
                self.__class__.__name__
            ))
        tpl = self.jinja_env.get_template(template_name)
        return tpl.render(**kwargs)

    def output_template(self, template_name, dest, _mode=0o644, **kwargs):
        if not dest.parent.exists():
            self.log.warning('Path {} did not exist and was created'.format(
                             dest.parent,
            ))
            dest.parent.mkdir(parents=True)

        with new_file(dest, _mode) as out:
            self.log.info('Writing {}'.format(dest.resolve()))
            out.write(self.render_template(template_name, **kwargs))
Example #28
class fileMonitor(object):
    """
    Watch a directory tree for file changes.
    """
    def __init__(self,path):
        super(fileMonitor,self).__init__()
        self.log = Logger(u"fileMonitor")
        self.event_handler = EventHandler()
        self.observer = Observer()
        self.observer.schedule(self.event_handler, path, recursive=True)
        self.log.info("now starting monitor path:%s" % path)
    
    def start(self):
        self.observer.start()
    def stop(self):
        self.observer.stop()
        self.observer.join()
Example #29
def main():
    global log
    StderrHandler().push_application()
    log = Logger("xbbs.coordinator")

    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "coordinator.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))

    inst = Xbbs.create(cfg)

    for name, elem in cfg["projects"].items():
        project = Project(name,
                          **elem,
                          base=path.join(inst.project_base, name))
        inst.projects[name] = project
        os.makedirs(project.base, exist_ok=True)
        log.debug("got project {}", inst.projects[name])

    with inst.zmq.socket(zmq.REP) as sock_cmd, \
         inst.zmq.socket(zmq.PULL) as inst.intake, \
         inst.zmq.socket(zmq.ROUTER) as inst.worker_endpoint:
        # XXX: potentially make perms overridable? is that useful in any
        #      capacity?
        inst.intake.bind(cfg["intake"]["bind"])
        _ipc_chmod(cfg["intake"]["bind"], 0o664)

        inst.worker_endpoint.bind(cfg["worker_endpoint"])
        inst.worker_endpoint.set(zmq.ROUTER_MANDATORY, 1)
        _ipc_chmod(cfg["worker_endpoint"], 0o664)

        sock_cmd.bind(cfg["command_endpoint"]["bind"])
        _ipc_chmod(cfg["command_endpoint"]["bind"], 0o664)

        dumper = gevent.signal_handler(signal.SIGUSR1, dump_projects, inst)
        log.info("startup")
        intake = gevent.spawn(intake_loop, inst)
        job_pull = gevent.spawn(job_pull_loop, inst)
        try:
            command_loop(inst, sock_cmd)
        finally:
            # XXX: This may not be the greatest way to handle this
            gevent.killall(inst.project_greenlets[:])
            gevent.kill(intake)
            gevent.kill(job_pull)
            dumper.cancel()
Example #30
def create_app(config_class = DefaultConfig):
	app = connexion.FlaskApp(
		    __name__, specification_dir='openapi/', options={"swagger_ui": False, "serve_spec": False}
		)
	app.app.config.from_object(config_class)

	log = Logger('logbook')
	log.info(app.app.config['LOG_LEVEL'])
	#show logging messages in terminal
	StreamHandler(sys.stdout ,
		level = app.app.config['LOG_LEVEL']).push_application()

	log.info('welcome to my application CHALLENGE CODE API MODE {}'.format(env('FLASK_ENV','development')))

	app.add_api("swagger.yaml", strict_validation=True)
	flask_app = app.app
	return flask_app
Example #31
    def process_3d(
        self,
        cameras: typing.Mapping[str, ImmutableCamera],
        glob_pattern="*.mp4",
        timestamp_regex=TIMESTAMP_REGEX,
        display_progress=True,
        output_file_pattern="ProcessedVideo3D_{}.pb",
    ):
        log = Logger("App")
        vi = VideoIndex.create_from_search(glob_pattern=glob_pattern, timestamp_regex=timestamp_regex)
        log.info("Files to process: {}".format(vi.video_files))
        length = len(vi.video_files)
        processed_videos = []
        files_by_timestamp = list(vi.groupby_iter("timestamp"))

        DATETIME_FORMAT = "%Y-%m-%d-%H-%M-%S"

        disable_tqdm = not display_progress
        with tqdm(files_by_timestamp, disable=disable_tqdm) as tqdm_files:
            for timestamp, video_files in tqdm_files:
                pvs = []
                display_timestamp = timestamp.isoformat()
                tqdm_files.set_description(display_timestamp)
                with tqdm(video_files, disable=disable_tqdm) as tqdm_video_file:
                    cams = []
                    for video_file in tqdm_video_file:
                        display_filename = "{} : {}".format(video_file.camera, video_file.file)
                        tqdm_video_file.set_description(display_filename)
                        pv = ProcessedVideo.from_video(
                            video_file.file,
                            None,
                            read_from_cache=True,
                            write_to_cache=False,
                            timestamp_start=timestamp,
                            draw_viz=False,
                        )
                        pvs.append(pv)
                        cams.append(cameras[video_file.camera])
                    pv3d = ProcessedVideo3D.from_processed_video_2d(pv2ds=pvs, cameras=cams)

                    date_text = timestamp.strftime(DATETIME_FORMAT)
                    output_filename = output_file_pattern.format(date_text)
                    pv3d.to_proto_file(output_filename)
                    processed_videos.append((timestamp, pv3d))
        return processed_videos
Example #32
def checkpoint_in( log: Logger, filename: Union[Path, str] ) -> Optional[Dict]:
    """Incoming checkpoint.
    """
    if TBcore.get_option('system', 'forced'):
        return None

    filename = Path(filename)
    if filename.is_file():

        log.info(f'CHECKPOINT: Reloading from {filename}\n')
        with Path(filename).open() as fd:
            try:
                data = json.load(fd)
            except json.JSONDecodeError:
                return None
        return data

    return None
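
A minimal round-trip sketch combining checkpoint_out (Example #14) with checkpoint_in above (illustrative only); it assumes both functions are importable from the same module and that the 'system.forced' option is off.

# Hypothetical usage; the logger name and file path are illustrative.
import sys

from logbook import Logger, StreamHandler

StreamHandler(sys.stdout).push_application()
log = Logger('checkpoint-demo')

data = {'step': 3, 'scores': [0.1, 0.7]}
checkpoint_out(log, 'run.checkpoint', data)

restored = checkpoint_in(log, 'run.checkpoint')  # None if missing, unreadable or forced
log.info(f'Restored checkpoint: {restored}')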
Example #33
def write_dataframe(df, table_name, attr_dict=None):
    """以bcolz格式写入数据框"""
    log = Logger(table_name)
    # 转换为bcolz格式并存储
    rootdir = bcolz_table_path(table_name)
    if os.path.exists(rootdir):
        rmtree(rootdir)
    df = _normalize_ad_ts_sid(df)
    for c in (AD_FIELD_NAME, TS_FIELD_NAME, SID_FIELD_NAME):
        if df[c].hasnans:
            warnings.warn(f'{c}列含有空值,已移除')
            df = df.loc[~df[c].isnan(), :]
    ct = bcolz.ctable.fromdataframe(df, rootdir=rootdir)
    log.info('写入数据至:{}'.format(rootdir))
    if attr_dict:
        # 设置属性
        for k, v in attr_dict.items():
            ct.attrs[k] = v
Example #34
class Root(resource.Resource):
    isLeaf = True

    def __init__(self, lock):
        self._lock = lock
        self.log = Logger('web')
        resource.Resource.__init__(self)


    @delayed
    def render_GET(self, request):
        try:
            key = _get_key_from_path(request.path)
            if key == 'info/keys':
                request.write('%r\n' % (self._lock._keys,))
            elif key == 'info/status':
                for line in self._lock.get_status():
                    request.write('%s %s\n' % line)
            elif key == 'info/log':
                for line in self._lock._log:
                    request.write('%s\n' % line)
            else:
                value = yield self._lock.get_key(key)
                request.write(value)
        except KeyNotFound:
            request.setResponseCode(NOT_FOUND)
        returnValue('')


    @proxy_to_master
    @delayed
    def render_POST(self, request):
        try:
            key = _get_key_from_path(request.path)
            data = request.args.get('data', [''])[0]

            self.log.info('Set key %s=%r' % (key, data))
            yield self._lock.set_key(key, data)
        except KeyAlreadyExists, e:
            self.log.warning(e)
            request.setResponseCode(CONFLICT)
        except PaxosError, e:
            self.log.warning(e)
            request.setResponseCode(EXPECTATION_FAILED)
Example #35
def main():
    logbook.concurrency.enable_gevent()

    global log
    StderrHandler().push_application()
    log = Logger('xbbs.worker')
    inst = XbbsWorker()

    XBBS_CFG_DIR = os.getenv("XBBS_CFG_DIR", "/etc/xbbs")
    with open(path.join(XBBS_CFG_DIR, "worker.toml"), "r") as fcfg:
        cfg = CONFIG_VALIDATOR.validate(toml.load(fcfg))

    job_request = msgs.JobRequest(capabilities=cfg["capabilities"]).pack()

    gevent.signal_handler(signal.SIGUSR1, gevent.util.print_run_info)

    log.info(cfg)
    while True:
        with inst.zmq.socket(zmq.REQ) as jobs:
            jobs.connect(cfg["job_endpoint"])

            while True:
                jobs.send(job_request)
                log.debug("waiting for job...")
                # the coordinator sends a heartbeat each minute, so 1.5 minutes
                # should be a sane duration to assume coordinator death on
                if jobs.poll(90000) == 0:
                    # breaking the inner loop will cause a reconnect
                    # since the coordinator is presumed dead, drop requests yet
                    # unsent to it
                    jobs.set(zmq.LINGER, 0)
                    log.debug("dropping socket after a heartbeat timeout")
                    break
                try:
                    msg = jobs.recv()
                    if len(msg) == 0:
                        # drop null msgs
                        continue
                    process_job_msg(inst, msg)
                except KeyboardInterrupt:
                    log.exception("interrupted")
                    return
                except Exception as e:
                    log.exception("job error", e)
Example #36
class DeleteFiles:
    def __init__(self):
        handler = TimedRotatingFileHandler('../logs/delete_files.log')
        handler.push_application()
        self.logger = Logger(name='delete files')

        self.path_lists = self.get_conf()

    @staticmethod
    def get_conf():
        ## Read configuration from the config file
        cp = ConfigParser.ConfigParser()
        with codecs.open(os.path.join(BASE_DIR, './config/config.ini'),
                         'r',
                         encoding='utf-8') as f:
            cp.readfp(f)
            path_lists = eval(cp.get('filesystem', 'del_paths').strip())

        return path_lists

    def file_handler(self):
        for file_path in self.path_lists:
            file_name = os.path.join('/', file_path)
            if os.path.isdir(file_name) or os.path.isfile(file_name):
                if os.path.isdir(file_name):
                    try:
                        os.removedirs(file_name)
                        #os.remove(file_name)
                    except Exception, e:
                        self.logger.error('Delete dir %s failed!%s' %
                                          (file_name, e))
                    else:
                        self.logger.info('Delete dir %s success!' % file_name)
                if os.path.isfile(file_name):
                    try:
                        #os.removedirs(file_name)
                        os.remove(file_name)
                    except Exception, e:
                        self.logger.error('Delete file %s failed!%s' %
                                          (file_name, e))
                    else:
                        self.logger.info('Delete file %s success!' % file_name)
            else:
Example #37
class Genbank:
    def __init__(self, path):
        """
        GenBank
        """
        self.path = os.path.abspath(path)
        self.info_dir = os.path.join(self.path, ".info")
        self.assembly_summary_path = os.path.join(self.info_dir,
                                                  "assembly_summary.txt")
        self.log = Logger("GenBank")
        if not os.path.isdir(self.path):
            os.mkdir(self.path)
        if not os.path.isdir(self.info_dir):
            os.mkdir(self.info_dir)
        try:
            self.assembly_summary = pd.read_csv(self.assembly_summary_path,
                                                sep="\t",
                                                index_col=0)
        except FileNotFoundError:
            self.assembly_summary = pd.read_csv(assembly_summary_url,
                                                sep="\t",
                                                index_col=0,
                                                skiprows=1)
            self.assembly_summary.to_csv(self.assembly_summary_path, sep="\t")
            self.log.info("Downloaded assembly_summary.txt")

    @property
    def species(self):
        for d in os.listdir(self.path):
            species_path = os.path.join(self.path, d)
            if not os.path.isdir(species_path):
                continue
            fastas = len(
                [f for f in os.listdir(species_path) if f.endswith('fasta')])
            if fastas < 10:
                self.log.info("Not enough genomes for {}".format(d))
                continue
            yield Species.Species(species_path,
                                  assembly_summary=self.assembly_summary)

    def qc(self):
        for species in self.species:
            species.qc()
Example #38
def update_data(log: Logger, data: Dict, wpaths: Dict) -> Dict:
    """Update data to link final files.
    """
    data['silent_files']['assembly'] = list(
        wpaths['outdir'].glob('*_hyb.silent'))
    data['silent_files']['design'] = list(
        wpaths['outdir'].glob('*_des.silent'))
    data['minisilent']['assembly'] = wpaths['main'].joinpath(
        'output_hyb.minisilent.gz')
    data['minisilent']['design'] = wpaths['main'].joinpath(
        'output_des.minisilent.gz')
    for k in data['minisilent']:
        log.info(f'Generating minisilent file at {data["minisilent"][k]}\n')
        fd = gzip.open(data['minisilent'][k], "wb")
        for line, _, _, _ in open_rosetta_file(
            [str(x) for x in data['silent_files'][k]],
                True,
                check_symmetry=False):
            fd.write(line.encode('utf-8'))
        fd.close()
    return data
Example #39
class UrlToMarkdown(object):
    default_mobilizer = "original"

    def __init__(self, mobilizer="original"):
        if mobilizer:
            if not getattr(mobilizers, mobilizer.capitalize() + "Mobilizer", None):
                raise Exception("Invalid mobilizer: {}".format(mobilizer))
            self.default_mobilizer = mobilizer
        self.log = Logger(self.__class__.__name__)

    def convert(self, url, mobilizer=None, simple_result=True):
        """Fetch a page from URL, mobilize it, then convert it to Markdown

        url: ...
        mobilizer: 'original', 'instapaper',
        simple_result: True returns markdown text, else returns a dict
        """
        if not mobilizer:
            mobilizer = self.default_mobilizer
        try:
            mob_object = getattr(mobilizers, mobilizer.capitalize() + "Mobilizer")
        except AttributeError:
            raise Exception("Invalid mobilizer: {}".format(mobilizer))
        mob = mob_object()

        self.log.debug(
            "Obtaining {url} via {mobilizer}".format(url=url, mobilizer=mobilizer)
        )
        mobilized = mob.fetch(url)
        self.log.info("Title is {0[title]!r}".format(mobilized))

        self.log.debug("Converting {url} to Markdown".format(url=url))
        h2t = HTML2Text()
        # html2text also wraps image/link URLs, breaking them
        h2t.body_width = 0

        self.log.info("Converted to Markdown")
        mobilized["markdown"] = h2t.handle(mobilized["body"].html())
        if simple_result:
            return mobilized["markdown"]
        return mobilized
Example #40
def main(thread_number, username, main_directory):
    # set up logging
    log_handler = FileHandler(join(main_directory, "log"))
    log_handler.push_application()
    log = Logger("fdsn_log")

    def download_kernel(download_url):
        log_message = f"[thread: {multiprocessing.current_process()}] start to download {download_url} "
        log.info(log_message)
        sh.wget("-c", download_url)
        log_message = f"[thread: {multiprocessing.current_process()}] finish downloading {download_url} "
        log.info(log_message)

    filelist_ftp = get_ftp_file_list(username)
    files_to_download = get_files_to_download(main_directory, filelist_ftp)
    downloading_urls = get_download_urls(files_to_download)

    with multiprocessing.Pool(thread_number) as pool:
        pool.starmap(download_kernel, downloading_urls)

    log.info("success")
Example #41
class ConcoctR(object):
    """ A class for running concoct from within python. Uses the
    concoctParams class for representing input parameters. """
    def __init__(self):
        self.log = Logger('ConcoctR')

    def run_concoct(self,
                    concoct_params,
                    drmaa_s=None,
                    drmaa_jt=None,
                    sbatch_script=None):
        """ drmaa_s is a drama session
        drmaa_jt is a drmaa job template """
        if drmaa_s:
            drmaa_jt.remoteCommand = "concoct " + " ".join(
                concoct_params.args())
            job_id = drmaa_s.runJob(drmaa_jt)
            self.log.info("Jobid {0} with command: {1}".format(
                drmaa_jt.remoteCommand, job_id))
            return job_id
        else:
            if sbatch_script:
                subprocess.Popen(['sbatch', sbatch_script])
            else:
                cla = ['concoct'] + concoct_params.args()
                subprocess.Popen(cla)
                self.log.info(
                    "Command line call with command: {0}".format(cla))

            return None

    def generate_sbatch_script(self, concoct_params, sbatch_params, file_name):
        """ Generate a shell script that can be submitted with sbatch. """
        with open(file_name, 'w+') as f:
            f.write("#!/bin/bash" + '\n')
            for sp in sbatch_params:
                f.write("#SBATCH " + sp + '\n')
            command = " ".join(['concoct'] + concoct_params.args())
            f.write(command + '\n')
Example #42
class Router(Thread):
    """Thread waiting for a request by another Driver and responding to
    it with the chunk asked for.
    """

    def __init__(self, name, redis, get_chunk):
        super(Router, self).__init__()

        self.name = name
        self.redis = redis
        self.get_chunk = get_chunk
        self.router = None

        self.logger = Logger("{} - Router".format(self.name))
        self.logger.info("Started")

        self.context = zmq.Context.instance()

    def run(self):
        self.router = self.context.socket(zmq.ROUTER)
        port = self.router.bind_to_random_port('tcp://*')
        self.redis.set('drivers:{}:router'.format(self.name), port)

        while True:
            msg = self.router.recv_multipart()
            self._respond_to(*msg)

    def _respond_to(self, identity, filename, offset, size):
        """Calls the `get_chunk` handler defined by the Driver to get
        the chunk and send it to the addressee.
        """
        self.logger.debug(
            "Getting chunk of size {} from offset {} in '{}'",
            size, offset, filename
        )
        chunk = self.get_chunk(filename, int(offset), int(size))
        self.router.send_multipart((identity, chunk))
Example #43
class MyEventHandler(FileSystemEventHandler):
    """Logs all the events captured."""

    def __init__(self, logfile, run_handler):
        """pass logfile to be opened and handler to flush writing the file"""
        super(MyEventHandler, self).__init__()
        self.run_log = Logger('Runs')
        self.fs_log = Logger('Files')
        socket_path = environ.get("NVIM_LISTEN_ADDRESS")
        self.nvim = attach('socket', path=socket_path)
        self.log_file = logfile
        self.run_handler = run_handler

    def on_moved(self, event):
        """called when a file or folder is moved"""
        super(MyEventHandler, self).on_moved(event)
        what = 'directory' if event.is_directory else 'file'
        log_msg = "Moved {}: from {} to {}".format(what, event.src_path, event.dest_path)
        self.fs_log.info(log_msg)

    def on_created(self, event):
        """called on creation of a file or folder"""
        super(MyEventHandler, self).on_created(event)
        what = 'directory' if event.is_directory else 'file'
        self.fs_log.info("Created {}: {}".format(what, event.src_path))

    def on_deleted(self, event):
        """called on deletion of a file or folder"""
        super(MyEventHandler, self).on_deleted(event)
        what = 'directory' if event.is_directory else 'file'
        self.fs_log.info("Deleted {}: {}".format(what, event.src_path))

    def on_modified(self, event):
        """when a file is modified the event is logged and appended to a separate file
        Then the script is run through python and the output is (over)written to self.log_file
        and appended to the file handled by the info handler"""
        super(MyEventHandler, self).on_modified(event)
        what = 'directory' if event.is_directory else 'file'
        self.fs_log.info("Modified {}: {}".format(what, event.src_path))
        event_path = Path(event.src_path) # using plain src_path gives nonexistent path
        if event_path.is_file():
            in_file = str(event_path)
            out_str = python(in_file)
            self.run_log.notice('Output:\n{}'.format(out_str))
            self.run_handler.close()
            self.nvim.command('pedit ' + self.log_file)
Example #44
class Logging_MixIn(Abstract_Interface):
    """
    Logging Version
    """
    def __init__(self, *args, **kwargs):
        """
        Instantiate logging interface
        :param args:
        :param kwargs:
        :return:
        """
        Abstract_Interface.__init__(self, *args, **kwargs)
        from logbook import Logger
        self.log = Logger("{}-{}".format(type(self).__name__,self.door_name))
        self.log.debug("Initialised using : {}".format(kwargs))

    def activate(self):
        self.log.debug("Activated")

    def is_active(self):
        self.log.debug("Checking activation")

    def open(self, duration=10):
        self.log.info("Opening for {}".format(duration))
Example #45
class MediaWatcher(object):

    default_config = {
        'mock': False
    }

    def __init__(self, config):
        self.config = MediaWatcherConfig(**config)
        StreamHandler(sys.stdout).push_application()
        self.logger = Logger('MediaWatcher')
        self.mover = MediaWatcherMover(self.config, self.logger)
        self.handler = MediaWatcherHandler(self.mover, self.logger)
        self.observer = Observer()
        self.setup_watch()

    def startup(self):
        self.logger.info('Performing initial scan.')
        self.mover.scan()
        self.logger.info('Initial scan done, ready to watch {} folder{}.'.format(len(self.config.watch), 's' if len(
            self.config.watch) != 1 else ''))

    def setup_watch(self):
        for directory in self.config.watch:
            path = Path(directory)
            if not path.is_dir():
                raise FileNotFoundError('Directory {} not found on system.'.format(directory))
            self.observer.schedule(self.handler, str(path), recursive=True)

    def watch(self):
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()
Example #46
class EventHandler(FileSystemEventHandler):  # triggered on file changes
    def __init__(self):
        super(EventHandler,self).__init__()
        self.log = Logger(u"fileMonitor")
    
    def on_created(self, event):
        super(EventHandler,self).on_created(event)
        if os.path.splitext(event.src_path)[1] == ".py":
            self.log.info("Create file: %s " % event.src_path)
            # send plugin add message
            pub.sendMessage(topics.PLUGIN_ADD, path=event.src_path)
    
    def on_modified(self, event):
        super(EventHandler,self).on_modified(event)
        if os.path.splitext(event.src_path)[1] == ".py":
            self.log.info("Modify file: %s " % event.src_path)
            pub.sendMessage(topics.PLUGIN_MODIFY, path=event.src_path)
    
    def on_deleted(self, event):
        super(EventHandler,self).on_deleted(event)
        if os.path.splitext(event.src_path)[1] == ".py":
            self.log.info("delete file: %s " % event.src_path)
            pub.sendMessage(topics.PLUGIN_DEL, path=event.src_path)
Example #47
def main():
    logger.info('Starting')

    servers = [Server(i) for i in range(1, NUM_SERVERS + 1)]

    start_servers(servers)

    time.sleep(10)
    try:
        return test(servers)
    except Exception, e:
        logger.exception('Test failed: %s' % e)
        return 1
    finally:
        logger.info('Stopping')
        stop(servers)

if __name__ == '__main__':
    format = '[{record.time}] {record.level_name:>5} [{record.extra[worker_id]}] {record.message}'

    logging_setup = NestedSetup([
        NullHandler(),
        FileHandler(
            filename=os.path.join(os.path.dirname(__file__), 'log/client.log'),
            format_string=format,
            bubble=True,
        ),
        StderrHandler(level=logbook.INFO, format_string=format, bubble=True),
    ])
Example #48
  log = Logger('Main')

  if len(sys.argv) > 1:
    start = datetime.now()

    letters = sys.argv[1]

    preferred = None

    if len(sys.argv) > 2:
      preferred = list(sys.argv[2])

    if letters:
      letters = [x for x in letters]

      log.info('Working with {}'.format(letters))

      words = set()

      with open('enable.txt') as dictionary:
        words = set(word.strip().lower() for word in dictionary if len(word) <= len(letters) and len(word) > 1)

      log.info('Evaluation against {} words'.format(len(words)))

      log.info('Sorting words')
      words = sorted(words, key=lambda word: len(word))
      #max_length = max(words, key= lambda word: len(word))

      #log.info('Max word length is {}'.format(max_length))

      log.info('Creating worker pool')
Example #49
0
#fix
#import dateutil
import arrow
import redis

#fix
import greenlet

import gevent
from gevent import Timeout
from gevent.pool import Pool

from logbook import Logger
log = Logger('heartbeat')

log.info('start')


#redis pool
REDIS_MAX_CONNECTIONS = 100

rpool = redis.ConnectionPool(host='localhost', port=6379, db=1,
                             max_connections=REDIS_MAX_CONNECTIONS)

rclient = redis.Redis(connection_pool=rpool)


class TimeoutException(Exception):
    """ timeout exception for gevent Timeout"""
    pass
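A short sketch of how the connection pool and TimeoutException above might be combined into a heartbeat check; the key name and timeout are assumptions, and the timeout only fires if the redis socket call can yield to gevent (i.e. the process is monkey-patched):

def check_heartbeat(key='heartbeat:worker', timeout_seconds=5):
    # Assumes gevent.monkey.patch_all() has been applied so the blocking
    # redis call cooperates with the gevent Timeout.
    try:
        with Timeout(timeout_seconds, TimeoutException):
            return rclient.get(key)
    except TimeoutException:
        log.warn('heartbeat check timed out after {}s'.format(timeout_seconds))
        return None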
Example #50
0
import sys

from logbook import Logger
from logbook import FileHandler

# max file size in lines (10M by default)
MAX_LINES_TO_READ = 10000000
# max file size in megabytes
MAX_FILE_SIZE = "500M"
# where to write application log
LOG_FILE_NAME = "reican.log"

TIMESTAMP_FORMAT = "YYYY-MM-DD HH:mm:ss"

log_handler = FileHandler(LOG_FILE_NAME)
log_handler.push_application()
log = Logger("Reican")
log.info("Logging started")


def die(msg=None):
    """
    Print a message and exit.
    """
    if msg:
        print()
        print(msg)
        print()
        log.critical(msg)
    sys.exit(1)


def usage():
Example #51
0
    from logbook import FileHandler
    from logbook import Logger
    from argparse import ArgumentParser
    import sys
    parser = ArgumentParser()
    logpath = './log/'
    parser.add_argument('--log', nargs=1, help='log path')
    parser.add_argument('--version', nargs=1, help='maintain version')
    args = parser.parse_args(sys.argv[1:])
    logfilepath = logpath + args.log[0]
    maintain_version = args.version[0]
    log_handler = FileHandler(logfilepath)
    logbk = Logger('Token Maintain')

    with log_handler.applicationbound():
        logbk.info('maintain prepare')

        at_least = AT_LEAST_TOKEN_COUNT
        max_tokens_redis_limit = MAX_TOKENS_IN_REDIS

        logbk.info('maintain begin')

        # Authenticate new users and add their access_token to MongoDB;
        # Redis then imports the new tokens from MongoDB without resetting
        # the req_count of existing tokens.
        if maintain_version == 'addatoken':
            print('generate new token, write to mongo, push to redis without reset request count')
            generate_api_access_token(logbk)
            add_without_reset_req_count(max_tokens_redis_limit, logbk)

        # Push all access_tokens from MongoDB into Redis and reset the
        # req_count of existing tokens.
        if maintain_version == 'addalltoken':
            print('push all tokens from mongo to redis and reset request count')
Example #52
0
with error_handler.applicationbound():

    Config = ConfigParser.ConfigParser()
    Config.read('/etc/videodoor/videodoor.ini')
    SensorPin = Config.getint('hardware','SensorPin')
    log.debug('setting up GPIO pin %i...' % SensorPin)
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(SensorPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    log.debug('done.')

    log.debug('Setting up the omxplayer instance...')
    File = Config.get('video','File')
    Options = Config.get('video','Options')
    omx_status = False
    log.info('initializing videoplayer with file %s and options %s' % (File, Options))
    omx = OMXPlayer(File, Options)
    log.debug('done.')

    def start_video():
        global omx, omx_status
        if omx_status:
            log.warn('video already running')
        else:
            omx.toggle_pause()
            omx_status = True
            log.info('door opened')

    def stop_video():
        global omx, omx_status
        if omx_status:
Example #53
0
class Admin(object):
    """Administrative task object."""

    def __init__(self, conf):
        """conf: dict, yaml parameters."""
        self.conf = conf
        handler = TimedRotatingFileHandler(conf.log_file, date_format="%Y-%m-%d")
        handler.push_application()
        self.logger = Logger("Firetower-admin")
        self.queue = redis_util.get_redis_conn(host=conf.redis_host, port=conf.redis_port, redis_db=conf.redis_db)
        self.classifier = classifier.Levenshtein()
        self.last_archive_run = None

    # Wrote this to avoid numpy dependencies until we absolutely require them.
    def mean_stdev(self, items):
        """Return mean and stdev of numerical list of items.

        Args:
            items: list, list of numbers (int or float) to perform calculations upon.
        Returns:
            tuple of (mean of the items, standard deviation of the items about that mean).
        """
        n, mean, std = 0, 0, 0
        if items:
            n = len(items)

        else:
            return (0.0, 0.0)

        for item in items:
            mean = mean + item

        mean = mean / float(n)
        for item in items:
            std = std + (item - mean) ** 2

        std = sqrt(std / float(n))  # Population stdev: divide by n, not n - 1, so a single item cannot cause ZeroDivisionError.

        return mean, std

    def calc_stats(self):
        """Calculate and save mean and standard deviation."""

        categories = category.Category.get_all_categories(self.queue)
        for cat in categories:
            all_events = cat.events.range(0, -1)
            ratios = []
            for event in all_events:
                event = json.loads(event)
                ratios.append(self.classifier.str_ratio(cat.signature, event["sig"]))
            cat.mean, cat.stdev = self.mean_stdev(ratios)

    def archive_events(self):
        """Run the timeseries archiving for all categories.

        This code moves counts from the atomically incrementable HASHes
        to Sorted Sets (which can be sliced by date)."""

        now = datetime.datetime.utcnow()
        if self.last_archive_run is None:
            self.last_archive_run = datetime.datetime.utcnow()
            return

        delta = datetime.timedelta(seconds=self.conf.archive_time)
        if self.last_archive_run < (now - delta):
            self.logger.debug("Archiving counts older than %s seconds" % (self.conf.archive_time,))
            for c in category.Category.get_all_categories(self.queue):
                self.logger.debug("Archiving for %s category" % (c.cat_id))
                c.timeseries.archive_cat_counts(self.last_archive_run)

    def run(self, args):
        """Run set of jobs specified on commandline or config."""

        self.logger.info("Running with tasks: %s" % (",".join(args)))
        for arg in args:
            if arg not in TASKS:
                self.logger.error("Specified unknown task: %s" % (arg,))
                sys.exit(1)
            if arg == "calc_stats":
                self.logger.info("Calculating stats for each category")
                self.calc_stats()
            if arg == "archive_events":
                self.archive_events()
                self.logger.info("Archiving old data from each category")
Example #54
0
class Fibratus():

    """Fibratus entrypoint.

    Setup the core components including the kernel
    event stream collector and the tracing controller.
    At this point the system handles are also being
    enumerated.

    """
    def __init__(self, filament):

        self.logger = Logger(Fibratus.__name__)
        self.file_handler = FileHandler(os.path.join(os.path.abspath(__file__), '..', '..', '..', 'fibratus.log'),
                                        mode='w+')
        self.kevt_streamc = KEventStreamCollector(etw.KERNEL_LOGGER_NAME.encode())
        self.kcontroller = KTraceController()
        self.ktrace_props = KTraceProps()
        self.ktrace_props.enable_kflags()
        self.ktrace_props.logger_name = etw.KERNEL_LOGGER_NAME

        self.handle_repository = HandleRepository()
        self._handles = []
        # query for handles on the
        # start of kernel trace
        with self.file_handler.applicationbound():
            self.logger.info('Starting fibratus...')
            self.logger.info('Enumerating system handles...')
            self._handles = self.handle_repository.query_handles()
            self.logger.info('%s handles found' % len(self._handles))
            self.handle_repository.free_buffers()
        self.thread_registry = ThreadRegistry(self.handle_repository, self._handles)

        self.kevent = KEvent(self.thread_registry)

        self._filament = filament

        self.fsio = FsIO(self.kevent, self._handles)
        self.hive_parser = HiveParser(self.kevent, self.thread_registry)
        self.tcpip_parser = TcpIpParser(self.kevent)
        self.dll_repository = DllRepository(self.kevent)

        self.requires_render = {}
        self.filters_count = 0

    def run(self):

        @atexit.register
        def _exit():
            self.stop_ktrace()

        self.kcontroller.start_ktrace(etw.KERNEL_LOGGER_NAME, self.ktrace_props)

        def on_kstream_open():
            if self._filament is None:
                IO.write_console('Done!                               ')
        self.kevt_streamc.set_kstream_open_callback(on_kstream_open)
        self._open_kstream()

    def _open_kstream(self):
        try:
            self.kevt_streamc.open_kstream(self._on_next_kevent)
        except Exception as e:
            with self.file_handler.applicationbound():
                self.logger.error(e)
        except KeyboardInterrupt:
            self.stop_ktrace()

    def stop_ktrace(self):
        IO.write_console('Stopping fibratus...')
        if self._filament:
            self._filament.close()
        self.kcontroller.stop_ktrace(self.ktrace_props)
        self.kevt_streamc.close_kstream()

    def add_filters(self, kevent_filters):
        if len(kevent_filters) > 0:
            self.filters_count = len(kevent_filters)
            # include the basic filters
            # that are essential to the
            # rest of kernel events
            self.kevt_streamc.add_kevent_filter(ENUM_PROCESS)
            self.kevt_streamc.add_kevent_filter(ENUM_THREAD)
            self.kevt_streamc.add_kevent_filter(ENUM_IMAGE)
            self.kevt_streamc.add_kevent_filter(REG_CREATE_KCB)
            self.kevt_streamc.add_kevent_filter(REG_DELETE_KCB)

            # these kevents are necessary for consistent state
            # of the trace. If the user doesn't include them
            # in a filter list, then we do the job but set the
            # kernel event type as not eligible for rendering
            if KEvents.CREATE_PROCESS not in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_PROCESS)
                self.requires_render[CREATE_PROCESS] = False
            else:
                self.requires_render[CREATE_PROCESS] = True

            if KEvents.CREATE_THREAD not in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_THREAD)
                self.requires_render[CREATE_THREAD] = False
            else:
                self.requires_render[CREATE_THREAD] = True

            if KEvents.CREATE_FILE not in kevent_filters:
                self.kevt_streamc.add_kevent_filter(CREATE_FILE)
                self.requires_render[CREATE_FILE] = False
            else:
                self.requires_render[CREATE_FILE] = True

            for kevent_filter in kevent_filters:
                ktuple = kname_to_tuple(kevent_filter)
                if isinstance(ktuple, list):
                    for kt in ktuple:
                        self.kevt_streamc.add_kevent_filter(kt)
                        if kt not in self.requires_render:
                            self.requires_render[kt] = True
                else:
                    self.kevt_streamc.add_kevent_filter(ktuple)
                    if ktuple not in self.requires_render:
                        self.requires_render[ktuple] = True

    def _on_next_kevent(self, ktype, cpuid, ts, kparams):
        """Callback which fires when new kernel event arrives.

        This callback is invoked for every new kernel event
        forwarded from the kernel stream collector.

        Parameters
        ----------

        ktype: tuple
            Kernel event type.
        cpuid: int
            Identifies the CPU core where the event
            has been captured.
        ts: str
            Temporal reference of the kernel event.
        kparams: dict
            Kernel event's parameters.
        """

        # initialize kernel event properties
        self.kevent.ts = ts
        self.kevent.cpuid = cpuid
        self.kevent.name = ktuple_to_name(ktype)
        kparams = ddict(kparams)
        # thread / process kernel events
        if ktype in [CREATE_PROCESS,
                     CREATE_THREAD,
                     ENUM_PROCESS,
                     ENUM_THREAD]:
            self.thread_registry.add_thread(ktype, kparams)
            if ktype in [CREATE_PROCESS, CREATE_THREAD]:
                self.thread_registry.init_thread_kevent(self.kevent,
                                                        ktype,
                                                        kparams)
                self._render(ktype)
        elif ktype in [TERMINATE_PROCESS, TERMINATE_THREAD]:
            self.thread_registry.init_thread_kevent(self.kevent,
                                                    ktype,
                                                    kparams)
            self._render(ktype)
            self.thread_registry.remove_thread(ktype, kparams)

        # file system/disk kernel events
        elif ktype in [CREATE_FILE,
                       DELETE_FILE,
                       CLOSE_FILE,
                       READ_FILE,
                       WRITE_FILE]:
            self.fsio.parse_fsio(ktype, kparams)
            self._render(ktype)

        # dll kernel events
        elif ktype in [LOAD_IMAGE, ENUM_IMAGE]:
            self.dll_repository.register_dll(kparams)
            if ktype == LOAD_IMAGE:
                self._render(ktype)
        elif ktype == UNLOAD_IMAGE:
            self.dll_repository.unregister_dll(kparams)
            self._render(ktype)

        # registry kernel events
        elif ktype == REG_CREATE_KCB:
            self.hive_parser.add_kcb(kparams)
        elif ktype == REG_DELETE_KCB:
            self.hive_parser.remove_kcb(kparams.key_handle)

        elif ktype in [REG_CREATE_KEY,
                       REG_DELETE_KEY,
                       REG_OPEN_KEY,
                       REG_QUERY_KEY,
                       REG_SET_VALUE,
                       REG_DELETE_VALUE,
                       REG_QUERY_VALUE]:
            self.hive_parser.parse_hive(ktype, kparams)
            self._render(ktype)

        # network kernel events
        elif ktype in [SEND_SOCKET_TCPV4,
                       SEND_SOCKET_UDPV4,
                       RECV_SOCKET_TCPV4,
                       RECV_SOCKET_UDPV4,
                       ACCEPT_SOCKET_TCPV4,
                       CONNECT_SOCKET_TCPV4,
                       DISCONNECT_SOCKET_TCPV4,
                       RECONNECT_SOCKET_TCPV4]:
            self.tcpip_parser.parse_tcpip(ktype, kparams)
            self._render(ktype)

        if self._filament:
            # call filament method
            # to process the next
            # kernel event from the stream
            if ktype not in [ENUM_PROCESS,
                             ENUM_THREAD, ENUM_IMAGE]:
                if self.kevent.name:
                    self._filament.process(self.kevent)

    def _render(self, ktype):
        """Renders the kevent to the standard output stream.

        Parameters
        ----------

        ktype: tuple
            Identifier of the kernel event
        """
        if not self._filament:
            if ktype in self.requires_render:
                rr = self.requires_render[ktype]
                if rr:
                    self.kevent.render()
            elif self.filters_count == 0:
                self.kevent.render()
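The add_filters/_render pair implements an "always subscribe, selectively render" policy: required kernel events are always added to the stream, but only rendered if the user asked for them. A simplified standalone sketch of that bookkeeping (names are illustrative, not part of Fibratus):

def build_render_table(requested, required):
    # Subscribe to every required kernel event, but mark only the ones the
    # user explicitly requested as eligible for rendering.
    render_table = {kevent: (kevent in requested) for kevent in required}
    for kevent in requested:
        render_table.setdefault(kevent, True)
    return render_table

# build_render_table({'CreateFile'}, {'CreateProcess', 'CreateThread', 'CreateFile'})
# -> {'CreateProcess': False, 'CreateThread': False, 'CreateFile': True}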
Example #55
0
class Worker(object):
    redis_worker_namespace_prefix = 'rq:worker:'
    redis_workers_keys = 'rq:workers'

    @classmethod
    def all(cls, connection=None):
        """Returns an iterable of all Workers.
        """
        if connection is None:
            connection = get_current_connection()
        reported_working = connection.smembers(cls.redis_workers_keys)
        workers = [cls.find_by_key(key, connection) for key in
                reported_working]
        return compact(workers)

    @classmethod
    def find_by_key(cls, worker_key, connection=None):
        """Returns a Worker instance, based on the naming conventions for
        naming the internal Redis keys.  Can be used to reverse-lookup Workers
        by their Redis keys.
        """
        prefix = cls.redis_worker_namespace_prefix
        if not worker_key.startswith(prefix):
            raise ValueError('Not a valid RQ worker key: %s' % (worker_key,))

        if connection is None:
            connection = get_current_connection()
        if not connection.exists(worker_key):
            return None

        name = worker_key[len(prefix):]
        worker = cls([], name, connection=connection)
        queues = connection.hget(worker.key, 'queues')
        worker._state = connection.hget(worker.key, 'state') or '?'
        if queues:
            worker.queues = [Queue(queue, connection=connection)
                                for queue in queues.split(',')]
        return worker


    def __init__(self, queues, name=None, default_result_ttl=DEFAULT_RESULT_TTL,
            connection=None, exc_handler=None):  # noqa
        if connection is None:
            connection = get_current_connection()
        self.connection = connection
        if isinstance(queues, Queue):
            queues = [queues]
        self._name = name
        self.queues = queues
        self.validate_queues()
        self._exc_handlers = []
        self.default_result_ttl = default_result_ttl
        self._state = 'starting'
        self._is_horse = False
        self._horse_pid = 0
        self._stopped = False
        self.log = Logger('worker')
        self.failed_queue = get_failed_queue(connection=self.connection)

        # By default, push the "move-to-failed-queue" exception handler onto
        # the stack
        self.push_exc_handler(self.move_to_failed_queue)
        if exc_handler is not None:
            self.push_exc_handler(exc_handler)


    def validate_queues(self):  # noqa
        """Sanity check for the given queues."""
        if not iterable(self.queues):
            raise ValueError('Argument queues not iterable.')
        for queue in self.queues:
            if not isinstance(queue, Queue):
                raise NoQueueError('Give each worker at least one Queue.')

    def queue_names(self):
        """Returns the queue names of this worker's queues."""
        return map(lambda q: q.name, self.queues)

    def queue_keys(self):
        """Returns the Redis keys representing this worker's queues."""
        return map(lambda q: q.key, self.queues)


    @property  # noqa
    def name(self):
        """Returns the name of the worker, under which it is registered to the
        monitoring system.

        By default, the name of the worker is constructed from the current
        (short) host name and the current PID.
        """
        if self._name is None:
            hostname = socket.gethostname()
            shortname, _, _ = hostname.partition('.')
            self._name = '%s.%s' % (shortname, self.pid)
        return self._name

    @property
    def key(self):
        """Returns the worker's Redis hash key."""
        return self.redis_worker_namespace_prefix + self.name

    @property
    def pid(self):
        """The current process ID."""
        return os.getpid()

    @property
    def horse_pid(self):
        """The horse's process ID.  Only available in the worker.  Will return
        0 in the horse part of the fork.
        """
        return self._horse_pid

    @property
    def is_horse(self):
        """Returns whether or not this is the worker or the work horse."""
        return self._is_horse

    def procline(self, message):
        """Changes the current procname for the process.

        This can be used to make `ps -ef` output more readable.
        """
        setprocname('rq: %s' % (message,))


    def register_birth(self):  # noqa
        """Registers its own birth."""
        self.log.debug('Registering birth of worker %s' % (self.name,))
        if self.connection.exists(self.key) and \
                not self.connection.hexists(self.key, 'death'):
            raise ValueError(
                    'There exists an active worker named \'%s\' '
                    'already.' % (self.name,))
        key = self.key
        now = time.time()
        queues = ','.join(self.queue_names())
        with self.connection.pipeline() as p:
            p.delete(key)
            p.hset(key, 'birth', now)
            p.hset(key, 'queues', queues)
            p.sadd(self.redis_workers_keys, key)
            p.execute()

    def register_death(self):
        """Registers its own death."""
        self.log.debug('Registering death')
        with self.connection.pipeline() as p:
            # We cannot use self.state = 'dead' here, because that would
            # rollback the pipeline
            p.srem(self.redis_workers_keys, self.key)
            p.hset(self.key, 'death', time.time())
            p.expire(self.key, 60)
            p.execute()

    def set_state(self, new_state):
        self._state = new_state
        self.connection.hset(self.key, 'state', new_state)

    def get_state(self):
        return self._state

    state = property(get_state, set_state)

    @property
    def stopped(self):
        return self._stopped

    def _install_signal_handlers(self):
        """Installs signal handlers for handling SIGINT and SIGTERM
        gracefully.
        """

        def request_force_stop(signum, frame):
            """Terminates the application (cold shutdown).
            """
            self.log.warning('Cold shut down.')

            # Take down the horse with the worker
            if self.horse_pid:
                msg = 'Taking down horse %d with me.' % self.horse_pid
                self.log.debug(msg)
                try:
                    os.kill(self.horse_pid, signal.SIGKILL)
                except OSError as e:
                    # ESRCH ("No such process") is fine with us
                    if e.errno != errno.ESRCH:
                        self.log.debug('Horse already down.')
                        raise
            raise SystemExit()

        def request_stop(signum, frame):
            """Stops the current worker loop but waits for child processes to
            end gracefully (warm shutdown).
            """
            self.log.debug('Got signal %s.' % signal_name(signum))

            signal.signal(signal.SIGINT, request_force_stop)
            signal.signal(signal.SIGTERM, request_force_stop)

            msg = 'Warm shut down requested.'
            self.log.warning(msg)

            # If shutdown is requested in the middle of a job, wait until
            # finish before shutting down
            if self.state == 'busy':
                self._stopped = True
                self.log.debug('Stopping after current horse is finished. '
                               'Press Ctrl+C again for a cold shutdown.')
            else:
                raise StopRequested()

        signal.signal(signal.SIGINT, request_stop)
        signal.signal(signal.SIGTERM, request_stop)


    def work(self, burst=False):  # noqa
        """Starts the work loop.

        Pops and performs all jobs on the current list of queues.  When all
        queues are empty, block and wait for new jobs to arrive on any of the
        queues, unless `burst` mode is enabled.

        The return value indicates whether any jobs were processed.
        """
        self._install_signal_handlers()

        did_perform_work = False
        self.register_birth()
        self.log.info('RQ worker started, version %s' % VERSION)
        self.state = 'starting'
        try:
            while True:
                if self.stopped:
                    self.log.info('Stopping on request.')
                    break
                self.state = 'idle'
                qnames = self.queue_names()
                self.procline('Listening on %s' % ','.join(qnames))
                self.log.info('')
                self.log.info('*** Listening on %s...' % \
                        green(', '.join(qnames)))
                wait_for_job = not burst
                try:
                    result = Queue.dequeue_any(self.queues, wait_for_job, \
                            connection=self.connection)
                    if result is None:
                        break
                except StopRequested:
                    break
                except UnpickleError as e:
                    msg = '*** Ignoring unpickleable data on %s.' % \
                            green(e.queue.name)
                    self.log.warning(msg)
                    self.log.debug('Data follows:')
                    self.log.debug(e.raw_data)
                    self.log.debug('End of unreadable data.')
                    self.failed_queue.push_job_id(e.job_id)
                    continue

                self.state = 'busy'

                job, queue = result
                # Use the public setter here, to immediately update Redis
                job.status = Status.STARTED
                self.log.info('%s: %s (%s)' % (green(queue.name),
                    blue(job.description), job.id))

                self.fork_and_perform_job(job)

                did_perform_work = True
        finally:
            if not self.is_horse:
                self.register_death()
        return did_perform_work

    def fork_and_perform_job(self, job):
        """Spawns a work horse to perform the actual work and passes it a job.
        The worker will wait for the work horse and make sure it executes
        within the given timeout bounds, or will end the work horse with
        SIGALRM.
        """
        child_pid = os.fork()
        if child_pid == 0:
            self.main_work_horse(job)
        else:
            self._horse_pid = child_pid
            self.procline('Forked %d at %d' % (child_pid, time.time()))
            while True:
                try:
                    os.waitpid(child_pid, 0)
                    break
                except OSError as e:
                    # In case we encountered an OSError due to EINTR (which is
                    # caused by a SIGINT or SIGTERM signal during
                    # os.waitpid()), we simply ignore it and enter the next
                    # iteration of the loop, waiting for the child to end.  In
                    # any other case, this is some other unexpected OS error,
                    # which we don't want to catch, so we re-raise those ones.
                    if e.errno != errno.EINTR:
                        raise

    def main_work_horse(self, job):
        """This is the entry point of the newly spawned work horse."""
        # After fork()'ing, always assure we are generating random sequences
        # that are different from the worker.
        random.seed()

        # Always ignore Ctrl+C in the work horse, as it might abort the
        # currently running job.
        # The main worker catches the Ctrl+C and requests graceful shutdown
        # after the current work is done.  When cold shutdown is requested, it
        # kills the current job anyway.
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)

        self._is_horse = True
        self.log = Logger('horse')

        success = self.perform_job(job)

        # os._exit() is the way to exit from children after a fork(), in
        # contrast to the regular sys.exit()
        os._exit(int(not success))

    def perform_job(self, job):
        """Performs the actual work of a job.  Will/should only be called
        inside the work horse's process.
        """
        self.procline('Processing %s from %s since %s' % (
            job.func_name,
            job.origin, time.time()))

        try:
            with death_penalty_after(job.timeout or 180):
                rv = job.perform()

            # Pickle the result in the same try-except block since we need to
            # use the same exc handling when pickling fails
            pickled_rv = dumps(rv)
            job._status = Status.FINISHED
        except:
            # Use the public setter here, to immediately update Redis
            job.status = Status.FAILED
            self.handle_exception(job, *sys.exc_info())
            return False

        if rv is None:
            self.log.info('Job OK')
        else:
            self.log.info('Job OK, result = %s' % (yellow(unicode(rv)),))

        # How long we persist the job result depends on the value of
        # result_ttl:
        # - If result_ttl is 0, cleanup the job immediately.
        # - If it's a positive number, set the job to expire in X seconds.
        # - If result_ttl is negative, don't set an expiry to it (persist
        #   forever)
        result_ttl =  self.default_result_ttl if job.result_ttl is None else job.result_ttl  # noqa
        if result_ttl == 0:
            job.delete()
            self.log.info('Result discarded immediately.')
        else:
            p = self.connection.pipeline()
            p.hset(job.key, 'result', pickled_rv)
            p.hset(job.key, 'status', job._status)
            if result_ttl > 0:
                p.expire(job.key, result_ttl)
                self.log.info('Result is kept for %d seconds.' % result_ttl)
            else:
                self.log.warning('Result will never expire, clean up result key manually.')
            p.execute()

        return True


    def handle_exception(self, job, *exc_info):
        """Walks the exception handler stack to delegate exception handling."""
        exc_string = ''.join(
                traceback.format_exception_only(*exc_info[:2]) +
                traceback.format_exception(*exc_info))
        self.log.error(exc_string)

        for handler in reversed(self._exc_handlers):
            self.log.debug('Invoking exception handler %s' % (handler,))
            fallthrough = handler(job, *exc_info)

            # Only handlers with explicit return values should disable further
            # exc handling, so interpret a None return value as True.
            if fallthrough is None:
                fallthrough = True

            if not fallthrough:
                break

    def move_to_failed_queue(self, job, *exc_info):
        """Default exception handler: move the job to the failed queue."""
        exc_string = ''.join(traceback.format_exception(*exc_info))
        self.log.warning('Moving job to %s queue.' % self.failed_queue.name)
        self.failed_queue.quarantine(job, exc_info=exc_string)

    def push_exc_handler(self, handler_func):
        """Pushes an exception handler onto the exc handler stack."""
        self._exc_handlers.append(handler_func)

    def pop_exc_handler(self):
        """Pops the latest exception handler off of the exc handler stack."""
        return self._exc_handlers.pop()
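A hedged usage sketch of the Worker class above. It assumes a redis connection has already been established the way get_current_connection() expects and that Queue comes from the same codebase; the queue names and worker name are placeholders. burst=True makes work() return once the queues are drained instead of blocking:

queues = [Queue('default'), Queue('low')]
worker = Worker(queues, name='example-worker')
had_jobs = worker.work(burst=True)
worker.log.info('Processed at least one job: %s' % had_jobs)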
Example #56
0
def post_created(response):
    if response.status_code == 201:
        return True
    else:
        print(response.text)
        print(response.status_code)
        return False


# Questionnaire
questionnaire = {'name': 'FMR Continuation Desire Questionnaire'}
r = requests.post('{}/questionnaire'.format(BASE_URL),
                  data=json.dumps(questionnaire),
                  headers={'content-type': 'application/json'})
if r.status_code == 201:
    log.info('Questionnaire {} with status code {}'.format(questionnaire['name'], r.status_code))
else:
    log.error('Questionnaire {} with status code {}'.format(questionnaire['name'], r.status_code))



# QuestionSet
question_sets = [
    {'name': 'Before the experience', 'sort_nr': '1',
    'info_text': 'Questions that need to be filled out before you start playing',
    'questionnaire_id': '1'},
    {'name': 'During the experience', 'sort_nr': '2',
    'info_text': 'Questions regarding playing the game', 'questionnaire_id': '1'},
    {'name': 'After the experience', 'sort_nr': '3',
    'info_text': 'Questions regarding post-play', 'questionnaire_id': '1'}
]
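The question_sets above could be submitted with the same post/check pattern used for the questionnaire; the '/question_set' endpoint name is an assumption for illustration only:

for question_set in question_sets:
    r = requests.post('{}/question_set'.format(BASE_URL),  # hypothetical endpoint
                      data=json.dumps(question_set),
                      headers={'content-type': 'application/json'})
    if post_created(r):
        log.info('QuestionSet {} created'.format(question_set['name']))
    else:
        log.error('QuestionSet {} failed with status code {}'.format(question_set['name'], r.status_code))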
Example #57
0
def main(run_name, settings, exec_mode):
    # File path and test data path
    fp = os.path.dirname(__file__) 
    tdp = join(fp,"..", "tests", "test_data")

    composition = settings.get("composition_file", join(tdp,"composition.fa"))
    coverage = settings.get("coverage_file", join(tdp,"coverage"))
    result_path = settings.get("results_path_base", join(fp,"..","tmp_out_test"))
    kmer_lengths = settings.get("kmer_lengths", [4])
    pcas = settings.get("total_percentage_pca", [80])
    thresholds = settings.get("length_threshold", [1000])
    cv_types = settings.get("covariance_type", ["full"])
    clusters = settings.get("clusters", "2,100,2")
    max_n_processors = settings.get("max_n_processors", 1)
    email = settings.get("email", None)

    log_path = settings.get("log_path", 
                            join(os.path.expanduser("~"),"log","concoctr.log"))
    handler = TimedRotatingFileHandler(log_path)
    logger = Logger(run_name)
    handler.push_application()

    result_rows = []
    indx = []
    
    con_ps = []

    if exec_mode == 'drmaa':
        s = drmaa.Session()
        s.initialize()

    result_dir = os.path.join(result_path, run_name)
    os.mkdir(result_dir)
    slurm_dir = os.path.join(result_dir, 'slurm')
    os.mkdir(slurm_dir)
    sbatch_dir = os.path.join(result_dir, 'sbatch')
    os.mkdir(sbatch_dir)
    concoct_dir = os.path.join(result_dir, 'concoct_output')
    os.mkdir(concoct_dir)

    for k in kmer_lengths:
        for pca in pcas:
            for thr in thresholds:
                for cv in cv_types:
                    job_name = "_".join(map(str, [k, pca, thr, cv]))
                    con_p = ConcoctParams(composition,
                                          coverage,
                                          kmer_length = k,
                                          total_percentage_pca= pca,
                                          length_threshold = thr,
                                          covariance_type = cv,
                                          basename = os.path.join(concoct_dir, job_name) + "/",
                                          max_n_processors = max_n_processors,
                                          clusters = clusters)
                    con_ps.append(con_p)

                    cr = ConcoctR()
                    if (k > 9):
                        # Throw in some extra memory
                        n_cores = 4
                    else:
                        n_cores = 1
                    jobid = None
                    if exec_mode == 'drmaa':
                        jt = s.createJobTemplate()
                        jt.nativeSpecification = '-A b2010008 -p core -n {} -t 7-00:00:00'.format(n_cores)
                        jt.email = email
                        jt.workingDirectory = result_path
                        jobid = cr.run_concoct(con_p, drmaa_s=s, drmaa_jt=jt)
                    elif exec_mode == 'sbatch':
                        script_file = os.path.join(result_dir, 'sbatch', job_name)
                        sbatch_params = ['-A b2010008', 
                                         '-p core', 
                                         '-n {}'.format(n_cores), 
                                         '-t 7-00:00:00', 
                                         "-J {}".format(job_name),
                                         "-o {}".format(os.path.join(result_dir, 'slurm', 'slurm-%j.out'))]
                        cr.generate_sbatch_script(con_p, sbatch_params, script_file)
                        jobid = cr.run_concoct(con_p, sbatch_script = script_file)
                    if jobid:
                        result_rows.append(con_p.options)
                        indx.append(jobid)
                        logger.info("Submitted jobid {0}".format(jobid))

    results_df = p.DataFrame(result_rows, index=indx)
    results_df.to_csv(os.path.join(result_path, run_name + "_all_results.csv"))

    handler.pop_application()
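An illustrative call of main(), with a settings dict built from the keys the function reads; the paths are placeholders and not part of the original script:

settings = {
    'composition_file': '/path/to/composition.fa',  # placeholder
    'coverage_file': '/path/to/coverage',           # placeholder
    'results_path_base': '/path/to/results',        # placeholder
    'kmer_lengths': [4, 6],
    'total_percentage_pca': [80],
    'length_threshold': [1000],
    'covariance_type': ['full'],
    'clusters': '2,100,2',
    'max_n_processors': 4,
    'email': None,
}
main('test_run', settings, 'sbatch')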
Example #58
0
    for pid, project_data in all_projects:
        experiment_settings = exp_settings_class()
        manager = manager_class(experiment_settings)
        try:
            project, settings = manager.get_project(pid)
            try:
                manager.recheck_folders_and_params(pid, project, project_data=project_data)
            except OSError as e:
                app_logger.error("OSError: %s" % e)
            app_logger.warning("Project %s was added earlier" % pid)
        except AttributeError as e:
            app_logger.error("ERROR: please check that all settings are given as dictionaries, not sets. (%s)" % e)
            raise Exception(e)
        except ProjectManagerException as e:
            manager.add_project(pid, project_data, init=True)
            app_logger.info("Project %s added" % pid)


def execute(args, usage, dataset_dict, exp_settings, exp_class, manager_class):
    ''' Execute an experiment with the given args.
    '''
    if len(args) not in [5, 7]:
        print(usage)
        sys.exit(0)
    command = args[0]
    tasks = args[1]
    start = int(args[2])
    end = int(args[3])
    dataset = args[4]
    settings_context = None
    project_context = None
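For reference, a minimal illustrative call of execute() using the 5-argument form it accepts; every value is a placeholder, and usage, dataset_dict, exp_settings, exp_class and manager_class are assumed to come from the surrounding module:

args = ['run', 'all_tasks', '0', '10', 'some_dataset']  # command, tasks, start, end, dataset
execute(args, usage, dataset_dict, exp_settings, exp_class, manager_class)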
Example #59
0
class PortfolioManager:
    '''
    Observes the trader universe and produces
    orders to be executed within zipline.
    Abstract class meant to be subclassed by the
    user to construct their portfolio optimizer.
    '''
    __metaclass__ = abc.ABCMeta

    #TODO Add in the constructor or setup parameters some general settings like maximum weights, positions, frequency,...
    def __init__(self, parameters):
        '''
        Parameters
            parameters : dict(...)
                Named parameters used either for general portfolio settings
                (server and constraints), and for user optimizer function
        '''
        super(PortfolioManager, self).__init__()
        self.log = Logger('Manager')
        self.portfolio       = None
        self.date            = None
        self._optimizer_parameters = parameters
        self.connected = False
        self.server = parameters.get('server', None)
        #TODO Message emission only if a client exists ? Could try to bind and give up if no connections
        #NOTE Non blocking recv(): https://github.com/zeromq/pyzmq/issues/132   /  zmq.NOBLOCK ?
        #NOTE good example: http://zguide2.zeromq.org/py:peering3
        #if self.server.ports is not None:
            #startup_msg = self.server.receive()
            #self.connected = True
            #log.info(json.dumps(startup_msg, indent=4, separators=(',', ': ')))
        #TODO Should send stuff anyway, and accept new connections while running
        #else:

        self.connected = parameters.get('connected', False)

        # Run the server if the engine didn't while it is asked
        if self.server.port is None and self.connected:
            self.log.info('Binding manager on default port...')
            self.server.run(host='127.0.0.1', port=5570)

    @abc.abstractmethod
    def optimize(self, date, to_buy, to_sell, parameters):
        '''
        Users must overwrite this method. It is called with the current date,
        the buy and sell candidates, and the optimizer parameters, and must
        return (allocation, expected_return, expected_risk).
        '''
        pass

    def update(self, portfolio, date):
        '''
        Actualizes the portfolio universe
        and if connected, sends it through the wires
        ________________________________
        Parameters
            portfolio: zipline.portfolio(1)
                ndict object storing portfolio values at the given date
            date: datetime.datetime(1)
                Current date in zipline simulation
        '''
        self.portfolio = portfolio
        self.date      = date
        #FIXME A generic method: f(ndict) = dict()
        portfolio.capital_used = portfolio.capital_used[0]
        #portfolio.start_date = portfolio.start_date.strftime(format='%Y-%m-%d %H:%M')
        #FIXME remote console receives nothing
        if self.connected:
            self.server.send({'positions': json.loads(str(portfolio.positions).replace('Position(', '').replace(')', '').replace("'", '"')),
                              'value': portfolio.portfolio_value,
                              'cash': portfolio.cash,
                              'returns': portfolio.returns,
                              'pnl': portfolio.pnl,
                              'capital_used': portfolio.capital_used,
                              'actif': portfolio.positions_value},
                              type='portfolio',
                              channel='dashboard')

    def trade_signals_handler(self, signals):
        '''
        Process buy and sell signals from backtester or live trader
        @param signals: dict holding stocks of interest, format like {"google": 567.89, "apple": -345.98}
                        If the value is negative -> sell signal, otherwise a buy one
        @return: dict orderBook, like {"google": 34, "apple": -56}
        '''
        orderBook       = dict()

        # If value < 0, it's a sell signal on the key, else buy signal
        to_buy          = [t for t in signals if signals[t] > 0]
        to_sell         = set(self.portfolio.positions.keys()).intersection([t for t in signals if signals[t] < 0])
        if not to_buy and not to_sell:
            # Nothing to do
            return dict()

        # Compute the optimal portfolio allocation, using user defined function
        alloc, e_ret, e_risk = self.optimize(self.date, to_buy, to_sell, self._optimizer_parameters)

        #TODO Check about selling in available money and handle 250 stocks limit
        #TODO Handle max_* as well, ! already actif stocks

        # Building orders for zipline
        for t in alloc:
            # Handle allocation returned as number of stocks to order
            if isinstance(alloc[t], int):
                orderBook[t] = alloc[t]

            # Handle allocation returned as stock weights to order
            elif isinstance(alloc[t], float):
                # Sell orders
                if alloc[t] <= 0:
                    orderBook[t] = int(alloc[t] * self.portfolio.positions[t].amount)
                # Buy orders
                else:
                    # If we already trade this ticker, subtract the owned amount before computing the number of stocks to buy
                    if self.portfolio.positions[t].amount:
                        price = self.portfolio.positions[t].last_sale_price
                    else:
                        price = signals[t]
                    orderBook[t] = (int(alloc[t] * self.portfolio.portfolio_value / price)
                                    - self.portfolio.positions[t].amount)

        return orderBook

    def setup_strategie(self, parameters):
        '''
        Set general or user-defined parameters
        (max_weight, max_assets, max_frequency, commission cost)
        ________________________________________________________
        Parameters
            parameters: dict(...)
                Arbitrary values to change general constraints,
                or for user algorithm settings
        '''
        for name, value in parameters.iteritems():
            self._optimizer_parameters[name] = value
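A hedged sketch of a concrete subclass, following the optimize() contract used by trade_signals_handler above (called with the date, buy candidates, sell candidates and the optimizer parameters, returning allocation, expected return and expected risk); the equal-weight logic is illustrative only and not part of the original codebase:

class EqualWeightManager(PortfolioManager):
    '''
    Illustrative optimizer: split the portfolio equally across buy signals
    and liquidate every sell signal.
    '''
    def optimize(self, date, to_buy, to_sell, parameters):
        allocation = {}
        if to_buy:
            weight = 1.0 / len(to_buy)
            for ticker in to_buy:
                allocation[ticker] = weight   # positive float weight -> buy
        for ticker in to_sell:
            allocation[ticker] = -1.0         # -100% of the held amount -> sell everything
        expected_return, expected_risk = 0.0, 1.0
        return allocation, expected_return, expected_risk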