Пример #1
0
def remove_logger(config, found_workers):
    """Remove the highest-numbered worker logger and its yaml config entry.

    config        -- dict with 'loggers_abspath' and ['logger']['logging_config'].
    found_workers -- list of existing worker names like "prefix-N"; the first
                     entry supplies the common prefix.

    On any failure the logging yaml is rolled back from its ".0" backup.
    """
    import yaml

    NN = len(found_workers) - 1

    # zero workers (nothing to index) or a single worker (the base logger
    # itself, which must not be removed): bail out early
    if NN <= 0:
        elog.warn("no logger can be removed")
        return

    loggerPrefix = found_workers[0].split('-')[0]
    delLoggerName = "%s-%d" % (loggerPrefix, NN)

    # worker numbering may have holes: scan upward for an existing name
    while delLoggerName not in found_workers and NN < LOGGER_WORKERS_MAX:
        NN += 1
        delLoggerName = "%s-%d" % (loggerPrefix, NN)

    if delLoggerName not in found_workers:
        elog.warn("no logger can be removed")
        return

    # files belonging to the logger being removed
    loggerNN = os.path.join(config['loggers_abspath'], "%s.py" % delLoggerName)
    loggerNNPosition = os.path.join(config['loggers_abspath'], ".%s.position" % delLoggerName)

    elog.info("%s: %s", delLoggerName, loggerNN)

    # resolve paths before the try block so the rollback in the except
    # handler can never hit a NameError when the config lookup itself fails
    loggingConfigYaml = config['logger']['logging_config']
    loggingConfigYamlDefault = "%s.0" % loggingConfigYaml

    try:
        # back up the yaml so it can be restored if anything below fails
        shutil.copy(loggingConfigYaml, loggingConfigYamlDefault)

        with open(loggingConfigYaml) as fr:
            # safe_load: no need for the unsafe full loader, and a bare
            # yaml.load without Loader is deprecated
            cfg = yaml.safe_load(fr)

        del cfg['loggers'][delLoggerName]

        fd = util.open_file(loggingConfigYaml)
        try:
            yaml.dump(cfg, fd, default_flow_style=False)
        finally:
            fd.close()

        os.remove(loggerNN)
        os.remove(loggerNNPosition)

        # refresh the backup so it matches the new state
        shutil.copy(loggingConfigYaml, loggingConfigYamlDefault)
        elog.info("success: %s", delLoggerName)
    except Exception:
        # was a bare except (also swallowed KeyboardInterrupt/SystemExit);
        # roll the yaml back from the backup
        shutil.copy(loggingConfigYamlDefault, loggingConfigYaml)
        elog.error("failed: %s", delLoggerName)
Пример #2
0
def sweep_path(path, file_exts, recursive, author, curtime):
    """Walk `path` and apply update_file() to every regular file whose
    extension is listed in `file_exts`.

    path      -- directory to sweep
    file_exts -- extensions (with leading dot) to process
    recursive -- descend into subdirectories when True
    author, curtime -- passed through to update_file()
    """
    filelist = os.listdir(path)
    filelist.sort(key=lambda x: x[0:20])

    for f in filelist:
        # join outside the try so `pf` is always bound in the except handler
        pf = os.path.join(path, f)
        try:
            fs = os.stat(pf)
            mod = fs.st_mode

            if stat.S_ISDIR(mod):
                if util.dir_exists(pf) and recursive:
                    sweep_path(pf, file_exts, recursive, author, curtime)
            elif stat.S_ISREG(mod):
                # skip vanished files, this script itself and package markers
                if not util.file_exists(pf) or pf == APPFILE or f == "__init__.py":
                    continue

                _, ext = os.path.splitext(f)
                if ext in file_exts:
                    update_file(pf, f, fs, author, curtime)
        except OSError as e:
            # lazy %-args, consistent with the other elog call sites
            elog.warn("%r: %s", e, pf)
Пример #3
0
def add_logger(config, found_workers):
    """Clone the base worker logger into a new "prefix-NN" logger.

    config        -- dict with 'loggers_abspath' and ['logger']['logging_config'].
    found_workers -- list of existing worker names; the first entry supplies
                     the common prefix, and its .py file is copied as template.

    On any failure the logging yaml is rolled back from its ".0" backup.
    """
    import yaml
    from copy import deepcopy

    # no base worker to clone from
    if not found_workers:
        elog.warn("no logger can be added")
        return

    NN = len(found_workers)

    if NN > LOGGER_WORKERS_MAX:
        elog.warn("too many loggers(>%d) to add", LOGGER_WORKERS_MAX)
        return

    loggerPrefix = found_workers[0].split('-')[0]

    # pick the first free "prefix-NN" name
    newLoggerName = "%s-%d" % (loggerPrefix, NN)
    while newLoggerName in found_workers:
        NN += 1
        newLoggerName = "%s-%d" % (loggerPrefix, NN)

    # template file and the new logger file to create
    logger0 = os.path.join(config['loggers_abspath'], "%s.py" % loggerPrefix)
    loggerNN = os.path.join(config['loggers_abspath'], "%s.py" % newLoggerName)

    elog.info("%s: %s", newLoggerName, loggerNN)

    # resolve paths before the try block so the rollback in the except
    # handler can never hit a NameError when the config lookup itself fails
    loggingConfigYaml = config['logger']['logging_config']
    loggingConfigYamlDefault = "%s.0" % loggingConfigYaml

    try:
        # back up the yaml so it can be restored if anything below fails
        shutil.copy(loggingConfigYaml, loggingConfigYamlDefault)

        with open(loggingConfigYaml) as fr:
            # safe_load: bare yaml.load without Loader is deprecated/unsafe
            cfg = yaml.safe_load(fr)

        # mutate the config BEFORE truncating the yaml file, so a missing
        # base entry cannot leave a half-written file behind
        cfg['loggers'][newLoggerName] = deepcopy(cfg['loggers'][loggerPrefix])

        fd = util.open_file(loggingConfigYaml)
        try:
            yaml.dump(cfg, fd, default_flow_style=False)
        finally:
            fd.close()

        shutil.copy(logger0, loggerNN)

        # refresh the backup so it matches the new state
        shutil.copy(loggingConfigYaml, loggingConfigYamlDefault)
        elog.info("success: %s", newLoggerName)
    except Exception:
        # was a bare except; roll the yaml back from the backup
        shutil.copy(loggingConfigYamlDefault, loggingConfigYaml)
        elog.error("failed: %s", newLoggerName)
Пример #4
0
def sweep_dir(path, srcs, results):
    """Recursively scan `path` for text files containing any string in
    `srcs`; each matched file path is appended (once) to `results`.

    Directories listed in ignore_dirs, files in ignore_files and extensions
    in ignore_exts are skipped; when only_exts is non-empty, only those
    extensions are scanned.
    """
    dname = os.path.basename(path)
    if dname in ignore_dirs:
        elog.warn("ignore path: %s", path)
        return

    filelist = os.listdir(path)
    filelist.sort(key=lambda x: x[0:20])

    for f in filelist:
        pf = os.path.join(path, f)

        if util.dir_exists(pf):
            sweep_dir(pf, srcs, results)
            continue
        if not util.file_exists(pf):
            continue

        _, ext = os.path.splitext(f)

        passed_filters = f not in ignore_files and ext not in ignore_exts
        if only_exts and ext not in only_exts:
            passed_filters = False
        if not passed_filters:
            continue

        fd = None
        try:
            fd = open(pf, 'r')
            # enumerate replaces the hand-maintained lineno counter
            for lineno, line in enumerate(fd.readlines(), start=1):
                for src in srcs:
                    if line.find(src) != -1:
                        elog.info("found '%s': [%s:%d]", src,
                                  os.path.relpath(pf, APPPATH),
                                  lineno)
                        elog.force_clean("%s", line)
                        if pf not in results:
                            results.append(pf)
        except Exception:
            # was a bare except; keep the sys.exc_info() report
            elog.error("%r: %s", sys.exc_info(), pf)
        finally:
            util.close_file_nothrow(fd)
Пример #5
0
def main(parser, appConfig, loggerConfig):
    """Entry point: parse options and, with --create-project, generate a
    project skeleton via create_sb2_project().

    parser       -- optparse parser already populated with options
    appConfig    -- dict with 'projectRootdir' and 'sb2template'
    loggerConfig -- passed to utils.logger.set_logger()
    """
    import utils.logger

    (options, args) = parser.parse_args(args=None, values=None)

    # configure logging for its side effect; the returned dict config
    # was bound to an unused local in the original
    utils.logger.set_logger(loggerConfig, options.log_path, options.log_level)

    elog.force("%s-%s start", APPNAME, APPVER)

    # absolute path of the current script (result unused here; the call is
    # kept for parity with the other mains in this project)
    util.script_abspath(inspect.currentframe())

    # expand ${artifactId} and map '-'/'.' to '_' in the artifact name
    options.artifactName = options.artifactName \
        .replace('${artifactId}', options.artifactId) \
        .replace('-', '_') \
        .replace('.', '_')

    # expand placeholders in the artifact root directory
    options.artifactRootdir = options.artifactRootdir \
        .replace('${projectRootdir}', appConfig['projectRootdir']) \
        .replace('${artifactId}', options.artifactId) \
        .replace('${artifactName}', options.artifactName)

    if options.create_project:
        util.info2("projectRootdir = '%s'" % appConfig['projectRootdir'])
        util.info2("sb2template = '%s'" % appConfig['sb2template'])

        util.print_options_attrs(options, [
            'groupId', 'artifactId', 'artifactName', 'artifactVersion',
            'artifactDescription', 'artifactRootdir'
        ])

        # refuse to clobber an existing project dir unless --force is given
        if util.dir_exists(options.artifactRootdir) and not options.force:
            elog.warn(
                "artifactRootdir has already existed. (using '--force' to overwrite it.)"
            )
            sys.exit(-1)

        create_sb2_project(appConfig, options)

    elog.force("%s-%s exit.", APPNAME, APPVER)
Пример #6
0
def main(parser):
    """Entry point: resolve the target path, build the extension filter
    list, and sweep the path updating matching files."""
    (options, args) = parser.parse_args(args=None, values=None)

    # absolute path of the current script
    abspath = util.script_abspath(inspect.currentframe())

    if not options.path:
        elog.warn("No path specified. using: -P, --path=PATH")
        exit(-1)

    # absolute path for options.path
    root_path = util.source_abspath(APPFILE, options.path, abspath)

    # build the list of file extensions: each filter is either a literal
    # ".ext" or a named group in source_filters_dict
    # (loop variable renamed from `filter`, which shadowed the builtin)
    file_exts = []
    for flt in parse_strarr(options.filter):
        if flt.startswith('.') and flt not in file_exts:
            file_exts.append(flt)

        if flt in source_filters_dict:
            for ext in source_filters_dict[flt]:
                if ext not in file_exts:
                    file_exts.append(ext)

    curtime = time.time()

    elog.force("path:      %r", root_path)
    elog.force("exts:      %r", file_exts)
    elog.force("recursive: %r", options.recursive)
    elog.force("timestamp: %r", curtime)

    if options.author:
        elog.force("author:    %r", options.author)

    sweep_path(root_path, file_exts, options.recursive, options.author,
               curtime)
Пример #7
0
def main(parser):
    """Entry point: find files under --path containing the --srcs strings
    and, when --replace is given, substitute them with --dsts via sed.
    """
    (options, args) = parser.parse_args(args=None, values=None)

    # signal sent to the parent when a child exits (currently disabled)
    ## signal.signal(signal.SIGCHLD, util.sig_chld)

    # process exit signals
    signal.signal(signal.SIGINT, util.sig_int)
    signal.signal(signal.SIGTERM, util.sig_term)

    # absolute path of the current script
    abspath = util.script_abspath(inspect.currentframe())

    if not options.path:
        options.path = os.getcwd()
        elog.warn(
            "No path specified. using current working dir or using: --path='PATH'"
        )

    if not options.srcs:
        elog.error("No source strings specified. using: --srcs='STRINGS'")
        sys.exit(1)

    if not options.dsts:
        elog.warn("No destigation strings specified. using: --dsts='STRINGS'")

    # absolute path for options.path
    root_path = util.source_abspath(APPFILE, options.path, abspath)
    srcs = parse_strarr(options.srcs)
    dsts = parse_strarr(options.dsts)

    elog.force("path: %s", root_path)
    elog.force("sour = %r", srcs)
    elog.force("dest = %r", dsts)

    founds = []
    sweep_dir(root_path, srcs, founds)
    elog.force("Total %d files found", len(founds))

    # guard clauses replace the original's deep if/else nesting
    if not founds:
        return
    if not options.replace:
        elog.warn("No files to be replaced. Using: --replace")
        return
    if len(srcs) != len(dsts):
        elog.error(
            "Failed to replace for srcs(%r) mismatched with dsts(%r)",
            srcs, dsts)
        return

    for pf in founds:
        fts = file_times(pf)
        if not fts:
            elog.warn("missing file: %s", pf)
            continue
        ctime, mtime = fts

        for i in range(0, len(srcs)):
            srcstr = srcs[i]
            dststr = dsts[i] if i < len(dsts) else None
            if not dststr:
                # empty destination: nothing to substitute for this source
                continue

            # expand $(mtime)/$(ctime) placeholders in the destination
            ds = dststr.replace('$(mtime)', mtime).replace('$(ctime)', ctime)

            # NOTE(review): srcstr/ds/pf are interpolated into the sed
            # command unescaped; '/' or quotes in them break the command
            if options.whole_line:
                cmd = "sed -i 's/%s.*/%s/g' '%s'" % (srcstr, ds, pf)
            else:
                cmd = "sed -i 's/%s/%s/g' '%s'" % (srcstr, ds, pf)

            elog.debug(cmd)
            (status, output) = commands.getstatusoutput(cmd)
            if status != 0:
                # BUG FIX: original referenced undefined name `sed` here,
                # raising NameError whenever the sed command failed
                elog.error(
                    "failed to command: \"%s\", output: %r",
                    cmd, output)

    elog.force("Total %d files replaced", len(founds))
Пример #8
0
    # --startup: start the worker logger(s) named on the command line
    group.add_option("--startup",
        action="store_true", dest="startup", default=False,
        help="startup given worker logger")

    # --batch-rows: rows per batch for the logger (default 5000)
    group.add_option("--batch-rows",
        action="store", dest="batch_rows", type=int, default=5000,
        help="specify batch rows for logger. 5000 default",
        metavar="ROWS")

    # --force: apply the action to all workers, not just the given ones
    group.add_option("--force",
        action="store_true", dest="force", default=False,
        help="force apply on all workers")

    # no command-line arguments at all: show usage and quit
    # (print statements: this file targets Python 2)
    if len(sys.argv) == 1:
        elog.warn("WORKERs not specified")
        print "--------------------------------"
        parser.print_help()
        print "--------------------------------"
        exit(1)

    # when the first argument is not an option flag, treat it as a
    # comma-separated list of worker names (whitespace stripped)
    workers = None
    firstarg = sys.argv[1]
    if not firstarg.startswith('-'):
        workers = []
        names = firstarg.split(',')
        for name in names:
            workers.append(name.strip(' '))
        pass

    config = {
Пример #9
0
    def init_data(self, logfile):
        """Restore the stash position, load the dests/proxys/keywds CSV
        tables from the plugin config dir, and (re)bind self.logger to
        `logfile`."""
        self.restore_position()

        # make sure the stash log directory exists before anything writes there
        if not util.dir_exists(self.log_prefix):
            elog.warn("create dir for stash log: %s", self.log_prefix)
            os.makedirs(self.log_prefix)

        elog.debug('log config: %r', self.dictcfg)
        elog.info('stash prefix: %s', self.log_prefix)
        elog.info('start tstamp: %d', self.start_tstamp)
        elog.info('start rowid: %d', self.start_rowid)
        elog.info('batch rows: %d', self.batch_rows)

        file_dests = os.path.join(self.plugins_dir, 'config', 'dests.csv')
        file_proxys = os.path.join(self.plugins_dir, 'config', 'proxys.csv')
        file_keywds = os.path.join(self.plugins_dir, 'config', 'keywds.csv')

        elog.info("dests file: %s", file_dests)
        elog.info("proxys file: %s", file_proxys)
        elog.info("keywds file: %s", file_keywds)

        def load_csv(csvfile):
            # each line becomes a tuple of its comma-separated fields
            with open(csvfile, 'r') as f:
                return [tuple(line.strip('\n').split(',')) for line in f.readlines()]

        # id, ip, port, host  e.g. 100005,67.64.46.91,80,www.zhibo8.cc
        self.dests = load_csv(file_dests)
        # ip, port, type  e.g. 121.232.144.158,9000,HTTP
        self.proxys = load_csv(file_proxys)
        # id, word
        self.keywds = load_csv(file_keywds)

        # highest valid index into each table
        self.max_dests = len(self.dests) - 1
        self.max_proxys = len(self.proxys) - 1
        self.max_keywds = len(self.keywds) - 1

        # point this worker's logger at logfile and reload the logging config
        elog.update_log_config(self.dictcfg, self.logger_name, logfile, 'INFO')
        logging.config.dictConfig(self.dictcfg)

        # rebind the logger object and remember the active log file
        self.logger = logging.getLogger(self.logger_name)
        self.logfile = logfile

        # numeric range pairs; meaning not evident from this block —
        # presumably value ranges used by the generator, verify at call sites
        (self.a, self.b, self.c, self.d, self.p) = ((1, 220), (10, 230), (20, 240), (30, 250), (10000, 60000))

        # output record field names, in emit order
        self.fields = (
            'rowid', 'timestr', 'timeint', 'destid',
            'sourip', 'sourport', 'destip', 'destport',
            'desturl', 'proxyip', 'proxyport', 'proxytype',
            'keywdid')