示例#1
0
def parse(filename):
    """Parses the given file line by line
    while following it"""
    try:
        fd = open(filename)
    except IOError:
        print "Could not read file:", filename
    lines = follow(fd)
    for line in lines:
        parsed = parse_line(line)
        if parsed == None:
            continue
        yield parsed
示例#2
0
def parse(filename):
    """Parses the given file line by line
    while following it"""
    try:
        fd = open(filename)
    except IOError:
        print "Could not read file:", filename
    lines = follow(fd)
    for line in lines:
        parsed = parse_line(line)
        if parsed == None:
            continue
        yield parsed
示例#3
0
from tail import follow

lines = follow(open('www/access-log'))

# Print the first five followed lines, then shut the generator down
# by calling close() on it (raises GeneratorExit inside follow).
for idx, line in enumerate(lines):
    print(idx, 'line: ', line, end='')
    if idx == 4:
        lines.close()
示例#4
0
import time
import threading

from tail import follow

# Event used to tell the follow() generator to stop producing lines.
shutdown = threading.Event()
# follow() presumably polls this event between reads — confirm in tail.py.
lines = follow(open('www/access-log'), shutdown)


def sleep_and_close(s):
    """Wait s seconds, then signal the follower to shut down via the event."""
    time.sleep(s)
    print('Closing it down')
    shutdown.set()


# Arrange for the shutdown event to fire after 10 seconds.
threading.Thread(target=sleep_and_close, args=(10, )).start()

# Consume lines until follow() notices the event and ends the stream.
for line in lines:
    print(line, end='')
示例#5
0
        print(item)
        yield item


class storelast:
    """Iterator wrapper that remembers the last item it produced.

    Wraps any iterator; after each __next__ the item is also stored in
    self.last so consumers can peek at the most recent value.
    """

    def __init__(self, source):
        self.source = source
        # Initialize so reading .last before the first item has been
        # consumed returns None instead of raising AttributeError.
        self.last = None

    def __next__(self):
        item = next(self.source)
        self.last = item
        return item

    def __iter__(self):
        return self


if __name__ == '__main__':
    # Build a generator version of math.sqrt and drive it through
    # trace(), which (per the fragment above) prints each item.
    gen_sqrt = generate(math.sqrt)

    for x in trace(gen_sqrt(range(10))):
        pass

    print('--------------------')

    # Follow the access log and parse it; storelast lets us print the
    # raw source line that produced each parsed record.
    lines = storelast(follow(open('www/access-log')))
    log = apache_log(lines)
    for r in log:
        print(r)
        print(lines.last)
示例#6
0
# One of the most powerful applications of generators is
# setting up processing pipelines.

# input sequence -> generator -> generator -> for x in a

import tail


def grep(pattern, lines):
    """Yield only the lines that contain the given substring pattern.

    Lazy pipeline stage: consumes an iterable of lines, produces the
    matching subset.
    """
    for line in lines:
        # Bug fix: the original tested 'pattern in lines' (the whole
        # iterable) instead of the current line.
        if pattern in line:
            yield line


logfile = open("access-log")
loglines = tail.follow(logfile)
pylines = grep("python", loglines)

for line in pylines:
    print(line)
示例#7
0
def feed_queue(log_q):
    """Follow the access log, parse it, and stream the records into log_q."""
    source = follow(open('www/access-log'))
    records = apache_log(source)
    sendto_queue(records, log_q)
示例#8
0
from tail import follow
from parser import apache_log
from sendto import sendto

# Pipeline: file -> follow (tail -f) -> apache_log parser -> network sink.
lines = open('www/access-log')
lines = follow(lines)
log = apache_log(lines)
# Send parsed records to port 15000 on all local interfaces.
sendto(log, ("", 15000))
示例#9
0
#!/usr/bin/python

# when a new query appears into log file, blink the little green led (#25)

import RPi.GPIO as GPIO
import os
import time
import tail

# Some variables
logFile = "/opt/neo4j/logs/query.log"
blinkTime = 0.1  # seconds the LED stays lit per logged query

# Set GPIO mode that matches the board numbering
GPIO.setmode(GPIO.BCM)

# Configure pin 25 (the LED) as an output, initially off
GPIO.setup(25, GPIO.OUT, initial=GPIO.LOW)

# Blink the LED once for every new line appended to the log file.
try:
  for data in tail.follow(logFile):
    GPIO.output(25, GPIO.HIGH)
    time.sleep(blinkTime)  # was hard-coded 0.1, ignoring blinkTime
    GPIO.output(25, GPIO.LOW)
except KeyboardInterrupt:
  pass
finally:
  # Release the GPIO pins no matter how the loop ends — the original
  # only cleaned up on Ctrl-C, leaking pin state on other exceptions.
  GPIO.cleanup()

exit(0)