Example #1
	def dbl(self, logmsg):
		try:
			# locate the path for the debug log under the root folder
			logfilePath = os.path.join(self.rootFolderPath, 'errors')
			fileName = 'debugLog'
			logSize = 5000000
			logCount = 5
			# if the path does not exist, create it
			if not os.path.exists(logfilePath):
				os.makedirs(logfilePath)
			if os.path.exists(logfilePath):
				# attach a rotating handler for this one message, then detach it
				logHandler = RotatingFileHandler(os.path.join(logfilePath, fileName), "a", logSize, logCount)
				logFormatter = logging.Formatter("%(asctime)s:%(message)s")
				logHandler.setFormatter(logFormatter)
				logger = logging.getLogger(__name__)
				logger.disabled = False
				logger.addHandler(logHandler)
				logger.setLevel(logging.DEBUG)
				logger.debug(logmsg)
				logHandler.flush()
				logHandler.close()
				logger.removeHandler(logHandler)
		except Exception:
			# if we can't write to the log for any reason, eat the error and continue
			pass
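For contrast, a minimal sketch of the more conventional setup, where the RotatingFileHandler is configured once at startup and left attached (the path, size, and count values are illustrative assumptions, not taken from the project above):

import logging
import os
from logging.handlers import RotatingFileHandler

LOG_DIR = 'errors'                      # assumed location, mirroring the example
os.makedirs(LOG_DIR, exist_ok=True)

handler = RotatingFileHandler(
    os.path.join(LOG_DIR, 'debugLog'),
    mode='a',
    maxBytes=5000000,                   # rotate after ~5 MB
    backupCount=5,                      # keep five rotated files
)
handler.setFormatter(logging.Formatter('%(asctime)s:%(message)s'))

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)              # attach once; no per-call add/remove

logger.debug('handler stays attached for the life of the process')

Attaching and detaching the handler on every call, as the example does, avoids holding the file open between messages but pays the open/close cost per message; the once-at-startup form is cheaper and is what the later examples use.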
Example #2
File: dbBackup.py Project: longooglite/mps
	def dbl(self, logmsg):
		try:
			# locate the path for the debug log under the root path
			logfilePath = os.path.join(self.rootpath, 'debugLogs')
			fileName = 'debugLog'
			logSize = 5000000
			logCount = 5
			# if the path does not exist, create it
			if not os.path.exists(logfilePath):
				os.makedirs(logfilePath)
			if os.path.exists(logfilePath):
				# append the message to the HTML backup report as well
				env = envUtils.getEnvironment()
				self.backupReport += "<tr><td>%s - %s</td></tr>" % (env.localizeUTCDate(env.formatUTCDate()), logmsg)
				logHandler = RotatingFileHandler(os.path.join(logfilePath, fileName), "a", logSize, logCount)
				logFormatter = logging.Formatter("%(asctime)s:%(message)s")
				logHandler.setFormatter(logFormatter)
				logger = logging.getLogger(__name__)
				logger.disabled = False
				logger.addHandler(logHandler)
				logger.setLevel(logging.DEBUG)
				logger.debug(logmsg)
				logHandler.flush()
				logHandler.close()
				logger.removeHandler(logHandler)
		except Exception:
			# if we can't write to the log for any reason, eat the error and continue
			pass
Example #3
 def flush(self):
     try:
         RotatingFileHandler.flush(self)
     except EnvironmentError as e:
         if e.errno == errno.ENOSPC:
             self.disable_logging()
         else:
             raise
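The disable_logging helper isn't shown in this excerpt; a hypothetical sketch of what such a method might do (an assumption, not the project's actual code) is to raise the handler's threshold so a full disk stops logging instead of crashing the application:

 def disable_logging(self):
     # hypothetical: on a full disk (ENOSPC), raise this handler's threshold
     # above CRITICAL so emit() drops every future record instead of failing again
     self.setLevel(logging.CRITICAL + 1)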
Example #4
File: log.py Project: swstack/blockwar
 def flush(self):
     # after the normal flush, force the OS to write its buffered bytes to
     # disk so records survive a crash or power loss
     RotatingFileHandler.flush(self)
     if hasattr(self.stream, 'fileno') and hasattr(os, 'fsync'):
         fd = self.stream.fileno()
         os.fsync(fd)
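This override presumably lives in a RotatingFileHandler subclass; a self-contained sketch of that arrangement (the class name and file settings are assumptions):

import logging
import os
from logging.handlers import RotatingFileHandler

class FsyncRotatingFileHandler(RotatingFileHandler):
    """Rotating handler that fsyncs on every flush."""
    def flush(self):
        RotatingFileHandler.flush(self)
        # push the OS buffers to disk so records survive a hard crash
        if hasattr(self.stream, 'fileno') and hasattr(os, 'fsync'):
            os.fsync(self.stream.fileno())

handler = FsyncRotatingFileHandler('app.log', maxBytes=1000000, backupCount=3)
logging.getLogger().addHandler(handler)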
Example #5
File: listen.py Project: terbo/sigmon
class Sensor(py_daemon.Daemon):
  def __init__(self):
    self.starttime = time.time()
    self.version = VERSION

    self.debug   = os.environ.get('SIGMON_DEBUG', 1)
    self.detach   = os.environ.get('SIGMON_DETACH', 1)
    self.apihost  = os.environ.get('SIGMON_API_HOST', '1.0.0.1')
    self.apiport  = os.environ.get('SIGMON_API_PORT', 8989)
    self.apiprobes  = os.environ.get('SIGMON_API_PROBES', '/probes/')
    self.apiaps  = os.environ.get('SIGMON_API_APS', '/aps/')
    self.apidata  = os.environ.get('SIGMON_API_DATA', '/datapkts/')
    self.homedir = os.environ.get('SIGMON_ROOT', '/data/sigmon')
    
    self.apiurls = {'datapkts': self.apidata,
                    'probes': self.apiprobes,
                    'aps': self.apiaps}
    
    self.logcsv = os.environ.get('SIGMON_CSVOUT', 0)
    self.logjson = os.environ.get('SIGMON_JSONOUT', 0)
    self.logweb = os.environ.get('SIGMON_WEBOUT',1)
    self.logpcap = os.environ.get('SIGMON_SAVEPCAP',0)
    self.watchaps = os.environ.get('SIGMON_WATCHAPS',1)
    self.watchdata = os.environ.get('SIGMON_WATCHDATA',0)
    self.watchprobes = os.environ.get('SIGMON_WATCHPROBES',1)
    
    self.savelogs = os.environ.get('SIGMON_SAVELOGS',1)
    
    self.poststatus = os.environ.get('SIGMON_POSTSTATUS',1)
    
    self.statusurl = os.environ.get('SIGMON_STATSURL', '/logs.sensors/')
    
    self.usefilter = os.environ.get('PCAP_FILTER',0)

    self.iface = os.environ.get('SIGMON_MON_DEV', 'mon0')
    self.queue_time = os.environ.get('QUEUE_TIME', 25)
    self.queue_packets = os.environ.get('QUEUE_PACKETS', 50)
    
    
    self.apsleep = 10
    self.datasleep = 3

    self.pcapfile = {}
    
    self.max_errors = 25
    self.statusprint = 60 * 7

    self.scriptname = os.path.basename(__file__).replace('.py','')
    self.hostname = node() 
    self.pidfile = '%s.pid' % ( self.scriptname ) 
    
    self.errorlog = []
    
    self.csvdelim = ','
    self.csvquote = '"'
     
    self.aps = {}
    self.macs = {}

    self.synced = 0
    self.last_synced = 0
    self.last_status = ''
    self.last_synced_status = 0
    self.errors = 0

    self.logfile = 'logs/%s.%s.%s-%s.log' % ( 
                                           self.scriptname,
                                           self.hostname,
                                           self.iface,
                                           datetime.datetime.now().strftime('%F'))
    
    self.log_format = '%(asctime)s %(module)s:%(lineno)d : %(levelname)s : %(message)s'
    self.logger = logging.getLogger()
    
    if self.savelogs:
      self.logger.setLevel(logging.DEBUG)
      self.log_handler = RotatingFileHandler(self.logfile, maxBytes=5000000, backupCount=20)
      self.log_handler.setFormatter(logging.Formatter(self.log_format))
      self.logger.addHandler(self.log_handler)
    
    if self.debug:
      self.log_stderr = logging.StreamHandler(sys.stderr)
      self.log_stderr.setFormatter(logging.Formatter(self.log_format))
      self.log_stderr.setLevel(logging.DEBUG)
      self.logger.addHandler(self.log_stderr)

    # pcapy settings
    self.pcap_filter = 'type mgt subtype probe-req' # need to add probe resp
    self._max_pkts = -1
    self._max_len = 1514
    self._promisc = 1
    self._read_timeout = 100
   
    self.pcap_maxsize = 500000

    self.data = {
        'datapkts': [],
        'probes': [],
        'aps': [],
    } 
    
    self.pcapdump = {}

    self.maxpoolsize = 4
    self.maxpooltimeout = None

    self.http_headers = urllib3.util.make_headers(keep_alive=True,
                                          user_agent='sigmon sensor %s' % self.version)
    self.http_headers.update({'Content-Type':'application/json'})

    self.pool = urllib3.HTTPConnectionPool(self.apihost, self.apiport,
                                      self.maxpoolsize,
                                      self.maxpooltimeout,
                                      headers=self.http_headers)
    self.RTD = ImpactDecoder.RadioTapDecoder()
    
    self.pkts = 0
    self.active = False

    # when running detached, initialize the daemon base class with our pidfile
    if self.detach:
      py_daemon.Daemon.__init__(self, self.pidfile, verbose=True)
    
  
  def error(self,err):
      error(err)
      self.errors += 1
      self.errorlog.append('%s %s' % (datetime.datetime.now().strftime('%F-%T'), err))

  def do_exit(self):
    if self.queued():
      error('Exiting, wrote %s packets to csv in %s' % (self.writecsv(), self.homedir))
    # delegate shutdown to the daemon base class
    super(Sensor,self).stop()

  def status(self):
    self.last_synced_status = time.time()

    memusage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss 
    
    self.last_status = {
      'sensor': self.hostname,
      'uptime': self.uptime().total_seconds(),
      'memusage': memusage,
      'pktseen': self.pkts,
      'synced': self.synced,
      'lastsync': self.last_synced,
      'queued': { x: len(self.data[x]) for x in self.data.keys() },
      'time':   time.time(),
      'version': self.version,
      'active': self.active,
      'errorcount': self.errors,
      'errors': self.errorlog,
    }
    
    if self.logpcap:
      for pcapf in self.pcapdump:
        stat = os.stat(self.pcapfile[pcapf])
        
        if stat and stat[6] > self.pcap_maxsize:
          self.pcapture()
    
    if self.debug:
      info('uptime: %s' % self.last_status['uptime'])
      info('memory usage: %.2fmb' % (self.last_status['memusage']))
      info('synced: %d packets (%d errors)' % \
        ( self.synced, len(self.errorlog) ))
      info('queued: %s, last synced: %.2f seconds ago' % \
        (self.last_status['queued'], (time.time() - self.last_status['lastsync'])))
    
    if self.poststatus:
      self.post(self.statusurl, data=self.last_status)
    
    return self

  def writejson(self):
    # print each queued packet as a JSON document
    for field in self.data.keys():
      for pkt in self.data[field]:
        print(json.dumps(pkt))

  def writecsv(self):
    written = 0

    with open('%s/csv/%s_%s-%s.csv' % (self.homedir, self.hostname, self.iface,
      datetime.datetime.now().strftime('%F-%T')), 'a') as csvfile:
      for field in self.data.keys():
        for pkt in self.data[field]:
          written += 1
          csvfile.write('%s\n' % self.csvify(pkt))

    return written
  
  def queued(self):
    return sum(len(pkts) for pkts in self.data.values())
  
  def queue(self, data):
    ptype = data['ptype']
    self.data[ptype].append(data)
    
    if self.queued() > self.queue_packets or \
        time.time() - self.last_synced > self.queue_time:
        self.sync()
    
    return self
  
  def uptime(self):
    return datetime.timedelta(seconds=(time.time() - self.starttime))

  def sync(self):
    if self.savelogs:
      self.log_handler.flush() 
    
    try:
      # pcap isn't saved here; it's written out immediately on capture.
      # it could be made another queued data hash...
      for field in self.data.keys():
        if len(self.data[field]):
          if self.logjson:  self.writejson()
          if self.logcsv:   self.writecsv()
          if self.logweb:   self.post(self.apiurls[field],field=field)

    except Exception as e:
      self.error('upload: %s (on %s)' % (e, field))
    
    
    if time.time() - self.last_synced_status > self.statusprint:
      self.status()

    return self
  
  def post(self, url, data=False, field=False):
    r = ''
    
    try:
      if field:
        encoded_data = json.dumps(self.data[field])
      elif data:
        encoded_data = json.dumps(data)
      else:
        error('Sent no data? url:%s data:%s field:%s' % ( url, data, field ))
        return

      response = self.pool.urlopen('POST', url, body=encoded_data,
                                         headers=self.http_headers,
                                         assert_same_host=False)
      r = response.read()
      
      if field:
        self.synced += self.queued()
        self.last_synced = time.time()
        self.data[field] = []
  
    except Exception as e:
      self.error('Posting to %s: %s/%s (posting %s/%s)' % ( url, e, r, field, data))

  def csvify(self,data):
    return self.csvdelim.join( [ '%s%s%s' % \
          (self.csvquote,data[x],self.csvquote) for x in data.keys()])
Example #6

 def flush(self):
     RotatingFileHandler.flush(self)
     if hasattr(self.stream, 'fileno') and hasattr(os, 'fsync'):
         fd = self.stream.fileno()
         os.fsync(fd)
Example #7
class Logger(Singleton):
    def __init__(self):
        self.__log = None
        self.__hdlr = None


    def __del__(self):
        self.close()

    '''
    Get an instance of the logger (see the usage sketch after this class).

    Parameters:
        log_instance: name identifying the logger
        file_name: path of the log file to produce
        max_bytes: maximum size of the file before it rolls over
        backup_count: number of rotated backup files to keep
        timeopen: 0 disables timestamps; any other value enables them

    Returns:
        0 on success, -1 on failure
    '''
    def get_log(self, log_instance, file_name, max_bytes, backup_count, timeopen=0):
        result = 0
        try:
            self.__log = logging.getLogger(log_instance)
        except Exception:
            self.__log = None
            result = -1
        else:
            self.__log.setLevel(logging.DEBUG)
            try:
                self.__hdlr = RotatingFileHandler(file_name, maxBytes=max_bytes, backupCount=backup_count)
            except Exception:
                self.__hdlr = None
                result = -1
            else:
                self.__hdlr.setLevel(logging.DEBUG)
                if 0 != timeopen:
                    formatter = logging.Formatter('%(asctime)s - %(message)s')
                else:
                    formatter = logging.Formatter('%(message)s')
                self.__hdlr.setFormatter(formatter)
                self.__log.addHandler(self.__hdlr)
        finally:
            return result

    def write(self, value):
        if self.__log is not None and self.__hdlr is not None:
            self.__log.debug(value)

    def flush(self):
        if self.__hdlr is not None:
            self.__hdlr.flush()

    def close(self):
        if self.__log is not None:
            self.__log.removeHandler(self.__hdlr)
            self.__log = None

        if self.__hdlr is not None:
            self.__hdlr.flush()
            self.__hdlr.close()
            self.__hdlr = None
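A minimal usage sketch for this class (the logger name and file path are illustrative assumptions):

log = Logger()
# 5 MB per file, five backups, timestamps enabled
if log.get_log('app', 'app.log', max_bytes=5000000, backup_count=5, timeopen=1) == 0:
    log.write('rotating log is ready')
    log.flush()
log.close()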
Example #8
class Project(GObject.GObject):

    __gsignals__ = {
        'stack-changed': (GObject.SIGNAL_RUN_FIRST, None, (int, GObject.TYPE_PYOBJECT))
    }

    __MAX_LOG_COUNT__ = 5

    name = GObject.Property(type=str)
    creator = GObject.Property(type=str)
    panic_fade_time = GObject.property(type=GObject.TYPE_LONG)
    panic_hard_stop_time = GObject.property(type=GObject.TYPE_LONG)
    current_hash = GObject.property(type=str)
    last_hash = GObject.property(type=str)
    max_duration_discovery_difference = GObject.property(type=GObject.TYPE_LONG)

    def __init__(self, name="Untitled Project", creator="", root="", panic_fade_time=500, panic_hard_stop_time=1000,
                 cue_stacks=None, current_hash=None, last_hash=None, max_duration_discovery_difference=5):
        GObject.GObject.__init__(self)
        self.name = name
        self.creator = creator
        self.__root = root
        self.cue_stacks = [CueStack(project=self), ] if cue_stacks is None else cue_stacks
        self.panic_fade_time = panic_fade_time
        self.panic_hard_stop_time = panic_hard_stop_time
        self.current_hash = current_hash
        self.last_hash = last_hash
        self.max_duration_discovery_difference = max_duration_discovery_difference
        self.__dirty = True

        self.__logfile_handler = None

        self.init_logfile()

    def __iadd__(self, other):
        if not isinstance(other, CueStack):
            raise TypeError("Can't add type {0} to Project".format(type(other)))
        self.cue_stacks.append(other)
        self.emit('stack-changed', len(self.cue_stacks)-1, StackChangeAction.INSERT)

        return self

    def __isub__(self, other):
        if not isinstance(other, CueStack):
            raise TypeError("Can't remove type {0} from Project".format(type(other)))
        elif other not in self.cue_stacks:
            raise ValueError("{0} isn't in this project".format(other))

        key = self.cue_stacks.index(other)
        self.cue_stacks.pop(key)

        self.emit('stack-changed', key, StackChangeAction.DELETE)

        return self

    def __setitem__(self, key, value):
        if not isinstance(value, CueStack):
            raise TypeError("Cannot add type {0} to CueList".format(type(value)))

        i = len(self.cue_stacks)
        self.cue_stacks[key] = value

        self.emit('stack-changed', key, StackChangeAction.UPDATE if 0 <= key < i else StackChangeAction.INSERT)

    def __getitem__(self, key):
        logger.debug("Asked for cuelist in slot {0}".format(key))
        return self.cue_stacks[key]

    def __len__(self):
        return len(self.cue_stacks)

    @GObject.property(type=str)
    def root(self):
        return self.__root

    def change_root(self, path):
        logger.debug("Current Root: {0}".format(self.__root))
        if self.__root and self.__root != path:
            # TODO: Copy project if root is already set
            logger.warning("Need to copy project to new location!")
        else:
            logger.debug("Setting new root {0}".format(path))
            self.__root = path

    def init_logfile(self):
        if self.__root is not None and self.__logfile_handler is None:
            logpath = os.path.join(self.__root, ".soundclip", "logs")

            if not os.path.exists(logpath):
                os.makedirs(logpath)

            self.__logfile_handler = RotatingFileHandler(
                os.path.join(logpath, "soundclip.log"), backupCount=Project.__MAX_LOG_COUNT__
            )
            self.__logfile_handler.setLevel(0)
            self.__logfile_handler.setFormatter(logging.Formatter(
                fmt="%(asctime)s - [%(module)s | %(levelname)s]: %(message)s",
                datefmt='%Y-%m-%d %I:%M:%S %p'
            ))
            logger.addHandler(self.__logfile_handler)
            self.__logfile_handler.doRollover()

            logger.info("Logging to file: {0}".format(os.path.join(logpath, "soundclip.log")))

    def close_logfile(self):
        if self.__logfile_handler is not None:
            self.__logfile_handler.flush()
            logger.removeHandler(self.__logfile_handler)
            self.__logfile_handler = None

    def add_cuelist(self, other):
        if not isinstance(other, CueStack):
            raise TypeError("Can't add type {0} to Project".format(type(other)))
        self.cue_stacks.append(other)
        self.emit('stack-changed', len(self.cue_stacks)-1, StackChangeAction.INSERT)

    def remove_cuelist(self, other):
        if not isinstance(other, CueStack):
            raise TypeError("Can't remove type {0} from Project".format(type(other)))
        elif other not in self.cue_stacks:
            raise ValueError("{0} isn't in this project".format(other))

        key = self.cue_stacks.index(other)
        self.cue_stacks.pop(key)

        self.emit('stack-changed', key, StackChangeAction.DELETE)

    def remove_cue(self, cue):
        for stack in self.cue_stacks:
            if cue in stack:
                stack.remove_cue(cue)

    def get_cue_list_for(self, cue):
        for stack in self.cue_stacks:
            if cue in stack:
                return stack
        return None

    def close(self):
        for stack in self.cue_stacks:
            stack.stop_all()

        self.close_logfile()

        # TODO: Save project to disk if new

    @staticmethod
    def load(path):
        if not os.path.isdir(os.path.join(path, ".soundclip")):
            raise FileNotFoundError("Path does not exist or not a soundclip project")

        with open(os.path.join(path, ".soundclip", "project.json"), "rt") as dbobj:
            content = dbobj.read()

        if not content:
            raise ProjectParserException({
                "message": "The project is corrupted (project.json was empty)!",
                "path": path
            })

        j = json.loads(content)

        name = j.get('name', "Untitled Project")
        creator = j.get('creator', "")
        panic_fade_time = j.get('panicFadeTime', 500)
        panic_hard_stop_time = j.get('panicHardStopTime', 1000)
        eps = j.get('discoveryEpsilon', 5)
        last_hash = j.get('previousRevision')

        p = Project(name=name, creator=creator, root=path, cue_stacks=[], panic_fade_time=panic_fade_time,
                    panic_hard_stop_time=panic_hard_stop_time, current_hash=sha(content), last_hash=last_hash,
                    max_duration_discovery_difference=eps)

        if 'stacks' in j:
            for key in j['stacks']:
                p += CueStack.load(path, key, p)

        return p

    def store(self):
        if not self.__root:
            raise IllegalProjectStateException({
                "message": "Projects must have a root before they can be saved"
            })

        if not os.path.exists(self.__root) or not os.path.isdir(self.__root):
            os.makedirs(self.__root)

        d = {'name': self.name, 'creator': self.creator, 'stacks': [], 'panicFadeTime': self.panic_fade_time,
             'panicHardStopTime': self.panic_hard_stop_time, 'discoveryEpsilon': self.max_duration_discovery_difference}

        for stack in self.cue_stacks:
            d['stacks'].append(stack.store(self.__root))

        with open(os.path.join(self.__root, '.soundclip', 'project.json'), 'w') as f:
            json.dump(d, f)
            f.write("\n")

        logger.info("Project {0} saved to {1}".format(self.name, self.__root))
Example #9
import logging
from datetime import datetime
from logging.handlers import RotatingFileHandler

import pandas as pd
from sqlalchemy import create_engine

# with open('password.txt') as file:
#     EmailPassword = file.read()

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(message)s')
LOG_FILENAME = datetime.now().strftime(
    '/var/tmp/ingram_log/PSALE_FTP_Download_logfile.log')
file_handler = RotatingFileHandler(LOG_FILENAME, mode='w')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
file_handler.flush()
logger.addHandler(file_handler)

server = "edi.lightningsource.com"
# user = "******"
# password = "******"
source = "outgoing"
destination = "/data/archive/ingram/INCOMING"
filenametimeloc = "/data/archive/ingram/filenametime.txt"
processed = "/data/archive/ingram/PROCESSED"

# FromEmail='*****@*****.**'
# ToEmail='*****@*****.**'

dbuser = '******'
dbpassword = '******'
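Note that the handler above is created with mode='w' and no maxBytes, so each run truncates the file and nothing ever rotates. If rotation is wanted, a variant such as the following would enable it (the size and count are assumptions):

file_handler = RotatingFileHandler(LOG_FILENAME, mode='a', maxBytes=5000000, backupCount=5)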
Example #10
File: inquire.py Project: terbo/sigmon
class Sensor(Sigmon):
  def __init__(self):
    Sigmon.__init__(self)

    self.starttime = time.time()

    self.debug   = os.environ.get('SIGMON_DEBUG', 1)
    self.apihost  = os.environ.get('SIGMON_APIHOST', '1.0.0.1')
    self.apiport  = os.environ.get('SIGMON_APIPORT', 8989)
    self.apibt  = os.environ.get('SIGMON_APIBT', '/bt/')
    self.homedir = os.environ.get('SIGMON_ROOT', '/data/sigmon')
   
    self.apiurls = {'bt': '%s' % ( self.apibt ) }
    
    self.logcsv = os.environ.get('SIGMON_CSVOUT', 0)
    self.logjson = os.environ.get('SIGMON_JSONOUT', 0)
    self.logweb = os.environ.get('SIGMON_WEBOUT',1)
    
    self.savelogs = os.environ.get('SIGMON_SAVELOGS',1)
    
    self.poststatus = os.environ.get('SIGMON_POSTSTATUS',1)
    
    self.statusurl = os.environ.get('SIGMON_STATSURL', '/logs.sensors/')
    
    self.iface = os.environ.get('BT_IDX', 0)
    self.queue_time = os.environ.get('QUEUE_TIME', 30)
    self.queue_packets = os.environ.get('QUEUE_PACKETS', 15)
    
    
    self.max_errors = 25
    self.statusprint = 60 * 7

    self.scriptname = os.path.basename(__file__).replace('.py','')
    self.hostname = node() 
    
    self.errorlog = []
    
    self.csvdelim = ','
    self.csvquote = '"'
     
    self.aps = {}
    self.macs = {}

    self.synced = 0
    self.last_synced = 0
    self.last_status = ''
    self.last_synced_status = 0
    self.errors = 0

    self.logfile = '%s/logs/%s.%s.%s-%s.log' % ( self.homedir,
                                           self.hostname,
                                           self.iface,
                                           self.scriptname,
                                           datetime.datetime.now().strftime('%F'))
    
    self.log_format = '%(asctime)s %(module)s:%(lineno)d : %(levelname)s : %(message)s'
    self.logger = logging.getLogger()
    
    if self.savelogs:
      self.logger.setLevel(logging.DEBUG)
      self.log_handler = RotatingFileHandler(self.logfile, maxBytes=5000000, backupCount=20)
      self.log_handler.setFormatter(logging.Formatter(self.log_format))
      self.logger.addHandler(self.log_handler)
    
    if self.debug:
      self.log_stderr = logging.StreamHandler(sys.stderr)
      self.log_stderr.setFormatter(logging.Formatter(self.log_format))
      self.log_stderr.setLevel(logging.DEBUG)
      self.logger.addHandler(self.log_stderr)

    # pcapy settings
    self.data = {
        'bt': []
    } 
    
    self.maxpoolsize = 1
    self.maxpooltimeout = None

    self.http_headers = urllib3.util.make_headers(keep_alive=True,
                                          user_agent='sigmon sensor %s' % self.version)
    self.http_headers.update({'Content-Type':'application/json'})

    self.pool = urllib3.HTTPConnectionPool(self.apihost, self.apiport,
                                      self.maxpoolsize,
                                      self.maxpooltimeout,
                                      headers=self.http_headers)
    
    debug('bt idx: %s ip: %s port %s endpoint %s' % \
        (self.iface, self.apihost, self.apiport, self.apibt))
    debug('sync: to web:%s csv:%s json:%s' % \
        (self.logweb, self.logcsv, self.logjson))
    
    info('syncing every %s seconds / %s packets' % \
        (self.queue_time, self.queue_packets))

    signal.signal(signal.SIGINT, self.do_exit)
    
    self.pkts = 0
    self.active = False
    
  
  def error(self,err):
      error(err)
      self.errors += 1
      self.errorlog.append('%s %s' % (datetime.datetime.now().strftime('%F-%T'), err))

  def do_exit(self, signum, frame):
    if self.cease() and self.queued():
      error('Exiting, wrote %s packets to csv in %s' % (self.writecsv(), self.homedir))
    sys.exit(self.errors)

  def status(self):
    self.last_synced_status = time.time()

    memusage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss 
    
    self.last_status = {
      'sensor': self.hostname,
      'uptime': self.uptime().total_seconds(),
      'memusage': memusage,
      'pktseen': self.pkts,
      'synced': self.synced,
      'lastsync': self.last_synced,
      'queued': { x: len(self.data[x]) for x in self.data.keys() },
      'time':   time.time(),
      'version': self.version,
      'active': self.active,
      'errorcount': self.errors,
      'errors': self.errorlog,
    }
    
    if self.debug:
      info('uptime: %s' % self.last_status['uptime'])
      info('memory usage: %.2fmb' % (self.last_status['memusage']))
      info('synced: %d packets (%d errors)' % \
        ( self.synced, len(self.errorlog) ))
      info('queued: %s, last synced: %.2f seconds ago' % \
        (self.last_status['queued'], self.last_status['lastsync']))
    
    if self.poststatus:
      self.post(self.statusurl, data=self.last_status)
    
    return self

  def writecsv(self):
    written = 0
    with open('%s/csv/%s-%s-%s.csv' % (self.homedir, self.hostname, self.iface,
      datetime.datetime.now().strftime('%F-%T')), 'a') as csvfile:
      for field in self.data.keys():
        for pkt in self.data[field]:
          written += 1
          csvfile.write('%s\n' % self.csvify(pkt))

    return written
  
  def queued(self):
    return sum(len(pkts) for pkts in self.data.values())
  
  def queue(self, data):
    ptype = data['ptype']
    self.data[ptype].append(data)
    
    if self.queued() > self.queue_packets or \
        time.time() - self.last_synced > self.queue_time:
        self.sync()
    
    return self
  
  def uptime(self):
    return datetime.timedelta(seconds=(time.time() - self.starttime))

  def sync(self):
    if self.savelogs:
      self.log_handler.flush() 
    
    try:
      # pcap isn't saved here; it's written out per packet.
      # it could be made another queued data hash...
      for field in self.data.keys():
        if len(self.data[field]):
          if self.logweb:   self.post(self.apiurls[field],field=field)
          if self.logjson:  print(self.data[field])
          if self.logcsv:   self.writecsv()

    except Exception as e:
      self.error('upload: %s (on %s)' % (e, field))
    
    
    if time.time() - self.last_synced_status > self.statusprint:
      self.status()

    return self
  
  def post(self, url, data=False, field=False):
    r = ''
    
    try:
      if field:
        encoded_data = json.dumps(self.data[field])
      elif data:
        encoded_data = json.dumps(data)
      else:
        error('Sent no data? url:%s data:%s field:%s' % ( url, data, field ))
        return

      response = self.pool.urlopen('POST', url, body=encoded_data,
                                         headers=self.http_headers, assert_same_host=False)
      r = response.read()
      
      if field:
        self.synced += self.queued()
        self.last_synced = time.time()
        self.data[field] = []
  
    except Exception as e:
      self.error('Posting to %s: %s/%s (posting %s/%s)' % ( url, e, r, field, data))

  def csvify(self,data):
    return self.csvdelim.join( [ '%s%s%s' % \
          (self.csvquote,data[x],self.csvquote) for x in data.keys()])
Example #11
    # run
    try:
        mgr = LciManager(reactor, config, log)
        # we ignore SIGTERM because Squid will close the log FH, which gives
        # us a much cleaner signal that we're to shut down.
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        mgr.start()
    except ConfigParser.Error as why:
        error("Configuration file: %s" % why)
    except Exception as why:
        error("Error: %s " % why)
    except:
        error("Unknown error.")
        
    # clean up logging
    hdlr.flush()
    hdlr.close()
    logging.shutdown()

def error(msg):
    "Something really bad has happened. Should only be used during startup."
    logging.critical(msg)
    sys.stderr.write("LCI FATAL: %s\n" % msg)
    sys.exit(1)


############################################################################

class ManagerState:
    "Holds the manager's state in an easily persistable way."
    def __init__(self):
Example #12
    prfx=sys.prefix))

# import web application
sys.path.append(cwd)
try:
    import myAppModule
except ImportError as err:
    logger.exception('Error importing module: "{e}"'.format(e=err))

# start the application
def application(environ, start_response):
    results = []
    logger.debug('Application called')
    if DEBUG:
        out = ['environ:']
        for k in environ:
            out.append('  {k!r}: {val!r}'.format(k=k, val=environ[k]))
        logger.debug('\n'.join(out))
    try:
        results = myAppModule.application(environ, start_response)
        logger.debug('Application executed successfully')
    except Exception as inst:
        logger.exception('Error: {0}'.format(type(inst)))
    logger.debug('Application call done')
    if DEBUG:
        logHandler.flush()
    return results

logger.debug('Application initialization completed')