Exemple #1
0
    def range_by(self, start_tsc: int, end_tsc: int, ltsc: bool) -> 'LiteLog':
        '''
        Return a new LiteLog holding the entries whose tsc satisfies
        start_tsc < tsc < end_tsc (strictly exclusive on both ends).

        ltsc = True : compare against the starting time-stamp
        ltsc = False: compare against the finishing time-stamp
        '''
        left_key = LogEntry.TscCompare(start_tsc, ltsc)
        right_key = LogEntry.TscCompare(end_tsc, ltsc)

        # bisect_right on the left bound / bisect_left on the right bound
        # keeps entries equal to either bound out of the slice.
        left_index = bisect.bisect_right(self.log_list_, left_key)
        right_index = bisect.bisect_left(self.log_list_, right_key)

        log = LiteLog()
        log.log_list_ = self.log_list_[left_index: right_index]
        return log
Exemple #2
0
 def get(self, action):
     """Dispatch a maintenance action by name.

     Supported actions: 'unblock_ips', 'checkjobs', 'log_cleanup'.
     Unknown action names are silently ignored.
     """
     if action == 'unblock_ips':
         BlockedIP.remove_outdated()
         self.response.out.write('Done!')
     elif action == 'checkjobs':
         WatchJob.check_all_jobs()
         self.response.out.write('Done!')
     # was a bare `if` in the original; `elif` keeps the branches
     # mutually exclusive like the first two
     elif action == 'log_cleanup':
         LogEntry.cleanup()
         self.response.out.write('Done!')
Exemple #3
0
 def ingestLogFile(self):
     """Turn every line of the validated file into a LogEntry.

     The file's creation time is used as the date for all entries.
     Returns the list of LogEntry objects, or None when the file has
     not been validated or was already ingested.
     """
     if not self.validated or self.ingested:
         return None
     # Format the file creation time, e.g. "1/26/2020 1:05 PM".
     date = datetime.strptime(
         time.ctime(os.path.getctime(self.filename)),
         "%a %b %d %H:%M:%S %Y")
     date = date.strftime("%m/%d/%Y %I:%M %p")
     # strftime zero-pads the month and hour; strip a single leading
     # zero from each half to match the project's display format.
     if date[0] == "0":
         date = date[1:]
     firstHalf = date[:date.index(" ") + 1]
     secondHalf = date[date.index(" ") + 1:]
     if secondHalf[0] == "0":
         secondHalf = secondHalf[1:]
     date = firstHalf + secondHalf
     logEntries = []
     # enumerate replaces the original's manual lineNumber counter.
     for lineNumber, line in enumerate(self.lines):
         logEntry = LogEntry()
         logEntry.date = date
         logEntry.description = line
         logEntry.creator = self.creator
         logEntry.eventType = self.eventType
         logEntry.artifact = self.filename
         logEntry.lineNumber = lineNumber
         logEntry.id = logEntry.artifact + "_" + str(lineNumber)
         logEntries.append(logEntry)
     self.ingested = True
     return logEntries
def save_entry(entry, log_entry_object):
    """Persist one log-entry dict; return True on success.

    Entries whose light_reading is the literal string "NaN" are rejected,
    and duplicate rows (sqlite3.IntegrityError) are skipped — optionally
    reporting the skip when --show-errors was given.
    """
    if entry["light_reading"] != "NaN":
        record = LogEntry(**entry)
        try:
            record.save()
            return True
        except sqlite3.IntegrityError:
            if args.show_errors:
                print("Entry skipped: {}".format(entry["timestamp"]))
    return False
Exemple #5
0
def save_entry(entry, log_entry_object):
    """Try to save a single reading; report whether it was stored.

    Rejects readings whose light_reading is the string "NaN"; duplicate
    rows raise sqlite3.IntegrityError and count as skipped.
    """
    if entry["light_reading"] == "NaN":
        return False

    record = LogEntry(**entry)
    try:
        record.save()
    except sqlite3.IntegrityError:
        # Duplicate row — optionally announce the skip.
        if args.show_errors:
            print("Entry skipped: {}".format(entry["timestamp"]))
        return False
    else:
        return True
Exemple #6
0
    def check(self):
        """Mark the job offline and fire its timeout actions when overdue."""
        deadline = self.last_seen + timedelta(minutes=self.interval)
        if deadline >= datetime.utcnow():
            return  # still within its reporting interval — nothing to do

        self.status = 'offline'
        LogEntry.log_event(self.key(), 'Error', 'job is overdue')

        # Run every configured timeout action; one failure must not
        # stop the others.
        for action_key in self.timeout_actions:
            try:
                db.get(action_key).perform_action()
            except Exception as exp:
                logging.error('Error executing timeout action: ' + str(exp))
        self.put()
Exemple #7
0
    def tailAndProcess(self):
        """Follow the log file, open ports for authenticated knocks and
        answer each accepted knock with a crafted TCP 'knock-ack' packet.

        Fixes over the original: the knock-ack section had broken
        mixed-tab indentation (a SyntaxError), and the final syslog call
        concatenated str + Exception (a TypeError).
        """
        for line in self.logFile.tail():
            try:
                logEntry = LogEntry(line)
                profile = self.profiles.getProfileForPort(
                    logEntry.getDestinationPort())
                if profile is None:
                    continue
                try:
                    ciphertext = logEntry.getEncryptedData()
                    port = profile.decrypt(ciphertext, self.config.getWindow())
                    sourceIP = logEntry.getSourceIP()

                    self.portOpener.open(sourceIP, port)
                    syslog.syslog("Received authenticated port-knock for port "
                                  + str(port) + " from " + sourceIP)
                    self._sendKnockAck(sourceIP, port)
                except MacFailedException:
                    # Forged or replayed knock — ignore it.
                    pass
            except Exception as e:
                # str(e): the original concatenated the exception object
                # itself, which raises TypeError.
                syslog.syslog("knocknock skipping unrecognized line" + str(e))

    def _sendKnockAck(self, sourceIP, port):
        """Send the knock acknowledgement back via an hping3 SYN packet."""
        profile = self.getProfile(sourceIP)
        syslog.syslog("Profile obtained for" + sourceIP)
        packetData = profile.encrypt(port)
        syslog.syslog("Payload for knock-ack prepared")
        knockPort = profile.getKnockPort()

        (idField, seqField, ackField, winField) = unpack('!HIIH', packetData)
        hping = existsInPath("hping3")
        if hping is None:
            syslog.syslog("Error, you must install hping3 first.")
            sys.exit(2)

        # NOTE(review): `host` was never defined in the original method
        # (NameError at runtime) — presumably it should be sourceIP; confirm.
        command = [hping, "-S", "-c", "1",
                   "-p", str(knockPort),
                   "-N", str(idField),
                   "-w", str(winField),
                   "-M", str(seqField),
                   "-L", str(ackField),
                   host]
        try:
            # DEVNULL instead of an open('/dev/null') handle that was
            # never closed in the original.
            subprocess.call(command, shell=False,
                            stdout=subprocess.DEVNULL,
                            stderr=subprocess.STDOUT)
            syslog.syslog("Knock-ack sent for port " + str(port)
                          + " to " + sourceIP)
        except OSError:
            syslog.syslog("Error: Do you have hping3 installed?")
            sys.exit(3)
    def get_accelerometer_data(self):
        """Return every stored sensor row as a dict, ordered by parsed timestamp."""
        def by_time(row):
            return datetime.strptime(row.timestamp, self.time_stamp_format)

        readings = []
        for row in sorted(LogEntry.select(), key=by_time):
            readings.append({
                "timestamp": row.timestamp,
                "light": row.light_reading,
                "proximity": row.proximity_reading,
                "x": row.x_reading,
                "y": row.y_reading,
                "z": row.z_reading,
            })
        return readings
Exemple #9
0
 def update(self):
     """Updates the registry: removes the old entries, adds the new ones with addNewEntry"""
     with open(self.file, 'r') as log:
         total_lines = 0
         for index, line in enumerate(log):
             # Only lines that were not read on a previous pass become
             # new registry entries.
             if index >= self.lastLine:
                 self.addNewEntry(LogEntry(line))
             total_lines = index + 1
         # Remember where to resume on the next pass.
         self.lastLine = total_lines
     # Evict entries whose date is more than two minutes away from now.
     now = datetime.now()
     while len(self.registry) != 0 and abs(
             (self.registry[0].date.replace(tzinfo=None) -
              now).total_seconds()) > 120:
         self.deleteEntry()
     # Raise or clear the alert depending on the average hit rate.
     if (self.avgHits > self.threshold and not self.alert):
         self.addAlert()
     elif (self.avgHits < self.threshold and self.alert):
         self.removeAlert()
def parse_file():
    """Parse every line of the watch file into a LogEntry and add it to
    the module-level log_container.

    Fixes over the original: the file was opened twice (the first handle
    leaked), and the already-closed handle was closed again after the
    `with` block; the dead commented-out debug code is gone.
    """
    file_path = dir_path + "/" + filename
    with open(file_path) as fp:
        for line in fp:
            (server, user_count, organism_count) = parse_line(line)
            log_entry = LogEntry(server=server,
                                 num_users=user_count,
                                 num_organisms=organism_count)
            log_container.add_entry(log_entry)
def create_parsed_log_entry_object(parsed_log_entry):
    """
    Creates object from parsed log entry

    :param parsed_log_entry: list --- list with log entry artifacts.
    len(parsed_log_entry) == 9
    :return: LogEntry object, or None when the entry does not have the
    expected 9 fields (the original fell off the end and returned None
    implicitly).
    """
    lgr.debug("Trying to create parsed log entry object from {} ...".format(
        parsed_log_entry))
    if len(parsed_log_entry) != 9:
        # Original message was mangled by a backslash continuation that
        # embedded a run of spaces inside the string.
        lgr.debug("Log entry {} does not match a requirement, "
                  "skipped...".format(parsed_log_entry))
        return None

    # Unpack the nine positional fields in one step instead of indexing.
    (asctime, msecs, process, levelname, name,
     request_ids, user_identitys, instance, message) = parsed_log_entry
    lgr.debug("Creating log entry object from {}".format(parsed_log_entry))
    return LogEntry(asctime, msecs, process, levelname, name,
                    request_ids, user_identitys, instance, message)
Exemple #12
0
    def get(self, job_name):
        """Render the most recent 100 log entries for the named watch job."""
        template = jinja_environment.get_template('templates/log_template.htm')

        job = WatchJob.all().filter('name =', job_name).get()
        entries = (LogEntry.all()
                   .filter('job =', job)
                   .order('-event_time')
                   .run(limit=100))

        page = template.render(name=job_name, logging=entries)
        self.response.out.write(page)
 def __init__(self):
     """Initialise the handler state and seed five hard-coded demo log entries."""
     self.logEntries = dict()
     self.logEntriesInTable = list()
     self.searchLogEntryTableWidget = None
     self.colNamesInSearchLogsTable = list()
     self.vectorManager = None
     self.nextAvailableId = 5
     # (id, date, team, description, artifact) for each demo entry.
     seed_rows = [
         (0, "1/26/20", LogEntry.BLUE_TEAM,
          "Blue Team Defender Turns on Computer.", "blue_log.csv"),
         (1, "1/26/20", LogEntry.WHITE_TEAM,
          "White Team Analyst Starts Taking Notes.", "white_recording.png"),
         (2, "1/26/20", LogEntry.BLUE_TEAM,
          "SQL Injection attack from Red Team.", "red_attack.txt"),
         (3, "1/26/20", LogEntry.RED_TEAM,
          "Cross-Site Scripting Attack from Red Team.", "red_escalation.txt"),
         (4, "1/26/20", LogEntry.BLUE_TEAM,
          "Blue Team Defender turns off computer.", "blue_response.csv"),
     ]
     for entryId, date, team, description, artifact in seed_rows:
         logEntry = LogEntry()
         logEntry.date = date
         logEntry.description = description
         logEntry.creator = team
         logEntry.id = entryId
         logEntry.artifact = artifact
         self.logEntries[entryId] = logEntry
     self.logEntriesInTable = list(self.logEntries.values())
 def entry_from_dict(self, dict_entry):
     """Creates a new log entry given the dictionary gathered from SPLUNK."""
     # "_cd" looks like "<bucket>:<offset>"; dropping the colon yields
     # a single integer serial.
     serial_number = int(dict_entry['_cd'].replace(":", ""))
     return LogEntry(serial=serial_number,
                     timestamp=dict_entry['_time'],
                     content=dict_entry['_raw'],
                     host=dict_entry['host'],
                     source=dict_entry['source'],
                     sourcetype=dict_entry['sourcetype'])
Exemple #15
0
    def tailAndProcess(self):
        """Follow the log file; open the requested port for every
        authenticated knock.

        Lines that fail to parse are skipped; knocks that fail MAC
        verification are silently ignored.
        """
        for line in self.logFile.tail():
            try:
                logEntry = LogEntry(line)
                profile = self.profiles.getProfileForPort(logEntry.getDestinationPort())

                # `is not None` replaces the original `!= None`.
                if profile is not None:
                    try:
                        ciphertext = logEntry.getEncryptedData()
                        port = profile.decrypt(ciphertext, self.config.getWindow())
                        sourceIP = logEntry.getSourceIP()

                        self.portOpener.open(sourceIP, port)
                        syslog.syslog("Received authenticated port-knock for port " + str(port) + " from " + sourceIP)
                    except MacFailedException:
                        # Forged or replayed knock — ignore it.
                        pass
            except Exception:
                # Was a bare `except:`, which would also swallow
                # SystemExit/KeyboardInterrupt.
                syslog.syslog("knocknock skipping unrecognized line.")
 def test_entry(self):
     """A freshly built LogEntry keeps its source port string and stamps
     an integer timestamp."""
     fields = ("80", "192.1.1.1", "00-11-22-33-44-55",
               "81", "192.1.1.2", "AA-BB-CC-DD-EE-FF",
               "TCP", "20", True)
     entry = LogEntry(*fields)
     self.assertEqual("80", entry.sourcePortNumber)
     self.assertTrue(type(entry.timestamp) is int)
    def tailAndProcess(self):
        """Follow the log file and open the requested port for each
        authenticated knock.

        Unparseable lines are skipped; knocks failing MAC verification
        are silently ignored.
        """
        for line in self.logFile.tail():
            try:
                logEntry = LogEntry(line)
                profile = self.profiles.getProfileForPort(
                    logEntry.getDestinationPort())

                # `is not None` replaces the original `!= None`.
                if profile is not None:
                    try:
                        ciphertext = logEntry.getEncryptedData()
                        port = profile.decrypt(ciphertext,
                                               self.config.getWindow())
                        sourceIP = logEntry.getSourceIP()

                        self.portOpener.open(sourceIP, port)
                        syslog.syslog(
                            "Received authenticated port-knock for port " +
                            str(port) + " from " + sourceIP)
                    except MacFailedException:
                        # Forged or replayed knock — ignore it.
                        pass
            except Exception:
                # Was a bare `except:`, which would also swallow
                # SystemExit/KeyboardInterrupt.
                syslog.syslog("knocknock skipping unrecognized line.")
def setup_tables():
    """Ensure the LogEntry table exists; recreate it when --reset was given."""
    try:
        LogEntry.create_table()
    except sqlite3.OperationalError:
        # Table already exists — nothing to do.
        pass

    if args.reset:
        LogEntry.drop_table()
        LogEntry.create_table()
Exemple #19
0
def setup_tables():
    """Create the LogEntry table if missing, dropping and rebuilding it
    first when the --reset flag is set."""
    try:
        LogEntry.create_table()
    except sqlite3.OperationalError:
        pass  # already created on a previous run

    if args.reset:
        LogEntry.drop_table()
        LogEntry.create_table()
    def get_accelerometer_data(self):
        """Return all stored sensor rows as dicts, sorted by parsed timestamp."""
        def by_time(row):
            return datetime.strptime(row.timestamp, self.time_stamp_format)

        readings = []
        for row in sorted(LogEntry.select(), key=by_time):
            readings.append({
                "timestamp": row.timestamp,
                "light": row.light_reading,
                "proximity": row.proximity_reading,
                "x": row.x_reading,
                "y": row.y_reading,
                "z": row.z_reading,
            })
        return readings
Exemple #21
0
 def post(self):
     """Accept one JSON log entry (object) or many (array of objects).

     Valid entries and error messages from invalid ones are collected
     separately and combined via generate_response. Malformed payloads
     return an error string with HTTP status 418.
     """
     try:
         # Body is either a JSON array of entry objects or a single object.
         data = request.get_json()
         dictionary_list = data if isinstance(data, list) else [data]
         # For each JSON Object, try to create a log entry
         # Store the successful log entries in a list and error messages from invalid logs in another list
         logs = []
         errors = []
         for dictionary in dictionary_list:
             log = LogEntry(dictionary)
             if log.valid:
                 logs.append(log)
             else:
                 errors.append(log.get_error_message())
         # TODO - Connect this to logstash and/or database
         # Return the results as a dictionary (Flask will JSONify the dict) and the respective HTTP Status Code
         return self.generate_response(logs, errors)
     except TypeError as te:
         return "Error:" + str(te), 418
     except AttributeError as ae:
         # The original called ae.with_traceback() with no argument,
         # which itself raises TypeError; just report the error.
         return "Error:" + str(ae), 418
Exemple #22
0
    def load_log(logpath: str) -> 'LiteLog':
        """Parse every line of *logpath* into LogEntry objects and wrap
        them in a LiteLog backed by a numpy array.

        Any parse failure is reported (line and file) and re-raised.
        """
        entries = []
        with open(logpath) as fd:
            for line in fd:
                try:
                    entries.append(LogEntry.parse(line))
                except:
                    # Identify the offending line before propagating.
                    print('error when loading', line)
                    print('   in ', logpath)
                    raise

        log = LiteLog()
        log.log_list_ = np.array(entries)
        return log
 def ingestLogFile(self):
     """Turn each line of the validated file into a LogEntry stamped with
     its corresponding per-line timestamp.

     Returns the list of LogEntry objects, or None when the file has not
     been validated or was already ingested.
     """
     if not self.validated or self.ingested:
         return None
     logEntries = []
     # enumerate replaces the original's manual lineNumber counter;
     # indexing self.timestamps keeps the original IndexError behavior
     # if the two lists ever disagree in length.
     for lineNumber, line in enumerate(self.lines):
         timestamp = self.timestamps[lineNumber]
         timestampAsDate = datetime.strptime(timestamp,
                                             "%Y-%m-%d %H:%M:%S")
         logEntry = LogEntry()
         logEntry.date = timestampAsDate.strftime("%m/%d/%Y %I:%M %p")
         logEntry.description = line
         logEntry.creator = self.creator
         logEntry.eventType = self.eventType
         logEntry.artifact = self.filename
         logEntry.lineNumber = lineNumber
         logEntry.id = logEntry.artifact + "_" + str(lineNumber)
         logEntries.append(logEntry)
     self.ingested = True
     return logEntries
def plot_data():
    """Plot capped light readings over time from the LogEntry table."""
    format_string = '%H:%M:%S %m/%d/%Y'

    rows = LogEntry.select().order_by(LogEntry.timestamp)
    rows = sorted(rows, key=lambda row: datetime.strptime(row.timestamp, format_string))

    timestamps = []
    light_readings = []
    for row in rows:
        timestamps.append(datetime.strptime(row.timestamp, format_string))
        # Cap readings at 100 so outliers don't flatten the plot.
        light_readings.append(min(row.light_reading, 100.0))

    fig = plt.figure()
    plt.clf()
    plt.cla()

    plt.plot_date(timestamps, light_readings, 'b')
    fig.autofmt_xdate()
    plt.show()
    def store(self, log_bundles):
        """Persist each log entry from the first bundle to the datastore.

        Only log_bundles[0] is consumed — TODO confirm additional bundles
        are intentionally ignored.
        """
        log.debug('store the logs by calling the model layer')
        log.debug(log_bundles)
        log_entries = log_bundles[0]['logEntries']
        log.info(log_entries)

        for log_entry in log_entries:
            log.info(log_entry)  # the original logged each entry twice
            ndb_log_entry = LogEntry(type='debug')
            ndb_log_entry.type = log_entry['type']
            ndb_log_entry.color = log_entry['color']
            ndb_log_entry.encoded_data = log_entry['encodedData']
            ndb_log_entry.pathname = log_entry['pathname']
            # NOTE(review): timestamp was deliberately commented out in the
            # original and is not persisted — confirm that is intended.
            ndb_log_entry.put()


#def get(self, log_id):
#log.debug('retrieve the logs for logid ' + log_id)
Exemple #26
0
 def handleAddNode(self):
     """Create a blank white-team log entry, attach it to the currently
     selected vector as a significant event, then refresh the table and
     graph views."""
     if self.vectorComboBoxTable.count() > 0:
         vectorName = self.vectorComboBoxTable.currentText()
         vector = self.clientHandler.vectorManager.vectors[vectorName]
         logEntry = LogEntry()
         logEntry.creator = logEntry.WHITE_TEAM
         # NOTE(review): eventType is also set to the WHITE_TEAM constant —
         # confirm eventType really shares the team constants.
         logEntry.eventType = logEntry.WHITE_TEAM
         logEntry.id = "-1"
         # Display format without leading zeros, e.g. "1/26/2020 1:05 PM".
         logEntry.date = (datetime.datetime.today()
                          ).strftime("%m/%d/%Y %I:%M %p").lstrip("0")
         logEntry.associatedVectors.append(vectorName)
         vector.addSignificantEventFromLogEntry(logEntry)
         self.updateVectorTable(vector)
         self.updateVectorGraph(vector)  # was redundantly called twice
Exemple #27
0
def save_data(log_data):
    """Insert all entries in one transaction, printing progress roughly
    every 10%, then report added/skipped counts."""
    num_entries = len(log_data)
    # Integer step, never zero: the original's `num_entries / 10` produced
    # a float (or zero under integer division), breaking the progress
    # modulo below for small or empty data sets.
    ten_percent = max(1, num_entries // 10)

    records_added = 0
    records_skipped = 0

    log_entry_object = LogEntry()

    # Run as a single transaction for speed
    with db.transaction():
        for index, entry in enumerate(log_data):
            if save_entry(entry, log_entry_object):
                records_added += 1
            else:
                records_skipped += 1

            # Print progress to screen
            if index % ten_percent == 0:
                print("{:0.0%} done".format(index / float(num_entries)))

    print_save_results(num_entries, records_added, records_skipped)
Exemple #28
0
    def update(self, remote_ip, uptime):
        """Record a heartbeat from a watched job.

        Tracks IP changes, detects reboots via a drop in uptime, brings an
        offline job back online (firing its back-online actions), and
        reschedules the watchdog task for the next interval.
        """
        self.last_seen = datetime.utcnow()

        if self.last_ip != remote_ip:
            LogEntry.log_event(self.key(), 'Info', 'IP changed - new IP: ' + remote_ip)

        self.last_ip = remote_ip

        if uptime is not None:
            # BUG FIX: the original tested `self.update is not None` — the
            # bound method, which is always true — instead of the previous
            # uptime value. An uptime drop means the machine rebooted.
            if self.uptime is not None and self.uptime > uptime:
                LogEntry.log_event(self.key(), 'Reboot',
                                   'Reboot - Previous uptime: ' + str(timedelta(seconds=self.uptime)))
                for action_key in self.reboot_actions:
                    try:
                        db.get(action_key).perform_action()
                    except Exception as exp:
                        logging.error('Error executing reboot action: ' + str(exp))

        self.uptime = uptime
        self.put()

        # job got back online
        if self.status == 'offline':
            self.status = 'online'
            LogEntry.log_event(self.key(), 'Info', 'Job back online - IP: ' + remote_ip)

            # perform all back_online actions
            for action_key in self.backonline_actions:
                try:
                    db.get(action_key).perform_action()
                except Exception as exp:
                    logging.error('Error executing backonline action: ' + str(exp))

        # delete previous (waiting) task
        if self.task_name is not None:
            logging.debug('old task: ' + self.task_name)
            Queue.delete_tasks(Queue(), Task(name=self.task_name))

        task_name = self.name + '_' + datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S-%f')

        # create task to be executed in updated no called in interval minutes
        taskqueue.add(name=task_name, url='/task', params={'key': self.key()}, countdown=(self.interval + 2) * 60)

        self.task_name = task_name
        self.put()
    def store(self, log_bundles):
        """Persist each log entry from the first bundle to the datastore.

        Only log_bundles[0] is consumed — TODO confirm additional bundles
        are intentionally ignored.
        """
        log.debug('store the logs by calling the model layer')
        log.debug(log_bundles)
        log_entries = log_bundles[0]['logEntries']
        log.info(log_entries)

        for log_entry in log_entries:
            log.info(log_entry)  # the original logged each entry twice
            ndb_log_entry = LogEntry(type='debug')
            ndb_log_entry.type = log_entry['type']
            ndb_log_entry.color = log_entry['color']
            ndb_log_entry.encoded_data = log_entry['encodedData']
            ndb_log_entry.pathname = log_entry['pathname']
            # NOTE(review): timestamp was deliberately commented out in the
            # original and is not persisted — confirm that is intended.
            ndb_log_entry.put()





#def get(self, log_id):
#log.debug('retrieve the logs for logid ' + log_id)
def get_light_data():
    """Return every LogEntry row, unsorted."""
    return LogEntry.select()
def load_light():
    """Return all LogEntry rows sorted chronologically by parsed timestamp."""
    def by_time(row):
        return datetime.strptime(row.timestamp, format_string)

    return sorted(LogEntry.select(), key=by_time)
    def save_packet(self, packet):
        """
        Function for recording a packet during sniff runtime
        packet = the packet passed through the sniff function

        Records TCP/UDP/ICMP packets from non-whitelisted sources into
        self.RECORD, persists them via self.db (in the "base" config only),
        and raises a port-scan Alert when one source touches more than
        self.scan_sensitivity distinct ports within the scan window.
        """
        # TODO: make this work with layer 2, for now just skip filtering those packets
        if not packet.haslayer("IP"):
            return

        # timestamp used for port scan detection
        currentTime = int(datetime.now().timestamp())
        if self.portScanTimeout is None:
            self.portScanTimeout = currentTime

        # how to tell if we need to reset our port scan record
        # (the current scan window has fully elapsed)
        if currentTime > self.portScanTimeout + self.scan_window:
            self.portScanTimeout = currentTime
            self.PS_RECORD = dict()

        # A bunch of packet data, collected to be stored
        sourceMAC = packet.src
        destMAC = packet.dst
        ipLayer = packet.getlayer("IP")
        # IP where this came from
        srcIP = ipLayer.src
        dstIP = ipLayer.dst
        # Ports exist only for TCP/UDP; for other protocols they stay None.
        destPort = ipLayer.dport if hasattr(ipLayer, "dport") else None
        srcPort = ipLayer.sport if hasattr(ipLayer, "sport") else None

        # Only TCP, UDP and ICMP traffic is recorded.
        if (not ipLayer.haslayer("TCP") and not ipLayer.haslayer("UDP")
                and not ipLayer.haslayer("ICMP")):
            return

        # Whitelist check
        if srcIP not in self.whitelist:
            # Testing config - does not utilize a database
            # isTest = self.config == "onlyUDP" or self.config == "testing"

            trafficType = ("TCP" if ipLayer.haslayer("TCP") else
                           "UDP" if ipLayer.haslayer("UDP") else
                           "ICMP" if ipLayer.haslayer("ICMP") else "Other")

            # Log Entry object we're saving
            log = LogEntry(
                srcPort,
                srcIP,
                sourceMAC,
                destPort,
                dstIP,
                destMAC,
                trafficType,
                ipLayer.len,
                destPort in self.openPorts,
            )

            # self.RECORD is where we save logs for easy testing
            if srcIP in self.RECORD.keys():
                self.RECORD[srcIP].append(log)
            else:
                self.RECORD[srcIP] = [log]

            # saving the database ID in case of port scan detection
            # Only the "base" config persists to the database; all other
            # configs return here, so the scan detection below is base-only.
            if self.config == "base":
                dbID = self.db.save(log)
            else:
                return

            # self.PS_RECORD is a separate dictionary used for port scan detection
            if srcIP not in self.PS_RECORD.keys():
                self.PS_RECORD[srcIP] = dict()
                self.PS_RECORD[srcIP][log.destPortNumber] = dbID
            else:
                self.PS_RECORD[srcIP][log.destPortNumber] = dbID

                # Sending out the port scan alert
                # (fires once this source has hit more distinct destination
                # ports than scan_sensitivity in the current window)
                if len(self.PS_RECORD[srcIP]) > self.scan_sensitivity:
                    self.db.alert(
                        Alert(
                            variant="alert",
                            message="Port scan detected from IP {}".format(
                                srcIP),
                            references=list(self.PS_RECORD[srcIP].values()),
                        ))
                    self.PS_RECORD[srcIP] = dict()
 def retrieveLogEntryDb(self, logEntryId):
     """Fetch one log entry by id from the Mongo collection.

     Returns a populated LogEntry, or None when no document matches.
     If several documents match, the last one returned by the cursor wins.
     """
     # NOTE(review): the query filters on "id" but the object id is taken
     # from "_id" below — confirm documents carry both fields.
     query = {"id": str(logEntryId)}
     logEntry = None
     for entry in self.col.find(query):
         logEntry = LogEntry()
         logEntry.id = entry["_id"]
         # SECURITY NOTE(review): eval() on a string read from the database
         # executes arbitrary code if the stored value is attacker-controlled;
         # consider ast.literal_eval instead.
         logEntry.associatedVectors = eval(entry["vectors"])
         logEntry.location = entry["location"]
         logEntry.eventType = entry["eventType"]
         logEntry.description = entry["description"]
         logEntry.creator = entry["creator"]
         logEntry.date = entry["date"]
         logEntry.artifact = entry["artifact"]
         logEntry.lineNumber = entry["lineNumber"]
     return logEntry
 def retrieveLogEntriesDb(self):
     """Reload self.logEntries from the Mongo collection, replacing any
     previously cached entries (keyed by Mongo "_id")."""
     self.logEntries.clear()
     for entry in self.col.find():
         logEntry = LogEntry()
         logEntry.id = entry["_id"]
         # SECURITY NOTE(review): eval() on a string read from the database
         # executes arbitrary code if the stored value is attacker-controlled;
         # consider ast.literal_eval instead.
         logEntry.associatedVectors = eval(entry["vectors"])
         logEntry.location = entry["location"]
         logEntry.eventType = entry["eventType"]
         logEntry.description = entry["description"]
         logEntry.creator = entry["creator"]
         logEntry.date = entry["date"]
         logEntry.artifact = entry["artifact"]
         logEntry.lineNumber = entry["lineNumber"]
         self.logEntries[logEntry.id] = logEntry
Exemple #35
0
 def test_get_grade(self):
     """'  DIESEL ' (with surrounding whitespace) parses to Grade.Diesel."""
     parsed = LogEntry.get_grade(None, "  DIESEL ")
     assert parsed == Grade.Diesel
Exemple #36
0
 def test_get_tank(self):
     """'  TANK 1' yields tank number 1."""
     tank_number = LogEntry.get_tank_num(None, "  TANK 1")
     assert tank_number == 1
def load_light():
    """Return every LogEntry row sorted chronologically by parsed timestamp."""
    rows = LogEntry.select()
    return sorted(rows,
                  key=lambda row: datetime.strptime(row.timestamp,
                                                    format_string))
Exemple #38
0
 def test_get_capacity(self):
     """'  26380' parses to the integer capacity 26380."""
     capacity = LogEntry.get_capacity(None, "  26380")
     # `is not None` replaces the original `!= None` comparison.
     assert capacity is not None
     assert capacity == 26380
Exemple #39
0
 def test_get_pump(self):
     """'  PUMP 6 ' yields pump number 6."""
     pump_number = LogEntry.get_pump_num(None, "  PUMP 6 ")
     assert pump_number == 6