def py_code(self, args):
    xs = nats(1, args["length"])
    ys = nats(args["length"], 1)
    res = 0
    while not isEmpty(xs):
        res += binop(args["operation"], head(xs), head(ys))
        xs = tail(xs)
        ys = tail(ys)
    return res
Example 2
def py_code(self, args):
    xs = nats(1, args["length"])
    ys = nats(args["length"], 1)
    zs = []
    while not isEmpty(xs):
        zs = zs + [binop(args["operation"], head(xs), head(ys))]
        xs = tail(xs)
        ys = tail(ys)
    return zs
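These first two snippets lean on a small functional toolkit. A minimal sketch of what those helpers might look like, inferred from usage here (names and semantics are assumptions, not the exercise framework's actual code):

def nats(start, end):
    # Inclusive integer range that counts down when start > end,
    # so nats(1, 4) == [1, 2, 3, 4] and nats(4, 1) == [4, 3, 2, 1].
    step = 1 if start <= end else -1
    return list(range(start, end + step, step))

def head(xs):
    return xs[0]

def tail(xs):
    return xs[1:]

def isEmpty(xs):
    return len(xs) == 0

def binop(op, a, b):
    # `op` is assumed to be an operator name such as "+", "-" or "*".
    return {"+": a + b, "-": a - b, "*": a * b}[op]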
Example 3
def run():
    global MYSQL_SLOW_QUERY_LOG_PATH, PARSED_SLOW_QUERY_LOG_PATH
    logfile = open(MYSQL_SLOW_QUERY_LOG_PATH, 'r')
    loglines = tail(logfile)

    query_parser = SlowQueryParser(loglines)
    query_parser.start_parser(None)
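Unlike the last-N-lines helpers elsewhere on this page, tail(logfile) here apparently follows an open handle line by line. A generator-based sketch of that behaviour (a common `tail -f` recipe, assumed rather than taken from this project):

import time

def tail(logfile):
    # Follow an open file like `tail -f`, yielding lines as they are appended.
    logfile.seek(0, 2)  # jump to the end of the file
    while True:
        line = logfile.readline()
        if not line:
            time.sleep(0.1)  # wait for new data
            continue
        yield line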
Example 4
def get_changing_information():
    now = datetime.now()
    percentage = 0
    speed = 0
    finishing_time = 0
    ETA = 0
    log_file = get_latest_log_file_name(now)
    if os.path.isfile(log_file):
        with open(log_file, 'r') as file_handler:
            value = utils.tail(file_handler, 1)
            if len(value) > 0:
                line = value[0]
                if 'ETA' in line:
                    percentage = float(line[line.find(']') + 2:line.find('%')])
                    speed = line[line.find('at ') + 3:line.find(' ETA')]
                    ETA = line[line.find('ETA ') + 4:].strip()  # drop any trailing newline before strptime
                    if len(ETA) > 5:
                        time = datetime.strptime(ETA, '%H:%M:%S')
                        endTime = timedelta(hours=time.hour, minutes=time.minute, seconds=time.second)
                    else:
                        time = datetime.strptime(ETA, '%M:%S')
                        endTime = timedelta(minutes=time.minute, seconds=time.second)
                    finishing_time = utils.unix_time_millis(now + endTime)
    changing_video_information = [percentage, speed, finishing_time, ETA]
    return changing_video_information
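The slicing above targets youtube-dl style progress lines. A hypothetical sample of the format it expects (illustrative only):

# "[download]  42.5% of 120.00MiB at 1.50MiB/s ETA 00:45"
#   -> percentage 42.5, speed "1.50MiB/s", ETA "00:45"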
Example 5
def send_24h_data():
    raw_data = utils.tail(DATALOG_FILENAME, 144)
    data = {}
    dataBTC, dataUSD, data['labels'] = [], [], []
    for line in raw_data:
        e = line.split(' ')
        data['labels'].append(e[1])  # HH:MM:SS field
        dataBTC.append(float(e[2]))  # BTC field
        dataUSD.append(float(e[3]))  # USD field

    dicBTC = {
        'label': 'BTC',
        'yAxisID': 'BTC',
        'data': dataBTC,
    }
    dicUSD = {
        'label': 'USD',
        'yAxisID': 'USD',
        'data': dataUSD,
    }
    data['min'] = min(dataUSD)
    data['max'] = max(dataUSD)
    data['percent'] = (dataUSD[-1] * 100 / dataUSD[0]) - 100
    data['datasets'] = [dicBTC, dicUSD]
    return jsonify(data)
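This endpoint assumes one datalog line every ten minutes (hence 144 lines for 24 hours), shaped roughly like this hypothetical sample:

# "2021-03-14 09:20:00 0.025 1432.10"
#   e[0] = date, e[1] = time, e[2] = BTC value, e[3] = USD value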
Example 6
def tail_log():
    log_content = ''
    if os.path.exists(LOG_FILE):
        log_content = tail(LOG_FILE)
    response = {'code': 0, 'msg': log_content}

    return jsonify(response)
Example 7
def main(args):
    while True:
        out = []
        if args.all or args.battery:
            timestamp, voltage, current = tail(BATTERY_LOG)  # renamed to avoid shadowing the datetime module
            out.append('Battery: {:6.3f}V {:7.1f}mA'.format(voltage, current))
        if args.all or args.solar:
            timestamp, voltage, current = tail(SOLAR_LOG)
            out.append('Solar panel: {:6.3f}V {:7.1f}mA'.format(
                voltage, current))
        if args.all or args.system:
            timestamp, cpu, memory = tail(SYSTEM_LOG)
            out.append('CPU: {:5.1f}% memory: {:5.1f}%'.format(cpu, memory))
        if args.all or args.temperature:
            timestamp, cpu_temp, gpu_temp = tail(TEMP_LOG)
            out.append("CPU: {:6.1f}'C GPU: {:6.1f}'C".format(
                cpu_temp, gpu_temp))
        print('  |  '.join(out))
        sleep(1)
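The loop above expects tail(path) to return the parsed fields of a log's last line. A minimal sketch under that assumption (the actual log format is not shown, so the parsing is hypothetical):

def tail(path):
    # Return the last line of a space-separated log as a
    # (timestamp, value, value) tuple with the numeric fields parsed.
    with open(path) as f:
        fields = f.readlines()[-1].split()
    return fields[0], float(fields[1]), float(fields[2])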
Example 8
def is_merging():
    log_file = get_latest_log_file_name(datetime.now())
    if os.path.isfile(log_file):
        with open(log_file, 'r') as file_handler:
            value = utils.tail(file_handler, 1)
            if len(value) > 0:
                line = value[0]
                if 'ffmpeg' in line:
                    LOG.debug('Currently merging videos')
                    return True
    return False
Example 9
    def enumerate_points(self) -> T.Iterator[Point2D]:
        """ Iterates points from source to sink one after another """
        ns = SOL.eval(self.num_segs)

        first = True
        for si in range(ns):
            seg = self.segment(si).eval()
            if first:
                yield from seg.enumerate_points()
                first = False
            else:
                yield from tail(seg.enumerate_points())
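Here tail() drops the first point of every segment after the first, since that point duplicates the previous segment's sink. An iterator-based sketch of such a helper (assumed; the project may define it differently):

import itertools
import typing as T

def tail(it: T.Iterator) -> T.Iterator:
    # Lazily skip the first element and yield the rest.
    return itertools.islice(it, 1, None)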
Example 10
def getIskLog(window=30):
    """
    Returns the last lines of text in the iskdaemon instance log

    @type  window: number
    @param window: number of lines to retrieve 

    @rtype:   string
    @return:  text block
    @since: 0.9.3
    """
    from utils import tail
    
    with open(settings.core.get('daemon', 'logPath')) as f:
        return tail(f, window)
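Most examples on this page delegate to some utils.tail. A minimal handle-based sketch consistent with this call (an assumption, and inefficient for large files since it reads everything):

def tail(handle, window=30):
    # Return the last `window` lines of an open text file as one string.
    return ''.join(handle.readlines()[-window:])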
Example 11
def do_POST(self):
    datas = self.rfile.read(int(self.headers['content-length']))
    datas = urllib.unquote(datas).decode("utf-8", 'ignore')
    datas = utils.transDicts(datas)
    if 'data' in datas:
        content = "data:" + datas['data'] + "\r\n"
    cmd = datas['cmd']
    f = self.wfile
    if cmd == "taillog":
        filename = datas['allowfile']
        dir_tmp = os.path.split(filename)[0]
        dir_list = utils.get_conf_dirlist(global_path + "/config/bias.conf")
        if utils.is_path_allow(dir_list, dir_tmp) == 0:
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            data_buffer = "<span style='color:red'>dir is forbidden</span>"
            f.write(data_buffer)
            return

        linenum = datas['linenum']
        if linenum == "" or linenum == 0:
            linenum = 10
        linenum = int(linenum)  # request values arrive as strings
        if os.path.isfile(filename):
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            data_buffer = utils.tail(filename, linenum)
            f.write(data_buffer)
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            data_buffer = "<span style='color:red'>filename is a directory, please check!</span>"
            f.write(data_buffer)

    elif cmd == "sysinfo":
        sysinfo = dict()
        sysinfo['meminfo'] = utils.meminfo()
        sysinfo['loadinfo'] = utils.loadinfo()
        sysinfo_json = json.dumps(sysinfo)
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        f.write(sysinfo_json)
    elif cmd == "listlog":
        self.send_response(200)
        self.send_header("Content-type", "application/json")
        self.end_headers()
        # The original wrote an undefined `sysinfo` here; send an empty JSON
        # object until the intended log listing is implemented.
        f.write(json.dumps({}))
Example 13
def callback_umc_errorlog(self, params):
    content = Map(code=200, json=[])
    for ud in GlobalContext.umcdefs:
        with ud.lock:
            if ud.umc_instanceid.startswith(params.params.umc):
                errorlog = "%s/%s.error.out" % (get_umc_instance_log_dir(
                    ud.umc_instanceid, GlobalContext), ud.umc_instanceid)
                if os.path.exists(errorlog):
                    content.json.append(
                        json.dumps({
                            "umc_instanceid": ud.umc_instanceid,
                            "rows": tail(errorlog, 10)
                        }))
    return content
Example 14
def send_7d_data():
    raw_data = utils.tail(DATALOG_FILENAME, 7 * 144)
    data = {}
    dataBTC, dataUSD, data['labels'] = [], [], []
    for counter, line in enumerate(raw_data, 1):
        if counter % 7 == 0:  # keep every seventh sample
            e = line.split(' ')
            data['labels'].append(e[0] + " " + e[1])  # YYYY-MM-DD HH:MM:SS fields
            dataBTC.append(float(e[2]))  # BTC field
            dataUSD.append(float(e[3]))  # USD field

    dicBTC = {'label': 'BTC', 'yAxisID': 'BTC', 'data': dataBTC}
    dicUSD = {'label': 'USD', 'yAxisID': 'USD', 'data': dataUSD}
    data['min'] = min(dataUSD)
    data['max'] = max(dataUSD)
    data['percent'] = (dataUSD[-1] * 100 / dataUSD[0]) - 100
    data['datasets'] = [dicBTC, dicUSD]
    return jsonify(data)
Example 15
def get_current_video_information():
    size = 0
    youtuber_name = ''
    video_title = ''
    video_id = ''
    avatar_url = ''
    log_file = get_latest_log_file_name(datetime.now())
    if os.path.isfile(log_file):
        with open(log_file, 'r') as file_handler:
            value = utils.tail(file_handler, 1)
            if len(value) > 0:
                line = value[0]
                if 'ETA' in line:
                    size = line[line.find('of ') + 3:line.find(' at')]
        if os.path.exists(paths.current_video_file):
            with open(paths.current_video_file, 'r') as file_handler2:
                channel_id = file_handler2.readline().rstrip('\n')
                video_title = file_handler2.readline().rstrip('\n')
                video_id = file_handler2.readline().rstrip('\n')
                youtuber_info = Youtuber.load(channel_id=channel_id)
                youtuber_name = youtuber_info.channel_title
                avatar_url = youtuber_info.avatar_url
    video_information = [size, youtuber_name, video_title, video_id, avatar_url]
    return video_information
Example 16
def ui(key):
    try:
        session_data = server.cache.get(key)
        return render_template('psst.html',
                               names=session_data['recipients'],
                               also_to=also_to(tail(session_data['recipients'])),
                               key=key)
    except Exception:
        return render_template("secret.html", link="/")
Example 17
    def loadDb(self,
               store: ApplicationStore = None,
               checkInitialised: bool = False):
        """Load the PreloadLogger database.

        Go through the directory and create all the relevant app instances and
        events. Can be made to insert all found apps into an ApplicationStore,
        or to exit if some Application instances are not properly initialised.
        """

        count = 0  # Counter of fetched files, for stats
        actors = set()  # Apps that logged anything at all
        empties = 0  # Matching files without content (logger crash)
        invalids = 0  # Files with corrupted content (logger crash)
        nosyscalls = []  # Logs with zero syscalls logged (not a bug)
        nosyscallactors = set()  # Apps that logged zero syscalls
        instanceCount = 0  # Count of distinct app instances in the dataset
        hasErrors = False  # Whether some uninitialised apps were found
        invalidApps = set()  # List of desktop IDs that could not be init'd
        eventCount = 0

        # List all log files that match the PreloadLogger syntax
        for file in os.listdir(self.path):
            # Ignore files that don't match
            if not PreloadLoggerLoader.pattern.match(file):
                continue

            count += 1

            # Process log files that match the PreloadLogger name pattern
            try:
                f = open(self.path + "/" + file, 'rb')
            except (IOError) as e:
                print("Error: could not open file %s: %s" % (file, str(e)),
                      file=sys.stderr)
            else:
                with f:
                    if os.fstat(f.fileno()).st_size == 0:
                        print("Info: file '%s' is empty. Skipping." % file)
                        empties += 1  # count it: `empties` feeds the stats below
                        continue

                    # Parse the first line to get the identity of the app,
                    # but sometimes the header ends up on the second line
                    # in some logs... So, parse until we find a match, and
                    # remember the line index of the header
                    idx = 0
                    headerLocation = 0
                    result = None
                    for binary in f:
                        try:
                            line = binary.decode('utf-8')
                        except (UnicodeDecodeError) as e:
                            print("Error: %s has a non utf-8 line: %s " %
                                  (file, str(e)),
                                  file=sys.stderr)
                            idx += 1
                            continue
                        result = PreloadLoggerLoader.header.match(line)
                        if result:
                            headerLocation = idx
                            break
                        idx += 1

                    # Files with a missing or corrupted header are invalid
                    if result is None:
                        print("%s is missing a header" % file, file=sys.stderr)
                        invalids += 1
                        continue

                    # Parse the header line, make sure it has the right length.
                    g = result.groups()
                    if (len(g) != 3):
                        print("%s has wrong group count: " % file,
                              result.group(),
                              file=sys.stderr)
                        invalids += 1
                        continue

                    # Filter interpreters, and rewrite them to get the identity
                    # of the app they launched instead.
                    items = space.split(g[2])
                    interpreterid = None

                    # Python
                    if (pyre.match(g[0])):
                        interpreterid = g[0]
                        g = self.parsePython(g, items)
                        # print("PYTHON APP: %s" % g[2])

                    # Bash
                    if (bashre.match(g[0])):
                        interpreterid = g[0]
                        g = self.parseBash(g, items)
                        # print("BASH APP: %s" % g[2])

                    # Java
                    if (javare.match(g[0])):
                        interpreterid = g[0]
                        g = self.parseJava(g, items)
                        # print("JAVA APP: %s" % g[2])
                    # Perl
                    if (perlre.match(g[0])):
                        interpreterid = g[0]
                        g = self.parsePerl(g, items)
                        # print("PERL APP: %s" % g[2])

                    # Mono
                    if (monore.match(g[0])):
                        interpreterid = g[0]
                        g = self.parseMono(g, items)
                        # print("MONO APP: %s" % g[2])

                    # PHP
                    if (phpre.match(g[0])):
                        interpreterid = g[0]
                        g = self.parsePHP(g, items)
                        # print("PHP APP: %s" % g[2])

                    # Get first and last event to calculate the timestamps.
                    tstart = float("inf")
                    tend = 0

                    skipCache = None
                    lineIdx = 0
                    f.seek(0, 0)
                    for binary in f:
                        # Ignore the header.
                        if lineIdx == headerLocation:
                            lineIdx += 1
                            skipCache = None
                            continue

                        # Decode line.
                        try:
                            line = binary.decode('utf-8')
                        except (UnicodeDecodeError) as e:
                            print("Error: %s has a non utf-8 line: %s " %
                                  (file, str(e)),
                                  file=sys.stderr)
                            lineIdx += 1
                            skipCache = None
                            continue

                        # Previous line did not end and was skipped, merge it.
                        if skipCache:
                            line = skipCache + line
                            skipCache = None

                        # Line continues...
                        if line.endswith('\\\n'):
                            skipCache = line
                            lineIdx += 1
                            continue

                        line = line.rstrip("\n").lstrip(" ")

                        # Line is a parameter to the last system call logged
                        if line.startswith(' '):
                            lineIdx += 1
                            continue

                        # Check that line is a syntactically valid system call
                        result = PreloadLoggerLoader.syscall.match(line)
                        if result is None:
                            if debugEnabled():
                                print("%s has a corrupted line (match): %s" %
                                      (file, line),
                                      file=sys.stderr)
                            lineIdx += 1
                            continue

                        # Update the timestamp (convert to ZG millisec format)
                        h = result.groups()
                        tstart = int(h[0]) * 1000
                        break

                    # TODO, first non-header line + tail code.
                    lastLine = tail(f)
                    result = None
                    if lastLine:
                        result = PreloadLoggerLoader.syscall.match(lastLine)

                    if result is None:
                        if debugEnabled():
                            print("%s's last line is corrupted: %s" %
                                  (file, lastLine),
                                  file=sys.stderr)
                    else:
                        # Update the timestamp (convert to ZG millisec format)
                        h = result.groups()
                        tend = int(h[0]) * 1000

                    # Check if the timestamps have been set
                    if tend == 0:
                        nosyscalls.append(g)
                        nosyscallactors.add(g[0])
                        continue

                    # Sometimes, short logs have event ordering problems... We
                    # can try to ignore these problems as all events are indi-
                    # vidually timestamped anyway.
                    if tstart > tend:
                        tstart, tend = tend, tstart

                    # TODO: process deletions and remove corresponding files

                    # Make the application
                    try:
                        app = Application(desktopid=g[0],
                                          pid=int(g[1]),
                                          tstart=tstart,
                                          tend=tend,
                                          interpreterid=interpreterid)
                        app.setCommandLine(g[2])
                    except (ValueError) as e:
                        print("MISSING: %s" % g[0], file=sys.stderr)
                        hasErrors = True
                        invalidApps.add(g[0])
                        continue

                    # Ignore study artefacts!
                    if app.isStudyApp():
                        continue

                    # Add command-line event
                    event = Event(actor=app, time=tstart, cmdlineStr=g[2])
                    app.addEvent(event)

                    # Add system call events
                    skipCache = None
                    lineIdx = 0
                    currentCall = None
                    prevTimestamp = 0
                    timeDelta = 0
                    f.seek(0, 0)
                    for binary in f:
                        # Ignore the header.
                        if lineIdx == headerLocation:
                            lineIdx += 1
                            skipCache = None
                            continue

                        # Decode line.
                        try:
                            line = binary.decode('utf-8')
                        except (UnicodeDecodeError) as e:
                            print("Error: %s has a non utf-8 line: %s " %
                                  (file, str(e)),
                                  file=sys.stderr)
                            lineIdx += 1
                            skipCache = None
                            continue

                        # Previous line did not end and was skipped, merge it.
                        if skipCache:
                            line = skipCache + line
                            skipCache = None

                        # Line continues...
                        if line.endswith('\\\n'):
                            skipCache = line
                            lineIdx += 1
                            continue

                        line = line[:-1]  # Remove ending "\n"

                        # Line is a parameter to the last system call logged
                        if line.startswith(' '):
                            if currentCall:
                                currentCall = (currentCall[0],
                                               currentCall[1] + '\n' + line)
                            elif debugEnabled():
                                print("%s has a corrupted line (no call): %s" %
                                      (file, line),
                                      file=sys.stderr)
                            lineIdx += 1
                            continue

                        # Check that line is a syntactically valid system call
                        result = PreloadLoggerLoader.syscall.match(line)
                        if result is None:
                            if debugEnabled():
                                print("%s has a corrupted line (match): %s" %
                                      (file, line),
                                      file=sys.stderr)
                            lineIdx += 1
                            continue

                        # Update the timestamp (convert to ZG millisec format)
                        h = result.groups()
                        timestamp = int(h[0]) * 1000

                        # Append the system call to our syscall list. Note that
                        # we do something odd with the timestamp: because PL
                        # only logs at second precision, a lot of system calls
                        # have the same timestamp, which causes the EventStore
                        # to sort them in the wrong order. So, every time we
                        # have a timestamp identical to the previous one, we
                        # increase a counter that sorts them. This works under
                        # the assumption that there are at most 1000 events per
                        # second.
                        if timestamp == prevTimestamp:
                            timeDelta += 1
                        else:
                            timeDelta = 0

                        # Process the last system call into an Event, and clear
                        # up the syscalls list to keep RAM free!
                        if currentCall:
                            event = Event(actor=app,
                                          time=currentCall[0],
                                          syscallStr=currentCall[1])
                            app.addEvent(event)
                            eventCount += 1

                        # Create the new syscalls list.
                        currentCall = (timestamp + timeDelta, h[1])
                        prevTimestamp = timestamp

                        lineIdx += 1

                    # Add the found process id to our list of actors, using the
                    # app identity that was resolved by the Application ctor
                    actors.add(app.desktopid)

                    if checkInitialised and not app.isInitialised():
                        print("MISSING: %s" % g[0], file=sys.stderr)
                        hasErrors = True

                    # Insert into the ApplicationStore if one is available
                    if store is not None:
                        store.insert(app)
                        instanceCount += 1

        if checkInitialised and hasErrors:
            if invalidApps:
                print("Invalid apps:", file=sys.stderr)
                for a in sorted(invalidApps):
                    print("\t%s" % a, file=sys.stderr)
            sys.exit(-1)

        # print("Apps that logged valid files:")
        # for act in sorted(actors):
        #     print(act)

        # print("\nApps that logged files without a single system call:")
        # for act in sorted(nosyscallactors):
        #     print(act)

        self.appCount = len(actors)
        self.instCount = count - empties - invalids - len(nosyscalls)
        self.eventCount = eventCount
        self.validEventRatio = 100 - 100 * (invalids + empties +
                                            len(nosyscalls)) / (count)

        print("Finished loading DB.\n%d files seen, %d valid from %d apps, "
              "%d empty files, "
              "%d logs with 0 syscalls from %d apps, "
              "%d invalid.\nIn "
              "total, %.02f%% files processed." %
              (count, self.instCount, self.appCount, empties, len(nosyscalls),
               len(nosyscallactors), invalids, self.validEventRatio))
        print("Instance count: %d" % instanceCount)
Example 18
def fetch_new_favorites(username):
    last_favorite = favorites[username][-1]
    new_faves = dropwhile(lambda img: img.link != last_favorite.link,
                          reversed(fetch_favorites(username)))
    return tail(list(new_faves))
Example 19
async def sendLog(self):
    await self.emit('logs', tail('CMDlog', self.config.logLength))
Example 20
def test_tail(self):
    self.assertEqual(tail([1, 2, 3, 4, 5]), [2, 3, 4, 5])
    self.assertEqual(tail([1]), [])
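The test pins down the list-flavoured tail used by the functional snippets; a one-line definition consistent with it:

def tail(xs):
    # Everything after the first element; [] for a single-element list.
    return xs[1:]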
Example 21
def received_message(self, message):
    message = json.loads(message.data)
    message_type = inverse(MESSAGES, head(message))
    method = getattr(self, message_type.lower(),
                     self.raise_not_implemented)
    method(*tail(message))
Example 22
    async def on_message(self, message):
        """ 
        Open to all:
        !scrape
        !screenshot
        !worldometer
        !covidtracking
        !hopkins
        !coronacloud
        Admin only:
        !train CASES DEATHS RECOVERED
        !scrape (in Europe!)
        !disabletracker
        """

        """if message.author.id == self.novel_bot_id: #Read check and save it
            country = convert_channel_to_country(str(message.channel))
            channel = message.channel
            interface.process_check(country, str(message.content))
            await channel.send("Praise the Creator")
            return"""
        # we do not want the bot to reply to itself

        if (message.author == self.user or str(message.channel) in other_channels):
            return

        user_is_staff = self.is_staff(message.author)
        user_is_normal = self.is_normal_user(message.author)

        words = message.content.split()  
        channel = message.channel
        country = convert_channel_to_country(str(message.channel))
        country = country[0].upper() + country[1:]

        if user_is_normal or user_is_staff: #Normal user commands
            if message.content.startswith('!screenshot'):    
                await channel.send("Beep boop! Taking a screenshot, please stand by...")
                if len(words) > 1 and (words[1] == "s" or words[1] == "slow"):
                    self.command_queue.put("screenshot {} -d".format(country))
                else:
                    self.command_queue.put("screenshot {} -d -f".format(country))

        if user_is_staff: #Staff only commands
            if message.content.startswith('!scrape'):
                if len(words) >= 2:
                    no_check = False
                    cache_only = False
                    if words[1] == "covidtracker" or words[1] == "covidtracking" or words[1] == "ct": 
                        scrape_type = "covidtracking"
                    elif words[1] == "hopkins" or words[1] == "johnhopkins" or words[1] == "john"  or words[1] == "jh":
                        scrape_type = "hopkins"
                    elif words[1] == "auto" or words[1] == "a":
                        scrape_type = ""
                    elif words[1] == "regions" or words[1] == "r":
                        scrape_type = "r"
                    else:
                        await self.send_error_message("Incorrect scrape type", channel.name)
                        return
                        
                    if scrape_type == "covidtracking" and str(channel) not in us_channels:
                        await self.send_error_message("Covidtracking.com only has data on US states", channel.name)
                        return
                    if scrape_type == "hopkins" and (str(channel) not in europe_channels or str(channel) == "europe") and str(channel) not in canada_channels:
                        await self.send_error_message("John Hopkins has no data on this country/state", channel.name)
                        return

                    time = datetime.datetime.now()
                    date = interface.convert_datetime_to_string(time)
                    if len(words) >= 3:  # Date argument
                        if words[2] == "nocheck" or words[2] == "nc":
                            no_check = True
                        elif words[2] == "cacheonly" or words[2] == "co":
                            cache_only = True
                        else:
                            if len(words) > 5:
                                log.warning("!scrape date incorrectly formatted")
                                return
                            date = words[2]
                            if len(words) >= 4:
                                if words[3] == "nocheck" or words[3] == "nc":
                                    no_check = True
                                elif words[3] == "cacheonly" or words[3] == "co":
                                    cache_only = True
                        
                    if "-" in date: #Range
                        date = "-r " + date
                    else:
                        date = "-t " + date

                    if no_check:
                        await channel.send("Beep boop! Investigating Covid-19 cases in {}, please stand by... (NOTE: SHEET CROSS-CHECKING IS DISABLED!)".format(country))
                    else:
                        await channel.send("Beep boop! Investigating Covid-19 cases in {}, please stand by...".format(country))
                    no_check_str = "-nocheck" if no_check else ""
                    cache_only_str = "-cacheonly" if cache_only else ""
                    self.command_queue.put("scrape {} {} -d -disp {} {} {}".format(country, scrape_type, date, no_check_str, cache_only_str))
            elif message.content.startswith('!abort'): #Reset the command queue
                log.info("Recieved Discord abort command, killing program...")
                self.command_queue = Queue()
                await channel.send("Initiating abort, shutting everything down...")
                config.REBOOT_ON_CRASH = False
                exit()
            elif message.content.startswith('!log'):
                amount_of_lines = 10  # fallback assumed when no count is given
                if len(words) > 1:
                    amount_of_lines = int(words[1])
                await channel.send("Beep boop! Sending the last {} lines of logging".format(amount_of_lines))
                logs = utils.tail("log_file.log", amount_of_lines)
                await channel.send(logs)
            elif message.content.startswith('!train'):
                command = words[1:]
                if len(command) > 0 and "=" not in command[0] and len(command) <= 3:
                    command = ' '.join(command)
                elif len(command) > 0 and "=" in command[0]:
                    command = "\"{}\"".format(str(create_dict_from_message(command)))
                self.command_queue.put("train {} -d -data {}".format(country, command))
                await channel.send("Beep boop! Training recognition model...")
                


 
        #if message.content.startswith('check'):
            #channel = message.channel
            #await self.fake_check(channel)
Example 24
def ossec_log(type_log='all',
              category='all',
              months=3,
              offset=0,
              limit=common.database_limit,
              sort=None,
              search=None):
    """
    Gets logs from ossec.log.
    :param type_log: Filters by log type: all, error or info.
    :param category: Filters by log category (e.g. ossec-remoted).
    :param months: Returns logs of the last n months. Defaults to 3 months.
    :param offset: First item to return.
    :param limit: Maximum number of items to return.
    :param sort: Sorts the items. Format: {"fields":["field1","field2"],"order":"asc|desc"}.
    :param search: Looks for items with the specified string.
    :return: Dictionary: {'items': array of items, 'totalItems': Number of items (without applying the limit)}
    """
    logs = []

    first_date = previous_month(months)
    statfs_error = "ERROR: statfs('******') produced error: No such file or directory"

    for line in tail(common.ossec_log, 2000):
        log_fields = __get_ossec_log_fields(line)
        if log_fields:
            log_date, log_category, level, description = log_fields

            if log_date < first_date:
                continue

            if category != 'all':
                if log_category:
                    if log_category != category:
                        continue
                else:
                    continue

            log_line = {
                'timestamp': str(log_date),
                'tag': log_category,
                'level': level,
                'description': description
            }
            if type_log == 'all':
                logs.append(log_line)
            elif type_log.lower() == level.lower():
                if "ERROR: statfs(" in line:
                    if statfs_error in logs:
                        continue
                    else:
                        logs.append(statfs_error)
                else:
                    logs.append(log_line)
            else:
                continue
        else:
            if logs != []:
                logs[-1]['description'] += "\n" + line

    if search:
        logs = search_array(logs, search['value'], search['negation'])

    if sort:
        if sort['fields']:
            logs = sort_array(logs,
                              order=sort['order'],
                              sort_by=sort['fields'])
        else:
            logs = sort_array(logs, order=sort['order'], sort_by=['timestamp'])
    else:
        logs = sort_array(logs, order='desc', sort_by=['timestamp'])

    return {'items': cut_array(logs, offset, limit), 'totalItems': len(logs)}
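A hypothetical call showing the parameter shapes the docstring describes (argument values are illustrative, not taken from the real module):

result = ossec_log(type_log='error',
                   category='ossec-remoted',
                   months=1,
                   offset=0,
                   limit=50,
                   sort={"fields": ["timestamp"], "order": "desc"},
                   search={"value": "statfs", "negation": False})
print(result['totalItems'], "matching log lines")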