Code example #1
def log_parser():
    db = get_db()
    end_time = datetime.now()
    start_time = end_time - timedelta(days=7)
    lp = LogParser(start_time, end_time)
    result = lp.parse()
    return render_template('show_job_entries.html', job_entries=result)
Code example #2
def main():
    # NOTE: For now, we're not going to do this. Want to keep it here in case we go back to it.
    #  add_path_to_registry_startup_key(os.getenv('LOCALAPPDATA') + r'\Programs\FGStats_Client\fgstats_client.exe')

    # TODO: Add a try-catch and send to backend if we got an error here.
    STEAM_ID, STEAM_ACCOUNT_NAME = get_steam_user_info()

    while True:
        for episode in LogParser(follow_file(
                get_fall_guys_log_location())).parse():
            while True:
                try:
                    post('https://api.fgstats.com/client',
                         data=dumps({
                             'steam_id': STEAM_ID,
                             'steam_account_name': STEAM_ACCOUNT_NAME,
                             'episode_info': episode,
                         }))

                    break

                # Don't crash on connection issues; retry after a pause
                except RequestException:
                    sleep(30)
                    continue
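
The loop above relies on a follow_file helper that is not shown. Below is a minimal sketch of such a tail-style generator, assuming it yields lines as they are appended to the log file; the name comes from the call site, and the exact behavior is an assumption:

import time

def follow_file(path):
    # Hypothetical helper: yield lines appended to `path`, like `tail -f`.
    with open(path, 'r') as f:
        f.seek(0, 2)  # assumption: start at the end; drop this to replay existing lines
        while True:
            line = f.readline()
            if not line:
                time.sleep(0.5)  # wait for the game to write more output
                continue
            yield line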
Code example #3
File: log_sifter.py  Project: hanxuzjuckc/gsyslyzer
    def sift_log(self):
        """ Implements the logical flow of the multiple parsing stages. """

        # Parse the raw log
        log_parser = LogParser(self.log_file_path, self.event_rules)
        log_parser.parse_log()
        log_events = log_parser.log_events_found

        # Parse the log events
        event_parser = LogEventParser(log_events, self.group_rules)
        event_parser.parse_log_events()
        event_groups = event_parser.event_groups_found

        collect_statistics = self.flags.collect_statistics

        # Parse the event groups
        group_parser = EventGroupParser(event_groups, self.criterias,
                                        collect_statistics)
        group_parser.parse_event_groups()
        statistics = group_parser.statistics_summaries

        # Construct the output
        if self.flags.json_output:
            self.output_to_json(group_parser, collect_statistics)
        else:
            self.output_to_terminal(group_parser)
Code example #4
    def GET(self, t_interval):
        web.header('Access-Control-Allow-Origin', '*')
        url = 'http://192.168.20.251/axis-cgi/systemlog.cgi'
        username = '******'
        password = '******'
        parser = LogParser(url, username, password)
        dt = parser.parse_log(t_interval)
        return dt
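
For context, t_interval arrives through a capture group in the web.py URL mapping. A minimal wiring sketch, assuming the GET method above belongs to a handler class; the class name and route are illustrative, not from the source:

import web

urls = ('/log/(.+)', 'LogHandler')  # the capture group is passed to GET as t_interval

class LogHandler:
    def GET(self, t_interval):
        web.header('Access-Control-Allow-Origin', '*')
        return 'stub'  # the real body is shown in the example above

if __name__ == '__main__':
    app = web.application(urls, globals())
    app.run()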
Code example #5
def main(argv=None):
    """
    Application entry point
    """
    args = parse_arguments(argv)
    log_parser = LogParser(args.log_file)
    log_parser.parse_file()
    print(log_parser.format_result())
Code example #6
def parse_all_sessions(files):
    logParser = LogParser()
    sessions = []

    for file in files:
        session = logParser.parse(file)
        sessions.append(session)

    return sessions
Code example #7
def main():
    from config import Config
    conf = Config()

    log_list = LogParser(conf.log_file_path, conf.log_file_tag)
    # non_sso_lifecycle = NonSSOLifecycle()
    # non_sso_lifecycle.first_sign_in(log_list)
    sso_lifecycle = SSOLifecycle()
    sso_lifecycle.second_sign_in(log_list)
    print(sso_lifecycle.to_dict())
Code example #8
ファイル: server.py プロジェクト: a-kapila/Poker-Log-Parser
   def make_app(self):
      app = tornado.web.Application([
         # Additional endpoints to verify the service is up and running.
         (r"/stats/total", TotalGameHandler),
         (r"/stats", SingleGameHandler),
         (r"/stats/([^/]+)", PastGameHandler),
         (r"/update", UpdateStatsHandler)
      ])
      app.game_tracker = GameTracker(GAME_ID)
      app.stats_parser = StatsParser(GAME_ID) #, single=False, game_ids=GAME_IDS, filename="20.05-25.05")
      app.log_parser = LogParser(GAME_ID)

      return app
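
An application returned by make_app still needs the usual Tornado startup calls. A minimal serving sketch, assuming service is an instance of the (unshown) class that defines make_app; the port is illustrative:

import tornado.ioloop

app = service.make_app()
app.listen(8888)
tornado.ioloop.IOLoop.current().start()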
Code example #9
def main():
    """MAIN PROGRAM"""

    logging.info("Script starting...")
    params = Params()
    logging.info("Parsing parameters...")
    try:
        params.get_args()

    except (errors.InputError, errors.ParamError) as e:
        sys.stderr.write(str(e) + '\n')
        exit(1)

    logging.info("Parsing system calls log file...")

    # Position of system call names in the log
    if params.time_included:
        column_pos = 1
    else:
        column_pos = 0

    parser = LogParser(params.input_log, column_pos)

    #print("Calls analysed: {}".format(parser.system_calls_num)) # TODO

    if params.ngram is not None:
        logging.info("Getting Ngram...")

        feature_vector = parser.ngram(params.ngram, params.normalise, params.syscalls_list)
        #print("Unique sequences: {}".format(len(feature_vector)))

    elif params.co_occurrence_matrix is not None:
        logging.info("Getting Co-occurrence matrix...")

        feature_vector = parser.co_occurrence_matrix(params.co_occurrence_matrix, params.normalise,
                                                     params.syscalls_list)
    else:
        if not params.histogram:
            sys.stderr.write("No option selected, using histogram.\n")

        logging.info("Getting Histogram...")
        feature_vector = parser.histogram(params.normalise, params.syscalls_list)

    if params.csv_values:
        #print(feature_vector.get_csv_values(), file=params.output)
        writer = csv.writer(params.output, lineterminator="\n")
        writer.writerow(feature_vector.get_values())
    else:
        print(feature_vector, file=params.output, end='')

    params.cleanup()
Code example #10
def get_parser(parser_type, path_to_file):
    parser = None
    if parser_type == "script":
        try:
            with open(path_to_file) as fd:
                parser = ScriptParser(fd.read())
        except IOError:
            print('failed to open script file ' + path_to_file)
    elif parser_type == "log":
        try:
            with open(path_to_file) as fd:
                parser = LogParser(fd.read())
        except IOError:
            print('failed to open log file ' + path_to_file)
    return parser
Code example #11
def exec_operation():
    arg_type = ArgType()
    args_res = arg_type.analyze_type

    conf = Config()
    if arg_type.is_monitor:
        perform = Performance(arg_type.process_name, arg_type.message)

    if LifecycleBase._export_to is None:
        LifecycleBase._export_to = conf.export_path
    if Performance._export_to is None:
        Performance._export_to = conf.export_path


    operations = {
        ArgType.SIGN_IN | ArgType.FIRST_SIGN_IN :\
            lambda var, msg: lifecycle_cases.NonSSOLifecycle().first_sign_in(var, msg),

        ArgType.SIGN_IN | ArgType.SECOND_SIGN_IN:\
            lambda var, msg: lifecycle_cases.NonSSOLifecycle().second_sign_in(var, msg),

        ArgType.SIGN_IN | ArgType.FIRST_SIGN_IN | ArgType.SECOND_SIGN_IN:\
            lambda var, msg: lifecycle_cases.NonSSOLifecycle().first_second_sign_in_out(var, msg),

        ArgType.SSO | ArgType.SIGN_IN | ArgType.FIRST_SIGN_IN:\
            lambda var, msg: lifecycle_cases.SSOLifecycle().first_sign_in(var, msg),

        ArgType.SSO | ArgType.SIGN_IN | ArgType.SECOND_SIGN_IN:\
            lambda var, msg: lifecycle_cases.SSOLifecycle().second_sign_in(var, msg),

        ArgType.SSO | ArgType.SIGN_IN | ArgType.FIRST_SIGN_IN | ArgType.SECOND_SIGN_IN:\
            lambda var, msg: lifecycle_cases.SSOLifecycle().first_second_sign_in_out(var, msg),

        ArgType.MONITOR:\
            lambda: perform.begin_monitor(time_span=arg_type.timespan),
    }

    method = operations.get(args_res)
    if method is not None:
        if arg_type.is_monitor:
            method()
        else:
            log_list = LogParser(conf.log_file_path, conf.log_file_tag)
            method(log_list, arg_type.message)
            if arg_type.clean:
                log_list.clean_log()
                logging.info("Log cleaned")
Code example #12
def update_measures(m_id, is_new_measure=False):
    if m_id is None:
        raise AttributeError("Measure id is None")
    else:
        measures = models.Measures.select().where(models.Measures.measure_id == int(m_id)).order_by(models.Measures.id)

    measures_lst = []
    measures_lst.append(['Id', 'Temperature Outside', 'Engine Temperature', 'Pressure', 'Voltage', 'Fuel level'])
    for m in measures:
        measures_lst.append(m.to_list())
    lp = LogParser()
    if int(is_new_measure) == 1:
        lp.parse(truncate_log=True, new_measure=True)
    else:
        lp.parse(truncate_log=True)

    return jsonify(measures_lst)
Code example #13
def log_parser_plugin(cmd):
    print('Start plugin......')
    settings = get_settings(cmd)
    if not settings:
        print('Invalid command line flags')
        return 1

    print('Start log_parser...')
    lp = LogParser(settings)
    results = lp.log_process()

    if results['status'] != 'OK':
        print(results['error'])
        return 1
    print(results['message'])
    return 0
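
Since log_parser_plugin returns shell-style exit codes (0 on success, 1 on failure), a typical entry point hands them straight to the interpreter. A sketch under the assumption that cmd is the list of command-line flags:

import sys

if __name__ == '__main__':
    sys.exit(log_parser_plugin(sys.argv[1:]))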
Code example #14
def main():
    args = get_args()
    VERBOSE = get_verbosity(args)
    DPLoaded = get_date_parser_loaded()
    start_day = get_start_day(args, DPLoaded)
    duration = args.duration
    filename = get_filename(args)
    create_string = get_create_string(args, filename)
    filetype = args.type

    if VERBOSE:
        print_message(args, start_day, duration, filename, create_string,
                      filetype)

    if args.update:
        parser = LogParser(filename)
        parser.update_log()
    else:
        write_log_table(filename, create_string, start_day, duration, filetype)
Code example #15
def main():
    """MAIN PROGRAM"""

    logging.info("Script starting...")
    params = Params()
    logging.info("Parsing parameters...")
    try:
        params.get_args()

    except (errors.InputError, errors.ParamError) as e:
        sys.stderr.write(str(e) + '\n')
        exit(1)

    logging.info("Parsing system calls log file...")
    parser = LogParser(params.input_log, params.time_included)

    print("Calls analysed: {}".format(parser.system_calls_num))  # TODO

    print(parser, file=params.output)

    params.cleanup()
Code example #16
django.setup()

## continue normal imports
from log_parser import LogParser
from availability_app.models import Tracker

logging.basicConfig(
    filename=os.environ['AVL_API__LOG_PATH'],
    level=logging.DEBUG,
    format='[%(asctime)s] %(levelname)s [%(module)s-%(funcName)s()::%(lineno)d] %(message)s',
    datefmt='%d/%b/%Y %H:%M:%S',
)
log = logging.getLogger(__name__)

parser = LogParser()

os.nice(19)


class LogAnalyzer(object):
    """ Analyzes logs and populates db. """
    def __init__(self):
        self.last_checked_json_path = os.environ[
            'AVL_API__LAST_CHECKED_JSON_PATH']
        self.log_dir_path = os.path.dirname(os.environ['AVL_API__LOG_PATH'])
        self.last_read_log_datetime = None
        self.legit_ips = json.loads(os.environ['AVL_API__LEGIT_IPS_JSON'])
        self.legit_user_agents = json.loads(
            os.environ['AVL_API__LEGIT_USER_AGENTS_JSON'])
Code example #17
File: evaluator.py  Project: ANRGUSC/vesper
    for algo in folders:
        # One folder for each algorithm
        log_list = os.listdir(os.path.join(args.folder, algo))

        successful_job_count = 0
        total_jobs = 0
        total_time_ms = 0.0

        # np.int is deprecated/removed in modern NumPy; the builtin int is equivalent here
        pipeline_counts = np.zeros(len(cfg.PIPELINES), dtype=int)
        metrics[algo + '.makespans'] = []
        dev_counts = np.zeros(len(devices))

        for filename in log_list:
            filepath = os.path.join(args.folder, algo, filename)

            reader = LogParser(filepath)
            results = reader.extract()

            df = pd.DataFrame(data=results['job_pipelines'],
                              columns=['id', 'pipeline'])

            # Update count for each pipeline
            counts = df.pipeline.value_counts()
            keys = counts.keys()
            values = counts.values
            for i in range(len(keys)):
                pipeline_counts[keys[i]] += values[i]

            # Throughput constraint
            if T_o is None:
                T_o = results['T_o'][0][1]
Code example #18
def main(arguments: Optional[List[str]] = None) -> None:
    def parse_ignored_ips(x: str) -> List[ipaddress.IPv4Network]:
        return [
            ipaddress.ip_network(address, strict=False)
            for address in x.split(',')
        ]

    parser = argparse.ArgumentParser(description='Process log files.')

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        '--batch',
        '-b',
        action='store_const',
        dest="run_type",
        const=RunType.BATCH,
        help='Print a report on one or more completed log files.  The default.'
    )
    group.add_argument('--summary',
                       action='store_const',
                       dest="run_type",
                       const=RunType.SUMMARY,
                       help="Show the slugs that have been used in a log file")
    group.add_argument('--cronjob',
                       action='store_const',
                       dest="run_type",
                       const=RunType.BATCH,
                       help="Deprecated.  Use --batch instead")
    group.add_argument('--realtime',
                       '--interactive',
                       '-i',
                       '-r',
                       action='store_const',
                       dest="run_type",
                       const=RunType.REALTIME,
                       help='Watch a single log file in realtime')
    group.add_argument(
        '--xxfake-realtime',
        action='store_const',
        dest="run_type",
        const=RunType.FAKE_REALTIME,
        help=argparse.SUPPRESS,
    )
    parser.set_defaults(run_type=RunType.BATCH)

    group2 = parser.add_mutually_exclusive_group()
    group2.add_argument('--by-ip',
                        action='store_true',
                        dest='by_ip',
                        help='Sorts batched logs by host ip')
    group2.add_argument('--by-time',
                        action='store_false',
                        dest='by_ip',
                        help='Sorts batched logs by session start time')

    parser.add_argument('--html',
                        action='store_true',
                        dest='uses_html',
                        help='Generate html output rather than text output')
    parser.add_argument('--no-sessions',
                        action='store_true',
                        dest='no_sessions',
                        help="Don't generate detailed session information")

    parser.add_argument(
        '--date',
        '--cronjob-date',
        action='store',
        dest='date',
        help=
        'Date for --batch.  One of -<number>, yyyy-mm, or yyyy-mm-dd.  default is today.'
    )

    parser.add_argument('--api-host-url',
                        default=DEFAULT_FIELDS_PREFIX,
                        metavar='URL',
                        dest='api_host_url',
                        help='base url to access the information')
    parser.add_argument('--reverse-dns',
                        '--dns',
                        action='store_true',
                        dest='uses_reverse_dns',
                        help='Attempt to resolve the real host name')
    parser.add_argument(
        '--ignore-ip',
        '-x',
        default=[],
        action="append",
        metavar='cidrlist',
        dest='ignore_ip',
        type=parse_ignored_ips,
        help='list of ips to ignore.  May be specified multiple times')
    parser.add_argument(
        '--session-timeout',
        default=60,
        type=int,
        metavar="minutes",
        dest='session_timeout_minutes',
        help='a session ends after this period (minutes) of inactivity')
    parser.add_argument('--manifest',
                        default=[],
                        action='append',
                        dest='manifests')

    parser.add_argument(
        '--output',
        '-o',
        dest='output',
        help=
        "output file.  default is stdout.  For --batch, specifies the output pattern"
    )
    parser.add_argument(
        '--sessions-relative-directory',
        dest="sessions_relative_directory",
        help="relative directory into which to store the sessions information")
    parser.add_argument('--configuration',
                        dest='configuration_file',
                        default='opus.configuration',
                        help="location of python configuration file")

    # Stores DNS entries in a persistent database
    parser.add_argument('--xxdns-cache',
                        action="store_true",
                        dest="dns_cache",
                        help=argparse.SUPPRESS)

    # Debugging hack that shows all log entries
    parser.add_argument('--xxshowall',
                        action='store_true',
                        dest='debug_show_all',
                        help=argparse.SUPPRESS)

    # Caches the read entries into a database, rather than reading the log files anew each time.
    parser.add_argument('--xxcached_log_entry',
                        action='store_true',
                        dest='cached_log_entries',
                        help=argparse.SUPPRESS)

    parser.add_argument('log_files',
                        nargs=argparse.REMAINDER,
                        help='log files')
    args = parser.parse_args(arguments)

    run_type = cast(RunType, args.run_type)

    if run_type == RunType.BATCH:
        # Fix up the arguments to match what everyone else wants
        expand_globs_and_dates(args)
    elif args.glob:
        args.log_files = [
            file for pattern in args.log_files for file in glob.glob(pattern)
        ]
        args.manifests = [
            file for pattern in args.manifests for file in glob.glob(pattern)
        ]

    # args.ignore_ip comes out as a list of lists, and it needs to be flattened.
    args.ignored_ips = [ip for arg_list in args.ignore_ip for ip in arg_list]
    args.ip_to_host_converter = \
        IpToHostConverter.get_ip_to_host_converter(**vars(args))

    module = importlib.import_module(args.configuration_file)
    configuration = cast(AbstractConfiguration,
                         module.Configuration(**vars(args)))  # type: ignore
    log_parser = LogParser(configuration, **vars(args))

    if run_type == RunType.REALTIME:
        if len(args.log_files) != 1:
            raise Exception(
                "Must specify exactly one file for real-time mode.")
        log_entries_realtime = LogReader.read_logs_from_tailed_file(
            args.log_files[0])
        log_parser.run_realtime(log_entries_realtime)
    else:
        if len(args.log_files) < 1:
            raise Exception("Must specify at least one log file.")
        if args.cached_log_entries:
            log_entries_list = handle_cached_log_entries(args)
        else:
            log_entries_list = LogReader.read_logs(args.log_files)

        if run_type == RunType.BATCH:
            log_parser.run_batch(log_entries_list)
        elif run_type == RunType.SUMMARY:
            log_parser.run_summary(log_entries_list)
        elif run_type == RunType.FAKE_REALTIME:
            log_entries_list.sort(key=operator.attrgetter('time'))
            log_parser.run_realtime(iter(log_entries_list))
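
Because main accepts an optional argument list, it can be driven programmatically as well as from the shell. A minimal invocation sketch; the file paths and date are illustrative:

# Batch report over a completed log file, rendered as HTML
main(['--batch', '--html', '--date', '2021-01', 'logs/access.log'])

# Watch a single log file in realtime
main(['--realtime', 'logs/access.log'])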
Code example #19
    def __init__(self, db_path: str):
        self.db_path = db_path
        self.parser = LogParser()
Code example #20
    def setUp(self):
        self.log_parser = LogParser('access.log')
Code example #21
File: start.py  Project: daviola/quake_log_translator
from log_parser import LogParser
from game import GameReporter
import pprint, sys
p = LogParser("games.log")
p.load_to_memory()
p.import_games()


def start():
    while True:
        print('loading from games.log...')
        select = input("Please select one of the following options\n" +
                       "   1 - Simple report\n" +
                       "   2 - Simple Report Paginated\n" +
                       "   3 - Detailed Report Paginated\n" +
                       "   4 - Single Report By ID\n" +
                       "   5 - Overall Ranking\n" + "   6 - Exit\n")
        if select == '1':
            simple_report()
        elif select == '2':
            simple_report_paginated()
        elif select == '3':
            detailed_report_paginated()
        elif select == '4':
            single_report()
        elif select == '5':
            overall_ranking()
        elif select == '6':
            sys.exit(0)

Code example #22
#!/usr/bin/python3

# Python script, run daily by cron, to create database log entries

from cron_monitor import app
from log_parser import LogParser
from datetime import datetime, timedelta
from flask_mysqldb import MySQL
from flask import Flask, g
import config
end_time = datetime.now()
start_time = end_time - timedelta(days=1)

ctx = app.app_context()
ctx.push()

app.config['MYSQL_USER'] = config.MYSQL_DATABASE_USER
app.config['MYSQL_PASSWORD'] = config.MYSQL_DATABASE_PASSWORD
app.config['MYSQL_DB'] = config.MYSQL_DATABASE_DB
app.config['MYSQL_HOST'] = config.MYSQL_DATABASE_HOST
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
mysql = MySQL(app)
g.mysql_db = mysql.connection
lp = LogParser(start_time, end_time)
result = lp.parse()
Code example #23
import time
from datetime import timedelta
from log_parser import LogParser

start_time = time.monotonic()

parser = LogParser('logs.txt')
parser.parse()

end_time = time.monotonic()
print('Total time:', timedelta(seconds=end_time - start_time))
Code example #24
    def setUp(self):
        self.parser = LogParser('games.log')
Code example #25
            if temp is not None:
                players.append(temp)
            else:
                break
        return players


hideout = Hideout()

if __name__ == '__main__':

    logger.setLevel(level=logging.DEBUG)

    console_log = logging.StreamHandler()
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s %(lineno)d:%(filename)s(%(process)d) - %(message)s'
    )
    console_log.setFormatter(formatter)
    logger.addHandler(console_log)

    from log_parser import LogParser
    import time

    parser = LogParser(config.LOG_PATH)
    parser.start()

    hideout.start()

    while True:
        time.sleep(1)
Code example #26
    def run(self):
        """ The main routine which controls everything """
        framecount = 0

        # Create drawing tool to use to draw everything - it'll create its own screen
        drawing_tool = DrawingTool(wdir_prefix)
        drawing_tool.set_window_title_info(
            update_notifier=(" v" + self.tracker_version))
        opt = Options()

        parser = LogParser(wdir_prefix, self.tracker_version, LogFinder())

        event_result = None
        state = None
        custom_title_enabled = opt.custom_title_enabled
        read_from_server = opt.read_from_server
        write_to_server = opt.write_to_server
        game_version = opt.game_version
        state_version = -1
        twitch_username = None
        new_states_queue = []
        screen_error_message = None
        retry_in = 0
        update_timer = opt.log_file_check_seconds
        update_timer_override = 0  # nonzero overrides the normal polling delay; avoids a NameError below
        last_game_version = None

        while event_result != Event.DONE:
            # Check for events and handle them
            event_result = drawing_tool.handle_events()

            # The user checked or unchecked the "Custom Title Enabled" checkbox
            if opt.custom_title_enabled != custom_title_enabled:
                custom_title_enabled = opt.custom_title_enabled
                drawing_tool.update_window_title()

            # The user started or stopped watching someone from the server (or they started watching a new person from the server)
            if opt.read_from_server != read_from_server or opt.twitch_name != twitch_username:
                twitch_username = opt.twitch_name
                read_from_server = opt.read_from_server
                new_states_queue = []
                # Also restart version count if we go back and forth from log.txt to server
                if read_from_server:
                    state_version = -1
                    state = None
                    # Change the delay for polling, as we probably don't want to fetch it every second
                    update_timer_override = 2
                    # Show who we are watching in the title bar
                    drawing_tool.set_window_title_info(
                        watching=True,
                        watching_player=twitch_username,
                        updates_queued=len(new_states_queue))
                else:
                    drawing_tool.set_window_title_info(watching=False)
                    update_timer_override = 0

            # The user started or stopped broadcasting to the server
            if opt.write_to_server != write_to_server:
                write_to_server = opt.write_to_server
                drawing_tool.set_window_title_info(
                    uploading=opt.write_to_server)

            if opt.game_version != game_version:
                parser.reset()
                game_version = opt.game_version

            # Force refresh state if we updated options or if we need to retry
            # to contact the server.
            if (event_result == Event.OPTIONS_UPDATE
                    or (screen_error_message is not None and retry_in == 0)):
                # By setting the framecount to 0 we ensure we'll refresh the state right away
                framecount = 0
                screen_error_message = None
                retry_in = 0
                # Force updates after changing options
                if state is not None:
                    state.modified = True

            # normally we check for updates based on how the option is set
            # when doing network stuff, this can be overridden
            update_delay = opt.log_file_check_seconds
            if update_timer_override != 0:
                update_delay = update_timer_override

            # Now we re-process the log file to get anything that might have loaded;
            # do it every update_timer seconds (making sure to truncate to an integer
            # or else it might never mod to 0)
            frames_between_checks = int(Options().framerate_limit *
                                        update_delay)
            if frames_between_checks <= 0:
                frames_between_checks = 1

            if framecount % frames_between_checks == 0:
                if retry_in != 0:
                    retry_in -= 1
                # Let the parser do its thing and give us a state
                if opt.read_from_server:
                    base_url = opt.trackerserver_url + "/tracker/api/user/" + opt.twitch_name
                    json_dict = None
                    try:
                        json_version = urllib.request.urlopen(
                            base_url + "/version").read()
                        if int(json_version) > state_version:
                            # FIXME better handling of 404 error ?
                            json_state = urllib.request.urlopen(
                                base_url).read()
                            json_dict = json.loads(json_state)
                            new_state = TrackerState.from_json(json_dict)
                            if new_state is None:
                                raise Exception("server gave us empty state")
                            state_version = int(json_version)
                            new_states_queue.append((state_version, new_state))
                            drawing_tool.set_window_title_info(
                                updates_queued=len(new_states_queue))
                    except Exception:
                        state = None
                        log_error("Couldn't load state from server\n" +
                                  traceback.format_exc())
                        if json_dict is not None:
                            if "tracker_version" in json_dict:
                                their_version = json_dict["tracker_version"]
                            else:
                                # This is the only version that can upload to the server but doesn't include a version string
                                their_version = "0.10-beta1"

                            if their_version != self.tracker_version:
                                screen_error_message = "They are using tracker version " + their_version + " but you have " + self.tracker_version
                else:
                    force_draw = state and state.modified
                    state = parser.parse()
                    if force_draw and state is not None:
                        state.modified = True
                    if write_to_server and not opt.trackerserver_authkey:
                        screen_error_message = "Your authkey is blank. Get a new authkey in the options menu and paste it into the authkey text field."
                    if state is not None and write_to_server and state.modified and screen_error_message is None:
                        opener = urllib.request.build_opener(
                            urllib.request.HTTPHandler)
                        put_url = opt.trackerserver_url + "/tracker/api/update/" + opt.trackerserver_authkey
                        json_string = json.dumps(
                            state, cls=TrackerStateEncoder,
                            sort_keys=True).encode("utf-8")
                        request = urllib.request.Request(put_url,
                                                         data=json_string)
                        request.add_header('Content-Type', 'application/json')
                        request.get_method = lambda: 'PUT'
                        try:
                            result = opener.open(request)
                            result_json = json.loads(result.read())
                            updated_user = result_json["updated_user"]
                            if updated_user is None:
                                screen_error_message = "The server didn't recognize you. Try getting a new authkey in the options menu."
                            else:
                                screen_error_message = None
                        except Exception as e:
                            log_error(
                                "ERROR: Couldn't send item info to server\n" +
                                traceback.format_exc())
                            screen_error_message = "ERROR: Couldn't send item info to server, check tracker_log.txt"
                            # Retry to write the state in 10*update_timer (aka 10 sec in write mode)
                            retry_in = 10

            # Check the new state at the front of the queue to see if it's time to use it
            if len(new_states_queue) > 0:
                (state_timestamp, new_state) = new_states_queue[0]
                current_timestamp = int(time.time())
                if current_timestamp - state_timestamp >= opt.read_delay or opt.read_delay == 0 or state is None:
                    state = new_state
                    new_states_queue.pop(0)
                    drawing_tool.set_window_title_info(
                        updates_queued=len(new_states_queue))

            if state is None and screen_error_message is None:
                if read_from_server:
                    screen_error_message = "Unable to read state from server. Please verify your options setup and tracker_log.txt"
                    # Retry to read the state in 5*update_timer (aka 10 sec in read mode)
                    retry_in = 5
                else:
                    screen_error_message = "log.txt for " + opt.game_version + " not found. Make sure you have the right game selected in the options."

            if screen_error_message is not None:
                drawing_tool.write_error_message(screen_error_message)
            else:
                # We got a state, now we draw it
                drawing_tool.draw_state(state, framecount)

            # if we're watching someone and they change their game version, it can require us to reset
            if state and last_game_version != state.game_version:
                drawing_tool.reset_options()
                last_game_version = state.game_version

            drawing_tool.tick()
            framecount += 1

        # Main loop finished; program is exiting
        drawing_tool.save_window_position()
        Options().save_options(wdir_prefix + "options.json")
Code example #27
from log_parser import LogParser
from feature_extractor import FeatureExtractor
from clustering import Clustering
from idf import IDF

parser = LogParser('../../data/HDFS_2K.log')
tagged_events, log_sequences = parser.parse()

extractor = FeatureExtractor(log_sequences, list(tagged_events.keys()))
log_sequences = extractor.extract()

clustering = Clustering(log_sequences, tagged_events)
cluster_ids, cluster_values, silhouette = clustering.cluster()

for cluster_value in cluster_values:
    if cluster_value['num_possible_abnormal_events'] != 0:
        print(cluster_value)
        print()

#print("The silhouette coefficient is =", silhouette)

Code example #28
File: run.py  Project: J4CKVVH173/bius_charts
from log_parser import LogParser
import matplotlib.pyplot as plt

if __name__ == '__main__':
    logs = LogParser('text.log')
    for mode in logs.get_mods_values():
        plt.title(mode)
        plt.xlabel('time')
        plt.ylabel('speed')
        plt.grid()
        x = [
            i * 200 for i in range(
                len(logs.get_mods_values()[mode][logs.get_values_name()[-1]]))
        ]
        for value in logs.get_mods_values()[mode]:
            plt.plot(x, logs.get_mods_values()[mode][value], label=value)
        plt.legend(bbox_to_anchor=(1.05, 1),
                   loc='upper left',
                   borderaxespad=0.)
        plt.show()
Code example #29
    def run(self):
        parser = LogParser()
        while True:
            for line in Pygtail(self.log_file):
                record = parser.parse(line)
                self.stats.record(record)
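
The loop depends on a self.stats collaborator that is not shown. A minimal sketch of such a collector, assuming parser.parse returns a dict-like record per line; the class and its field names are hypothetical:

from collections import Counter

class Stats:
    def __init__(self):
        self.by_status = Counter()

    def record(self, rec):
        # Tally records by status code; the 'status' field is an assumption
        if rec is not None:
            self.by_status[rec.get('status')] += 1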