Esempio n. 1
0
def test_process_line():
    """A single well-formed log line yields one URL hit and one status-code hit."""
    lp = log_parser.LogParser('log.txt')
    lp.process_line('level=info request_to="http://localhost" response_status="200"')
    assert lp.urls == {'http://localhost': 1}
    assert lp.status_codes == {'200': 1}
    def __init__(self, vidinfo_dict):
        """Store the video-info mapping and cache the shared resolution/bitrate tables."""
        # Constructor call kept for its (possible) side effects; the instance is unused.
        _unused_parser = log_parser.LogParser()

        self._vidinfo_dict = vidinfo_dict

        # Lookup tables published by the shared `namespace` module.
        self._arr_rs = namespace.RESOLUTION_LIST
        self._arr_br = namespace.BITRATE_LIST
        self._N = namespace.NO_RESOLUTION_LEVELS
Esempio n. 3
0
def test_process_line_strange_url():
    """URLs containing mixed case, digits, hyphens and a path are captured intact."""
    lp = log_parser.LogParser('log.txt')
    lp.process_line('level=info request_to="http://localHost-13.com.br/test" '
                    'response_status="200"')
    assert lp.urls == {'http://localHost-13.com.br/test': 1}
    assert lp.status_codes == {'200': 1}
Esempio n. 4
0
    def setUp(self):
        """Build a parser over the fake log with one typed and one untyped event rule."""
        super(LogParserTest, self).setUp()

        self.path_to_fake_log = "test_data/fake_logs/log_parser_fake_log"

        # One rule with an explicit message type, one relying on the default.
        self.fake_event_with_message_type = event_rule.EventRule(
            "A", "(.*)(I'm a test event)(.*)", constants.MessageType.INFO)
        self.fake_event = event_rule.EventRule(
            "B", "(.*)(I'm a different test event)(.*)")
        self.fake_events = [self.fake_event_with_message_type, self.fake_event]

        self.fake_parser = log_parser.LogParser(
            self.path_to_fake_log, self.fake_events)
Esempio n. 5
0
def test_process_file():
    """parse_file() aggregates URL and status-code counts over the whole log file."""
    lp = log_parser.LogParser('log.txt')
    lp.parse_file()
    assert lp.urls == {
        'http://localHost-13.com.br/test': 2,
        'http://localhost': 1,
    }
    assert lp.status_codes == {'200': 2, '400': 1}
Esempio n. 6
0
def update_db():
    """
    Goes over all logfiles stored in config.LOG_PATH, extracts all fights found
    and stores them in the db.

    Files already recorded as processed are skipped, so re-running is an
    incremental, idempotent operation.
    """
    persistence = DBApi()

    # NOTE(review): the backslash makes this glob Windows-specific; kept as-is
    # since the original clearly targets Windows paths — confirm before porting.
    for log_file in glob.glob(config.LOG_PATH + r'\*.log'):
        # Renamed from `file` to avoid shadowing the builtin.
        if persistence.file_already_processed(log_file):
            continue

        encounters = log_parser.LogParser(log_file).extract_fights()
        for encounter in encounters:
            persistence.save_encounter(encounter, log_file)

        # Mark only after every encounter was saved, so a crash mid-file retries it.
        persistence.mark_file_processed(log_file)
Esempio n. 7
0
def test_format_result():
    """format_result() renders URL counts, a blank separator line, then status counts."""
    lp = log_parser.LogParser('log.txt')
    lp.parse_file()
    assert lp.urls == {
        'http://localHost-13.com.br/test': 2,
        'http://localhost': 1,
    }
    assert lp.status_codes == {'200': 2, '400': 1}
    expected = ('http://localHost-13.com.br/test - 2\n'
                'http://localhost - 1\n\n'
                '200 - 2\n'
                '400 - 1')
    assert lp.format_result() == expected
def plot_main(folder):
    """Load parsed log data for *folder* and render every plot listed in its plots.json.

    Parameters
    ----------
    folder : str
        Configuration folder name, resolved relative to the module-level
        ``perf_folder`` (defined elsewhere in this file — TODO confirm).
    """
    # Normalize: the folder name must end with a slash.
    if not folder.endswith("/"):
        folder += "/"
        print(folder)

    log_parse = log_parser.LogParser()

    # For each configuration, get the data.
    list_configs, list_datas = log_parse.get_data(folder)

    plot_env = plots.Plotting_Environment(folder, list_configs, list_datas)

    # Read the plots.json file describing which plots to produce.
    plots_json_file = perf_folder + folder + "plots.json"
    with open(plots_json_file) as plot_json:
        plots_list = json.load(plot_json)

        # enumerate replaces the original hand-maintained counter.
        for i, plot in enumerate(plots_list):
            # Get the plotting function matching this plot's declared type.
            function = plot_env.get_plotting_function(plot['type'])

            # Optional per-plot configuration forwarded via attributes.
            if "parameter" in plot:
                plot_env.parameter = plot["parameter"]
                plot_env.column = plot["column"]

            # Call the function with the plot's index.
            function(i)
    # Glob mask matching every 6-digit frame image under the "best"-version
    # folder of this video (the enclosing function's header is outside this view).
    framemask = os.path.splitext(vidpath)[0] + '/' + namespace.FRAMEVERSION_BEST + '/??????.jpg'
    for framepath in glob.glob(framemask): #go over all frame items in the best version
        #for each best frame item, collect its score over all of its versions
        #print item
        try:
            gt_list = get_all_frameversions(framepath, vidpath, pred)
        except:
            # NOTE(review): bare except is tolerable here only because it logs
            # context and immediately re-raises — no error is swallowed.
            print ('ERROR: ', framepath, vidpath)
            raise
        result[framepath] = gt_list
    return vidpath, result

# In order for log_parser to work, previous steps are assumed to be taken.
# See https://github.com/phananh1010/content-aware/tree/master/scripts for further detail.

# NOTE(review): this module-level name shadows the LogParser class itself —
# confusing, but later code only uses the instance, so it is harmless here.
LogParser = log_parser.LogParser()
vidinfo_dict= LogParser.load_metainfo_dict()

# Converter for YouTube-BB annotations, keyed by the video metadata loaded above.
YConverter = youtubebb_converter.YoutubeBBConverter(vidinfo_dict)
mAP = metric_map.mAP()
# Two predictors at different configurations ('300' / '512' — presumably input
# resolutions, TODO confirm against predictor2).
pred300 = predictor2.Predictor('300')
pred512 = predictor2.Predictor('512')

#only run once to create processed Youtube annotation dict, write to FILEPATH_YOUTUBE_YANNODICT
#YConverter.parse_annotation(namespace.FILEPATH_YOUTUBE_RAWANNOCSV, namespace.FILEPATH_YOUTUBE_YANNODICT)
yanno, yanno_dict = YConverter.load_annotation(namespace.FILEPATH_YOUTUBE_YANNODICT)

# For debugging purposes only: raw and pre-filtered detection tables.
df = pd.read_csv('./data/YOUTUBE_data/yt_bb_detection_train.csv', header=None)
df1 = pd.read_pickle('./data/YOUTUBE_data/yt_bb_detection_train_filtered.pkl.gz', compression='gzip')
                            #./data/YOUTUBE_data/yt_bb_detection_train_filtered.csv
def parse_raw_log(raw_log_file):
    """Parse *raw_log_file* with a LogParser preloaded with every known PATTERN."""
    lp = log_parser.LogParser()
    for known_pattern in PATTERNS:
        lp.add_pattern(known_pattern)
    return lp.parse(raw_log_file)
Esempio n. 11
0
def parseFile(tbox_id):
    """Parse the log for *tbox_id* and persist every result found."""
    results = log_parser.LogParser(product).parseLog(tbox_id)
    # Identity check (`is not None`) instead of `!= None`; an empty list still
    # announces "saving" but the loop is a no-op, matching the original flow.
    if results is not None:
        # Parenthesized print works identically under Python 2 and 3.
        print("saving: " + tbox_id)
        for result in results:
            save(result)
Esempio n. 12
0
import audio_queue
import json
import gtts
import pyaudio
import youtube_dl
import os
import traceback

# Load the required runtime configuration.
try:
    with open("config.json") as f:
        config = json.load(f)
except FileNotFoundError:
    # Without a config there is nothing sensible to do. The original merely
    # printed and fell through, guaranteeing a NameError on `config` below;
    # exit cleanly with the same message instead.
    raise SystemExit("Config missing.")

# Chat log tailing + command parsing, driven by config.
loglistener = log_listener.LogListener(config["logfile"])
logparser = log_parser.LogParser(
    loglistener, chat_command_prefix=config["chat_command_prefix"])

# Two independent playback queues (TTS and youtube-dl audio) sharing one
# PyAudio instance and output device, each with its own volume.
pa = pyaudio.PyAudio()
tts_audio_queue = audio_queue.AudioQueue(
    pa,
    output_device=config["output_device"],
    frames_per_buffer=config["frames_per_buffer"],
    volume=config["tts_volume"])
ydl_audio_queue = audio_queue.AudioQueue(
    pa,
    output_device=config["output_device"],
    frames_per_buffer=config["frames_per_buffer"],
    volume=config["ydl_volume"])

if config["loopback"]:
Esempio n. 13
0
def parseFile(tbox_id):
    """Parse the log for *tbox_id*, saving results via callback and directly."""
    results = log_parser.LogParser(product).parseLog(tbox_id, callback=save)
    # NOTE(review): results may already have been saved through the callback
    # above; if parseLog also returns them, each is saved twice — verify upstream.
    # Identity check (`is not None`) instead of `!= None`.
    if results is not None:
        for result in results:
            save(result)
Esempio n. 14
0
def test_process_status_code():
    """A single processed status code is counted exactly once."""
    lp = log_parser.LogParser('log.txt')
    lp.process_status_code('400')
    assert lp.status_codes == {'400': 1}
Esempio n. 15
0
def test_process_url():
    """A single processed URL is counted exactly once."""
    lp = log_parser.LogParser('log.txt')
    lp.process_url('http://localhost')
    assert lp.urls == {'http://localhost': 1}