# extension = ".session"
    # extension = '.win'
    extension = '.adr'
    url_regex_simple_firefox = '"url":"([^"]*)","'
    #url_regex_all_urls = """'(?i)\b((?:https?:(?:/{1,3}|[a-z0-9%])|[a-z0-9.\-]+[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)/)(?:[^\s()<>{}\[\]]+|\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\))+(?:\([^\s()]*?\([^\s()]+\)[^\s()]*?\)|\([^\s]+?\)|[^\s`!()\[\]{};:'".,<>?«»“”‘’])|(?:(?<!@)[a-z0-9]+(?:[.\-][a-z0-9]+)*[.](?:com|net|org|edu|gov|mil|aero|asia|biz|cat|coop|info|int|jobs|mobi|museum|name|post|pro|tel|travel|xxx|ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|Ja|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|yu|za|zm|zw)\b/?(?!@)))'"""
    url_regex_http_https = '.*(http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+).*'
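    # Illustrative note (not part of the original source): url_regex_http_https
    # puts a single capturing group around the URL itself, so match.group(1)
    # holds just the address. For example, on the line
    #     {"url":"https://example.com/page","title":"t"}
    # the captured group is 'https://example.com/page'; the closing quote ends
    # the match because it falls outside the allowed character classes.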

    def __init__(self):
        self.collection = set()

    def parse(self, file):
        # pattern = re.compile(SimpleTextParser.url_regex_simple_firefox)
        pattern = re.compile(SimpleTextParser.url_regex_http_https)
        for i, line in enumerate(file):
            for match in re.finditer(pattern, line):
                print('Found on line {0}: {1}'.format(i + 1, match.groups()))
                self.collection.add(match.group(1))


if __name__ == "__main__":
    arg_parser = collector.setup_parser('txt')
    argsDict = collector.create_args_dict(arg_parser)
    collector = collector.Collector(
        SimpleTextParser, argsDict['srcdir'], argsDict['destdir'],
        argsDict.get('write_separate_result_files'))
    if 'diff' in argsDict:
        collector.make_diff(argsDict['diff'])
    else:
        collector.collect_all()
        collector.create_result_file()
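# Hypothetical usage sketch (not in the original example): feeding the parser
# an in-memory file shows that parse() fills self.collection with the captured
# URLs, one entry per matching line.
#
#     import io
#     p = SimpleTextParser()
#     p.parse(io.StringIO('visit https://example.com/page today\n'))
#     print(p.collection)   # {'https://example.com/page'}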
Example No. 2

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            pocket_tags = dict(attrs).get('tags')
            if not pocket_tags:
                pocket_tags = 'without_tag'

            url = dict(attrs).get('href')
            self.collection.add(url)
            if pocket_tags not in self.urls_by_category:
                self.urls_by_category[pocket_tags] = set()
            self.urls_by_category[pocket_tags].add(url)

    def handle_endtag(self, tag):
        pass
        # print("Encountered an end tag :", tag)

    def handle_data(self, data):
        pass
        # print("Encountered some data  :", data)


if __name__ == "__main__":
    arg_parser = collector.setup_parser('HTML')
    argsDict = collector.create_args_dict(arg_parser)
    collector = collector.Collector(HTMLPocketExportParser, argsDict['srcdir'],
                                    argsDict['destdir'],
                                    argsDict['write_separate_result_files'])
    if 'diff' in argsDict:
        collector.make_diff(argsDict['diff'])
    else:
        collector.collect_all()
        collector.create_result_file()
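# Hypothetical usage sketch (not in the original example; it assumes the
# constructor, which is not shown in this excerpt, initializes self.collection
# and self.urls_by_category): a Pocket export is plain HTML, so feed() drives
# the handle_starttag() callback above for every <a> element.
#
#     p = HTMLPocketExportParser()
#     p.feed('<a href="https://example.com/a" tags="news">A</a>')
#     print(p.urls_by_category)   # {'news': {'https://example.com/a'}}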
Example No. 3

    def parse(self, file):
        json_input = json.load(file)
        for s in self.id_generator(json_input):
            #print(s)
            self.collection.add(s)
            #pprint(json_input)

    def id_generator(self, json_dict):
        if isinstance(json_dict, dict):
            for k, v in json_dict.items():
                if k == "uri":
                    yield v
                elif isinstance(v, dict) or isinstance(v, list):
                    for id_val in self.id_generator(v):
                        yield id_val
        elif isinstance(json_dict, list):
            for listitem in json_dict:
                for id_val in self.id_generator(listitem):
                    yield id_val


if __name__ == "__main__":
    arg_parser = collector.setup_parser('json')
    argsDict = collector.create_args_dict(arg_parser)
    collector = collector.Collector(JsonParser, argsDict['srcdir'], argsDict['destdir'],
                                    argsDict['write_separate_result_files'])
    if 'diff' in argsDict:
        collector.make_diff(argsDict['diff'])
    else:
        collector.collect_all()
        collector.create_result_file()
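# Illustrative sketch (not in the original example; it assumes JsonParser()
# can be instantiated without arguments, as in the first example):
# id_generator() walks arbitrarily nested dicts and lists and yields every
# value stored under a "uri" key, e.g. the key Firefox bookmark backups use
# for bookmark URLs.
#
#     data = {"children": [{"uri": "https://a.example"},
#                          {"children": [{"uri": "https://b.example"}]}]}
#     print(list(JsonParser().id_generator(data)))
#     # ['https://a.example', 'https://b.example']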