class DecryptionWriter:
    def __init__(self):
        self.writer = Writer('decryption_output.csv',
                             ['VECTORS', 'AES-ECB', 'AES-CBC', 'RSA_OAEP'])

    def write(self, row):
        self.writer.write(row)
Example 2
class VerifyWriter:
    def __init__(self):
        self.writer = Writer('verify_output.csv',
                             ['VECTORS', 'RSA_PSS', 'ECDSA', 'DSA'])

    def write(self, row):
        self.writer.write(row)
class HashingWriter:
    def __init__(self):
        self.writer = Writer('hashing_output.csv',
                             ['VECTORS', 'SHA-1', 'SHA-2', 'SHA-3'])

    def write(self, row):
        self.writer.write(row)
Example 4
class SignatureWriter:
    def __init__(self):
        self.writer = Writer('signature_output.csv',
                             ['VECTORS', 'RSA-PSS', 'DSA', 'ECDSA'])

    def write(self, row):
        self.writer.write(row)
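The four wrapper classes above (decryption, verify, hashing, signature) all rely on the same implied interface: a Writer built from an output filename and a header row, exposing a write(row) method. A minimal CSV-backed sketch of such a helper follows; the body is an assumption for illustration, not the Writer these snippets actually import.

import csv

class Writer:
    """Hypothetical CSV-backed writer matching the interface used above."""

    def __init__(self, filename, header):
        # Open the output file and emit the header row once
        self._file = open(filename, 'w', newline='')
        self._csv = csv.writer(self._file)
        self._csv.writerow(header)

    def write(self, row):
        # Append a single data row and flush so partial runs remain usable
        self._csv.writerow(row)
        self._file.flush()

    def close(self):
        self._file.close()

With such a helper, HashingWriter().write(['vector-01', sha1_hex, sha2_hex, sha3_hex]) would append one result row per test vector (the column values here are hypothetical).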
Example 5
def start():
    data = OrderedDict()
    data.update({'Sheet -': [[1, 5, 9], [2, 'f**k', 0]]})
    writer = Writer('inout/Testy.ods')
    writer.setData(data)
    matrix = [[1, 2, '3'], [4, 5, 6], ['7', '8', '9']]
    writer.addSheet('Hi there', matrix)
    writer.write('ods')
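The start() function above drives an ODS-oriented Writer: setData() takes an OrderedDict mapping sheet names to row matrices, addSheet() adds another sheet, and write('ods') flushes everything to disk. A rough equivalent using the third-party pyexcel-ods package (an assumption; it is not the Writer used in this example) could look like:

from collections import OrderedDict

from pyexcel_ods import save_data  # assumed backend, not the example's Writer

def start_alternative():
    data = OrderedDict()
    data['Sheet -'] = [[1, 5, 9], [2, 'x', 0]]
    data['Hi there'] = [[1, 2, '3'], [4, 5, 6], ['7', '8', '9']]
    # save_data writes every sheet in the mapping into a single .ods file
    save_data('inout/Testy.ods', data)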
Example 6
class TweetObtainer(StreamListener):
    writer = None
    sentimentAnalyzer = None
    tokens = ''
    parameter = ''
    liveView = None
    pieView = None
    currentNumber = 0
    stream = None

    def __init__(self, parameter, liveView, pieView):
        self.sentimentAnalyzer = SentimentAnalyzer()
        self.writer = Writer()
        self.parameter = parameter
        print('Creating token')
        self.liveView = liveView
        self.pieView = pieView

    def init_stream(self):
        self.writer.setSaveFile('StreamedTweets.txt')

    def start(self):
        print("Setting up tweetobtainer")
        #TwitterAPI authorization
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_secret)
        self.stream = Stream(auth, self)
        self.stream.filter(track=[self.parameter], languages=['en'])

    '''
    Called every time a tweet comes in.
    Forwards the fetched tweet to the analysis and writes the
    analysis + tweet to a file if fewer than 10,000 have been
    fetched this session. Sleeps for 1 second so there is enough
    time to process the tweet.
    '''
    def on_data(self, data):
        text = json.loads(data)

        #Use only the text field of obtained JSON String
        if 'text' in text:
            text = text['text']
            tweet = self.sentimentAnalyzer.preprocess(text)
            print(tweet)
            sentiment = self.sentimentAnalyzer.analyse(tweet)
            if self.currentNumber <= 10000:
                self.writer.write(sentiment + text)
                self.currentNumber += 1
            self.liveView.update(sentiment)
            self.pieView.update()
            time.sleep(1)
        return True

    def on_error(self, status_code):
        print('Got an error with status code: ' + str(status_code))
        return True # To continue listening

    def on_timeout(self):
        print('Timeout...')
        return True # To continue listening

    def stop_stream(self):
        self.stream.disconnect()
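A hypothetical driver for the listener above might look like the following; keyword is the tracked term, and live_view/pie_view stand in for whatever UI objects the original application wires in (every name below is an assumption):

def run_stream(keyword, live_view, pie_view):
    # Build the listener, point it at an output file, and start filtering
    obtainer = TweetObtainer(keyword, live_view, pie_view)
    obtainer.init_stream()
    try:
        obtainer.start()        # blocks while the stream is connected
    except KeyboardInterrupt:
        obtainer.stop_stream()  # disconnect cleanly on Ctrl-C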
Example 7
class Books:
    def __init__(self, path=None, arabic=True):
        self.arabic = arabic
        # Browsing and writing managers
        self.br = Browser()
        self.wr = Writer(path) if path else Writer()
        # An array for scrapped books
        self._books_ids = []

    # Append an external books ids array to local array
    def append_books(self, books_ids):
        # Loop through sent books ids
        for book_id in books_ids:
            # Only append id if it's not stored already
            if book_id not in self._books_ids:
                self._books_ids.append(book_id)

    # Scrape books and write them to a file (browse is: list, lists, author or shelf)
    def output_books(self, keyword=None, browse="list", file_name="books"):
        self.wr.open(file_name, "w+")
        # Get books if keyword is provided, otherwise output stored books
        books_ids = self.get_books(keyword, browse) if keyword else self._books_ids
        # Loop through book ids and write them
        for book_id in books_ids:
            self.wr.write(book_id)
        self.wr.close()

    def output_books_editions(self, books_ids=None, file_name="editions"):
        skip = len(read_books(file_name))
        self.wr.open(file_name, "a+")
        # Loop through book ids and write their editions id
        for book_id in (books_ids or self._books_ids)[skip:]:
            editions_id = self.get_book_editions_id(book_id)
            # Editions id is None when page refuses to load
            if editions_id is None: return self.wr.close()
            # Write editions id to file if it loads correctly
            self.wr.write(editions_id or "-"*7)
            # Display book id and editions id
            print(f"Book ID:\t{book_id:<15}Book Editions ID:\t{editions_id or ''}")
        self.wr.close()
        return True

    def output_books_edition_by_language(self, editions_ids, lang="Arabic", file_name="ara_books"):
        skip = len(read_books(file_name))
        self.wr.open(file_name, "a+")
        # Loop through book ids and write their editions id
        for editions_id in editions_ids[skip:]:
            books_ids = self.get_book_edition_by_language(editions_id, lang) if editions_id.isdigit() else ''
            # Editions id is None when page refuses to load
            if books_ids is None: return self.wr.close()
            # Write editions id to file if it loads correctly
            self.wr.write(books_ids or "-"*7)
            # Display book id and editions id
            print(f"Book Editions ID:\t{editions_id:<15}Books IDs:\t{books_ids or ''}")
        self.wr.close()
        # Open a new file to move done list to it
        self.wr.open(file_name + "_list")
        # Loop through previously scraped editions ids
        for line in read_books(file_name):
            # If the line isn't just a placeholder
            if line != "-"*7:
                # Write each book edition id on a separate line
                for id_ in line.split(','):
                    self.wr.write(id_)
        self.wr.close()
        return True

    # Main function to scrape books ids
    def get_books(self, keyword, browse="list"):
        # Get lists in search list if searching
        if browse == "lists":
            keywords = self._get_lists(keyword.replace(' ', '+'))
            browse = "list"
        # Otherwise, it's a single "list" or "shelf"
        else:
            keywords = [
                str(key) for key in (
                    keyword if isinstance(keyword, list) else [keyword]
                )]
        try:
            # Loop through all lists
            for keyword in keywords:
                # Open each list url
                self.br.open_page(keyword, browse)
                # Scrape pages until there's no next page
                while True:
                    self._scrape_list("book", self._books_ids)
                    if not self.br.goto_next_page():
                        break
        except Exception as e:
            print("Couldn't go to next page:", e)
        finally:
            return self._books_ids

    def get_book_editions_id(self, book_id):
        self.br.open("/book/show/", book_id)
        return self.br.editions_id()

    def get_book_edition_by_language(self, editions_id, lang):
        self.br.open_book_editions(editions_id)
        soup = BeautifulSoup(self.br.page_source, "lxml").find(class_="workEditions")
        if not soup: return None
        editions = []
        for details in soup.find_all(class_="editionData"):
            language, rating = [row.find(class_="dataValue") for row in details.find_all(class_="dataRow")[-3:-1]]
            if language.text.strip() == lang:
                reviewers = get_digits(rating.find("span").text)
                if reviewers > 50:
                    editions.append(id_from_url.match(details.find(class_="bookTitle")["href"]).group(1))
        return ','.join(editions)

    # Main function to scrape lists ids
    def _get_lists(self, keyword):
        lists = []
        # Open GoodReads' lists search url
        self.br.open_list_search(keyword)
        # Scrape all result pages
        while True:
            self._scrape_list("list", lists)
            # Go to next page if there's one, otherwise break
            if not self.br.goto_next_page():
                break
        return lists

    # Scrape a single search results page
    def _scrape_list(self, title, array):
        soup = BeautifulSoup(self.br.page_source, "lxml").find(class_="tableList")
        if not soup: return None
        for book in soup.find_all("tr"):
            if self.arabic or get_digits(book.find(class_="minirating").text.split("—")[1]) > 1000:
                try:  # Get id from url
                    id_ = id_from_url.match(book.find(class_=title + "Title")["href"]).group(1)
                except Exception:
                    print("Couldn't extract Book Id from URL")
                    continue
                # Extract and store unique id from link
                if id_ not in array:
                    array.append(id_)
                    print(f"{title.capitalize()} {id_:<10}count:\t{len(array)}")
Example 8
def process(ROOT_PATH, CORE_FILE, fid, rev):

    WXRX_LOG_FILE = _get_log_file_(ROOT_PATH, fid)

    #set BASE_TIME from the 2nd line (logging start) in the WXRX_LOG_FILE
    BASE_TIME = get_base_time(WXRX_LOG_FILE)
    WXRX_NETCDF_FILENAME = 'weather-radar_faam_%s_r%s_%s.nc' % (
        datetime.datetime.strftime(BASE_TIME,
                                   '%Y%m%d'), str(rev), str.lower(fid))

    if os.path.exists(os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME)):
        sys.stdout.write('weather radar netCDF\n')
        sys.stdout.write('  ... %s\n' %
                         os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME))
        sys.stdout.write('already exists! Exiting ...\n')
        sys.exit(2)

    # get unique valid wxrx-tmp-filelist from log file
    wxrx_file_list = get_wxrx_tmp_filelist(WXRX_LOG_FILE)
    # Size of the largest tmp-wxrx-data file
    MAX_SIZE = np.max([
        os.stat(os.path.join(ROOT_PATH, wxrx_file)).st_size
        for wxrx_file in wxrx_file_list
    ])
    # Upper bound on the number of records, assuming 1744 bits per record
    MAXIMUM_NUMBER_OF_RECORDS = int(MAX_SIZE * 8 // 1744) + 1
    wxrx_data_list = []

    _RECS = np.zeros(MAXIMUM_NUMBER_OF_RECORDS,
                     dtype=[('label', np.str_, 4), ('control_accept', np.byte),
                            ('slave', np.byte), ('mode_annunciation', np.byte),
                            ('faults', np.byte), ('stabilization', np.byte),
                            ('operating_mode', np.byte), ('tilt', np.float64),
                            ('gain', np.float64), ('range', np.int16),
                            ('data_accept', np.byte), ('scan_angle', np.float64),
                            ('reflectivity', np.byte, (512, ))])

    A708 = Arinc708()

    for wxrx_file in wxrx_file_list:
        sys.stdout.write('Reading ... %s\n' % (wxrx_file))
        # TODO: adding progressbar to see where we are including ETA
        wxrx_data = Reader(os.path.join(ROOT_PATH, wxrx_file))
        wxrx_data.parse()
        sys.stdout.write(str(wxrx_data))
        ix = []
        for i in range(len(wxrx_data.Buswords)):
            try:
                _RECS[i] = A708.parse(wxrx_data.Buswords[i])
                ix.append(i)
            except Exception:
                # Skip buswords that fail to parse
                pass

        wxrx_data.sIndexList = list(np.array(wxrx_data.sIndexList)[ix])

        add_timestamp(wxrx_data, WXRX_LOG_FILE)
        wxrx_data.Records = _RECS[ix]
        wxrx_data_list.append(wxrx_data)
        # Drop the local reference; the parsed data lives on in wxrx_data_list
        del wxrx_data

    # TODO
    _s = Setup(os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME))
    sys.stdout.write('Creating empty netCDF ...\n')

    sys.stdout.write('Writing data to ... %s\n' %
                     (os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME)))
    wxrx_nc_writer = Writer(os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME),
                            wxrx_data_list)
    wxrx_nc_writer.write()
    sys.stdout.write('Merging faam_core data ... %s\n' % (CORE_FILE))
    # TODO
    wxrx_nc_writer.merge_core_file(CORE_FILE)
    wxrx_nc_writer.close()

    # create overview figure
    Overview(
        os.path.join(ROOT_PATH, WXRX_NETCDF_FILENAME),
        os.path.join(
            ROOT_PATH, '%s_%s_wxrx_overview.png' %
            (fid, datetime.datetime.strftime(BASE_TIME, '%Y%m%d'))))
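A call to process() would then look something like this; the paths, flight id, and revision are placeholders rather than values from the original project:

if __name__ == '__main__':
    # ROOT_PATH holds the wxrx log and tmp files; CORE_FILE is the FAAM core netCDF
    process(ROOT_PATH='/data/flights/b123',
            CORE_FILE='/data/flights/b123/core_faam_b123.nc',
            fid='B123',
            rev=0)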