Example 1
    def test_spinner_getters_setters(self):
        """Test spinner getters and setters.
        """
        spinner = Halo()
        self.assertEqual(spinner.text, '')
        self.assertEqual(spinner.color, 'cyan')
        self.assertIsNone(spinner.spinner_id)

        spinner.spinner = 'dots12'
        spinner.text = 'bar'
        spinner.color = 'red'

        self.assertEqual(spinner.text, 'bar')
        self.assertEqual(spinner.color, 'red')

        if is_supported():
            self.assertEqual(spinner.spinner, Spinners['dots12'].value)
        else:
            self.assertEqual(spinner.spinner, default_spinner)

        spinner.spinner = 'dots11'
        if is_supported():
            self.assertEqual(spinner.spinner, Spinners['dots11'].value)
        else:
            self.assertEqual(spinner.spinner, default_spinner)

        spinner.spinner = 'foo_bar'
        self.assertEqual(spinner.spinner, default_spinner)

        # Color is None
        spinner.color = None
        spinner.start()
        spinner.stop()
        self.assertIsNone(spinner.color)
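
For quick reference outside a test harness, the getter/setter behaviour exercised above reduces to a minimal sketch (assuming only that halo is installed):

from halo import Halo

spinner = Halo()            # defaults: text='', color='cyan'
spinner.text = 'bar'        # properties can be reassigned at any time
spinner.color = 'red'
spinner.spinner = 'dots12'  # unknown names fall back to the default spinner
spinner.start()
spinner.stop()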
Example 2
def find_operator_info(args: argparse.Namespace, operator_name: str) -> None:
    """With the specified arguments, calls all the functions
    needed to find information and print all information
    out to the screen.

    This function will determine whether to use Gamepress
    or JSON for information, then call either one's appropriate
    information-getting functions and build an Operator object using
    the provided information.

    The Operator object will be used for printing. Nothing is returned.
    """
    spinner = Halo(text="Fetching...", spinner="dots", color="magenta")
    # Show a progress spinner while the operator data is fetched
    spinner.start()

    operator_dict, operator_key = get_operator_dict(operator_name)

    spinner.text = "Parsing..."
    spinner.color = "yellow"

    operator = parse_operator_data(args, operator_dict, operator_key,
                                   operator_name)
    # ----------------------------------------

    if operator is not None:
        spinner.succeed("Success!")
        if operator_dict == {} or args.gamepress:
            sys.stdout.write("\nSkipping JSON; Using gamepress.\n")

        # Print out the results
        sys.stdout.write("\n\n" + operator.name + "   ")
        sys.stdout.write("*" * operator.rarity + "   ")  # Star rarity
        sys.stdout.write(operator.profession + "\n")

        sys.stdout.write(operator.get_formatted_tags() + "\n\n")

        for desc_text in operator.description:
            sys.stdout.write(desc_text)

        all_properties = [
            operator.get_property(prop)
            for prop in operator.get_all_properties()
        ]
        # Fetch the stats
        all_messages = ([parse_stats(operator.stats)] + all_properties
                        if operator.has_stats() else all_properties)

        for prop in all_messages:
            for text in prop:
                sys.stdout.write(text + "\n")

    else:
        spinner.fail("Failed.")
        sys.stdout.write("\n\n" + operator_name.replace("-", " ").title() +
                         "\n")
        sys.stdout.write("\n" + "Could not find operator! " +
                         "Either the server is down, or your spelling is! \n")

    sys.stdout.write("\n\n")
Example 3
import sys
from collections import OrderedDict

import requests
from bs4 import BeautifulSoup
from halo import Halo


def filterScrape(need, category, page):

    spinner = Halo(text='Scraping content', spinner='dots', animation='bounce')
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
    }
    output_dic = OrderedDict()
    found = 0

    try:
        while found < need:
            spinner.start()
            url = "https://ctftime.org/writeups?page={}&hidden-tags={}".format(
                page, category)
            spinner.text = "Scraping Page: {}".format(page)
            response = requests.get(url, headers=headers)
            soup = BeautifulSoup(response.content, 'html.parser')
            count_per_page = 0
            for tr in soup.find_all('tr')[1:]:
                tds = tr.find_all('td')
                w_no = tds[4].a["href"]
                task_name = tds[1].text
                writeup_url = "https://ctftime.org/" + w_no
                r = requests.get(writeup_url, headers=headers)
                spinner.text = "Parsing {} ({})".format(
                    w_no,
                    task_name.encode('ascii', 'ignore').decode('ascii'))
                spinner.color = "red"

                if len(task_name) > 30:
                    task_name = task_name[:27] + '...'

                flag = 0
                original_url = ""
                new_soup = BeautifulSoup(r.content, 'lxml')
                a = new_soup.find_all('a')

                for link in a:
                    if link.text == "Original writeup":
                        original_url = link['href']
                        if len(original_url) <= 125:
                            flag = 1
                            break
                if flag == 1:
                    if task_name in output_dic:
                        output_dic[task_name] += '\n' + original_url
                    else:
                        output_dic[task_name] = original_url
                        count_per_page += 1
                        found += 1
                else:
                    if task_name not in output_dic:
                        count_per_page += 1
                        found += 1
                    output_dic[task_name] = writeup_url

                if found == need:
                    break

            if count_per_page == 0:
                spinner.fail("Page {} doesn't exist.".format(page))
                spinner.info("Try decreasing the Page Seed or limit")
                spinner.info("Try changing the category")
                print(
                    "Such as : Change 'rev' -> 'reverse engineering' to get more results"
                )
                break
            else:
                spinner.succeed(
                    "Gathered writeups for {} tasks from page {}".format(
                        count_per_page, page))
                spinner.color = "cyan"
                page += 1

        return output_dic

    except (KeyboardInterrupt, SystemExit):
        spinner.warn('Program exited unexpectedly')
        sys.exit()
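
A hedged usage sketch for filterScrape; 'web' is an illustrative ctftime tag, not taken from the original code:

writeups = filterScrape(need=5, category='web', page=1)
for task, url in writeups.items():
    print(task, '->', url)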
Example 4
def find_recruitment_combos(args: argparse.Namespace) -> None:
    """Taking the specified namespace of arguments, this function will
    determine combinations of tags, find operators that match those
    combinations, and print to the screen a formatted list of
    combinations and operators, sorted by value bottom-to-top."""
    spinner = Halo(text="Fetching...", spinner="dots", color="magenta")
    spinner.start()

    op_list = initialize_operator_list()
    if op_list is None:
        spinner.fail("Failed.")
        sys.stdout.write(
            "\n\nThe tag JSON could not be fetched! Try again later.")
    else:
        spinner.text = "Calculating..."
        spinner.color = "yellow"

        tag_dict = initialize_tag_dictionary(op_list)

        # Build the English-to-Chinese tag translation dict (merging the
        # tag shortcuts with the premade tag conversions), plus a reversed
        # dict for converting formatted tags back.
        translation_dict = {
            **read_lines_into_dict("./info/recruitops/tagConversions.txt"),
            **read_lines_into_dict("./info/recruitops/tagShortcuts.txt")
        }
        reversed_translation_dict = read_lines_into_dict(
            "./info/recruitops/formattedTagConversions.txt", reverse=True)

        # Take in the user tags and find their proper, translated names
        # so that they can be used with the json.
        proper_tags = []
        # TODO: this tag process could probably be more optimized
        for tag in args.tags:
            if tag.lower() in translation_dict.keys():
                proper_tags.append(translation_dict[tag.lower()])
            else:
                # TODO: exit nicer
                raise Exception(f"The tag '{tag.lower()}' does not exist.")

        # Find all possible combinations of each tag combo
        all_matches = get_all_combinations(proper_tags, tag_dict,
                                           translation_dict,
                                           reversed_translation_dict)

        # Sort based on priority and format all the possible
        # combinations.
        #
        # Consists of all the tag combinations and results, sorted
        # by priority.
        all_sorted_selection = sorted(all_matches, key=lambda s: s.priority)
        messages = format_selections(args, all_sorted_selection)

        # Print the recruitment results
        spinner.succeed("Success!")
        sys.stdout.write("\n\nRecruitment Results\n\n")  # padding
        sys.stdout.write("Note: the lower down the tag collection, " +
                         "the better the tags.\n\n\n")  # padding

        if not messages:
            sys.stdout.write("Could not find any recruitment results.\n")
        else:
            for msg in messages:
                sys.stdout.write(msg + "\n")
        sys.stdout.write("\n")  # padding
Example 5
# -*- coding: utf-8 -*-
"""Example for doge spinner ;)
"""
from __future__ import unicode_literals
import os
import sys
import time

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from halo import Halo

spinner = Halo(text='Such Spins', spinner='dots')

try:
    spinner.start()
    time.sleep(2)
    spinner.text = 'Much Colors'
    spinner.color = 'magenta'
    time.sleep(2)
    spinner.text = 'Very emojis'
    spinner.spinner = 'hearts'
    time.sleep(2)
    spinner.stop_and_persist(symbol='🦄 '.encode('utf-8'), text='Wow!')
except (KeyboardInterrupt, SystemExit):
    spinner.stop()
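
Halo also works as a context manager, which guarantees the spinner stops even if an exception escapes; the same flow could be sketched as:

with Halo(text='Such Spins', spinner='dots') as sp:
    time.sleep(2)
    sp.text = 'Much Colors'
    sp.color = 'magenta'
    time.sleep(2)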
Example 6
def main():
    """Main function to provide the logic to setup the gmusicapi connection, query radio stations for song information,
    check the local database if the songs exist and if not, query google gmusic for the song ids and then update a google gmusic
    playlist with them. Subsequently update the local database with the addded song to the playlist.

    Playlists are managed by the number of songs that they can contain. If a playlist has over 900 songs, we query to see if any
    other playlists exist that contain < 900 songs and add the songs to them.

    """

    # first, query the radio-station websites for song data
    media_resources = MediaResources(steps=3)

    # use box so that we can retrieve dictionary fields in a more elegant manner
    box_radio_stations = box.Box(media_resources.radio_stations)

    spinner = Halo(text='Running asynchronous fetch on websites',
                   spinner='dots')
    spinner.start()
    spinner.color = 'magenta'

    # process async stations first
    # get the loop for the cbs stations
    loop = asyncio.get_event_loop()

    # loop over the cbs stations first
    for radio_station, url in box_radio_stations.cbs_stations.urls.items():
        # pull parameters from media_resources.radio_stations
        cbs_params = box_radio_stations.cbs_stations.params
        box_radio_stations.cbs_stations.headers.Referer = url
        interval = box_radio_stations.cbs_stations.interval

        playlist_songs_from_cbs_stations = loop.run_until_complete(
            media_resources.run_loop(
                loop,
                headers=box_radio_stations.cbs_stations.headers,
                url=url,
                params=cbs_params,
                station='cbs_stations',
                interval=interval))

        media_resources.parse_cbs_station_data(
            playlist_songs_from_cbs_stations)
    loop.close()
    # process tunegenie stations next
    # get the loop
    loop_again = asyncio.new_event_loop()

    for radio_station, url in box_radio_stations.tunegenie.urls.items():
        # parameters are similar to above
        tunegenie_params = box_radio_stations.tunegenie.params
        box_radio_stations.tunegenie.headers.Referer = url
        interval = box_radio_stations.tunegenie.interval

        playlist_songs_from_tunegenie_stations = loop_again.run_until_complete(
            media_resources.run_loop(
                loop_again,
                headers=box_radio_stations.tunegenie.headers,
                url=url,
                params=tunegenie_params,
                station='tunegenie',
                interval=interval))

        media_resources.parse_tunegenie_data(
            playlist_songs_from_tunegenie_stations)
    loop_again.close()

    spinner.succeed()
    spinner.color = 'cyan'
    spinner.text = "Running synchronous fetch on websites"

    # run synchronous get for the other stations
    spinner.start()
    media_resources.run_synchronous_process()
    spinner.succeed()

    # create google api search setup
    google_music_fetch = FetchSongs()

    # remove duplicates
    # first convert to set
    media_set = set(tuple(item) for item in media_resources.music_list)
    music_list = [list(item) for item in media_set]

    if not music_list:
        # log
        spinner.text = "Unable to retrieve song data from websites"
        spinner.fail()
        exit()

    # load pandas dataframe
    pandas_init = QueryUsingPandas(load_or_save=None,
                                   google_music_json_file=None,
                                   dataframe=None,
                                   remaining_songs=None,
                                   new_music_list=None,
                                   playlist=None,
                                   name=None,
                                   song_list=None)
    music_dataframe = pandas_init.load_and_save_pandas_dataframe(
        load_or_save='load')

    # create a filter to remove words that contain the following
    myfilter = ['**', '[', ']', '(', ')', '+']
    song_list = []
    # check if each song in music_list is in the pandas df
    for index, each_song in enumerate(music_list):
        artist, song = each_song[0], each_song[1]
        song = ' '.join([
            title for title in song.split(" ")
            if not any(i in title for i in myfilter)
        ])
        artist = ' '.join([
            singer for singer in artist.split(" ")
            if not any(i in singer for i in myfilter)
        ])

        if '??' in song or '??' in artist:
            continue

        if pandas_init.check_song_in_pandas_dataframe(
                dataframe=music_dataframe, artist=artist, song=song):
            # music_list.pop(index)
            continue

        # need to save the timestamp as well
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # search for song in google play
        # print("Searching for nid for song", song, "by", artist)
        song_nid = google_music_fetch.search_for_songs(artist=artist,
                                                       title=song)
        # print("Found nid", song_nid)

        if song_nid:
            song_list.append([artist, song, song_nid, timestamp])
        else:
            # song not found in google play
            continue

    # now that we have the song_list, query for a playlist to append to;
    # build a dictionary per playlist_id whose value is a list of lists
    start_index = 0
    for playlist_info in pandas_init.get_playlist(dataframe=music_dataframe,
                                                  new_music_list=song_list):
        song_ids = []
        if playlist_info and len(playlist_info) == 2:
            playlist_id, playlist_slots = playlist_info[0], playlist_info[1]
        else:
            # skip malformed entries; otherwise playlist_id would be unbound
            print("Need to log error")
            continue
        print(playlist_id, playlist_slots)
        # if not song_dict[playlist_id]:
        #     song_dict[playlist_id].append([])

        for each_song_list in song_list:
            each_song_list.append(playlist_id)

            song_ids.append(each_song_list[2])

        # now for the final phase. Update the playlist at Play Music with the songids
        if song_ids:
            print(
                google_music_fetch.add_songs_to_gmusic_playlist(
                    playlist_id,
                    song_ids[start_index:start_index + playlist_slots]))
            start_index = playlist_slots

    print(song_list)
    # update the pandas_dataframe
    music_dataframe = QueryUsingPandas.append_to_pandas_dataframe(
        dataframe=music_dataframe, song_list=song_list)
    pandas_init.load_and_save_pandas_dataframe(dataframe=music_dataframe,
                                               load_or_save='save')

    import pickle
    pickle.dump(song_list, open('/tmp/pickle1', 'wb'))
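
The de-duplication step in main() works by converting the inner lists to hashable tuples before building a set; a standalone illustration, noting that a set does not preserve order:

songs = [['Artist', 'Song A'], ['Artist', 'Song A'], ['Artist', 'Song B']]
unique = [list(t) for t in set(tuple(s) for s in songs)]
# unique holds two entries; their order is arbitrary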
Example 7
    plt.legend(loc='lower right')
    plt.ticklabel_format(style='plain', axis='x')
    plt.xticks(fontsize=7.5, rotation=35, ha="right")
    plt.title('Worldwide COVID-19 progression as of ' +
              str(datetime.datetime.today().strftime("%A %d %B %Y")))
    plt.savefig('data/global.png')
    plt.clf()


while True:

    if (datetime.datetime.now().hour >= 19
            and datetime.datetime.now().hour <= 23):

        spinner.color = 'cyan'
        spinner.text = 'Scraping data'
        # Parse data from the worldometers API
        api = requests.get(
            'https://worldometer.herokuapp.com/api/coronavirus/country/france')
        api = api.json()["data"]

        PlaceInWorld = api['place']

        numberOfDay = (datetime.datetime.today() -
                       datetime.datetime(2020, 3, 16)).days
        numberOfDayDeconfinement = (datetime.datetime.today() -
                                    datetime.datetime(2020, 5, 10)).days

        verif = False
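
This fragment references a spinner created earlier in the script; a plausible setup, assuming the same halo usage as the other examples here:

import datetime

import requests
from halo import Halo

spinner = Halo(text='Waiting for the update window', spinner='dots')
spinner.start()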
Example 8
# Terminal Spinner
spinner = Halo(text='Loading', color='green', spinner='hamburger')

try:
    ## User input
    LINK = input('Enter a URL: ')
    spinner.start()
    time.sleep(2)
    spinner.text = 'Reading the Output'
    r = s.get(LINK)
    spinner.succeed('Successfully Fetched the URL')
    spinner.stop()
except requests.ConnectionError as e:
    spinner.start()
    time.sleep(2)
    spinner.color = 'red'
    spinner.text = 'URL Error - Empty URL or Wrong URL'
    time.sleep(2)
    spinner.fail('URL Validation Error')
    spinner.stop()
    print("OOPS!! Connection Error - May be Tor is Not Enabled or Can't Bypass them")
except requests.Timeout as e:
    print("OOPS!! Timeout Error")
except requests.RequestException as e:
    spinner.start()
    time.sleep(2)
    spinner.color = 'red'
    spinner.text = 'Wrong URL or Empty Field'
    time.sleep(2)
    spinner.fail('Wrong URL or Empty Field')
    spinner.stop()
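
The snippet assumes a requests session s already routed through Tor, plus the usual imports; one common (hypothetical) setup, which needs requests[socks] installed:

import time

import requests
from halo import Halo

s = requests.Session()
s.proxies = {
    'http': 'socks5h://127.0.0.1:9050',   # default Tor SOCKS port (assumed)
    'https': 'socks5h://127.0.0.1:9050',
}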