Example #1
    def download_failed_songs():
        Util.check_file(Controller.FAILED_DOWNLOADED_SONGS_FILE_PATH)
        with open(Controller.FAILED_DOWNLOADED_SONGS_FILE_PATH, "r") as file:
            try:
                failed_songs = json.load(file)
            except Exception:
                print("Error loading failed songs")
                return

        for key in failed_songs.keys():
            Controller._download(failed_songs[key], key)
Example #2
    def redownload_playlists():
        Util.check_file(Controller.DOWNLOADED_PLAYLISTS_FILE_PATH)
        with open(Controller.DOWNLOADED_PLAYLISTS_FILE_PATH, "r") as file:
            try:
                playlists = json.load(file)
            except Exception:
                playlists = []

        if not playlists:
            print("No playlists on file to download")
        else:
            Controller.download_playlists(playlists)
Example #3
    def download_custom_songs():
        custom_songs = []
        print(
            "Enter a blank line at any time when you are done entering data..."
        )
        while True:
            print("\nEnter the info for the song you want to download:")
            title = input("Enter the title of the song: ")
            if title == "":
                break
            artist = input("Enter the artist of the song: ")
            if artist == "":
                break
            album = input("Enter the album the song is from: ")
            if album == "":
                break
            time = input("Enter the time of the song (e.g. 3:18): ")
            if time == "":
                break

            custom_songs.append({
                "title": title.strip(),
                "artist": artist.strip(),
                "album": album.strip(),
                "time": Util.time_in_seconds(time.strip())
            })

        Controller._download(custom_songs, "Custom")
Example #4
 def test_when_points_are_superimposed_over_image_array_and_saved_the_new_image_must_contain_the_new_points(self):
     folder_script=os.path.dirname(__file__)
     filename="Util_unittest.png"
     file_noisy_line=os.path.join(folder_script,"./data/",filename)
     np_image=skimage.io.imread(file_noisy_line,as_gray=True)
     file_result=os.path.join(folder_script,"../out/",filename)
     new_points=list()
     #
     #Superimpose some points
     #
     new_points.append(Point(0,0))
     new_points.append(Point(2,2))
     new_points.append(Point(3,3))
     new_points.append(Point(4,4))
     color_red=100
     color_green=255
     color_blue=90
     np_newimage=Util.superimpose_points_on_image(np_image,new_points,color_red,color_green,color_blue)
     skimage.io.imsave(file_result,np_newimage)
     #Read the image back and test the points
     np_newimage2=skimage.io.imread(file_result,as_gray=False)
     height=np_newimage.shape[0]
     for p in new_points:
         x=p.X
         y=height-p.Y-1
         self.assertEqual(np_newimage2[y][x][0],color_red)
         self.assertEqual(np_newimage2[y][x][1],color_green)
         self.assertEqual(np_newimage2[y][x][2],color_blue)
     self.assertGreater(len(new_points), 1)
Example #5
    def get_playlist(self):
        # Retrieve HTML source if it has not been retrieved already
        if not self.html_src:
            source = self.retrieve_html_source()
        else:
            source = self.html_src

        #split to find the playlist name
        name_source = source.split(r'<h1 class="main">')[1]
        name_source = name_source.split('</span>')[0]
        playlist_name = re.findall(r'\">(.*)</a>', name_source)[0]

        # Remove everything before the playlist section
        songs_source = source.split("<tbody data-bind=\"foreach: tracks\"")[1]
        # Divide up into songs
        songs = songs_source.split("</tr>")

        # Create an array of dictionaries of all the songs
        songs_dict = []
        for song in songs:
            try:
                title = re.findall(r'<td.*>(.*)<\/div>', song, re.S)[0]
                artist = re.findall(r'spotify:artist:.*>(.*)<\/a>', song)[0]
                album = re.findall(r'spotify:album.*>(.*)<\/a>', song)[0]
                song_time = re.findall(r'tl-time\">([\w|:]*)<\/td>', song,
                                       re.S)[0]

                title = re.sub(r" - \w* Edit", "", title, re.IGNORECASE)
                title = re.sub(r" -.*Version.*", "", title, re.IGNORECASE)
                title = re.sub(r" -.*Remaster(ed)?.*", "", title,
                               re.IGNORECASE)
                title = re.sub(r" \(Remaster(ed)?\) *", "", title,
                               re.IGNORECASE)
                title = re.sub(r" -.*Anniversary Mix.*", "", title,
                               re.IGNORECASE)

                song_dict = {
                    'title': Util.html_to_ascii(title),
                    'artist': Util.html_to_ascii(artist),
                    'album': Util.html_to_ascii(album),
                    'time': Util.time_in_seconds(song_time),
                }
                songs_dict.append(song_dict)
            except IndexError:
                pass
        return [playlist_name, songs_dict]
Example #6
    def _get_search_info(self, song_search_url):
        """
        Downloads the page source of the song_search_url, and returns a list of dictionaries containing
        the information for each search result. The dictionaries contain 'title', 'url', and 'time' (in seconds) fields.

        :param song_search_url: The url of a search for a song
        :return: A list of dictionaries, each containing the 'title', 'url', and 'time' (in seconds) info of each search result
        """
        with urllib.request.urlopen(song_search_url) as response:
            html = response.read()

        # decodes html source from binary bytes to string
        search_source = html.decode("UTF-8", "ignore")

        # parse source for vid info
        search_info = []

        # Isolate the list of results in the source
        results_source = re.split(
            r"<ol id=\"item-section-.*?\" class=\"item-section\">",
            search_source)[1]
        results_source = re.split(
            r"<div class=\"branded-page-box search-pager.*\">", results_source,
            1)[0]

        # split by video in list, returns the type of entry (video, playlist, channel)
        results_source = re.split(
            r"<li><div class=\"yt-lockup yt-lockup-tile yt-lockup-(.*?) vve-check clearfix.*?\"",
            results_source)[1:]

        index = 0
        while len(search_info) < self.MAX_NUM_SEARCH_RESULTS and index < len(
                results_source) - 1:
            source_type = results_source[index]
            source = results_source[index + 1]

            if source_type == "video":
                video_url = re.findall(r"href=\"\/watch\?v=(.*?)\"", source)[0]
                video_url = self.SONG_URL_RESULT_ROOT + video_url
                video_title = re.findall(r"title=\"(.*?)\"", source)[2]
                video_title = Util.html_to_ascii(video_title)
                video_time = re.findall(r"Duration: (\d+:\d+)", source)[0]
                video_time = re.split(r":", video_time)
                video_time = int(video_time[0]) * 60 + int(video_time[1])

                search_info.append({
                    "url": video_url,
                    "title": video_title,
                    "time": video_time
                })

            index += 2

        return search_info
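
The docstring above pins down the shape of each search result: a dict with 'title', 'url', and 'time' (in seconds). As a rough illustration of how such a list might be consumed, here is a minimal sketch that picks the result whose duration is closest to a requested song's; the real project delegates this choice to Util.get_best_song_from_search, and the `wanted` dict here is a hypothetical stand-in for a song entry with a 'time' field:

    def pick_closest_by_time(search_info, wanted):
        # Sketch only: choose the search result whose duration best matches
        # the requested song. Assumes every result has an integer 'time'.
        if not search_info:
            return None
        return min(search_info,
                   key=lambda result: abs(result["time"] - wanted["time"]))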
Example #7
 def test_generate_plottable_linear_points_between_twopoints(self):
     pt_start=Point(1,1)
     pt_end=Point(20,20)
     unit=Vector.create_vector_from_2points(pt_start,pt_end).UnitVector
     new_points=Util.generate_plottable_points_between_twopoints(pt_start,pt_end)
     distance_start_end=Point.euclidean_distance(pt_start,pt_end)
     for new_point in new_points:
         distance_from_start=Point.euclidean_distance(pt_start,new_point)
         self.assertTrue(distance_from_start < distance_start_end)
         new_unit=Vector.create_vector_from_2points(pt_start,new_point).UnitVector
         dot_product=Vector.dot_product(new_unit,unit)
         self.assertAlmostEqual(dot_product, 1.0, delta=0.1)
Example #8
    def test_create_noisy_image_and_verify_that_width_height_match_expected(self):
        expected_width=100
        expected_height=200
        actual_image:np.ndarray=Util.generate_noisy_image(width=expected_width, height=expected_height, salt_pepper=0.5)

        actual_width=actual_image.shape[1]
        actual_height=actual_image.shape[0]

        #Asserts
        self.assertEqual(len(actual_image.shape), 3)
        self.assertEqual(actual_width, expected_width)
        self.assertEqual(actual_height, expected_height)
Example #9
    def test_create_noisy_image_verify_that_count_of_black_pixels_matches_expected_saltpepper_ratio(self):
        expected_width=500
        expected_height=500
        expected_saltpepper=0.5
        actual_image:np.ndarray=Util.generate_noisy_image(width=expected_width, height=expected_height, salt_pepper=expected_saltpepper)
        all_white_indices=np.where(actual_image == Util.WHITE_COLOR)
        count_of_white=len(all_white_indices[0])

        all_black_indices=np.where(actual_image == Util.BLACK_COLOR)
        count_of_black=len(all_black_indices[0])
        actual_saltpepper=count_of_white/(count_of_black+count_of_white)
        self.assertEqual(expected_height*expected_width, (count_of_white+count_of_black),msg="The total count of white+black should be total pixels in the picture")
        self.assertAlmostEqual(actual_saltpepper, expected_saltpepper, delta=0.1, msg="The salt-pepper ratios should match approximately")
Example #10
    def _get_search_info(self, song_search_url):
        """
        Downloads the page source of the song_search_url, and returns a list of dictionaries containing
        the information for each search result. The dictionaries contain 'title', 'url', and 'time' (in seconds) fields.

        :param song_search_url: The url of a search for a song
        :return: A list of dictionaries, each containing the 'title', 'url', and 'time' (in seconds) info of each search result
        """
        with urllib.request.urlopen(song_search_url) as response:
            html = response.read()

        # decodes html source from binary bytes to string
        search_source = html.decode("UTF-8", "ignore")

        # parse source for vid info
        search_info = []

        # Isolate the list of results in the source
        results_source = re.split(r"<div class=\"searchResultGroupHeading\">",
                                  search_source)[1]
        results_source = re.split(r"</ul>", results_source, 1)[0]

        # split by search result
        results_source = re.split(r"<div class=\"searchItem\">",
                                  results_source)[1:]

        # This code theoretically works, but urllib can't access all of SoundCloud's
        # page source because the request looks like it comes from an invalid browser

        index = 0
        while len(search_info) < self.MAX_NUM_SEARCH_RESULTS and index < len(
                results_source):
            source = results_source[index]

            artist = re.findall(
                r"<span class=\"soundTitle_+usernameText\">(.*)</span>",
                source)[0]
            title = re.findall(r"<span class=\"\">(.*)</span>", source)[0]
            url = re.findall(
                r"<a class=\"soundTitle_+title sc-link-dark\" href=\"(.*)\">",
                source)[0]

            title = Util.html_to_ascii(artist + " " + title)
            url = self.SONG_URL_RESULT_ROOT + url

            search_info.append({"url": url, "title": title, "time": None})

            index += 1

        return search_info
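
The comment in this example notes that urllib gets rejected because the request does not look like it comes from a real browser. A common workaround is to attach a browser-like User-Agent header via urllib.request.Request; a minimal sketch, with a placeholder header value that is not taken from the original project:

    import urllib.request

    def fetch_with_user_agent(url):
        # Sketch only: send a browser-like User-Agent so the server is less
        # likely to reject the request as coming from an unknown client.
        request = urllib.request.Request(
            url,
            headers={"User-Agent": "Mozilla/5.0 (X11; Linux x86_64)"})  # placeholder value
        with urllib.request.urlopen(request) as response:
            return response.read().decode("UTF-8", "ignore")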
Example #11
    def test_get_extreme_colinear_points(self):
        pt1=Point(1,1)
        pt2=Point(2,2)
       
        pt3=Point(3,3)
        pt4=Point(4,4)
        pt5=Point(5,5)
        pt6=Point(6,6)

        lst_randomsequence=[pt6,pt1,pt5,pt2,pt4,pt3]
        (point1,point2)=Util.get_terminal_points_from_coliner_points(lst_randomsequence)
        results=[point1,point2]
        self.assertTrue(pt1 in results)
        self.assertTrue(pt6 in results)
Example #12
    def run(self):
        search_url = self.dl._construct_search_url(self.song)
        search_info = self.dl._get_search_info(search_url)
        best_song_info = Util.get_best_song_from_search(self.song, search_info)

        if best_song_info is not None:
            if self.dl._download_song(best_song_info["url"]):
                song_filepath = Util.get_song_in_filepath(self.dl.download_path, best_song_info["title"], best_song_info["url"])
                if song_filepath is not None:
                    Util.normalize_audio(self.dl.download_path + song_filepath)
                    song_filepath = Util.get_song_in_filepath(self.dl.download_path, best_song_info["title"], best_song_info["url"])
                    Util.rename_song_file(self.dl.download_path, song_filepath, self.song)
                    Util.write_metadata(self.song, self.dl.download_path)
                else:
                    print("ERROR: {} was supposedly downloaded but could not be found".format(best_song_info["title"]))
            else:
                with self.lock:
                    self.dl.failed_downloaded_songs.append(self.song)
        else:
            with self.lock:
                self.dl.failed_downloaded_songs.append(self.song)
Example #13
    def _remove_existing_songs_from_list(self):
        """
        Removes any songs that have already been downloaded by the program from the list of requested songs

        :return: void
        """
        songs_to_remove = []

        for song in self.requested_songs:
            filename = Util.get_song_filename(song)
            song_name_regex = re.escape(filename)

            for file in os.listdir(self.download_path):
                if re.match(song_name_regex, file):
                    songs_to_remove.append(song)
                    self.num_existing_songs += 1
                    break

        for song in songs_to_remove:
            self.requested_songs.remove(song)
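
The docstring explains that a requested song is skipped when a file whose name starts with its expected filename already exists in the download directory. A minimal standalone sketch of that prefix check, assuming a hypothetical expected filename rather than the one produced by Util.get_song_filename:

    import os
    import re

    def already_downloaded(download_path, expected_filename):
        # Sketch only: True if any file in download_path starts with the
        # expected filename, mirroring the re.match check in the example above.
        pattern = re.escape(expected_filename)
        return any(re.match(pattern, entry) for entry in os.listdir(download_path))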
Example #14
    def test_create_points_from_numpyimage(self):
        folder_script=os.path.dirname(__file__)
        filename="Util_unittest.png"
        file_noisy_line=os.path.join(folder_script,"./data/",filename)
        np_image=skimage.io.imread(file_noisy_line,as_gray=True)
        height=np_image.shape[0]
        width=np_image.shape[1]

        lst_points=Util.create_points_from_numpyimage(np_image)
        np_shape=np_image.shape
        self.assertEqual(len(lst_points), 3)

        for pt_any in lst_points:
            if pt_any.X == 0 and pt_any.Y == height-1:
                pass
            elif (pt_any.X == width-1 and pt_any.Y == height-1):
                pass
            elif (pt_any.X == width-1 and pt_any.Y == 0):
                pass
            else:
                raise Exception("Point '%s' was not expected." % pt_any)
Example #15
            mp_candidates.remove(n - longest_streak)
        while len(mp_candidates) > 1:
            count += 1
            i = mp_candidates.pop()
            j = mp_candidates.pop()
            if word[i + 1] > word[j + 1]:
                G.add_edge(j + 1, i + 1)
                mp_candidates.append(i)
            elif word[i + 1] < word[j + 1]:
                G.add_edge(i + 1, j + 1)
                mp_candidates.append(j)
            else:
                return j, count
        return mp_candidates.pop(), count


nr_comps = {}

for r, words in enumerate(Util(6, -1).generate_all_words()):
    for word in words:
        (r_fuzzier, count) = compute_fuzzier(word)
        if r_fuzzier != r:
            print("Fuzzier Algorithm failed for {} [r={}, r_actual={}]".format(word, r_fuzzier, r))
        if count not in nr_comps:
            nr_comps[count] = 1
        else:
            nr_comps[count] += 1
        if count == 8:
            print(word)
pprint(nr_comps)
Example #16
    def _download(songs, playlist_name, playlist_url=None):
        """
        Uses the downloaders to download the songs from a given playlist, and updates the tracking files

        :param songs: a list of dictionaries containing song info. Dictionaries must contain 'title', 'artist', 'album',
                      and 'time' fields
        :param playlist_name: the name of the playlist
        :param playlist_url: the url of the playlist
        :return: void
        """
        yt_dl = YouTubeDownloader(songs, playlist_name)
        results = yt_dl.download_songs()  # TODO: later add more downloaders here

        num_existing_songs = results[0]
        failed_downloads = results[1]
        num_failed_downloads = len(failed_downloads)

        summary_info = [
            len(songs), num_existing_songs, num_failed_downloads,
            len(songs) - num_existing_songs - num_failed_downloads
        ]
        Util.print_summary(summary_info, playlist_name)

        if playlist_url is not None:
            # add the playlist to the list of downloaded playlists
            Util.check_file(Controller.DOWNLOADED_PLAYLISTS_FILE_PATH)
            with open(Controller.DOWNLOADED_PLAYLISTS_FILE_PATH, "r") as file:
                try:
                    downloaded_playlists = json.load(file)
                except Exception:
                    downloaded_playlists = []

            if playlist_url not in downloaded_playlists:
                downloaded_playlists.append(playlist_url)

            with open(Controller.DOWNLOADED_PLAYLISTS_FILE_PATH, "w") as file:
                json.dump(downloaded_playlists, file, indent=4)

        # Check if any downloaded songs were in the list of failed downloads and remove them
        # also add the failed songs to the list
        Util.check_file(Controller.FAILED_DOWNLOADED_SONGS_FILE_PATH)
        with open(Controller.FAILED_DOWNLOADED_SONGS_FILE_PATH, "r") as file:
            try:
                playlist_dict = json.load(file)
            except Exception:
                playlist_dict = {}

        downloaded_songs = [x for x in songs if x not in failed_downloads]
        songs_to_remove = []

        if playlist_name in playlist_dict.keys():
            for song in downloaded_songs:
                if song in playlist_dict[playlist_name]:
                    songs_to_remove.append(song)

            for song in songs_to_remove:
                playlist_dict[playlist_name].remove(song)
        else:
            playlist_dict[playlist_name] = []

        for song in failed_downloads:
            if song not in playlist_dict[playlist_name]:
                playlist_dict[playlist_name].append(song)

        with open(Controller.FAILED_DOWNLOADED_SONGS_FILE_PATH, "w") as file:
            json.dump(playlist_dict, file, indent=4)
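
The docstring spells out the fields each song dict must carry: 'title', 'artist', 'album', and 'time' in seconds. A minimal sketch of a call, with placeholder values; when playlist_url is left out, the playlist-tracking file is not updated, exactly as the `if playlist_url is not None` branch above shows:

    songs = [{
        "title": "Example Song",     # placeholder values, normally parsed
        "artist": "Example Artist",  # from a playlist or entered by the user
        "album": "Example Album",
        "time": 198,                 # duration in seconds
    }]
    Controller._download(songs, "Custom")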
Example #17
class aceleraDev:
    def __init__(self):
        self.util = Util()
        self.apiRequestUrl = Constants.API_REQUEST_URL
        self.apiResponseUrl = Constants.API_RESPONSE_URL
        self.apiToken = Constants.TOKEN

    def start(self):
        response = self.util.getResponse(self.apiRequestUrl, self.apiToken)
        self.util.saveToFile(Constants.ANSWER_FILE, response)

    def end(self):
        response = self.util.sendResponse(self.apiResponseUrl, self.apiToken,
                                          Constants.ANSWER_FILE)
        print(response.json())

    def decrypt(self):
        pList = []
        decryptedText = ''

        data = self.util.readJson(Constants.ANSWER_FILE)
        print('Encrypted: ' + data[Constants.ENCRYPTED])
        decryptingNumber = data[Constants.NUMBER]
        for p in data[Constants.ENCRYPTED]:
            pList.append(p)

        for x in range(0, len(pList)):
            if pList[x] in Constants.ALPHABET:
                pPosition = Constants.ALPHABET.find(pList[x])
                if not pPosition - decryptingNumber < 0:
                    pDecrypted = (Constants.ALPHABET[pPosition -
                                                     decryptingNumber])
                    pList[x] = pDecrypted
                else:
                    difference = abs(pPosition - decryptingNumber)
                    pList[x] = Constants.ALPHABET[len(Constants.ALPHABET) -
                                                  difference]

        for p in pList:
            decryptedText += p

        self.util.refreshFile(Constants.ANSWER_FILE, Constants.DECRYPTED,
                              decryptedText)

        print('Decrypted: ' + decryptedText)

    def generateHash(self):
        data = self.util.readJson(Constants.ANSWER_FILE)
        hash = self.util.generateSha1(data[Constants.DECRYPTED])
        self.util.refreshFile(Constants.ANSWER_FILE,
                              Constants.CRYPTOGRAPHY_RESUME, hash)
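
decrypt() above shifts each alphabet character back by decryptingNumber and handles the wraparound by hand when the index goes negative. The same shift can be written with a modulo; a minimal standalone sketch, using a plain lowercase alphabet instead of Constants.ALPHABET:

    def shift_back(text, shift, alphabet="abcdefghijklmnopqrstuvwxyz"):
        # Sketch only: characters outside the alphabet pass through unchanged,
        # matching the behaviour of decrypt() above.
        result = []
        for ch in text:
            position = alphabet.find(ch)
            if position < 0:
                result.append(ch)
            else:
                result.append(alphabet[(position - shift) % len(alphabet)])
        return "".join(result)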
Example #18
 def __init__(self):
     self.util = Util()
     self.apiRequestUrl = Constants.API_REQUEST_URL
     self.apiResponseUrl = Constants.API_RESPONSE_URL
     self.apiToken = Constants.TOKEN
Example #19
import itertools
import os
from pathlib import Path

from anytree import Node
from anytree.exporter import DotExporter
from tabulate import tabulate

from src.Util import Util

N = 6
MY_UTIL = Util(N, -1)

nr_comparisons_count = {}
difficult_words = []
    
all_words = itertools.product(range(N), repeat=N)
nr_words = N ** N

decision_tree = [Node(0, obj=(0, 1))]
max_height = 8

base_dir = "Greedy"
Path(base_dir).mkdir(parents=True, exist_ok=True)
pic_filename = "{}.png".format(N)
txt_filename = "{}.txt".format(N)
pic_filepath = os.path.join(base_dir, pic_filename)
txt_filepath = os.path.join(base_dir, txt_filename)

start_index = 2
for depth in range(1, max_height + 1):
Example #20
            if j == i + 1:
                r = i
                break
            k = i
            l = j
            while True:
                k += 1
                l += 1
                if l > len(word) - 1:
                    j -= 1
                    r = i
                    break
                if k == j:
                    return i
                if word[k] < word[l]:
                    i += 1
                    r = j
                    break
                if word[k] > word[l]:
                    j -= 1
                    r = i
                    break
    return r


for word, r in Util(7, -1).generate_all_words():
    r_sandwich = compute_max_prefix_sandwich(word)
    if r_sandwich != r:
        print("Sandwich Algorithm failed for {} [r={}, r_actual={}".format(
            word, r_sandwich, r))
Example #21
from flask import Flask, request, jsonify, session, render_template

from src import Operations
from src.Calculator import Calculator
from src.Util import Util

app = Flask(__name__)
app.secret_key = Util.generate_unique_key()


@app.route('/')
def index():
    return render_template("index.html")


@app.route('/calculator')
def calculator():
    expr_args = request.args.get("expr")

    calc = Calculator()

    if '+' in expr_args:
        operands = expr_args.split('+')
        result = calc.calculateSum(int(operands[0]), int(operands[1]))
    elif '-' in expr_args:
        operands = expr_args.split('-')
        result = calc.calculateDiff(int(operands[0]), int(operands[1]))
    elif '*' in expr_args:
        operands = expr_args.split('*')
        result = calc.calculateProduct(int(operands[0]), int(operands[1]))
    elif '/' in expr_args: