Example No. 1
def main():
    print("This application will allow you to transfer Apple Music playlists (xml) into Spotify playlists.")
    print("Make sure the Apple Music playlists are exported as .xml files and are in the wd.")
    input("Press any key to continue...")
    url = "https://accounts.spotify.com/authorize?client_id=1e020bd65a934fa88aa8e49a05f75b60&response_type=code&redirect_uri=https%3A%2F%2Fexample.com%2F&scope=user-read-private%20user-read-email%20playlist-modify-private%20playlist-modify-public"
    print("Copy this URL into browser and hit enter. Your code will appear in the URL (code=_).\nCopy the code and paste it back in terminal.\nURL:\n " + url)
    code = input("Enter code: \n")
    access_token, refresh_token = get_token(code)
    user_id = input("Enter your Spotify user ID: \n")
    flag = "y"
    while flag == "y":
        apple_playlist = input("Enter name of file with playlist data (.xml): \n")
        apple_playlist_tree = xml_parser.parse_xml(apple_playlist)
        song_list = xml_parser.get_songlist(apple_playlist_tree)
        playlist = create_spotify_playlist(user_id, access_token)
        print("Playlist created...")
        song_uris = []
        print("Adding songs to playlist...")
        count = 0
        for song in song_list:
            name = song[0]
            artist = song[1]
            uri = get_song_uri(access_token, name, artist)
            if uri == "none":
                print(f"Error adding {name} by {artist}")
            else:
                song_uris.append(uri)
                add_song(access_token, user_id, uri, playlist)
                count += 1
        print()
        print(f"{count} Songs added to playlist!\n")
        flag = input("Would you like to convert another playlist (y for yes, n for no)?")
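The snippet relies on a get_token helper that is not shown here. A minimal sketch of what it might look like, assuming the standard Spotify authorization-code exchange; CLIENT_SECRET is a placeholder (not from the original), and the redirect_uri must match the one in the authorize URL above:

import requests

CLIENT_SECRET = "<your-client-secret>"  # placeholder, not from the original

def get_token(code):
    # Exchange the authorization code for access and refresh tokens at
    # Spotify's token endpoint, authenticating with the client credentials.
    resp = requests.post(
        "https://accounts.spotify.com/api/token",
        data={
            "grant_type": "authorization_code",
            "code": code,
            "redirect_uri": "https://example.com/",
        },
        auth=("1e020bd65a934fa88aa8e49a05f75b60", CLIENT_SECRET),
    )
    resp.raise_for_status()
    payload = resp.json()
    return payload["access_token"], payload["refresh_token"]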
Example No. 2
def stitch_images(path_to_files, output_path, *args):
    from xml_parser import parse_phenix_xml as parse_xml
    from tqdm import tqdm
    from image_stitcher import channel_stitch
    # NOTE: `skimage.external.tifffile` was removed in recent scikit-image
    # releases; on newer environments, install and import the standalone
    # `tifffile` package instead.
    from skimage.external import tifffile

    path_to_xml_file = path_to_files + 'Index.idx.xml'
    dictionary = parse_xml(path_to_xml_file)

    for wells in tqdm(dictionary):
        stitching_input = [(wells, channels, images)
                           for channels, images in dictionary[wells].items()]
        stitched_list = [
            channel_stitch(path_to_files, well, channel, images)
            for well, channel, images in stitching_input
        ]
        with tifffile.TiffWriter(output_path + wells + "_" +
                                 "image_stack.tiff",
                                 imagej=True) as stack:
            for item in stitched_list:
                stack.save(item, photometric='minisblack')
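A usage sketch; both paths are hypothetical and need trailing slashes, because the function concatenates them directly:

# Hypothetical Phenix export containing Index.idx.xml; writes one ImageJ
# TIFF stack per well, e.g. /data/stitched/A01_image_stack.tiff.
stitch_images('/data/plate01/', '/data/stitched/')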
Example No. 3
    def load(self):
        self.valid = False
        # verify there is only 1 series under study_instance
        series_paths = [d for d in os.listdir(self.path)
                        if not d.startswith('.')]
        if len(series_paths) != 1:
            raise Exception('Number of series under study_instance is ' +
                            str(len(series_paths)) +
                            '. Change code to account for this case.')
        series_path = os.path.join(self.path, series_paths[0])
        xml_paths = [d for d in os.listdir(series_path)
                     if (not d.startswith('.')) and d.endswith('.xml')]
        # verify there is only 1 xml in each series
        if len(xml_paths) != 1:
            raise Exception('Number of xml files under series is ' +
                            str(len(xml_paths)) +
                            '. Change code to account for this case.')
        xml_path = os.path.join(series_path, xml_paths[0])
        xml_data = xml_parser.parse_xml(xml_path)
        if xml_data is False:
            self.valid = False
            return

        self.dicom_array = DicomArray()
        self.dicom_array.read_dicom(series_path)

        self.reading_sessions = xml_data['readingSession']
        for reading_session in self.reading_sessions:
            reading_session.study_instance = self

        self.valid = True
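A usage sketch; the owning class is not shown in this example, so the name StudyInstance and its constructor are assumptions:

# Hypothetical wrapper class; load() expects exactly one series directory
# under the study path and exactly one .xml file inside that series.
study = StudyInstance('data/study_instance/')
study.load()
if study.valid:
    print(len(study.reading_sessions), 'reading sessions')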
Example No. 4
def show_person_concretinfo(accountp):
    fpath = db_con.get_person_ConcretInfo(accountp)
    bs, ps, rs = parse_xml()
    return render_template('show_person_concretinfo.html',
                           account=accountp,
                           bs=bs,
                           ps=ps,
                           rs=rs)
Example No. 5
def show_person_concretinfo_s():
    account = request.cookies.get('account')
    fpath = db_con.get_person_ConcretInfo(account)
    bs, ps, rs = parse_xml()
    return render_template('show_person_concretinfo.html',
                           account=account,
                           bs=bs,
                           ps=ps,
                           rs=rs)
Example No. 6
def load_problem(pid):

    p_dir = None
    for loc in [ os.path.join(PROBLEM_DIR, pid, 'problem.xml'),
                 os.path.join(PROBLEM_DIR, pid + '.xml'),
                 os.path.join(PROBLEM_DIR, pid, pid + '.xml') ]:
        if os.path.exists(loc):
            p_dir = os.path.dirname(loc)
            tree = parse_xml(get_contents(loc), TEXT_ELEMS, decode_nodes=['eqn', 'ceqn'])
            break

    if not p_dir:
        raise Exception('Problem not found')

    return _parse_problem(pid, tree, p_dir)
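A usage sketch; the problem id is hypothetical:

# Hypothetical id; the loader probes PROBLEM_DIR/two-sum/problem.xml,
# PROBLEM_DIR/two-sum.xml, and PROBLEM_DIR/two-sum/two-sum.xml in that order.
problem = load_problem('two-sum')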
Example No. 7
def process_xml(xml_file):
    global max_width, max_height, num_xmls, num_nodules, num_images
    data = xml_parser.parse_xml(xml_file)
    if data is False:
        return
    num_xmls += 1
    reading_sessions = data["readingSession"]
    # Only the first reading session is considered here.
    for reading_session in [reading_sessions[0]]:
        num_nodules += len(reading_session.get_nodules())
        for nodule in reading_session.get_nodules():
            num_images += len(nodule.get_images())
            for nodule_image in nodule.get_images():
                boundary = nodule_image.get_rectangle_boundary()
                if boundary['width'] > max_width:
                    max_width = boundary['width']
                if boundary['height'] > max_height:
                    max_height = boundary['height']
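A hedged driver sketch for the module-level counters, to run at module level of the same file; the directory name is hypothetical:

import os

# Reset the counters, then fold every annotation XML in a folder into them.
max_width = max_height = num_xmls = num_nodules = num_images = 0
for name in os.listdir('annotations'):
    if name.endswith('.xml'):
        process_xml(os.path.join('annotations', name))
print(num_xmls, 'xmls,', num_nodules, 'nodules,', num_images, 'images,',
      'max bbox:', max_width, 'x', max_height)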
Example No. 8
def get_image_data_training(image_path, annotation_path):
    """
    Generates the data necessary for rcnn training. This info includes:
        - Resized image pixels
        - Ground truth data including bounding boxes and classes of the different objects
        - RoIs for both foreground and background classes

    :param image_path: path to the image we are generating data for
    :param annotation_path: path to the annotations of the image we are generating data for

    :return: information about the image including pixels, ground truth data and rois
    """
    image_info = {}

    # Adding resized image to the dictionary
    image_in_pixels = image_tools.image_to_pixels(image_path)
    resized_image_in_pixels = image_tools.resize_image(image_in_pixels, 600,
                                                       600)
    image_info["image"] = resized_image_in_pixels
    image_info["image_name"] = image_path

    # Adding all the resized ground-truth bboxes to the dictionary
    gt_boxes = []
    image_annotations = xml_parser.parse_xml(annotation_path)

    for annotation in image_annotations:
        resized_gt_bbox = get_bbox_resized(image_in_pixels.shape,
                                           resized_image_in_pixels.shape,
                                           get_bbox(annotation["bbox"]))
        gt_boxes.append({
            "class": annotation["class"],
            "bbox": resized_gt_bbox
        })

    image_info["gt_bboxes"] = np.array(gt_boxes)

    # Adding rois to the dictionary
    image_info["rois"], image_info["rois_background"] = \
        roi_tools.find_rois_complete(resized_image_in_pixels, gt_boxes, 4, 500)

    if len(image_info["rois"]) == 0:
        print("There are no ROIs for image: " + image_path +
              ". It must be a background image")

    return image_info
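A usage sketch with hypothetical Pascal-VOC-style paths:

# Hypothetical paths; prints the resized image shape plus the number of
# ground-truth boxes and foreground RoIs that were generated.
info = get_image_data_training('VOC/JPEGImages/000005.jpg',
                               'VOC/Annotations/000005.xml')
print(info['image'].shape, len(info['gt_bboxes']), len(info['rois']))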
Example No. 9
def split_days(firstday, lastday):
    """
    возвращает два списка - с буднями и выходными
    """
    holidays = xml_parser.parse_xml()

    working_days_list = []
    weekend_days_list = []
    day = firstday.replace(hour=0)
    while day < lastday:

        if day in holidays or day.weekday() in (5, 6):
            weekend_days_list.append(day)
        else:
            working_days_list.append(day)

        day += ONE_DAY

    return working_days_list, weekend_days_list
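A usage sketch, assuming ONE_DAY is timedelta(days=1) at module level and that xml_parser.parse_xml() returns the holiday dates at midnight:

from datetime import datetime

# Hypothetical range: split May 2023 into working days and days off.
working, weekend = split_days(datetime(2023, 5, 1), datetime(2023, 6, 1))
print(len(working), 'working days,', len(weekend), 'days off')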
Example No. 10
def load_contest(cid):

    c_dir = None
    for loc in [
            os.path.join(CONTEST_DIR, cid, 'contest.xml'),
            os.path.join(CONTEST_DIR, cid + '.xml'),
            os.path.join(CONTEST_DIR, cid, cid + '.xml')
    ]:
        if os.path.exists(loc):
            c_dir = os.path.dirname(loc)
            tree = parse_xml(get_contents(loc),
                             TEXT_ELEMS,
                             decode_nodes=['eqn', 'ceqn'])
            break

    if not c_dir:
        raise Exception('Contest not found')

    assert len(tree) == 1
    assert tree[0].tag == 'contest'

    contest = Contest(cid, path=c_dir)
    if 'name' in tree[0].attr:
        contest.name = tree[0].attr['name']

    if 'email' in tree[0].attr:
        contest.email = tree[0].attr['email']

    for n1 in tree[0].children:
        if n1.tag == 'problem':
            if 'enabled' in n1.attr and n1.attr['enabled'].lower() == 'false':
                continue

            if 'include' in n1.attr:
                problem = load_problem(n1.attr['include'])
                contest.problems.append(problem)
                if 'name' in n1.attr:
                    problem.name = n1.attr['name']
            else:
                problem = _parse_problem(n1.attr['id'], n1, c_dir)
                contest.problems.append(problem)

    return contest
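A usage sketch; the contest id is hypothetical and must resolve to one of the three locations probed above:

# Hypothetical id 'spring2020'; load_contest locates its contest.xml, then
# pulls in every enabled problem, whether included by id or defined inline.
contest = load_contest('spring2020')
print(contest.name, len(contest.problems))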
Example No. 12
def get_number_images_with_class(annotation_folder):
    """
    This function generates a dictionary with the following information:
       - Key: Combination of 1, 2 or 3 classes
       - Value: Number of images with that combination of classes

    Example:
       If classes =
           ['person', 'dog', 'cat']
       Possible result =
           {'person': 4, 'dog': 2, 'cat': 2, "cat-person": 1, "cat-dog": 1, "dog-person": 2}

    :param annotation_folder: folder with all the annotation files
    :return: dictionary with class counts
    """
    annotations = listdir(annotation_folder)

    # Creating list with all combinations of 3 elements + all combinations of 2
    # elements + single classes.
    # We'll find counts of images with objects belonging to those class combinations
    class_combinations = {key: 0 for key in CLASSES}
    class_combinations.update(
        {"-".join(sorted(key)): 0 for key in itertools.combinations(CLASSES, 2)})
    class_combinations.update(
        {"-".join(sorted(key)): 0 for key in itertools.combinations(CLASSES, 3)})

    for annotation in annotations:
        annotation_path = annotation_folder + annotation
        # Get all different classes in the current annotation file
        object_classes = \
            {xml_object["class"] for xml_object in xml_parser.parse_xml(annotation_path)}

        # We are only interested in images with up to three different classes
        if len(object_classes) <= 3:
            key = "-".join(sorted(object_classes))
            class_combinations[key] += 1

    for combination, count in class_combinations.items():
        if count != 0:
            print((combination, count))

    return class_combinations
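A usage sketch, assuming CLASSES is the module-level class list; note the folder path needs a trailing slash, since the function concatenates it directly:

# Hypothetical folder; non-zero combinations are printed as a side effect
# and the full count dictionary is returned.
counts = get_number_images_with_class('VOC/Annotations/')
print(counts.get('cat-person', 0))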
Example No. 14
import argparse
import os

import constants
import xml_parser

parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-x', default=None, type=str,
    help='path to .xml to parse')
parser.add_argument('-m', default=0, type=int,
    help='maximum number of songs to translate')
args = parser.parse_args()

if args.x:
    assert args.x.split('/')[-1].split('.')[-1] == 'xml'
    xml_parser.parse_xml(args.x)
else:
    dirpath = constants.PROJ_PATH + 'library/jazz_scores'
    count = 0
    err_count = 0
    for f in os.listdir(os.fsencode(dirpath)):
        filename = os.fsdecode(f)
        path = os.path.join(dirpath, filename)
        r = xml_parser.parse_xml(path)
        if r == -1:
            print(f'\tError translating {filename}: excess parts.')
            err_count += 1
        elif r == -2:
            print(f'\tError translating {filename}: endings.')
            err_count += 1
        elif r == -3:
Example No. 15
def keyword_extractor(input_file_path, filename):

	# A list where each entry is a word or phrase with its corresponding XML data
	parsed_xml = parse_xml(input_file_path)

	# Prints the raw XML data
	counter = 0
	print("\n Raw XML Data: \n")
	for word_array in parsed_xml:
		print(word_array)
		counter += 1
	print("\n Number of word arrays in raw XML data: ", counter)

	# Pre-processing to filter out unwanted data from parsed_content
	filtered_content = pre_processing(parsed_xml)

	# Prints the filtered data so we can check if the pre-processor is working correctly
	print("\n Filtered data (each word with its own XML features): \n")
	counter = 0
	for word_array in filtered_content:
		print(word_array)
		counter += 1
	print("\n Number of word arrays in filtered data: ", counter)

	# A two-dimensional array which contains each word and its features
	classification_features = feature_assignment(filtered_content)

	# Prints each word and its classification features and the number of all words in classification_features
	counter = 0
	print("\n \n Each word and its classification features: \n")
	for word in classification_features:
		# Format: [word, is_bold, is_larger, is_not_black, RAKE]
		print(word)
		counter += 1

	print("\n Number of words in classification_features: ", counter)

	# Runs the k-means algorithm 10 times, a number chosen arbitrarily.
	# The cost function converges to a local minimum and is not guaranteed
	# to reach the global one, so several random restarts are needed.
	runs = []
	for n in range(10):
		# A 2D array which contains each word and its label
		# For example, [word, 0] or [word, 1], which means [word, Non-keyword] or [word, Keyword]
		classified_words = kmeans_clustering(classification_features, CLUSTERS)

		# Post-processing to measure the performance of our keyword extractor
		performance = post_processing(classified_words, filename)

		runs.append([classified_words, performance])

	# The highest performing classification is extracted.
	classified_words, max_performance = max(runs, key=lambda performance_array: performance_array[1])

	# F1 score, from 0 to 1
	print("\n F1 score: ", max_performance)

	# Takes only the keywords (labelled with 1)
	classified_words = np.asarray(classified_words)
	keywords = classified_words[classified_words[:, 1] == "1", 0]

	return keywords
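A usage sketch with hypothetical paths; filename is forwarded to post_processing, which presumably looks up the gold-standard keywords for scoring:

# Hypothetical input; returns a numpy array of the words labelled as keywords.
keywords = keyword_extractor('documents/paper1.xml', 'paper1')
print(keywords)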