def remove_phrase(phrase_id):
    """
    Delete a phrase from the library while keeping phrase ids consecutive.
    """
    log.info(f'Removing phrase {phrase_id}...')
    max_id = get_max_phrase_id()
    max_phrase_file = path.join(AUDIO_ASSETS_DIR, f'phrase_{max_id}.mp3')
    max_phrase = Phrase.from_mp3(max_id, max_phrase_file)

    # Update data file
    data = load_json(DATA_FILE)

    # Locate the phrase to delete; phrase ids are unique across categories.
    to_remove = None
    for category_id, phrase_set in data['phrases'].items():
        for i, phrase in enumerate(phrase_set):
            if phrase['id'] == phrase_id:
                to_remove = (category_id, i)
                break
        if to_remove is not None:
            break

    if to_remove is None:
        raise ValueError(f'Phrase {phrase_id} not found in {DATA_FILE}')

    del data['phrases'][to_remove[0]][to_remove[1]]

    # Re-key the phrase holding the max id to the freed id, keeping ids gapless.
    for phrase in data['phrases'][str(max_phrase.category_id)]:
        if phrase['id'] == max_id:
            phrase['id'] = phrase_id
            break

    save_data(data, DATA_FILE)

    # Move the audio: write the max phrase's audio under the freed id's
    # filename, then delete the stale file for the old max id. (If the
    # removed phrase *is* the max phrase, both paths coincide and the
    # file is simply removed.)
    max_phrase.id = phrase_id
    new_phrase_file = path.join(AUDIO_ASSETS_DIR, f'phrase_{phrase_id}.mp3')
    max_phrase.save_mp3(new_phrase_file)
    os.remove(max_phrase_file)

    check_missing_data()
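
# Example: with phrase ids 1..10, remove_phrase(3) deletes phrase 3's entry
# and re-keys phrase 10 (both its JSON record and its mp3 file) to id 3,
# leaving the consecutive run 1..9.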


def get_next_phrases(n=50):
    """Return up to ``n`` phrases that do not yet have a ``script`` entry."""
    phrase_data = load_json(DATA_FILE)
    phrases = []
    for phrase_set in phrase_data['phrases'].values():
        for _phrase in phrase_set:
            if len(phrases) >= n:
                return phrases
            if 'script' not in _phrase:
                phrases.append(Phrase.from_dict(_phrase))
    return phrases


def check_missing_data():
    """Raise ``ValueError`` if the phrase ids are not the consecutive run 1..N."""
    data = load_json(DATA_FILE)

    phrase_ids = sorted(
        phrase['id']
        for phrase_set in data['phrases'].values()
        for phrase in phrase_set
    )

    for expected, actual in enumerate(phrase_ids, start=1):
        if expected != actual:
            raise ValueError(
                f'Non-consecutive phrase ids: expected {expected}, found {actual}'
            )


def append_data(phrases=(), categories=(), data_file=DATA_FILE):
    """Append new phrases and categories to ``data_file``, creating it if absent."""
    if path.exists(data_file):
        data = load_json(data_file)
    else:
        data = {
            'categories': [],
            'phrases': {},
        }

    data['categories'] += [c.dict for c in categories]
    for p in phrases:
        cat_id = str(p.category_id)
        data['phrases'].setdefault(cat_id, []).append(p.dict)

    save_data(data, data_file)
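
# Data file layout implied by the accessors above:
#     {
#         "categories": [{"id": ..., ...}, ...],
#         "phrases": {"<str(category_id)>": [{"id": ..., ...}, ...]},
#     }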


def update_levels(new_levels: List[Dict[str, Union[List[List[int]], int]]]):
    """Merge ``new_levels`` into the stored levels and renumber their ids."""
    data = load_json(DATA_FILE)
    sorted_levels = sort_levels(data["levels"] + new_levels)

    positions = set()
    out = []
    for i, lvl in enumerate(sorted_levels):
        if lvl["position"] not in positions:
            positions.add(lvl["position"])
            out.append({
                "id": i,
                "position": lvl["position"],
                "balls": lvl["balls"],
                "par": lvl["par"],
            })

    # ``out`` keeps only the first level seen at each position, so any dropped
    # entry means the combined data contained duplicate positions.
    if len(out) < len(sorted_levels):
        raise ValueError("Duplicates found in levels data.")
    save_json({"levels": out}, DATA_FILE, indent=None)


def update_data(phrases=(), categories=(), data_file=DATA_FILE):
    """Update existing phrase and category records in ``data_file`` in place."""
    def _update_items(new_items, old_items):
        for new_item in new_items:
            for old_item in old_items:
                if new_item.id == old_item['id']:
                    old_item.update(new_item.dict)

    if path.exists(data_file):
        data = load_json(data_file)

        _update_items(categories, data['categories'])
        cat_ids = {str(p.category_id) for p in phrases}
        for cat_id in cat_ids:
            _update_items(
                filter(lambda p: str(p.category_id) == cat_id, phrases),
                data['phrases'][cat_id],
            )
        save_data(data, data_file)
    else:
        append_data(phrases, categories, data_file)


def get_phrase_by_key(value, key='id'):
    """Return the first phrase whose ``key`` field equals ``value``, or ``None``."""
    data = load_json(DATA_FILE)
    for phrase_set in data['phrases'].values():
        for phrase in phrase_set:
            if phrase[key] == value:
                return Phrase.from_dict(phrase)
    return None
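
# Example: ``get_phrase_by_key(7)`` returns the phrase with id 7; any stored
# field works as the key, e.g. ``get_phrase_by_key(s, key='script')``.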


def check_duplicates():
    """Raise ``ValueError`` if two stored levels share a position."""
    data = load_json(DATA_FILE)
    levels = data["levels"]
    if len({lvl["position"] for lvl in levels}) < len(levels):
        raise ValueError("Duplicate levels found in data file.")