Example #1
0
    def create_merged_channel(base, delta, precedence="longest"):
        """Merge *delta* and *base* into one channel dict.

        precedence selects which side wins for conflicting keys:
          - "base": values already in base are kept; delta only fills gaps
          - "delta": delta values overwrite base values
          - "longest": for string values present in both dicts the longer
            string wins; differing skill_ids are chained with "/"

        Returns the merged dict, or None when the dicts are not compatible
        (also None for an unrecognized precedence value).

        NOTE: *delta* is mutated in place for the "longest" strategy.
        """
        if not is_compatible_dict(base, delta):
            return None
        common = dict(merge_lists=True, no_dupes=True, skip_empty=True)
        if precedence == "base":
            return merge_dict(base, delta, new_only=True, **common)
        if precedence == "delta":
            return merge_dict(base, delta, **common)
        if precedence == "longest":
            # prefer the longer string for keys both dicts define
            for key in [k for k in delta if k in base]:
                old_val, new_val = base[key], delta[key]
                if isinstance(old_val, str) and isinstance(new_val, str) \
                        and len(old_val) > len(new_val):
                    delta[key] = old_val
            # chain skill ids when base's id is not already part of delta's
            base_id = base.get("skill_id")
            delta_id = delta.get("skill_id")
            if delta_id and base_id and base_id not in delta_id:
                delta["skill_id"] = delta_id + "/" + base_id
            return merge_dict(base, delta, **common)
Example #2
0
 def test_merge_dict(self):
     """Default merge: delta values overwrite base, lists are replaced."""
     expected = {
         "one": 1,
         "two": 2,
         "three": 30,
         "four": [4, 5, 6, "foo"],
         "five": None
     }
     merged = merge_dict(deepcopy(self.base_dict),
                         deepcopy(self.delta_dict))
     self.assertEqual(merged, expected)
Example #3
0
def get_config_object() -> Union[JsonStorageXDG, JsonConfigXDG]:
    """Locates or creates the OSM config file, and ensures that all required
       values are present, inserting defaults as needed

    Returns:
        json_database.JsonConfigXDG: the OSM config object
    """
    config = _existing_osm_config() or \
        JsonConfigXDG("OVOS-SkillsManager", subfolder="OpenVoiceOS")
    # NOTE: keys below should match Appstore.appstore_id
    default_appstores = {
        "local": {
            "active": True,
            "url": safe_get_skills_folder(),
            "parse_github": False,
            "priority": 1},
        "ovos": {
            "active": True,
            "url": "https://github.com/OpenVoiceOS/OVOS-appstore",
            "parse_github": False,
            "priority": 2},
        "mycroft_marketplace": {
            "active": False,
            "url": "https://market.mycroft.ai/",
            "parse_github": False,
            "priority": 5},
        "pling": {
            "active": False,
            "url": "https://apps.plasma-bigscreen.org/",
            "parse_github": False,
            "priority": 10},
        "neon": {
            "active": False,
            "url": "https://github.com/NeonGeckoCom/neon-skills-submodules/",
            "parse_github": False,
            "auth_token": None,
            "priority": 50},
        "andlo_skill_list": {
            "active": False,
            "url": "https://andlo.gitbook.io/mycroft-skills-list/",
            "parse_github": False,
            "priority": 100}
    }
    if "appstores" not in config:
        config["appstores"] = default_appstores
    # BUGFIX: this merge used to live inside the branch above, where it only
    # merged the freshly-assigned defaults into themselves (a no-op) and
    # existing configs never picked up newly added default appstores.
    # new_only=True preserves any user customizations.
    config["appstores"] = merge_dict(config["appstores"],
                                     default_appstores,
                                     new_only=True,
                                     no_dupes=True)
    if "version" not in config:
        # This stuff can really only happen on first run
        config["version"] = CURRENT_OSM_VERSION
        config["last_upgrade"] = CURRENT_OSM_VERSION
    config.store()
    return config
Example #4
0
    def __init__(self):
        """Load the OSM config, ensure every known default appstore entry
        exists, and initialize bootstrap/thread bookkeeping."""
        self.config = JsonStorageXDG("OVOS-SkillsManager")
        # NOTE, keys below should match Appstore.appstore_id
        defaults = {
            "local": {"active": True,
                      "url": get_skills_folder(),
                      "parse_github": False,
                      "priority": 1},
            "ovos": {"active": True,
                     "url": "https://github.com/OpenVoiceOS/OVOS-appstore",
                     "parse_github": False,
                     "priority": 2},
            "mycroft_marketplace": {"active": False,
                                    "url": "https://market.mycroft.ai/",
                                    "parse_github": False,
                                    "priority": 5},
            "pling": {"active": False,
                      "url": "https://apps.plasma-bigscreen.org/",
                      "parse_github": False,
                      "priority": 10},
            "neon": {"active": False,
                     "url": "https://github.com/NeonGeckoCom/neon-skills-submodules/",
                     "parse_github": False,
                     "auth_token": None,
                     "priority": 50},
            "andlo_skill_list": {"active": False,
                                 "url": "https://andlo.gitbook.io/mycroft-skills-list/",
                                 "parse_github": False,
                                 "priority": 100}
        }

        if "appstores" not in self.config:
            self.config["appstores"] = defaults
            self.save_config()
        # pick up appstores added in newer releases without clobbering
        # any user customisations (new_only=True)
        self.config["appstores"] = merge_dict(self.config["appstores"],
                                              defaults,
                                              new_only=True,
                                              no_dupes=True)
        self._boostrap_tracker = {}
        self.save_config()
        self._threads = []
Example #5
0
    def test_merge_dict_lists(self):
        """merge_lists=True appends delta list items; duplicates kept."""
        expected = {
            "one": 1,
            "two": 2,
            "three": 30,
            "four": ["foo", "bar", "baz", 4, 5, 6, "foo"],
            "five": None
        }
        merged = merge_dict(deepcopy(self.base_dict),
                            deepcopy(self.delta_dict),
                            merge_lists=True,
                            no_dupes=False)
        self.assertEqual(merged, expected)
Example #6
0
    def test_merge_dict_no_dupes(self):
        """no_dupes + skip_empty: lists are deduped, empty delta values
        do not overwrite base values."""
        expected = {
            "one": 1,
            "two": 2,
            "three": 30,
            "four": ["foo", "bar", "baz", 4, 5, 6],
            "five": 50
        }
        merged = merge_dict(deepcopy(self.base_dict),
                            deepcopy(self.delta_dict),
                            merge_lists=True,
                            skip_empty=True,
                            no_dupes=True)
        self.assertEqual(merged, expected)
Example #7
0
 def play_video_event(self, message):
     """Handle a GUI video selection event and forward the selected
     entry to the better_cps playback queue."""
     video_data = message.data["modelData"]
     if video_data["skill_id"] != self.skill_id:
         return
     # ensure all data fields present
     defaults = {
         "match_confidence": 100,
         "media_type": self.media_type,
         "playback": self.playback_type,
         "skill_icon": self.skill_icon,
         "skill_logo": self.skill_logo,
         "bg_image": video_data.get("logo") or self.default_bg,
         "image": video_data.get("logo") or self.default_image,
         "author": self.name
     }
     video_data = merge_dict(video_data, defaults)
     self.bus.emit(Message("better_cps.play",
                           {"tracks": [video_data]}))
Example #8
0
    def handle_utterance(self, event):
        """Tag a recognized utterance with message context and forward it
        to the skills service.

        Args:
            event (dict): recognizer payload; expected to contain
                'utterances' and 'context' (set by audio transformers),
                and optionally 'ident', 'raw_audio', 'timing' and 'data'
        """
        LOG.info("Utterance: " + str(event['utterances']))
        context = event["context"]  # from audio transformers
        # pop() moves these keys OUT of the event payload and into context;
        # evaluation order inside this literal matters
        context.update({'client_name': 'mycroft_listener',
                        'source': 'audio',
                        'ident': event.pop('ident', str(round(time()))),
                        'raw_audio': event.pop('raw_audio', None),
                        'destination': ["skills"],
                        "timing": event.pop("timing", {}),
                        'username': self.user_config["user"]["username"] or
                        "local",
                        'user_profiles': [self.user_config.content]
                        })
        if "data" in event:
            # arbitrary extra context supplied upstream; merged last so it
            # can override the defaults above
            data = event.pop("data")
            context = merge_dict(context, data)

        self._emit_utterance_to_skills(Message('recognizer_loop:utterance',
                                               event, context))
    def from_json(data: Union[str, dict], parse_github: bool = True):
        """Build a SkillEntry from a flexible json source.

        Args:
            data: skill definition - a dict, a json string, a path to a
                local json file, or a URL (github repo url or a url that
                serves raw json)
            parse_github: if True and data["url"] points at github, augment
                the data with info scraped from the repo

        Returns:
            SkillEntry: entry wrapping the parsed data

        Raises:
            GithubFileNotFound: a non-github URL did not return valid json
            ValueError: data could not be interpreted as a dict
        """
        if isinstance(data, str):
            if data.startswith("http"):
                url = data
                if "github" in url:
                    data = {"url": url}
                    # repo is parsed in github info step below,
                    # branch detected when parsing data dict
                else:
                    # any other URL is expected to serve raw json
                    try:
                        res = requests.get(url).text
                        data = json.loads(res)
                    except JSONDecodeError:
                        raise GithubFileNotFound
            elif isfile(data):
                with open(data) as f:
                    data = json.load(f)
            else:
                # assume an inline json string
                data = json.loads(data)

        if not isinstance(data, dict):
            # TODO new exception
            raise ValueError("unrecognized format")

        # augment with github info
        if parse_github:
            url = data.get("url", "")
            if "github" in url:
                try:
                    # explicit fields in data take precedence over scraped
                    # github data (skip_empty avoids clobbering with blanks)
                    github_data = get_skill_data(url, data.get("branch"))
                    data = merge_dict(github_data,
                                      data,
                                      merge_lists=True,
                                      skip_empty=True,
                                      no_dupes=True)
                    # NOTE(review): `requests` is the HTTP library module and
                    # has no `.headers` attribute - this line looks like it
                    # should read an auth token from a session/config instead;
                    # confirm before relying on authenticated parsing
                    parse_python_dependencies(
                        data["requirements"].get("python"),
                        requests.headers.get("Authorization"))
                except GithubInvalidUrl as e:
                    raise e
        return SkillEntry(data)
Example #10
0
 def skill_entry_from_url(url: str):
     """
     Builds a minimal SkillEntry object from the passed GitHub URL to use for skill installation
     :param url: URL of skill to install
     :return: SkillEntry object with url, branch, requirements, and authorname populated
     """
     from ovos_skills_manager.exceptions import GithubInvalidBranch, GithubFileNotFound
     from ovos_skills_manager.github import get_branch_from_github_url, normalize_github_url, get_requirements_json,\
         get_skill_json
     from ovos_skills_manager.skill_entry import SkillEntry
     try:
         branch = get_branch_from_github_url(url)
     except GithubInvalidBranch:
         branch = None
     url = normalize_github_url(url)
     requirements = get_requirements_json(url, branch)
     # system requirements are whitespace-separated package strings
     requirements["system"] = {
         k: v.split()
         for k, v in requirements.get("system", {}).items()
     }
     try:
         # renamed from `json` so the stdlib json module is not shadowed
         skill_json = get_skill_json(url, branch)
         requirements = merge_dict(requirements,
                                   skill_json.get("requirements", {}),
                                   merge_lists=True,
                                   skip_empty=True,
                                   no_dupes=True)
     except GithubFileNotFound:
         # no skill.json in the repo; fall back to the author from the URL
         skill_json = {"authorname": author_repo_from_github_url(url)[0]}
     return SkillEntry.from_json(
         {
             "url": url,
             "branch": branch,
             "requirements": requirements,
             "authorname": skill_json.get("authorname")
         }, False)
Example #11
0
    def CPS_search(self, phrase, media_type):
        """Score this skill's videos against *phrase* and *media_type*.

        Returns a list of CPS result dicts, sorted by match_confidence
        in descending order; entries scoring below settings["min_score"]
        are dropped.
        """
        base_score = self.match_media_type(phrase, media_type)
        if media_type == CPSMatchType.GENERIC:
            # generic searches tend to overmatch, so penalise them
            base_score -= 20
        # rank by title similarity first, then score metadata for the top
        # settings["search_depth"] candidates
        by_title = sorted(self.videos,
                          key=lambda v: fuzzy_match(v["title"], phrase),
                          reverse=True)
        results = []
        for video in by_title[:self.settings["search_depth"]]:
            score = base_score + fuzzy_match(video["title"], phrase) * 30
            if self.settings["match_tags"]:
                score += self.match_tags(video, phrase, media_type)
            if self.settings["match_title"]:
                score += self.match_title(video, phrase, media_type)
            if self.settings["match_description"]:
                score += self.match_description(video, phrase, media_type)
            if score < self.settings["min_score"]:
                continue
            entry = merge_dict(video, {
                "match_confidence": min(100, score),
                "media_type": self.media_type,
                "playback": self.playback_type,
                "skill_icon": self.skill_icon,
                "skill_logo": self.skill_logo,
                "bg_image": video.get("logo") or self.default_bg,
                "image": video.get("logo") or self.default_image,
                "author": self.name
            })
            results.append(entry)

        results.sort(key=lambda r: r["match_confidence"], reverse=True)
        return results
Example #12
0
def get_local_skills(parse_github=False, skiplist=None):
    """Yield a SkillEntry for every skill installed in the local skills
    folder.

    Args:
        parse_github: if True, augment each entry with data scraped from
            its github repository
        skiplist: optional list of folder names to skip

    Yields:
        SkillEntry: parsed entry for each skill folder
    """
    skills = get_skills_folder()
    skiplist = skiplist or []
    folders = listdir(skills)
    for fold in folders:
        path = join(skills, fold)
        if not isdir(path) or fold in skiplist:
            continue

        skill = {
            "appstore": "InstalledSkills",
            "appstore_url": skills,
            "skill_id": fold,
            "foldername": fold,
            "requirements": {
                "python": [],
                "system": [],
                "skill": []
            }
        }

        # if installed by msm/osm the folder obeys the "<repo>.<author>"
        # convention
        if "." in fold:
            try:
                repo, author = fold.split(".")
                skill["skillname"] = repo
                skill["authorname"] = author
                skill["url"] = f'https://github.com/{author}/{repo}'
            except ValueError:
                # more than one "." in the name - not the msm/osm convention
                pass

        # parse git info for url / author / branch
        gitinfo = join(path, ".git/config")
        if isfile(gitinfo):
            with open(gitinfo) as f:
                for line in f.readlines():
                    if line.strip().startswith("url ="):
                        skill["url"] = line.split("url =")[-1].strip()
                        skill["authorname"], skill["skillname"] = \
                            author_repo_from_github_url(skill["url"])
                    if line.strip().startswith("[branch "):
                        skill["branch"] = line.split("branch")[-1]\
                            .replace('"', "").strip()

        # scan the skill folder for metadata files
        for rtdir, _, files in walk(join(skills, fold)):
            for f in files:
                if f in GITHUB_JSON_FILES:  # skill.json
                    with open(join(rtdir, f)) as fi:
                        skill_meta = json.load(fi)
                    skill = merge_dict(skill, skill_meta, merge_lists=True)
                elif f in GITHUB_README_FILES:
                    with open(join(rtdir, f)) as fi:
                        readme = readme_to_json(fi.read())
                    skill = merge_dict(skill,
                                       readme,
                                       new_only=True,
                                       merge_lists=True)
                elif f in GITHUB_DESKTOP_FILES:
                    skill['desktopFile'] = True
                elif f in GITHUB_ICON_FILES:
                    skill["icon"] = join(rtdir, f)
                elif f in GITHUB_LICENSE_FILES:
                    with open(join(rtdir, f)) as fi:
                        lic = fi.read()
                    skill["license"] = parse_license_type(lic)
                elif f in GITHUB_LOGO_FILES:
                    skill["logo"] = join(rtdir, f)
                elif f in GITHUB_MANIFEST_FILES:
                    with open(join(rtdir, f)) as fi:
                        manifest = validate_manifest(fi.read())
                    skill["requirements"]["python"] += manifest.get(
                        "python") or []
                    skill["requirements"]["system"] += manifest.get(
                        "system") or []
                    skill["requirements"]["skill"] += manifest.get(
                        "skill") or []
                elif f in GITHUB_REQUIREMENTS_FILES:
                    with open(join(rtdir, f)) as fi:
                        reqs = [r for r in fi.read().split("\n") if r.strip()]
                    skill["requirements"]["python"] += reqs
                elif f in GITHUB_SKILL_REQUIREMENTS_FILES:
                    with open(join(rtdir, f)) as fi:
                        reqs = [r for r in fi.read().split("\n") if r.strip()]
                    skill["requirements"]["skill"] += reqs
        yield SkillEntry.from_json(skill, parse_github=parse_github)
Example #13
0
def get_skill_from_api(url, branch=None, strict=False):
    """Collect skill metadata for a github repo via the github API.

    Args:
        url: github repository url
        branch: optional branch spec; skill.json data may override it
        strict: if True, raise when release data cannot be retrieved

    Returns:
        dict: parsed skill data

    Raises:
        GithubAPIException: basic repo data could not be retrieved
        GithubAPIReleasesNotFound: strict=True and release lookup failed
    """
    data = {}

    # extract branch from .json, should branch take precedence?
    # i think so because user explicitly requested it
    branch = get_branch_from_skill_json_github_api(url, branch)

    try:
        api_data = get_repo_data_from_github_api(url, branch)
        data["branch"] = branch = api_data['default_branch']
        data["short_description"] = api_data['description']
        # NOTE(review): the github API returns license=None for repos
        # without a detected license, which would raise TypeError here -
        # confirm callers only hit licensed repos
        data["license"] = api_data["license"]["key"]
        data["foldername"] = api_data["name"]
        data["last_updated"] = api_data['updated_at']
        data["url"] = api_data["html_url"]
        data["authorname"] = api_data["owner"]["login"]
    except GithubAPIException as e:
        LOG.error("Failed to retrieve repo data from github api")
        raise

    try:
        releases = get_repo_releases_from_github_api(url, branch)
        if branch:
            # pick the release matching the requested branch name or commit
            for r in releases:
                if r["name"] == branch or r["commit"]["sha"] == branch:
                    data["version"] = r["name"]
                    #data["download_url"] = r["tarball_url"]
                    break
        else:
            # no branch known - assume the latest release
            data["version"] = releases[0]["name"]
            #data["download_url"] = releases[0]["tarball_url"]
    except GithubAPIException as e:
        LOG.error("Failed to retrieve releases data from github api")
        if strict:
            raise GithubAPIReleasesNotFound

    # augment with readme data
    try:
        data = merge_dict(data, get_readme_json_from_api(url, branch),
                          merge_lists=True, skip_empty=True, no_dupes=True)
    except GithubAPIReadmeNotFound:
        pass

    data["requirements"] = get_requirements_json_from_github_api(url, branch)

    # find logo
    try:
        data["logo"] = get_logo_url_from_github_api(url, branch)
    except GithubAPIFileNotFound as e:
        pass

    # find icon
    try:
        data["icon"] = icon = get_icon_url_from_github_api(url, branch)
    except GithubAPIFileNotFound:
        icon = None

    # augment with android data
    try:
        data["android"] = get_android_json_from_github_api(url, branch)
    except GithubAPIFileNotFound:
        # best guess or throw exception?
        author, repo = author_repo_from_github_url(url)
        data["android"] = {
            'android_icon': icon,
            'android_name': skill_name_from_github_url(url),
            'android_handler': '{repo}.{author}.home'.format(repo=repo,
                                                             author=author.lower())}

    # augment with desktop data
    # NOTE(review): this catches GithubFileNotFound while sibling API
    # helpers above raise GithubAPIFileNotFound - confirm the exception
    # hierarchy actually covers the API variant here
    try:
        data["desktop"] = get_desktop_json_from_github_api(url, branch)
        data["desktopFile"] = True
    except GithubFileNotFound:
        data["desktopFile"] = False

    # augment tags
    if "tags" not in data:
        data["tags"] = []
    if is_viral(data["license"]):
        data["tags"].append("viral-license")
    elif is_permissive(data["license"]):
        data["tags"].append("permissive-license")
    elif "unknown" in data["license"]:
        data["tags"].append("no-license")

    # augment with json data
    # this should take precedence over everything else
    try:
        data = merge_dict(data, get_skill_json_from_github_api(url, branch),
                          merge_lists=True, skip_empty=True, no_dupes=True)
    except GithubFileNotFound:
        pass

    return data
Example #14
0
def get_skill_from_github_url(url: str,
                              branch: Optional[str] = None) -> dict:
    """
    Collect skill metadata for the specified repository by scraping its
    github URLs (readme, license, icon, logo, android/desktop files).
    @param url: Repository URL to query
    @param branch: Optional branch spec, otherwise default branch will be used
    @return: dict of parsed skill data
    """
    # cache_repo_requests(url)  # speed up requests TODO avoid rate limit
    author, repo = author_repo_from_github_url(url)
    data = {
        "authorname": author,
        "foldername": repo,
        "branch": branch,
        "license": "unknown",
        "tags": []
    }
    if not branch:
        try:
            # check if branch is in the url itself
            data["branch"] = branch = get_branch_from_github_url(url)
        except GithubInvalidBranch:
            # let's assume latest release
            try:
                release = get_repo_releases_from_github_url(url)[0]
                data["branch"] = data["version"] = branch = release["name"]
                #data["download_url"] = release["tarball_url"]
            except GithubInvalidBranch:
                pass  # unknown branch...

    url = normalize_github_url(url)
    data["url"] = url
    data["skillname"] = skill_name_from_github_url(url)
    data["requirements"] = get_requirements_json_from_github_url(url, branch)

    # extract branch from .json, should branch take precedence?
    # i think so because user explicitly requested it
    branch = get_branch_from_skill_json_github_url(url, branch)

    # augment with readme data
    try:
        readme_data = get_readme_json_from_github_url(url, branch)
        data = merge_dict(data, readme_data,
                          merge_lists=True, skip_empty=True, no_dupes=True)
    except GithubReadmeNotFound:
        pass

    if branch:  # final, all sources checked by priority order
        data["branch"] = branch
        #data["download_url"] = GithubUrls.DOWNLOAD.format(author=author,
        #                                                  repo=repo,
        #                                                  branch=branch)

    try:
        data["license"] = get_license_type_from_github_url(url, branch)
    except GithubLicenseNotFound:
        pass
    try:
        data["icon"] = get_icon_url_from_github_url(url, branch)
    except GithubFileNotFound:
        pass
    # parse bigscreen flags
    if data["requirements"].get("system"):
        data['systemDeps'] = True
    else:
        data['systemDeps'] = False

    # find logo
    try:
        data["logo"] = get_logo_url_from_github_url(url, branch)
    except GithubFileNotFound as e:
        pass

    # augment with android data
    data["android"] = get_android_json_from_github_url(url, branch)

    # augment with desktop data
    try:
        data["desktop"] = get_desktop_json_from_github_url(url, branch)
        data["desktopFile"] = True
    except GithubFileNotFound:
        data["desktopFile"] = False

    # augment tags
    if "tags" not in data:
        data["tags"] = []
    if is_viral(data["license"]):
        data["tags"].append("viral-license")
    elif is_permissive(data["license"]):
        data["tags"].append("permissive-license")
    elif "unknown" in data["license"]:
        data["tags"].append("no-license")

    # augment with json data
    # this should take precedence over everything else
    try:
        data = merge_dict(data, get_skill_json_from_github_url(url, branch),
                          merge_lists=True, skip_empty=True, no_dupes=True)
    except GithubFileNotFound:
        pass

    return data
def get_skill_data_from_directory(skill_dir: str):
    """
    Parse the specified skill directory and return a dict representation of a
    SkillEntry.
    @param skill_dir: path to skill directory
    @return: dict parsed skill data
    """
    # NOTE: '/'-based split assumes POSIX-style paths
    skills, fold = skill_dir.rsplit('/', 1)
    skill_data = {
        "appstore": "InstalledSkills",
        "appstore_url": skills,
        "skill_id": fold,
        "requirements": {
            "python": [],
            "system": {},
            "skill": []
        }
    }

    # if installed by msm/osm the folder obeys the "<repo>.<author>"
    # convention
    if "." in fold:
        try:
            repo, author = fold.split(".")
            skill_data["skillname"] = repo
            skill_data["authorname"] = author
            skill_data["url"] = f'https://github.com/{author}/{repo}'
        except ValueError:
            # more than one "." in the name - not the msm/osm convention
            pass

    # parse git info
    gitinfo = join(skill_dir, ".git/config")
    if isfile(gitinfo):
        with open(gitinfo) as f:
            for line in f.readlines():
                if line.strip().startswith("url ="):
                    skill_data["url"] = line.split("url =")[-1].strip()
                    skill_data["authorname"], skill_data["skillname"] = \
                        author_repo_from_github_url(skill_data["url"])
                if line.strip().startswith("[branch "):
                    skill_data["branch"] = line.split("branch")[-1] \
                        .replace('"', "").strip()

    # parse skill files
    for root_dir, _, files in walk(skill_dir):
        for f in files:
            if f in GITHUB_JSON_FILES:  # skill.json
                with open(join(root_dir, f)) as fi:
                    skill_meta = json.load(fi)
                skill_data = merge_dict(skill_data,
                                        skill_meta,
                                        merge_lists=True)
            elif f in GITHUB_README_FILES:
                with open(join(root_dir, f)) as fi:
                    readme = readme_to_json(fi.read())
                skill_data = merge_dict(skill_data,
                                        readme,
                                        new_only=True,
                                        merge_lists=True)
            elif f in GITHUB_DESKTOP_FILES:
                skill_data['desktopFile'] = True
            elif f in GITHUB_ICON_FILES:
                skill_data["icon"] = join(root_dir, f)
            elif f in GITHUB_LICENSE_FILES:
                with open(join(root_dir, f)) as fi:
                    lic = fi.read()
                skill_data["license"] = parse_license_type(lic)
            elif f in GITHUB_LOGO_FILES:
                skill_data["logo"] = join(root_dir, f)
            elif f in GITHUB_MANIFEST_FILES:
                with open(join(root_dir, f)) as fi:
                    manifest = validate_manifest(fi.read()).get(
                        "dependencies", {})
                skill_data["requirements"]["python"] += \
                    manifest.get("python") or []
                skill_data["requirements"]["system"] = \
                    merge_dict(skill_data["requirements"]["system"],
                               manifest.get("system") or {}, merge_lists=True)

                skill_data["requirements"]["skill"] += \
                    manifest.get("skill") or []
            elif f in GITHUB_REQUIREMENTS_FILES:
                with open(join(root_dir, f)) as fi:
                    reqs = [r for r in fi.read().split("\n") if r.strip()]
                skill_data["requirements"]["python"] += reqs
            elif f in GITHUB_SKILL_REQUIREMENTS_FILES:
                with open(join(root_dir, f)) as fi:
                    reqs = [r for r in fi.read().split("\n") if r.strip()]
                skill_data["requirements"]["skill"] += reqs
    # de-dupe requirements
    skill_data["requirements"]["python"] = \
        list(set(skill_data["requirements"]["python"]))
    skill_data["requirements"]["skill"] = \
        list(set(skill_data["requirements"]["skill"]))
    skill_data['foldername'] = fold  # Override what the config specifies
    skill_data['authorname'] = skill_data.get('authorname') or "local"
    return skill_data
Example #16
0
 def merge(self, conf):
     """Merge *conf* into this object in place and return self (fluent)."""
     merge_dict(self, conf)
     return self