コード例 #1
0
def generate_chart_url(body):  # noqa: E501
    """Generate (or reuse) a shareable chart URL for a prioritized-recommendations request.

    The request body is read from ``connexion.request`` (the ``body`` parameter
    is part of the generated API signature). Results are cached: if a chart URL
    was already generated for an identical request and its chart key is still
    registered, the cached URL is returned instead of fetching bugs again.

    :param body: raw request body (unused; connexion provides the parsed JSON)
    :return: ChartResponse carrying the chart URL, or None if the request was not JSON
    """
    response = None

    if connexion.request.is_json:
        content = connexion.request.get_json()
        app.logger.info("Generate Chart: {}".format(content))
        # Renamed from `request` to avoid confusion with connexion.request.
        chart_request = PrioritizedRecommendationsRequest.from_dict(content)
        cache_key = chart_request.unique_key()

        # Reuse the cached URL only while its chart key is still registered.
        if cache_key in CACHED_CHART_URLs:
            chart_url = CACHED_CHART_URLs[cache_key]
            chart_key = chart_url.split("/")[-1]
            if chart_key in CHART_REQUESTs:
                return ChartResponse(False, None, chart_url)

        # 16-character random key; SystemRandom draws from the OS CSPRNG.
        chart_key = "".join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(16))
        chart_url = "http://{}:{}/prioritizer/chart/c/{}".format(helper.app_host(), helper.app_port(), chart_key)

        # 0 presumably means "no limit"; was temporarily 800 — TODO confirm
        # against BugzillaFetcher.fetch_bugs semantics.
        limit_bugs = 0
        bugzilla_fetcher = bugzillafetcher.BugzillaFetcher("https://bugs.eclipse.org/bugs/rest/bug")
        bugs = bugzilla_fetcher.fetch_bugs(chart_request.assignee, chart_request.products,
                                           chart_request.components, "RESOLVED", limit=limit_bugs)
        requirements = [Requirement.from_bug(b) for b in bugs]
        ASSIGNED_RESOLVED_REQUIREMENTS_OF_STAKEHOLDER[cache_key] = requirements
        CACHED_CHART_URLs[cache_key] = chart_url
        CHART_REQUESTs[chart_key] = chart_request
        response = ChartResponse(False, None, chart_url)

    return response
コード例 #2
0
def compute_popularity(body):  # noqa: E501
    """Retrieve a list with values for given set of requirements indicating their popularity for the crowd on twitter.

     # noqa: E501

    :param body: Requirement objects for which the social popularity should be measured
    :type body: list | bytes

    :rtype: List[RequirementPopularity]
    """
    response_list = []
    if connexion.request.is_json:
        content = connexion.request.get_json()
        assert isinstance(content, list)
        requirements = [Requirement.from_dict(d) for d in content]  # noqa: E501
        requirements = [
            requirement.Requirement(r.id, r.title, r.description)
            for r in requirements
        ]
        requirements = preprocessing.preprocess_requirements(
            requirements,
            enable_pos_tagging=True,
            enable_lemmatization=False,
            enable_stemming=False)
        # Extend stop word list: https://www.wordfrequency.info/free.asp?s=y

        # POS classes considered relevant for the popularity score:
        # nouns (NN/NNS), named entities (NE) and foreign words (FW).
        relevant_pos_classes = ("NN", "NNS", "NE", "FW")

        maut_results = []
        for requ in requirements:
            maut_score = 0
            if len(list(requ.title_tokens_pos_tags)) > 0:
                # A tag is a (token, pos_class) pair; the set() keeps a token
                # appearing in both title and description from being counted
                # twice. any() also prevents double-counting a tag that would
                # match more than one relevant POS class.
                for tag in set(requ.title_tokens_pos_tags +
                               requ.description_tokens_pos_tags):
                    if any(pos_class in tag
                           for pos_class in relevant_pos_classes):
                        maut_score += fetch_twitter(str(tag[0]))
            else:
                # No POS tags available -> fall back to scoring raw tokens.
                for token in set(requ.title_tokens + requ.description_tokens):
                    maut_score += fetch_twitter(token)
            maut_results.append(maut_score)

        # Hoisted out of the loop: summing per iteration was O(n^2).
        total_score = sum(maut_results)
        for idx, requ in enumerate(requirements):
            response_list.append(
                RequirementPopularity(
                    id=requ.id,
                    # Percentage share of the total score; 0 when no token
                    # yielded any score (avoids division by zero).
                    popularity=((maut_results[idx] * 100) / total_score
                                if total_score > 0 else 0)))

    return response_list
コード例 #3
0
    def test_compute_popularity(self):
        """Test case for compute_popularity

        Retrieve a list with values for given set of requirements indicating their popularity for the crowd on twitter.
        """
        # POST a minimal payload with a single (empty) Requirement and expect
        # the endpoint to answer with HTTP 200.
        payload = json.dumps([Requirement()])
        response = self.client.open(
            '/v1/popularity',
            method='POST',
            data=payload,
            content_type='application/json')
        self.assert200(
            response,
            'Response body is : ' + response.data.decode('utf-8'))
コード例 #4
0
def perform_svd():
    """Run SVD-based similarity detection on the Siemens requirements CSV
    dataset and print each detected pair of similar requirements.

    NOTE(review): an earlier variant loaded "requirements_en.json" with POS
    tagging enabled and max_distance=0.6, but its result was immediately
    overwritten by the CSV branch below — that dead code has been removed.
    """
    # The CSV dataset contains plain descriptions, so POS tagging and
    # lemmatization are disabled; 0.4 is the maximum distance for two
    # requirements to be reported as similar — presumably tuned empirically.
    enable_tagging = False
    max_distance = 0.4
    lang = "en"

    with open(
            os.path.join(helper.APP_PATH, "data",
                         "siemens_requirements_en.csv")) as f:
        plain_requirements = csv_reader(f)
        requs = []
        for idx, description in enumerate(plain_requirements):
            # Cap the experiment at the first 401 requirements.
            if idx > 400:
                break
            requs.append({'id': idx, 'title': '', 'description': description})

    requs = [Requirement.from_dict(r) for r in requs]
    requs = [
        requirement.Requirement(r.id, r.title, r.description) for r in requs
    ]
    requs = preprocessing.preprocess_requirements(
        requs,
        enable_pos_tagging=enable_tagging,
        enable_lemmatization=enable_tagging,
        enable_stemming=False,
        lang=lang)

    _logger.info("SVD...")
    predictions_map = svd.svd(requs, k=3, max_distance=max_distance)
    for subject_requirement, similar_requirements in predictions_map.items():
        if not similar_requirements:
            continue

        for similar_requirement in similar_requirements:
            print("#{}: {} -> #{}: {}".format(
                subject_requirement.id, subject_requirement.description[:80],
                similar_requirement.id, similar_requirement.description[:80]))
コード例 #5
0
def recommend_requirement_dependencies(body):  # noqa: E501
    """Recommend potential dependencies between the given requirements.

    An SVD-based similarity analysis proposes, for each requirement, the IDs
    of requirements it likely depends on. Every undirected dependency pair is
    reported exactly once — on whichever requirement is processed first.
    # noqa: E501

    :param body: Requirement objects to analyze for mutual dependencies
    :type body: list | bytes

    :rtype: List[Requirement]
    """
    response_list = []
    # TODO: introduce parameter to set language
    lang = "en"

    if not connexion.request.is_json:
        return response_list

    content = connexion.request.get_json()
    assert isinstance(content, list)
    requs = [Requirement.from_dict(d) for d in content]  # noqa: E501

    requs = [
        requirement.Requirement(r.id, r.title, r.description, r.comments)
        for r in requs
    ]
    for r in requs:
        r.append_comments_to_description()

    requs = preprocessing.preprocess_requirements(requs,
                                                  enable_stemming=False,
                                                  lang=lang)

    # Requirements with no remaining tokens cannot be processed by SVD.
    requs = [r for r in requs if len(r.tokens()) > 0]
    if len(requs) == 0:
        return []

    _logger.info("SVD...")

    # Heuristic: the fewer requirements, the more permissive the maximum
    # distance and the smaller the number of latent dimensions k.
    n = len(requs)
    if n > 100:
        min_distance, max_distance, k = 0.2, 0.5, 10
    elif n > 50:
        min_distance, max_distance, k = 0.2, 0.6, 8
    elif n > 30:
        min_distance, max_distance, k = 0.2, 0.65, 5
    elif n > 10:
        min_distance, max_distance, k = 0.2, 0.7, 3
    elif n > 5:
        min_distance, max_distance, k = 0.2, 0.75, 2
    else:
        min_distance, max_distance, k = 0.2, 0.8, 1

    predictions_map = svd.svd(requs, k=k, min_distance=min_distance, max_distance=max_distance)
    # Undirected pairs already reported, stored as both (x, y) and (y, x).
    dependency_pairs = set()
    for subject_requirement, dependent_requirements in predictions_map.items():
        requ = Requirement.from_dict({
            "id": subject_requirement.id,
            "title": subject_requirement.title,
            "description": subject_requirement.description,
            "comments": subject_requirement.comments
        })
        rx = subject_requirement.id
        dependent_ids = set(r.id for r in dependent_requirements)
        # Record both directions so each undirected pair is reported once.
        undirected_pairs = set(
            itertools.chain.from_iterable(
                [(rx, ry), (ry, rx)] for ry in dependent_ids))
        new_pairs = undirected_pairs - dependency_pairs
        dependency_pairs |= new_pairs
        # Extract the "other" requirement ID from each not-yet-reported pair.
        predictions = list({t[0] if t[0] != rx else t[1] for t in new_pairs})

        requ.predictions = predictions
        response_list.append(requ)
        for dependent_requirement in dependent_requirements:
            # Was a bare print(); the module already logs via _logger.
            _logger.debug("%s -> %s", subject_requirement,
                          dependent_requirement)

    return response_list
def recommend_similar_requirements(body):  # noqa: E501
    """Recommend similar requirements for each of the given requirements.

    An SVD-based similarity analysis proposes similar requirements; the
    relation is made symmetric, so if rx lists ry as similar, ry also
    lists rx.  # noqa: E501

    :param body: Requirement objects to analyze for similarity
    :type body: list | bytes

    :rtype: List[Requirement]
    """
    response_list = []
    # TODO: introduce parameter to set language
    lang = "en"

    if not connexion.request.is_json:
        return response_list

    content = connexion.request.get_json()
    assert isinstance(content, list)
    requs = [Requirement.from_dict(d) for d in content]  # noqa: E501

    requs = [
        requirement.Requirement(r.id, r.title, r.description, r.comments)
        for r in requs
    ]
    for r in requs:
        r.append_comments_to_description()

    requs = preprocessing.preprocess_requirements(requs, lang=lang)

    # Requirements with no remaining tokens cannot be processed by SVD.
    requs = [r for r in requs if len(r.tokens()) > 0]
    if len(requs) == 0:
        return []

    _logger.info("SVD...")

    # Heuristic: the fewer requirements, the more permissive the maximum
    # distance and the smaller the number of latent dimensions k.
    n = len(requs)
    if n > 100:
        max_distance, k = 0.4, 10
    elif n > 50:
        max_distance, k = 0.5, 8
    elif n > 30:
        max_distance, k = 0.55, 5
    elif n > 10:
        max_distance, k = 0.6, 3
    elif n > 5:
        max_distance, k = 0.6, 2
    else:
        max_distance, k = 0.6, 1

    predictions_map = svd.svd(requs, k=k, max_distance=max_distance)

    # Make the similarity relation symmetric: record each predicted pair in
    # both directions.
    predictions = {}
    for subject_requirement, similar_requirements in predictions_map.items():
        rx = subject_requirement.id
        similar_ids = set(r.id for r in similar_requirements)
        predictions.setdefault(rx, set()).update(similar_ids)
        for ry in similar_ids:
            predictions.setdefault(ry, set()).add(rx)

    for subject_requirement, similar_requirements in predictions_map.items():
        requ = Requirement.from_dict({
            "id": subject_requirement.id,
            "title": subject_requirement.title,
            "description": subject_requirement.description,
            "comments": subject_requirement.comments
        })
        requ.predictions = list(predictions[subject_requirement.id])
        response_list.append(requ)
        for similar_requirement in similar_requirements:
            # Was a bare print(); the module already logs via _logger.
            _logger.debug("%s -> %s", subject_requirement,
                          similar_requirement)

    return response_list