Example #1
def find_similar(topic, title, tags):

    print('DEBUG IN FIND SIMILAR: {}, {}, {}\n'.format(topic, title, tags))

    if topic not in valid_sites:
        raise Exception('Unsupported topic')

    method = 'search/advanced'
    SITE = StackAPI(topic, key=APP_KEY, access_token=ACCESS_TOKEN)

    similar = []
    similar += SITE.fetch(
        method, q=title, tags=';'.join(tags), answers=1,
        sort='votes')['items']  # title match and 1+ tags match
    similar += SITE.fetch(method,
                          q=title,
                          answers=1,
                          sort='votes')['items']  # title match
    #similar += SITE.fetch(method, tags=';'.join(tags), answers=1, sort='votes')['items'] # 1+ tags match

    ids = OrderedSet()
    for s in similar:
        ids.add(str(s['question_id']))
    ids = list(ids)[:15]  # Top 15

    print('{} SIMILAR FOUND\n'.format(len(ids)))

    return get_questions_and_answers(topic, ids)
Example #2
 def test_no_endpoint_provided(self):
     """Testing that it raises the correct error when no endpoint is provided"""
     with self.assertRaises(ValueError) as cm:
         with patch('stackapi.StackAPI.fetch', fake_stackoverflow_exists) as mock_site:
             site = StackAPI('stackoverflow')
         site.fetch()
     self.assertEqual('No end point provided.', str(cm.exception))
Example #3
def tagpair(request, Tag):

    SITE = StackAPI('stackoverflow')
    ori_tag = [Tag]

    TagPairCompares = tagpaircompare.objects.filter(tag=Tag).values('simitag')
    if not TagPairCompares:
        raise Http404("Tag pair does not exist")

    tagsFetch = []
    for tag in TagPairCompares:
        tagname = tag['simitag']
        tagsFetch.append(tagname)
    tagswiki = SITE.fetch('tags/{tags}/wikis', tags=tagsFetch)
    tagsWikiDict = {}
    for item in tagswiki['items']:
        excerpt = item['excerpt']
        excerpt = excerpt.strip().split('. ')[0]
        if '.&' in excerpt:
            excerpt = excerpt.split('.&')[0]
        tagsWikiDict[item['tag_name']] = excerpt

    ori_tagwiki = {}
    ori_wiki = SITE.fetch('tags/{tags}/wikis',
                          tags=ori_tag)['items'][0]['excerpt']
    ori_wiki = ori_wiki.strip().split('. ')[0]
    if '.&' in ori_wiki:
        ori_wiki = ori_wiki.split('.&')[0]
    ori_tagwiki[Tag] = ori_wiki

    return render(request, 'tagpair.html', {
        'tagsWikiDicts': tagsWikiDict,
        'ori_tagwikis': ori_tagwiki
    })
Example #4
def details(postID):
    SITE = StackAPI('stackoverflow')
    x = SITE.fetch('questions/{ids}', ids=[postID], filter='withbody')
    y = SITE.fetch('questions/{ids}/answers', ids=[postID], filter='withbody')
    total_answers = x["items"][0]['answer_count']
    question_title = x["items"][0]['title']
    quest_score = x['items'][0]['score']
    all_string = "<h1> (Votes:" + str(
        quest_score
    ) + ") Question:" + question_title + " </h1>" + x["items"][0]['body']
    all_string = all_string + "<h1>Answer Section</h1>"
    for i in range(len(y["items"])):  # answer_count can exceed the answers actually returned
        ans_score = y['items'][i]['score']
        all_string = all_string + "<h2> (Votes:" + str(
            ans_score) + ") answer " + str(
                i + 1) + " </h2>" + y["items"][i]['body']
        all_string = all_string + "<h2>Comment Section</h2>"
        id = y['items'][i]['answer_id']
        comment = SITE.fetch('answers/{ids}/comments',
                             ids=[id],
                             filter='withbody')
        total_comments = len(comment['items'])
        for k in range(total_comments):
            comment_id = comment['items'][k]['comment_id']
            comment_text = SITE.fetch('comments/{ids}',
                                      ids=[comment_id],
                                      filter='withbody')
            all_string = all_string + "<h3> comment " + str(
                k + 1) + " </h3>" + comment_text['items'][0]['body']
            print(comment_text['items'][0]['body'])

    return all_string
Example #6
def getUsers():
  SITE = StackAPI('stackoverflow')
  SITE.max_pages = 1
  SITE.page_size = 100
  try:
    users.append(SITE.fetch('users', sort='reputation'))
  except Exception:  # likely throttled; wait briefly and retry once
    time.sleep(5)
    users.append(SITE.fetch('users', sort='reputation'))
Example #7
def getQuestions():
    SITE = StackAPI('stackoverflow')
    SITE.max_pages = 10
    SITE.page_size = 100
    try:
        questions.append(SITE.fetch('questions', tagged='C#', sort='votes'))
    except Exception:  # wait briefly and retry once on failure
        time.sleep(5)
        questions.append(SITE.fetch('questions', tagged='C#', sort='votes'))
    time.sleep(15)
Example #8
    def test_exceptions_thrown(self):
        """Testing that a StackAPIError is properly thrown

        This test hits the real API."""
        with self.assertRaises(StackAPIError) as cm:
            site = StackAPI('stackoverflow')
            site._api_key = None
            site.fetch('errors/400')
        self.assertEqual(cm.exception.error, 400)
        self.assertEqual(cm.exception.code, 'bad_parameter')
Example #10
def callback():

    superuser = requests_client.OAuth2Session(CLIENT_ID)
    token = superuser.fetch_token(
        url=TOKEN_URL,
        client_secret=CLIENT_SECRET,
        authorization_response=flask.request.url,
        # redirect_uri="http://jackzlin.com/callback"
        # redirect_uri="http://localhost:5000/callback"
        redirect_uri="http://forum-rec-app.herokuapp.com/callback")


    SITE = StackAPI('superuser', key=SECRET_KEY)
    me = SITE.fetch('me', access_token=token['access_token'])

    # Keep user_id, profile_image, display_name
    global USER_VALS
    USER_VALS = me['items'][0]
    userId = USER_VALS['user_id']

    # Get users with cold start
    query_users_cold = """
    SELECT *
    FROM USERS
    """
    
    cold_users = pd.read_sql(query_users_cold, con=connection)

    # Set of cold_users
    if userId in set(cold_users.user_id):
        pass
    
    else:
        answered_questions = SITE.fetch('me/answers', access_token=token['access_token'], fromdate=SPLIT_DATE)

        try:
            len_questions = len(answered_questions['items'])
        except (KeyError, TypeError):
            len_questions = 0

        # Fewer than 25 answers since SPLIT_DATE marks the user as a cold start
        cold = 'TRUE' if len_questions < 25 else 'FALSE'
        insert_cold = """
        INSERT INTO USERS (user_id, name, profile_img_url, cold)
        VALUES ({0}, '{1}', '{2}', {3})
        """.format(userId, USER_VALS['display_name'], USER_VALS['profile_image'], cold)
        
        cursor.execute(insert_cold)
        connection.commit()

    return flask.render_template('main.html', userId=userId, userItems=USER_VALS)
Example #11
def getAnswers():
    SITE = StackAPI('stackoverflow')
    SITE.max_pages = 1
    for id in issue_answers:
        try:
            answers.append(SITE.fetch('answers/{}'.format(id)))
            comments.append(SITE.fetch('answers/{}/comments'.format(id)))
        except Exception:  # wait briefly and retry once on failure
            time.sleep(5)
            answers.append(SITE.fetch('answers/{}'.format(id)))
            comments.append(SITE.fetch('answers/{}/comments'.format(id)))
        time.sleep(10)
Example #12
def processing_name():
    name = request.form['name']
    site = request.form['site']
    processed_site = site.lower()
    if(processed_site):
        site = StackAPI(processed_site)
    else:
        site = StackAPI("stackoverflow")
    if(request.form['user_id']):
        users = site.fetch('users', ids=[request.form['user_id']])
        createdUserDate = users['items'][0]['creation_date']
        questionsAnswers = questions_answers([request.form['user_id']], site)
        post_frequency = posting_frequency([request.form['user_id']], site, createdUserDate, questionsAnswers[0])
        badges = badge_check([request.form['user_id']], site)
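        # For each tag from questions_answers, keep up to five answerers (name -> user id)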
        tagAnswers = dict()
        for tag in questionsAnswers[4]:
            answerers = dict()
            tagAnswerers = tag_help([request.form['user_id']], site, tag)
            for user in tagAnswerers:
                answerers[user['user']['display_name']] = user['user']['user_id']
            tagAnswers[tag] = {k: answerers[k] for k in list(answerers)[:5]}
        timeLine = timeline([request.form['user_id']], site)
        return render_template('SearchResult.html', name = users['items'][0]['display_name'], users = users['items'][0]['display_name'], user_id= users['items'][0]['user_id'],  badges=badges, questions = questionsAnswers, question_url='static/images/{}'.format(questionsAnswers[2]), answerTags_url='static/images/{}'.format(questionsAnswers[3]), posting_url='static/images/{}'.format(post_frequency), tagAnswerers=tagAnswers, timeline='static/images/{}'.format(timeLine))
    else: 
        processed_name = name
        users = site.fetch('users', inname=processed_name)
        listSameUsers = list()
        listSameUsersID = list()
        for sameUsers in users['items']:
            listSameUsers.append(sameUsers['display_name'])
            listSameUsersID.append(sameUsers['user_id'])
        if(len(listSameUsers) > 1):
            return render_template('SearchResultMany.html', name = processed_name, users = listSameUsers, user_id = listSameUsersID, userAmount = len(listSameUsers))
        elif(len(listSameUsers) <= 0):
            return render_template('SearchResultNone.html', name = processed_name)
        else:
            questionsAnswers = questions_answers(listSameUsersID, site)
            createdUserDate = users['items'][0]['creation_date']
            post_frequency = posting_frequency(listSameUsersID, site, createdUserDate, questionsAnswers[0])
            badges = badge_check(listSameUsersID, site)
            tagAnswers = dict()
            for tag in questionsAnswers[4]:
                answerers = dict()
                tagAnswerers = tag_help(listSameUsersID, site, tag)
                for user in tagAnswerers:
                    answerers[user['user']['display_name']] = user['user']['user_id']
                tagAnswers[tag] = {k: answerers[k] for k in list(answerers)[:5]}
            timeLine = timeline(listSameUsersID, site)
            return render_template('SearchResult.html', name = processed_name, users = listSameUsers, user_id= listSameUsersID, questions = questionsAnswers,  badges=badges, question_url='static/images/{}'.format(questionsAnswers[2]), answerTags_url='static/images/{}'.format(questionsAnswers[3]), posting_url='static/images/{}'.format(post_frequency), tagAnswerers=tagAnswers, timeline='static/images/{}'.format(timeLine))
Example #13
def allanswers(framework, projects):
    global api
    api = StackAPI("stackoverflow")
    samples = get_samples(projects)
    output_write(framework, directory, "all_answers", get_header(), True)
    with open("stackoverflow/" + framework +
              "_questions_and_answers_output.csv") as questions:
        for index, question in enumerate(questions):
            if index == 0: continue
            print("Questions from sample " + question.split(",")[1])
            question = question.replace("\n", "")
            question_id = question.split(",")[2]
            answers = api.fetch("questions/" + question_id +
                                "/answers")["items"]
            print(len(answers))
            for indx, answer in enumerate(answers):
                print("{0}% answers analysed of question {1}".format(
                    (indx + 1) / len(answers) * 100, question_id))
                try:
                    answer_owner = get_owner_by_user_id(
                        api, answer["owner"]["user_id"])
                except KeyError:
                    answer_owner = {
                        "user_id": "",
                        "reputation": "",
                        "creation_date": "",
                        "tags": []
                    }

                output = create_output(framework,
                                       question.split(",")[1], question_id,
                                       answer, answer_owner)
                output_write(framework, directory, "all_answers", output,
                             False)
Example #14
def get_data(question_id):
    data = {}
    SITE = StackAPI('codegolf')

    answers = SITE.fetch(
        'questions/{ids}/answers',
        ids=[question_id],
        filter=
        '!*SU8CGYZitCB.D*(BDVIficKj7nFMLLDij64nVID)N9aK3GmR9kT4IzT*5iO_1y3iZ)6W.G*'
    )

    for answer in answers["items"]:

        soup = BeautifulSoup(answer["body"], features="lxml")
        try:
            title = soup.body.h1.text
            raw_parts = title.split(",")
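            # Headers typically read "Language, NN bytes"; skip the language part
            # and take the smallest numeric token as the byte count.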
            components = []
            count = 0
            for i in raw_parts:
                if count:
                    components.extend(i.split())
                else:
                    count = 1
            bytecount = str(min([int(i) for i in components if i.isnumeric()]))
            data[raw_parts[0]] = bytecount
        except Exception:
            pass  # skip answers whose header cannot be parsed

    return data
Example #15
def api_query():
    SITE = StackAPI('stackoverflow')
    SITE.max_pages = 200
    questions = SITE.fetch('questions/no-answers',
                           order='desc',
                           fromdate=int(time.time()) - 3600 * 72,
                           sort='creation',
                           tagged='google-cloud-platform')
    question_table = [
        dict(title='TITLE',
             date='DATE',
             tags='TAGS',
             views='PAGE VIEWS',
             link='LINK')
    ]
    print(question_table)
    for question in questions['items']:
        current = dict(title=question['title'],
                       date=time.strftime(
                           '%m-%d %H:%M',
                           time.localtime(question['creation_date'])),
                       tags=', '.join(
                           str(e) for e in question['tags']
                           if e != 'google-cloud-platform'),
                       views=str(question['view_count']),
                       link=question['link'])
        question_table.append(current)

    return render_template('query.html', questions=question_table)
Example #16
def main():
    # Loading GitHub issues
    df = pd.read_csv("artifacts/issues.csv")
    issue_titles = df["title"].to_list()
    # issue_titles = list(dict.fromkeys(issue_titles))

    stack_api = StackAPI("stackoverflow")

    issue_questions = []
    for i, title in enumerate(issue_titles):
        if len(title.split()) > 3:
            log.info(f'#{i}\tfetching questions for issue: "{title}"')
            issue_questions.append(
                stack_api.fetch("search/advanced",
                                title=title,
                                tagged=["python"],
                                order="desc",
                                sort="votes",
                                pagesize=100,
                                page=1))
        else:
            log.warning(f'#{i}\tissue title is too small: "{title}"')
            issue_questions.append({})  # placeholder keeps results aligned with df["id"]

    for issue, questions in zip(df["id"], issue_questions):
        if not questions:
            continue
        log.info(f"#{issue}\tSaving questions for issue: {issue}")
        with open(f"output/{issue}.json", "w", encoding="utf-8") as f:
            dump(questions, f, indent=2)
Example #17
def stackoverflow(framework, projects):
    global api
    api = StackAPI("stackoverflow")
    samples = get_samples(projects)
    output_write(framework, directory, "questions_and_answers", get_header(), True)
    for index, sample in enumerate(samples):
        print_status_samples(index+1, len(samples))
        questions = get_questions_when_body_has(sample)
        for indx, question in enumerate(questions["items"]):
            print("{0}% questions analysed of {1}".format( (indx+1)/len(questions)*100, sample))
            try:
                answer = api.fetch("answers/{ids}", ids=[question["accepted_answer_id"]])["items"][0]
                answer_owner = get_owner_by_user_id(api, answer["owner"]["user_id"])
            except KeyError:
                answer = {
                    "answer_id": "",
                    "score": "",
                    "creation_date": ""
                }
                answer_owner = {
                    "user_id": "",
                    "reputation": "",
                    "creation_date": "",
                    "tags": []
                }
            question_owner = get_owner_by_user_id(api, question["owner"]["user_id"])
            output = create_output(framework, sample, question, answer, question_owner, answer_owner)
            output_write(framework, directory, "questions_and_answers", output, False)
Example #18
    def load(self):
        SITE = StackAPI('stackoverflow')
        data = SITE.fetch('questions',
                          filter='!-y(KwOdKQqjehDBmb0h5Opw_j44BmcMCwAOxyvp5P',
                          pagesize=s_pagesize,
                          fromdate=s_pagesize,
                          order=s_order,
                          sort=s_sort,
                          tagged=s_tags)
        questions = []
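        # Each entry: [question_id, title, last_activity_date, closed_date (or 0),
        #              [[answer_id, question_id, score, code_snippets], ...]]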

        for i in data['items']:
            if (i['answer_count'] > 0):
                qid = i['question_id']
                closed = 0
                ans = []
                if ('closed_date' in i):
                    closed = i['closed_date']

                if not any(qid in x for x in questions):
                    for x in i['answers']:
                        body = re.findall(r'<code>(.*?)</code>', x['body'])
                        if body:
                            ans.append([
                                x['answer_id'], x['question_id'], x['score'],
                                body
                            ])
                    if ans:
                        questions.append([
                            qid, i['title'], i['last_activity_date'], closed,
                            ans
                        ])

        self.posts = questions
        return questions
Example #19
def identify_questions(tags):
    ques_id = []
    ques_score = []
    ques_body = []
    query = ""
    for i in range(len(tags)):
        tags[i] = ps.stem(tags[i])
        query = query + tags[i] + ';'
    query = query[0:-1]
    try:
        SITE = StackAPI('stackoverflow')
        SITE.page_size = page_size
        SITE.max_pages = max_pages
        questions = SITE.fetch('search', tagged=query, sort='relevance')
        for item in questions[u'items']:
            tags_ques = item[u'tags']
            for i in range(len(tags_ques)):
                tags_ques[i] = ps.stem(tags_ques[i])
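            # Score = (question tags not in the query) + (query tags missing from
            # the question) - (number of query tags); keep questions scoring below zero.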
            cnt = 0
            for tag in tags_ques:
                if tag not in tags:
                    cnt += 1
            temp = len(tags) - len(list(set(tags).intersection(tags_ques)))
            cnt = cnt + (temp - len(tags))
            if cnt < 0:
                ques_id.append(item[u'question_id'])
                ques_score.append(cnt)
                ques_body.append(item[u'title'])

    except StackAPIError as e:
        print(e.message)
    print(ques_id[1])
    return ques_id, ques_score, ques_body
Example #20
def stackoverflow(command):
    try:
        site = StackAPI('stackoverflow')
        engine.say("tell id of question")
        engine.runAndWait()
        id = listentomic()
        #id = input("enter id of question:")
        question = site.fetch('questions/%s' % (id))
        print(question)
        engine.say("what do you want from this question")
        engine.runAndWait()
        req = listentomic()
        #req = input("what do you want from this question:")
        if req == 'wrong':
            stackoverflow(command)
        else:
            engine.say("the %s for this question is" % req)
            engine.runAndWait()
            print(question["items"][0][req])

    except stackapi.StackAPIError as e:
        print("   Error URL: {}".format(e.url))
        print("   Error Code: {}".format(e.code))
        print("   Error Error: {}".format(e.error))
        print("   Error Message: {}".format(e.message))
Example #21
def buscar_questoes(tag="python"):
    # pt.stackoverflow site definition
    sopt = StackAPI("pt.stackoverflow")

    # configure the number of results
    sopt.page_size = 100
    sopt.max_pages = 1
    resultado = []
    # fetch questions for the tag within the time window (currently 1 day)
    questoes_python = sopt.fetch('questions',
                                 min=1,
                                 fromdate=tsInicio,
                                 todate=tsHoje,
                                 tagged=tag)
    # return str(html.unescape(questoes_python['items'][0]['title']))

    for i in range(0, len(questoes_python['items'])):
        resultado.append("""
        Titulo: {}
        Link: {}
        Criacao: {}
        """.format(html.unescape(questoes_python['items'][i]['title']),
                   questoes_python['items'][i]['link'],
                   questoes_python['items'][i]['creation_date']))
    return resultado
Example #22
def user_rank(user_url):
    '''
    Calculate user rank
    :param user_url: Stackoverflow url
    :return: Rank
    '''
    try:
        url = user_url
        user_option_file = 'stackoverflowapi\\options\\user-options.json'
        parse_web_name = 'stackoverflow'
        user_id = url.split('/')[4]

        SITE = StackAPI(parse_web_name)
        user_details = SITE.fetch('users/' + user_id)
        items = user_details['items'][0]

        selected_options = dict()
        user_options = dict(json.load(open(user_option_file)))
        for key in user_options.keys():
            if user_options[key] == 1:
                selected_options[key] = items[key]

        calculated_rank = rank_calculator(selected_options['reputation'])

        user_data_json = {
            "user_details": [selected_options],
            "Rank": calculated_rank
        }

        return user_data_json
    except StackAPIError as e:
        print(e.message)
Example #23
def get_stored_questions(API_name,
                         Start_date=Date(1, 1, 2010),
                         End_date=Date(12, 18, 2018)):
    site = StackAPI('stackoverflow')

    site.max_pages = 1

    questions = site.fetch('questions',
                           fromdate=datetime(2010, 1, 1),
                           todate=datetime(2018, 12, 18),
                           tagged=API_name)

    stored_quest = dict()
    stored_quests = list()

    # iterate over the question dicts stored in questions['items']
    for lists in questions['items']:
        # stores only the relevant key value pairs into a new dict stored_quests
        stored_quest = {
            "title": lists.get("title"),
            "score": lists.get("score"),
            "is_answered": lists.get("is_answered"),
            "tags": lists.get("tags"),
            "question_id": lists.get("question_id"),
            "link": lists.get("link")
        }
        stored_quests.append(stored_quest)

    return stored_quests
Example #24
def get_questions_and_answers(topic, questionIds):
    if len(questionIds) == 0: return []
    try:
        if topic not in valid_sites:
            raise Exception('Unsupported topic')

        encoded = ';'.join(questionIds)
        method = 'questions/{}/answers'.format(encoded)
        SITE = StackAPI(topic, key=APP_KEY, access_token=ACCESS_TOKEN)

        response = SITE.fetch(method, filter='!-*jbN.OXKfDP')
        answers = response['items']

        curated = []
        for a in answers:
            rep = False
            for i, c in enumerate(curated):
                if a['question_id'] == c['question_id']:
                    curated[i]['answers'].append(a['body'])
                    rep = True
            if not rep:
                curated.append({
                    'question_id': a['question_id'],
                    'question_title': a['title'],
                    'answers': [a['body']]
                })
        print(len(curated))
        return curated
    except Exception as e:
        print(e)
        return []
Example #25
 def test_nonsite_parameter(self):
     """Testing that it can retrieve data on end points that don't want
     the `site` parameter. Tested using Jeff Atwood's user id"""
     with patch('stackapi.StackAPI.fetch', fake_stackoverflow_exists) as mock_site:
         site = StackAPI('stackoverflow')
     site._api_key = None
     with patch('stackapi.StackAPI.fetch', fake_users) as mock_users:
         self.assertGreaterEqual(len(site.fetch('/users/1/associated')['items']), 1)
Example #26
	def __init__(self):
		SITE = StackAPI('stackoverflow')		
		# Number of items per page
		SITE.page_size = 10
		# Number of api calls
		SITE.max_pages = 1
		# Get the current date and time as a datetime object
		self.date = datetime.now()
		# Get dates for the past week, where the first day of the week is Monday (0)
		# and last day of the week is Sunday (6)
		interval = self.past_week()
		# Get the top-rated android questions from the past week
		self.top = SITE.fetch('questions', fromdate=interval[0], 
			todate=interval[1], sort='votes', tagged='android')['items']
		# Get the most recent android questions
		self.new =  SITE.fetch('questions', sort='creation', 
			order='desc', tagged='android')['items']
Example #27
    def getuserdatastack():

        # Get the user's Stack Overflow ID from the entry field and fetch their data, with exception handling
        stackiddata = stackidentry.get()
        try:
            stackiddata = int(stackiddata)
            SITE = StackAPI('stackoverflow')
            Stags = SITE.fetch('/users/{}/tags'.format(stackiddata),
                               site='stackoverflow')
            questions = SITE.fetch('/users/{}/questions'.format(stackiddata),
                                   site='stackoverflow')
            tagsnum = 0
            tagslist = []
            tagsstr = ""
            #converting JSON data into coherent data
            for i in questions['items']:
                tagsnum = tagsnum + 1
            for i in range(int(tagsnum)):
                a = (questions['items'][i]['tags'])
                for j in a:
                    tagslist.append(j)
            tagsdict = {}
            for i in tagslist:
                tagsdict[i] = tagslist.count(i)
            newlist = sorted(tagsdict.items(),
                             key=lambda x: x[1],
                             reverse=True)
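            # Keep only the five most frequent tags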
            var = 0
            for s in newlist:
                var = var + 1
            for i in range(var - 5):
                newlist.pop()
            datadict = {k: v for k, v in newlist}
            #visualising your interests
            df = pd.DataFrame.from_dict(datadict, orient='index')
            df.reset_index(inplace=True)
            df.columns = ['Personality', 'Percentile']
            plt.figure(figsize=(15, 5))
            sns.barplot(x="Personality", y="Percentile", data=df)
            plt.show()
        except Exception:
            exceptionlabel = Label(third_window).config(text="")
            exceptionlabel = Label(
                third_window,
                text="Please make sure you have entered a number")
            exceptionlabel.grid(row=2, column=1)
Example #28
def get_stored_answers(id):
    site = StackAPI('stackoverflow')

    site.max_pages = 1

    answers = site.fetch('answers/{ids}', ids=[id])

    return answers
Example #29
def test_buscar_questoes():
    sopt = StackAPI("pt.stackoverflow")
    sopt.page_size = 100
    sopt.max_pages = 1
    questoes_python = sopt.fetch('questions',
                                 min=1,
                                 fromdate=1534582800,
                                 todate=1534636800,
                                 tagged='python')
    assert 1534625951 == questoes_python['items'][0]['creation_date']
Example #30
def get_tags(parameter_list, count, sitename, maxpage, pagesize, page_no):
    try:
        SITE = StackAPI(sitename)
        SITE.max_pages = maxpage
        SITE.page_size = pagesize
        return SITE.fetch('tags', page=page_no)
    except Exception:  # fall back to the site name from parameter_list
        try:
            SITE = StackAPI(parameter_list[count])
            SITE.max_pages = maxpage
            SITE.page_size = pagesize
            return SITE.fetch('tags', page=page_no)
        except stackapi.StackAPIError as e:
            print(" Error URL: {}".format(e.url))
            print(" Error Code: {}".format(e.code))
            print(" Error Error: {}".format(e.error))
            print(" Error Message: {}".format(e.message))

            return 0
Example #32
File: api.py Project: dyeray/hmu37
class StackOverflowApi:

    def __init__(self, page_size=1):
        self.SITE = StackAPI('stackoverflow')
        self.SITE.page_size = page_size

    def get_answers(self, question_keyword):

        question_urls = {}

        def is_valid_question(question):
            return question['is_answered']

        def process_and_get_question(question):
            question_urls[question['question_id']] = question['link']
            return question

        def get_questions():
            questions = self.SITE.fetch('search/advanced', q=question_keyword, sort='votes')['items']
            return [process_and_get_question(question) for question in questions if is_valid_question(question)]

        def is_valid_answer(answer):
            return answer['is_accepted']

        def fetch_answers(question_ids):
            return self.SITE.fetch('questions/{ids}/answers', ids=question_ids, sort='votes')['items']

        def get_answers_metadata():
            question_ids = [question['question_id'] for question in get_questions()]
            return [answer for answer in fetch_answers(question_ids) if is_valid_answer(answer)]

        def create_answer(answer):
            return {
                'content': answer['body'],
                'question_url': question_urls[answer['question_id']],
            }

        def get_answer_bodies(answer_ids):
            return self.SITE.fetch('answers/{ids}', ids=answer_ids, filter='withbody')['items']

        answers_ids = [answer['answer_id'] for answer in get_answers_metadata()]
        return json.dumps([create_answer(answer) for answer in get_answer_bodies(answers_ids)])
Example #33
def stackoverflow_data(request):
    SITE = StackAPI('stackoverflow')
    questions = SITE.fetch('questions',
                           page=1,
                           pagesize=2,
                           order='desc',
                           sort='week')
    context = {
        'questions': questions,
    }
    return JsonResponse(context)
Example #34
def retrieve_post(url):
    """
    Retrieves post information
    :param url: URL of post to retrieve
    :return:
        dict: The post returned from the API
        string: The endpoint utilized
    """

    url_regex = r"((?:https?:)?//(.*)\.(?:com|net)/((?:q(?:uestions)?|a(?:nswer)?))/(\d+)(?:/)?(?:\d+|(?:\w|-)+)?(?:/\d+)?(?:#(\d+))?)"
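    # Capture groups: 2 = site host, 3 = q/questions/a/answer, 4 = post id,
    # 5 = answer id from an optional #fragment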
    endpoint_dict = {  # quick lookup for determining the appropriate end point
        "q": "questions",
        "questions": "questions",
        "a": "answers",
        "answers": "answers",
    }

    matches = re.compile(url_regex, re.IGNORECASE).match(url)
    try:
        site_parameter = matches.group(2).split(".")[0]  # Not all sites are top level, some are site.stackexchange.com
        if site_parameter in ['ru', 'pt']:
            site_parameter += ".stackoverflow"
    except AttributeError:
        logging.critical("URL Error: {}".format(url))
        logging.critical("   Groups: {}".format(matches))
        logging.critical("   Groups: {}".format(matches.groups()))
        return
    if matches.group(5) is None:
        endpoint = endpoint_dict[matches.group(3)]
        post_id = matches.group(4)
    else:
        if matches.group(3) in ['q', 'questions']:
            endpoint = 'answers'
            post_id = matches.group(5)

    if endpoint == "questions":
        filter = user_settings.API_QUESTION_FILTER
    elif endpoint == "answers":
        filter = user_settings.API_ANSWER_FILTER

    try:
        SITE = StackAPI(site_parameter, key=user_settings.API_KEY, access_token=user_settings.ACCESS_TOKEN)
    except StackAPIError as e:
        logging.critical("API Error occurred.")
        logging.critical("   Site Parameter: %s" % (site_parameter))
        logging.critical("   Error URL: %s" % (e.url))
        logging.critical("   Error Number: %s" % (e.error))
        logging.critical("   Error Code: %s" % (e.code))
        logging.critical("   Error Message: %s" % (e.message))
        return
    except ValueError:
        logging.critical("API Error occurred.")
        logging.critical("   Invalid Site name provided: {}".format(site_parameter))
        return

    post = SITE.fetch("{}/{}".format(endpoint, post_id), filter=filter)

    try:
        data = post['items'][0]
    except IndexError:
        logging.info("   No 'items' for {}/{}:".format(endpoint, post_id))
        data = None

    return data, endpoint