def data():
    """Flask view: collect related questions for ?query= and return them.

    Query params:
        query  -- search term (default "").
        format -- "json" (default) or "csv".

    Returns a CSV attachment when format=csv, otherwise JSON of the form
    {"response": [...], "graph": clusterz(...)}. Falls back to ``rebbit``
    when people_also_ask yields nothing.
    """
    query = request.args.get('query', '', type=str)
    frmt = request.args.get('format', 'json', type=str)

    # Depth-2 expansion: each first-level question plus its own related questions.
    output = []
    for o in people_also_ask.get_related_questions(query):
        output.append(o)
        output.extend(people_also_ask.get_related_questions(o))

    # Fallback source when Google returned nothing.
    if not output:
        output = rebbit(query)

    # Deduplicate (order is not guaranteed after set()).
    output = list(set(output))

    if frmt == "csv":
        # BUG FIX: previously wrote to 'temp.csv' but served a placeholder
        # path string — the two must refer to the same file.
        path = "temp.csv"
        pd.DataFrame(output).to_csv(path, header=False)
        return send_file(path,
                         mimetype='text/csv',
                         attachment_filename='Response.csv',
                         as_attachment=True)

    return jsonify({"response": output, "graph": clusterz([output], query)})
async def read_user_item(query: str):
    """Scrape Quora question titles for *query*, topping up from Google PAA.

    Renders the Quora search page with a headless browser, extracts question
    titles, and, if fewer than 12 were found, augments the list with
    people_also_ask results (depth 2).

    Returns:
        dict with keys "response" (list of question strings) and "graph"
        (clusterz output); both empty on any error.
    """
    try:
        url = "https://www.quora.com/search?q=" + query
        session = AsyncHTMLSession()
        r = await session.get(url)
        # JS-rendered page: one scroll, 1 s settle, 60 s budget.
        await r.html.arender(sleep=1, keep_page=True, scrolldown=1, timeout=60)
        a_tags = r.html.find('div.puppeteer_test_question_title')
        output = [a.text for a in a_tags]
        if len(a_tags) < 12:
            # Too few Quora hits — augment with Google "People also ask".
            for o in people_also_ask.get_related_questions(query):
                output.append(o)
                output.extend(people_also_ask.get_related_questions(o))
    except Exception as e:
        # Best-effort endpoint: report the failure, return an empty payload.
        print("Error", e)
        # BUG FIX: key was misspelled "resonse", so error responses did not
        # match the success schema and clients reading "response" broke.
        return {"response": [], "graph": []}
    return {"response": output, "graph": clusterz([output], query)}
def search(search_term, item=5):
    """Search Google "People also ask" for *search_term* and organize the output.

    Args:
        search_term: query string.
        item: maximum number of related questions to fetch (default 5).

    Returns:
        List of dicts with keys among 'question', 'response',
        'displayed_link', 'link', 'title' (only answered questions are kept;
        'link' is percent-decoded). Empty list on any error.
    """
    wanted_keys = ('question', 'response', 'displayed_link', 'link', 'title')
    try:
        paa = people_also_ask.get_related_questions(search_term, item)
        output = []
        for question in paa:
            response = people_also_ask.get_answer(question)
            # Skip questions Google has no answer box for.
            if not response.get("has_answer"):
                continue
            res = {k: response[k] for k in wanted_keys if k in response}
            # Links come back percent-encoded; decode for display/use.
            res["link"] = urllib.parse.unquote(res.get("link"))
            output.append(res)
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; keep the best-effort empty-list contract.
        output = []
    return output
def __people_also_ask(self, search_term: str):
    """Return up to 5 Google "People also ask" questions for *search_term*."""
    return people_also_ask.get_related_questions(search_term, 5)