def popUp(self, event):
    "called when tree item is clicked on"
    # Row under the cursor; empty string when the click lands outside rows.
    item = self.tree.identify("item", event.x, event.y)
    menu = Menu(self, tearoff=0)
    if item:
        file = self.files[int(item)]
        # The currently displayed file gets the default Comment dialog; other
        # rows pass False — presumably a "not current file" flag; confirm in Comment.
        if int(item) == self.index:
            menu.add_command(label="Add comment",
                             command=lambda: Comment(self, file))
        else:
            menu.add_command(label="Add comment",
                             command=lambda: Comment(self, file, False))
        # Toggle between add/remove tag depending on current tag state.
        if file in self.fileStorage.tagged:
            menu.add_command(
                label="Remove tag",
                command=lambda: self.untagFun(index=int(item)))
        else:
            menu.add_command(label="Add tag",
                             command=lambda: self.tagFun(index=int(item)))
    elif self.controlled:
        # Click outside any row: offer point-filtering actions instead.
        menu.add_command(label='Leave only Bad Points',
                         command=lambda: self.leaveOnlyFun("Bad Points"))
        menu.add_command(label='Leave only Reflections',
                         command=lambda: self.leaveOnlyFun("Reflections"))
        menu.add_command(
            label='Leave only Outside Points',
            command=lambda: self.leaveOnlyFun("Outside Points"))
        menu.add_separator()
        menu.add_command(label="Return all",
                         command=lambda: self.leaveOnlyFun("all"))
    # Show the popup at the absolute screen coordinates of the click.
    menu.post(event.x_root, event.y_root)
def get_av_comments(oid):
    """Fetch all comments (and their replies) for video *oid* and persist
    them via dbhelper."""
    fetch_url(f'https://api.bilibili.com/x/v2/reply?type=1&oid={oid}')
    pages = get_pages(oid)

    def as_row(data):
        # Flatten one raw reply dict into a Comment value tuple.
        # mid, username, gender, ctime, content, likes, rcounts, rpid
        return Comment(mid=data['mid'],
                       username=data['member']['uname'],
                       gender=data['member']['sex'],
                       ctime=data['ctime'],
                       content=data['content']['message'],
                       likes=data['like'],
                       rcount=data['rcount'],
                       rpid=data['rpid']).values()

    # Collect every top-level reply across all pages.
    replies = []
    for page in range(pages):
        replies.extend(parse_html(fetch_url(form_url(oid=oid, page=page))))

    # Top-level comments first, immediately followed by their sub-replies.
    comments = []
    for reply in replies:
        comments.append(as_row(reply))
        if reply['rcount'] > 0:
            comments.extend(as_row(sub) for sub in reply['replies'])

    dbhelper.insert_comment(c, conn, comments)
def main():
    """Demonstrate Book construction and printing, then attach comments to
    one book and show the repr of two of them."""
    pushkin = Book()
    andrianov = Book("Andrianov", "typing on PC", 2007, "self-teacher")
    gosher = Book("Gosher", "HTML5", 2015, "tutorial")
    for book in (pushkin, andrianov, gosher):
        print(book)
    pushkin.comments = [
        Comment(5, 'Awesome book, changed my perception of the life'),
        Comment(4, "Not bad, but Huxley's scenario seems more realistic to me"),
    ]
    for book in (pushkin, gosher):
        print(repr(book))
def post(self, post_id):
    """Handle a new-comment form submission on the post *post_id*."""
    # Authenticate via the signed "user_id" cookie.
    cookie_val = self.request.cookies.get("user_id")
    if cookie_val:
        uid = check_secure_val(cookie_val)
        if uid:
            current_u = User.get_by_id(int(uid))
            post = Post.get_by_id(int(post_id))
            if post:
                content = self.request.get('content')
                if content:
                    # Persist the comment and link its key to the post.
                    c = Comment(content=content,
                                created_by=current_u.username)
                    cput = c.put()
                    post.comments.append(c.key())
                    post.put()
                    self.redirect('/blog/%s' % post_id)
                else:
                    error = "Sorry. Looks like you're missing content"
                    self.render('newcomment.html',
                                username=current_u.username,
                                error=error)
            else:
                # Unknown post id: back to the blog index.
                self.redirect('/blog')
        else:
            # Cookie failed its signature check.
            self.send_back()
    else:
        # No login cookie present.
        self.send_back()
def parse_post(self, post, topic):
    """Parse one forum post's HTML into a Comment(author, text, date, topic).

    Only the first match of each element is used; raises IndexError when an
    expected element is missing from *post*.
    """
    print('parse_post')
    soup = BeautifulSoup(post, 'html.parser')
    # Locate body, author and date nodes using the site-specific element and
    # class names configured on this parser instance.
    text = soup.findAll(self.post_text_element, {'itemprop': 'commentText'})
    author = soup.findAll(self.post_author_element,
                          {'class': self.post_author_class})
    date = soup.findAll(self.post_date_element,
                        {'class': self.post_date_class})
    # Cleanup: the original carried large commented-out code regions as bare
    # triple-quoted string literals (executable expression statements) plus
    # debug prints; removed as dead code. Behavior is unchanged.
    author = author[0].text.strip()
    text = text[0].text.strip()
    date = date[0].text
    return Comment(author, text, date, topic)
def post(self, post_id):
    """Create a comment on a blog post and bump its comment counter."""
    username = self.user.name
    comment = self.request.get("comment")
    owner = username
    # The form's post_id field overrides the URL parameter.
    post_id = self.request.get("post_id")
    post_id = int(post_id)
    q = Blog_post.by_id(post_id)
    parent = q.key()  # Comment's parent is key of associated post
    subject = q.subject
    if username:  # Check that user is valid
        if comment:  # and comment is populated
            c = Comment(parent=parent, owner=owner, comment=comment,
                        subject=subject)
            c.put()
            # Keep the denormalized comment count in sync with the data.
            q.comment_count += 1
            q.put()
            self.redirect("/posts/{}".format(post_id))
        else:
            error = "Please add your comment."
            self.render_form(subject, comment, username, error, post_id)
    else:
        self.redirect('/')
def makeComment(cls, bestCandidate):
    """Download and parse a pressreader article, returning it as a Comment.

    :param bestCandidate: URL path appended to the pressreader host
    :return: Comment(title, body); Comment('', '') when parsing fails
    """
    url = f"https://www.pressreader.com{bestCandidate}"
    article = Article(url, browser_user_agent="Googlebot-News",
                      keep_article_html=True)
    article.download()
    try:
        article.parse()
    # BUGFIX: narrowed from a bare "except:", which would also swallow
    # SystemExit and KeyboardInterrupt.
    except Exception:
        return Comment('', '')
    # Strip soft hyphens that pressreader injects into the text.
    title = article.title.replace("\xad", "")  # clean the text
    body = article.text.replace("\xad", "")  # clean the text
    print(f"checking the article in this url: {url} with title {title}")
    return Comment(title, body)
def handle(cls, url):
    """Fetch a CNA Lifestyle article through its GraphQL API and return it
    as a Comment(title, body)."""
    html = requests.get(url).text
    soup = BeautifulSoup(html, "html.parser")
    # The meta tag's content is a path like "/<categoryId>/<articleId>/...";
    # split it to recover the two ids the GraphQL query needs.
    article_info = soup.find(
        "meta", {"name": "cXenseParse:mdc-targeturl"})["content"].split('/')
    category_id = article_info[1]
    article_id = article_info[2]
    # Pre-URL-encoded GraphQL "article" query; the two ids are interpolated
    # into the "variables" parameter in the final segment.
    CNALIFESTYLE_API_URL = (
        f"https://cnalifestyle.channelnewsasia.com/graphql?query=query"
        f"%20article(%24articleId%3A%20String!%2C%20%24categoryId%3A%"
        f"20String!)%20%7B%0A%20%20article(articleId%3A%20%24articleId%2C"
        f"%20categoryId%3A%20%24categoryId)%20%7B%0A%20%20%20%20id%0A"
        f"%20%20%20%20title%0A%20%20%20%20metaTitle%0A%20%20%20%20image%0A"
        f"%20%20%20%20imageWidth%0A%20%20%20%20imageHeight%0A"
        f"%20%20%20%20category%0A%20%20%20%20date%0A%20%20%20%20sharing%0A"
        f"%20%20%20%20exclusive%0A%20%20%20%20link%0A"
        f"%20%20%20%20hasSubjectTaxonomy%0A%20%20%20%20currentContext%20"
        f"%7B%0A%20%20%20%20%20%20title%0A%20%20%20%20%20%20link%0A"
        f"%20%20%20%20%20%20__typename%0A%20%20%20%20%7D%0A"
        f"%20%20%20%20author%20%7B%0A%20%20%20%20%20%20title%0A"
        f"%20%20%20%20%20%20__typename%0A%20%20%20%20%7D%0A"
        f"%20%20%20%20contexts%20%7B%0A%20%20%20%20%20%20title%0A"
        f"%20%20%20%20%20%20link%0A%20%20%20%20%20%20__typename%0A"
        f"%20%20%20%20%7D%0A%20%20%20%20defaultPhoto%20%7B%0A"
        f"%20%20%20%20%20%20link%0A%20%20%20%20%20%20__typename%0A"
        f"%20%20%20%20%7D%0A%20%20%20%20photos%20%7B%0A"
        f"%20%20%20%20%20%20teaserText%0A%20%20%20%20%20%20detailText%0A"
        f"%20%20%20%20%20%20photo%20%7B%0A%20%20%20%20%20%20%20%20link%0A"
        f"%20%20%20%20%20%20%20%20__typename%0A%20%20%20%20%20%20%7D%0A"
        f"%20%20%20%20%20%20__typename%0A%20%20%20%20%7D%0A"
        f"%20%20%20%20video%20%7B%0A%20%20%20%20%20%20id%0A"
        f"%20%20%20%20%20%20ooyalaId%0A%20%20%20%20%20%20duration%0A"
        f"%20%20%20%20%20%20playerId%0A%20%20%20%20%20%20category%0A"
        f"%20%20%20%20%20%20sk%20%7B%0A%20%20%20%20%20%20%20%20name%0A"
        f"%20%20%20%20%20%20%20%20__typename%0A%20%20%20%20%20%20%7D%0A"
        f"%20%20%20%20%20%20ner%20%7B%0A%20%20%20%20%20%20%20%20name%0A"
        f"%20%20%20%20%20%20%20%20__typename%0A%20%20%20%20%20%20%7D%0A"
        f"%20%20%20%20%20%20__typename%0A%20%20%20%20%7D%0A"
        f"%20%20%20%20audio%20%7B%0A%20%20%20%20%20%20duration%0A"
        f"%20%20%20%20%20%20__typename%0A%20%20%20%20%7D%0A"
        f"%20%20%20%20html%0A%20%20%20%20teaserText%0A"
        f"%20%20%20%20contentLength%0A%20%20%20%20postedDate%0A"
        f"%20%20%20%20__typename%0A%20%20%7D%0A%7D%0A"
        f"&operationName=article&variables=%7B%22categoryId%22%3A"
        f"%22{category_id}%22%2C%22articleId%22%3A%22{article_id}%22%7D")
    article = json.loads(
        requests.get(CNALIFESTYLE_API_URL).content)["data"]["article"]
    title = article["title"]
    body = ""
    # Re-parse the article's HTML payload and keep only paragraph text,
    # skipping figure/picture wrappers.
    article_soup = BeautifulSoup(article["html"], "html.parser")
    for line in article_soup.find_all(
            cls.is_p_tag_without_figure_or_picture):
        body += line.get_text() + "\n\n"
    return Comment(title, body.strip())
def post(self, post_id):
    """Save a new comment on post *post_id*; requires a logged-in user."""
    if not self.user:
        return self.redirect("/login")
    key = db.Key.from_path('Post', int(post_id))
    p = db.get(key)
    commentin = self.request.get("comment")
    # Preserve the user's line breaks when the comment renders as HTML.
    comment = commentin.replace('\n', '<br>')
    commentauthor = self.user.name
    commentid = int(p.key().id())
    if commentauthor and comment and commentid:
        c = Comment(comment=comment, commentauthor=commentauthor,
                    commentid=commentid)
        c.put()
        # Give the eventually-consistent datastore a moment before the
        # redirected page re-queries comments.
        time.sleep(0.1)
        self.redirect("/blog")
    else:
        error = "Enter your text in the comment box"
        return self.render("newcomment.html", p=p, subject=p.subject,
                           content=p.content, error=error)
def update_leaderboard(self, score, scoreText="Score", ascending=False,
                       leaderboard_indicator=".leaderboard.data.donot.remove\n"):
    """Record *score* for the current PR in the CSV leaderboard stored as a
    comment on PR #1, then post the refreshed board on the current PR.

    :param score: numeric score to record for this PR
    :param scoreText: header label for the score column (new boards only)
    :param ascending: sort direction for the leaderboard
    :param leaderboard_indicator: marker prefix identifying the data comment
    """
    pr_number = self.comment.pr.number
    pr_sender = self.comment.pr.user.login
    entry = "#" + str(pr_number) + " by " + str(pr_sender)

    comment_pr1 = Comment(pr_number=1)
    values = comment_pr1.get_comment(leaderboard_indicator)
    # BUGFIX: the original tested "len(values) is 0" (identity comparison on
    # an int — a CPython small-int accident) and guarded on
    # "comment_pr1 is None", which is always False right after construction.
    if not values:
        # No existing board: start a fresh CSV with just the header row.
        leaderboard_content = scoreText + ", Entity"
    else:
        leaderboard_content = values[0]

    # Delete the csv comment; it is re-added below with the new entry.
    comment_pr1.del_comment(leaderboard_indicator)

    df = pd.read_csv(StringIO(leaderboard_content))
    df.loc[len(df.index)] = [score, entry]
    df = df.sort_values(by=df.columns[0], ascending=ascending)
    new_leaderboard_content = df.to_csv(index=False)
    comment_pr1.add_comment(leaderboard_indicator + new_leaderboard_content)

    # Add new leaderboard results as a comment on the current PR.
    leaderboard_md = "## New Leaderboard\n" + df.to_markdown()
    self.comment.add_comment(leaderboard_md)
def handle(cls, url):
    """Extract the article at *url* and return it as a Comment(title, body)."""
    html = requests.get(url).text
    soup = BeautifulSoup(html, "html.parser")
    # https://stackoverflow.com/a/24618186
    # We only want article text, not inline scripts or inline jss
    for script in soup(["script", "style"]):
        script.extract()
    # Plant markers to denote the start and end of the article
    start_marker = "EXTRACT_START"
    end_marker = "EXTRACT_END"
    soup.find(name="div", class_="post-date").insert(0, start_marker)
    soup.find(name="span", class_="author-name").append(end_marker)
    # Slice the flattened page text between the two planted markers.
    article_start = soup.text.index(start_marker) + len(start_marker)
    article_end = soup.text.index(end_marker)
    unwrapped_body = soup.text[article_start:article_end]
    # Re-wrap to 80 columns, then double each newline because Markdown
    # needs a blank line to start a new paragraph.
    article_body = "\n".join(textwrap.wrap(unwrapped_body, 80))
    article_body = article_body.replace(
        "\n", "\n\n")  # Markdown requires 2 \n to
    # create a new paragraph
    article_title = soup.find(name="h2", class_="post-title").text
    # Normalize non-breaking spaces in the body before returning.
    return Comment(article_title, article_body.replace("\xa0", " "))
def new_comment():
    """Handle a POSTed comment form: persist the comment, then redirect
    back to the post it belongs to."""
    if request.method == 'POST':
        parent_post = Post.find(request.form['post_id'])
        comment = Comment(None, parent_post, request.form['message'])
        comment.create()
        return redirect(url_for('show_post', id=parent_post.id))
def get_users(self, path_to_post) -> [Comment]:
    """Open an Instagram post in the browser and scrape its comments.

    NOTE(review): each element appended to *result* wraps the Comment in a
    one-element list, so the return value is a list of lists — this looks
    unintentional given the ``[Comment]`` annotation; confirm with callers.
    """
    self.driver.get(path_to_post)
    self._logger.info('instagram is loaded')
    # We need to reveal all comments
    # Click plus button until it exists
    while True:
        try:
            self.driver.find_element_by_class_name('afkep').click()
            time.sleep(self._delay)  # Short delay to let button get ready
        except selenium.common.exceptions.NoSuchElementException:
            break
    self._logger.info('all comments revealed')
    # Each 'Mr508' element holds one comment's text/user/timestamp nodes.
    comments = self.driver.find_elements_by_class_name('Mr508')
    result = []
    for comment in comments:
        result.append([Comment(
            text=comment.find_element_by_tag_name('span').text,
            user=comment.find_element_by_class_name('ZIAjV').text,
            date=comment.find_element_by_tag_name('time').get_attribute('datetime')
        )])
    return result
def comments(self):
    """Fetch every comment on this wall post from the VK API.

    Comments come in pages of 100; the 0.35 s sleeps keep the client under
    VK's request rate limit. Deleted comments are filtered out.
    """
    response = self.vk_api.wall.getComments(owner_id=OWNER_ID,
                                            v=VERSION,
                                            post_id=self.id_,
                                            count=100)
    time.sleep(0.35)
    raw_comments = response['items']
    if response['count'] > 100:
        # NOTE(review): range(count // 100 + 1) can request one page past
        # the end; the extra call simply returns no items.
        for i in range(response['count'] // 100 + 1):
            new_comments = self.vk_api.wall.getComments(owner_id=OWNER_ID,
                                                        v=VERSION,
                                                        post_id=self.id_,
                                                        count=100,
                                                        offset=100 * (i + 1))
            raw_comments += new_comments['items']
            time.sleep(0.35)
    comments = []
    for item in raw_comments:
        if not item.get('deleted', False):
            comments.append(
                Comment(self, item['id'], item['from_id'], item['text']))
    return comments
def train_test_model_with_context(train_dir, indir, outdir):
    '''Custom training and testing SSAE model
    :param train_dir: Path to JSON file containing training examples
    :param indir: Path to LOG file containing examples as Comment() object (which has already been classified by Bert)
    :param outdir: Path to LOG file to be created by adding prediction of this model as well'''
    import random
    random.seed(1234567)
    import tensorflow as tf
    # Training requires a GPU; refuse to run on CPU-only machines.
    if tf.test.is_gpu_available():
        strategy = tf.distribute.MirroredStrategy()
        print('Using GPU')
    else:
        raise ValueError('CPU not recommended.')
    with strategy.scope():
        # Pre-trained vocabulary and embeddings for the sentence encoder.
        vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')
        embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')
        reader = JSONPerLineDocumentReader(train_dir, True)
        e = ClassificationExperiment(
            reader,
            StructuredSelfAttentiveSentenceEmbedding(vocabulary, embeddings),
            ClassificationEvaluator())
        # Label the external test set with the freshly trained model.
        test_comments = TokenizedDocumentReader(indir)
        result = e.label_external(test_comments)
        for k in result.keys():
            print(f'{k}: {result[k]}')
        # Re-read the pickled Comment objects from indir so this model's
        # predictions can be attached alongside the earlier (Bert) ones.
        instances = dict()
        e = Comment(-1, 'lol', 'ah')  # placeholder; overwritten by pickle.load below
        f = open(indir, 'rb')
        try:
            while True:
                e = pickle.load(f)
                print(e)
                instances[str(e.id)] = e
        except EOFError:
            # Normal termination: end of the pickle stream.
            f.close()
        # Append this model's label/score to each instance and re-serialize.
        f = open(outdir, 'wb')
        for key in result.keys():
            model_label, model_score = result[key]
            model_label = model_label.lower()
            # Score of the predicted class; index 0 holds the 'none' class.
            score = model_score[1]
            if model_label == 'none':
                score = model_score[0]
            instances[key].add_model(model_type, model_label, score, None)
            e = instances[key]
            print(e)
            print(e.labels)
            print(e.scores)
            print('=' * 20)
            pickle.dump(instances[key], f)
        f.close()
def send_report():
    """Validate a report request and kick off report generation.

    Expects JSON with: ministry, project, comments (comma-separated string),
    time_stamp (comma-separated string, same length as comments), email.
    """
    if not request.json:
        return jsonify({"error": "no data"}), 422
    # BUGFIX: the original checked the wrong key names ("ministry_name",
    # "project_name") and joined the checks with "and", so the request was
    # rejected only when *every* key was absent. Check the keys that are
    # actually read below, and reject when *any* is missing.
    required = ("ministry", "project", "comments", "email", "time_stamp")
    if any(key not in request.json for key in required):
        return jsonify({"error": "need full data list ministry name, project name,comments,time stamp,email"}), 422

    ministry = request.json["ministry"]
    project = request.json["project"]
    comments = request.json["comments"]
    email = request.json["email"]
    time_stamp = request.json["time_stamp"]

    comments = comments.split(",")
    time_stamp = time_stamp.split(",")
    if not len(comments) == len(time_stamp):
        return jsonify({"error": "length of time stamp must be same of comments"}), 501

    # Build a Comment (with computed sentiment) per entry, pairing each
    # comment with its timestamp.
    Obj = []
    for i in range(len(comments)):
        c = Comment(text=comments[i],
                    time=time_stamp[i],
                    sentiment=Sentiment.get_sentiment(comments[i]))
        Obj.append(c)
    result, _ = make_report.start(comments=Obj,
                                  report_name=project,
                                  ministry=ministry,
                                  project=project,
                                  email=email)
    return jsonify({"sucess": result})
def add_comment():
    """Create a comment on a train line for the user identified by *token*."""
    # NOTE(review): error_message is currently unused — confirm whether a
    # failure path was meant to return it.
    error_message = "There was an error saving your comment!"
    data = request.get_json()
    print(data)
    # Timestamp the comment at the moment of receipt.
    time = datetime.now()
    line = data['line']['train']
    token = data['token']
    comment = data['comment']
    line_record = Line.select_one(line)
    user = User.select_token(token)
    print(token)
    print(user.username)
    print(user.pk)
    # time = datetime.strptime(d, "%Y-%m-%d %H:%M:%S").strftime("%Y-%m-%d %I:%M:%S %p")
    new_comment = Comment(comment=comment, time=time,
                          line_pk=line_record["pk"], user_pk=user.pk)
    new_comment.save()
    return jsonify({"comment": "made a comment!"})
def popUp(self, event):
    "called when tree item is right-clicked on"
    # Row under the cursor; empty string when clicking outside all rows.
    item = self.filesTree.identify("item", event.x, event.y)
    menu = Menu(self, tearoff=0)
    if item and self.shownFiles == "arenafiles":
        # Tag toggle, comment dialog, and track/room-file actions.
        if item in self.fileStorage.tagged:
            menu.add_command(label="Remove tag",
                             command=lambda: self.untagFun(item))
        else:
            menu.add_command(label="Add tag",
                             command=lambda: self.tagFun(item))
        menu.add_command(label="Add comment",
                         command=lambda: Comment(self, item))
        menu.add_separator()
        # Only clicks in the tree column (#0) offer the room-file action.
        if self.filesTree.identify("column", event.x, event.y) == "#0":
            menu.add_command(label="Open room file",
                             command=lambda: self.openRoomFile(item))
            menu.add_separator()
        menu.add_command(label="Show track",
                         command=lambda: self.showTracks(item))
    if item and self.shownFiles == "wrongfiles" and len(
            self.filesTree.selection()) == 2:
        # Exactly two selected "wrong" files can be paired manually.
        menu.add_command(label="Pair selected",
                         command=lambda: self.forcePair())
    menu.post(event.x_root, event.y_root)
def read_instances(self, input_path: str, instances_limit: int = -1) -> list:
    """Read pickled comment objects from *input_path* and convert each into
    a TokenizedDocument (string id + whitespace-split tokens).

    :param input_path: file of back-to-back pickled objects, each exposing
        ``id`` and ``text`` attributes
    :param instances_limit: stop after this many instances (-1 = no limit);
        the original accepted this parameter but ignored it
    :return: list of TokenizedDocument
    """
    instances = list()
    # Cleanup: removed a dead placeholder assignment (Comment(-1, ...)) and
    # an unused counter. "with" guarantees the file is closed even when
    # unpickling raises something other than EOFError.
    with open(input_path, 'rb') as f:
        try:
            while instances_limit < 0 or len(instances) < instances_limit:
                e = pickle.load(f)
                print(e)
                ee = TokenizedDocument()
                ee.id = str(e.id)
                ee.tokens = e.text.strip().split()
                print(ee)
                print('-' * 30)
                instances.append(ee)
        except EOFError:
            # Normal termination: end of the pickle stream.
            pass
    return instances
def post(self, post_subject):
    """Create a comment on the post identified by *post_subject*."""
    # If any user does not logged in redirect to homepage.
    # BUGFIX: the original called self.redirect('/') without returning, so
    # execution fell through and an unauthenticated request could still
    # create a comment (with self.user == None). Return immediately.
    if not self.user:
        return self.redirect('/')
    # Post page contains a form to post comments,
    # so a post request comes, lets put that comment into database
    post_to_comment = Post.find_by_subject(post_subject)
    # If post couldn't find redirect 404 page
    if not post_to_comment:
        self.error(404)
        return self.render('404.html')
    content = self.request.get('content').strip()
    if content:
        comment = Comment(content=content,
                          post=post_to_comment,
                          user=self.user)
        comment.put()
        self.redirect(post_to_comment.link_to('show'))
    else:
        errors = {'content': "can't be blank"}
        self.render("/posts/post.html",
                    post=post_to_comment,
                    errors=errors)
def from_dict(self, dic, reddit):
    """Rebuild this object's submission and comment list from *dic*."""
    # Resolve the source submission from its stored URL.
    self.src = praw.models.Submission(reddit, url=dic['src'])
    self.comments = []
    for raw_comment in dic['comments']:
        restored = Comment()
        restored.from_dict(raw_comment, reddit)
        self.comments.append(restored)
def get_by_id(comment_id):
    """Load a single comment row by primary key.

    :param comment_id: id of the comment to fetch
    :return: Comment built from the matching row
    """
    # BUGFIX: the id was interpolated straight into the SQL string
    # (injection risk, and broke on ids containing quotes); use a bound
    # parameter instead. NOTE(review): '?' assumes a qmark-paramstyle
    # driver (e.g. sqlite3) — use '%s' if the driver's paramstyle is
    # 'format'/'pyformat'.
    query = """
        SELECT *
        FROM comments c
        WHERE c.id = ?
    """
    cursor = conn.cursor()
    cursor.execute(query, (comment_id,))
    return Comment(row_to_dict(cursor, cursor.fetchone()))
def get_comment(self, comment_id):
    """Fetch a single comment by id; return None when the request fails or
    yields no payload."""
    status, result = self._send_template_request('getComment',
                                                {'comment_id': comment_id})
    if not (status and result):
        return None
    return Comment(self, result)
def get_users(conf):
    """Return all comments, newest first, as (comment_id, Comment) pairs.

    :param conf: connection config accepted by dbapi2.connect
    """
    with dbapi2.connect(conf) as connection:
        cursor = connection.cursor()
        # BUGFIX: the loop unpacks four values per row but the original
        # query selected only three columns (PRODUCT_ID was missing even
        # though it is passed to Comment), which raised ValueError on the
        # first row. NOTE(review): confirm the COMMENTS table has a
        # PRODUCT_ID column.
        query = ("SELECT COMMENT_ID, USER_ID, PRODUCT_ID, COMMENT "
                 "FROM COMMENTS ORDER BY COMMENT_ID DESC")
        cursor.execute(query)
        users = [(key, Comment(user_id, product_id, comment))
                 for key, user_id, product_id, comment in cursor]
        return users
def post(self, post_id):
    """Handle like clicks and new comments on the post *post_id*."""
    key = db.Key.from_path('Post', int(post_id), parent=blog_key())
    post = db.get(key)
    if not post:
        self.error(404)
        return
    c = ""
    if (self.user):
        # post-like value increases by clicking like.
        if (self.request.get('like') and
                self.request.get('like') == "update"):
            likes = db.GqlQuery("select * from Like where post_id = " +
                                post_id + " and user_id = " +
                                str(self.user.key().id()))
            # Users may not like their own post, and may like others' only once.
            if self.user.key().id() == post.user_id:
                self.redirect("/blog/" + post_id +
                              "?error=You cannot like your " + "own post!")
                return
            elif likes.count() == 0:
                like = Like(parent=blog_key(),
                            user_id=self.user.key().id(),
                            post_id=int(post_id))
                like.put()
                self.redirect("/blog/" + post_id +
                              "?error=You can like your " + "post once.!!!!")
                return
        comment = self.request.get('comment')
        if comment:
            c = Comment(parent=blog_key(),
                        user_id=self.user.key().id(),
                        post_id=int(post_id),
                        comment=comment)
            c.put()
        else:
            self.redirect("/blog/" + post_id +
                          "?error=You need to type your " + "comment.!!!!")
            return
    else:
        self.redirect("/login?error=First login and then " +
                      "try to edit, comment or like.!!")
        return
    # BUGFIX: the original concatenated post_id + "order by ..." with no
    # separating space, producing invalid GQL such as
    # "... where post_id = 5order by created desc".
    comments = db.GqlQuery("select * from Comment where post_id = " +
                           post_id + " order by created desc")
    likes = db.GqlQuery("select * from Like where post_id=" + post_id)
    self.render("permalink.html",
                post=post,
                comments=comments,
                likeCount=likes.count(),
                new=c)
def split_set_in_consecutive_parts(xml_file, parts_size): parts = [[] for _ in range(parts_size)] # First, count all the threads in the file tree = ET.parse(xml_file) root = tree.getroot() threads_count = 0 for thread in root: threads_count += 1 counter = 0 part_size = int(threads_count / parts_size) # print('threads count:', threads_count) # print('part size:', part_size) for thread in root: counter += 1 question_tag = thread[0] question_id = question_tag.attrib['RELQ_ID'] question_subject = question_tag[0].text question_text = question_tag[1].text question_category = question_tag.attrib['RELQ_CATEGORY'] question_user = question_tag.attrib['RELQ_USERID'] question_date = question_tag.attrib['RELQ_DATE'] question_subject = ignore_non_utf8(question_subject) question_text = ignore_non_utf8(question_text) question_fact_label = question_tag.attrib['RELQ_FACT_LABEL'] if question_fact_label == 'Single Question - Factual': parts_index = -1 for index, comment_tag in enumerate(thread): if index > 0: # the 0 index was processed above - it is the question comment_id = comment_tag.attrib['RELC_ID'] comment_text = comment_tag[0].text comment_user = comment_tag.attrib['RELC_USERID'] comment_date = comment_tag.attrib['RELC_DATE'] comment_fact_label = comment_tag.attrib['RELC_FACT_LABEL'] comment = Comment(question_id, comment_id, question_category, question_subject, question_text, comment_text, comment_user, comment_date) label = get_label(comment_fact_label) if label > -1: # if any of the comments is in the labels, add it to the subset part comment.label = label if parts_index == -1: #parts_index = rand.randint(0,parts_size-1) parts_index = int((counter - 1) / part_size) if parts_index >= parts_size: parts_index -= 1 #print(counter, parts_index) parts[parts_index].append(comment) # for part in parts: # print(len(part), part) return parts
def get_video_comments(self, video_id):
    """Method to return list of video comments

    Args:
        video_id (str): Youtube video unique id from url

    Returns:
        list: comment objects including replies
    """
    try:
        parameters = {
            'textFormat': 'plainText',
            'part': "snippet,replies",
            'videoId': video_id,
            'maxResults': 100
        }
        comments = []
        # Kept for its possible side effect of initializing the client;
        # the return value was never used in the original either.
        self.get_endpoint()
        results = self.service.commentThreads().list(
            **parameters).execute()
        # BUGFIX: the original only processed items inside a
        # "while next_page_token" loop, so a video whose response carried
        # no nextPageToken (a single page of comments) yielded an empty
        # list. Process every page, including the first, then follow the
        # token only if one is present.
        while True:
            for item in results['items']:
                comment_text = item['snippet']['topLevelComment'][
                    'snippet']['textDisplay']
                comment_id = item['id']
                comment = Comment(comment_text, comment_id, video_id)
                comments.append(comment)
                # get replies to each comment
                if 'replies' in item.keys():
                    for reply_item in item['replies']['comments']:
                        reply_text = reply_item['snippet']['textDisplay']
                        reply_id = reply_item['id']
                        reply = Comment(reply_text, reply_id, video_id)
                        comments.append(reply)
            next_page_token = results.get('nextPageToken')
            if not next_page_token:
                break
            parameters['pageToken'] = next_page_token
            results = self.service.commentThreads().list(
                **parameters).execute()
        return comments
    except KeyError as key_error:
        raise
    except Exception as e:
        raise
def handle(cls, url):
    """Download and parse the article at *url*, returning its title and
    text wrapped in a Comment."""
    piece = Article(url)
    piece.download()
    piece.parse()
    return Comment(piece.title, piece.text)
def retrieveCommentsHelper(self, praw_comment_forest, depth=0):
    """Recursively walk a praw comment forest, registering each comment with
    its nesting depth; "load more" placeholders are skipped."""
    for node in list(praw_comment_forest):
        if isinstance(node, praw.models.MoreComments):
            continue
        self.addComment(Comment(node, depth))
        # Descend into this comment's replies one level deeper.
        self.retrieveCommentsHelper(node.replies, depth + 1)
def generate_f_data():
    """Generates fictional data to load the program."""
    # Two channels: one real topic and a placeholder.
    ch = Channel("Life", "This channel is made to talk about life")
    ch2 = Channel("None", "none")
    # Pick two distinct random registered users.
    u = User.users[random.choice(list(User.users.keys()))]
    u2 = User.users[random.choice(list(User.users.keys()))]
    while u == u2:
        u2 = User.users[random.choice(list(User.users.keys()))]
    a = Admin("Valentina", "Vvasquez", "*****@*****.**", "V123")
    # Questions, comments and ratings wiring the users and admin together.
    q = Question(u, ch, "What is life?")
    c = Comment(u2, q, "That is; in fact, a hard question, what would it be?")
    r = Rating(u2, q, "like")
    r1 = Rating(a, c, "dislike")
    q2 = Question(u2, ch, "What is love?")
    c2 = Comment(u, q2, "The affection you feel for someone; or even, something.")
    r2 = Rating(a, q2, "dislike")
    q3 = Question(a, ch, "What is Death?")
    # One news item authored by the admin.
    n = New(a, "The hollow has come!", "That witch has taken my daughthers body.", "Niklaus.", "Drama")