Example #1
def GetNewArticles(request):
    # Get the articles from RSS
    # aggregator = NewsAggregator()
    # list_of_articles = aggregator.feedreader()
    classifier = Classifier("filename.pkl")
    # Predict
    list_of_classes = []
    # with open("articles_dump", "wb") as dump:
    #     pickle.dump(list_of_articles, dump, pickle.HIGHEST_PROTOCOL)
    with open("articles_dump") as dump:
        list_of_articles = pickle.load(dump)
    for article in list_of_articles:
        list_of_classes.append(article["content"])
    # print list_of_classes
    res = classifier.predict(np.asarray(list_of_classes))

    for i in range(0, len(list_of_articles)):
        if res[i] == 1:
            cat = "Sports"
        elif res[i] == 2:
            cat = "Economy_business_finance"
        elif res[i] == 3:
            cat = "Science_technology"
        else:
            cat = "Lifestyle_leisure"
        element = list_of_articles[i]
        list_of_articles[i]["category"] = cat
        article = Article(article_title=element["title"], article_content=element["content"], article_category=cat)
        article.save()
    json_object = json.dumps(list_of_articles)
    return HttpResponse(json_object)
Example #2
    def handle(self, *args, **options):

        news = RSSNews(RSS_Links)
        telegraph = Telegraph(access_token=os.getenv('TELEGRAPH_ACCESS_TOKEN'))

        if news.urls:
            for url, date in news.urls.items():
                article = NewsPlease.from_url(url)

                a = Article(author=', '.join(article.authors) or 'Anonymous',
                            title=article.title,
                            short_text=article.description,
                            content=article.maintext,
                            date=date,
                            source_link=url,
                            img=article.image_url)
                a.save()

                response = telegraph.create_page(title=a.title,
                                                 html_content=a.content)

                TelegraphArticle(title=a.title, link=response['url']).save()

                bot.send_telegraph_msg(response['url'])

        self.stdout.write(self.style.SUCCESS('Success'))
Example #3
File: news.py Project: wleddy/news
def edit(article_handle='0'):
    setExits()
    g.title = "Article"
    articles = Article(g.db)
    
    #import pdb; pdb.set_trace()
    rec_id = cleanRecordID(article_handle)
    rec = articles.get(article_handle)
    if not rec and not rec_id == 0:
        flash('Could not find that article')
        return redirect(g.homeURL)
    
    if rec_id == 0:
        rec = articles.new()    
    
    #Is there a form?
    if request.form:
        #import pdb; pdb.set_trace()
        
        articles.update(rec,request.form)
        if valid_form(rec):
            if request.form['publication_date']:
                # convert to a date time
                rec.publication_date = getDatetimeFromString(request.form['publication_date'])
            try:
                articles.save(rec)
                g.db.commit()
                return redirect(g.homeURL)
            except Exception as e:
                g.db.rollback()
                flash(printException("Error when attempting to save article.", e))
                
    
    return render_template('news/article_edit.html',rec=rec)
Example #4
def feed_update():
    feed_updated_list = []
    article_updated_list = []
    feeds = Feed.objects.all()
    for feed in feeds:
        try:
            feed_update = feedparser.parse(feed.url, etag=feed.etag)
        except Exception:
            print(feed.title, "does not have an etag!")
            continue  # skip this feed; feed_update would be undefined or stale if parsing failed
        title_list = []
        if feed_update.status != 304:
            article_inner = []
            feed_updated_list.append(feed.title)
            existing_articles = Article.objects.all()  # don't reuse the name `feeds` here
            for existing in existing_articles:
                title_list.append(existing.title)
            for entry in feed_update.entries:
                if entry.title not in title_list:
                    article_inner.append(entry.title)
                    article = Article()
                    article.title = entry.title
                    article.url = entry.link
                    c = 'https?://(.*?)/'
                    try:
                        article.domain = re.findall(c, entry.link)[0]
                    except:
                        article.domain = entry.link
                    if feed.author:
                        article.author = feed.author
                    else:
                        article.author = entry.author
                    article.authorSlug = slugify(article.author)
                    # description cleanup: strip the trailing "The post ..." footer if present
                    footer = re.findall('<p>(The post.*?)</p>', entry.description)
                    if footer:
                        article.description = entry.description.replace(footer[0], '')
                    else:
                        article.description = entry.description
                    # end description cleanup
                    d = datetime.datetime(*(entry.published_parsed[0:6]))
                    dateString = d.strftime('%Y-%m-%d')
                    article.publication_date = dateString
                    article.feed = feed
                    article.practiceArea = feed.practiceArea
                    article.practiceAreaSlug = feed.practiceArea.replace(
                        " ", "_").lower()
                    article.save()
            article_updated_list.append(article_inner)
            try:
                feed.etag = feed_update.etag
            except:
                pass
            feed.save()
    return feed_updated_list, article_updated_list
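The example above relies on feedparser's conditional-fetch support: the stored etag is sent with the request, and an HTTP status of 304 means the feed has not changed. A minimal standalone sketch of that pattern (the feed URL is a placeholder):

import feedparser

# First fetch: remember the etag the server returned, if any.
first = feedparser.parse("https://example.com/feed.xml")
etag = getattr(first, "etag", None)

# Later fetch: send the stored etag; a 304 status means nothing changed.
later = feedparser.parse("https://example.com/feed.xml", etag=etag)
if getattr(later, "status", None) == 304:
    print("Feed unchanged; skip processing its entries.")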
Example #5
File: news.py Project: wleddy/news
def valid_form(rec):
    valid_form = True
    slug = request.form.get('slug','').strip()
    title = request.form.get('title','').strip()
    
    if not slug and title:
        slug = title.lower()
        for s in ' /<>"\'#.()':
            slug = slug.replace(s,'-')
        rec.slug = slug
    
    if not title:
        flash("The title may not be empty")
        valid_form = False
        
    if not slug:
        flash("The slug line may not be empty")
        valid_form = False
        
    if slug:
        sql = 'select * from article where slug = ? and id <> ?'
        find_slug = Article(g.db).select_raw(sql,(slug,rec.id))
        if find_slug and len(find_slug) > 0:
            valid_form = False
            flash("The slug line must be unique")
        
    # If present, the date must be valid format
    publication_date = request.form.get('publication_date','').strip()
    if publication_date:
        test_date = getDatetimeFromString(publication_date)
        if not test_date:
            valid_form = False
            flash('{} is not a valid date'.format(publication_date))
    
    return valid_form
Example #6
def check_for_updates():

    while True:
        try:
            posts = aj_gather_data()
            for post in posts:
                Article(title=post.title, content=post.text, source=post.link, category=categorizer(post.text)).save()
            posts = re_gather_data()
            for post in posts:
                Article(title=post.title, content=post.text, source=post.link, category=categorizer(post.text)).save()
            posts = ec_gather_data()
            for post in posts:
                Article(title=post.title, content=post.text, source=post.link, category=categorizer(post.text)).save()
            time.sleep(60)

        except ConnectionError:
            time.sleep(300)
Example #7
    def test_page_slugify_on_save(self):
        """Tests the slug generated when saving the article"""

        user = User()
        user.save()

        article = Article(title="My Test Page", content="test", author=user)
        article.save()
        self.assertEqual(article.slug, 'my-test-page')
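This test only passes if the Article model fills in its slug when saving. A minimal sketch of such a model is shown below; only title, content, author, and slug are implied by the test, so the concrete field types and the User import are assumptions.

from django.contrib.auth.models import User  # assumed author model
from django.db import models
from django.utils.text import slugify


class Article(models.Model):
    title = models.CharField(max_length=200)
    content = models.TextField()
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    slug = models.SlugField(blank=True)

    def save(self, *args, **kwargs):
        # Derive the slug from the title, e.g. "My Test Page" -> "my-test-page".
        if not self.slug:
            self.slug = slugify(self.title)
        super().save(*args, **kwargs)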
Example #8
def parse_article_from_link(link: str) -> Article:
    article = newspaper.Article(link, language='ru')
    article.download()
    article.parse()
    return Article(
        title=article.title,
        text=article.text,
        source=link,
        publish_date=article.publish_date,
    )
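Two different Article classes meet in this function: newspaper.Article handles the download and parsing, while the returned Article is the project's own container. A minimal sketch of that container, assuming a plain dataclass with exactly the four fields used in the constructor call:

from dataclasses import dataclass
from datetime import datetime
from typing import Optional


@dataclass
class Article:
    title: str
    text: str
    source: str
    publish_date: Optional[datetime] = None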
Example #9
def create_new_article(data):
    # map the incoming data dictionary onto a new Article record
    new_article = Article()
    new_article.title = data['title']
    new_article.author = data['author']
    new_article.publication_date = data['publication_date']
    new_article.summary = data['summary']
    new_article.article_image = data['image_url']
    new_article.article_url = data['article_url']
    new_article.save()
Example #10
File: news.py Project: wleddy/news
def display():
    setExits()
    #import pdb; pdb.set_trace()
    rendered_html = render_markdown_for(__file__,mod,'news/news.md')
    
    recs = Article(g.db).select()
    
    return render_template('news/news.html',
        rendered_html=rendered_html, recs = recs,
        )    
Example #11
File: news.py Project: wleddy/news
def delete(rec_id=None):
    setExits()
    g.title = "Article"
    
    if rec_id is None:
        rec_id = request.form.get('id', request.args.get('id', -1))
    
    rec_id = cleanRecordID(rec_id)
    if rec_id <= 0:
        flash("That is not a valid record ID")
        return redirect(g.listURL)
        
    rec = Article(g.db).get(rec_id)
    if not rec:
        flash("Record not found")
    else:
        Article(g.db).delete(rec.id)
        g.db.commit()
        
    return redirect(g.listURL)
Example #12
    def fetch_news(self, source_id=None, current_date=None):
        page = 1
        lang = settings.NEWS_API_LANG or 'en'
        while True:
            articles = self.fetch_api(
                page=page,
                source_id=source_id,
                current_date=current_date)

            if not articles:
                break

            for a in articles:

                if not a.get('url'):
                    continue

                article = Article.get_by_url(a.get('url'))
                if article:
                    continue

                pub_date = a.get('publishedAt')

                if pub_date:
                    pub_date = parse_datetime(pub_date)
                else:
                    pub_date = timezone.now()

                article = Article(
                    title=a.get('title'),
                    description=a.get('description'),
                    url=a.get('url').lower(),
                    url_image=a.get('urlToImage'),
                    lang=lang,
                    pub_date=pub_date)

                if a.get('source'):
                    source = a.get('source')
                    article.source = self.get_source(
                        source.get('id'),
                        source.get('name'))

                article.save()

                if a.get('author'):
                    author_names = a.get('author').split(',')
                    author_names = map(lambda x: x.strip(), author_names)
                    authors = map(self.get_author, author_names)
                    for author in authors:
                        article.authors.add(author)
                article.save()

            page += 1
Example #13
    def setUp(self):
        self.james = Editor(first_name='James',
                            last_name='Muriuki',
                            email='*****@*****.**')
        self.james.save_editor()

        # initialize tags
        self.new_tag = Tags(name='testing')
        self.new_tag.save()

        self.new_article = Article(title='Test Article',
                                   post='This is a random post',
                                   editor=self.james)
        self.new_article.save()

        self.new_article.tags.add(self.new_tag)
Example #14
File: news.py Project: wleddy/news
def view(article_handle=-1):
    setExits()
    
    rec = Article(g.db).get(article_handle)
    if not rec:
        flash("That article could not be found.")
        return redirect(g.homeURL)
        
    g.title = rec.title
    if len(rec.title) > 20:
        g.title = rec.title[:20] + "..."
    
    rendered_html = render_markdown_text(rec.words)
        
    return render_template('news/article.html',
        rendered_html=rendered_html, rec=rec,
        )       
Example #15
def parse_articles_from_rss(rss: str) -> List[Article]:
    articles: List[Article] = []
    entries = parse_rss_entries(rss)

    for entry in entries:
        # TODO: Probably extend to other languages
        article = newspaper.Article(entry.link, language='ru')
        article.download()
        article.parse()
        articles.append(
            Article(
                title=entry.title,
                text=article.text,
                source=entry.link,
                publish_date=entry.pub_date,
            ))

    return articles
Example #16
def check_for_new_links():

    requests_number = 0

    proxy = random.choice(proxies)

    for site in websites:

        print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
        print(site)

        path = "last_links/" + site + "/"

        with open("rss_links/" + site + ".txt", "r") as f:
            links = f.read().splitlines()
        for link in links:

            t_link = make_filename(link)
            name = path + t_link + ".txt"
            with open(name, "r") as last_links_file:
                old_links = last_links_file.read().splitlines()
            new_links = []
            i = 0

            for entry in feedparser.parse(link).entries:
                if entry['link'] not in old_links:
                    print(entry['link'])
                    proxy = parse_article(entry['link'], site, requests_number, proxy)
                    Article(title=proxy[0], content=proxy[1], source=entry['link'], category=article_classify(proxy[1])).save()
                    requests_number += 1
                    i += 1
                    new_links.append(entry['link'])

                else:
                    break

            with open(name, "w") as new_file:
                for new_link in new_links:
                    new_file.write(new_link + "\n")
                for old_link in old_links[:len(old_links) - i]:
                    new_file.write(old_link + "\n")
Example #17
        article_datetime = soup.select_one('span[title*=T]').attrs.__getitem__('title').split('T')[0].replace('-', '/')
        pub_date = datetime.strptime(article_datetime, '%Y/%m/%d').date()
    except:
        pass

    if not title or not article_description:
        print('no objects found')
    else:
        # No-op loop: after it, `detail` simply holds the last element of `content`.
        for detail in content:
            detail
        for img in images:
            item_id = random.randint(5, 1000)
            if url_img:
                dict_content = dict(id=item_id, article_feature_img=url_img, article_title=title.get_text(),
                                    article_desc=article_description.get_text(), article_content=str(detail),
                                    article_category=random.randint(1, 4), published_date=pub_date)
            else:
                print('no url img')
    return dict_content


for i in range(init_depth, depth):
    test = web_spider(url1 + str(i) + url2)

a = filter(None, test)
obj = [Article(article_feature_img=item['article_feature_img'], article_title=item['article_title'],
               article_desc=item['article_desc'], article_content=item['article_content'],
               article_category=Category.objects.get(id=item['article_category']),
               published_date=item['published_date']) for item in a]
Article.objects.bulk_create(obj)
Example #18
>>> r = Reporter(full_name='John Smith')
>>> r.save()
>>> r.full_name
'John Smith'
# Now the new reporter is in the database.
>>> Reporter.objects.all()
<QuerySet [<Reporter: John Smith>]>
# Django provides a rich database lookup API.
>>> Reporter.objects.get(id=1)
<Reporter: John Smith>
>>> Reporter.objects.get(full_name__startswith='John')
<Reporter: John Smith>
>>> Reporter.objects.get(full_name__contains='mith')
<Reporter: John Smith>

# Create an article.
>>> from datetime import date
>>> a = Article(pub_date=date.today(), headline='Django is cool',
...             content='Yeah.', reporter=r)  # r = Reporter(full_name='John Smith')
>>> a.save()

# Now the article is in the database.
>>> Article.objects.all()
<QuerySet [<Article: Django is cool>]>

# Article objects get API access to related Reporter objects.
>>> r = a.reporter
>>> r.full_name
'John Smith'

# And vice versa: Reporter objects get API access to Article objects.
>>> r.article_set.all()
<QuerySet [<Article: Django is cool>]>
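The session above is the classic Django ORM walkthrough; it presumes models roughly like the sketch below. The field names come from the queries shown, while everything else (max_length values, __str__ methods) is an assumption.

from django.db import models


class Reporter(models.Model):
    full_name = models.CharField(max_length=70)

    def __str__(self):
        return self.full_name


class Article(models.Model):
    pub_date = models.DateField()
    headline = models.CharField(max_length=200)
    content = models.TextField()
    reporter = models.ForeignKey(Reporter, on_delete=models.CASCADE)

    def __str__(self):
        return self.headline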
Example #19
def parse_one_page(url):
    domain = f'https://www.thedrive.com{url}'
    content = list()
    with HTMLSession() as session:
        response = session.get(domain)
        check_for_redirect(response)
        response.raise_for_status()
    soup = BeautifulSoup(response.text, 'lxml')
    name = soup.find('h1', class_='title').text
    print(name)
    content_intro = soup.find('div', class_='review-intro')
    content.append(content_intro)
    content_table = soup.find('div', class_='articleFragment')
    content.append(content_table)
    short_description = soup.find('div', class_='review-intro').text
    script = soup.find('script', type='application/ld+json')
    data = json.loads(script.next)
    pub_date = data[0]['datePublished'].split('T')[0]
    datetime_obj = datetime.datetime.strptime(pub_date, '%Y-%m-%d')
    author, is_author_created = Author.objects.get_or_create(
        name='Thedrive review team')
    categories = [{
        'name': 'reviews',
        'slug': 'reviews'
    }, {
        'name': 'best',
        'slug': 'best'
    }]
    main_image = soup.find('div',
                           class_='review-product-image').find('img')['src']
    image_name = slugify(name)
    image_type = main_image.split('.')[-1][:3]
    # image_path = os.path.join('media', 'images', f'{image_name}.{image_type}')
    # with open(image_path, 'wb') as file:
    #     with HTMLSession() as session:
    #         response = session.get(main_image)
    #         file.write(response.content)
    image_path = f'images/{image_name}.{image_type}'
    with open(f'media/{image_path}', 'wb') as f:
        with HTMLSession() as session:
            response = session.get(main_image)
        f.write(response.content)

    slug = slugify(name)
    article = {
        'name': name,
        'content': content,
        'short_description': short_description,
        'pub_date': datetime_obj,
        'author': author,
        'main_image': image_path,
        'slug': slug
    }

    # this approach works:
    try:
        article = Article(**article)
        article.save()
    except IntegrityError:
        article = Article.objects.get(slug=slug)
        print('This article is already in the database')

    # The code below raises "'NoneType' object is not callable":
    #
    # article, created = Article.objects.get_or_create(**article)
    #
    for category in categories:
        cat, created = Category.objects.get_or_create(**category)
        article.categories.add(cat)

    return article
Example #20
def refresh(request):
	foreign_policy_req = requests.get("https://foreignpolicy.com/category/latest/")
	foreign_policy_soup = BeautifulSoup(foreign_policy_req.content, "html.parser")
	foreign_policy = foreign_policy_soup.find_all('div', {'class': 'excerpt-content--list content-block'})
	for headline in foreign_policy[::-1]:
		new_article = Article()
		new_article.title = headline.find_all('h3', {'class':'hed'})[0].text
		new_article.url= headline.find_all('a', {'class':'hed-heading -excerpt'})[0]['href']
		new_article.image_url = headline.find_all('img')[0]['data-src']
		auth = headline.find_all('a', {'class':'author'})
		if len(auth) != 0:
			new_article.author = auth[0].text
		else:
			new_article.author = "FP"
		new_article.site = "Foreign Policy"
		new_article.site_url = "https://foreignpolicy.com"
		try:
			new_article.save() #checks for errors
		except IntegrityError as e:
			if 'UNIQUE constraint' in str(e.args):  # a repeat article
				pass

	foreign_affairs_req = requests.get("https://www.foreignaffairs.com")
	foreign_affairs_soup = BeautifulSoup(foreign_affairs_req.content, "html.parser")
	foreign_affairs = foreign_affairs_soup.find_all('div', {'class' : 'magazine-list-item--image-link row'})
	for headline in foreign_affairs[::-1]:
		new_article = Article()
		new_article.title = headline.find_all('h3', {'class':'article-card-title font-weight-bold ls-0 mb-0 f-sans'})[0].text
		new_article.image_url = headline.find_all('img',{'class':'b-lazy b-lazy-ratio magazine-list-item--image d-none d-md-block'})[0]['data-src']
		if len(new_article.image_url) > 199:
			new_article.image_url = 'https://subscribe.foreignaffairs.com/FAF/pub_templates/faf/images/logo.png'
		new_article.url = headline.find_all('a', {'class':'d-block flex-grow-1'})[0]['href']
		new_article.author = headline.find_all('h4', {'class':'magazine-author font-italic ls-0 mb-0 f-serif'})[0].text
		new_article.site = "Foreign Affairs"
		new_article.site_url = "https://www.foreignaffairs.com"
		try: 
			new_article.save()
		except IntegrityError as e:
			if 'UNIQUE constraint' in str(e.args):
				pass

	#they give a 403 error for other methods
	china_power_req = Request("https://chinapower.csis.org/podcasts/", headers = {'User-Agent' : 'Mozilla/5.0'})
	china_power_page = urlopen(china_power_req).read()
	china_power_soup = BeautifulSoup(china_power_page, "html.parser")
	china_power = china_power_soup.find_all('article')

	for headline in china_power[::-1]:
		#finding author
		disc = headline.find_all('h2', {'class':'entry-title'})[0].text #description has the author's name
		list_disc = disc.split() #find it in the text
		record = False
		list_auth = []
		for name in list_disc:
			if record:
				list_auth.append(name) #add the name
			if name == "with": #start at 'episode,'
				record = True;

		new_article = Article()
		new_article.title = headline.find_all('h2', {'class':'entry-title'})[0].text
		new_article.image_url = "https://megaphone.imgix.net/podcasts/722b9c2a-e6e1-11ea-a520-3349f6671499/image/uploads_2F1598366366917-v9rdxhpawhc-bee946f884ea9a141d33af2322074d0d_2F_ART_ChinaPower.jpg?ixlib=rails-2.1.2&w=400&h=400"
		new_article.url = headline.find_all('a')[0]['href']
		if len(list_auth) != 0:
			new_article.author = " ".join(list_auth) + " & Bonnie Glaser"
		else:
			new_article.author = "Bonnie Glaser"
		new_article.site = "China Power Podcasts"
		new_article.site_url = "https://chinapower.csis.org/podcasts/"
		try: 
			new_article.save()
		except IntegrityError as e:
			if 'UNIQUE constraint' in str(e.args):
				pass

	#for war on the rocks, each div class for the articles is different
	warontherocks_req = Request("https://warontherocks.com/", headers = {'User-Agent' : 'Mozilla/5.0'})
	warontherocks_page = urlopen(warontherocks_req).read()
	warontherocks_soup = BeautifulSoup(warontherocks_page, "html.parser")
	warontherocks = warontherocks_soup.find_all('div', {'class' : 'all-posts'})

	# very nice and straightforward HTML from warontherocks
	header_ = warontherocks[0].find_all('h3')
	link_ = warontherocks[0].find_all('a')
	img_ = warontherocks[0].find_all('img')
	writer_ = warontherocks[0].find_all('h4')

	for i in range(12,1,-1):
		new_article = Article()
		new_article.title = header_[i-1].text
		new_article.image_url = img_[i-1]['src']
		new_article.url = link_[2*i-1]['href']
		new_article.author = writer_[i-1].text
		new_article.site = "War on the Rocks"
		new_article.site_url = "https://warontherocks.com"
		try: 
			new_article.save()
		except IntegrityError as e:
			if 'UNIQUE constraint' in str(e.args):
				pass

	"""AP_FP_req = Request("https://apnews.com/hub/foreign-policy", headers = {'User-Agent' : 'Mozilla/5.0'})
	AP_FP_page = urlopen(AP_FP_req).read()
	AP_IL_req = Request("https://apnews.com/hub/international-relations", headers = {'User-Agent' : 'Mozilla/5.0'})
	AP_IL_page = urlopen(AP_IL_req).read()
	AP_FP_soup = BeautifulSoup(AP_FP_page, "html.parser")
	AP_IL_soup = BeautifulSoup(AP_IL_page, "html.parser")
	AP = AP_FP_soup.find_all('div', {'data-key': 'feed-card-wire-story-with-image'}) + AP_IL_soup.find_all('div', {'data-key': 'feed-card-wire-story-with-image'})
	for headline in AP[::-1]:
		new_article = Article()
		new_article.title = headline.find_all('h1')[0].text
		new_article.url= "https://apnews.com" + headline.find_all('a')[0]['href']
		#img machine broke
		img = headline.find_all('img', {'class': 'image-0-2-132'})
		if len(img) == 0:
			new_article.image_url = "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0c/Associated_Press_logo_2012.svg/220px-Associated_Press_logo_2012.svg.png"
		else:
			new_article.image_url = img[0]['src']
		list_auth = (headline.find_all('span')[0].text).split(" ")
		if "GMT" in list_auth:
			new_article.author = "AP"
		else:
			new_article.author = headline.find_all('span')[0].text
		new_article.site = "Associated Press"
		new_article.site_url = "https://apnews.com"
		try:
			new_article.save() #checks for errors
		except IntegrityError as e: 
   			if 'UNIQUE constraint' in str(e.args): #a repeat article
   				pass"""

	# Lowy Institute
	LI_req = Request("https://www.lowyinstitute.org/the-interpreter/archive", headers = {'User-Agent' : 'Mozilla/5.0'})
	LI_page = urlopen(LI_req).read()
	LI_soup = BeautifulSoup(LI_page, "html.parser")
	LI = LI_soup.find_all('article')

	for headline in LI[::-1]:
		img = headline.find_all('div',{'class':'article-thumb'})[0]
		if len(img) == 0:
			img = headline.find_all('div',{'class':'article-thumb-wrap'})[0]
		word = [] #getting the link into a list of chars
		record = False
		for letter in list(img['style']):
			if record:
				word.append(letter)
			if letter == "'":
				if record:
					word.pop()  # removing the ' at the end
					break
				record = True

		new_article = Article()
		new_article.title = headline.find_all('h2', {'class':'article-title txt-f4 txt-s6 mv-0 pv-xs'})[0].text
		new_article.url= "https://www.lowyinstitute.org" + headline.find_all('a', {'class':'txt-dn'})[0]['href']
		new_article.image_url = "".join(word)
		new_article.author = headline.find_all('a', {'class':'txt-dn'})[1].text
		new_article.site = "Lowy Institute"
		new_article.site_url = "https://www.lowyinstitute.org/the-interpreter/archive"
		
		try:
			new_article.save()
		except IntegrityError as e:
			if 'UNIQUE constraint' in str(e.args):
				pass

	return redirect("../")
Example #21
def buildArticle(key, title, link, issue):
    title = (title[:497] + '...') if len(title) > 500 else title
    return Article(key=key, title=title, link=link, issue_id=issue)