def index(request):
    """Render the landing page: a random featured article, the three most
    recent articles, and four random articles for the footer.

    Assumes ``Article``, ``Date``, ``random`` and ``render`` are imported at
    module level (not visible in this chunk).
    """
    startdate = '1970-01-01'
    enddate = Date.today()
    # All published articles, NEWEST first ('-pub_date' is descending; the
    # original comment incorrectly claimed ascending order).
    all_articles = Article.objects.filter(
        pub_date__range=[startdate, enddate]).order_by('-pub_date')
    # Random featured article.
    # NOTE(review): random.choice raises IndexError on an empty queryset —
    # presumably the DB is never empty; confirm.
    random_article = random.choice(all_articles)
    # Weekday name (e.g. "Monday") the featured article was published on.
    day = random_article.pub_date.strftime("%A")
    # Top three most recent articles. The original sliced [:5], contradicting
    # both the variable name and its own comment; fixed to three.
    topThree_articles = all_articles[:3]
    # Four articles in random DB order for the footer.
    random_four = Article.objects.filter(
        pub_date__range=[startdate, enddate]).order_by('?')[:4]
    context_dict = {
        'random_article': random_article,
        'topThree_articles': topThree_articles,
        'day': day,
        'random_four': random_four,
    }
    return render(request, 'ver1/index.html', context_dict)
def desc(request, id):
    """Render the detail page for one article plus four random other
    published articles.
    """
    window_start = '1991-01-01'
    window_end = Date.today()
    # The requested article; article.DoesNotExist propagates if absent.
    obj1 = article.objects.get(pk=id)
    # Four other published articles in random DB order, excluding the one
    # currently displayed.
    obj2 = (article.objects
            .filter(public_date__range=[window_start, window_end])
            .exclude(pk=obj1.id)
            .order_by('?')[:4])
    context2 = {'obj1': obj1, 'obj2': obj2}
    return render(request, 'demo1/disc.html', context2)
def list(request):
    """Render the article list page: a random featured article, four more
    in publication order, and four random footer articles.

    NOTE(review): this view name shadows the builtin ``list``; renaming it
    would require updating the URLconf, so the name is kept.
    """
    window_start = '1991-01-01'
    window_end = Date.today()
    # Everything published, oldest first — used only to pick the feature.
    published = article.objects.filter(
        public_date__range=[window_start, window_end]).order_by('public_date')
    # NOTE(review): random.choice raises IndexError on an empty queryset.
    ran_art = random.choice(published)
    # Four more articles, oldest first, excluding the featured one.
    obj1 = (article.objects
            .filter(public_date__range=[window_start, window_end])
            .exclude(pk=ran_art.id)
            .order_by('public_date')[:4])
    # Footer: four articles in random DB order.
    obj2 = (article.objects
            .filter(public_date__range=[window_start, window_end])
            .order_by('?')[:4])
    context = {
        'obj1': obj1,
        'ran_art': ran_art,
        'day': ran_art.public_date.strftime("%A"),
        'obj2': obj2,
    }
    return render(request, 'demo1/list.html', context)
def page(request, id_num):
    """Render a single article page identified by ``id_num``, with four
    random articles for the sidebar/footer.
    """
    startdate = '1970-01-01'
    enddate = Date.today()
    # Look up the requested article; Article.DoesNotExist propagates.
    article = Article.objects.get(id=id_num)
    # Weekday name (e.g. "Monday") the article was published on.
    day = article.pub_date.strftime("%A")
    # Four published articles in random DB order.
    random_four = (Article.objects
                   .filter(pub_date__range=[startdate, enddate])
                   .order_by('?')[:4])
    context_dict = {
        'random_four': random_four,
        'article': article,
        'day': day,
    }
    return render(request, 'ver1/page.html', context_dict)
def add_reuters_to_db():
    """Scrape the Reuters RSS directory pages and persist each feed that
    was updated in the current year or later.

    Side effects: network I/O (``request.urlopen`` / ``feedparser.parse``)
    and DB writes (``FeedRec.save``).
    """
    url_list = []
    reuters_urls = ['http://uk.reuters.com/tools/rss',
                    'http://www.reuters.com/tools/rss']
    for url in reuters_urls:
        doc = request.urlopen(url)
        # Explicit parser avoids BeautifulSoup's "no parser specified"
        # warning and keeps parsing consistent across environments.
        soup = BeautifulSoup(doc, 'html.parser')
        for cell in soup.find_all('td', 'feedUrl'):
            # Capture everything after the scheme's '//'. Guard against
            # cells with no match — the original crashed with
            # AttributeError on m.groups() when re.search returned None.
            m = re.search(r'(?<=//)(.*)', cell.text)
            if m:
                url_list.append("http://" + m.groups()[0])
    for u in url_list:
        print(u)
        f = feedparser.parse(u)
        # Keep only feeds updated during the current year or later
        # (equivalent to the original `not ... < current_year`).
        if f.feed.updated_parsed.tm_year >= Date.today().year:
            fr = FeedRec(feed_url=u, feed_title=f.feed.title)
            fr.save()
def add_reuters_to_db():
    """Scrape the Reuters RSS directory pages and persist each feed that
    was updated in the current year or later.

    NOTE(review): this is a duplicate definition — it silently shadows an
    earlier ``add_reuters_to_db`` in this file. One of the two should be
    removed; confirm which is intended before deleting.

    Side effects: network I/O (``request.urlopen`` / ``feedparser.parse``)
    and DB writes (``FeedRec.save``).
    """
    url_list = []
    reuters_urls = [
        'http://uk.reuters.com/tools/rss',
        'http://www.reuters.com/tools/rss',
    ]
    for url in reuters_urls:
        doc = request.urlopen(url)
        # Explicit parser avoids BeautifulSoup's "no parser specified"
        # warning and keeps parsing consistent across environments.
        soup = BeautifulSoup(doc, 'html.parser')
        for cell in soup.find_all('td', 'feedUrl'):
            # Capture everything after the scheme's '//'. Guard against
            # cells with no match — the original crashed with
            # AttributeError on m.groups() when re.search returned None.
            m = re.search(r'(?<=//)(.*)', cell.text)
            if m:
                url_list.append("http://" + m.groups()[0])
    for u in url_list:
        print(u)
        f = feedparser.parse(u)
        # Keep only feeds updated during the current year or later
        # (equivalent to the original `not ... < current_year`).
        if f.feed.updated_parsed.tm_year >= Date.today().year:
            fr = FeedRec(feed_url=u, feed_title=f.feed.title)
            fr.save()