Code example #1 — score: 0
File: views.py — Project: gminds/rapidnewsng
    def get(self, request, format=None):
        """Fetch the RSS/Atom feed named by ``?channel=``, convert each entry
        to a link dict, persist it through ``LinkSerializer`` and return the
        serialized list.

        NOTE(review): ``load`` returns ``None`` when the hard-coded submitter
        user is missing, which would break the serializers below — preserved
        as-is; confirm upstream guarantees that user exists.
        """

        def load(durl, greet):
            # ``greet`` is accepted for interface parity with other feed
            # loaders in this project but is unused by this feedparser variant.

            def remove_tags(text):
                """Strip HTML tags, newlines and smart quotes; escape double
                quotes; keep ASCII characters only."""
                text = TAG_RE.sub('', text)
                text = re.sub("\n", "", text)
                text = re.sub("\"", "\\\"", text)
                text = re.sub(u"(\u2018|\u2019)", "'", text)
                return "".join(filter(lambda x: ord(x) < 128, text))

            def entry_to_link_dict(entry):
                """Map one feed entry onto the dict shape LinkSerializer expects."""
                stripper = MLStripper()
                stripper.feed(entry.description)
                return {
                    "title": remove_tags(entry.title),
                    "url": entry.link,
                    "linksource": urlparse(entry.link).netloc,
                    "votes": "1",
                    "description": remove_tags(stripper.get_data()),
                }

            # Resolve the submitter ONCE; the original re-queried the User
            # table inside the per-entry loop just to log the username.
            try:
                submitter = User.objects.filter(username='******')[0]
            except IndexError:
                return  # no such user -> None, matching original behavior

            content = []
            for entry in parse(durl).entries:
                link = entry_to_link_dict(entry)
                link["submitter"] = submitter.id
                logging.debug("value of  user my var is %s",
                              str(submitter.get_username()))
                content.append(link)
            return content

        channel = request.GET['channel']
        content = load(channel, channel)
        serializer2 = LinkSerializer(content, many=True)
        # NOTE(review): ``data=content`` passes a *list* without ``many=True``;
        # ``is_valid()`` is unlikely to succeed — preserved from the original.
        serializer = LinkSerializer(data=content)
        logging.debug("value of  27 my var is %s", str(serializer))
        if serializer.is_valid():
            serializer.save()
        return Response(serializer2.data)
Code example #2 — score: 0
    def get(self, request, format=None):
        """Fetch the RSS/Atom feed named by ``?channel=``, turn its entries
        into link dicts, save them via ``LinkSerializer`` and return the
        serialized collection.

        NOTE(review): when the hard-coded submitter user is absent, ``load``
        yields ``None`` and the serializer calls below would fail — behavior
        preserved; verify the user is guaranteed to exist.
        """

        def load(durl, greet):
            # ``greet`` kept for signature parity with the cached feed
            # loaders elsewhere in the project; unused here.

            def remove_tags(text):
                """Strip HTML tags/newlines/smart quotes, escape double
                quotes, and drop any non-ASCII characters."""
                text = TAG_RE.sub('', text)
                text = re.sub("\n", "", text)
                text = re.sub("\"", "\\\"", text)
                text = re.sub(u"(\u2018|\u2019)", "'", text)
                return "".join(filter(lambda x: ord(x) < 128, text))

            def entry_to_link_dict(entry):
                """Build the LinkSerializer-shaped dict for one feed entry."""
                stripper = MLStripper()
                stripper.feed(entry.description)
                return {
                    "title": remove_tags(entry.title),
                    "url": entry.link,
                    "linksource": urlparse(entry.link).netloc,
                    "votes": "1",
                    "description": remove_tags(stripper.get_data()),
                }

            # Single submitter lookup; the original issued one extra User
            # query per entry purely for the debug log line.
            try:
                submitter = User.objects.filter(username='******')[0]
            except IndexError:
                return  # missing user -> None, as in the original

            content = []
            for entry in parse(durl).entries:
                link = entry_to_link_dict(entry)
                link["submitter"] = submitter.id
                logging.debug("value of  user my var is %s",
                              str(submitter.get_username()))
                content.append(link)
            return content

        channel = request.GET['channel']
        content = load(channel, channel)
        serializer2 = LinkSerializer(content, many=True)
        # NOTE(review): a list handed to ``data=`` without ``many=True`` will
        # normally not validate — kept verbatim from the original flow.
        serializer = LinkSerializer(data=content)
        logging.debug("value of  27 my var is %s", str(serializer))
        if serializer.is_valid():
            serializer.save()
        return Response(serializer2.data)
Code example #3 — score: 0
File: views.py — Project: gminds/rapidnewsng
    def get(self, request, format=None):
        """Fetch a named news RSS feed (memcached for 30 minutes), parse its
        ``channel/item`` nodes into link dicts, persist them through
        ``LinkSerializer`` and return the serialized list.

        The ``?channel=`` query parameter selects the feed; unknown values
        fall back to the WebMD feed.
        """

        def load(durl, greet):
            """Download ``durl`` (cached under key ``greet``) and return the
            list of link dicts extracted from its RSS items."""

            def remove_tags(text):
                """Strip HTML tags and newlines, escape double quotes, and
                drop any non-ASCII characters."""
                text = TAG_RE.sub('', text)
                text = re.sub("\n", "", text)
                text = re.sub("\"", "\\\"", text)
                return "".join(filter(lambda x: ord(x) < 128, text))

            # Feed XML with a 1800-second (30 min) cache layer keyed by greet.
            data = cache.get(greet)
            if data is None:
                fh = urllib2.urlopen(durl)
                data = fh.read()
                fh.close()
                cache.add(greet, data, 1800)

            # Resolve the submitter ONCE; the original ran TWO User queries
            # per feed item inside the loop (id lookup + username for the
            # debug log). Raises IndexError if the user is missing, as the
            # original did on the first iteration.
            submitter = User.objects.filter(username='******')[0]

            content = []
            doc = ET.fromstring(data)
            for node in doc.findall('channel/item'):
                url = node.find('./link').text
                info = {
                    'title': remove_tags(node.find('./title').text),
                    'description': remove_tags(node.find('./description').text),
                    'url': url,
                    'submitter': submitter.id,
                    'linksource': urlparse(url).netloc,
                    # random placeholder vote count, as in the original
                    'votes': randrange(20),
                }
                logging.debug("value of  user my var is %s",
                              str(submitter.get_username()))
                content.append(info)
            return content

        # channel name -> (feed URL, cache key); replaces the if/elif chain.
        feeds = {
            'BBC': ('http://feeds.bbci.co.uk/news/health/rss.xml', 'bbc'),
            'MEDLINEPLUS': ('http://www.nlm.nih.gov/medlineplus/feeds/news_en.xml', 'MEDLINEPLUS'),
            'PUNCH': ('http://www.latestnigeriannews.com/feed/punch/rss.xml', 'PUNCH'),
            'VANGUARD': ('http://www.vanguardngr.com/feed/', 'VANGUARD'),
            'SUNNEWS': ('http://sunnewsonline.com/new/?feed=rss2', 'SUNNEWS'),
            'GUARDIAN': ('http://www.latestnigeriannews.com/feed/guardian/rss.xml', 'GUARDIAN'),
            'THISDAY': ('http://www.latestnigeriannews.com/feed/thisday/rss.xml', 'THISDAY'),
            'DAILYTIMES': ('http://www.dailytimes.com.ng/rss/articles/all', 'DAILYTIMES'),
        }
        channel = request.GET['channel']
        url, cache_key = feeds.get(
            channel,
            ('http://rssfeeds.webmd.com/rss/rss.aspx?RSSSource=RSS_PUBLIC', 'WEBMD'))
        content = load(url, cache_key)

        serializer2 = LinkSerializer(content, many=True)
        # NOTE(review): ``data=content`` passes a *list* without ``many=True``;
        # ``is_valid()`` is unlikely to succeed — preserved from the original.
        serializer = LinkSerializer(data=content)
        logging.debug("value of  27 my var is %s", str(serializer))
        if serializer.is_valid():
            serializer.save()
        return Response(serializer2.data)
Code example #4 — score: 0
    def get(self, request, format=None):
        """Serve one of several news RSS feeds as serialized link objects.

        The feed chosen by ``?channel=`` is downloaded (with a 30-minute
        memcache layer), its ``channel/item`` entries are mapped to link
        dicts, saved via ``LinkSerializer``, and returned. Unknown channel
        values fall back to the WebMD feed.
        """

        def load(durl, greet):
            """Return the list of link dicts parsed from the RSS feed at
            ``durl``; raw XML is cached under key ``greet``."""

            def remove_tags(text):
                """Strip HTML tags and newlines, escape double quotes, keep
                ASCII characters only."""
                text = TAG_RE.sub('', text)
                text = re.sub("\n", "", text)
                text = re.sub("\"", "\\\"", text)
                return "".join(filter(lambda x: ord(x) < 128, text))

            # Cache the raw feed bytes for 1800 s to avoid refetching.
            data = cache.get(greet)
            if data is None:
                fh = urllib2.urlopen(durl)
                data = fh.read()
                fh.close()
                cache.add(greet, data, 1800)

            # One submitter lookup instead of the original's two User
            # queries per item (id + username-for-logging each iteration).
            # IndexError propagates if the user is missing, matching the
            # original's first-iteration failure.
            submitter = User.objects.filter(username='******')[0]

            content = []
            doc = ET.fromstring(data)
            for node in doc.findall('channel/item'):
                url = node.find('./link').text
                info = {
                    'title': remove_tags(node.find('./title').text),
                    'description': remove_tags(node.find('./description').text),
                    'url': url,
                    'submitter': submitter.id,
                    'linksource': urlparse(url).netloc,
                    # placeholder random vote count, carried over as-is
                    'votes': randrange(20),
                }
                logging.debug("value of  user my var is %s",
                              str(submitter.get_username()))
                content.append(info)
            return content

        # Dispatch table replacing the long if/elif ladder; values are
        # (feed URL, cache key). Note BBC's key is lowercase 'bbc'.
        feeds = {
            'BBC': ('http://feeds.bbci.co.uk/news/health/rss.xml', 'bbc'),
            'MEDLINEPLUS': ('http://www.nlm.nih.gov/medlineplus/feeds/news_en.xml', 'MEDLINEPLUS'),
            'PUNCH': ('http://www.latestnigeriannews.com/feed/punch/rss.xml', 'PUNCH'),
            'VANGUARD': ('http://www.vanguardngr.com/feed/', 'VANGUARD'),
            'SUNNEWS': ('http://sunnewsonline.com/new/?feed=rss2', 'SUNNEWS'),
            'GUARDIAN': ('http://www.latestnigeriannews.com/feed/guardian/rss.xml', 'GUARDIAN'),
            'THISDAY': ('http://www.latestnigeriannews.com/feed/thisday/rss.xml', 'THISDAY'),
            'DAILYTIMES': ('http://www.dailytimes.com.ng/rss/articles/all', 'DAILYTIMES'),
        }
        channel = request.GET['channel']
        url, cache_key = feeds.get(
            channel,
            ('http://rssfeeds.webmd.com/rss/rss.aspx?RSSSource=RSS_PUBLIC', 'WEBMD'))
        content = load(url, cache_key)

        serializer2 = LinkSerializer(content, many=True)
        # NOTE(review): a list passed via ``data=`` without ``many=True``
        # will normally fail validation — kept verbatim from the original.
        serializer = LinkSerializer(data=content)
        logging.debug("value of  27 my var is %s", str(serializer))
        if serializer.is_valid():
            serializer.save()
        return Response(serializer2.data)