def get_news_posts_by_page():
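    # Plain-text listing of every stored NewsPost for the feed page given in ?page_url=...,
    # grouped by title.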

    output = ''

    try:
        url_page = request.args.get('page_url')
        news_posts = NewsPost.query(NewsPost.source_url == url_page).fetch()

        output += 'num news posts: %d\n' % len(news_posts)

        dict_titles = {}
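        # group the post URLs by title, so the same story picked up from several sources is listed together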
        for np in news_posts:

            dict_titles.setdefault(np.title, [])
            dict_titles[np.title].append(np.url)


        for title in dict_titles:
            output += 'title: %s\n' % title
            output += 'source(s):\n'

            for source in dict_titles[title]:
                output += '%s\n' % source

            output += '---------\n'
    except Exception as inst:
        output += 'Exception: %s' % inst


    return Response(output, mimetype='text/plain')
def take_all_news_posts():

    """
    :return: every news post stored in the datastore
    """

    return NewsPost.query().fetch()
def getNewsFromSource():
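    # Plain-text listing of the titles of all posts crawled from the host page given in ?source=...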
    source = request.args.get('source')
    s = ''
    newsPosts = NewsPost.query(NewsPost.host_page == source).fetch()

    for np in newsPosts:
        s += '%s\n' % np.title


    return Response(s, mimetype='text/plain')
def getMYNews():
    category = request.args.get('category')
    news = NewsPost.query().fetch()
    clusters = Cluster.query(Cluster.category == category).fetch()

    newNews = []

    for n in news:
        newObject = NewsPostClient(url = n.url,host_page = n.host_page,title = n.title, description = n.description)
        newNews.append(newObject)


    #result = str(byteify(newNews[0].serialize()))
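    # NOTE: the Cluster query and the newNews list above are not used yet, and the commented-out
    # serialization has not been wired in; the endpoint currently returns an empty body.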

    result = ''


    return Response(result, mimetype='application/javascript')
def get_posts_by_page():
    debug_info = ''
    url_page = request.args.get('page_url')
    debug_info += 'url_page: %s\n' % url_page
    news_posts = NewsPost.query(NewsPost.source_url == url_page).fetch()
    debug_info += 'num news posts: %d\n' % len(news_posts)



    light_version = []
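    # wrap each datastore entity in a NewsPostClient so only the client-facing fields get serialized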
    for np in  news_posts:
        newNews = NewsPostClient(url = np.url, host_page = np.host_page, title = np.title, numWords = np.numWords, source_id = np.source_id,
                                         source_url = np.source_url,img_url = np.img_url, pub_date = np.pub_date, description = np.description)
        light_version.append(newNews)

    obj = {'listNewsPosts' :[n.serialize() for n in light_version]}

    result = json.dumps(obj, ensure_ascii=True)

    return Response(result, mimetype='text/plain')
def getNewsPosts(source_object, web_page_url, dict_IDF):

    """
    The main function which crawls a particular link and returns news posts as object that have been extracted from that link.
    Needs revising and (possibly) modifying the process of text extraction.

    :param sourceObject: the source object that wraps multiple web_page_urls (we need it for creating the news post object)
    :param web_page_url: the web page url where we extract the information from
    :param dictIDF:     the idf dictionary that we need to calculate tf_idf for a document
    :return: list of news posts and a feedback (for logging)
    """

    #feedback variable for logging
    feedback = ''

    try:
        #opening the url and reading the content
        c = urlopen(web_page_url)
        content = c.read()
        soup = BeautifulSoup(content)

        logging.debug('getNewsPosts: read content from web_page_url')


        logging.debug('instantiated beautiful soup')

        #the list of object that we are going to return
        newsPosts = []

        for item in soup.findAll('item'):

            #in each item we have a link to the news that we would like to process

            try:
                #title of the news
                title   = item.find('title').string
                #link to the news
                link_url = item.find('link').string




                feedback += 'title: %s\n' % title
                feedback += 'link_url: %s\n' % link_url

                pub_date = item.find('pubdate')
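                # if a pubDate was found, parse it and convert it to milliseconds since the epoch; otherwise fall back to 0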

                if pub_date is not None:
                    pub_date = pub_date.string
                    datetime_obj = parse(pub_date, ignoretz=True)

                    feedback += 'pub_date: %s\n' % (datetime_obj.strftime('%B %d %Y %H:%M'))

                    date_milli = (datetime_obj - epoch).total_seconds() * 1000.0
                    pub_date = date_milli
                    feedback += 'milli: %f\n' % date_milli

                else:
                    pub_date = 0
                    feedback += 'pub_date: None\n'


                same_news_posts = NewsPost.query(NewsPost.url == link_url).fetch()


                #we must not process the same news twice
                if same_news_posts is not None and len(same_news_posts) > 0:
                    feedback += 'There is/are already news post/s with this link. Continuing..\n'
                    feedback += '------------------------------\n'
                    continue


                img_url = None

                #we try to fetch the photo url directly from the rss feed, if not possible we will try later again
                if (item.description is not None) and (item.description.string is not None):
                    img_obj = BeautifulSoup(item.description.string).find('img')

                    if img_obj is not None:
                        img_url = img_obj['src']
                elif item.description is not None:
                    img_obj = item.description.find('img')

                    if img_obj is not None:
                        img_url =  img_obj['src']


                #here we get the content of the news
                link_content = urlopen(link_url).read()
                innerSoup = BeautifulSoup(link_content)


                title_words = Utility.getWords(title)
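                # filterTitles (defined elsewhere in this module) presumably drops source-specific words from the title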
                title_words = filterTitles(title_words, web_page_url)

                # count the title words twice, because we consider words in the title twice as important as the rest
                total_words = title_words * 2


                #which paragraphs to take into consideration


                text = ''
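                # strip <script> and <style> tags so their contents do not leak into the extracted text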

                for script in innerSoup(['script', 'style']):
                    script.extract()
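                # Utility.fetch_text_specifications holds per-site extraction rules: either a
                # [start(, end)] paragraph range, or a dict naming the tag/attribute that holds
                # the article text (optionally with one level of nested tags)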

                if web_page_url in Utility.fetch_text_specifications:

                    specifications = Utility.fetch_text_specifications[web_page_url]


                    if isinstance(specifications, list): #we take the paragraphs

                        start = specifications[0]
                        end   = len(innerSoup.findAll('p'))
                        if len(specifications) > 1:
                            end = specifications[1]


                        for p in innerSoup.findAll('p')[start:end]:
                            text += '%s ' % p.text
                    else:

                        tag_type = specifications['tag_type']
                        attr_type = specifications['attribute_type']
                        attr_value = specifications['attribute_value']

                        sections = innerSoup.findAll(tag_type, {attr_type: attr_value})

                        if 'nested_tag_type' in specifications:
                            #we need to go one level deeper
                            nested_tag_type = specifications['nested_tag_type']
                            nested_attr_type = specifications['nested_attribute_type']
                            nested_attr_value = specifications['nested_attribute_value']
                            limit = specifications.get('limit', 1000)

                            new_sections = []

                            for section in sections:
                                new_sections.extend(section.findAll(nested_tag_type,{ nested_attr_type:  nested_attr_value}, limit=limit))

                            sections = new_sections



                        for section in sections:
                            text += '%s ' % section.text



                description = text[:min(100, len(text))]

                total_words.extend(Utility.getWords(text))

                num_words = len(total_words)
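                # posts that end up with fewer than 7 words are skipped; too little text to work with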


                if num_words < 7:
                    continue

                dict_news = {}
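                # term-frequency map of the post's words, used later to compute tf-idf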
                for word in total_words:
                    dict_news[word] = 1 + dict_news.get(word, 0)




                #we are trying to get the image from the news
                if img_url is None:
                    imgs = innerSoup.findAll('img')

                    img_url = ''
                    if imgs is not None and len(imgs) > 0:
                        img_url = imgs[0]['src']

                #deal with the pictures with relative path to the web
                if (img_url is not None) and (len(img_url) > 0):
                    if img_url.find(source_object.url) != 0:
                        img_url = source_object.url + '/' + img_url


                feedback += 'img_url: %s\n' % img_url


                newsPost = NewsPost(parent=ndb.Key('NewsPost', link_url or "*notitle*"), url = link_url, host_page = web_page_url,
                                    title = title, dictWords = dict_news, numWords = num_words, words = total_words ,
                                    source_id = source_object.id, source_url = source_object.url,
                                    img_url = img_url, pub_date = pub_date, description = description)
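                # compute the post's tf-idf against the supplied IDF dictionary, persist it and collect it for the result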

                newsPost.calculate_tf_idf(dict_IDF)
                newsPost.put()
                newsPosts.append(newsPost)

                feedback += '------------------------------\n'
            except Exception as inst:
                feedback += 'Inner Exception type: %s\n' % str(type(inst))
                feedback += 'Inner Exception message: %s\n' % inst


        return newsPosts, feedback
    except Exception as inst:

        feedback += 'Exception type: %s\n' % type(inst)
        feedback += 'Exception message: %s\n' % inst

        #if there is an exception, we return an empty list of news posts
        return [], feedback