def fetch_tweets(search_request):
    """Fetch tweets matching *search_request* from the Twitter API,
    extract media URLs from them, and persist a SearchKeyWord record.

    Returns the list of extracted URLs, or None if the Twitter search
    raised a TwitterSearchException.
    """
    try:
        tso = TwitterSearchOrder()  # create a TwitterSearchOrder object
        tso.setKeywords([search_request])  # define search request
        tso.setCount(settings.tweets_per_page)  # only results_per_page
        tso.setIncludeEntities(True)  # give us entity information

        # create a TwitterSearch object with our secret tokens
        ts = TwitterSearch(
            consumer_key=twitter.TWITTER_CONSUMER_KEY,
            consumer_secret=twitter.TWITTER_CONSUMER_SECRET,
            access_token=twitter.TWITTER_ACCESS_TOKEN,
            access_token_secret=twitter.TWITTER_ACCESS_TOKEN_SECRET)
        ts.authenticate()  # user must authenticate first

        tweets = ts.searchTweetsIterable(tso)
        found_urls = extract_urls(tweets)

        search_keyword_object = SearchKeyWord()
        search_keyword_object.gifs = found_urls
        search_keyword_object.search_keyword = search_request
        search_keyword_object.updated_at = datetime.now()
        print(search_keyword_object)
        search_keyword_object.save()
        return found_urls
    # FIX: "except TwitterSearchException, e" is Python-2-only syntax and a
    # SyntaxError on Python 3; "as e" works on both.
    except TwitterSearchException as e:  # to take care of errors
        message = e.message
        # NOTE(review): the error is swallowed; callers receive None.
        return None
def fetch_tweets(search_request):
    """Fetch tweets matching *search_request* from the Twitter API,
    extract media URLs from them, and persist a SearchKeyWord record.

    Returns the list of extracted URLs, or None if the Twitter search
    raised a TwitterSearchException.
    """
    try:
        tso = TwitterSearchOrder()  # create a TwitterSearchOrder object
        tso.setKeywords([search_request])  # define search request
        tso.setCount(settings.tweets_per_page)  # only results_per_page
        tso.setIncludeEntities(True)  # give us entity information

        # create a TwitterSearch object with our secret tokens
        ts = TwitterSearch(
            consumer_key=twitter.TWITTER_CONSUMER_KEY,
            consumer_secret=twitter.TWITTER_CONSUMER_SECRET,
            access_token=twitter.TWITTER_ACCESS_TOKEN,
            access_token_secret=twitter.TWITTER_ACCESS_TOKEN_SECRET
        )
        ts.authenticate()  # user must authenticate first

        tweets = ts.searchTweetsIterable(tso)
        found_urls = extract_urls(tweets)

        search_keyword_object = SearchKeyWord()
        search_keyword_object.gifs = found_urls
        search_keyword_object.search_keyword = search_request
        search_keyword_object.updated_at = datetime.now()
        print(search_keyword_object)
        search_keyword_object.save()
        return found_urls
    # FIX: "except TwitterSearchException, e" is Python-2-only syntax and a
    # SyntaxError on Python 3; "as e" works on both.
    except TwitterSearchException as e:  # to take care of errors
        message = e.message
        # NOTE(review): the error is swallowed; callers receive None.
        return None
def bookList():
    """List books with optional category/keyword/type filters and pagination.

    Query string parameters:
        cates    -- comma-separated category ids to filter on
        keyword  -- substring match against book_name; also bumps book heat
                    and records/updates the search keyword counter
        type     -- 'hot' (order by heat) or 'collect' (order by collect count)
        pageNum  -- 1-based page number (default 1)
        pageSize -- page size (default 20)

    Returns a ResponseData dict; on DB failure the code is RET.DBERR.
    """
    result = ResponseData(RET.OK)
    cates = request.args.get('cates', type=str, default='')
    keyword = request.args.get('keyword')
    # FIX: the original bound this to the name `type`, shadowing the builtin.
    list_type = request.args.get('type')
    page_num = request.args.get('pageNum', type=int, default=1)
    page_size = request.args.get('pageSize', type=int, default=20)
    try:
        books_query = Book.query
        if cates:
            books_query = books_query.filter(
                Book.cate_id.in_(cates.split(",")))
        if keyword:
            books_query = books_query.filter(Book.book_name.contains(keyword))
        if list_type:
            if list_type == 'hot':
                books_query = books_query.filter(Book.heat > 0).order_by(
                    Book.heat.desc())
            elif list_type == 'collect':
                books_query = books_query.filter(
                    Book.collect_count > 0).order_by(Book.collect_count.desc())
        books_query = books_query.order_by(Book.create_time.desc())
        books_paginate = books_query.paginate(page=page_num,
                                              per_page=page_size,
                                              error_out=False)
        if keyword:
            # A keyword search counts as interest: warm up the matched books
            # and track how often this keyword is searched.
            for book in books_paginate.items:
                book.heat += 1
            keyword_query = SearchKeyWord.query.filter_by(
                keyword=keyword).first()
            if keyword_query:
                keyword_query.count += 1
                if keyword_query.count > 10:  # promote to "hot" keyword
                    keyword_query.is_hot = True
            else:
                keyword_query = SearchKeyWord(keyword=keyword, count=1)
                db.session.add(keyword_query)
            db.session.commit()
        books = [dict(book) for book in books_paginate.items]
        page_model = PageModel(page_num=page_num,
                               items=books,
                               total_page=books_paginate.pages,
                               total_num=books_paginate.total)
        result.data = dict(page_model)
    except Exception as e:
        current_app.logger.error(e)
        result.code = RET.DBERR
    return result.to_dict()
def save_search_record(q, ip, item, user, out_n):
    """Save a search record.

    Looks up (or creates) the SearchKeyWord for *item*, bumps its
    record_time / out_number counters, and stores a SearchRecord row
    linking the full query *q*, the client *ip*, and the user.
    """
    try:
        key_word = SearchKeyWord.objects.get(words=item)
    # FIX: the original used a bare `except:`, which also swallows
    # KeyboardInterrupt/SystemExit and genuine DB errors. Only a lookup
    # miss should create a new keyword.
    except SearchKeyWord.DoesNotExist:
        key_word = SearchKeyWord(words=item)
        key_word.save()
    key_word.record_time += 1
    key_word.out_number += out_n
    key_word.save()
    search_record = SearchRecord(
        key_word=key_word,
        ip=ip,
        total_word=q,
        # anonymous users are recorded with the sentinel id '0'
        recorder=user.nid if user.is_authenticated() else '0')
    search_record.save()
def fetch_tweets(search_request):
    """Fetch tweets matching *search_request* via Twython, pull the URLs
    out of them, persist a SearchKeyWord record, and return the URLs."""
    client = Twython(
        settings.TWITTER_CONSUMER_KEY,
        settings.TWITTER_CONSUMER_SECRET,
        settings.TWITTER_ACCESS_TOKEN,
        settings.TWITTER_ACCESS_TOKEN_SECRET,
    )

    found_urls = extract_urls(client.search_gen(search_request))

    # Record this search together with the URLs it produced.
    record = SearchKeyWord()
    record.gifs = found_urls
    record.search_keyword = search_request
    record.updated_at = datetime.now()
    print(record)
    record.save()

    return found_urls