Beispiel #1
0
    def get(self):
        """
        Fetch the article feed for a channel.
        /v1_1/articles?channel_id&timestamp
        """
        req = RequestParser()
        req.add_argument('channel_id', type=parser.channel_id,
                         required=True, location='args')
        req.add_argument('timestamp', type=inputs.positive,
                         required=True, location='args')
        req.add_argument('with_top', type=inputs.boolean,
                         required=True, location='args')
        params = req.parse_args()
        channel_id = params.channel_id
        timestamp = params.timestamp

        per_page = constants.DEFAULT_ARTICLE_PER_PAGE_MIN
        try:
            # Client sends a millisecond epoch; render it as a local
            # ISO-like publication time for the feed entries.
            feed_time = time.strftime('%Y-%m-%dT%H:%M:%S',
                                      time.localtime(timestamp / 1000))
        except Exception:
            return {'message': 'timestamp param error'}, 400

        now = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')
        results = []

        if params.with_top:
            # Prepend the channel's pinned (top) articles, stamped "now".
            for top_id in cache_article.ChannelTopArticlesStorage(
                    channel_id).get():
                info = cache_article.ArticleInfoCache(top_id).get()
                if info:
                    info['pubdate'] = now
                    results.append(info)

        # Recommended articles from the feed service.
        feeds, pre_timestamp = self._feed_articles(channel_id, timestamp,
                                                   per_page)

        for feed in feeds:
            info = cache_article.ArticleInfoCache(feed.article_id).get()
            if info:
                info['pubdate'] = feed_time
                # Tracking parameters handed back for burying-point logs.
                info['trace'] = {
                    'click': feed.params.click,
                    'collect': feed.params.collect,
                    'share': feed.params.share,
                    'read': feed.params.read,
                }
                results.append(info)

        return {'pre_timestamp': pre_timestamp, 'results': results}
Beispiel #2
0
    def get(self, event):
        """
        IM smoke test: emit one socket.io notification by event type.

        :param event: 'f' follow, 'l' like, 'c' comment; anything else -> 404
        """
        user_id = 1

        if event == 'f':
            # Follow notification, sent to a fixed test user's room.
            target = 5
            profile = cache_user.UserProfileCache(user_id).get()
            payload = {
                'user_id': user_id,
                'user_name': profile['name'],
                'user_photo': profile['photo'],
                'timestamp': int(time.time())
            }
            current_app.sio.emit('following notify', data=payload,
                                 room=str(target))
            return {'message': '已发送following notify事件'}

        if event == 'l':
            # Like notification, sent to the article author's room.
            target = 141428
            profile = cache_user.UserProfileCache(user_id).get()
            art = cache_article.ArticleInfoCache(target).get()
            payload = {
                'user_id': user_id,
                'user_name': profile['name'],
                'user_photo': profile['photo'],
                'art_id': target,
                'art_title': art['title'],
                'timestamp': int(time.time())
            }
            current_app.sio.emit('liking notify', data=payload,
                                 room=str(art['aut_id']))
            return {'message': '已发送liking notify事件'}

        if event == 'c':
            # Comment notification, sent to the article author's room.
            article_id = 141428
            profile = cache_user.UserProfileCache(user_id).get()
            art = cache_article.ArticleInfoCache(article_id).get()
            payload = {
                'user_id': user_id,
                'user_name': profile['name'],
                'user_photo': profile['photo'],
                'art_id': article_id,
                'art_title': art['title'],
                'timestamp': int(time.time())
            }
            current_app.sio.emit('comment notify', data=payload,
                                 room=str(art['aut_id']))
            return {'message': '已发送comment notify事件'}

        return {'message': '错误的事件'}, 404
Beispiel #3
0
    def get(self):
        """
        Paginated article listing for a channel.
        /v1_0/articles?channel_id&page&per_page
        """
        req = RequestParser()
        req.add_argument('channel_id', type=parser.channel_id,
                         required=True, location='args')
        req.add_argument('page', type=inputs.positive,
                         required=False, location='args')
        req.add_argument('per_page',
                         type=inputs.int_range(
                             constants.DEFAULT_ARTICLE_PER_PAGE_MIN,
                             constants.DEFAULT_ARTICLE_PER_PAGE_MAX,
                             'per_page'),
                         required=False, location='args')
        params = req.parse_args()
        channel_id = params.channel_id
        page = 1 if params.page is None else params.page
        per_page = params.per_page or constants.DEFAULT_ARTICLE_PER_PAGE_MIN

        results = []

        if page == 1:
            # First page: prepend the channel's pinned articles.
            for top_id in cache_article.ChannelTopArticlesStorage(
                    channel_id).get():
                info = cache_article.ArticleInfoCache(top_id).get()
                if info:
                    results.append(info)

        # Recommended feed; the feed service does not take the page number.
        feeds = self._feed_articles(channel_id, per_page)

        for feed in feeds:
            info = cache_article.ArticleInfoCache(feed.article_id).get()
            if info:
                # Tracking parameters handed back for burying-point logs.
                info['trace'] = {
                    'click': feed.params.click,
                    'collect': feed.params.collect,
                    'share': feed.params.share,
                    'read': feed.params.read,
                }
                results.append(info)

        return {'page': page, 'per_page': per_page, 'results': results}
Beispiel #4
0
    def post(self):
        """
        Collect (bookmark) an article for the current user.
        """
        req = RequestParser()
        req.add_argument('target', type=parser.article_id, required=True,
                         location='json')
        req.add_argument('Trace', type=inputs.regex(r'^.+$'), required=False,
                         location='headers')
        params = req.parse_args()
        target = params.target

        # Burying-point log, only when the client sent a Trace header.
        if params.Trace:
            info = cache_article.ArticleInfoCache(target).get()
            write_trace_log(params.Trace, channel_id=info['ch_id'])

        affected = 1
        try:
            db.session.add(Collection(user_id=g.user_id, article_id=target))
            db.session.commit()
        except IntegrityError:
            # Row already exists -> revive a soft-deleted collection instead.
            db.session.rollback()
            affected = Collection.query.filter_by(
                user_id=g.user_id, article_id=target,
                is_deleted=True).update({'is_deleted': False})
            db.session.commit()

        if affected > 0:
            # Something actually changed: drop the stale cache and bump counters.
            cache_user.UserArticleCollectionsCache(g.user_id).clear()
            cache_statistic.ArticleCollectingCountStorage.incr(target)
            cache_statistic.UserArticleCollectingCountStorage.incr(g.user_id)

        return {'target': target}, 201
Beispiel #5
0
    def get(self, user_id):
        """
        List articles authored by ``user_id`` (paginated).
        """
        if not cache_user.UserProfileCache(user_id).exists():
            return {'message': 'Invalid request.'}, 400

        req = RequestParser()
        req.add_argument('page', type=inputs.positive, required=False,
                         location='args')
        req.add_argument('per_page',
                         type=inputs.int_range(
                             constants.DEFAULT_ARTICLE_PER_PAGE_MIN,
                             constants.DEFAULT_ARTICLE_PER_PAGE_MAX,
                             'per_page'),
                         required=False, location='args')
        params = req.parse_args()
        page = 1 if params.page is None else params.page
        per_page = params.per_page or constants.DEFAULT_ARTICLE_PER_PAGE_MIN

        total_count, article_ids = cache_user.UserArticlesCache(
            user_id).get_page(page, per_page)

        # Drop ids whose article info is missing from the cache/db.
        results = [info for info in
                   (cache_article.ArticleInfoCache(aid).get()
                    for aid in article_ids)
                   if info]

        return {'total_count': total_count, 'page': page,
                'per_page': per_page, 'results': results}
Beispiel #6
0
    def get(self):
        """
        List the current user's collected (bookmarked) articles.

        Query args:
            page: positive int, defaults to 1
            per_page: bounded by DEFAULT_ARTICLE_PER_PAGE_MIN/MAX

        :return: dict with total_count, page, per_page, results
        """
        qs_parser = RequestParser()
        qs_parser.add_argument('page',
                               type=inputs.positive,
                               required=False,
                               location='args')
        qs_parser.add_argument('per_page',
                               type=inputs.int_range(
                                   constants.DEFAULT_ARTICLE_PER_PAGE_MIN,
                                   constants.DEFAULT_ARTICLE_PER_PAGE_MAX,
                                   'per_page'),
                               required=False,
                               location='args')
        args = qs_parser.parse_args()
        page = 1 if args.page is None else args.page
        per_page = args.per_page if args.per_page else constants.DEFAULT_ARTICLE_PER_PAGE_MIN

        total_count, collections = cache_user.UserArticleCollectionsCache(
            g.user_id).get_page(page, per_page)

        results = []
        for article_id in collections:
            article = cache_article.ArticleInfoCache(article_id).get()
            # BUGFIX: skip articles missing from the cache/db (e.g. deleted)
            # so the response never contains null entries — consistent with
            # every other listing endpoint in this module.
            if article:
                results.append(article)

        return {
            'total_count': total_count,
            'page': page,
            'per_page': per_page,
            'results': results
        }
Beispiel #7
0
    def get(self):
        """
        List the current user's own articles, with per-article liking flags.
        """
        req = RequestParser()
        req.add_argument('page', type=inputs.positive, required=False,
                         location='args')
        req.add_argument('per_page',
                         type=inputs.int_range(
                             constants.DEFAULT_ARTICLE_PER_PAGE_MIN,
                             constants.DEFAULT_ARTICLE_PER_PAGE_MAX,
                             'per_page'),
                         required=False, location='args')
        params = req.parse_args()
        page = 1 if params.page is None else params.page
        per_page = params.per_page or constants.DEFAULT_ARTICLE_PER_PAGE_MIN

        total_count, article_ids = cache_user.UserArticlesCache(
            g.user_id).get_page(page, per_page)

        # One attitude-cache handle reused for every article on the page.
        attitude_cache = cache_user.UserArticleAttitudeCache(g.user_id)
        results = []
        for aid in article_ids:
            info = cache_article.ArticleInfoCache(aid).get()
            if info:
                info['is_liking'] = attitude_cache.determine_liking_article(aid)
                results.append(info)

        return {'total_count': total_count, 'page': page,
                'per_page': per_page, 'results': results}
Beispiel #8
0
    def get(self):
        """
        Fetch the recommended article feed for a channel.

        Query args:
            channel_id: validated channel id (required)
            timestamp: positive int, client time in milliseconds (required)

        :return: dict with pre_timestamp and results
        """
        qs_parser = RequestParser()
        qs_parser.add_argument('channel_id', type=parser.channel_id, required=True, location='args')
        qs_parser.add_argument('timestamp', type=inputs.positive, required=True, location='args')
        args = qs_parser.parse_args()
        channel_id = args.channel_id
        timestamp = args.timestamp
        per_page = constants.DEFAULT_ARTICLE_PER_PAGE_MIN
        try:
            # BUGFIX: derive the pubdate from the client-supplied millisecond
            # timestamp, as the sibling v1_1 endpoint does. The previous code
            # used time.time(), which ignored the parameter entirely and made
            # the 'timestamp param error' branch below unreachable.
            feed_time = time.strftime('%Y-%m-%dT%H:%M:%S', time.localtime(timestamp / 1000))
        except Exception:
            return {'message': 'timestamp param error'}, 400

        results = []

        # Recommended articles from the feed service.
        feeds, pre_timestamp = self._feed_articles(channel_id, timestamp, per_page)

        for feed in feeds:
            article = cache_article.ArticleInfoCache(feed.article_id).get()
            if article:
                article['pubdate'] = feed_time
                # Tracking parameters handed back for burying-point logs.
                article['trace'] = {
                    'click': feed.track.click,
                    'collect': feed.track.collect,
                    'share': feed.track.share,
                    'read': feed.track.read
                }
                results.append(article)

        return {'pre_timestamp': pre_timestamp, 'results': results}
Beispiel #9
0
    def get(self):
        """
        Fetch the recommended article feed for a channel (grpc-backed).
        """
        req = RequestParser()
        req.add_argument('channel_id', type=int, required=True,
                         location='args')
        req.add_argument('timestamp', type=inputs.positive, required=True,
                         location='args')
        params = req.parse_args()

        channel_id = params.channel_id
        timestamp = params.timestamp
        # Number of recommended articles per request.
        per_page = 10

        try:
            # Publication time shown to the client: server's current time.
            feed_time = time.strftime('%Y-%m-%dT%H:%M:%S',
                                      time.localtime(time.time()))
        except Exception:
            return {'message': 'timestamp param error'}, 400

        # TODO: fetch the recommendation list via grpc
        feeds, pre_timestamp = self._feed_articles(channel_id, timestamp,
                                                   per_page)

        results = []
        for feed in feeds:
            # feed.article_id identifies the recommended article; pull its
            # cached info and attach the tracking parameters.
            info = cache_article.ArticleInfoCache(feed.article_id).get()
            if info:
                info['pubdate'] = feed_time
                info['trace'] = {
                    'click': feed.track.click,
                    'collect': feed.track.collect,
                    'share': feed.track.share,
                    'read': feed.track.read,
                }
                results.append(info)

        return {'pre_timestamp': pre_timestamp, 'results': results}
Beispiel #10
0
    def post(self):
        """
        Like an article.

        Creates or updates the current user's attitude record for the target
        article, keeps the liking/dislike counters in sync with the original
        attitude, clears the user's attitude cache, and notifies the article
        author over socket.io.

        :return: {'target': article_id}, 201
        """
        json_parser = RequestParser()
        json_parser.add_argument('target',
                                 type=parser.article_id,
                                 required=True,
                                 location='json')
        args = json_parser.parse_args()
        target = args.target

        # Before this operation the user may have no attitude record, or may
        # have disliked the article; the original attitude decides which
        # counters must be incremented or decremented below.
        atti = Attitude.query.filter_by(user_id=g.user_id,
                                        article_id=target).first()
        if atti is None:
            # No record yet: create a fresh "liking" attitude.
            attitude = Attitude(user_id=g.user_id,
                                article_id=target,
                                attitude=Attitude.ATTITUDE.LIKING)
            db.session.add(attitude)
            db.session.commit()
            cache_statistic.ArticleLikingCountStorage.incr(target)
        else:
            if atti.attitude == Attitude.ATTITUDE.DISLIKE:
                # Was "dislike": flip to liking, bump the liking counter and
                # roll the dislike counter back by one.
                atti.attitude = Attitude.ATTITUDE.LIKING
                db.session.add(atti)
                db.session.commit()
                cache_statistic.ArticleLikingCountStorage.incr(target)
                cache_statistic.ArticleDislikeCountStorage.incr(target, -1)
                cache_statistic.UserLikedCountStorage.incr(g.user_id)
            elif atti.attitude is None:
                # Record exists but carries no attitude: set it to liking.
                atti.attitude = Attitude.ATTITUDE.LIKING
                db.session.add(atti)
                db.session.commit()
                cache_statistic.ArticleLikingCountStorage.incr(target)
                cache_statistic.UserLikedCountStorage.incr(g.user_id)

        # cache_article.ArticleUserAttitudeCache(g.user_id, target).clear()
        cache_user.UserArticleAttitudeCache(g.user_id).clear()

        # Push a "liking notify" event to the article author's room.
        _user = cache_user.UserProfileCache(g.user_id).get()
        _article = cache_article.ArticleInfoCache(target).get()
        _data = {
            'user_id': g.user_id,
            'user_name': _user['name'],
            'user_photo': _user['photo'],
            'art_id': target,
            'art_title': _article['title'],
            'timestamp': int(time.time())
        }
        current_app.sio.emit('liking notify',
                             data=_data,
                             room=str(_article['aut_id']))

        return {'target': target}, 201
Beispiel #11
0
    def get(self):
        """
        Full-text article search backed by Elasticsearch.
        """
        req = RequestParser()
        req.add_argument('q', type=inputs.regex(r'^.{1,50}$'), required=True,
                         location='args')
        req.add_argument('page', type=inputs.positive, required=False,
                         location='args')
        req.add_argument('per_page',
                         type=inputs.int_range(
                             constants.DEFAULT_SEARCH_PER_PAGE_MIN,
                             constants.DEFAULT_SEARCH_PER_PAGE_MAX,
                             'per_page'),
                         required=False, location='args')
        params = req.parse_args()
        q = params.q
        page = 1 if params.page is None else params.page
        per_page = params.per_page or constants.DEFAULT_SEARCH_PER_PAGE_MIN

        # Match on _all, restricted to approved (status=2) documents;
        # _source disabled because only the ids are needed.
        body = {
            'from': (page - 1) * per_page,
            'size': per_page,
            '_source': False,
            'query': {
                'bool': {
                    'must': [
                        {'match': {'_all': q}}
                    ],
                    'filter': [
                        {'term': {'status': 2}}
                    ]
                }
            }
        }
        resp = current_app.es.search(index='articles', doc_type='article',
                                     body=body)

        total_count = resp['hits']['total']

        results = []
        for hit in resp['hits']['hits']:
            info = cache_article.ArticleInfoCache(int(hit['_id'])).get()
            if info:
                results.append(info)

        # Persist the query as search history (first page only, best effort).
        if g.user_id and page == 1:
            try:
                cache_user.UserSearchingHistoryStorage(g.user_id).save(q)
            except RedisError as e:
                current_app.logger.error(e)

        return {'total_count': total_count, 'page': page,
                'per_page': per_page, 'results': results}
Beispiel #12
0
    def put(self, target):
        """
        Update an existing article owned by the current user.

        :param target: int, id of the article to update
        :return: {'id': target}, 201 on success; 400 if the article does not
                 exist or is not owned by the user; 507 if the commit fails
        """
        req_parser = RequestParser()
        req_parser.add_argument('draft', type=inputs.boolean, required=False, location='args')
        req_parser.add_argument('title', type=inputs.regex(r'.{5,30}'), required=True, location='json')
        req_parser.add_argument('content', type=inputs.regex(r'.+'), required=True, location='json')
        req_parser.add_argument('cover', type=self._cover, required=True, location='json')
        req_parser.add_argument('channel_id', type=self._channel_id, required=True, location='json')
        args = req_parser.parse_args()
        content = args['content']
        cover = args['cover']
        draft = args['draft']

        # Ownership check: the article must exist and belong to this user.
        ret = db.session.query(func.count(Article.id)).filter(Article.id == target, Article.user_id == g.user_id).first()
        if ret[0] == 0:
            return {'message': 'Invalid article.'}, 400

        # Auto cover (type == -1): generate a cover from the content.
        cover_type = cover['type']
        if cover_type == -1:
            cover = self._generate_article_cover(content)

        # Draft flag decides the post-update review status.
        Article.query.filter_by(id=target).update(dict(
            channel_id=args['channel_id'],
            title=args['title'],
            cover=cover,
            status=Article.STATUS.DRAFT if draft else Article.STATUS.UNREVIEWED
        ))

        ArticleContent.query.filter_by(id=target).update(dict(content=content))

        try:
            db.session.commit()
        except Exception as e:
            current_app.logger.error(e)
            db.session.rollback()
            return {'message': 'Server has something wrong.'}, 507

        # Invalidate every cache that may hold stale article data.
        cache_user.UserArticlesCache(g.user_id).clear()
        cache_article.ArticleInfoCache(target).clear()
        cache_article.ArticleDetailCache(target).clear()

        # if not draft:
            # TODO machine review
            # TODO push notification for the new article

        return {'id': target}, 201
Beispiel #13
0
def article_id(value):
    """
    Validate that *value* is the id of an existing article.

    :param value: raw value to validate
    :return: the article id as an int
    :raises ValueError: if the value is not a positive integer or no
        article with that id exists
    """
    try:
        aid = int(value)
    except Exception:
        raise ValueError('Invalid target article id.')
    if aid <= 0:
        raise ValueError('Invalid target article id.')
    if not cache_article.ArticleInfoCache(aid).exists():
        raise ValueError('Invalid target article id.')
    return aid
Beispiel #14
0
    def post(self):
        """
        Record a read-duration burying-point log for an article.
        """
        req = RequestParser()
        req.add_argument('Trace', type=inputs.regex(r'^.+$'),
                         required=True, location='headers')
        req.add_argument('duration', type=inputs.natural,
                         required=True, location='json')
        req.add_argument('art_id', type=parser.article_id,
                         required=True, location='json')
        params = req.parse_args()

        # Resolve the article's channel for the trace entry.
        info = cache_article.ArticleInfoCache(params.art_id).get()
        write_trace_log(params.Trace, params.duration,
                        channel_id=info['ch_id'])

        return {'message': 'OK'}, 201
Beispiel #15
0
    def get(self):
        """
        List the current user's own articles (paginated).
        """
        req = RequestParser()
        req.add_argument('page', type=inputs.positive,
                         required=False, location='args')
        req.add_argument('per_page',
                         type=inputs.int_range(
                             constants.DEFAULT_ARTICLE_PER_PAGE_MIN,
                             constants.DEFAULT_ARTICLE_PER_PAGE_MAX,
                             'per_page'),
                         required=False, location='args')
        params = req.parse_args()
        page = 1 if params.page is None else params.page
        per_page = params.per_page or constants.DEFAULT_ARTICLE_PER_PAGE_MIN

        total_count, article_ids = cache_user.UserArticlesCache(
            g.user_id).get_page(page, per_page)

        # Drop ids whose article info is missing from the cache/db.
        results = []
        for aid in article_ids:
            info = cache_article.ArticleInfoCache(aid).get()
            if info:
                results.append(info)

        return {
            'total_count': total_count,
            'page': page,
            'per_page': per_page,
            'results': results
        }
Beispiel #16
0
    def get(self, article_id):
        """
        Article detail view.

        :param article_id: int article id
        """
        user_id = g.user_id

        if not cache_article.ArticleInfoCache(article_id).exists():
            abort(404, message='The article does not exist.')

        article = cache_article.ArticleDetailCache(article_id).get()

        # Defaults for anonymous visitors.
        article['is_followed'] = False
        article['attitude'] = None
        article['is_collected'] = False

        if user_id:
            # Logged-in user: record reading history (best effort).
            try:
                cache_user.UserReadingHistoryStorage(user_id).save(article_id)
            except ConnectionError as e:
                current_app.logger.error(e)

            # Does the user follow the author?
            article['is_followed'] = cache_user.UserRelationshipCache(
                user_id).determine_follows_target(article['aut_id'])

            # Has the user collected this article?
            article['is_collected'] = cache_user.UserArticleCollectionsCache(
                g.user_id).determine_collect_target(article_id)

        # Bump reading counters for the article and its author.
        cache_statistic.ArticleReadingCountStorage.incr(article_id)
        cache_statistic.UserArticlesReadingCountStorage.incr(article['aut_id'])

        return article
Beispiel #17
0
    def get(self):
        """
        Article search backed by Elasticsearch.

        Response shape::

            {"page": .., "per_page": .., "total_count": ..,
             "results": [{"article_id": .., "title": .., "cover": ..}, ...]}
        """
        req = RequestParser()
        req.add_argument('q', type=inputs.regex(r'^.{1,50}$'),
                         required=True, location='args')
        req.add_argument('page', type=inputs.positive,
                         required=False, location='args')
        req.add_argument('per_page',
                         type=inputs.int_range(
                             constants.DEFAULT_SEARCH_PER_PAGE_MIN,
                             constants.DEFAULT_SEARCH_PER_PAGE_MAX,
                             'per_page'),
                         required=False, location='args')
        params = req.parse_args()
        q = params.q
        page = params.page if params.page else 1
        per_page = params.per_page if params.per_page else constants.DEFAULT_SEARCH_PER_PAGE_MIN

        # Match on _all, restricted to approved articles; fetch only the
        # article_id and title fields from the index.
        body = {
            "from": per_page * (page - 1),
            "size": per_page,
            "_source": ["article_id", "title"],
            "query": {
                "bool": {
                    "must": {
                        "match": {
                            "_all": q
                        }
                    },
                    "filter": {
                        "term": {
                            "status": Article.STATUS.APPROVED
                        }
                    }
                }
            }
        }

        resp = current_app.es.search(index='articles',
                                     doc_type='article',
                                     body=body)

        total_count = resp['hits']['total']
        results = []
        for hit in resp['hits']['hits']:
            info = cache_article.ArticleInfoCache(hit['_id']).get()
            if info:
                results.append(info)

        return {
            "page": page,
            "per_page": per_page,
            "total_count": total_count,
            "results": results
        }
Beispiel #18
0
    def post(self):
        """
        Create a comment.

        Two cases, distinguished by the presence of ``art_id`` in the body:
        without it, ``target`` is an article id and a top-level comment is
        created; with it, ``target`` is a comment id and a reply is created.
        In both cases the comment is persisted, counters and comment caches
        are updated, and (for top-level comments) the article author is
        notified over socket.io.

        :return: {'com_id', 'target', 'art_id'}, 201
        """
        json_parser = RequestParser()
        json_parser.add_argument('target',
                                 type=positive,
                                 required=True,
                                 location='json')
        json_parser.add_argument('content', required=True, location='json')
        json_parser.add_argument('art_id',
                                 type=parser.article_id,
                                 required=False,
                                 location='json')

        args = json_parser.parse_args()
        target = args.target
        content = args.content
        article_id = args.art_id

        if not content:
            return {'message': 'Empty content.'}, 400

        # The article being commented on is article_id for replies, or
        # target itself for top-level comments.
        allow_comment = cache_article.ArticleInfoCache(
            article_id or target).determine_allow_comment()
        if not allow_comment:
            return {'message': 'Article denied comment.'}, 400

        if not article_id:
            # Top-level comment on an article.
            article_id = target

            comment_id = current_app.id_worker.get_id()
            comment = Comment(id=comment_id,
                              user_id=g.user_id,
                              article_id=article_id,
                              parent_id=None,
                              content=content)
            db.session.add(comment)
            db.session.commit()

            # TODO once comment review exists, populate caches during review
            cache_statistic.ArticleCommentCountStorage.incr(article_id)
            try:
                cache_comment.CommentCache(comment_id).save(comment)
            except SQLAlchemyError as e:
                current_app.logger.error(e)
            cache_comment.ArticleCommentsCache(article_id).add(comment)

            # Push a "comment notify" event to the article author's room.
            _user = cache_user.UserProfileCache(g.user_id).get()
            _article = cache_article.ArticleInfoCache(article_id).get()
            _data = {
                'user_id': g.user_id,
                'user_name': _user['name'],
                'user_photo': _user['photo'],
                'art_id': article_id,
                'art_title': _article['title'],
                'timestamp': int(time.time())
            }
            current_app.sio.emit('comment notify',
                                 data=_data,
                                 room=str(_article['aut_id']))

        else:
            # Reply to an existing comment; the parent must exist.
            exists = cache_comment.CommentCache(target).exists()
            if not exists:
                return {'message': 'Invalid target comment id.'}, 400

            comment_id = current_app.id_worker.get_id()
            comment = Comment(id=comment_id,
                              user_id=g.user_id,
                              article_id=article_id,
                              parent_id=target,
                              content=content)
            db.session.add(comment)
            db.session.commit()

            # TODO once comment review exists, populate caches during review
            cache_statistic.ArticleCommentCountStorage.incr(article_id)
            cache_statistic.CommentReplyCountStorage.incr(target)
            try:
                cache_comment.CommentCache(comment_id).save(comment)
            except SQLAlchemyError as e:
                current_app.logger.error(e)
            cache_comment.CommentRepliesCache(target).add(comment)

        return {
            'com_id': comment.id,
            'target': target,
            'art_id': article_id
        }, 201
Beispiel #19
0
    def get(self):
        """
        Search articles matching a user-supplied keyword.

        Query args:
            q: search keyword, 1-50 characters (required)
            page: result page number, defaults to 1
            per_page: results per page, bounded by the configured min/max,
                defaults to the configured minimum

        Returns a dict with pagination metadata and hydrated article info.
        """
        # A token was supplied but failed verification -> reject outright.
        if g.use_token and not g.user_id:
            return {'message': 'Token has some errors.'}, 401

        req_parser = RequestParser()
        req_parser.add_argument('q',
                                type=inputs.regex(r'^.{1,50}$'),
                                required=True,
                                location='args')
        req_parser.add_argument('page',
                                type=inputs.positive,
                                required=False,
                                location='args')
        req_parser.add_argument('per_page',
                                type=inputs.int_range(
                                    constants.DEFAULT_SEARCH_PER_PAGE_MIN,
                                    constants.DEFAULT_SEARCH_PER_PAGE_MAX,
                                    'per_page'),
                                required=False,
                                location='args')
        params = req_parser.parse_args()
        keyword = params.q
        # inputs.positive guarantees page >= 1 when present, so `or` is safe.
        page = params.page or 1
        per_page = params.per_page or constants.DEFAULT_SEARCH_PER_PAGE_MIN

        # Full-text query against Elasticsearch. _source is skipped because
        # article data is re-read from the cache layer by id; the filter
        # restricts results to articles with status value 2.
        dsl = {
            'from': (page - 1) * per_page,
            'size': per_page,
            '_source': False,
            'query': {
                'bool': {
                    'must': [{
                        'match': {
                            '_all': keyword
                        }
                    }],
                    'filter': [{
                        'term': {
                            'status': {
                                'value': 2
                            }
                        }
                    }]
                }
            }
        }
        resp = current_app.es.search(index='articles',
                                     doc_type='article',
                                     body=dsl)

        total_count = resp['hits']['total']

        # Hydrate each hit id from the article cache, dropping cache misses.
        hydrated = (cache_article.ArticleInfoCache(int(hit['_id'])).get()
                    for hit in resp['hits']['hits'])
        article_list = [info for info in hydrated if info]

        # Persist the keyword into the user's search history — first page
        # only, best effort: Redis failures are logged, never surfaced.
        if g.user_id and page == 1:
            try:
                cache_user.UserSearchingHistoryStorage(g.user_id).save(keyword)
            except RedisError as e:
                current_app.logger.error(e)

        # Seed the autocomplete index with this keyword when the search
        # produced results and no suggestion document exists for it yet.
        if total_count and page == 1:
            suggest_dsl = {'_source': False, 'query': {'match': {'suggest': keyword}}}
            resp = current_app.es.search(index='completions',
                                        doc_type='words',
                                        body=suggest_dsl)
            if resp['hits']['total'] == 0:
                suggest_doc = {
                    'suggest': {
                        'input': keyword,
                        'weight': constants.USER_KEYWORD_ES_SUGGEST_WEIGHT
                    }
                }
                try:
                    current_app.es.index(index='completions',
                                         doc_type='words',
                                         body=suggest_doc)
                except Exception:
                    # Suggestion seeding is purely opportunistic.
                    pass

        return {
            'total_count': total_count,
            'page': page,
            'per_page': per_page,
            'results': article_list
        }
Beispiel #20
0
    def get(self, article_id):
        """
        Fetch the full detail of a single article.

        :param article_id: int, id of the article to fetch
        """
        # Optional tracing header used by the recommendation pipeline.
        hdr_parser = RequestParser()
        hdr_parser.add_argument('Trace', type=inputs.regex(r'^.+$'), required=False, location='headers')
        params = hdr_parser.parse_args()

        user_id = g.user_id

        # 404 early when the article is unknown to the cache layer.
        if not cache_article.ArticleInfoCache(article_id).exists():
            abort(404, message='The article does not exist.')

        article = cache_article.ArticleDetailCache(article_id).get()

        # Emit the trace log required by the recommendation system.
        if params.Trace:
            write_trace_log(params.Trace, channel_id=article['ch_id'])

        # Defaults shown to anonymous visitors.
        article['is_followed'] = False
        article['attitude'] = None
        article['is_collected'] = False

        if user_id:
            # Logged-in readers get a reading-history entry (best effort —
            # connection failures are logged, not surfaced).
            try:
                cache_user.UserReadingHistoryStorage(user_id).save(article_id)
            except ConnectionError as e:
                current_app.logger.error(e)

            # Whether the reader follows the article's author.
            article['is_followed'] = cache_user.UserRelationshipCache(user_id).determine_follows_target(article['aut_id'])

            # The reader's attitude (like / dislike) toward this article;
            # -1 is the sentinel used when the lookup itself fails.
            try:
                article['attitude'] = cache_user.UserArticleAttitudeCache(user_id).get_article_attitude(article_id)
            except SQLAlchemyError as e:
                current_app.logger.error(e)
                article['attitude'] = -1

            # Whether the reader has collected (bookmarked) this article.
            article['is_collected'] = cache_user.UserArticleCollectionsCache(user_id).determine_collect_target(article_id)

        # Related-article recommendations; failures never break the page.
        article['recomments'] = []
        try:
            for similar_id in self._feed_similar_articles(article_id):
                info = cache_article.ArticleInfoCache(similar_id).get()
                article['recomments'].append({
                    'art_id': info['art_id'],
                    'title': info['title']
                })
        except Exception as e:
            current_app.logger.error(e)

        # Bump read counters for both the article and its author.
        cache_statistic.ArticleReadingCountStorage.incr(article_id)
        cache_statistic.UserArticlesReadingCountStorage.incr(article['aut_id'])

        return article