Code Example #1
    def get(self, request, *args, **kwargs):
        params = request.GET
        COUNT_ELEMENTS = 5
        errors = []

        limit = COUNT_ELEMENTS
        offset = 0

        form = forms.SearchForm(params)
        if form.is_valid():
            pointsreq = MainModels.Person.objects.all()

            name = form.cleaned_data.get("s")
            users_list = []
            morph = get_morph('/home/tenoclock/yasenput/dicts')
            if name:
                #pointsreq = MainModels.Person.search.query(params.get("s"))
                #search = SphinxSearch()
                search = SphinxQuerySet(index="auth_user")
                phrase_list = name.split(' ')
                for phrase in phrase_list:
                    if phrase != '':
                        name_morph = morph.normalize(phrase.upper())
                        for name_m in name_morph:
                            search_query = search.query(name_m)
                            for splited_item in search_query:
                                person = MainModels.Person.objects.get(id=splited_item['id'])
                                if person not in users_list:
                                    users_list.append(person)

            content = form.cleaned_data.get("content")
            # Only users_list is sliced for the response below; pointsreq just
            # carries the requested ordering.
            if content == 'new':
                pointsreq = pointsreq.order_by('-id')
            elif content == "popular":
                pointsreq = pointsreq.annotate(usfiliwers=Count('followers__id')).order_by('-usfiliwers', '-id')
            else:
                pointsreq = users_list


            points = users_list[offset:limit]

            YpJson = YpSerialiser()
            return HttpResponse(YpJson.serialize(points, fields=("username", "first_name", "last_name")),
                                mimetype="application/json")
        else:
            e = form.errors
            for er in e:
                errors.append(er + ':' + e[er][0])
            return JsonHTTPResponse({"status": 0, "txt": ", ".join(errors)})
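For reference, the morphological lookup in this view uses the legacy pymorphy API (get_morph plus normalize), which is why each phrase is upper-cased before being fed to Sphinx. A minimal sketch, assuming the old top-level import path and a stand-in dictionary directory:

# -*- coding: utf-8 -*-
# Sketch only: the import path and dictionary location are assumptions.
from pymorphy import get_morph

morph = get_morph('/path/to/pymorphy/dicts')   # hypothetical dictionary path
word = u'ЗАПРОСЫ'                              # any word to normalize
normal_forms = morph.normalize(word.upper())   # returns a set of normal forms
print(normal_forms)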
Code Example #2
def _get_sphinx_results(self):
    try:
        return SphinxQuerySet._get_sphinx_results(self)
    except SearchError:
        # Try stripping the characters that tend to break the query.
        for c in '|!@()~/<=^$':
            self._query = self._query.replace(c, ' ')
        try:
            return SphinxQuerySet._get_sphinx_results(self)
        except SearchError:
            # Still failing; strip the quotes and dashes as well.
            for c in '"-':
                self._query = self._query.replace(c, ' ')
            return SphinxQuerySet._get_sphinx_results(self)
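As a usage sketch, an override like this would normally live in a SphinxQuerySet subclass so that queries containing Sphinx operator characters degrade gracefully instead of raising SearchError. The class name below is hypothetical, and the import assumes django-sphinx's usual layout where SearchError sits alongside SphinxQuerySet:

# Sketch only: SafeSphinxQuerySet is a hypothetical subclass name.
from djangosphinx.models import SphinxQuerySet, SearchError

class SafeSphinxQuerySet(SphinxQuerySet):
    def _get_sphinx_results(self):
        try:
            return SphinxQuerySet._get_sphinx_results(self)
        except SearchError:
            # Strip operator characters and retry, as in the example above.
            for c in '|!@()~/<=^$"-':
                self._query = self._query.replace(c, ' ')
            return SphinxQuerySet._get_sphinx_results(self)

# results = SafeSphinxQuerySet(index="auth_user").query('foo @bar (baz')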
Code Example #3
class SearchItems(models.Model):
    name = models.CharField('Сущность', max_length=255)    # verbose name: "Entity"
    type_of_item = models.IntegerField('Тип сущности')     # verbose name: "Entity type"
    point = models.ForeignKey(Points)
    objects = models.Manager()
    search = SphinxQuerySet(index="search_searchitems")
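Declared this way, the Sphinx-backed manager can be queried like any other manager; a minimal sketch, assuming the search_searchitems index has been built:

# Sketch only: the query term is arbitrary.
results = SearchItems.search.query(u'museum')
for item in results:
    print(item)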
Code Example #4
File: views.py  Project: krimkus/chicken-app
    def get(self, request, *args, **kwargs):
        query = request.GET.get('q', '')
        filter_query = request.GET.get('filter', None)
        limit = 20

        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1
        if filter_query == 'blog':
            qs = Post.search.query(query)
        else:
            qs = SphinxQuerySet(index=live_indexes()).query(query)
        
        context = self.get_context_data(params=kwargs)

        try:
            offset = limit * (page - 1)
            results = list(qs[offset:offset+limit])
            count = qs.count()
        except Exception:
            count = -1
            results = []
            offset = 0

        context['page'] = page
        context['count'] = count
        context['num_pages'] = max(1, (count + limit - 1) // limit)
        context['object_list'] = results
        context['query'] = query
        context['filter'] = filter_query
        if context['num_pages'] > 1:
            context['is_paginated'] = True
        if page > 1:
            context['previous_page_number'] = page-1
        if page < context['num_pages']:
            context['next_page_number'] = page+1
        return self.render_to_response(context)
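The page count needs ceiling division so that a partial last page is still reachable; a quick standalone check of the arithmetic used above:

# Ceiling division without floats: (count + limit - 1) // limit
limit = 20
for count in (0, 19, 20, 21, 45):
    num_pages = max(1, (count + limit - 1) // limit)
    print(count, num_pages)   # 0 -> 1, 19 -> 1, 20 -> 1, 21 -> 2, 45 -> 3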
Code Example #5
    def get(self, request, *args, **kwargs):

        params = request.GET
        search_res_points = MainModels.Points.search.query(
            params.get('name', ''))
        search_res_sets = CollectionsModels.Collections.search.query(
            params.get('name', ''))
        search = SphinxQuerySet(index="main_points",
                                mode='SPH_MATCH_EXTENDED2',
                                rankmode='SPH_RANK_NONE')
        COUNT_ELEMENTS = LIMITS.POINTS_LIST.POINTS_LIST_COUNT
        errors = []

        form = forms.FiltersForm(params)
        page = int(params.get('page') or 1)
        limit = COUNT_ELEMENTS * page
        offset = (page - 1) * COUNT_ELEMENTS
        if form.is_valid():
            all_items = QuerySetJoin(
                search_res_points.extra(select={
                    'type_of_item': 1,
                    'likes_count': 'SELECT count(*) from main_points_likeusers where main_points_likeusers.points_id=main_points.id',
                    'reviewusersplus': 'SELECT count(*) from main_points_reviews join reviews_reviews on main_points_reviews.reviews_id=reviews_reviews.id where main_points_reviews.points_id=main_points.id and reviews_reviews.rating=1',
                    'reviewusersminus': 'SELECT count(*) from main_points_reviews join reviews_reviews on main_points_reviews.reviews_id=reviews_reviews.id where main_points_reviews.points_id=main_points.id and reviews_reviews.rating=0',
                    'sets': 'SELECT count(*) from main_points_reviews join reviews_reviews on main_points_reviews.reviews_id=reviews_reviews.id where main_points_reviews.points_id=main_points.id and reviews_reviews.rating=0',
                }),
                search_res_sets.extra(select={
                    'type_of_item': 2,
                    'likes_count': 'select count(*) from collections_collections_likeusers where collections_collections_likeusers.collections_id=collections_collections.id',
                })).order_by('ypi')[offset:limit]
            items = json.loads(self.getSerializeCollections(all_items))
            return HttpResponse(json.dumps(items), mimetype="application/json")

        else:
            e = form.errors
            for er in e:
                errors.append(er + ':' + e[er][0])
            return JsonHTTPResponse({"status": 0, "txt": ", ".join(errors)})
Code Example #6
def queryset(self, request):
    return SphinxQuerySet(
        model=self.model,
        index=self.index,
    )
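This override assumes the surrounding class exposes model and index attributes; a minimal sketch of such a holder class (the class name and wiring are hypothetical):

# Sketch only: SphinxSearchMixin and its usage are hypothetical.
from djangosphinx.models import SphinxQuerySet

class SphinxSearchMixin(object):
    model = None   # Django model class
    index = None   # Sphinx index name

    def queryset(self, request):
        # Hand back a Sphinx-backed queryset for the configured model/index.
        return SphinxQuerySet(model=self.model, index=self.index)

# class PersonSearch(SphinxSearchMixin):
#     model = Person
#     index = 'auth_user'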
Code Example #7
def filter(request):

    get = request.REQUEST
    logger.info(get)

    page = get.get('page', None)
    if not page:
        page = 1
    else:
        page = int(page)

    limit = 10

    mode = 'SPH_MATCH_ANY'
    # Profession: expand pro_id into the names of its related specialties.
    pro_id = get.get('pro_id', None)
    to_query = ''
    if pro_id:
        # mbti = Mbti.objects.get( name=mbti_name )
        sql = ('select s.mbti_spe_id, s.name from mbti_profession_specialty ps '
               'inner join mbti_specialty s on ps.mbti_spe_id = s.mbti_spe_id '
               'where ps.mbti_pro_id = %s')
        cursor = connection.cursor()
        cursor.execute(sql, [pro_id])  # parameterized instead of string concatenation
        rs = dictfetchall(cursor)
        for one in rs:
            to_query += ' ' + one['name']

    if to_query == "":
        mode = 'SPH_MATCH_FULLSCAN'

    # Fetch hits grouped by school.
    ss = SphinxQuerySet(
        index='ccard2',
        mode=mode,
        rankmode='SPH_RANK_NONE',
        limit=limit,
        offset=(page - 1) * limit,
        groupby='school_id',
        # groupsort='school_id',
    )
    # ss.setm

    # from_prov = get.get( 'from_prov', None )
    # if from_prov:
    # 	from_prov = from_prov.encode( 'utf-8' )
    # 	from_prov = re.sub( '省$', '', from_prov )
    # 	from_prov = re.sub( '市$', '', from_prov )
    # 	ss = ss.filter( from_prov=mccrc32( from_prov ))

    school_prov = get.get('school_prov', None)
    if school_prov:
        school_prov = school_prov.encode('utf-8')
        school_prov = re.sub('省$', '', school_prov)
        school_prov = re.sub('市$', '', school_prov)
        ss = ss.filter(school_prov=mccrc32(school_prov))

    # stu_type = get.get( 'stu_type', None )
    # if stu_type:
    # 	ss = ss.filter( stu_type=mccrc32( stu_type ))

    level = get.get('level', None)
    if level:
        ss = ss.filter(level=mccrc32(level))

    school_id = get.get('school_id', None)
    if school_id:
        ss = ss.filter(school_id=school_id)

    r = ss.query(to_query).order_by('@weight')
    # for one in r._sphinx:

    rs_list = {}
    ids = []
    for i, one in enumerate(r):
        id = one.get('id')
        logger.info(one)
        logger.info(id)
        rs_list[i] = str(id)
        ids.append(str(id))

    # Fetch the schools for the matched relation ids.
    rs_list_out = {}
    if len(ids) > 0:
        sql = 'select sr.relation_id, s.school_id, s.school_name, s.area as school_prov, s.school_icon, s.school_type, \
		 	s.school_property1, s.school_property2, s.school_url from school s \
		 	inner join school_specialty_relations sr on s.school_id = sr.school_id \
		 	where sr.relation_id in ( ' + (', '.join(ids)) + ' )'
        tmp = {}
        cursor = connection.cursor()
        cursor.execute(sql)
        rs = dictfetchall(cursor)
        for one in rs:
            if one['school_icon']:
                one['school_icon'] = host + '/static' + one['school_icon']
            tmp[str(one['relation_id'])] = one

            # logger.info( one['school_name'].encode( 'utf-8' ) )

        for key, one in rs_list.iteritems():
            rs_list[key] = tmp[one]

        # logger.info( rs_list )
        for key in rs_list:
            school_id_key = rs_list[key]['school_id']
            school_id_key = str(school_id_key)
            rs_list_out[school_id_key] = rs_list[key]

    # Fetch the matching specialties and admission data.
    if len(rs_list_out) > 0:
        # Query again without grouping to collect every matching row.
        ss = SphinxQuerySet(
            index='ccard2',
            mode=mode,
            rankmode='SPH_RANK_NONE',
            limit=1000,
            # offset= (page - 1) * limit ,
            # groupby='school_id',
            # groupsort='school_id',
        )
        # ss.setm

        # from_prov = get.get( 'from_prov', None )
        # if from_prov:
        # 	from_prov = from_prov.encode( 'utf-8' )
        # 	from_prov = re.sub( '省$', '', from_prov )
        # 	from_prov = re.sub( '市$', '', from_prov )
        # 	ss = ss.filter( from_prov=mccrc32( from_prov ))

        school_prov = get.get('school_prov', None)
        if school_prov:
            school_prov = school_prov.encode('utf-8')
            school_prov = re.sub('省$', '', school_prov)
            school_prov = re.sub('市$', '', school_prov)
            ss = ss.filter(school_prov=mccrc32(school_prov))

        # stu_type = get.get( 'stu_type', None )
        # if stu_type:
        # 	ss = ss.filter( stu_type=mccrc32( stu_type ))

        level = get.get('level', None)
        if level:
            ss = ss.filter(level=mccrc32(level))

        # school_id = get.get( 'school_id', None )
        # if school_id:
        for key in rs_list_out:
            ss = ss.filter(school_id=key)

        r = ss.query(to_query).order_by('@weight')
        # for one in r._sphinx:

        ids = []
        for one in list(r):
            id = one.get('id')
            ids.append(str(id))
        # logger.info( ids )

        # logger.info( ids )
        if len(ids) > 0:
            sql = 'select sr.relation_id, sr.school_id, sr.specialty_name, mp.point_id, mp.area as from_prov, mp.type as stu_type, \
				mp.year, mp.point_average, mp.point_height, mp.point_low, mp.level from school_specialty_relations sr \
				left join school_point3 mp on sr.relation_id = mp.relation_id \
				where sr.relation_id in ( ' + (', '.join(ids)) + ' )'
            # logger.info( sql )
            tmp = {}
            cursor = connection.cursor()
            cursor.execute(sql)
            rs = dictfetchall(cursor)
            for one in rs:
                if not tmp.has_key(str(one['school_id'])):
                    tmp[str(one['school_id'])] = []
                tmp[str(one['school_id'])].append(one)
            # logger.info( tmp )
            # logger.info( len(tmp) )

            for key, one in rs_list_out.iteritems():
                if tmp.has_key(str(one['school_id'])):
                    rs_list_out[key]['specialties'] = tmp[str(
                        one['school_id'])]
    # return HttpResponse( s );
    rs_list_out_arr = []
    for key in rs_list_out:
        # logger.info( key )
        rs_list_out_arr.append(rs_list_out[key])
        logger.info("len: " + str(len(rs_list_out[key])))

    return HttpResponse(
        json({
            'error': 0,
            'page': page,
            'limit': limit,
            'rs_list': rs_list_out_arr
        }))
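Both raw-SQL blocks above go through a dictfetchall helper that is not shown here; a minimal sketch of it, following the well-known recipe from the Django documentation (assumed, since the project's own version is not included):

def dictfetchall(cursor):
    """Return all rows from a cursor as a list of dicts keyed by column name."""
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]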
Code Example #8
File: views.py  Project: monkeycraps/cCard2
def filter(request):

	get = request.REQUEST
	logger.info( get ) 

	page = get.get( 'page', None )
	if not page:
		page = 1
	else:
		page = int(page)

	limit = 10

	mode = 'SPH_MATCH_ANY'
	# Profession: expand pro_id into the names of its related specialties.
	pro_id = get.get( 'pro_id', None )
	to_query = ''
	if pro_id:
		# mbti = Mbti.objects.get( name=mbti_name )
		sql = ('select s.mbti_spe_id, s.name from mbti_profession_specialty ps '
			'inner join mbti_specialty s on ps.mbti_spe_id = s.mbti_spe_id '
			'where ps.mbti_pro_id = %s')
		cursor = connection.cursor()
		cursor.execute( sql, [pro_id] )  # parameterized instead of string concatenation
		rs = dictfetchall( cursor )
		for one in rs:
			to_query += ' ' + one['name']

	if to_query == "":
		mode = 'SPH_MATCH_FULLSCAN'

	# Fetch hits grouped by school.
	ss = SphinxQuerySet(
		index='ccard',
		mode=mode,
		rankmode='SPH_RANK_NONE',
		limit=limit,
		offset=(page - 1) * limit,
		groupby='school_id',
		# groupsort='school_id',
	)
	# ss.setm

	from_prov = get.get( 'from_prov', None )
	if from_prov:
		from_prov = from_prov.encode( 'utf-8' )
		from_prov = re.sub( '省$', '', from_prov )
		from_prov = re.sub( '市$', '', from_prov )
		ss = ss.filter( from_prov=mccrc32( from_prov ))

	school_prov = get.get( 'school_prov', None )
	if school_prov:
		school_prov = school_prov.encode( 'utf-8' )
		school_prov = re.sub( '省$', '', school_prov )
		school_prov = re.sub( '市$', '', school_prov )
		ss = ss.filter( school_prov=mccrc32( school_prov ))

	stu_type = get.get( 'stu_type', None )
	if stu_type:
		ss = ss.filter( stu_type=mccrc32( stu_type ))

	level = get.get( 'level', None )
	if level:
		ss = ss.filter( level=mccrc32( level ))

	school_id = get.get( 'school_id', None )
	if school_id:
		ss = ss.filter( school_id=school_id )

	r = ss.query( to_query ).order_by('@weight')
	# for one in r._sphinx:

	rs_list = {}
	ids = []
	for i, one in enumerate(r):
		id = one.get( 'id' )
		rs_list[i] = str(id)
		ids.append(str(id))

	# Fetch the schools for the matched point ids.
	rs_list_out = {}
	if len(ids) > 0 :
		sql = 'select mp.point_id, s.school_id, s.school_name, s.area as school_prov, s.school_icon, s.school_type, s.school_property1, s.school_property2, s.school_url from school s inner join school_point3 mp on s.school_id = mp.school_id where mp.point_id in ( '+ (', '.join(ids)) +' )'
		tmp = {}
		cursor = connection.cursor()
		cursor.execute( sql )
		rs = dictfetchall( cursor )
		for one in rs: 
			if one['school_icon']:
				one['school_icon'] = host + '/static' + one['school_icon']
			tmp[str(one['point_id'])] = one

			# logger.info( one['school_name'].encode( 'utf-8' ) )

		for key, one in rs_list.iteritems():
			rs_list[key] = tmp[one]

		# logger.info( rs_list )
		for key in rs_list:
			school_id_key = rs_list[key]['school_id']
			school_id_key = str(school_id_key)
			rs_list_out[school_id_key] = rs_list[key]

	# Fetch the matching specialties and admission data (left commented out in this version).
	# if len(rs_list_out) > 0 :
	# 	# Query again without grouping to collect every matching row.
	# 	ss = SphinxQuerySet(
	#         index='ccard', 
	#         mode=mode,
	#         rankmode='SPH_RANK_NONE',
	#         limit = 1000,  
	#         # offset= (page - 1) * limit , 
	#         # groupby='school_id', 
	#         # groupsort='school_id', 
	#     );
	# 	# ss.setm

	# 	from_prov = get.get( 'from_prov', None )
	# 	if from_prov:
	# 		from_prov = from_prov.encode( 'utf-8' )
	# 		from_prov = re.sub( '省$', '', from_prov )
	# 		from_prov = re.sub( '市$', '', from_prov )
	# 		ss = ss.filter( from_prov=mccrc32( from_prov ))

	# 	school_prov = get.get( 'school_prov', None )
	# 	if school_prov:
	# 		school_prov = school_prov.encode( 'utf-8' )
	# 		school_prov = re.sub( '省$', '', school_prov )
	# 		school_prov = re.sub( '市$', '', school_prov )
	# 		ss = ss.filter( school_prov=mccrc32( school_prov ))

	# 	stu_type = get.get( 'stu_type', None )
	# 	if stu_type:
	# 		ss = ss.filter( stu_type=mccrc32( stu_type ))

	# 	level = get.get( 'level', None )
	# 	if level:
	# 		ss = ss.filter( level=mccrc32( level ))

	# 	# school_id = get.get( 'school_id', None )
	# 	# if school_id:
	# 	for key in rs_list_out:
	# 		ss = ss.filter( school_id=key )

	# 	r = ss.query( to_query ).order_by('@weight')
	# 	# for one in r._sphinx:

	# 	ids = []
	# 	for one in list(r):
	# 		id = one.get( 'id' )
	# 		ids.append(str(id))
	# 	# logger.info( ids )

	# 	# logger.info( ids )
	# 	if len(ids) > 0 :
	# 		sql = 'select s.school_id, mp.point_id, mp.specialty_category, mp.area as from_prov, mp.type as stu_type, mp.year, mp.point_average, mp.point_height, mp.point_low, mp.level from school s inner join school_point3 mp on s.school_id = mp.school_id where mp.point_id in ( '+ (', '.join(ids)) +' )'
	# 		# logger.info( sql )
	# 		tmp = {}
	# 		cursor = connection.cursor()
	# 		cursor.execute( sql )
	# 		rs = dictfetchall( cursor )
	# 		for one in rs: 
	# 			if not tmp.has_key( str(one['school_id']) ):
	# 				tmp[str(one['school_id'])] = []
	# 			tmp[str(one['school_id'])].append( one )
	# 		# logger.info( tmp )
	# 		# logger.info( len(tmp) )

	# 		for key, one in rs_list_out.iteritems():
	# 			if tmp.has_key(str(one['school_id'])):
	# 				rs_list_out[key]['specialties'] = tmp[str(one['school_id'])]

	# return HttpResponse( s );
	rs_list_out_arr = []
	for key in rs_list_out:
		# logger.info( key )
		rs_list_out_arr.append( rs_list_out[key] )

	return HttpResponse( json({ 'error': 0, 'page': page, 'limit': limit, 'rs_list': rs_list_out_arr }) )
Code Example #9
def sphinx_query(query):
    qs = SphinxQuerySet(index=_all_indexes())
    return qs.query(query)
Code Example #10
def sphinx_query(query):
    qs = SphinxQuerySet(index=_all_indexes())
    return qs.query(query)
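A usage sketch for the helper above. The _all_indexes() function is not shown in these examples, so the stub below is purely illustrative; it only needs to return the index specification that SphinxQuerySet(index=...) expects (conventionally a space-separated list of index names):

# Sketch only: the stub and the index names are hypothetical.
def _all_indexes():
    return 'auth_user main_points search_searchitems'

hits = sphinx_query(u'some search term')
for hit in hits[:20]:        # SphinxQuerySet supports slicing for paging
    print(hit)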
Code Example #11
def __init__(self, model=None, index=None, weights=None):
    SphinxQuerySet.__init__(self, model=model, index=index,
                            mode='SPH_MATCH_EXTENDED2',
                            rankmode='SPH_RANK_PROXIMITY_BM25',
                            weights=weights)
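This constructor pins the match mode to SPH_MATCH_EXTENDED2 and the ranker to SPH_RANK_PROXIMITY_BM25. A sketch of the subclass it would belong to and of how it might be instantiated; the class name and the field weights are illustrative:

# Sketch only: RankedSphinxQuerySet and the weights dict are hypothetical.
from djangosphinx.models import SphinxQuerySet

class RankedSphinxQuerySet(SphinxQuerySet):
    def __init__(self, model=None, index=None, weights=None):
        # Always use extended query syntax with proximity/BM25 ranking.
        SphinxQuerySet.__init__(self, model=model, index=index,
                                mode='SPH_MATCH_EXTENDED2',
                                rankmode='SPH_RANK_PROXIMITY_BM25',
                                weights=weights)

# qs = RankedSphinxQuerySet(index='auth_user',
#                           weights={'username': 100, 'first_name': 50})
# results = qs.query('john')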