# Variant 1: results fetched from the remote json_search / json_info HTTP API.
def search(request, keyword=None, p=None):
    if politics.is_sensitive(keyword):
        return redirect('/?' + urllib.urlencode({'notallow': keyword.encode('utf8')}))

    d = {'keyword': keyword}
    d['words'] = list(set(re_punctuations.sub(u' ', d['keyword']).split()))
    try:
        d['p'] = int(p or request.GET.get('p'))
    except (TypeError, ValueError):
        d['p'] = 1
    d['category'] = request.GET.get('c', '')
    d['sort'] = request.GET.get('s', 'create_time')
    d['ps'] = 10  # page size
    d['offset'] = d['ps'] * (d['p'] - 1)

    # Fetch the result list from the search API.
    qs = {
        'keyword': keyword.encode('utf8'),
        'count': d['ps'],
        'start': d['offset'],
        'category': d['category'],
        'sort': d['sort'],
    }
    url = API_URL + 'json_search?' + urllib.urlencode(qs)
    r = req_session.get(url, headers={'Host': API_HOST})
    d.update(r.json())

    # Fill in per-torrent metadata from the info API.
    ids = '-'.join([str(x['id']) for x in d['result']['items']])
    if ids:
        qs = {'hashes': ids}
        url = API_URL + 'json_info?' + urllib.urlencode(qs)
        r = req_session.get(url, headers={'Host': API_HOST})
        j = r.json()
        for x in d['result']['items']:
            x.update(j[str(x['id'])])
            x['magnet_url'] = ('magnet:?xt=urn:btih:' + x['info_hash'] + '&'
                               + urllib.urlencode({'dn': x['name'].encode('utf8')}))
            if 'files' in x:
                # Drop entries whose path starts with '_' (typically padding
                # files), keep the first five, and list the largest first.
                x['files'] = [y for y in x['files'] if not y['path'].startswith(u'_')][:5]
                x['files'].sort(key=lambda f: f['length'], reverse=True)
            else:
                x['files'] = [{'path': x['name'], 'length': x['length']}]

    # Pagination: a sliding window of up to `w` page links around the current page.
    w = 10
    total = int(d['result']['meta']['total_found'])
    # ceil(total / ps) via Python 2 integer division.
    d['page_max'] = total / d['ps'] if total % d['ps'] == 0 else total / d['ps'] + 1
    d['prev_pages'] = range(
        max(d['p'] - w + min(int(w / 2), d['page_max'] - d['p']), 1), d['p'])
    d['next_pages'] = range(
        d['p'] + 1,
        int(min(d['page_max'] + 1, max(d['p'] - w / 2, 1) + w)))

    d['sort_navs'] = [
        {'name': '按收录时间', 'value': 'create_time'},  # "by date indexed"
        {'name': '按文件大小', 'value': 'length'},       # "by file size"
        {'name': '按相关性', 'value': 'relavance'},      # "by relevance"; value spelling kept as the backend expects
    ]
    d['cats_navs'] = [{'name': '全部', 'num': total, 'value': ''}]  # "all"
    for x in d['cats']['items']:
        v = workers.metautils.get_label_by_crc32(x['category'])
        d['cats_navs'].append({'value': v,
                               'name': workers.metautils.get_label(v),
                               'num': x['num']})
    return render(request, 'list.html', d)
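
# The response shapes consumed above can be reconstructed from the keys the
# view reads; the sketch below is inferred only from those accesses. Values
# are made up, and any other fields the API returns are unknown.
example_json_search = {
    'result': {
        'items': [{'id': 12345}],        # ids get joined with '-' for json_info
        'meta': {'total_found': 1024},   # drives pagination
    },
    'cats': {
        'items': [{'category': 305419896, 'num': 42}],  # crc32 of a label + count
    },
}
example_json_info = {
    '12345': {                           # keyed by id as a string
        'info_hash': '0123456789abcdef0123456789abcdef01234567',
        'name': u'Example.Name',
        'length': 734003200,
        # 'files': [{'path': u'...', 'length': ...}]  # optional
    },
}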
# Variant 2: results come from the Hash model manager instead of the HTTP API,
# with an empty-keyword guard, error reporting, and fake-torrent heuristics.
def search(request, keyword=None, p=None):
    if not keyword:
        return redirect('/')
    if politics.is_sensitive(keyword):
        return redirect('/?' + urllib.urlencode({'notallow': keyword.encode('utf8')}))

    d = {'keyword': keyword}
    d['words'] = list(set(re_punctuations.sub(u' ', d['keyword']).split()))
    try:
        d['p'] = int(p or request.GET.get('p'))
    except (TypeError, ValueError):
        d['p'] = 1
    d['category'] = request.GET.get('c', '')
    d['sort'] = request.GET.get('s', 'create_time')
    d['ps'] = 10  # page size
    d['offset'] = d['ps'] * (d['p'] - 1)

    try:
        res = Hash.objects.search(keyword, d['offset'], d['ps'],
                                  d['category'], d['sort'])
    except Exception:
        return HttpResponse('Sorry, an error has occurred: %s' % sys.exc_info()[1])
    d.update(res)

    # Fill in per-torrent metadata.
    ids = [str(x['id']) for x in d['result']['items']]
    if ids:
        items = Hash.objects.list_with_files(ids)
        for x in d['result']['items']:
            for y in items:
                if x['id'] == y['id']:
                    x.update(y)
            # Heuristics for spam/fake torrents seen in the index.
            x['maybe_fake'] = (x['name'].endswith(u'.rar')
                               or u'BTtiantang.com' in x['name']
                               or u'liangzijie' in x['name']
                               or u'720p高清视频' in x['name'])  # "720p HD video"
            if 'files' in x:
                # Drop entries whose path starts with '_' (typically padding
                # files), keep the first five, and list the largest first.
                x['files'] = [z for z in x['files'] if not z['path'].startswith(u'_')][:5]
                x['files'].sort(key=lambda f: f['length'], reverse=True)
            else:
                x['files'] = [{'path': x['name'], 'length': x['length']}]

    # Pagination: a sliding window of up to `w` page links around the current page.
    w = 10
    total = int(d['result']['meta']['total_found'])
    # ceil(total / ps) via Python 2 integer division.
    d['page_max'] = total / d['ps'] if total % d['ps'] == 0 else total / d['ps'] + 1
    d['prev_pages'] = range(
        max(d['p'] - w + min(int(w / 2), d['page_max'] - d['p']), 1), d['p'])
    d['next_pages'] = range(
        d['p'] + 1,
        int(min(d['page_max'] + 1, max(d['p'] - w / 2, 1) + w)))

    d['sort_navs'] = [
        {'name': 'By Time', 'value': 'create_time'},
        {'name': 'By Size', 'value': 'length'},
        {'name': 'By Relevance', 'value': 'relavance'},  # value spelling kept as the backend expects
    ]
    d['cats_navs'] = [{'name': 'All', 'num': total, 'value': ''}]
    d['keyword_logs'] = KeywordLog.objects  # handed to the template as a manager; presumably queried there
    for x in d['cats']['items']:
        v = workers.metautils.get_label_by_crc32(x['category'])
        d['cats_navs'].append({'value': v,
                               'name': workers.metautils.get_label(v),
                               'num': x['num']})
    return render(request, 'list.html', d)
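
# A hypothetical sketch of the custom manager interface implied by the call
# sites above (Hash.objects.search(...) and Hash.objects.list_with_files(...)).
# Only the argument order is taken from this file; the parameter names and
# bodies are placeholders, not the project's actual implementation.
from django.db import models


class HashManager(models.Manager):

    def search(self, keyword, offset, count, category, sort):
        # Should return a dict shaped like:
        #   {'result': {'items': [...], 'meta': {'total_found': N}},
        #    'cats': {'items': [...]}}
        raise NotImplementedError

    def list_with_files(self, ids):
        # Should return a list of dicts carrying at least 'id', 'name' and
        # 'length' (plus 'info_hash' and optionally 'files' in the variants
        # that build magnet links).
        raise NotImplementedError

# Attached on the model as:  objects = HashManager()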
# Variant 3: manager-backed lookup with magnet links and a shorter
# fake-torrent blacklist.
def search(request, keyword=None, p=None):
    if not keyword:
        return redirect('/')
    if politics.is_sensitive(keyword):
        return redirect('/?' + urllib.urlencode({'notallow': keyword.encode('utf8')}))

    d = {'keyword': keyword}
    d['words'] = list(set(re_punctuations.sub(u' ', d['keyword']).split()))
    try:
        d['p'] = int(p or request.GET.get('p'))
    except (TypeError, ValueError):
        d['p'] = 1
    d['category'] = request.GET.get('c', '')
    d['sort'] = request.GET.get('s', 'create_time')
    d['ps'] = 10  # page size
    d['offset'] = d['ps'] * (d['p'] - 1)

    res = Hash.objects.search(keyword, d['offset'], d['ps'],
                              d['category'], d['sort'])
    d.update(res)

    # Fill in per-torrent metadata.
    ids = [str(x['id']) for x in d['result']['items']]
    if ids:
        items = Hash.objects.list_with_files(ids)
        for x in d['result']['items']:
            for y in items:
                if x['id'] == y['id']:
                    x.update(y)
            x['magnet_url'] = ('magnet:?xt=urn:btih:' + x['info_hash'] + '&'
                               + urllib.urlencode({'dn': x['name'].encode('utf8')}))
            # Heuristics for spam/fake torrents seen in the index.
            x['maybe_fake'] = (x['name'].endswith(u'.rar')
                               or u'BTtiantang.com' in x['name']
                               or u'liangzijie' in x['name'])
            if 'files' in x:
                # Drop entries whose path starts with '_' (typically padding
                # files), keep the first five, and list the largest first.
                x['files'] = [z for z in x['files'] if not z['path'].startswith(u'_')][:5]
                x['files'].sort(key=lambda f: f['length'], reverse=True)
            else:
                x['files'] = [{'path': x['name'], 'length': x['length']}]

    # Pagination: a sliding window of up to `w` page links around the current page.
    w = 10
    total = int(d['result']['meta']['total_found'])
    # ceil(total / ps) via Python 2 integer division.
    d['page_max'] = total / d['ps'] if total % d['ps'] == 0 else total / d['ps'] + 1
    d['prev_pages'] = range(
        max(d['p'] - w + min(int(w / 2), d['page_max'] - d['p']), 1), d['p'])
    d['next_pages'] = range(
        d['p'] + 1,
        int(min(d['page_max'] + 1, max(d['p'] - w / 2, 1) + w)))

    d['sort_navs'] = [
        {'name': '按收录时间', 'value': 'create_time'},  # "by date indexed"
        {'name': '按文件大小', 'value': 'length'},       # "by file size"
        {'name': '按相关性', 'value': 'relavance'},      # "by relevance"; value spelling kept as the backend expects
    ]
    d['cats_navs'] = [{'name': '全部', 'num': total, 'value': ''}]  # "all"
    for x in d['cats']['items']:
        v = workers.metautils.get_label_by_crc32(x['category'])
        d['cats_navs'].append({'value': v,
                               'name': workers.metautils.get_label(v),
                               'num': x['num']})
    return render(request, 'list.html', d)
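
# The pagination window above is dense; this standalone reproduction of the
# same formulas shows how the w-link window clamps at both ends. The page
# numbers below are illustrative inputs, not values from the code.
w = 10

def window(p, page_max):
    prev_pages = range(max(p - w + min(int(w / 2), page_max - p), 1), p)
    next_pages = range(p + 1, int(min(page_max + 1, max(p - w / 2, 1) + w)))
    return prev_pages, next_pages

print window(7, 12)   # ([2, 3, 4, 5, 6], [8, 9, 10, 11]) -- w links incl. page 7
print window(1, 12)   # ([], [2, 3, 4, 5, 6, 7, 8, 9, 10]) -- window slides right at the edge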
# Variant 4: manager-backed lookup; the .rar suffix is the only
# fake-torrent check, and there is no empty-keyword guard.
def search(request, keyword=None, p=None):
    if politics.is_sensitive(keyword):
        return redirect('/?' + urllib.urlencode({'notallow': keyword.encode('utf8')}))

    d = {'keyword': keyword}
    d['words'] = list(set(re_punctuations.sub(u' ', d['keyword']).split()))
    try:
        d['p'] = int(p or request.GET.get('p'))
    except (TypeError, ValueError):
        d['p'] = 1
    d['category'] = request.GET.get('c', '')
    d['sort'] = request.GET.get('s', 'create_time')
    d['ps'] = 10  # page size
    d['offset'] = d['ps'] * (d['p'] - 1)

    res = Hash.objects.search(keyword, d['offset'], d['ps'],
                              d['category'], d['sort'])
    d.update(res)

    # Fill in per-torrent metadata.
    ids = [str(x['id']) for x in d['result']['items']]
    if ids:
        items = Hash.objects.list_with_files(ids)
        for x in d['result']['items']:
            for y in items:
                if x['id'] == y['id']:
                    x.update(y)
            x['magnet_url'] = ('magnet:?xt=urn:btih:' + x['info_hash'] + '&'
                               + urllib.urlencode({'dn': x['name'].encode('utf8')}))
            x['maybe_fake'] = x['name'].endswith(u'.rar')
            if 'files' in x:
                # Drop entries whose path starts with '_' (typically padding
                # files), keep the first five, and list the largest first.
                x['files'] = [z for z in x['files'] if not z['path'].startswith(u'_')][:5]
                x['files'].sort(key=lambda f: f['length'], reverse=True)
            else:
                x['files'] = [{'path': x['name'], 'length': x['length']}]

    # Pagination: a sliding window of up to `w` page links around the current page.
    w = 10
    total = int(d['result']['meta']['total_found'])
    # ceil(total / ps) via Python 2 integer division.
    d['page_max'] = total / d['ps'] if total % d['ps'] == 0 else total / d['ps'] + 1
    d['prev_pages'] = range(
        max(d['p'] - w + min(int(w / 2), d['page_max'] - d['p']), 1), d['p'])
    d['next_pages'] = range(
        d['p'] + 1,
        int(min(d['page_max'] + 1, max(d['p'] - w / 2, 1) + w)))

    d['sort_navs'] = [
        {'name': '按收录时间', 'value': 'create_time'},  # "by date indexed"
        {'name': '按文件大小', 'value': 'length'},       # "by file size"
        {'name': '按相关性', 'value': 'relavance'},      # "by relevance"; value spelling kept as the backend expects
    ]
    d['cats_navs'] = [{'name': '全部', 'num': total, 'value': ''}]  # "all"
    for x in d['cats']['items']:
        v = workers.metautils.get_label_by_crc32(x['category'])
        d['cats_navs'].append({'value': v,
                               'name': workers.metautils.get_label(v),
                               'num': x['num']})
    return render(request, 'list.html', d)
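
# How keyword and p reach the view is not shown in this file; this is a
# hypothetical URL routing sketch (Django 1.8-era, matching the Python 2
# idioms above). The patterns and module path are assumptions.
from django.conf.urls import url
import views  # wherever search() lives; an assumption

urlpatterns = [
    url(r'^search/(?P<keyword>[^/]+)/$', views.search),
    url(r'^search/(?P<keyword>[^/]+)/(?P<p>\d+)/$', views.search),
    # A bare ?p=N query parameter also works: the view falls back to
    # request.GET.get('p') when p is not captured in the URL.
]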
# Variant 5: results fetched from the HTTP API again, with the keyword
# base64-encoded so the query string stays ASCII-only.
def search(request, keyword=None, p=None):
    if politics.is_sensitive(keyword):
        return redirect('/?' + urllib.urlencode({'notallow': keyword.encode('utf8')}))

    d = {'keyword': keyword}
    d['words'] = list(set(re_punctuations.sub(u' ', d['keyword']).split()))
    try:
        d['p'] = int(p or request.GET.get('p'))
    except (TypeError, ValueError):
        d['p'] = 1
    d['category'] = request.GET.get('c', '')
    d['sort'] = request.GET.get('s', 'create_time')
    d['ps'] = 10  # page size
    d['offset'] = d['ps'] * (d['p'] - 1)

    # Fetch the result list from the search API.
    qs = {
        'keyword': keyword.encode('utf8').encode('base64'),
        'count': d['ps'],
        'start': d['offset'],
        'category': d['category'],
        'sort': d['sort'],
        'base64': 1,  # tells the server the keyword is base64-encoded
    }
    url = API_URL + 'json_search?' + urllib.urlencode(qs)
    r = req_session.get(url, headers={'Host': API_HOST})
    d.update(r.json())

    # Fill in per-torrent metadata from the info API.
    ids = '-'.join([str(x['id']) for x in d['result']['items']])
    if ids:
        qs = {'hashes': ids}
        url = API_URL + 'json_info?' + urllib.urlencode(qs)
        r = req_session.get(url, headers={'Host': API_HOST})
        j = r.json()
        for x in d['result']['items']:
            x.update(j[str(x['id'])])
            x['magnet_url'] = ('magnet:?xt=urn:btih:' + x['info_hash'] + '&'
                               + urllib.urlencode({'dn': x['name'].encode('utf8')}))
            x['maybe_fake'] = x['name'].endswith(u'.rar')
            if 'files' in x:
                # Drop entries whose path starts with '_' (typically padding
                # files), keep the first five, and list the largest first.
                x['files'] = [y for y in x['files'] if not y['path'].startswith(u'_')][:5]
                x['files'].sort(key=lambda f: f['length'], reverse=True)
            else:
                x['files'] = [{'path': x['name'], 'length': x['length']}]

    # Pagination: a sliding window of up to `w` page links around the current page.
    w = 10
    total = int(d['result']['meta']['total_found'])
    # ceil(total / ps) via Python 2 integer division.
    d['page_max'] = total / d['ps'] if total % d['ps'] == 0 else total / d['ps'] + 1
    d['prev_pages'] = range(
        max(d['p'] - w + min(int(w / 2), d['page_max'] - d['p']), 1), d['p'])
    d['next_pages'] = range(
        d['p'] + 1,
        int(min(d['page_max'] + 1, max(d['p'] - w / 2, 1) + w)))

    d['sort_navs'] = [
        {'name': '按收录时间', 'value': 'create_time'},  # "by date indexed"
        {'name': '按文件大小', 'value': 'length'},       # "by file size"
        {'name': '按相关性', 'value': 'relavance'},      # "by relevance"; value spelling kept as the backend expects
    ]
    d['cats_navs'] = [{'name': '全部', 'num': total, 'value': ''}]  # "all"
    for x in d['cats']['items']:
        v = workers.metautils.get_label_by_crc32(x['category'])
        d['cats_navs'].append({'value': v,
                               'name': workers.metautils.get_label(v),
                               'num': x['num']})
    return render(request, 'list.html', d)
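
# -*- coding: utf-8 -*-
# Why the base64 flag in the last variant: encoding the UTF-8 keyword as
# base64 keeps the query string ASCII-only, presumably sidestepping
# double-encoding issues with raw multi-byte keywords on the server side.
# A quick Python 2 illustration of the transformation:
import urllib

keyword = u'高清视频'  # "HD video", an illustrative keyword
b64 = keyword.encode('utf8').encode('base64')  # note: appends a trailing '\n'
print urllib.urlencode([('keyword', b64), ('base64', 1)])
# keyword=6auY5riF6KeG6aKR%0A&base64=1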