Example 1
    def rtmp_play_url(self, stream_id, app_name=None):
        if app_name is None:
            app_name = self._app_name
        url_path = '/' + urljoin(quote_plus(app_name) + '/', quote_plus(stream_id))
        if not self._auth_type:
            return 'rtmp://' + self._play_domain + url_path
        elif self._auth_type == self.AUTH_TYPE_A:
            query = {'auth_key': self._calc_auth_token(url_path)}
            return 'rtmp://' + self._publish_domain + url_path + '?' + urlencode(query)
        else:
            raise IVRError('Unknown auth type {0}'.format(self._auth_type))
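
A minimal standalone sketch of how the path above is assembled, using illustrative app and stream names (quote_plus and urljoin come from urllib.parse):

from urllib.parse import quote_plus, urljoin

app_name, stream_id = 'live app', 'stream/1'
# quote_plus escapes '/' to %2F and turns spaces into '+', so each name
# stays a single path component
url_path = '/' + urljoin(quote_plus(app_name) + '/', quote_plus(stream_id))
print(url_path)  # /live+app/stream%2F1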
Example 2
def get_cloud_space(session, csrf='', login=LOGIN):
    """ returns available free space in bytes """
    assert csrf is not None, 'no CSRF'

    timestamp = str(int(time.mktime(datetime.datetime.now().timetuple()) * 1000))
    quoted_login = quote_plus(login)
    command = ('user/space?api=' + str(API_VER) + '&email=' + quoted_login +
               '&x-email=' + quoted_login + '&token=' + csrf + '&_=' + timestamp)
    url = urljoin(CLOUD_URL, command)

    try:
        r = session.get(url, verify=VERIFY_SSL)
    except Exception as e:
        if LOGGER:
            LOGGER.error('Get cloud space HTTP request error: {}'.format(e))
        return 0

    if r.status_code == requests.codes.ok:
        r_json = r.json()
        total_bytes = r_json['body']['total'] * 1024 * 1024
        used_bytes = r_json['body']['used'] * 1024 * 1024
        return total_bytes - used_bytes
    elif LOGGER:
        LOGGER.error('Cloud free space request error. Check your connection. '
                     'HTTP code: {}, msg: {}'.format(r.status_code, r.text))
    return 0
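
The timestamp above is just the current wall-clock time in milliseconds; a simpler, roughly equivalent sketch (time.mktime on timetuple() truncates to whole seconds, so the sub-second digits differ):

import time

timestamp = str(int(time.time() * 1000))  # e.g. '1700000000000'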
Example 3
import itertools
import re
from urllib.parse import quote_plus


def safe_encode(*args, pattern=' ', space_char='+'):
    """default: replace spaces with '+'
    """
    args = itertools.chain.from_iterable(
        (quote_plus(argwd) for argwd in arg.split(pattern)) for arg in args)
    # re.sub's fourth positional argument is `count`, not `flags`,
    # so DOTALL must be passed as a keyword
    return re.sub(pattern,
                  space_char,
                  space_char.join(args),
                  flags=re.DOTALL)
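
A quick usage sketch of safe_encode as fixed above:

print(safe_encode('red car', 'blue'))  # red+car+blue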
Example 4
def new_search(request):
    search = request.POST.get('search')
    final_url = BASE_CRAIGLIST_URL.format(quote_plus(search))
    response = requests.get(final_url)
    data = response.text  # must be assigned before it is parsed
    soup = BeautifulSoup(data, features='html.parser')
    post_titles = soup.find_all('a', {'class': 'result-title'})

    stuff_for_frontend = {
        'search': search,
    }
    return render(request, 'my_app/new_search.html', stuff_for_frontend)
Example 5
def preprocess_media_tags(element):
    if isinstance(element, html.HtmlElement):
        if element.tag == 'figcaption':
            # figcaption may have only text content
            for e in element.findall('*'):
                e.drop_tag()
        elif element.tag in ['ol', 'ul']:
            # ignore any spaces between <ul> and <li>
            element.text = ''
        elif element.tag == 'li':
            # ignore spaces after </li>
            element.tail = ''
        elif element.tag == 'iframe' and element.get('src'):
            iframe_src = element.get('src')
            youtube = youtube_re.match(iframe_src)
            vimeo = vimeo_re.match(iframe_src)
            if youtube or vimeo:
                if youtube:
                    yt_id = urlparse(iframe_src).path.replace('/embed/', '')
                    element.set(
                        'src', '/embed/youtube?url=' +
                        quote_plus('https://www.youtube.com/watch?v=' + yt_id))
                elif vimeo:
                    element.set(
                        'src', '/embed/vimeo?url=' +
                        quote_plus('https://vimeo.com/' + vimeo.group(2)))

                element = _wrap_tag(element, 'figure')
        elif element.tag == 'blockquote' and element.get(
                'class') == 'twitter-tweet':
            twitter_links = element.cssselect('a')
            for tw_link in twitter_links:
                if twitter_re.match(tw_link.get('href')):
                    twitter_frame = html.HtmlElement()
                    twitter_frame.tag = 'iframe'
                    twitter_frame.set(
                        'src', '/embed/twitter?url=' +
                        quote_plus(tw_link.get('href')))
                    element = _wrap_tag(twitter_frame, 'figure')

    return element
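
The YouTube branch above boils down to this standalone transformation, shown with an illustrative video id:

from urllib.parse import urlparse, quote_plus

iframe_src = 'https://www.youtube.com/embed/dQw4w9WgXcQ'
yt_id = urlparse(iframe_src).path.replace('/embed/', '')
print('/embed/youtube?url=' + quote_plus('https://www.youtube.com/watch?v=' + yt_id))
# /embed/youtube?url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DdQw4w9WgXcQ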
Example 6
def new_search(request):
    search = request.POST.get('search')
    if search:
        time = timezone.now()
        s1 = Search.objects.create(search=search, created=time)
    
    final_url = BASE_CGSLIST_URL.format(quote_plus(search))
    no_res = ''
    
    try:
        response = requests.get(final_url)
        data = response.text
        soup = BeautifulSoup(data, features='html.parser')

        post_listings = soup.find_all('li', {'class': 'result-row'})
        
        if soup.find_all('div', {'class': 'alert alert-sm alert-warning'}):
            no_result = soup.find_all('div', {'class': 'alert alert-sm alert-warning'})    
            no_res = "No result found For '{}'. (All words must match)".format(search)
            print(no_result, ' ', no_res)

        final_postings = []

        for post in post_listings:
            post_titles = post.find(class_='result-title').text
            post_url = post.find('a').get('href')
            
            if post.find(class_='result-price'):
                post_price = post.find(class_='result-price').text
            else:
                post_price = 'N/A'

            if post.find(class_='result-image').get('data-ids'):
                post_image = post.find(class_='result-image').get('data-ids').split(',')[0].split(':')[1]
                post_image_url = BASE_IMAGE_URL.format(post_image)
            else:
                post_image_url = 'https://previews.123rf.com/images/urfandadashov/urfandadashov1809/urfandadashov180901275/109135379-photo-not-available-vector-icon-isolated-on-transparent-background-photo-not-available-logo-concept.jpg'
                print(post_image_url)

            final_postings.append((post_titles, post_url, post_price, post_image_url))
        

        stuff_for_front_end = {
            'search' : search,
            'final_postings': final_postings,
            'no_res': no_res,
        }
    except Exception:
        stuff_for_front_end = { 'errormessage' : 'Check Your Internet Connection'}
    if search:
        s1.save()
    return render(request, 'my_app/new_search.html', stuff_for_front_end)
Example 7
def new_search(request):
    # store what is typed in the search box in the database via the Search model
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    #print(quote_plus(search))
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    #print(final_url)
    response = requests.get(final_url)
    data = response.text
    #print(data)
    soup = BeautifulSoup(data, features='html.parser')
    #print(soup)
    #post_titles=soup.find_all('a',{'class':'result-title'})
    #print(post_titles[0].get('href'))

    post_listings = soup.find_all('li', {'class': 'result-row'})
    #post_title=post_listings[0].find(class_='result-title').text
    #post_url=post_listings[0].find('a').get('href')
    #post_price=post_listings[0].find(class_='result-price').text

    #print(post_title)
    #print(post_url)
    #print(post_price)

    final_postings = []

    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')
        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'
        if post.find(class_='result-image').get('data-ids'):
            post_image_id = post.find(class_='result-image').get(
                'data-ids').split(',')[0].split(':')[1]
            #print(post_image_id)
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
            #print(post_image_url)
        else:
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    stuff_front_end = {
        'search': search,
        'final_postings': final_postings,
    }
    #print(search)
    #Context Dictionary
    return render(request, 'my_app/new_search.html', stuff_front_end)
Example 8
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(
        search=search
    )  #creating search object #this writes to the database based off of user posts
    # print(quote_plus(search))
    final_url = BASE_CRAIGLIST_URL.format(quote_plus(search))
    # print(final_url)
    response = requests.get(final_url)
    data = response.text
    soup = BeautifulSoup(data, features='html.parser')
    post_title = soup.find_all(
        'a', {'class': 'result-title'})  # find all links whose class is 'result-title'
    # print(post_title.get('href'))
    post_listings = soup.find_all('li', {'class': 'result-row'})
    # # post_title = post_listings.find('a', {'class' : 'result-title'})
    # # post_url = post_listings.find('a').get('href')
    # post_price = post_listings.find(class_= 'result-price')
    # # post_text = new_soup.find(id='postingbody').text

    final_postings = []

    for post in post_listings:
        post_title = post.find('a', {'class': 'result-title'}).text
        post_url = post.find('a').get('href')
        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        if post.find('a', {'class': 'result-image'}).get('data-ids'):
            post_image_url = post.find(
                class_='result-image').get('data-ids').split(',')[0].split(
                    ':')[1]  # parse the image id out of the data-ids attribute
            post_image_url = BASE_IMAGE_URL.format(post_image_url)
            print(post_image_url)
        else:
            # without this branch the first image-less post raises a NameError
            post_image_url = ''

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    # print(post_title)
    # print(post_url)
    # print(post_price)

    # print(data)
    # print(search)
    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }

    return render(request, 'myapp/new_search.html', stuff_for_frontend)
Example 9
def new_search(request):
    # ver 1
    # # pull what was typed from the 'search' placeholder in the POST data
    # search = request.POST.get('search')
    # # print(search)
    # # create a variable to send to the HTML
    # staff_for_frontend = {
    #     'search': search,
    # }
    # # build the HTML and send it the data to display
    # return render(request, 'my_app/new_search.html', staff_for_frontend)
    # ver 2
    # get the value from the HTML form
    search = request.POST.get('search')
    # create the object in the DB
    models.Search.objects.create(search=search)
    # build a proper query with '+' instead of ' '
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    # get the response from the web
    response = requests.get(final_url)
    data = response.text
    # dive into web scraping
    soup = BeautifulSoup(data, features='html.parser')
    post_listings = soup.find_all('li', {'class': 'result-row'})
    final_postings = []
    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')

        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        if post.find(class_='result-image').get('data-ids'):
            post_image_id = post.find(class_='result-image').get(
                'data-ids').split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
        else:
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    # create the variable to send to the HTML
    staff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }
    # build the HTML and send it the data to display
    return render(request, 'my_app/new_search.html', staff_for_frontend)
Example 10
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    # print(quote_plus(search))
    # quote_plus formats the search content with plus signs and percent-escapes
    # so it is valid as part of a URL
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    # this concatenates the base url and the quote_plus-formatted content of the search
    response = requests.get(final_url)
    data = response.text
    print(final_url)
    # print(data)
    soup = BeautifulSoup(data, features='html.parser')

    # this creates a BeautifulSoup object of the data variable as HTML
    post_listings = soup.find_all('li', {'class': 'result-row'})

    final_postings = []
    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')

        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text

        else:
            new_response = requests.get(post_url)
            new_data = new_response.text
            new_soup = BeautifulSoup(new_data, features='html.parser')
            post_text = new_soup.find(id='postingbody').text

            rl = re.findall(r'\$\w+', post_text)
            if rl:
                post_price = rl[0]
            else:
                post_price = 'N/A'

        if post.find(class_='result-image').get('data-ids'):
            # post_image_id = post.find(class_='result-image').get('data-ids').split(',')[:]
            post_image_id = post.find(class_='result-image').get(
                'data-ids').split(',')[0].split(':')[1]
            post_image_url = "https://images.craigslist.org/{}_300x300.jpg".format(
                post_image_id)

        else:
            post_image_url = "https://craigslist.org/images/peace.jpg"

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    print(final_postings)
    stuff_for_frontend = {'search': search, 'final_postings': final_postings}
    return render(request, 'my_app/new_search.html', stuff_for_frontend)
Example 11
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    response = requests.get(final_url)
    data = response.text
    print(data)

    stuff_for_frontend = {
        'search': search,
    }
    return render(request, 'myapp/new_search.html', stuff_for_frontend)
Example 12
def new_search(request):
    search = request.POST.get("search")
    models.Search.objects.create(search=search)
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    response = requests.get(final_url)
    data = response.text
    soup = BeautifulSoup(data, features="html.parser")
    post_title = soup.find_all("a", {"class": "result-title"})
    print(post_title)
    frontend = {
        "search": search,
    }
    return render(request, "pages/new_search.html", frontend)
Example 13
def new_search(request):
    """
    Handles request for new searches and renders a list of results
    """

    search = request.POST.get('search')

    models.Search.objects.create(search=search)

    # quote_plus joins search query keywords with '+' delimiters (e.g. "experienced+python+tutor")
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))

    # gets the response object for the built URL
    response = requests.get(final_url)

    # returns the HTML source code of response
    data = response.text

    # create a soup object for the source code
    soup = BeautifulSoup(data, features='html.parser')

    # returns all list items with class="result-row"
    post_listings = soup.find_all('li', {'class': 'result-row'})

    final_postings = []

    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')

        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        if post.find(class_="result-image").get('data-ids'):
            post_image_id = post.find(class_="result-image").get(
                'data-ids').split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
        else:
            post_image_url = "https://craigslist.org/images/peace.jpg"

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings[:12],
    }

    return render(request, 'search_app/new_search.html', stuff_for_frontend)
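
The data-ids parsing used in the loop above (and in several other examples here) works like this on an illustrative attribute value, where each comma-separated entry is a 'prefix:id' pair:

BASE_IMAGE_URL = 'https://images.craigslist.org/{}_300x300.jpg'  # as in Example 10

data_ids = '1:00101_abcdef,1:00202_ghijkl'            # illustrative value
post_image_id = data_ids.split(',')[0].split(':')[1]  # first pair, id part
print(post_image_id)                         # 00101_abcdef
print(BASE_IMAGE_URL.format(post_image_id))  # https://images.craigslist.org/00101_abcdef_300x300.jpg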
Example 14
def new_search(request):
    headers = {
        "User-Agent":
        "Mozilla/5.0 (Windows NT 10.0; Win64;     x64; rv:66.0) Gecko/20100101 Firefox/66.0",
        "Accept-Encoding": "gzip, deflate",
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
        "DNT": "1",
        "Connection": "close",
        "Upgrade-Insecure-Requests": "1"
    }
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    final_url = BASE_AMAZON_URL.format(quote_plus(search))
    response = requests.get(final_url, headers=headers)
    data = response.text
    print(final_url)
    soup = BeautifulSoup(data, features='html.parser')
    final_postings = []
    for d in soup.findAll(
            'div',
            attrs={
                'class':
                'sg-col-4-of-24 sg-col-4-of-12 sg-col-4-of-36 s-result-item s-asin sg-col-4-of-28 sg-col-4-of-16 sg-col sg-col-4-of-20 sg-col-4-of-32'
            }):
        link = d.find('a', attrs={
            'class': 'a-link-normal a-text-normal'
        }).get('href')
        link = 'https://www.amazon.in/' + link
        images = d.find('img', {'src': re.compile('.jpg')})
        name = d.find(
            'span',
            attrs={'class': 'a-size-base-plus a-color-base a-text-normal'})
        pricediv = d.find('div',
                          attrs={'class': 'a-row a-size-base a-color-base'})
        price = None
        if pricediv is not None:
            price = pricediv.find('span', attrs={'class': 'a-offscreen'})
        if name is not None and price is not None:
            final_postings.append((link, name.text, price.text, images['src']))
        elif name is not None:
            # price missing but name present; the original `elif price is None`
            # could dereference name.text on a missing name
            final_postings.append(
                (link, name.text, 'Not Available', images['src']))
        else:
            final_postings.append("unknown-product")
    context = {
        'search': search,
        'final_postings': final_postings,
    }
    return render(request, 'my_app/new_search.html', context)
Example 15
def new_search(request):
    # get inputed search text
    search = request.POST.get('search')
    # add search to database for tracking
    models.Search.objects.create(search=search)
    # this is the final url with corrected formating using quote_plus
    # which will fill in any spaces in the search term string
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    # Getting the Webpage, creating a Response object.
    response = requests.get(final_url)
    # Extracting the source code of the page.
    data = response.text
    # Passing the source code to Beautiful Soup to create a
    # BeautifulSoup object for it.
    soup = BeautifulSoup(data, features="html.parser")
    # Extracting all the <li> tags whose class name is
    # 'result-row' into a list.
    post_listings = soup.find_all('li', {'class': 'result-row'})
    # post_title = post_listings[0].find(class_='result-title').text

    # post_url = post_listings[0].find('a').get('href')
    # post_price = post_listings[0].find(class_='result-price').text

    final_postings = []
    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')

        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        if post.find(class_='result-image').get('data-ids'):
            post_image_id = post.find(class_='result-image').get(
                'data-ids').split(',')[0].split(':')[1]
            print(post_image_id)
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
            print(post_image_url)

        else:
            post_image_url = "https://craigslist.org/images/peace.jpg"

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }
    return render(request, 'my_app/new_search.html', stuff_for_frontend)
Example 16
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    print(search)
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    #print(final_url)
    response = requests.get(final_url)
    data = response.text
    soup = BeautifulSoup(data, features='html.parser')
    #post_titles=soup.find_all('a',{'class':'result-title'})
    post_listings = soup.find_all('li', {'class': 'result-row'})
    final_postings = []
    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')

        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        if post.find(class_='result-image').get('data-ids'):
            post_image_id = post.find(class_='result-image').get(
                'data-ids').split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
            print(post_image_url)
        else:
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))
    disp = {
        'search': search,
        'final_postings': final_postings,
    }
    return render(request, 'my_app/new_search.html', disp)
Example 17
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    final_url = BASE_CRAIGSLIST_URL.format(
        quote_plus(search))  # join BASE_CRAIGSLIST_URL with the searched word
    response = requests.get(final_url)  # fetch the page and store the Response
    data = response.text  # that page's HTML as text
    soup = BeautifulSoup(data, features='html.parser')  # parse into a bs4 object

    post_listings = soup.find_all('li', {'class': 'result-row'})

    final_postings = []

    for post in post_listings:
        post_title = post.find(
            class_='result-title').text  # the post's title text
        post_url = post.find('a').get('href')  # the link to the post

        #if a post has a price then print its price else print N/A
        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        # if the post has an image, display it; else display a peace-sign image.
        # bs4 can't grab images directly by name, so the data-ids attribute is
        # parsed and split to recover the image id.
        if post.find(class_='result-image').get('data-ids'):
            post_image_id = post.find(class_='result-image').get(
                'data-ids').split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
            print(post_image_url)
        else:
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        # create a tuple of the post's title, url, price and image so the
        # frontend can access them by index
        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    print(search)
    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }
    return render(request, 'myapp/new_search.html', stuff_for_frontend)
Example 18
def new_search(request):
    search = request.POST.get('search')
    # print(quote_plus(search))
    models.Search.objects.create(search=search)
    final_url = BASE_CRAIGS_LIST_URL.format(quote_plus(search))
    print(final_url)
    # response = requests.get('https://delhi.craigslist.org/search/bbb?query=python&sort=rel')
    response = requests.get(final_url)
    data = response.text
    soup = BeautifulSoup(data, features='html.parser')
    post_listings = soup.find_all('li', {'class': 'result-row'})
    # print(len(post_listings))
    # post_title = post_listings[0].find(class_='result-title').text
    # post_url = post_listings[0].find('a').get('href')
    # post_price = post_listings[0].find(class_='result-price').text
    final_postings = []
    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')
        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        print(post_title)
        if post.find('a').get('data-ids'):
            # print(post.find('a').get('data-ids').split(',')[0].split(':')[1])
            post_image_id = post.find('a').get('data-ids').split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
            # print(post_image_url)
        else:
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append((post_title, post_url, post_price, post_image_url))

    # print(post_title)
    # print(post_url)
    # print(post_price)

    # post_titles = soup.find_all('a', {'class': 'result-title'})
    # print(post_titles[0].text)
    # print(post_titles[0].get('href'))

    # print(data)
    # print(search)

    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }
    return render(request, 'webscrapping/newsearch.html', stuff_for_frontend)
Example 19
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    print(quote_plus(search))
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    print(final_url)
    response = requests.get(final_url)
    data = response.text
    # print(data)

    soup = BeautifulSoup(data, features='html.parser')

    post_listings = soup.find_all('li', {'class': 'result-row'})

    final_postings = []

    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')
        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = "N/A"

        if post.find(class_='result-image').get('data-ids'):
            post_img_id = post.find(class_='result-image').get(
                'data-ids').split(',')[0].split(':')[1]
            post_img_url = BASE_IMAGE_URL.format(post_img_id)
            print(post_img_url)
        else:
            post_img_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append((post_title, post_url, post_price, post_img_url))

    stuff_for_frontend = {'search': search, 'final_postings': final_postings}

    return render(request, 'Craigslist_WebScraping_app/new_search.html',
                  stuff_for_frontend)
Example 20
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    response = requests.get(final_url)
    data = response.text
    # create a BeautifulSoup object from the response HTML
    soup = BeautifulSoup(data, features='html.parser')
    # extract all the result rows (li elements with class 'result-row')
    post_listings = soup.find_all('li', {'class': 'result-row'})

    final_post_listing = []
    for p in post_listings:
        post_url = p.find('a').get('href')
        post_title = p.find(class_='result-title').text
        if p.find(class_='result-price'):
            post_price = p.find(class_='result-price').text
        else:
            post_price = 'N/A'

        if p.find(class_='result-image').get('data-ids'):
            post_image_id = p.find(class_='result-image').get('data-ids').split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(quote_plus(post_image_id))
            print(post_image_url)
        else:
            post_image_url = 'https://images.craigslist.org/images/peace.jpg'

        final_post_listing.append((post_title, post_url, post_price, post_image_url))

    stuff_for_front_end = {
        'search': search,
        'final_post_listing': final_post_listing
    }
    return render(request, 'demoapp/new_search.html', stuff_for_front_end)
Example 21
def current(city):
    city_input = quote_plus(city)
    token = random.choice(settings.KEYS)
    currentUrl = "https://api.openweathermap.org/data/2.5/weather?q={}&units=metric&appid={}".format(
        city_input, token)
    response = requests.get(currentUrl).json()
    returnDict = {
        'city': city,
        "description": response['weather'][0]['description'],
        'temperature': response['main']['temp'],
        'icon': response['weather'][0]['icon'],
        'time': time.strftime('%Y-%m-%d', time.localtime(response['dt'])),
    }
    return returnDict
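
The query URL built above, sketched standalone with an illustrative city and API key:

from urllib.parse import quote_plus

city, token = 'New York', 'MY_API_KEY'  # illustrative values
url = ('https://api.openweathermap.org/data/2.5/weather'
       '?q={}&units=metric&appid={}').format(quote_plus(city), token)
print(url)
# https://api.openweathermap.org/data/2.5/weather?q=New+York&units=metric&appid=MY_API_KEY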
Example 22
def new_search(request):
    # Getting the search input
    search = request.POST.get('search')

    # Saving it to the database
    models.Search.objects.create(search=search)

    # Getting the url format
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))

    # getting the html source code of the url
    response = requests.get(final_url)
    data = response.text

    # Parsing Url
    soup = BeautifulSoup(data, features='html.parser')

    # GETTING OUR DATA

    post_listings = soup.find_all('li', {'class': 'result-row'})

    final_postings = []

    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')

        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        post_date = post.find('time').get('title')

        if post.find(class_='result-image').get('data-ids'):
            post_image_id = post.find(class_='result-image').get(
                'data-ids').split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
            # print(post_image_url)
        else:
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append(
            (post_title, post_url, post_price, post_date, post_image_url))

    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }
    return render(request, 'my_app/new_search.html', stuff_for_frontend)
Example 23
    def secure_headers(self, url, post_data):
        """ Creates secure header for cryptopia private api. """
        nonce = str(time.time())
        md5 = hashlib.md5()
        jsonparams = post_data.encode('utf-8')
        md5.update(jsonparams)
        rcb64 = base64.b64encode(md5.digest()).decode('utf-8')

        signature = self.key + "POST" + quote_plus(url).lower() + nonce + rcb64
        hmacsignature = base64.b64encode(hmac.new(base64.b64decode(self.secret),
                                                  signature.encode('utf-8'),
                                                  hashlib.sha256).digest())
        header_value = "amx " + self.key + ":" + hmacsignature.decode('utf-8') + ":" + nonce
        return {'Authorization': header_value, 'Content-Type': 'application/json; charset=utf-8'}
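
A self-contained sketch of the signing scheme above with dummy credentials, showing the shape of the Authorization header (not real Cryptopia keys or endpoints):

import base64
import hashlib
import hmac
import time
from urllib.parse import quote_plus

key = 'dummy-api-key'                                    # illustrative
secret = base64.b64encode(b'dummy-api-secret').decode()  # illustrative
url, post_data = 'https://example.com/api/SubmitTrade', '{"Market": "BTC_USDT"}'

nonce = str(time.time())
rcb64 = base64.b64encode(hashlib.md5(post_data.encode('utf-8')).digest()).decode('utf-8')
signature = key + 'POST' + quote_plus(url).lower() + nonce + rcb64
hmacsignature = base64.b64encode(hmac.new(base64.b64decode(secret),
                                          signature.encode('utf-8'),
                                          hashlib.sha256).digest())
print('amx ' + key + ':' + hmacsignature.decode('utf-8') + ':' + nonce)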
Example 24
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    # print(quote_plus(search))
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    response = requests.get(final_url)
    data = response.text
    soup = BeautifulSoup(data, features='html.parser')

    # print(data)
    stuff_for_frontend = {
        'search': search,
    }
    return render(request, 'games/new_search.html', stuff_for_frontend)
Example 25
    def _by_ids(self, service, ids):
        """ return multiple items by id """
        if self.id and ids:
            values = ','.join(quote_plus(str(i)) for i in ids)
            term = '{}?id={}'.format(service.service_name, values)
            term = self._for_this_service(term)
            if ',' in values:
                return paginator(self._client, term, service.collection_name,
                                 service)
            res = self._client.get(term)
            return [
                service(client=self._client, data=res[service.service_name])
            ]
        return []
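
How the id list above is serialized, on illustrative ids (quote_plus keeps plain integers untouched and escapes anything URL-unsafe):

from urllib.parse import quote_plus

ids = [42, 'a/b']
print(','.join(quote_plus(str(i)) for i in ids))  # 42,a%2Fb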
Example 26
def search(request):
    if request.method == 'POST':
        form = SearchForm(request.POST)
        if form.is_valid():
            search = form.cleaned_data['search']

            if not Search.objects.filter(search=search):
                new_search = form.save(commit=False)
                new_search.save()
        else:
            # guard against an invalid form, which would leave `search` undefined
            return redirect('index')

        final_url = BASE_SEARCH_URL.format(quote_plus(search))

        response = requests.get(final_url)
        data = response.text
        soup = BeautifulSoup(data, features='html.parser')

        posts = soup.find_all('li', class_='result-row')

        final_posts = []

        for post in posts:
            post_title = post.find('a', class_='result-title').text
            post_link = post.find('a', class_='result-title').get('href')

            if post.find('span', class_='result-price'):
                post_price = post.find('span', class_='result-price').text
            else:
                post_price = 'N/A'

            if post.find('a', class_='result-image gallery'):
                image_ids = post.find(
                    'a', class_='result-image gallery').get('data-ids')
                image_id = image_ids.split(',')[0].split(':')[1]
                post_image = BASE_IMG_URL.format(image_id)
            else:
                post_image = "https://www.craigslist.org/images/peace.jpg"

            final_posts.append((post_title, post_link, post_price, post_image))

        form = SearchForm()

        frontend = {
            'form': form,
            'search': search,
            'final_posts': final_posts,
        }

        return render(request, 'radzlist/search.html', frontend)
    else:
        return redirect('index')
Example 27
    def get_songs_list(self):
        result = []
        search_content = self.artist + ' ' + self.song
        response = requests.get(
            SOUNDCLOUD_SEARCH_URL.format(quote_plus(search_content)))
        soup = BeautifulSoup(response.content, 'html.parser')
        for link in soup.find_all('a'):
            data = {}
            if link.parent.name == 'h2':
                data['title'] = link.text
                data['url'] = SOUNDCLOUD_URL + link['href']
                result.append(data)

        return result[0] if result else None
Example 28
def main():

    module = AnsibleModule(argument_spec=dict(user=dict(required=True,
                                                        aliases=['username']),
                                              password=dict(required=True),
                                              ip=dict(required=True),
                                              ptr=dict(
                                                  required=True,
                                                  aliases=['value', 'name'])),
                           supports_check_mode=True)

    user = module.params['user']
    password = module.params['password']
    ip = module.params['ip']
    ptr = module.params['ptr']

    try:
        url = urljoin('https://robot-ws.your-server.de/rdns/', quote_plus(ip))

        # Verify current status
        response = requests.get(url, auth=(user, password))
        if response.status_code == 200:
            result = response.json()
            if result['ip'] == ip and result['ptr'] == ptr:
                module.exit_json(changed=False,
                                 msg="OK",
                                 value=response.json())

            if module.check_mode:
                module.exit_json(changed=True, msg="OK", value=response.json())
        else:
            raise RuntimeError('Incorrect response from Hetzner WS: %s -> %d' %
                               (url, response.status_code))

        # Update record
        response = requests.post(url,
                                 json={'rdns': {
                                     'ip': ip,
                                     'ptr': ptr
                                 }},
                                 auth=(user, password))
        if response.status_code in (200, 201):
            module.exit_json(changed=True, msg="OK", value=response.json())
        else:
            raise RuntimeError('Incorrect response from Hetzner WS: %s -> %d' %
                               (url, response.status_code))
    except Exception as e:
        module.fail_json(changed=False,
                         msg='Failed in call to Hetzner WS: %s' % e)
Example 29
def new_search(request):
    search = request.POST.get('search')  # get the search term submitted via POST
    models.Search.objects.create(search=search)
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    # print(final_url)
    response = requests.get(final_url)
    data = response.text
    soup = BeautifulSoup(data, features='html.parser')
    post_listings = soup.find_all('li', {'class': 'result-row'})

    # post_title = post_listings[0].find(class_='result-title').text
    # post_url = post_listings[0].find('a').get('href')
    # post_price = post_listings[0].find(class_='result-price').text

    # print(post_title)
    # print(post_url)
    # print(post_price)

    final_postings = []

    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')
        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            post_price = 'N/A'

        if post.find(class_='result-image').get('data-ids'):
            post_image_id = post.find(class_='result-image').get('data-ids').split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
            print(post_image_url)
        else:
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append((post_title, post_url, post_price, post_image_url))

    # print(post_titles[0].text)
    # post_title = soup.find_all("a", {'class': 'result-title'})
    # print(post_titles[0].get('href'))
    # print(data)
    # result!

    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }

    return render(request, 'my_app/new_search.html', stuff_for_frontend)
Example 30
def new_search(request):
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    s = quote_plus(search)

    final_url = base_url.format(s)

    result = requests.get(final_url)
    src = result.content
    soup = BeautifulSoup(src, 'lxml')

    product_list = soup.find_all("div", {"class": "product-thumb"})

    product_info = []

    for product in product_list:
        product_title = product.find(class_='product-name').text
        product_url = product.find('a').get('href')

        if product.find(class_='price'):
            product_price = product.find(class_='price').text
        else:
            product_price = 'N/A'

        if product.find(class_='img-holder'):
            product_image_url = product.find('img').get('src')
        else:
            # without a fallback, a product with no image holder would
            # leave product_image_url unbound
            product_image_url = 'N/A'

        product_info.append((product_title, product_url, product_price, product_image_url))

    stuff_for_frontend = {
        'search': search,
        'final_postings': product_info,
    }

    return render(request, 'my_app/new_search.html', stuff_for_frontend)
Example 31
def new_search(request):
    # Getting the details of the specific search
    search = request.POST.get('search')
    # saving the search to the database
    models.Search.objects.create(search=search)
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
    # Extracting the source code of the page
    response = requests.get(final_url)
    data = response.text
    # Passing the source code to Beautiful Soup to create a BeautifulSoup object for it
    soup = BeautifulSoup(data, features='html.parser')
    # Extracting all the <li> tags whose class name is 'result-row' into a list
    post_listings = soup.find_all('li', {'class': 'result-row'})

    final_postings = []
    for post in post_listings:
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')

        if post.find(class_='result-price'):
            post_price = post.find(class_='result-price').text
        else:
            new_response = requests.get(post_url)
            new_data = new_response.text
            new_soup = BeautifulSoup(new_data, features='html.parser')
            post_text = new_soup.find(id='postingbody').text

            r1 = re.findall(r'\$\w+', post_text)
            if r1:
                post_price = r1[0]
            else:
                post_price = 'N/A'

        if post.find(class_='result-image').get('data-ids'):
            post_image_id = post.find(
                class_='result-image').get('data-ids').split(',')[0].split(':')[1]
            post_image_url = 'https://images.craigslist.org/{}_300x300.jpg'.format(
                post_image_id)
        else:
            post_image_url = 'https://craigslist.org/images/peace.jpg'

        final_postings.append(
            (post_title, post_url, post_price, post_image_url))

    stuff_for_frontend = {
        'search': search,
        'final_postings': final_postings,
    }
    return render(request, 'my_app/new_search.html', stuff_for_frontend)
Example 32
def post_file(session, domain='', file='', login=LOGIN):
    """ posts file to the cloud's upload server
    param: file - string filename with path
    """
    assert domain is not None, 'no domain'
    assert file is not None, 'no file'

    filetype = guess_type(file)[0]
    if not filetype:
        filetype = DEFAULT_FILETYPE
        if LOGGER:
            LOGGER.warning('File {} type is unknown, using default: {}'.format(file, DEFAULT_FILETYPE))

    filename = os.path.basename(file)
    quoted_login = quote_plus(login)
    timestamp = str(int(time.mktime(datetime.datetime.now().timetuple()))) + TIME_AMEND
    url = urljoin(domain, '?cloud_domain=' + str(CLOUD_DOMAIN_ORD) + '&x-email=' + quoted_login + '&fileapi' + timestamp)
    m = MultipartEncoder(fields={'file': (quote_plus(filename), open(file, 'rb'), filetype)})

    try:
        r = session.post(url, data=m, headers={'Content-Type': m.content_type}, verify=VERIFY_SSL)
    except Exception as e:
        if LOGGER:
            LOGGER.error('Post file HTTP request error: {}'.format(e))
        return (None, None)

    if r.status_code == requests.codes.ok:
        if len(r.content):
            hash = r.content[:40].decode()
            size = int(r.content[41:-2])
            return (hash, size)
        elif LOGGER:
            LOGGER.error('File {} post error, no hash and size received'.format(file))
    elif LOGGER:
        LOGGER.error('File {} post error, http code: {}, msg: {}'.format(file, r.status_code, r.text))
    return (None, None)
Example 33
def top10_largest_contries(request):

    data = requests.get(
        'https://the-top-10.fandom.com/wiki/Top_10_Biggest_Countries').text
    soup = BeautifulSoup(data, features='html.parser')
    data = soup.find('table')
    data = pandas.read_html(str(data), header=0)[0]
    final_list = data.values.tolist()
    data2 = []
    for i in final_list[0:10]:
        image = requests.get(image_url.format(quote_plus(f'{i[1]} flag'))).text
        image_soup = BeautifulSoup(image, 'html.parser')
        image_src = image_soup.find('img', {'class': 't0fcAb'})['src']
        data2.append(i + [image_src])
    return render(request, 'top/largest_contries.html', {'data': data2})
Example 34
def odata_encode_str(s):
    """ String eq comparison in OData requires special characters
    to be escaped, like &. ALSO, single quotes need to be doubled up,
    so we do that before encoding.  """
    s = s.replace("'", "''")
    return quote_plus(s.encode('utf8'))
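
A usage sketch: the single quote is doubled first, then quote_plus escapes the rest (assumes quote_plus is imported from urllib.parse as in the other examples):

print(odata_encode_str("O'Brien & Co"))  # O%27%27Brien+%26+Co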
Example 35
    def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals
        results = []

        search_url = self.urls['search']
        download_url = self.urls['download']
        if self.custom_url:
            if not validators.url(self.custom_url):
                logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
                return results

            search_url = urljoin(self.custom_url, search_url.split(self.url)[1])
            download_url = urljoin(self.custom_url, download_url.split(self.url)[1])

        if not self.login():
            return results

        for mode in search_params:
            items = []
            logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
            for search_string in search_params[mode]:

                if mode != 'RSS':
                    logger.log('Search string: {0}'.format
                               (search_string.decode('utf-8')), logger.DEBUG)

                get_params = {}
                get_params.update(self.categories[mode])
                get_params["q"] = quote_plus(search_string.decode('utf-8', 'ignore'))

                try:
                    torrents = self.get_url(search_url, params=get_params, returns='json')
                    # Handle empty string response or None #4304
                    if not torrents:
                        raise

                    # Make sure it is iterable #4304
                    iter(torrents)
                except Exception:
                    logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                    continue

                for torrent in torrents:

                    title = re.sub(r'\[.*\=.*\].*\[/.*\]', '', torrent['name']) if torrent['name'] else None
                    torrent_url = urljoin(download_url, '{0}/{1}.torrent'.format(torrent['t'], torrent['name'])) if torrent['t'] and torrent['name'] else \
                        None
                    if not all([title, torrent_url]):
                        continue

                    seeders = try_int(torrent['seeders'])
                    leechers = try_int(torrent['leechers'])

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log('Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format(title, seeders, leechers), logger.DEBUG)
                        continue

                    torrent_size = torrent['size']
                    size = convert_size(torrent_size) or -1

                    item = {'title': title, 'link': torrent_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}

                    if mode != 'RSS':
                        logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                   (title, seeders, leechers), logger.DEBUG)

                    items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
Example 36
    def authenticate(self):
        config = ConfigParser.ConfigParser()
        config.read(TOKENS_FILE)

        if config.has_option("hubic", "refresh_token"):
            oauth_token = self._refresh_access_token()
        else:
            r = requests.get(
                OAUTH_ENDPOINT + 'auth/?client_id={0}&redirect_uri={1}'
                '&scope=credentials.r,account.r&response_type=code&state={2}'.format(
                    quote(self._client_id),
                    quote_plus(self._redirect_uri),
                    pyrax.utils.random_ascii()  # csrf ? wut ?..
                ),
                allow_redirects=False
            )
            if r.status_code != 200:
                raise exc.AuthenticationFailed("Incorrect/unauthorized "
                                               "client_id (%s)" % str(self._parse_error(r)))

            try:
                from lxml import html as lxml_html
            except ImportError:
                lxml_html = None

            if lxml_html:
                oauth = lxml_html.document_fromstring(r.content).xpath('//input[@name="oauth"]')
                oauth = oauth[0].value if oauth else None
            else:
                oauth = re.search(r'<input\s+[^>]*name=[\'"]?oauth[\'"]?\s+[^>]*value=[\'"]?(\d+)[\'"]?>', r.content)
                oauth = oauth.group(1) if oauth else None

            if not oauth:
                raise exc.AuthenticationFailed("Unable to get oauth_id from authorization page")

            if self._email is None or self._password is None:
                raise exc.AuthenticationFailed("Cannot retrieve email and/or password. Please run expresslane-hubic-setup.sh")

            r = requests.post(
                OAUTH_ENDPOINT + 'auth/',
                data={
                    'action': 'accepted',
                    'oauth': oauth,
                    'login': self._email,
                    'user_pwd': self._password,
                    'account': 'r',
                    'credentials': 'r',

                },
                allow_redirects=False
            )

            try:
                query = urlparse.urlsplit(r.headers['location']).query
                code = dict(urlparse.parse_qsl(query))['code']
            except Exception:
                raise exc.AuthenticationFailed("Unable to authorize client_id, invalid login/password ?")

            oauth_token = self._get_access_token(code)

        if oauth_token['token_type'].lower() != 'bearer':
            raise exc.AuthenticationFailed("Unsupported access token type")

        r = requests.get(
            API_ENDPOINT + 'account/credentials',
            auth=BearerTokenAuth(oauth_token['access_token']),
        )

        swift_token = r.json()
        self.authenticated = True
        self.token = swift_token['token']
        self.expires = swift_token['expires']
        self.services['object_store'] = Service(self, {
            'name': 'HubiC',
            'type': 'cloudfiles',
            'endpoints': [
                {'public_url': swift_token['endpoint']}
            ]
        })
        self.username = self.password = None
Example 37
    def _magnet_from_result(info_hash, title):
        return 'magnet:?xt=urn:btih:{hash}&dn={title}&tr={trackers}'.format(
            hash=info_hash,
            title=quote_plus(title),
            trackers='http://tracker.tntvillage.scambioetico.org:2710/announce')
Example 38
    def authenticate(self):
#        import httplib
#        httplib.HTTPConnection.debuglevel = 1
        r = requests.get(
            OAUTH_ENDPOINT+'auth/?client_id={0}&redirect_uri={1}'
            '&scope=credentials.r,account.r&response_type=code&state={2}'.format(
                quote(self._client_id),
                quote_plus(self._redirect_uri),
                pyrax.utils.random_ascii() # csrf ? wut ?..
            ),
            allow_redirects=False
        )
        if r.status_code != 200:
            raise exc.AuthenticationFailed("Incorrect/unauthorized "
                    "client_id (%s)"%str(self._parse_error(r)))

        try:
            from lxml import html as lxml_html
        except ImportError:
            lxml_html = None

        if lxml_html:
            oauth = lxml_html.document_fromstring(r.content).xpath('//input[@name="oauth"]')
            oauth = oauth[0].value if oauth else None
        else:
            oauth = re.search(r'<input\s+[^>]*name=[\'"]?oauth[\'"]?\s+[^>]*value=[\'"]?(\d+)[\'"]?>', r.content)
            oauth = oauth.group(1) if oauth else None

        if not oauth:
            raise exc.AuthenticationFailed("Unable to get oauth_id from authorization page")

        r = requests.post(
            OAUTH_ENDPOINT+'auth/',
            data = {
                'action': 'accepted',
                'oauth': oauth,
                'login': self._email,
                'user_pwd': self._password,
                'account': 'r',
                'credentials': 'r',

            },
            allow_redirects=False
        )

        if r.status_code == 302 and r.headers['location'].startswith(self._redirect_uri):
            query = urlparse.urlsplit(r.headers['location']).query
            code = dict(urlparse.parse_qsl(query))['code']
        else:
            raise exc.AuthenticationFailed("Unable to authorize client_id, invalid login/password ?")

        r = requests.post(
            OAUTH_ENDPOINT+'token/',
            data={
                'code': code,
                'redirect_uri': self._redirect_uri,
                'grant_type': 'authorization_code',
            },
            auth=(self._client_id, self._client_secret)
        )
        if r.status_code != 200:
            try:
                err = r.json()
                err['code'] = r.status_code
            except Exception:
                err = {}

            raise exc.AuthenticationFailed("Unable to get oauth access token, "
                                           "wrong client_id or client_secret ? (%s)"%str(err))

        oauth_token = r.json()
        if oauth_token['token_type'].lower() != 'bearer':
            raise exc.AuthenticationFailed("Unsupported access token type")

        r = requests.get(
            API_ENDPOINT+'account/credentials',
            auth=BearerTokenAuth(oauth_token['access_token']),
        )

        swift_token = r.json()
        self.authenticated = True
        self.token = swift_token['token']
        self.expires = swift_token['expires']
        self.services['object_store'] = Service(self, {
            'name': 'HubiC',
            'type': 'cloudfiles',
            'endpoints': [
                {'public_url': swift_token['endpoint']}
            ]
        })
        self.username = self.password = None
Example 39
    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
        results = []
        if not self.login():
            return results

        for mode in search_strings:
            items = []
            logger.log(u"Search Mode: {}".format(mode), logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    search_url = self.urls['search'] % (quote_plus(search_string), self.categories)
                    logger.log(u"Search string: {}".format(search_string.decode("utf-8")),
                               logger.DEBUG)
                else:
                    search_url = self.urls['rss'] % self.categories

                if self.freeleech:
                    search_url = search_url.replace('active=1', 'active=5')

                data = self.get_url(search_url, returns='text')
                if not data or 'please try later' in data:
                    logger.log(u"No data returned from provider", logger.DEBUG)
                    continue

                if data.find('No torrents here') != -1:
                    logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                # Search result page contains some invalid html that prevents html parser from returning all data.
                # We cut everything before the table that contains the data we are interested in thus eliminating
                # the invalid html portions
                try:
                    index = data.lower().index('<table class="mainblockcontenttt"')
                except ValueError:
                    logger.log(u"Could not find table of torrents mainblockcontenttt", logger.DEBUG)
                    continue

                data = data[index:]

                with BS4Parser(data, 'html5lib') as html:
                    if not html:
                        logger.log(u"No html data parsed from provider", logger.DEBUG)
                        continue

                    torrent_rows = []
                    torrent_table = html.find('table', class_='mainblockcontenttt')
                    if torrent_table:
                        torrent_rows = torrent_table('tr')

                    if not torrent_rows:
                        logger.log(u"Could not find results in returned data", logger.DEBUG)
                        continue

                    # Cat., Active, Filename, Dl, Wl, Added, Size, Uploader, S, L, C
                    labels = [label.a.get_text(strip=True) if label.a else label.get_text(strip=True) for label in torrent_rows[0]('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            cells = result.findChildren('td')[:len(labels)]
                            if len(cells) < len(labels):
                                continue

                            title = cells[labels.index(u'Filename')].a.get_text(strip=True)
                            seeders = try_int(cells[labels.index(u'S')].get_text(strip=True))
                            leechers = try_int(cells[labels.index(u'L')].get_text(strip=True))
                            torrent_size = cells[labels.index(u'Size')].get_text()

                            size = convert_size(torrent_size) or -1
                            download_url = self.url + '/' + cells[labels.index(u'Dl')].a['href']
                        except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < min(self.minseed, 1):
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders: {0}. Seeders: {1})".format
                                           (title, seeders), logger.DEBUG)
                            continue

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'pubdate': None, 'hash': None}
                        if mode != 'RSS':
                            logger.log(u"Found result: %s with %s seeders and %s leechers" % (title, seeders, leechers), logger.DEBUG)

                        items.append(item)

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)

            results += items

        return results