Example #1
def extract_results(item, condition=None):
    # The URL is extended based on the requested condition
    if condition == "new":
        url = util.create_url(MAIN_URL, item, DELIMITER) + "&condition=New,New%20or%20Used&adtype=998"
    else:
        url = util.create_url(MAIN_URL, item, DELIMITER) + "&condition=Used,Refurbished,For%20Parts/Not%20Working,New%20or%20Used&adtype=998"
    results = []
    # Check if the page has data
    try:
        soup = util.check_exceptions(url)
        table = soup.find('tbody', class_='ResultsNewTable')
        rows = table.find_all('tr')
    except:
        return []
    # Keep the first 10 valid results only
    for row in rows:
        new_result = Result(row.find('a').get('title'))
        new_result.url = row.find('a').get('href')
        new_result.price = util.get_price(row.find_all('td')[4].contents[0])
        # The listing number embedded in the title determines the image path
        number = util.get_price(new_result.title)
        new_result.image_src = "https://photos.labx.com/labx/" + number + "/" + number + "-0.jpg"
        if util.is_valid_price(new_result.price):
            results.append(new_result)
            if len(results) == 10:
                return results
    return results
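All of the extract_results examples on this page lean on a small shared toolkit (Result, util.create_url, util.get_price, util.is_valid_price, util.check_exceptions, plus per-site MAIN_URL / HOME_URL / DELIMITER constants) that the snippets themselves never define. A minimal sketch of what they appear to assume is below; every name, signature, and return type here is inferred from the call sites on this page rather than taken from the real projects (example #1, for instance, string-concatenates the get_price result, so the real helpers clearly differ per project).

import re
import urllib.request
from bs4 import BeautifulSoup

class Result:
    # Container for one scraped listing, inferred from usage in the examples
    def __init__(self, title):
        self.title = title
        self.url = None
        self.price = None
        self.image_src = None
    # Some of the projects use explicit accessors instead of attributes
    def set_url(self, url): self.url = url
    def set_price(self, price): self.price = price
    def set_image_src(self, src): self.image_src = src
    def get_price(self): return self.price

def create_url(main_url, search_term, delimiter):
    # Hypothetical: join the query words with the site-specific delimiter
    return main_url + delimiter.join(str(search_term).split())

def get_price(text):
    # Hypothetical: pull the first number out of a string like "$1,234.56"
    match = re.search(r'\d[\d,]*(?:\.\d+)?', str(text))
    return float(match.group().replace(',', '')) if match else None

def is_valid_price(price):
    # Hypothetical: a usable price is a positive number
    return price is not None and price > 0

def check_exceptions(url):
    # Hypothetical: fetch a page and return it parsed, raising on HTTP errors
    return BeautifulSoup(urllib.request.urlopen(url), "html.parser")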
Example #2
def extract_results(search_word, condition=None):
    if condition == 'new':
        return []
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    product_grid = soup.find('ul', class_='Products_ul')
    try:
        total_equips = product_grid.find_all('li', class_='Products')
    except:
        return []
    equips = []
    for equip in total_equips:
        title = equip.find(
            'div', class_='title').find('span').find(text=True).strip()
        equipment = Result(title)
        equipment.url = equip.find('a').get('href')
        equipment.image_src = equip.find('div',
                                         class_='Image').find('img').get('src')
        price_text = equip.find('div', class_='price').find_all(text=True)
        equipment.price = util.get_price(''.join(price_text))
        if util.is_valid_price(equipment.price):
            equips.append(equipment)
        if len(equips) >= 10:
            return equips
    return equips
Example #3
def extract_results(search_word, condition=None):
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    try:
        product_contents = soup.find_all('div',
                                         class_='products-mnbox-content')
    except:
        return []

    results = []
    for product_content in product_contents:
        equip_url = HOME_URL + product_content.find('a').get('href')
        models_site = BeautifulSoup(urllib.request.urlopen(equip_url),
                                    "html.parser")
        model_descriptions = models_site.find_all('td', class_='description')

        for description in model_descriptions:
            result = Result(
                description.find('div', {'id': 'gaProductName'}).find(text=True).strip())
            result.image_src = 'https:' + description.find(
                'img', class_='lazy').get('data-original')
            result.url = HOME_URL + description.find('a').get('href')
            price_site = BeautifulSoup(urllib.request.urlopen(result.url),
                                       "html.parser")
            result.price = util.get_price(
                price_site.find('div', class_='price-box').find(
                    'span', class_='price-range').find(text=True))
            if util.is_valid_price(result.price):
                results.append(result)
            if len(results) >= 10:
                return results
    return results
Example #4
def extract_results(search_term, condition=None):
    if condition == 'new':
        return []
    # Request headers (defined but unused; the page is fetched with headless Chrome below)
    headers = {
        'Host': 'www.biosurplus.com',
        'Connection': 'keep-alive',
        'Accept': 'text/html',
        'Referer': 'http://www.biosurplus.com/?ajax_search_nonce=b2ba2354a5&s==Beckman+Coulter&post_type=product',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'en-US,en;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
    }
    url = util.create_url(MAIN_URL, search_term,
                          DELIMITER) + "&post_type=product"
    path_to_chromedriver = 'chromedriver.exe'
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    browser = webdriver.Chrome(executable_path=path_to_chromedriver,
                               options=option)
    browser.get(url)
    time.sleep(5)

    soup = BeautifulSoup(browser.page_source, "html.parser")
    table = soup.find('div', class_='content-area')
    try:
        # Fails if no results list was rendered on the page
        rows = table.findAll("li", {"class": re.compile('post-*')})
    except:
        return []
    results = []
    for row in rows:
        new_result = Result(
            row.find('h2', class_="woocommerce-loop-product__title").text)
        new_result.set_price(
            util.get_price(row.find(text=re.compile("Price*"))))
        #Handle different paths
        try:
            img_src = row.find('div', class_="image_frame").find(
                'div', class_="product-loop-image bsi-thumb").get("style")
        except:
            img_src = row.find('div', {
                "style": re.compile('background*')
            }).get('style')
        # Pull the URL out of the inline style's background-image: url(...)
        img_src = img_src.replace(') ', '( ')
        img_src = img_src.split('(')[1]
        img_src = img_src.split(')')[0]
        new_result.set_image_src(img_src)
        new_result.set_url(row.find('a').get('href'))
        if util.is_valid_price(new_result.get_price()):
            results.append(new_result)
            if len(results) == 10:
                return results
    return results
Example #5
def extract_results(search_word, condition=None):
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    try:
        soup = util.check_exceptions(url)
        product_table = soup.find('table', class_='table_content')
        result_links = product_table.find_all('a')
    except:
        return []

    equips = []
    for link in result_links:
        product_url = HOME_URL + link.get('href')
        product_page_content = BeautifulSoup(
            urllib.request.urlopen(product_url), "html.parser")
        title = ''.join(
            product_page_content.find(
                'div',
                class_='product_left').find('h1').find_all(text=True)).strip()
        equipment = Result(title)
        equipment.url = product_url
        equipment.image_src = HOME_URL + product_page_content.find(
            'img', {
                "id": "big_product_img"
            }).get('src')
        equipment.price = util.get_price(
            product_page_content.find('div',
                                      class_='pr_price2').find(text=True))
        if util.is_valid_price(equipment.price):
            equips.append(equipment)
        if len(equips) >= 10:
            return equips
    return equips
Example #6
def extract_results(search_word, condition=None):
    if condition == 'new':
        return []
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    path_to_chromedriver = 'chromedriver.exe'
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    browser = webdriver.Chrome(executable_path=path_to_chromedriver,
                               options=option)
    browser.get(url)
    time.sleep(5)
    soup = BeautifulSoup(browser.page_source, "html.parser")
    product_grid = soup.find('ul', class_='product_list p_list')
    try:
        total_equips = product_grid.find_all(
            'li', {"class": re.compile('p_list_item*')})
    except:
        return []
    equips = []
    for equip in total_equips:
        title = equip.find('div', class_='title').find('a').text
        equipment = Result(title)
        equipment.set_url(HOME_URL + equip.find('a').get('href'))
        equipment.set_image_src(
            HOME_URL +
            equip.find('div', class_='thumb').find('img').get('src'))
        price_text = equip.find('li', class_='price').text
        equipment.set_price(util.get_price(price_text))
        if util.is_valid_price(equipment.get_price()):
            equips.append(equipment)
        if len(equips) == 10:
            return equips
    return equips
Example #7
def extract_results(search_word, condition=None):
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    url = url + '&cond=used' if condition != 'new' else url + '&cond=new'
    path_to_chromedriver = 'chromedriver.exe'
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    browser = webdriver.Chrome(executable_path=path_to_chromedriver,
                               options=option)
    browser.get(url)
    time.sleep(5)
    soup = BeautifulSoup(browser.page_source, "html.parser")

    equips = []
    try:
        sale_equips = soup.find_all('div', {'id': re.compile('listing_*')})
    except:
        return equips

    for equip in sale_equips:
        title = equip.find('h4').find('a').text.strip()
        equipment = Result(title)
        equipment.set_url(
            HOME_URL + equip.find('div', class_='row').find('a').get('href'))
        equipment.set_image_src(equip.find('img').get('src'))
        equipment.set_price(
            util.get_price(equip.find('span', class_='price').text))
        if util.is_valid_price(equipment.get_price()):
            equips.append(equipment)
        if len(equips) == 10:
            return equips
    return equips
Example #8
def extract_results(search_word, condition=None):
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    url = url if condition != "new" else url + '&Condition=5067'
    try:
        soup = util.check_exceptions(url)
        product_grid = soup.find('div', class_='pagebody')
        total_equips = product_grid.find_all('div', class_='el')
    except:
        return []
    equips = []
    for equip in total_equips:
        # items_details have names of generic device, model, manufacturer bundled together
        items_details = equip.find('div',
                                   class_='item_details').find_all(text=True)
        title = ' '.join(items_details).strip()
        equipment = Result(title)
        equipment.url = equip.find('div', class_='image').find(
            'a', class_='item_number').get('href')
        equipment.image_src = equip.find('div',
                                         class_='image').find('img').get('src')
        # Some listings show the price in span.price_element, others in div.price
        price_span = equip.find('span', class_='price_element')
        if price_span is None:
            price_text = equip.find('div', class_='price').find(text=True)
        else:
            price_text = price_span.find(text=True)
        equipment.price = util.get_price(''.join(price_text))
        if util.is_valid_price(equipment.price):
            equips.append(equipment)
        if len(equips) >= 10:
            return equips
    return equips
Example #9
def extract_results(search_word, condition=None):
    if condition == "new":
        return []
    url = util.create_url(MAIN_URL, search_word, DELIMITER)
    try:
        soup = util.check_exceptions(url)
        product_grid = soup.find('div', class_='v-product-grid')
        total_equips = product_grid.find_all('div', class_='v-product')
    except:
        return []
    equips = []

    for equip in total_equips:
        title = equip.find(
            'a', class_='v-product__title productnamecolor colors_productname'
        ).find(text=True).strip()
        equipment = Result(title)
        equipment.url = equip.find('a', class_='v-product__img').get('href')
        equipment.image_src = 'http:' + equip.find('img').get('src')
        price_text = equip.find(
            'div', class_='product_productprice').find_all(text=True)
        equipment.price = util.get_price(''.join(price_text))
        if util.is_valid_price(equipment.price):
            equips.append(equipment)
        if len(equips) >= 10:
            return equips
    return equips
Example #10
def extract_results(search_term, condition=None):
    if condition == 'new':
        return []
    url = util.create_url(MAIN_URL, search_term, DELIMITER)
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    table = soup.find('div', class_='content-area')
    try:
        rows = table.findAll("article")
    except:
        return []

    results = []
    for row in rows:
        new_result = Result(
            row.find('h1', class_="entry-title").find("a").text)
        result_url = row.find('a').get('href')

        #scrape from the result's page
        result_soup = BeautifulSoup(urllib.request.urlopen(result_url),
                                    "html.parser")
        new_result.set_url(result_url)
        new_result.set_price(
            util.get_price(result_soup.find('span', class_="amount").text))
        new_result.set_image_src(
            result_soup.find('div', class_='images').find('img').get('src'))
        if util.is_valid_price(new_result.get_price()):
            results.append(new_result)
            if len(results) == 10: return results
    return results
Example #11
def extract_results(item, condition=None):
    # The URL is extended based on the requested condition
    if condition == "new":
        url = util.create_url(MAIN_URL, item, DELIMITER) + "&condition=468"
    else:
        url = util.create_url(MAIN_URL, item, DELIMITER) + "&condition=467,469"
    results = []
    # Request headers (defined but unused; the page is fetched with headless Chrome below)
    headers = {
        'Host': 'www.labx.com',
        'Connection': 'keep-alive',
        'Accept': '*/*',
        'Referer': 'https://www.labx.com/item/vacuum-pump-230-v-50-hz/12183467',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'en-US,en;q=0.9',
        'User-Agent': 'Chrome/80.0.3987.132, Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36',
        'Sec-Fetch-Dest': 'script',
        'Sec-Fetch-Mode': 'no-cors',
        'Sec-Fetch-Site': 'same-site',
        'Upgrade-Insecure-Requests': '1',
        'x-runtime': '148ms'}
    # Check if the page has data (the site is JavaScript-rendered, so use headless Chrome)
    try:
        path_to_chromedriver = 'chromedriver.exe'
        option = webdriver.ChromeOptions()
        option.add_argument('headless')
        browser = webdriver.Chrome(executable_path=path_to_chromedriver, options=option)
        browser.get(url)
        time.sleep(5)
        soup = BeautifulSoup(browser.page_source, 'html.parser')
        rows = soup.find_all('div', class_='product-card')
    except:
        return []
    # Keep the first 10 valid results only
    for row in rows:
        new_result = Result(row.find('a', class_='card-title').text)
        new_result.url = HOME_URL + row.find('a').get('href')
        new_result.price = util.get_price(row.find(class_='price').get_text())
        new_result.image_src = row.find('div', class_='card-img-top').find("img").get("src")
        if util.is_valid_price(new_result.price):
            results.append(new_result)
            if len(results) == 10:
                break
    return results
Example #12
    def set_price(self):
        try:
            self.price = get_price()
        except:
            self.price = None
        if self.price:
            # balance is presumably in base units (1e8 per coin); price is the fiat rate
            value = self.price * self.wallet.get_balance() / 1e8
            self.priceDisplay.setText("{:.2f} EUR".format(value))
Example #13
def predict_price():
    total_sqft = float(request.form['total_sqft'])
    location = request.form['location']
    bhk = int(request.form['bhk'])
    bath = int(request.form['bath'])

    response = jsonify(
        {"estimated_price": util.get_price(location, total_sqft, bhk, bath)})
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response
Example #14
def lookup(ticker,api_key):
    user = db.session.query(Accounts).filter_by(api_key=api_key).first()
    if user:
        price = get_price(ticker)
        description = stock_description(ticker)
        chart_data = chart(ticker)
        logo = Logo(ticker)
        related = related_Companies(ticker)  # fetched but not included in the response
        return jsonify({'current_price':price,"des":description,"chartData":chart_data,"logo":logo})
    return jsonify({"error":"failed"})
Example #15
def predict_home_price():
    total_sqft = float(request.form['total_sqft'])
    location = request.form['location']
    bhk = int(request.form['bhk'])
    bath = int(request.form['bath'])

    response = jsonify(
        {'estimated_price': util.get_price(location, total_sqft, bhk, bath)})
    response.headers.add('Access-Control-Allow-Origin', '*')

    return response
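Examples #13 and #15 (and #19 and #21 below) are Flask view functions around util.get_price; the route decorator and app setup are implied. A hypothetical client call against example #15, assuming the view is registered at /predict_home_price on a local development server:

import requests

# Field names mirror the request.form keys read above; the host, port, and
# route are assumptions for a local Flask dev server, not from the source.
resp = requests.post(
    "http://127.0.0.1:5000/predict_home_price",
    data={"total_sqft": 1200, "location": "Whitefield", "bhk": 2, "bath": 2},
)
print(resp.json()["estimated_price"])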
Example #16
def extract_results(search_word, condition=None):
    # The site exposes no search URL, so load the home page and use its form
    url = HOME_URL
    try:
        path_to_chromedriver = 'chromedriver.exe'
        option = webdriver.ChromeOptions()
        option.add_argument('headless')
        browser = webdriver.Chrome(executable_path=path_to_chromedriver,
                                   options=option)
        browser.get(url)
        time.sleep(5)
        # Type the query into the live search element and submit it; mutating
        # the parsed soup cannot reach the browser, so the search has to go
        # through selenium itself
        search_bar = browser.find_element_by_class_name('main-search--text')
        search_bar.send_keys(search_word)
        search_bar.send_keys(Keys.RETURN)
        # Wait for the results container to render, then re-parse the page
        product_table = browser.find_element_by_class_name('main-search--results')
        WebDriverWait(browser, 10).until(lambda browser: product_table.text)
        soup = BeautifulSoup(browser.page_source, 'html.parser')
        result_links = soup.find('div', class_='main-search--results').find_all(
            'li', class_="list--entry result--item")
    except Exception as e:
        print("Error was: ", e)
        return []

    equips = []
    for link in result_links:
        product_url = HOME_URL + link.find('a').get('href')
        title = link.find('a').get('title')
        equipment = Result(title)
        equipment.set_url(product_url)
        product_page_content = BeautifulSoup(
            urllib.request.urlopen(product_url), "html.parser")
        equipment.set_image_src(HOME_URL +
                                product_page_content.find('img').get('src'))
        equipment.set_price(
            util.get_price(
                product_page_content.find(
                    'span', class_='price--default is--nowrap').text))
        if util.is_valid_price(equipment.get_price()):
            equips.append(equipment)
        if len(equips) == 10:
            return equips
    return equips
Example #17
def extract_results(item, requested_condition=None):
    path_to_chromedriver = 'chromedriver.exe'
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    browser = webdriver.Chrome(executable_path=path_to_chromedriver,
                               options=option)
    url = util.create_url(MAIN_URL, item, DELIMITER)
    browser.get(url)
    time.sleep(5)
    soup = BeautifulSoup(browser.page_source, 'html.parser')
    results = []
    #Check for data
    try:
        table = soup.find('div', class_='search results')
        rows = table.find_all('li', class_='item product product-item')
    except:
        return results
    #Get 1st 10 results only
    for row in rows:
        new_result = Result(
            row.find('a', class_='product-item-link').text.strip())
        new_result.url = row.find('a').get('href')
        # Drop the leading currency symbol before parsing the price
        new_result.price = util.get_price(
            str(row.find('span', class_='price').find(text=True))[1:])
        new_result.image_src = row.find('img').get('src')
        browser.get(new_result.url)
        new_soup = BeautifulSoup(browser.page_source, "html.parser")
        condition = new_soup.find('div',
                                  class_='product attribute description').find(
                                      'div', class_='value').text
        conditions = ['new', 'New', 'used', 'Used']
        bad_condition_types = [
            'bad', 'poor', 'not working', 'broken', 'not functional'
        ]
        #Check for matching conditions
        for word in conditions:
            if word in condition:
                if (requested_condition is None and word.lower() == 'used') or \
                        (requested_condition is not None and requested_condition.lower() == word.lower()):
                    # Only add working equipment: skip listings whose
                    # description mentions a bad-condition phrase
                    if util.is_valid_price(new_result.price) and \
                            not any(bad in condition for bad in bad_condition_types):
                        results.append(new_result)
                        if len(results) == 10:
                            return results
                    break
    return results
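The condition filter above (shared with example #25 below) boils down to one membership test over the listing description. A standalone illustration of the same check, with illustrative data:

bad_condition_types = ['bad', 'poor', 'not working', 'broken', 'not functional']

def is_acceptable(description):
    # A listing passes when no bad-condition phrase appears in its description
    return not any(bad in description.lower() for bad in bad_condition_types)

assert is_acceptable("Used, fully functional, tested")
assert not is_acceptable("For parts, not working")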
Example #18
def extract_results(search_term, condition=None):
    if condition == 'new':
        return []
    headers = {
        'Host': 'www.biosurplus.com',
        'Connection': 'keep-alive',
        'Accept': 'text/html',
        'Referer': 'http://www.biosurplus.com/store/search/?per_page=24&product_search_q=Beckman+Coulter+Biomek+Workstation',
        'Accept-Encoding': 'gzip, deflate, sdch',
        'Accept-Language': 'en-US,en;q=0.8',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
    }
    url = util.create_url(MAIN_URL, search_term, DELIMITER)
    req = urllib2.Request(url, headers=headers)
    page = urllib2.urlopen(req)

    #This page is g-zipped. Unzip it
    stringified_data = StringIO.StringIO(page.read())
    unzipped_page = gzip.GzipFile(fileobj=stringified_data)

    soup = BeautifulSoup(unzipped_page, "html.parser")
    table = soup.find('div', class_='product_browse')
    try:
        # Fails if no product table was found on the page
        rows = table.findAll("div", class_="fps_featured_product")
    except:
        return []
    results = []
    for row in rows:
        manufacturer = row.find('p',
                                class_="fps_fp_description").find(text=True)
        title = row.find('h2',
                         class_="fps_fp_heading").find("a").find(text=True)
        new_result = Result(manufacturer + " " + title)
        new_result.price = util.get_price(
            row.find('p', class_='product_price').find(text=True))
        new_result.image_src = row.find(
            'div', class_="fps_fp_image_inner").find('img').get('src')
        new_result.url = "http://www.biosurplus.com" + row.find('a').get('href')
        if util.is_valid_price(new_result.price):
            results.append(new_result)
            if len(results) == 10: return results
    return results
Example #19
def predict_price():
    content = request.get_json()
    total_sqft = float(content["sqft"])
    location = content["city"]
    bhk = int(content["bhk"])
    bath = int(content["bath"])

    response = jsonify(
        {"price": util.get_price(location, total_sqft, bhk, bath)})

    response.headers.add("Access-Control-Allow-Origin", "*")
    return response
Example #20
def extract_results(search_term, condition=None):
    url = util.create_url(MAIN_URL, search_term, DELIMITER)
    if condition == 'new':
        url = url + '&tbs=vw:l,mr:1,new:1'
    else:
        url = url + '&tbs=vw:l,mr:1,new:3'
    headers = {
        'Connection': 'keep-alive',
        'Accept': 'text/html',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
    }
    r = requests.get(url, timeout=5, headers=headers)
    soup = BeautifulSoup(r.content, "html.parser")
    table = soup.find('div', class_='sh-pr__product-results')
    try:
        rows = table.findAll('div', class_='sh-dlr__list-result')
    except:
        return []

    results = []
    for row in rows:
        if condition != 'new':
            condition_text = str(row.find('span', class_='h1Wfwb O8U6h').text)
            if (('used' not in condition_text)
                    and ('refurbished' not in condition_text)):
                #skip over items that do not say "used" when searching for used items
                continue
        if "eBay" in str(
                row.find('a', class_='shntl hy2WroIfzrX__merchant-name').text):
            #many google results overlap with eBay. Do not include these.
            continue
        new_result = Result(row.find('h3', class_='xsRiS').text)
        new_result.set_url(HOME_URL + row.find('a').get('href'))
        # The full price text is carried on the visually hidden span
        new_result.set_price(
            util.get_price(row.find('span', {'aria-hidden': 'true'}).text))
        if util.is_valid_price(new_result.get_price()):
            results.append(new_result)
            if len(results) == 10:
                return results
    return results
Example #21
def predict_home_price():
    area = float(request.form['area'])
    bhk = int(request.form['bhk'])
    bathroom = int(request.form['bathroom'])
    parking = int(request.form['parking'])
    furnishing_status = request.form['furnishing_status']

    response = jsonify({
        'estimated_price':
        util.get_price(area, bhk, bathroom, parking, furnishing_status)
    })

    response.headers.add('Access-Control-Allow-Origin', '*')

    return response
Example #22
    def sell(self, ticker, amount):
        position = self.get_position_for(ticker)
        price = get_price(ticker)
        if price is None:
            raise KeyError
        notional = abs(price * amount)
        trade = Trades(accounts_pk=self.pk,
                       ticker=ticker,
                       price=price,
                       quantity=amount)
        if position.shares > amount:
            self.balance += notional
            trade.save()
            position.shares -= abs(int(amount))
            position.save()
            self.save()
        else:
            raise ValueError
Example #23
    def buy(self, ticker, amount):
        position = self.get_position_for(ticker)
        price = get_price(ticker)
        if price is None:
            raise KeyError
        notional = price * amount
        trade = Trades(accounts_pk=self.pk,
                       ticker=ticker,
                       price=price,
                       quantity=amount)
        if self.balance >= notional:
            self.balance -= notional
            trade.save()
            position.shares += int(amount)
            position.save()
            self.save()
        else:
            raise ValueError
Example #24
    def get_balance(self, item):
        user_data = util.find_user(item.author.name, self.log, self.db)
        user_address = user_data['ban_address']
        data = {'action': 'account_balance', 'account': user_address}
        parsed_json = self.rest_wallet.post_to_wallet(data, self.log)

        data = {
            'action': 'banoshi_from_raw',
            'amount': int(parsed_json['balance'])
        }
        rai_balance = self.rest_wallet.post_to_wallet(data, self.log)
        self.log.info(rai_balance['amount'])
        xrb_balance = format((float(rai_balance['amount']) / 100.0), '.2f')
        rate = util.get_price()
        if rate is not None:
            usd = float(xrb_balance) * rate
            # include the USD conversion when a rate is available
            reply_message = 'Your balance is :\n\n %s BANANO or $%s USD' % \
                            (xrb_balance, format(float(usd), '.2f'))
        else:
            reply_message = 'Your balance is :\n\n %s BANANO' % xrb_balance
        item.reply(reply_message)
Example #25
def extract_results(item, requested_condition=None):
    url = util.create_url(MAIN_URL, item, DELIMITER)
    r = requests.get(url, timeout=3)
    soup = BeautifulSoup(r.content, "html.parser")
    results = []
    #Check for data
    try:
        table = soup.find_all('li', class_='item')
    except:
        return results
    #Get 1st 10 results only
    for row in table:
        new_result = Result(row.find('a').get('title'))
        new_result.url = row.find('a').get('href')
        new_result.price = util.get_price(str(row.find('span',class_='price').find(text=True))\
                           .encode('utf-8')[1:])
        new_result.image_src = row.find('img').get('src')

        specific_page = urllib2.urlopen(new_result.url)
        new_soup = BeautifulSoup(specific_page, "html.parser")
        condition = new_soup.find('div', class_='product-collateral').find(
            'div', class_='std').text
        conditions = ['new', 'New', 'used', 'Used']
        bad_condition_types = [
            'bad', 'poor', 'not working', 'broken', 'not functional'
        ]
        #Check for matching conditions
        for word in conditions:
            if word in condition:
                if (requested_condition is None and word.lower() == 'used') or \
                        (requested_condition is not None and requested_condition.lower() == word.lower()):
                    # Only add working equipment: skip listings whose
                    # description mentions a bad-condition phrase
                    if util.is_valid_price(new_result.price) and \
                            not any(bad in condition for bad in bad_condition_types):
                        results.append(new_result)
                        if len(results) == 10:
                            return results
                    break
    return results
Example #26
    def get_balance(self, item):
        user_data = util.find_user(item.author.name, self.log, self.db)
        user_address = user_data['xrb_address']
        data = {'action': 'account_balance', 'account': user_address}
        parsed_json = self.rest_wallet.post_to_wallet(data, self.log)

        data = {
            'action': 'rai_from_raw',
            'amount': int(parsed_json['balance'])
        }
        rai_balance = self.rest_wallet.post_to_wallet(data, self.log)
        self.log.info(rai_balance['amount'])
        xrb_balance = format((float(rai_balance['amount']) / 1000000.0), '.6f')
        rate = util.get_price()
        if rate is not None:
            usd = float(xrb_balance) * rate
            reply_message = 'Your balance is :\n\n %s NANO or $%s USD \n\nUSD conversion rate of $%s' % \
                            (xrb_balance, str(format(float(usd), '.3f')), str(format(float(rate), '.3f')))
        else:
            reply_message = 'Your balance is :\n\n %s NANO' % xrb_balance
        item.reply(reply_message)
Example #27
File: ebay.py Project: thotran2015/SeedLab
def extract_results(search_term, condition=None):
    if condition == 'new':
        url = util.create_url(MAIN_URL, search_term, DELIMITER) + '&LH_BIN=1' + NEW
    else:
        url = util.create_url(MAIN_URL, search_term, DELIMITER) + '&LH_BIN=1' + USED
    page = urllib2.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    table = soup.find('div', id='ResultSetItems')
    try:
        rows = table.findAll('li', class_='sresult lvresult clearfix li')
    except:
        return []
    results = []
    for row in rows:
        new_result = Result(row.find('h3', class_="lvtitle").find(text=True))
        new_result.url = row.find('h3', class_="lvtitle").find('a').get('href')
        new_result.image_src = row.find('img', class_='img').get('src')
        new_result.price = util.get_price(row.find('li', class_="lvprice prc").find('span').find(text=True))
        if util.is_valid_price(new_result.price):
            results.append(new_result)
    return results
Example #28
def extract_results(search_term, condition=None):
    if condition == 'new':
        url = util.create_url(MAIN_URL, search_term, DELIMITER) + '&rt=nc' + NEW
    else:
        url = util.create_url(MAIN_URL, search_term, DELIMITER) + '&rt=nc' + USED
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    table = soup.find('div', class_='srp-river-results clearfix')
    try:
        rows = table.findAll('div', class_='s-item__wrapper clearfix')
    except:
        return []
    results = []
    for row in rows:
        new_result = Result(row.find('img', class_='s-item__image-img').get('alt'))
        new_result.set_url(row.find('a').get('href'))
        new_result.set_image_src(row.find('img', class_='s-item__image-img').get('src'))
        new_result.set_price(util.get_price(row.find('span', class_="s-item__price").text))
        if util.is_valid_price(new_result.get_price()):
            results.append(new_result)
    return results
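Examples #27 and #28 append module-level NEW and USED URL fragments that are not shown on this page. eBay search URLs conventionally encode item condition as LH_ItemCondition, so the constants plausibly look like the following; the exact values are an assumption, not taken from the source projects.

# Hypothetical definitions of the suffixes used above
# (eBay's LH_ItemCondition filter: 1000 = New, 3000 = Used)
NEW = '&LH_ItemCondition=1000'
USED = '&LH_ItemCondition=3000'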
Example #29
def extract_results(search_term, condition=None):
    if condition == 'new':
        return []
    url = util.create_url(MAIN_URL, search_term, DELIMITER)
    page = urllib.request.urlopen(url)
    soup = BeautifulSoup(page, "html.parser")
    table = soup.find('div', class_='search-results-container')
    try:
        rows = table.findAll("div", class_="card-body")
    except:
        return []
    results = []
    for row in rows:
        new_result = Result(
            row.find('h6', class_="title listing-title-padding").text)
        new_result.set_price(
            util.get_price(row.find('span', class_="price price-amount").text))
        new_result.set_url(row.find('a').get('href'))
        new_result.set_image_src(row.find('img').get('src'))
        if util.is_valid_price(new_result.get_price()):
            results.append(new_result)
    return results
Example #30
def extract_results(search_term, condition=None):
    url = util.create_url(MAIN_URL, search_term, DELIMITER)
    url = url + '&tbs=vw:l,mr:1,new:1' if condition == 'new' else url
    headers = {
        'Connection': 'keep-alive',
        'Accept': 'text/html',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36'
    }
    r = requests.get(url, timeout=5, headers=headers)
    soup = BeautifulSoup(r.content, "html.parser")
    table = soup.find('div', id='search')
    try:
        rows = table.findAll('div', class_='psli')
    except:
        return []

    results = []
    for row in rows:
        if condition != 'new' and ('used' not in str(
                row.find('span', class_='price'))):
            #skip over items that do not say "used" when searching for used items
            continue
        if "eBay" in str(row.find('div', class_='_tyb shop__secondary').text):
            #many google results overlap with eBay. Do not include these.
            continue
        new_result = Result(row.find('a', class_='pstl').find(text=True))
        new_result.url = HOME_URL + row.find('a', class_='pstl').get('href')
        new_result.price = util.get_price(
            row.find('span', class_='price').b.find(text=True))
        if util.is_valid_price(new_result.price):
            results.append(new_result)
    return results