def scrape_page(webdriver, links, username):
    '''Go to every link provided and scrape each picture for the number of
    likes and the caption. If the link is a video, no information is recorded.
    The caption is only saved if the post title matches the identified user.

    Parameters: the active webdriver, a set of picture links, and the username
    of the page you are scraping.
    Returns: a list of lists with the number of likes and the caption.
    '''
    picture_info = []
    for link in links:
        # Open new tab
        webdriver.execute_script("window.open('');")
        time.sleep(3)
        # Switch to the new window
        webdriver.switch_to.window(webdriver.window_handles[1])
        webdriver.get(link)
        time.sleep(5)
        try:
            likes_list = webdriver.find_elements_by_class_name('zV_Nj')
            if len(likes_list) != 0:  # if the length is 0, then it is a video
                if len(likes_list) == 1:  # no common friends liked the photo
                    num_likes = webdriver.find_elements_by_class_name('Nm9Fw')[0].text.split(' ')[0]
                else:
                    num_likes = int(likes_list[1].text.split(' ')[0]) + 1
                try:
                    title = webdriver.find_element_by_class_name('_6lAjh').text
                    if title == username:
                        caption_list = webdriver.find_elements_by_xpath("//div[@class='C4VMK']//span")
                        # This works but is not active since the information was not used:
                        # num_of_comments = len(caption_list)
                        caption = caption_list[0].text
                    else:
                        caption = None  # the user was not the title
                except:
                    caption = None  # photo does not have a caption or any comments
                picture_info.append([num_likes, caption])
        except:
            pass
        webdriver.close()
        # Switch focus back to main tab
        webdriver.switch_to.window(webdriver.window_handles[0])
        time.sleep(5)
    return picture_info
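# Illustrative usage sketch for scrape_page: assumes Selenium 3.x (where the
# find_elements_by_* helpers exist), an already logged-in Chrome session, and a
# placeholder post link; `time` must be imported for the sleeps inside the function.
import time
from selenium.webdriver import Chrome

driver = Chrome()  # assumes chromedriver is on PATH and the session is logged in
links = {'https://www.instagram.com/p/PLACEHOLDER/'}  # placeholder link set
for num_likes, caption in scrape_page(driver, links, 'example_user'):
    print(num_likes, caption)
driver.quit()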
def get_signal(ticker, interval):
    # Declare variable
    analysis = []

    # Open TradingView's technical-analysis widget for the given ticker and interval
    webdriver.get(
        "https://s.tradingview.com/embed-widget/technical-analysis/?locale=en#%7B%22interval%22%3A%22{}%22%2C%22width%22%3A%22100%25%22%2C%22isTransparent%22%3Afalse%2C%22height%22%3A%22100%25%22%2C%22symbol%22%3A%22{}%22%2C%22showIntervalTabs%22%3Atrue%2C%22colorTheme%22%3A%22dark%22%2C%22utm_medium%22%3A%22widget_new%22%2C%22utm_campaign%22%3A%22technical-analysis%22%7D"
        .format(interval, ticker))
    webdriver.refresh()

    # Wait for the site to load its elements
    while len(webdriver.find_elements_by_class_name("speedometerSignal-pyzN--tL")) == 0:
        sleep(0.1)

    # Recommendation
    recommendation_element = webdriver.find_element_by_class_name("speedometerSignal-pyzN--tL")
    analysis.append(recommendation_element.get_attribute('innerHTML'))

    # Counters
    counter_elements = webdriver.find_elements_by_class_name("counterNumber-3l14ys0C")
    # Sell
    analysis.append(int(counter_elements[0].get_attribute('innerHTML')))
    # Neutral
    analysis.append(int(counter_elements[1].get_attribute('innerHTML')))
    # Buy
    analysis.append(int(counter_elements[2].get_attribute('innerHTML')))

    last_analysis = analysis
    signal = last_analysis[0]
    num_sell = last_analysis[1]
    num_neutral = last_analysis[2]
    num_buy = last_analysis[3]
    line = '-' * 50

    ticker = ticker.strip('"')
    interval = interval.strip('"')
    line = line.strip('"')
    signal = signal.strip('"')

    ticker = json.dumps(ticker)
    interval = json.dumps(interval)
    signal = json.dumps(signal)
    num_sell = json.dumps(num_sell)
    num_neutral = json.dumps(num_neutral)
    num_buy = json.dumps(num_buy)
    line = json.dumps(line)

    value = (f'TradingView Data for {ticker} for {interval}: ' + '<br/>' + line + '<br/>'
             + f'Overall Signal: {signal}' + '<br/>'
             + f'Number of Sell Indicators: {num_sell}' + '<br/>'
             + f'Number of Neutral Indicators: {num_neutral}' + '<br/>'
             + f'Number of Buy Indicators: {num_buy}')
    return value
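# Illustrative usage sketch for get_signal: the function relies on a module-level
# Selenium driver named `webdriver` plus `sleep` and `json` imports created
# elsewhere; the ticker and interval below are placeholders (the widget accepts
# interval codes such as '1m', '1h', or '1D').
signal_summary = get_signal('NASDAQ:AAPL', '1D')
print(signal_summary.replace('<br/>', '\n'))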
def traverse_items():
    items = webdriver.find_elements_by_class_name("_9AhH0")
    i = 0
    items[0].click()
    sleep(3)
    # find_elements_* returns a list (never None), so test for a non-empty
    # result instead of `is not None` to avoid looping forever
    while len(webdriver.find_elements_by_xpath("//*[contains(text(), 'Next')]")) > 0:
        video_element = webdriver.find_elements_by_class_name("_5wCQW")
        if len(video_element) > 0:
            comment("Sounds Great")
        next_item = webdriver.find_elements_by_xpath("//*[contains(text(), 'Next')]")
        next_item[0].click()
        sleep(3)
def instaLogin():
    print("WELCOME TO InstaDownloader by KSHITIJ")
    print("Navigating to Instagram Login Page")
    webdriver.get("https://www.instagram.com/accounts/login/?source=auth_switcher")
    sleep(2)
    username = webdriver.find_element_by_name("username")
    print("Entering Username")
    username.click()
    username.send_keys(InstaUsername)
    password = webdriver.find_element_by_name("password")
    print("Entering Password")
    password.click()
    password.send_keys(InstaPassword)
    sleep(2)
    loginButton = webdriver.find_element_by_css_selector(
        "#react-root > section > main > div > article > div > div:nth-child(1) > div > form > div:nth-child(4) > button > div"
    )
    sleep(1)
    loginButton.click()
    print("Logging in . . . ")
    sleep(2)
    if len(webdriver.find_elements_by_class_name('eiCW-')) > 0:
        print("Incorrect Username or Password")
        exit()
def hose_stock_list():
    # Note: this rebinds the module name `webdriver` to the driver instance
    global webdriver
    chrome_driver_path = r'D:\python\selenium\driver\chromedriver.exe'
    chrome_options = Options()
    # chrome_options.page_load_strategy
    # chrome_options.add_argument('--headless')
    chrome_options.add_argument(
        '--user-data-dir=C:\\Users\\hung-pro7\\AppData\\Local\\Google\\Chrome\\User Data\\Default\\Default'
    )
    webdriver = webdriver.Chrome(executable_path=chrome_driver_path, options=chrome_options)
    url = 'https://trade-hcm.vndirect.com.vn/chung-khoan/hose'
    webdriver.get(url)
    webdriver.find_element_by_xpath('//*[@id="menuWrp"]/div/a[2]').click()
    time.sleep(2)
    webdriver.find_element_by_xpath('//*[@id="login-popup"]/form/div[1]/div/input').send_keys("tuanhungstar")
    webdriver.find_element_by_xpath('//*[@id="login-popup"]/form/div[2]/div/input').send_keys("Khanhha-1")
    webdriver.find_element_by_xpath('//*[@id="login-popup"]/form/button').click()
    time.sleep(2)
    webdriver.find_element_by_xpath('//*[@id="nav"]/ul[1]/li[2]/a/span').click()
    time.sleep(2)
    HOSE_table = webdriver.find_elements_by_class_name('txt-gia-tran')
    HOSE_ticker = []
    for row in HOSE_table:
        text = row.get_attribute('id').replace('ceil', '')
        HOSE_ticker.append(text)
    return HOSE_ticker
def link_to_reset_pswd(self, webdriver):
    self.restore_link = None
    webdriver.get('https://mail.yandex.ru/lite')
    while self.restore_link is None:
        elements = webdriver.find_elements_by_class_name('b-messages__message__left')
        for i in range(len(elements)):
            time.sleep(2)
            try:
                element = elements[i].text
                if len(re.findall(r'https://accounts.epicgames.com/resetPassword\?code=(\w{32})', element)) != 0:
                    self.restore_link = re.findall(
                        r'https://accounts.epicgames.com/resetPassword\?code=(\w{32})', element)[0]
            except:
                pass
        else:
            # no break occurs above, so refresh the inbox after every pass
            time.sleep(REFRESH_SEC)
            webdriver.refresh()
        time.sleep(REFRESH_SEC)
        webdriver.refresh()
    print(self.restore_link)
    return 'https://accounts.epicgames.com/resetPassword?code=' + self.restore_link
def isNextPostAvailable():
    if len(webdriver.find_elements_by_class_name('coreSpriteRightPaginationArrow')) > 0:
        return 1
    else:
        return 0
def get_reviews(thisreview):
    global last_len
    print("Don't Stop")
    for webdriver_obj in thisreview.find_elements_by_class_name("WMbnJf"):
        Name = webdriver_obj.find_element_by_class_name("Y0uHMb")
        Reviewer.append(Name.text)
        try:
            ReviewByuser = webdriver_obj.find_element_by_class_name("A503be")
            TotalReviewsByUser.append(ReviewByuser.text)
        except NoSuchElementException:
            TotalReviewsByUser.append("")
        star = webdriver_obj.find_element_by_class_name("fTKmHE99XE4__star")
        ReviewStar = star.get_attribute("aria-label")
        ReviewRating.append(ReviewStar)
        Date = webdriver_obj.find_element_by_class_name("dehysf")
        ReviewDate.append(Date.text)
        Body = webdriver_obj.find_element_by_class_name('Jtu6Td')
        try:
            webdriver_obj.find_element_by_class_name('review-snippet').click()
            s_32B = webdriver_obj.find_element_by_class_name('review-full-text')
            ReviewDescription.append(s_32B.text)
        except NoSuchElementException:
            ReviewDescription.append(Body.text)
        print("Yes..")
        element = webdriver_obj.find_element_by_class_name('PuaHbe')
        webdriver.execute_script("arguments[0].scrollIntoView();", element)
        print("ah!..Go")
    time.sleep(3)
    reviews = webdriver.find_elements_by_class_name("gws-localreviews__general-reviews-block")
    r_len = len(reviews)
    if r_len > last_len:
        last_len = r_len
        get_reviews(reviews[r_len - 1])
def search():
    search_bar = webdriver.find_elements_by_class_name("XTCLo")
    search_bar[0].send_keys("#musiccover")
    sleep(3)
    search_bar[0].send_keys(Keys.RETURN)
    search_bar[0].send_keys(Keys.RETURN)
    sleep(5)
def goToProfile(profile):
    print("Navigating into " + InstaAccount)
    webdriver.get("https://www.instagram.com/" + InstaAccount + "/")
    if len(webdriver.find_elements_by_class_name('error-container')) > 0:
        print("Account Doesn't Exist")
        exit()
    else:
        print(InstaAccount + " is opened")
def loop(qinput, WYR):
    qinput.send_keys(Keys.RETURN)
    sleep(1)
    questions = webdriver.find_elements_by_class_name('support-sentence')
    flag = False
    for question in questions:
        count = WYR.new(question.get_attribute('innerHTML'))
        if count == 0:
            flag = True
    return flag
def get_page_links(webdriver):
    links = webdriver.find_elements_by_class_name('click')
    result = list()
    for link in links:
        try:
            result.append(link.get_attribute('href'))
        except:
            # logging.error (lowercase) is the logging call; logging.ERROR is a level constant
            logging.error("Error adding link in page")
    return result
def code_2fa_open(self, webdriver):
    webdriver.get('https://www.epicgames.com/account/password')
    # By.CLASS_NAME does not accept compound class names, so use CSS selectors instead
    wait_for_element = WebDriverWait(webdriver, TIMEOUT).until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, '.btn-custom.btn-submit-custom.email-auth')))
    buttons = webdriver.find_elements_by_css_selector('.btn-custom.btn-submit-custom.email-auth')
    for i in buttons:
        if re.findall('((E|e)mail)|((П|п)очте)', i.text):
            i.click()
    wait_for_element = WebDriverWait(webdriver, TIMEOUT).until(
        EC.element_to_be_clickable(
            (By.CSS_SELECTOR, '.input.challengeEmailCode')))
def take_first_code(self, webdriver):
    self.first_code = None
    webdriver.get('https://mail.yandex.ru/lite')
    while self.first_code is None:
        elements = webdriver.find_elements_by_class_name('b-messages__message__left')
        for i in range(len(elements)):
            try:
                element = elements[i].text
                if len(re.findall(r'! (\d{6})', element)) != 0:
                    self.first_code = re.findall(r'! (\d{6})', element)[0]
            except:
                pass
        else:
            # no break occurs above, so refresh the inbox after every pass
            time.sleep(REFRESH_SEC)
            webdriver.refresh()
        time.sleep(REFRESH_SEC)
        webdriver.refresh()
    print(self.first_code)
    return self.first_code
def City():
    try:
        SpeakText("Which city?")
        with sr.Microphone() as source2:
            print("Listening...")
            audio2 = r.listen(source2)
            MyText = r.recognize_google(audio2)
            MyText = MyText.lower()
            webdriver.get("https://www.weather-forecast.com/locations/" + MyText + "/forecasts/latest")
            SpeakText(str(webdriver.find_elements_by_class_name(
                "b-forecast__table-description-content")[0].text))
            # SpeakText(MyText)
            # print("Did you say " + MyText)
    except sr.UnknownValueError:
        print("unknown error occurred")
def find_all(webdriver, by, css_selector_val):
    '''Wrapper around Selenium to find a list of elements using the same
    locator strategy and locator value (css_selector_val).

    Arguments
    ---------
    webdriver - instance of selenium.webdriver.chrome.webdriver.WebDriver.
    by - element locator name.
         Constraint: expected values are name, xpath, link_text,
         partial_link_text, tag_name, class_name, css_selector.
         Any other value returns None.
    css_selector_val - value for the element locator given in 'by'.
         Example: to find all elements with class_name 'contact',
         pass 'contact' as css_selector_val.

    Return
    ------
    List of WebElements - if the value of 'by' is an expected value, or
    None - if the value of 'by' is an unexpected value.
    '''
    if by == 'name':
        return webdriver.find_elements_by_name(css_selector_val)
    if by == 'xpath':
        return webdriver.find_elements_by_xpath(css_selector_val)
    if by == 'link_text':
        return webdriver.find_elements_by_link_text(css_selector_val)
    if by == 'partial_link_text':
        return webdriver.find_elements_by_partial_link_text(css_selector_val)
    if by == 'tag_name':
        return webdriver.find_elements_by_tag_name(css_selector_val)
    if by == 'class_name':
        return webdriver.find_elements_by_class_name(css_selector_val)
    if by == 'css_selector':
        return webdriver.find_elements_by_css_selector(css_selector_val)
    else:
        return None
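# Illustrative usage sketch for find_all: `driver` stands for an already-created
# WebDriver instance and 'contact' is only an example class name.
contacts = find_all(driver, 'class_name', 'contact')
if contacts is not None:
    for element in contacts:
        print(element.text)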
import pandas as pd
import openpyxl
from selenium import webdriver

PATH = r"C:\Users\Kai\Desktop\BA Mods Materials\AY 2021 Sem 2\BT4103\chromedriver.exe"
webdriver = webdriver.Chrome(PATH)
webdriver.get(
    "https://forums.hardwarezone.com.sg/national-service-knowledge-base-162/pes-d-dilemma-3709993.html"
)

usernames = []
post_titles = []
posts = []
date = []

names = webdriver.find_elements_by_class_name('bigusername')
for n in names:
    usernames.append(n.text)
# print(len(usernames))

titles = webdriver.find_elements_by_class_name('header-gray')
for n in names:
    # note: this appends the first 'header-gray' text once per post
    post_titles.append(titles[0].text)
# print(len(post_titles))

post_message = webdriver.find_elements_by_class_name('post_message')
for m in post_message:
    posts.append(m.text)
# print(len(posts))

review_df = pd.DataFrame()
from selenium import webdriver
from time import sleep
from selenium.webdriver.common.by import By
import pandas as pd

start_url = 'https://www.vorkers.com/company_list?field=&pref=&src_str=&sort=1'
webdriver = webdriver.Chrome()
webdriver.get(start_url)
webdriver.find_element_by_xpath(
    "//*[@id='contentsBody']/div[1]/div/form/div/dl/dd[2]/button").click()
sleep(1)

categories = webdriver.find_elements_by_class_name("jsChangeField")
csv = []
for category in categories:
    print(category.text)
    csv.append(category.text)

headers = ['name']
df = pd.DataFrame(csv, columns=headers)
df.to_csv("job_category.csv", index=False, encoding="utf_8_sig")
def parse_url(start_url_ext, idx, webdriver, location=False, _filter=False):
    """Parse a Trip Advisor hotel page and scrape review information:
    rating, review, and review title. Optionally scrape location details.

    Args:
        start_url_ext (str): Trip Advisor hotel page to parse
        idx (int): current page index, 0 through n, used for print statement
        webdriver (Selenium WebDriver): browser tool allowing for interaction with the website
        location (bool, optional): option to return location details. Defaults to False.

    Returns:
        page_reviews: list of strings
        page_ratings: list of integers, 0 - 5
        page_titles: list of strings
        if location is True:
            location (tuple): (full hotel name, city, state)
    """
    domain = "https://www.tripadvisor.com"

    # Define waits, moved from stale element 'try' 1/18pm
    ignored_exceptions = (NoSuchElementException, StaleElementReferenceException, TimeoutException)
    wait = WebDriverWait(webdriver, 10, ignored_exceptions=ignored_exceptions)

    # Catch for webdriver time out
    try:
        webdriver.get(domain + start_url_ext)  # 1/18 reduced from 5 to 3
    except TimeoutException:
        pass

    if _filter == True:
        # ACTIVATE low filters to scrape only low reviews
        try:
            for f in [3, 2, 1]:
                # level = f"ReviewRatingFilter_{f}"
                # webdriver.find_element_by_id(level).click()
                level = f"ReviewRatingFilter_{f}"
                wait.until(EC.element_to_be_clickable((By.ID, level)))
                webdriver.execute_script("arguments[0].click();",
                                         webdriver.find_element_by_id(level))
                print(f"filter{f}")
        except:
            pass

    # Catch for webdriver stale element
    try:
        # ignored_exceptions = (NoSuchElementException, StaleElementReferenceException, TimeoutException)
        # wait = WebDriverWait(webdriver, 10, ignored_exceptions=ignored_exceptions)
        wait.until(EC.element_to_be_clickable((By.CLASS_NAME, "_3maEfNCR")))
    except TimeoutException:
        pass

    # Find 'read more' buttons
    all_more_buttons = webdriver.find_elements_by_class_name("_3maEfNCR")
    # If 'read more' is available, activate it to expand the text; only need to click one
    if all_more_buttons:
        try:
            all_more_buttons[0].click()
            print('click')
        except StaleElementReferenceException:
            pass

    # Set soup
    page_source = webdriver.page_source
    soup = BeautifulSoup(page_source, 'html.parser')

    # Scrape the ratings data
    page_reviews, page_ratings, page_titles = retrieve_reviews_ratings(soup, idx)

    # If location data is requested, gather it
    if location == False:
        return page_reviews, page_ratings, page_titles
    else:
        location = retrieve_location(soup)
        return page_reviews, page_ratings, page_titles, location
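# Illustrative usage sketch for parse_url: `driver` is an already-configured
# WebDriver and `review_page_paths` is a hypothetical list of relative
# TripAdvisor review-page paths collected elsewhere.
all_reviews, all_ratings, all_titles = [], [], []
for idx, path in enumerate(review_page_paths):
    reviews, ratings, titles = parse_url(path, idx, driver, location=False, _filter=True)
    all_reviews.extend(reviews)
    all_ratings.extend(ratings)
    all_titles.extend(titles)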
# Fragment: tail of get_reviews (expand the full review text if available),
# followed by the module-level kickoff of the recursive scrape.
            s_32B = webdriver_obj.find_element_by_class_name('review-full-text')
            ReviewDescription.append(s_32B.text)
        except NoSuchElementException:
            ReviewDescription.append(Body.text)
        print("Yes..")
        element = webdriver_obj.find_element_by_class_name('PuaHbe')
        webdriver.execute_script("arguments[0].scrollIntoView();", element)
        print("ah!..Go")
    time.sleep(3)
    reviews = webdriver.find_elements_by_class_name("gws-localreviews__general-reviews-block")
    r_len = len(reviews)
    if r_len > last_len:
        last_len = r_len
        get_reviews(reviews[r_len - 1])


reviews = webdriver.find_elements_by_class_name("gws-localreviews__general-reviews-block")
last_len = len(reviews)
get_reviews(reviews[last_len - 1])

data = pd.DataFrame({
    'Reviewer': Reviewer,
    'TotalReviewsByUser': TotalReviewsByUser,
    'ReviewRating': ReviewRating,
    'ReviewDate': ReviewDate,
    'ReviewDescription': ReviewDescription
})
def work_term_rating_parser(webdriver):
    # Order of operations:
    # 1. check whether there is no data to display
    # 2. then parse the data we need
    try:
        thing = WebDriverWait(webdriver, 5).until(
            EC.presence_of_element_located((By.CLASS_NAME, "highcharts-container")))
    except:
        pass
        # try finding the alert of no data to display here
        # print("timed out :(")
    else:
        good_stuff = {}  # <- the useful dictionary
        thing2 = webdriver.find_elements_by_class_name("highcharts-container")
        for each_thing in thing2:
            title = each_thing.find_element_by_class_name(
                "highcharts-title").find_element_by_tag_name("tspan").text
            if "by Student Work Term Number" in title:
                by_work_term = {}  # <- the useful dictionary
                data_list = each_thing.find_element_by_class_name("highcharts-data-labels.highcharts-tracker").\
                    find_elements_by_tag_name("text")
                for each_label in data_list:
                    pair = each_label.find_elements_by_tag_name("tspan")
                    # When the browser isn't fast enough, the tspan elements' text
                    # is read before it has had a chance to load; this while loop
                    # waits until non-empty results are found.
                    # (Can be improved by changing it into a definitely ending loop.)
                    while pair[0].text == '' or pair[1].text == '':
                        time.sleep(0.00001)
                        pair = each_label.find_elements_by_tag_name("tspan")
                    # ↑ end of failsafe
                    by_work_term.update({pair[0].text: int(pair[1].text[2:-1])})
                good_stuff.update({"by_work_term": by_work_term})
            if "Most Frequently Hired Programs" in title:
                by_program = {}  # <- the useful dictionary
                # _(:з」∠)_
                programs = []
                programs_elements = each_thing.find_element_by_class_name("highcharts-axis-labels.highcharts-xaxis-labels").\
                    find_elements_by_tag_name("text")
                for each_program in programs_elements:
                    try:
                        program_name = each_program.find_element_by_tag_name("tspan").text
                        programs.append(program_name)
                    except:
                        program_name = each_program.text
                        programs.append(program_name)
                # _(:з」∠)_
                entries = []
                entries_elements = each_thing.find_element_by_class_name("highcharts-data-labels.highcharts-tracker").\
                    find_elements_by_tag_name("text")
                for element in entries_elements:
                    entry = int(element.find_element_by_tag_name("tspan").text)
                    entries.append(entry)
                by_program = {
                    programs[i]: entries[i]
                    for i in range(min(len(entries), len(programs)))
                }
                good_stuff.update({"by_program": by_program})
        # good_stuff = str(good_stuff).replace("'", '"')
        # print(good_stuff)
        return good_stuff
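# Illustrative sketch of the "definitely ending loop" improvement suggested in the
# comment above: poll the tspan pair a bounded number of times instead of looping
# until text appears. This helper is an assumption, not part of the original code.
def wait_for_label_pair(label_element, attempts=50, delay=0.1):
    """Return the two tspan elements once both have non-empty text,
    or None if they never fill in within `attempts` polls."""
    for _ in range(attempts):
        pair = label_element.find_elements_by_tag_name("tspan")
        if len(pair) >= 2 and pair[0].text != '' and pair[1].text != '':
            return pair
        time.sleep(delay)
    return None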
def isMediaVideo():
    if len(webdriver.find_elements_by_class_name('videoSpritePlayButton')) > 0:
        return 1
    else:
        return 0
# Fragment: the enclosing loop over months (index `i`) and its `try:` sit above
# this excerpt. Parses the SUNAT exchange-rate table into arrayTC.
        tabla = webdriver.find_element_by_css_selector(
            "body > form > div:nth-child(4) > center > table > tbody")
        tableRows = tabla.find_elements_by_tag_name("tr")
        iterRows = iter(tableRows)
        next(iterRows)  # skip the header row
        for fila in iterRows:
            celdas = fila.find_elements_by_tag_name("td")
            for j in range(len(celdas)):
                if j % 3 == 0:
                    fecha = str(anhoFecha) + '-' + str(i).zfill(2) + '-' + str(celdas[j].text).zfill(2)
                elif j % 3 == 1:
                    compra = celdas[j].text
                elif j % 3 == 2:
                    venta = celdas[j].text
                    arrayTC.append({
                        "fecha": fecha,
                        "compra": compra,
                        "venta": venta
                    })
                    fecha = 0
                    compra = 0
                    venta = 0
        break  # leave the enclosing loop once the table has been parsed
    except:
        botonAnterior = webdriver.find_elements_by_class_name("button")[0].click()

with open("tc-sunat-" + str(anhoFecha) + ".json", 'w') as f:
    json.dump(arrayTC, f)
def exchange_walutomat(username, password, transaction_type, first_currency,
                       second_currency, amount, rate):
    webdriver.implicitly_wait(10)
    webdriver.get('https://panel.walutomat.pl/moj-walutomat')
    webdriver.find_element_by_id('username').send_keys(username)
    webdriver.find_element_by_id('password').send_keys(password)
    webdriver.find_element_by_class_name('bem-button__inner-text').click()
    time.sleep(5)
    webdriver.get('https://user.walutomat.pl/#/order-placement')

    element = webdriver.find_element_by_id('order-volume')
    element.clear()
    element.send_keys(str(amount))  # send amount
    time.sleep(3)

    # TODO: choose transaction type from a dropdown menu. Buy is the default.
    '''
    webdriver.find_element_by_id('order-type').click()  # click on buy/sell
    time.sleep(2)
    # element from the dropdown menu is wrongly selected. To be fixed
    if transaction_type == 'buy':  # choose buy/sell
        webdriver.find_element_by_class_name('select2-results__option select2-results__option--highlighted')
    elif transaction_type == 'sell':
        webdriver.find_element_by_link_text('Chcę sprzedać')
    '''

    # TODO: find a way to select elements for different currencies. USD/PLN is the default.
    # The element selector from the dropdown menu doesn't work.
    '''
    element.send_keys(Keys.TAB, Keys.SPACE)  # click to choose first currency
    time.sleep(2)
    webdriver.find_element_by_class_name('icon-{}'.format(first_currency)).click()  # choose first currency
    time.sleep(2)
    webdriver.send_keys(Keys.TAB)  # click on second currency
    time.sleep(2)
    webdriver.send_keys(Keys.SPACE)
    webdriver.find_element_by_class_name('icon-{}'.format(second_currency)).click()  # choose second currency
    time.sleep(2)
    webdriver.find_element_by_id('price-type-fixed').click()  # choose custom exchange rate
    time.sleep(2)
    '''

    webdriver.find_element_by_id('order-at-price').send_keys(str(rate))  # send custom exchange rate
    time.sleep(3)
    webdriver.find_element_by_id('order-preliminary-submit').click()  # confirm transaction parameters
    time.sleep(3)

    element = webdriver.find_elements_by_class_name('content')
    podsumowanie = element[3].text.split('\n')
    podsumowanie = '{}, kurs {} {}\n{}\n'.format(' '.join(podsumowanie[1:3]),
                                                 podsumowanie[4].lower(),
                                                 podsumowanie[5],
                                                 ' '.join(podsumowanie[6:8]))
    print(podsumowanie)
    confirmation = input('Czy potwierdzasz?')  # "Do you confirm?"
    if confirmation in ['T', 't', 'Tak', 'tak', 'Y', 'y', 'Yes', 'yes']:
        try:
            webdriver.find_element_by_id('confirm-exchange').click()
            print('Zlecenie zostało złożone.')  # "The order has been placed."
        except:
            print('Something goes wrong. Laaambaada!')
    else:
        print('Operacja anulowana.')  # "Operation cancelled."
    webdriver.close()
    return
def isNextMediaAvailable():
    if len(webdriver.find_elements_by_class_name('coreSpriteRightChevron')) > 0:
        return 1
    else:
        return 0
def retrieve_image(search_query, webdriver, dir_name, img_name):
    try:
        logger.log("image_scraping function start")
        image_name = ''
        # Variable that holds the number of images to fetch
        number_of_images_to_fetch = 1
        index = 0

        # Scroll down the webpage to load more images
        scroll_down(webdriver)
        time.sleep(5)

        # Save all of the html image elements from our google search
        # 'rg_i' is the class name that the images have
        image_elements = webdriver.find_elements_by_class_name('rg_i')

        target_dir = basest_dir + "/" + dir_name
        # Check if the directory that we want to put our images in already exists
        if not os.path.exists(target_dir):
            # If not, make that directory
            os.mkdir(target_dir)

        found_image_count = 0
        attempt_count = 0
        logger.log("begin finding images")
        for element in image_elements:
            attempt_count += 1
            try:
                # Check if you've downloaded all the images you want
                if found_image_count == number_of_images_to_fetch:
                    break
                # Click on the image you want to download
                element.click()
                # Give the browser some time to catch up
                time.sleep(2)
                # After clicking on the image, get the larger version
                found_image = webdriver.find_element_by_class_name('n3VNCb')
                # Find the source of the image, i.e. its url
                image_url = found_image.get_attribute('src')
                logger.log("attempt " + str(attempt_count) + ": " + image_url[0:10])
                # Make sure that the image url is a valid source
                if 'http' in image_url:
                    logger.log("successful image found")
                    # Download this image as a BytesIO object
                    image_file = io.BytesIO(requests.get(image_url).content)
                    # Convert our BytesIO object into an actual image
                    image = Image.open(image_file).convert('RGB')
                    # Create the name of the image we're downloading
                    image_name = img_name + '.jpg'
                    logger.log(image_name)
                    # Build the path that we want to save the image to
                    # The directory has the same name as the search query
                    image_path = target_dir + '/' + image_name
                    # Save the image
                    image.save(image_path, 'JPEG', quality=85)
                    found_image_count += 1
                # end if statement
            except:
                logger.log("couldn't find enhanced images")
            # end try/except block
        # End for loop

        # Close the web browser
        # webdriver.close()
        if attempt_count > 3:
            logger.log("multiple attempts: " + search_query + "<=======")
        else:
            logger.log(image_name)
        return image_name
    except:
        logger.log("retrieve image crash")
        webdriver.close()
print('Price: ' + str(price))

numbers = [1, 2, 3, 4, 5, 6, 7, 8]
for type_interval, n in zip(type_intervals, numbers):
    if interval == type_interval:
        element = webdriver.find_element_by_xpath(
            f'//*[@id="technicals-root"]/div/div/div[1]/div/div/div[1]/div/div/div[{n}]'
        )
        element.click()
    else:
        continue

time.sleep(1)

# Overall Recommendation
recommendation_elements = webdriver.find_elements_by_class_name(
    "speedometerSignal-pyzN--tL")
analysis.append(recommendation_elements[1].get_attribute('innerHTML'))

counter_elements = webdriver.find_elements_by_class_name(
    "counterNumber-3l14ys0C")
analysis.append(int(counter_elements[3].get_attribute('innerHTML')))
analysis.append(int(counter_elements[4].get_attribute('innerHTML')))
analysis.append(int(counter_elements[5].get_attribute('innerHTML')))

df = pd.DataFrame.from_records([tuple(analysis)],
                               columns=[
                                   'Recommendation',
                                   '# of Sell Signals',
                                   '# of Neutral Signals',
                                   '# of Buy Signals'
                               ])
def City():
    speak("Which city?")
    MyText = takeCommand().lower()
    webdriver.get("https://www.weather-forecast.com/locations/" + MyText + "/forecasts/latest")
    speak(str(webdriver.find_elements_by_class_name(
        "b-forecast__table-description-content")[0].text))
webdriver = webdriver.Chrome(
    executable_path='/Users/shashank/Documents/GitHub/Code/Finance/chromedriver.exe',
    options=options)

# Declare variable
analysis = []

# Open TradingView's site
webdriver.get(
    "https://s.tradingview.com/embed-widget/technical-analysis/?locale=en#%7B%22interval%22%3A%22{}%22%2C%22width%22%3A%22100%25%22%2C%22isTransparent%22%3Afalse%2C%22height%22%3A%22100%25%22%2C%22symbol%22%3A%22{}%22%2C%22showIntervalTabs%22%3Atrue%2C%22colorTheme%22%3A%22dark%22%2C%22utm_medium%22%3A%22widget_new%22%2C%22utm_campaign%22%3A%22technical-analysis%22%7D"
    .format(interval, ticker))
webdriver.refresh()

# Wait for site to load elements
while len(webdriver.find_elements_by_class_name("speedometerSignal-pyzN--tL")) == 0:
    sleep(0.1)

# Recommendation
recommendation_element = webdriver.find_element_by_class_name("speedometerSignal-pyzN--tL")
analysis.append(recommendation_element.get_attribute('innerHTML'))

# Counters
counter_elements = webdriver.find_elements_by_class_name("counterNumber-3l14ys0C")
# Sell
analysis.append(int(counter_elements[0].get_attribute('innerHTML')))
# Neutral
)
first_thumbnail.click()
sleep(randint(3, 4))

likes_button = webdriver.find_element_by_xpath('//*[@class="Nm9Fw"]/button')
likes_button.click()
sleep(randint(5, 6))

scr1 = webdriver.find_element_by_xpath('//*[@class="_1XyCr"]/div[2]/div')
for i in range(2):
    webdriver.execute_script(
        "arguments[0].scrollTop = arguments[0].scrollHeight", scr1)
    sleep(randint(4, 5))

for i in range(10):
    username.append(webdriver.find_elements_by_class_name('MBL3Z')[i].text)
sleep(randint(6, 7))

# note: removing items while iterating over the same list can skip entries
for user in username:
    if user in prev_user_list:
        username.remove(user)
sleep(randint(3, 4))

num = randint(len(username) - 10, len(username))
for i in range(num):
    webdriver.get('https://www.instagram.com/' + username[i] + '/')
    sleep(randint(4, 7))
    try:
        follow_button = webdriver.find_element_by_xpath(