Example #1
def extract_left_strike(webdriver):
    lp_left = webdriver.find_elements_by_xpath("//td[@class='lastPrice_left']")
    strike_price = webdriver.find_elements_by_xpath(
        "//td[@class='strikePrice']")
    lp_right = webdriver.find_elements_by_xpath(
        "//td[@class='lastPrice_right']")
    out_list = []
    for i in range(len(lp_left)):
        out_list.append(
            [lp_left[i].text, strike_price[i].text, lp_right[i].text])
    return out_list
Example #2
def traverse_items():
    items = webdriver.find_elements_by_class_name("_9AhH0")
    items[0].click()
    sleep(3)
    # find_elements returns a list (never None), so loop while a Next button exists
    while webdriver.find_elements_by_xpath(
            "//*[contains(text(), 'Next')]"):
        video_element = webdriver.find_elements_by_class_name("_5wCQW")
        if len(video_element) > 0:
            comment("Sounds Great")
        next_item = webdriver.find_elements_by_xpath(
            "//*[contains(text(), 'Next')]")
        next_item[0].click()
        sleep(3)
Example #3
 def runFollowersFollow(self, webdriver, person, account, stopFlag):
     sleep(2)
     webdriver.get('https://www.%s.com/%s/followers/' %
                   (str.lower(account.accountType), person))
     sleep(1)
     follow_list = webdriver.find_elements_by_xpath(self.follow_list)
     maxVal = len(follow_list)
     followDoc = open("./bot_docs/masterList.txt", "a")
     for index in range(1, 10):
         try:
             followButton = webdriver.find_element_by_xpath(
                 self.follow_list_item + str(index) + ']/div/div[3]/button')
             accountName = webdriver.find_element_by_xpath(
                 self.follow_list_item + str(index) +
                 ']/div/div[2]/div[1]/div/div/a')
             webdriver.execute_script(
                 "return arguments[0].scrollIntoView();", followButton)
             sleep(random.uniform(0, 1))
             # if(followButton.text=='Follow' and accountName.text not in clickedList.split(',')[0]):
             #     followButton.click()
             #     followDoc.write(accountName.text + ', ' + accountName.get_attribute("href") + datetime.today().strftime('%Y-%m-%d') + '\n')
         except:
             print("no follow button: " + self.follow_list_item +
                   str(index) + ']/div/div[3]/button')
     followDoc.close()
Example #4
def main():
    items = []

    #Log in
    webdriver.get(MAIN_PAGE_URL)
    webdriver.find_element_by_id(
        'ctl00_ContentPlaceHolder1_Username').send_keys(
            credentials['username'])
    webdriver.find_element_by_id(
        'ctl00_ContentPlaceHolder1_Password').send_keys(
            credentials['password'])
    webdriver.find_element_by_name('ctl00$ContentPlaceHolder1$ctl04').click()

    #Set items to show = 100
    webdriver.find_element_by_xpath(
        "//select[@name='ctl00$ContentPlaceHolder1$GridView1$ctl13$ctl11']/option[text()='100']"
    ).click()
    #Getting number of pages
    page_number = len(
        webdriver.find_elements_by_xpath(
            "//tr[@class='grid-pager']//table//tr/td[not(@class)]"))
    page_href_script = "__doPostBack('ctl00$ContentPlaceHolder1$GridView1','Page$%s')"

    #Extracting each page on the website
    for i in range(1, page_number + 1):
        if i != 1:
            webdriver.execute_script(page_href_script % i)
            # Wait for redirecting
            time.sleep(10)
        items += extract()

    #Insert into database all extracted items (rigs)
    insert_into_database(items)
Example #5
def scrape_page(webdriver, links, username):
        '''This function will go to all links provided
        and scrape each picture for the number of likes
        and the caption. If the link is a video no information is recorded. 
        The function will only save the caption if the title is the 
        identified user
        
        Parameters: the active webdriver, a set of picture links, 
        the username of the page you are scraping

        Returns: a list of lists with the number of likes and caption
        '''
        picture_info = []

        for link in links:
                # Open new tab
                webdriver.execute_script("window.open('');")
                time.sleep(3)

                # Switch to the new window
                webdriver.switch_to.window(webdriver.window_handles[1])
                webdriver.get(link)
                time.sleep(5)
                try:
                        likes_list = webdriver.find_elements_by_class_name('zV_Nj')

                        if len(likes_list) != 0: #If the length is 0, then it is a video
                                
                                if len(likes_list) == 1: #No common friends liked the photo
                                        num_likes = webdriver.find_elements_by_class_name('Nm9Fw')[0].text.split(' ')[0]
                                
                                else:
                                        num_likes = int(likes_list[1].text.split(' ')[0]) + 1

                                try:
                                        title = webdriver.find_element_by_class_name('_6lAjh').text
                                        if title == username:
                                                caption_list = webdriver.find_elements_by_xpath("//div[@class='C4VMK']//span")
                                                
                                                '''This code works but not active since I did not use the information
                                                num_of_comments = len(caption_list)'''
                                                
                                                caption = caption_list[0].text
                                        else:
                                                caption = None #if the user was not the title
                                except:
                                        caption = None #photo does not have a caption or any comments
                                        

                                picture_info.append([num_likes, caption])
                except:
                        pass
                webdriver.close()
                
                # Switch focus back to main tab
                webdriver.switch_to.window(webdriver.window_handles[0])    
                time.sleep(5)        
       
        return picture_info
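
A minimal usage sketch for scrape_page; the driver setup, the set of post links, and the username below are placeholder assumptions, not taken from the original code.

# Usage sketch (placeholder values): scrape_page only needs an active,
# already logged-in driver, an iterable of post URLs, and the profile username.
from selenium import webdriver as wd

driver = wd.Chrome()
post_links = {
    'https://www.instagram.com/p/EXAMPLE_1/',
    'https://www.instagram.com/p/EXAMPLE_2/',
}
picture_info = scrape_page(driver, post_links, 'some_username')
for num_likes, caption in picture_info:
    print(num_likes, caption)
driver.quit()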
Example #6
def select(firstParam, secondParam):
    # avoid shadowing the built-in list()
    results = webdriver.find_elements_by_xpath('//div/h3/a')
    if firstParam:
        results[int(firstParam) - 1].click()
        time.sleep(2)

    if secondParam:
        results[int(secondParam) - 1].click()
Example #7
def login():
    webdriver.get(
        'https://www.instagram.com/accounts/login/?source=auth_switcher')
    sleep(3)
    username = webdriver.find_element_by_name('username')
    username.send_keys('official_rakshit')
    password = webdriver.find_element_by_name('password')
    password.send_keys('')
    button_login = webdriver.find_elements_by_xpath(
        "//*[contains(text(), 'Log In')]")
    button_login[0].click()
    sleep(5)
    notnow = webdriver.find_elements_by_xpath(
        "//*[contains(text(), 'Not Now')]")
    notnow[0].click()
    sleep(3)
    notnow = webdriver.find_elements_by_xpath(
        "//*[contains(text(), 'Not Now')]")
    notnow[0].click()
    sleep(3)
Example #8
 def runFollowersFollow(self, webdriver, person, account, stopFlag):
     sleep(2)
     webdriver.get('https://www.%s.com/%s/followers/' %
                   (str.lower(account.accountType), person))
     follow_list = webdriver.find_elements_by_xpath(self.follow_list)
     index = 1
     like_count = 0
     file_object = open(
         self.path + '/account-info/mixcloudFollowedList.txt', 'a')
     while (like_count < int(account.numInteractions)):
         for val in range(index, len(follow_list)):
             if random.randint(0, 5) > 2 and stopFlag:
                 stopFlag = self.clickFollowButton(webdriver, val, stopFlag,
                                                   file_object)
                 if stopFlag:
                     like_count += 1
                 webdriver.execute_script("window.scrollBy(0, 200)")
                 if (like_count >= int(account.numInteractions)):
                     break
         index = len(follow_list)
         follow_list = webdriver.find_elements_by_xpath(self.follow_list)
     file_object.close()
     return stopFlag
Example #9
def get_results_by_year(webdriver, input_file):
    login(webdriver)
    response_files_by_year = {}
    submenus = [
        submenu.get_attribute('id')
        for submenu in webdriver.find_elements_by_xpath(
            "//div[@id='menugroup_4']/div[not(@id='menugroup_4_1')]")
    ]
    for submenu_id in submenus:
        year = send_request_file(webdriver, submenu_id, input_file)
        time.sleep(10)
        file = download_response_file(webdriver)
        if (file):
            response_files_by_year[year] = file
        webdriver.implicitly_wait(2)
    time.sleep(3)
    webdriver.quit()
    return response_files_by_year
Example #10
def find_all(webdriver, by, css_selector_val):
    '''
    Wrapper function of Selenium Python to find a list of elements using the same locator and locator value (css_selector_val)

    Arguments
    ---------

    webdriver       -   object of selenium.webdriver.chrome.webdriver.WebDriver .
    by              -   element locator name.
                        constraint:
                            expected values:    name, xpath, link_text, partial_link_text, 
                                                tag_name, class_name, css_selector 
                        any value other than the expected ones will return None
    css_selector_val-   value for the element locator i.e. arg 'by' 
                        example:- to find all elements with class_name=contact, value for css_selector_val is 'contact'
    
    Return
    ---------

    Webelement list -   if the value of arg 'by' is an expected value
                    or
    None            -   if the value of arg 'by' is an unexpected value
    '''

    if by == 'name':
        return webdriver.find_elements_by_name(css_selector_val)
    if by == 'xpath':
        return webdriver.find_elements_by_xpath(css_selector_val)
    if by == 'link_text':
        return webdriver.find_elements_by_link_text(css_selector_val)
    if by == 'partial_link_text':
        return webdriver.find_elements_by_partial_link_text(css_selector_val)
    if by == 'tag_name':
        return webdriver.find_elements_by_tag_name(css_selector_val)
    if by == 'class_name':
        return webdriver.find_elements_by_class_name(css_selector_val)
    if by == 'css_selector':
        return webdriver.find_elements_by_css_selector(css_selector_val)
    else:
        return None
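
A short usage sketch for the find_all wrapper; the URL and locator values are placeholders, and the driver setup assumes a locally available chromedriver.

# Usage sketch: the same wrapper called with three different locator names.
from selenium import webdriver as wd

driver = wd.Chrome()
driver.get('https://example.com')  # placeholder page

contacts = find_all(driver, 'class_name', 'contact')     # list of WebElements
rows = find_all(driver, 'xpath', '//table//tr')          # list of WebElements
unknown = find_all(driver, 'id', 'header')               # 'id' is not handled, so None
print(len(contacts), len(rows), unknown)
driver.quit()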
Example #11
 def runHashtagFollow(self, hashtag, clickedList, selectedAccount):
     sleep(4)
     search_bar = webdriver.find_element_by_xpath(
         '//*[@id="react-root"]/div/section/div[4]/div/div/header/div/div[2]/input'
     )
     search_bar.click()
     search_bar.send_keys(hashtag)
     sleep(2)
     filter_time = webdriver.find_element_by_xpath(
         '//*[@id="react-root"]/div/section/div[4]/div/div/div[1]/div/div/section/div[1]/div[2]/div[2]/h1/span/span'
     )
     filter_time.click()
     filter_month = webdriver.find_element_by_xpath(
         '//*[@id="react-root"]/div/section/div[4]/div/div/div[1]/div/div/section/div[1]/div[2]/div[2]/h1/span/span[2]/span[1]/span[2]'
     )
     filter_month.click()
     filter_time.click()
     sleep(2)
     follow_list = webdriver.find_elements_by_xpath(
         '//*[@id="react-root"]/div/section/div[4]/div/div/div[1]/div/div/section/div[1]/div[2]/div[2]/section/div/ul/li'
     )
     max_val = len(follow_list)
     for val in range(3, max_val):
         try:
             follow_button = webdriver.find_element_by_xpath(
                 '//*[@id="react-root"]/div/section/div[4]/div/div/div[1]/div/div/section/div[1]/div[2]/div[2]/section/div/ul/li['
                 + str(val) + ']/button')
             follow_button.click()
             print(
                 "Clicked: " +
                 '//*[@id="react-root"]/div/section/div[4]/div/div/div[1]/div/div/section/div[1]/div[2]/div[2]/section/div/ul/li['
                 + str(val) + ']/button')
         except:
             print(
                 "no follow button: " +
                 '//*[@id="react-root"]/div/section/div[4]/div/div/div[1]/div/div/section/div[1]/div[2]/div[2]/section/div/ul/li['
                 + str(val) + ']/button')
         sleep(1)
     return True
Example #12
def how_many_scrolls(webdriver):
    '''
    Simple helper that estimates how many scroll actions are needed to page through
    all of the brand listings shown on the current page.
    
    --------------------------------------------------------------------------------------------
    Input:
    webdriver: Selenium Webdriver object
    
    --------------------------------------------------------------------------------------------
    Output:
    returns: the estimated number of scrolls needed, i.e. total items divided by items per page, plus one.
    '''
    
    total_items_raw = webdriver.find_element_by_xpath('//h3[@class="-summary"]').text
    temp = re.search(r"\d+", total_items_raw)
    total_items = int(temp.group())
    print(total_items)

    listings = webdriver.find_elements_by_xpath('//div[@class="feed-item"]')
    items_on_page = len(listings)
    print(items_on_page)

    return (total_items // items_on_page) + 1
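
A brief usage sketch for how_many_scrolls, assuming the driver is already on a listing page that contains the '-summary' heading and 'feed-item' blocks the function looks for; the URL is a placeholder.

# Usage sketch: scroll to the bottom as many times as how_many_scrolls estimates.
import time
from selenium import webdriver as wd

driver = wd.Chrome()
driver.get('https://example.com/brand-listings')  # placeholder listing page

for _ in range(how_many_scrolls(driver)):
    driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
    time.sleep(2)  # give the feed time to load the next batch
driver.quit()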
Example #13
def getMatchLinks(webdriver):
    # find the button for next pagination
    next_button = webdriver.find_element_by_xpath(
        "//*[@id='tie-block_2186']/div/div[1]/div/div/ul/li[2]/a")
    # results = driver.find_elements_by_xpath("//li/h3/a")
    # print('cantidad de resultados: {}'.format(len(results)))
    partidos = []
    loop = True
    cont = 1

    # loop the results
    while loop:
        wait = WebDriverWait(webdriver, 10)
        next_button = wait.until(
            EC.element_to_be_clickable((By.CLASS_NAME, 'next-posts')))

        # next_button = driver.find_element_by_class_name('next-posts')
        loop = not ('pagination-disabled' in next_button.get_attribute('class'))
        #print(next_button.get_attribute('class') + ' | ' + str(loop))

        results = webdriver.find_elements_by_xpath("//li/h3/a")
        print(' {} results in page {} !'.format(str(len(results)), cont))

        for result in results:
            partido = Game(result.text, result.get_attribute('href'))
            partidos.append(partido)

        if loop:
            next_button.click()
            # wait for the transition animation to finish loading results
            time.sleep(6)
            webdriver.implicitly_wait(6)

        cont += 1

    return partidos
Example #14
from selenium import webdriver

webdriver = webdriver.Firefox()
webdriver.implicitly_wait(10)
webdriver.maximize_window()
webdriver.get("http://www.baidu.com")
keyword = webdriver.find_element_by_id("kw")
keyword.clear()
keyword.send_keys("山东")
keyword.submit()
products = webdriver.find_elements_by_xpath("//div[contains(@class, 'c-abstract')]")
print("Found " + str(len(products)) + "products:")
for product in products:
    print(product.text)
webdriver.close()
Example #15
def check_exists_by_xpath(webdriver, xpath):
    try:
        # the plural find_elements_* never raises, so use the singular form here
        webdriver.find_element_by_xpath(xpath)
    except NoSuchElementException:
        return False
    return True
Example #16
num = int(input('How many times? '))
webdriver = webdriver.Chrome(executable_path=chromedriver_path)
webdriver.get('https://www.instagram.com/direct/inbox/')
username = WebDriverWait(
    webdriver, 15).until(lambda d: d.find_element_by_name('username'))
username.send_keys(user)
password = webdriver.find_element_by_name('password')
password.send_keys(pswd)

# Instead of locating the Log In button you can simply press Enter once the username or password input has focus (see the sketch after this example); here the form element is submitted directly.
submit = webdriver.find_element_by_tag_name('form')
submit.submit()
notNowButton = WebDriverWait(
    webdriver,
    15).until(lambda d: d.find_element_by_xpath('//button[text()="Not Now"]'))
notNowButton.click()
sleep(3)  # need to remove this hard code
string = "//*[contains(text()," + "\'" + receiver + "\'" + ")]"
el2 = webdriver.find_elements_by_xpath(string)
for x in el2:
    if (x.text == receiver):
        x.click()
        break
for i in range(num):
    text = webdriver.find_element_by_xpath(
        "//textarea[@placeholder='Message...']")
    text.clear()
    text.send_keys(message)
    el2 = webdriver.find_elements_by_xpath("//*[contains(text(), 'Send')]")
    el2[0].click()
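
As the comment in this example notes, the form lookup can be skipped entirely; a minimal sketch of the Enter-key alternative, reusing the webdriver instance and pswd variable from the snippet above.

# Alternative sketch: submit the login form by sending Enter to the password
# field instead of locating the <form> (or the Log In button) and submitting it.
from selenium.webdriver.common.keys import Keys

password = webdriver.find_element_by_name('password')
password.send_keys(pswd)
password.send_keys(Keys.RETURN)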
Example #17
                 60).until(lambda browser: browser.find_elements_by_xpath(
                     '//*[@id="hitList"]/div[2]/table/tbody/tr'))
         # ---- loop over the queries that belong to the same specified experiment
         for query_node in queries_judges_link_nodes:
             query_info_node = query_node.find_element_by_xpath(
                 './td/a[@data-bind="text: QueryText, attr: {href: HitUrl()}"]'
             )  # element that contains the query and its link
             query = query_info_node.text
             query_info_node.click()
             browser.switch_to.window(
                 browser.window_handles[-1]
             )  # switch to the specified query information webpage
             google_bing_label_nodes = WebDriverWait(
                 browser, 60
             ).until(lambda webdriver: webdriver.find_elements_by_xpath(
                 '//*[@id="RightPane"]/table/tbody/tr/td/span[@style="color: red; font-weight: 700; font-size: x-large;"]'
             ))
             left_node_label = google_bing_label_nodes[0].text
             right_node_label = google_bing_label_nodes[-1].text
             res = ("left:" + left_node_label.lower() + ", right:" +
                    right_node_label.lower())
             print("{2}\t{0}\t{1}".format(query, res, taskId))
             fw.write("{2}\t{0}\t{1}\n".format(query, res, taskId))
             browser.close()
             browser.switch_to.window(
                 exp_window_handle
             )  # switch to the specified window that contains the query list
     print("\n\nExp finished {0}\n\n".format(exp_num))
     browser.close()
     browser.switch_to.window(main_window)
 #--------------------Decode query engine info -End- -------------------
Example #18
from selenium import webdriver
import time

'''If you want to run this, specify the path to the chromedriver'''
path_to_chromedriver = 'S:\\chromedriver.exe'

webdriver = webdriver.Chrome(path_to_chromedriver)
time.sleep(2)
print("load pages")
webdriver.get('https://riarating.ru/infografika/20191112/630141653.html')
time.sleep(4)
frame_table = webdriver.find_element_by_xpath("/html/body/div[3]/div[1]/div/div/iframe")
webdriver.switch_to.frame(frame_table)
rows = len(webdriver.find_elements_by_xpath('/html/body/div/div/div[3]/table/tbody/tr'))
cols = len(webdriver.find_elements_by_xpath('/html/body/div/div/div[3]/table/tbody/tr[1]/td'))

print(rows)
print(cols)

for row in range(1,rows+1):
    for col in range(1,cols+1):
        value = webdriver.find_element_by_xpath("/html/body/div/div/div[3]/table/tbody/tr["+str(row)+"]/td["+str(col)+"]").text
        value = str(value).replace("­", "")
        with open('output.txt', 'a', encoding='utf-8') as f:
            f.write(value + '\n')
        print(value)
Example #19
webdriver.find_element_by_name("")
webdriver.find_element_by_class_name("")
webdriver.find_element_by_tag_name("")
webdriver.find_element_by_link_text("")
webdriver.find_element_by_partial_link_text("")
webdriver.find_element_by_css_selector("")
webdriver.find_element_by_xpath("")
# Locate a group of elements and return a list of WebElements -- 8 variants
webdriver.find_elements_by_id("")  # locate multiple elements by id
webdriver.find_elements_by_name("")  # locate multiple elements by name
webdriver.find_elements_by_class_name("")  # locate multiple elements by class
webdriver.find_elements_by_tag_name("")  # locate multiple elements by tag
webdriver.find_elements_by_link_text("")  # locate multiple elements by link text
webdriver.find_elements_by_partial_link_text("")  # locate multiple elements by partial link text
webdriver.find_elements_by_css_selector("")  # locate multiple elements by css selector
webdriver.find_elements_by_xpath("")  # locate multiple elements by xpath
# These two parameterized methods generalize the 8 locator variants above
webdriver.find_element(by='id', value="")
webdriver.find_elements(by='id', value="")
'''
1. XPath syntax:
Specify the tag: //* or //input[@id="kw"]
Locate by @attribute: id, name, class, or any other attribute
Logical operators and/or/not; "and" is used most often to match two attributes at once: //*[@id="kw" and @name="aa"]
Hierarchy locating with /
Index locating starts at 1, e.g. input[1]; use an index when several identical tags match
Fuzzy matching: match the text between the tags with //*[contains(text(), "hao123")]
        fuzzy-match an attribute with //*[contains(@id, "kw")]
        match a prefix with //*[starts-with(@id, "kw")]
        match a suffix with //*[ends-with(@id, "kw")]
        regular expressions are also claimed to work, //*[match(text(), "kw")] and (my guess) //*[match(@id, "^kw")]
        -- note that ends-with() and matches() are XPath 2.0 functions and are not available in the XPath 1.0 engine that browsers and Selenium use
'''
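
A small sketch exercising the XPath forms listed above, assuming Baidu's search box keeps id="kw" (as in example #14); the name="wd" attribute is an assumption.

# Sketch of the XPath forms described above (attribute, and, contains,
# starts-with, index). id="kw" comes from example #14; name="wd" is assumed.
from selenium import webdriver as wd

driver = wd.Firefox()
driver.get('http://www.baidu.com')

by_attr = driver.find_elements_by_xpath('//input[@id="kw"]')              # attribute match
by_and = driver.find_elements_by_xpath('//*[@id="kw" and @name="wd"]')    # logical and
by_contains = driver.find_elements_by_xpath('//*[contains(@id, "kw")]')   # fuzzy attribute match
by_prefix = driver.find_elements_by_xpath('//*[starts-with(@id, "kw")]')  # prefix match
by_index = driver.find_elements_by_xpath('//form//input[1]')              # 1-based index
print(len(by_attr), len(by_and), len(by_contains), len(by_prefix), len(by_index))
driver.quit()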
Example #20
sleep(1)
followers_list__open = webdriver.find_element_by_xpath(
    '/html/body/div[1]/section/main/div/header/section/ul/li[2]/a')
followers_list__open.click()
sleep(2)

fBody = webdriver.find_element_by_xpath("//div[@class='isgrP']")
scroll = 0
while scroll < limits.scroll_amount:  # scroll 5 times
    webdriver.execute_script(
        'arguments[0].scrollTop = arguments[0].scrollTop + arguments[0].offsetHeight;',
        fBody)
    sleep(2)
    scroll += 1

fList = webdriver.find_elements_by_xpath("//div[@class='isgrP']//li")

wbList = webdriver.find_elements_by_xpath("//div[@class='RR-M- h5uC0 SAvC5']")
print("Users found with Story to watch: {}".format(len(wbList)))
story_watching_done = 'false'
stories_next_watch = "true"
story_watching_counter = 0
stories_bf_watched = 1

try:
    for user_story in wbList:
        while story_watching_done == 'false':
            stories_next_watch = "true"
            wbList = webdriver.find_elements_by_xpath(
                "//div[@class='RR-M- h5uC0 SAvC5']")
            webdriver.execute_script("arguments[0].scrollIntoView();",
Example #21
    #     "PANPSC": "3916381627938070527%3AW5fKWWSguZBODGOBl4FW9LHQG6qeixv0W3gk0j36GcGYvOBUKVBcBn5k87Cjw7JD36RmMWyu83GNTP"
    #               "It1rTOiaS63VeBUP8%2FIjNe%2FZa%2FcMRcqAh9P390KK1Sr93PEHDi%2B%2Bo8ht7g5D2LIWeznIUCtSkA0KYPulwlfkiV"
    #               "l%2Ff%2FRfx4kU4b9dv9by76Oxw8WnAw"
    #
    # }

    #downloader = PageDownload(proxy={"http": "http://202.100.83.139:80"})
    # page = downloader.simple_download(url="http://www.52flac.com/download/9108.html")
    # #page = downloader.download_with_cookies(login_url="http://www.52flac.com/download/9222.html", cookies=cookies)
    # print page
    webSelenium = WebSelenium()
    webdriver = webSelenium.simple_download(
        "http://127.0.0.1/common/get_bmap_boundary?city=黄梅县", "chrome")

    # webdriver = webSelenium.login_with_cookies(login_url="http://pan.baidu.com/s/1c03zJGW", cookies_data=cookies, domain="pan.baidu.com")
    button_path = webdriver.find_elements_by_xpath("/html/body/input[2]")[0]
    button_path.click()
    time.sleep(5)
    button_path.click()
    button_download = webdriver.find_element_by_xpath("/html/body/input[3]")
    time.sleep(5)
    button_download.click()

    # textbox.send_keys("m43t")
    # button = webdriver.find_elements_by_xpath("//a[@class='g-button g-button-blue-large']")[0]
    # button.click()
    # WebDriverWait(webdriver, 30).until(lambda the_driver: the_driver.find_element_by_xpath(
    #         "//a[@class='g-button g-button-blue']").is_displayed())
    # save_button = webdriver.find_elements_by_xpath("//a[@class='g-button g-button-blue']")[0]
    # save_button.click()
Example #22
def story_from_followers():
    num = 0
    for user in userlist.users:
        sleep(2)
        webdriver.get('https://www.instagram.com/' + userlist.users[num] + '/')
        sleep(5)
        print("Getting followers with Stories to watch from: {}".format(
            userlist.users[num]))
        num += 1
        sleep(1)
        followers_list__open = webdriver.find_element_by_xpath(
            '/html/body/div[1]/section/main/div/header/section/ul/li[2]/a')
        followers_list__open.click()
        sleep(2)

        fBody = webdriver.find_element_by_xpath("//div[@class='isgrP']")
        scroll = 0
        while scroll < limits.scroll_amount:  # scroll 5 times
            webdriver.execute_script(
                'arguments[0].scrollTop = arguments[0].scrollTop + arguments[0].offsetHeight;',
                fBody)
            sleep(2)
            scroll += 1

        fList = webdriver.find_elements_by_xpath("//div[@class='isgrP']//li")

        wbList = webdriver.find_elements_by_xpath(
            "//div[@class='RR-M- h5uC0 SAvC5']")
        print("Users found with Story to watch: {}".format(len(wbList)))
        story_watching_done = 'false'
        stories_next_watch = "true"
        story_watching_counter = 0
        stories_bf_watched = 1

        try:
            for user_story in wbList:
                while story_watching_done == 'false':
                    stories_next_watch = "true"
                    wbList = webdriver.find_elements_by_xpath(
                        "//div[@class='RR-M- h5uC0 SAvC5']")
                    webdriver.execute_script("arguments[0].scrollIntoView();",
                                             wbList[story_watching_counter])
                    wbList[story_watching_counter].click()
                    while stories_next_watch == "true":
                        try:
                            sleep(randint(2, 3))
                            webdriver.implicitly_wait(2)
                            watch_all_stories_next = webdriver.find_element_by_css_selector(
                                ".ow3u_").click()
                            stories_bf_watched += 1
                            print("Stories watched from followers: {}".format(
                                stories_bf_watched))
                        except (NoSuchElementException,
                                StaleElementReferenceException) as e:
                            stories_next_watch = "false"

                    else:
                        try:
                            watch_all_stories_close_from_followers = webdriver.find_element_by_xpath(
                                "/html/body/div[1]/section/div/div/section/div[2]/button[3]/div"
                            )
                            watch_all_stories_close_from_followers.click()
                        except NoSuchElementException:
                            pass
                        wbList = webdriver.find_elements_by_xpath(
                            "//div[@class='RR-M- h5uC0 SAvC5']")
                        story_watching_counter += 1
                        sleep(1)
                else:
                    pass
        except IndexError as error:
            print("Index Error..")
            print("Moving to next user...")
            pass
    else:
        pass
Example #23
sleep(2)

webdriver.get(
    'https://www.instagram.com/accounts/access_tool/current_follow_requests')

sleep(3)

a = []
elem = webdriver.find_element_by_xpath("//button[@type='button']")

try:
    while (elem):
        elem.click()
        sleep(2)

except:
    b1 = webdriver.find_elements_by_xpath("//div[@class='-utLf']")
    for i in b1:
        print(i.text)
        a.append(i.text)

for i in a:
    webdriver.get('https://www.instagram.com/' + i)
    sleep(2)
    b1 = webdriver.find_element_by_xpath(
        "//button[@class='BY3EC  _0mzm- sqdOP  L3NKy   _8A5w5    ']")
    b1.click()
    c1 = webdriver.find_element_by_xpath("//button[@class='aOOlW -Cab_   ']")
    c1.click()
    sleep(1)
Example #24
                  hashtag_list[tag] + '/')
    sleep(randint(2, 4))
    first_thumbnail = webdriver.find_element_by_xpath(
        '//*[@id="react-root"]/section/main/article/div[1]/div/div/div[1]/div[1]/a/div'
    )

    print("===>Clicking first post of the hashtag", hashtag)
    first_thumbnail.click()
    sleep(randint(1, 2))
    try:
        for x in range(1, total_posts + 1):
            print('Hashtag: ', hashtag, 'Post: ', x)

            # If we already like, then do nothing with the post, go to next post
            alreadyLike = webdriver.find_elements_by_xpath(
                "//section/span/button/div/span[*[local-name()='svg']/@aria-label='Like']"
            )
            if len(alreadyLike) == 1:
                print('===>Following the user if we have follow button')
                button_follow = webdriver.find_element_by_xpath(
                    "//button[text()='Follow']")

                if button_follow.text == 'Follow':
                    print("===following user now in few seconds")
                    sleep(randint(4, 6))
                    button_follow.click()
                    followed += 1
                else:
                    print(
                        "===>No follow button available, so skipping the following"
                    )
Example #25
webdriver.get('https://www.instagram.com/accounts/login/?source=auth_switcher')
sleep(3)

username = webdriver.find_element_by_name('username')
username.send_keys('')
password = webdriver.find_element_by_name('password')
password.send_keys('')

button_login = webdriver.find_element_by_css_selector('#react-root > section > main > div > article > div > div:nth-child(1) > div > form > div:nth-child(4) > button')
button_login.click()
sleep(5)

notnow = webdriver.find_element_by_css_selector('body > div.RnEpo.Yx5HN > div > div > div.mt3GC > button.aOOlW.HoLwm')
notnow.click() #comment these last 2 lines out, if you don't get a pop up asking about notifications
webdriver.get("https://www.instagram.com/dwini_k/")

for i in range(30):
    time.sleep(10)
    buttons = webdriver.find_elements_by_xpath("//a[@class='-nal3 ']")
    following_button = [button for button in buttons if 'following' in button.get_attribute('href')]
    following_button[0].click()
    time.sleep(20)
    for j in range(10):
        webdriver.find_element_by_xpath("//button[@class='sqdOP  L3NKy    _8A5w5    ']").click()
        time.sleep(1)
        webdriver.find_element_by_xpath("//button[@class='aOOlW -Cab_   ']").click()
        time.sleep(10)
    time.sleep(randint(400,800))
    webdriver.refresh()

Example #26
password_element.send_keys(data['password'])
password_element.send_keys(Keys.RETURN)

webdriver.get(url_job_search)
random_time = random.uniform(3.5, 4.9)
time.sleep(random_time)

amount_of_results = webdriver.find_element_by_css_selector('body > div.application-outlet > div.authentication-outlet > div.job-search-ext > div > div > section.jobs-search__left-rail > div > header > div.jobs-search-results-list__title-heading > small')

print(f'amount_of_results: {amount_of_results.text}')

scrollresults = webdriver.find_element_by_class_name("jobs-search-results")

for i in range(300, 3000, 100):
    webdriver.execute_script("arguments[0].scrollTo(0, {})".format(i), scrollresults)


job_links = webdriver.find_elements_by_xpath('//div[@data-job-id]')

IDs = []
for job_link in job_links:
    children = job_link.find_elements_by_xpath('.//a[@data-control-name]')
    for child in children:
        temp = job_link.get_attribute("data-job-id")
        jobID = temp.split(":")[-1]
        IDs.append(int(jobID))
        

print(IDs)

webdriver.close()