# --- Fragment: the enclosing `for c in ...:` loop and the opening `try:`
# are above this view, and the first line's leading indent was lost in
# extraction. Code bytes below are unchanged; only comments are added. ---
if likes == 'Reply':
            likes = 0
        # Normalize the like-count label by stripping the 'likes'/'like'
        # suffix. NOTE(review): once likes has been set to the int 0 above,
        # .replace raises AttributeError and the bare except silently no-ops.
        try:
            likes = likes.replace('likes', '')
        except:
            pass
        try:
            likes = likes.replace('like', '')
        except:
            pass
    except:
        # Any failure while reading the like count defaults it to 0.
        likes = 0
    try:
        # Reply count is taken from the 'View replies' toggle text; the
        # label itself is stripped so only the number remains.
        replies = c.find_element_by_class_name('EizgU').text
        replies = replies.replace('View replies', '')
    except:
        # No visible replies toggle — treat as zero replies.
        replies = 0
    # Accumulate one row per scraped comment element.
    user_likes.append(likes)
    user_names.append(name)
    user_comments.append(content)
    user_replies.append(replies)

# Drop the leading entry from each column (presumably the post caption,
# which Instagram renders like a comment — TODO confirm against the page).
# Guarded so that a post with zero scraped comments no longer raises
# IndexError on pop(0).
for column in (user_names, user_comments, user_likes, user_replies):
    if column:
        column.pop(0)

# Write the aligned columns out via the project's Excel exporter.
excel_exporter.export(user_names, user_comments, user_likes, user_replies)

driver.close()
# 예제 #2 (Example #2)
# 0
    # --- Fragment: the opening `try:` for this block is above this view;
    # code bytes below are unchanged, only comments are added. ---
    print("Found {}".format(str(load_more_comment)))
    i = 0
    # Keep clicking the "load more comments" button until it is hidden;
    # the huge hard cap (10000000) effectively means "until it disappears"
    # (the commented-out sys.argv[2] was presumably a configurable cap).
    while load_more_comment.is_displayed() and i < 10000000:  #int(sys.argv[2])
        load_more_comment.click()
        # Give Instagram time to render the next batch before re-querying.
        time.sleep(1.5)
        load_more_comment = driver.find_element_by_css_selector(
            '.MGdpg > button:nth-child(1)')
        print("Found {}".format(str(load_more_comment)))
        i += 1
except Exception as e:
    # Once the last page is reached the button lookup/click raises
    # (missing/stale element); log it and continue with what was loaded.
    print(e)
    pass

# Scrape commenter names and comment bodies from the currently loaded
# Instagram comment elements, then export them to Excel and close the
# browser session.
user_names = []
user_comments = []
# NOTE(review): the trailing space in 'gElp9 ' looks accidental — class-name
# locators normally take a single class token; confirm against the live DOM
# before changing it, so the string is left as-is here.
comment = driver.find_elements_by_class_name('gElp9 ')
for c in comment:
    container = c.find_element_by_class_name('C4VMK')
    name = container.find_element_by_class_name('_6lAjh').text
    content = container.find_element_by_tag_name('span').text
    # Flatten multi-line comments to a single line. strip() already trims
    # both ends, so the original trailing .rstrip() was a redundant no-op
    # and has been removed.
    content = content.replace('\n', ' ').strip()
    user_names.append(name)
    user_comments.append(content)

# Drop the leading entry (presumably the post caption, rendered like a
# comment — TODO confirm). Guarded so an empty scrape no longer raises
# IndexError on pop(0).
if user_names:
    user_names.pop(0)
if user_comments:
    user_comments.pop(0)
import excel_exporter
excel_exporter.export(user_names, user_comments)

driver.close()
# 예제 #3 (Example #3)
# 0
        # --- Fragment: the `def clean(...)` header and the opening of this
        # emoji character-class regex are above this view; code bytes below
        # are unchanged, only comments are added. ---
        u"\u2640-\u2642"
        "]+",
        flags=re.UNICODE)

    # Strip emoji from the comment field (x[1]) of each row in place and
    # mirror each cleaned comment into the module-level cmtprat list; the
    # mutated rows are returned via the same `text` object.
    for x in text:
        x[1] = emoji_pattern.sub(r'', x[1])
        cmtprat.append(x[1])
    return text


# Build [username, comment] rows from the fetched comment payloads, clean
# them, render them into the table, and hand the columns to the exporter.
scraped_data = []

for entry in comments:
    # Resolve the commenting user's profile to get a display username.
    profile = api.user_info(entry['user_id'])
    username = profile['user']['username']
    usrprat.append(username)
    scraped_data.append([username, entry['text']])
    # Progress counter (i is initialized before this block).
    print(i)
    i += 1

# clean() strips emoji from the comment column and fills cmtprat.
cleaned_data = clean(scraped_data)
for row in cleaned_data:
    y.add_row(row)

print(y)

pr.export(usrprat, cmtprat)