def get_ip_info(proxies):
    """Query ipinfo.io through the given proxies and return the public IP.

    Returns the fallback message string when the request fails (the `get`
    helper returns a falsy value on failure).
    """
    response = get(requests.session(), 'https://ipinfo.io', proxies=proxies)
    if not response:
        return '未获取到ip'
    return json.loads(response.text)['ip']
Esempio n. 2
0
 def run(self):
     """Load the stored cookies into a fresh session, then request each
     ad URL and report every page that contains 'Add to Cart'."""
     session = requests.session()
     for name, value in self.cookies.items():
         session.cookies.set(name, value)
     for ad_url in self.AD_urls:
         response = tools.get(session, ad_url)
         if response and 'Add to Cart' in response.text:
             print('成功点击了广告!')
Esempio n. 3
0
 def run(self):
     """Replay the stored cookies against the task's attack URL and, when
     the ad click succeeds, report the result via update_kill_data."""
     session = requests.session()
     for name, value in self.cookies.items():
         session.cookies.set(name, value)
     target = self.task.get('attack_url')
     response = get(session, url=target, headers=self.headers, proxies=self.proxies)
     # Guard clause: anything short of a page containing the cart marker
     # counts as a failed click and is silently skipped.
     if not (response and 'Add to Cart' in response.text):
         return
     logger.info('成功点击广告 [1] 次!')
     click_report = {
         "id": self.task.get("id"),
         "click_number": "1",
         "brand_name": self.task.get("attack_brand_name"),
         "asin": self.task.get("attack_asin"),
         "log": '成功点击广告1次!',
         "ip": self.get_ip_info(proxies=self.proxies)
     }
     payload = {
         'data': {
             "master_id": self.task.get("master_id"),
             # attack_info is double-encoded: a JSON string nested inside
             # the outer JSON payload, matching what update_kill_data expects.
             "attack_info": json.dumps(click_report)
         }
     }
     self.update_kill_data(json.dumps(payload))
Esempio n. 4
0
# Entry point for one registration task.
def main(user_infos, proxy, index, register_city):
    """Build an AliexpressRegisterSpider for this task and run its
    registration flow."""
    AliexpressRegisterSpider(user_infos, proxy, index, register_city).register()


#思路:让主线程循环获取任务,每次获取一个任务,并创建一个子线程去跑注册流程,如果子线程网络出错或者其他原因导致的超时,则直接退出子线程。这样主线程又可以获取任务接着跑了。
if __name__ == '__main__':
    while True:
        s = requests.Session()
        try:
            # 获取地址ip
            # r = tools.get(s, 'http://third.gets.com/api/index.php?sec=20171212getscn&act=aliexpressGetOneValidAddress')
            r = tools.get(
                s,
                'http://third.gets.com/api/index.php?sec=20171212getscn&act=aliexpressGetOneValidAddress&country_code=DE'
            )
            html = json.loads(r.text)
            if int(html['code']) == 200:
                # oxy代理
                # proxies = tools.get_oxylabs_proxy('us', None, random.random())
                # 住宅代理
                proxies = {
                    'https':
                    'http://10502+DE+10502-%s:[email protected]:8000'
                    % random.randint(300000, 400000)
                }
                getIpInfo = tools.get(requests.session(),
                                      'https://ipinfo.io',
                                      proxies=proxies)
                if getIpInfo:
Esempio n. 5
0
        else:
            ip = '未获取到ip'
        return ip


def main(task_id, task_info):
    """Run the review spider for a single fetched task."""
    AliexpressReviewSpider(task_id, task_info).run()


if __name__ == '__main__':
    # 多线程循环模式:默认一次最多获取5个待评论任务,自动根据获取到的任务数量创建相应数量的线程数,去执行各自分配到的刷单任务;当获取不到任务时,程序退出或休眠。
    #修改limit参数即可设置获取的任务数量,现在是2
    while True:
        url = 'http://third.gets.com/api/index.php?sec=20171212getscn&act=aliexpressGetTaskOrdersList2&country_code=US&get_type=3&limit=6'
        resp = get(requests.session(), url=url)
        if resp:
            try:
                if resp.json() == []:
                    # logger.info('未获取到待评论列表的数据,程序结束!')
                    # sys.exit(0)
                    logger.info('未获取到待评论的数据,程序[sleep 10m]...')
                    time.sleep(60 * 10)

                else:
                    tasks_all = resp.json()
                    task_item_list = []
                    for task_id_str in tasks_all:
                        item = (int(task_id_str), tasks_all[task_id_str])
                        task_item_list.append(item)
                    logger.info('成功获取到%d条待留评的数据!' % len(task_item_list))
Esempio n. 6
0
import re
import requests
from mytools import tools
import json

# Fetch the latest verification e-mail for the hard-coded account and print
# the six-digit code found in its body.
s = requests.Session()
cvf = tools.get(
    s,
    'http://134.175.243.164/mailbox/index.php?m=api&do=getVerifyCode&email=' +
    '*****@*****.**' + '&account=mm123')
if cvf.text:
    cvf = json.loads(cvf.text)
    # NOTE(review): assumes the API response always carries a 'message'
    # key — a missing key would raise KeyError; confirm against the API.
    if 'mail_body' in cvf['message']:
        body = cvf['message']['mail_body']
        # Fix: raw string — '\d' in a plain literal is an invalid escape
        # (SyntaxWarning/DeprecationWarning on modern Python).
        form_R = re.compile(r'(\d{6})')
        list_form = form_R.findall(body)
        # Fix: guard the subscript — findall returns [] when the mail body
        # contains no six-digit run, which previously raised IndexError.
        if list_form:
            print(list_form[0])