def get_cookies():
    try:
        udemy_cookies = browser_cookie3.load(domain_name='www.udemy.com')._cookies["www.udemy.com"]["/"]
        return udemy_cookies["access_token"].value, udemy_cookies["client_id"].value

    except KeyError:
        raise NoCookiesException()
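
A minimal sketch of the same lookup (assuming NoCookiesException is defined as above), using the public CookieJar iteration API instead of the private _cookies attribute:

import browser_cookie3

def get_cookies_via_iteration():
    # Collect name -> value for every Udemy cookie the browser has stored.
    jar = browser_cookie3.load(domain_name='www.udemy.com')
    values = {c.name: c.value for c in jar if 'udemy.com' in c.domain}
    try:
        return values['access_token'], values['client_id']
    except KeyError:
        raise NoCookiesException()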
Example #2
    def __init__(self,
                 gpu,
                 notification_handler,
                 locale="en_us",
                 test=False,
                 interval=5):
        self.product_ids = set()
        self.cli_locale = locale.lower()
        self.locale = self.map_locales()
        self.session = requests.Session()
        self.gpu = gpu
        self.enabled = True
        self.auto_buy_enabled = False
        self.attempt = 0
        self.started_at = datetime.now()
        self.test = test
        self.interval = interval

        self.gpu_long_name = GPU_DISPLAY_NAMES[gpu]

        self.cj = browser_cookie3.load(".nvidia.com")
        self.session.cookies = self.cj

        # Force auto_buy_enabled back to False if it is not a bool.
        if not isinstance(self.auto_buy_enabled, bool):
            self.auto_buy_enabled = False

        adapter = TimeoutHTTPAdapter()
        self.session.mount("https://", adapter)
        self.session.mount("http://", adapter)
        self.notification_handler = notification_handler

        self.get_product_ids()
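
TimeoutHTTPAdapter is project-specific and not shown here; a common minimal implementation of such an adapter (an assumption, not necessarily the project's actual code) injects a default timeout into every request:

import requests
from requests.adapters import HTTPAdapter

class TimeoutHTTPAdapter(HTTPAdapter):
    """HTTPAdapter that applies a default timeout when the caller sets none."""

    def __init__(self, *args, timeout=10, **kwargs):
        self.timeout = timeout  # assumed default; the real project may differ
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        # requests always passes timeout= (possibly None), so check the value.
        if kwargs.get("timeout") is None:
            kwargs["timeout"] = self.timeout
        return super().send(request, **kwargs)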
Example #3
def main(args=None):
    p, args = parse_args(args)

    try:
        if args.browser:
            cj = args.browser(cookie_file=args.cookie_file,
                              key_file=args.key_file)
        else:
            cj = browser_cookie3.load()
    except browser_cookie3.BrowserCookieError as e:
        p.error(e.args[0])

    for cookie in cj:
        if cookie.domain in (args.domain,
                             '.' + args.domain) and cookie.name == args.name:
            if not args.json:
                print(cookie.value)
            else:
                print(
                    json.dumps({
                        k: v
                        for k, v in vars(cookie).items()
                        if v is not None and (k, v) != ('_rest', {})
                    }))
            break
    else:
        raise SystemExit(1)
Exemple #4
0
def find_info():
    webbrowser.open(
        "https://sheilta.apps.openu.ac.il/pls/dmyopt2/sheilta.myop")

    time.sleep(30)

    jar = requests.cookies.RequestsCookieJar()
    cj = browser_cookie3.load(domain_name='openu.ac.il')
    #print(cj)
    for cookie in cj:
        if 'openu.ac.il' in cookie.domain:
            jar.set(cookie.name, cookie.value, domain=cookie.domain, path='/')
    #print(jar)
    response = requests.get(
        "https://sheilta.apps.openu.ac.il/pls/dmyopt2/course_info.courses",
        cookies=jar)
    if response.status_code != 200:
        print("Failed to get 200 from server! got %d instead" %
              response.status_code)
        sys.exit(1)

    #print(response.text)

    for loc1 in re.findall(r'https://.+course\.php\?.+?"', response.text):
        course = loc1.split("course=")[1].split("&semester")[0]
        tmp = response.text.find("course_info.courseinfo?p_kurs=" + course[1:])
        grp = response.text[tmp:tmp + 100].split("p_MERKAZ_LIMUD=")[1].split(
            "&")[0] + "_" + response.text[tmp:tmp + 100].split(
                "p_KVUTZAT_LIMUD=")[1].split("&")[0]
        semester = response.text[tmp:tmp +
                                 100].split("p_semester=20")[1].split("&")[0]
        print("Course=" + course + ", Group=" + grp + ", Semester=" + semester)
Example #5
def fetch_cookies():
    try:
        cookies = browser_cookie3.load(domain_name='www.udemy.com')
        return requests.utils.dict_from_cookiejar(cookies), cookies
    except Exception:
        print('\nAuto login failed! Try adding a cookie file using "py udemy.py -c cookie_file.txt"')
        exit()
Example #6
def set_cookies(cookies):
    if isinstance(cookies, str):
        if cookies == "from_browser":
            try:
                import browser_cookie3
                cookies = browser_cookie3.load(domain_name='.facebook.com')
            except ImportError:
                raise ModuleNotFoundError(
                    "browser_cookie3 must be installed to use browser cookies")
        else:
            try:
                cookies = parse_cookie_file(cookies)
            except ValueError as e:
                raise exceptions.InvalidCookies(
                    f"Cookies are in an invalid format: {e}")
    elif isinstance(cookies, dict):
        cookies = cookiejar_from_dict(cookies)
    if cookies is not None:
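        # Facebook marks a logged-in session with the c_user and xs cookies.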
        cookie_names = [c.name for c in cookies]
        missing_cookies = [
            c for c in ['c_user', 'xs'] if c not in cookie_names
        ]
        if missing_cookies:
            raise exceptions.InvalidCookies(
                f"Missing cookies with name(s): {missing_cookies}")
        _scraper.session.cookies.update(cookies)
        if not _scraper.is_logged_in():
            raise exceptions.InvalidCookies("Cookies are not valid")
Example #7
    def __init__(self,
                 cookie=None,
                 username=None,
                 password=None,
                 useBrowserCookie=None,
                 requireUserId=True):
        self.token = None
        self.processes = Processes()

        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent':
            "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
        })
        self.robloxPath = self.GetLatestRobloxExe()

        if useBrowserCookie and not cookie:
            useBrowserCookie = useBrowserCookie.lower()
            if useBrowserCookie == "chrome":
                cookie = bc.chrome(domain_name=".roblox.com")
            elif useBrowserCookie == "firefox":
                cookie = bc.firefox(domain_name=".roblox.com")
            else:
                cookie = bc.load(domain_name=".roblox.com")

        if cookie:
            self.session.cookies = cookie
            if requireUserId:
                request = self.session.get(
                    "https://assetgame.roblox.com/Game/GetCurrentUser.ashx")
                self.userId = request.text
        elif username and password:
            self.login(username, password)
Example #8
def get_cookie_from_browser(site='https://pc.woozooo.com'):
    """直接读取浏览器的 cookie 数据库,优先返回 Firefox cookie,最后为 Chrome
    """
    cookie = {}
    domain = re.match(r".*://([^/]+)/?", site)
    domain = domain.groups()[0]
    domain = domain.split(".")
    domain = ".".join(domain[-2:])
    cookies = browser_cookie3.load(domain_name=domain)
    for c in cookies:
        if c.domain in site:
            if c.name in ("ylogin", 'phpdisk_info'):
                cookie[c.name] = c.value

    return cookie
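
A hedged alternative for the host extraction above (not the original author's code): urllib.parse yields the same registrable domain without a hand-written regex.

from urllib.parse import urlparse

def registrable_domain(site='https://pc.woozooo.com'):
    host = urlparse(site).hostname          # 'pc.woozooo.com'
    return '.'.join(host.split('.')[-2:])   # 'woozooo.com'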
Example #9
def get_headers(host, port):  #pylint: disable=unused-argument
    headers = {}
    cookie_domain = host
    cookie_string = ''
    # get cookies for domain and all parent domains except tld
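    # e.g. 'sub.example.com' is queried first, then 'example.com'; the loop
    # stops before the bare TLD because count('.', 2) then returns 0.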
    while cookie_domain.count('.', 2):
        cj = browser_cookie3.load(domain_name=cookie_domain)
        for c in cj:
            cookie = c.name + "=" + c.value + "; "
            # add if cookie doesn't already exist from subdomain
            if not c.name + "=" in cookie_string:
                cookie_string += cookie
        cookie_domain = cookie_domain.split('.', 1)[1]
    headers[b"Cookie"] = strtobytes(cookie_string)
    return headers
Example #10
def cookies_auth():

    cookies = browser_cookie3.load()

    response = requests.get('https://oauth.vk.com/authorize',
                            params=settings.AUTH_DATA,
                            cookies=cookies)

    result = requests.utils.urlparse(response.url).fragment
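    # (VK's implicit OAuth flow returns 'access_token=...&expires_in=...&user_id=...' in the URL fragment.)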

    if result:
        return dict([item.split('=') for item in result.split('&')])
    else:
        webbrowser.get().open(response.url)
        raise ConnectionError('Authorization did not complete.')
Example #11
def get_cookie(websize):
    """
    需要将直接获取浏览器的cookie
    :return:dict
    """
    domain = '.{}.{}'.format(tldextract.extract(websize).domain, tldextract.extract(websize).suffix)
    cookies = browser_cookie3.load()
    items = dict()
    for cookie in cookies:
        item = items.get(cookie.domain, [])
        item.append({'domain': cookie.domain, 'expiry': cookie.expires,
                     'path': cookie.path, 'name': cookie.name,
                     'secure': cookie.secure, 'value': cookie.value})
        items[cookie.domain] = item
    data = items.get(domain, [])
    if not data:
        return False
    return data
Example #12
def searchTitle():
    SEARCH_API_URL = 'https://mangadex.org/search?title={}'

    session = requests.Session()
    session.headers.update(HEADERS)

    # TODO: implement login with username and password
    s = 'y'  # input('would you like to get cookies from your browser automatically for login? (y/n) ').lower()
    while s not in ('y', 'n', 'yes', 'no'):
        s = input('please enter y or n: ').lower()

    if s == 'y' or s == 'yes':
        try:
            cookies = browser_cookie3.load('mangadex.org')
            session.cookies.update(cookies)
        except Exception:
            print('error loading cookies from mangadex.org')
            login(session)
    else:
        login(session)

    title = input('what manga would you like to download? ')

    # TODO: ask for lang
    response = session.get(SEARCH_API_URL.format(title))
    if response.status_code != 200:
        print(f'request failed while searching for {title} with status code {response.status_code}, exiting...')
        exit()

    tree = etree.HTML(response.content)
    PATH = '//html/body/div[@id="content"]/div[@class="manga-entry border-bottom"]'
    node = tree.xpath(PATH)
    i = 1
    print('---the following manga are found---')
    for n in node:
        print(i, ' - ', n.xpath('div/div/a')[0].get('href'))
        i += 1
    selection = input('select the manga you want to download: ')
    while not selection.isdigit() or int(selection) >= i or int(selection) <= 0:
        selection = input('please input a valid selection: ')

    title_url = 'https://mangadex.org' + node[int(selection)-1].xpath('div/div/a')[0].get('href')
    downloadTitle(title_url, session)
Example #13
    def __init__(self, **settings):
        init()

        self.logger = logging.getLogger("Idle Master")

        logging.basicConfig(filename="idlemaster.log",
                            filemode="w",
                            format="[ %(asctime)s ] %(message)s",
                            datefmt="%m/%d/%Y %I:%M:%S %p",
                            level=logging.DEBUG)

        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(
            logging.Formatter("[ %(asctime)s ] %(message)s",
                              "%m/%d/%Y %I:%M:%S %p"))
        self.logger.addHandler(console)

        self.logger.info(Fore.GREEN + "WELCOME TO IDLE MASTER" + Fore.RESET)

        self.session = requests.Session()
        self.session.cookies = browser_cookie3.load(
            domain_name="steamcommunity.com")

        try:
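            # The first 17 characters of steamLoginSecure are the account's 64-bit SteamID.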
            self.account_id = requests.utils.dict_from_cookiejar(
                self.session.cookies)["steamLoginSecure"][:17]
        except KeyError:
            self.logger.error(
                Fore.RED +
                "Unable to load cookies; log in to https://steamcommunity.com and try again."
                + Fore.RESET)

        self.sort = settings["sort"]
        self.blacklist = settings["blacklist"]
        self.delay_per_card = settings["delayPerCard"]

        self.logger.info(Fore.GREEN +
                         "Finding games that have card drops remaining" +
                         Fore.RESET)

        self.games_left = self.get_games()
Example #14
def set_cookie_auto(browser: str = None) -> None:
    """Like set_cookie, but gets the cookies by itself.

    Requires the module browser-cookie3.
    Be aware that this process can take up to 10 seconds,
    so it should be run only once.
    To speed it up, select a browser.

    If a specific browser is set, gets data from that browser only.
    Available browsers: chrome, chromium, opera, edge, firefox
    """
    import browser_cookie3
    logger.debug('Loading cookies automatically.')
    if browser is None:
        jar = browser_cookie3.load()
    else:
        jar = getattr(browser_cookie3, browser)()
    
    for c in jar:
        if 'hoyolab' in c.domain or 'mihoyo' in c.domain:
            session.cookies.set(c.name, c.value)  # do not limit to specific domains
Example #15
    def __init__(
        self,
        jira_url,
        auth,
        params=None,
        webbrowser_auth=False,
        jsessionid_for_testing=None,
    ):
        retries = 3
        backoff_factor = 0.3
        status_forcelist = (500, 502, 504)
        retry_session = None
        self.jira_url = jira_url
        self.insight_api_url = f"{jira_url}/rest/insight/1.0"
        self.auth = auth
        # Configure retry session
        self.retry_session = retry_session or requests.Session()
        retry = Retry(
            total=retries,
            read=retries,
            connect=retries,
            backoff_factor=backoff_factor,
            status_forcelist=status_forcelist,
        )
        adapter = HTTPAdapter(max_retries=retry)
        self.retry_session.mount("http://", adapter)
        self.retry_session.mount("https://", adapter)
        self.retry_session.auth = self.auth
        self.retry_session.params = params

        # TODO: Find out why the lib does not extract session cookies
        if webbrowser_auth:
            import browser_cookie3

            self.retry_session.cookies = browser_cookie3.load()

        if jsessionid_for_testing:
            self.retry_session.cookies = requests.cookies.cookiejar_from_dict(
                {"JSESSIONID": jsessionid_for_testing})
Example #16
    def __init__(self, account_name, league):
        self.account_name = account_name
        self.league = league
        self.cookie_jar = browser_cookie3.load(config.POE[8:])
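        # Assumption: config.POE is a URL such as 'https://www.pathofexile.com';
        # [8:] strips the 8-character 'https://' prefix to leave the domain.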
Example #17
def fetch_cookies():
    cookies = browser_cookie3.load(domain_name="www.udemy.com")
    return requests.utils.dict_from_cookiejar(cookies), cookies
Example #18
def main():
    parser = argparse.ArgumentParser(description='',
                                     conflict_handler="resolve")
    authentication = parser.add_argument_group("Authentication")
    authentication.add_argument(
        '-c', '--cookies',
        dest='cookies',
        type=str,
        help="Cookies to authenticate", metavar='')

    try:
        global typeA
        args = parser.parse_args()
        name = ''
        if args.cookies:
            cookie_file = Path('./my_cookies/' + args.cookies)
            if cookie_file.exists():
                typeA = 1
                print('Performing on ' + args.cookies)
                with open(cookie_file, 'r') as fp:
                    all_cooks = fp.read().split('||')
                sessionId = all_cooks[0]
                opkey = all_cooks[1]
                newopkey = all_cooks[2]
                cck1 = ''
                cck = dict(sessionId=sessionId, opkey=opkey, newopkey=newopkey)
            else:
                print("Error - cookie file doesn't exist")
                exit()
        else:
            name = input(
                'Enter name of cookie (make sure to give a unique name every time): '
            )
            if not Path(name).is_file():
                global cookies
                typeA = 0
                try:
                    cookies = browser_cookie3.load(domain_name='.realme.com')
                    cck1 = requests.utils.dict_from_cookiejar(cookies)
                except Exception:
                    print(
                        '\nSorry, only the Chrome and Firefox browsers are supported.\nInstall one of them to continue, or use the cookie method (check the Readme on GitHub).'
                    )
                    exit()
                sessionId = cck1['sessionId']
                opkey = cck1['opkey']
                newopkey = cck1['newopkey']
                cck = dict(sessionId=sessionId, opkey=opkey, newopkey=newopkey)
            else:
                print('Another file already exists with the same name')

        url = 'https://api.realme.com/in/user/address/list'
        r2 = requests.get(url, headers=head1, cookies=cck, verify=False)
        js = r2.json()
        if len(js['data']['records']) > 0:
            for address in js['data']['records']:
                addID = address['id']

                url2 = 'https://api.realme.com/in/user/address/delete'
                data2 = '{"id":"' + addID + '","siteCode":"in"}'
                r3 = requests.post(url2,
                                   headers=head1,
                                   data=data2,
                                   cookies=cck,
                                   verify=False)
                js2 = r3.json()
                if js2['msg'] == 'success':
                    print('Removed old address ' + addID)
            add_address(head1, cck, name)
        else:
            add_address(head1, cck, name)
    except Exception as e:
        print(
            e,
            'Please browse a few more pages on the website so that cookies can be captured/generated'
        )
Example #19
import requests
from bs4 import BeautifulSoup
import browser_cookie3
cj = browser_cookie3.load()
s = requests.Session()
for c in cj:
    s.cookies.set_cookie(c)
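# Several other examples in this file assign the jar directly instead:
# s.cookies = cj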
Example #20
run_scheduler = True

# INITIALIZE PROGRAM ENVIRONMENT
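# sys.frozen and sys._MEIPASS are set when running from a PyInstaller bundle.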
if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
    app_path = os.path.dirname(sys.executable)
    exec_path = sys.executable
else:
    app_path = os.path.dirname(os.path.abspath(__file__))
    exec_path = f"python \'{os.path.abspath(__file__)}\'"

# SETUP LOGGING
log = open(os.path.join(app_path, 'botlog.txt'), 'a+')

# GET COOKIES
cookies = browser_cookie3.load(domain_name=DOMAIN_NAME)
if len(cookies) == 0:
    print("Login information not found! Please login first to hoyolab once in Chrome/Firefox/Opera/Edge/Chromium before using the bot.")
    print("You only need to login once for a year to https://www.hoyolab.com/genshin/ for this bot")
    log.write('LOGIN ERROR: cookies not found\n')
    log.close()
    sys.exit(1)

# ARGPARSE
help_text = 'Genshin Hoyolab Daily Check-In Bot\nWritten by darkGrimoire'
parser = argparse.ArgumentParser(description=help_text)
parser.add_argument("-v", "--version", help="show program version", action="store_true")
parser.add_argument("-R", "--runascron", help="run program without scheduler", action="store_true")

args = parser.parse_args()
if args.version:
Example #21
def Download_Map(old_songs, songpath, osupath):
    print("importing modules...")
    try:
        import browser_cookie3
        import requests
        from bs4 import BeautifulSoup as BS
        print("successfully imported browser_cookie3, requests and bs4")
    except ImportError:
        promptm = True
        while promptm:
            i = input(
                "browser_cookie3, requests and bs4 are required to download maps from this program; would you like to install these packages? (requires pip) Y/n: "
            )
            if i == "Y" or i == "y":
                subprocess.call([
                    sys.executable, "-m", "pip", "install", "browser_cookie3"
                ])
                subprocess.call(
                    [sys.executable, "-m", "pip", "install", "requests"])
                subprocess.call(
                    [sys.executable, "-m", "pip", "install", "bs4"])
                import browser_cookie3
                import requests
                from bs4 import BeautifulSoup as BS
                print(
                    "successfully imported browser_cookie3, requests and bs4")
                promptm = False
            if i == "N" or i == "n":
                print("exiting...")
                exit()
    promptm = True
    while promptm:
        i = input(
            "Would you like to download video on all maps when possible? Y/n : "
        )
        if i == "Y" or i == "y":
            option = ""
            promptm = False
        if i == "N" or i == "n":
            option = "?noVideo=1"
            promptm = False
    BMID_list = Song_ID(songpath)
    BMID_list = [
        "https://osu.ppy.sh/beatmapsets/" + id + "\n" for id in BMID_list
    ]

    cj = browser_cookie3.load()
    print("Info: Comparing map in osu!/Songs VS updated data" + "\n")
    with open(old_songs, "r") as f:
        with open("./download osu!mapSync/NewSongs.txt", "w") as otp:
            for link in f:
                if link not in BMID_list:
                    otp.write(link)

    os.remove(old_songs)

    with open("./download osu!mapSync/NewSongs.txt", "r") as f:
        data = [i.strip("\n") for i in f]
        for idx, link in enumerate(data):
            print("Info: Downloading", link)
            headers = {"referer": link}
            with requests.get(link) as r:
                t = BS(r.text, 'html.parser').title.text.split("·")[0]
                sign = ['*', '"', '/', '\\', ':', ';', '|', '?', '<', '>']
                for s in sign:
                    t = t.replace(s, "_")
            with requests.get(link + "/download" + option,
                              stream=True,
                              cookies=cj,
                              headers=headers) as r:
                if r.status_code == 200:
                    try:
                        id = re.sub("[^0-9]", "", link)
                        with open(
                                os.path.abspath(osupath + "/Songs/" + id +
                                                " " + t + ".osz"),
                                "wb") as otp:
                            otp.write(r.content)
                        print("Success: Done downloading " + t + " " +
                              str(idx + 1) + "/" + str(len(data)) + "\n")
                        continue
                    except Exception:
                        print(
                            "You either aren't connected on osu!'s website or you're limited by the API, in which case you now have to wait 1h and then try again."
                        )
                if r.status_code == 404:
                    with requests.get(link + "/download",
                                      stream=True,
                                      cookies=cj,
                                      headers=headers) as rn:
                        try:
                            id = re.sub("[^0-9]", "", link)
                            with open(
                                    os.path.abspath(osupath + "/Songs/" + id +
                                                    " " + t + ".osz"),
                                    "wb") as otp:
                                otp.write(rn.content)
                            print("Success: Done downloading " + t + " " +
                                  str(idx + 1) + "/" + str(len(data)) + "\n")
                            continue
                        except Exception:
                            print(
                                "You either aren't connected on osu!'s website or you're limited by the API, in which case you now have to wait 1h and then try again."
                            )
Example #22
import browser_cookie3 as bc
import datetime
from flask import request
try:
    cookie_data = bc.load(domain_name='espn.com')
    swid = cookie_data._cookies['.espn.com']['/']['SWID'].value
    espn2 = cookie_data._cookies['.espn.com']['/']['espn_s2'].value
except KeyError:
    swid = request.cookies.get('SWID')
    espn2 = request.cookies.get('espn_s2')
year = datetime.datetime.today().year
# myLeague = league.League(year,espn_s2=espn2,swid=swid)
# myLeague = None
# board = graphs.DashBoard(myLeague)
Example #23
    def load_cookies_from_local_browser(self):
        """Load cookies via browser_cookie3."""
        for domain in self.accept_domains:
            self.logger.info("Loading cookies from local browser for %s." %
                             (domain, ))
            self.cookies.update(browser_cookie3.load(domain_name=domain))
    print("Config not found/corrupted! Making default config...")
    config = {
        'BROWSER': 'all',
        'SERVER_UTC': 8,
        'DELAY_MINUTE': 0,
        'ACT_ID': 'e202102251931481',
        'DOMAIN_NAME': '.mihoyo.com'
    }
    config_file = open(os.path.join(app_path, 'config.json'), 'w')
    config_file.write(json.dumps(config))

# GET COOKIES
cookies = None
try:
    if config['BROWSER'].lower() == 'all':
        cookies = browser_cookie3.load(domain_name=config['DOMAIN_NAME'])
    elif config['BROWSER'].lower() == 'firefox':
        cookies = browser_cookie3.firefox(domain_name=config['DOMAIN_NAME'])
    elif config['BROWSER'].lower() == 'chrome':
        cookies = browser_cookie3.chrome(domain_name=config['DOMAIN_NAME'])
    elif config['BROWSER'].lower() == 'opera':
        cookies = browser_cookie3.opera(domain_name=config['DOMAIN_NAME'])
    elif config['BROWSER'].lower() == 'edge':
        cookies = browser_cookie3.edge(domain_name=config['DOMAIN_NAME'])
    elif config['BROWSER'].lower() == 'chromium':
        cookies = browser_cookie3.chromium(domain_name=config['DOMAIN_NAME'])
    else:
        raise Exception("ERROR: Browser not defined!")
except Exception as e:
    print(
        "Login information not found! Please login first to hoyolab once in Chrome/Firefox/Opera/Edge/Chromium before using the bot."
Example #25
def packt_cli(cfgpath, grab, grabd, dall, sgd, mail, status_mail, folder, jwt,
              cookiejwt, noauth_local_webserver, product):
    config_file_path = cfgpath
    into_folder = folder

    try:
        cfg = ConfigurationModel(config_file_path)
        product_data = None
        if jwt or cookiejwt:
            recaptcha_solution = ''
        else:
            recaptcha_solution = solve_recaptcha(cfg.anticaptcha_api_key,
                                                 PACKT_URL,
                                                 PACKT_RECAPTCHA_SITE_KEY)

        cj = None
        if cookiejwt:
            logger.info("Fetching packtpub.com cookie")
            cj = browser_cookie3.load(domain_name='.packtpub.com')

        api_client = PacktAPIClient(
            {
                'recaptcha': recaptcha_solution,
                **cfg.packt_login_credentials
            }, cj)

        download_one = product != ''

        # Grab the newest book
        if grab or grabd or sgd or mail:
            product_data = claim_product(api_client, recaptcha_solution)

            # Send email about successful book grab. Do it only when book
            # isn't going to be emailed as we don't want to send email twice.
            if status_mail and not mail:
                from utils.mail import MailBook
                mb = MailBook(config_file_path)
                mb.send_info(subject=SUCCESS_EMAIL_SUBJECT.format(
                    dt.datetime.now().strftime(DATE_FORMAT),
                    product_data['title']),
                             body=SUCCESS_EMAIL_BODY.format(
                                 product_data['title']))

        # Download book(s) into proper location.
        if grabd or dall or sgd or mail or download_one:
            download_directory, formats = cfg.config_download_data
            download_directory = download_directory if (
                dall or grabd) else os.getcwd()  # cwd for temporary downloads
            formats = formats or AVAILABLE_DOWNLOAD_FORMATS

            # Download one book into proper location.
            if download_one:
                product_data = get_book_data(api_client, product)
                if product_data is not None:
                    # get_product_download_urls
                    download_products(api_client,
                                      download_directory,
                                      formats, [product_data],
                                      into_folder=into_folder)
            elif dall:
                download_products(api_client,
                                  download_directory,
                                  formats,
                                  get_all_books_data(api_client),
                                  into_folder=into_folder)
            elif grabd:
                download_products(api_client,
                                  download_directory,
                                  formats, [product_data],
                                  into_folder=into_folder)
            else:  # sgd or mail
                download_products(api_client,
                                  download_directory,
                                  formats, [product_data],
                                  into_folder=False)

        # Send downloaded book(s) by mail or to Google Drive.
        if sgd or mail:
            paths = [
                os.path.join(download_directory, path)
                for path in os.listdir(download_directory)
                if os.path.isfile(path)
                and slugify_product_name(product_data['title']) in path
            ]
            if sgd:
                from utils.google_drive import GoogleDriveManager
                google_drive = GoogleDriveManager(config_file_path)
                google_drive.send_files(paths)
            else:
                from utils.mail import MailBook
                mb = MailBook(config_file_path)
                pdf_path = None
                mobi_path = None
                try:
                    pdf_path = [
                        path for path in paths if path.endswith('.pdf')
                    ][-1]
                    mobi_path = [
                        path for path in paths if path.endswith('.mobi')
                    ][-1]
                except IndexError:
                    pass
                if pdf_path:
                    mb.send_book(pdf_path)
                if mobi_path:
                    mb.send_kindle(mobi_path)
            for path in paths:
                os.remove(path)

        logger.success("Good, looks like all went well! :-)")
    except Exception as e:
        logger.error("Exception occurred {}".format(e))
        if status_mail:
            from utils.mail import MailBook
            mb = MailBook(config_file_path)
            mb.send_info(subject=FAILURE_EMAIL_SUBJECT.format(
                dt.datetime.now().strftime(DATE_FORMAT)),
                         body=FAILURE_EMAIL_BODY.format(str(e)))
        sys.exit(2)
Example #26
urls = []
xpath = r'//*[@id="index_ajax_list"]'
url = 'http://www.xoiof.com/h-mate/page_3.html'
data = requests.get(url)
html = etree.HTML(data.text)
a = html.xpath(xpath)[0]
for ele in a.getchildren():
    urls.append(ele.getchildren()[0].attrib['href'])

# session = requests.Session()
# payload={'log':'lawnight',
# 'pwd':'jiangtao12'}
# session.post('http://www.xoiof.com/login', data=payload)

import browser_cookie3 
cj = browser_cookie3.load('www.xoiof.com')
cj2 = {
    'Hm_lvt_2731e0269cd08158974f8e0d8a366836': '1548289309',
    'wordpress_test_cookie': 'WP+Cookie+check',
    'Hm_lpvt_2731e0269cd08158974f8e0d8a366836': '1548343294',
}


headers = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'en,zh-CN;q=0.9,zh;q=0.8,ja;q=0.7,zh-TW;q=0.6',
'Cache-Control':'max-age=0',
'Cookie':'Hm_lvt_2731e0269cd08158974f8e0d8a366836=1548289309; wordpress_test_cookie=WP+Cookie+check; wordpress_logged_in_7a48bfd769b4b84cc8a8daf8e52cf825=lawnight%7C1548516118%7CUSTCMMgBtx1dhW3ZErzyfP3AzVu21WoYpHqKZC9kGQ5%7Cbd03f5568676230c276f1130d3c8bdc2c73b44edca3035b9d9bf1e01082bfa28; Hm_lpvt_2731e0269cd08158974f8e0d8a366836=1548344787',
'Host':'www.xoiof.com',
'Proxy-Connection':'keep-alive',
'Referer':'http://www.xoiof.com/h-xiaoyuan',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'