def getLatlng(self, address, countr):
    if address.strip() == '':
        return (None, None)
    try:
        jsonLatlng = UtiUtil_backuptGEOCode(address, countr)
        if jsonLatlng is not None:
            if jsonLatlng.get('status') == 'OK':
                result = jsonLatlng.get('results')
                for re in result:
                    if re.get('geometry') is not None:
                        geometry = re.get('geometry')
                        location = geometry.get('location')
                        lat = location.get('lat')
                        lng = location.get('lng')
                        return (str(lat), str(lng))
            else:
                return (None, None)
        else:
            return (None, None)
    except Exception as ex:
        return (None, None)
Example #2
 def push_review(self):
     for re in self.run_elements:
         if re.get('barcode'):
             patch_internal('run_elements',
                            payload=self._summary,
                            run_id=self.run_id,
                            lane=re.get('lane'),
                            barcode=re.get('barcode'))
         else:
             patch_internal('run_elements',
                            payload=self._summary,
                            run_id=self.run_id,
                            lane=re.get('lane'))
Example #3
def getUrlsFromRcms(turls, username, parent, request_id, action,  isSub, type, isdir):
    res = []
    invalids = []
    try:
        for u in turls:
            if u and u.strip():
                re = get_url(u, username, parent, request_id, action, isSub, type, isdir)
                logger.info('re: %s' % re)
                res.append(re)
                if re.get('status') == 'INVALID':
                    invalids.append({'u_id': str(re.get('_id'))})
    except Exception as e:
        logger.info('getUrlsFromRcms split error:%s' % traceback.format_exc())
        logger.info('getUrlsFromRcms split error:%s' % turls)
    return res, invalids
Example #4
    def cscmp(l, r, odd=odd):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                if d:
                    odd.add((l, r))
                d = -1
                break

        return d
Example #5
File: auth.py, Project: karbiv/py4web
 def reset_password(self):
     form = Form([Field("password", type="password")])
     user = None
     token = request.query.get("token")
     if token:
         query = self.auth._query_from_token(token)
         user = self.auth.db(query).select().first()
         if not user:
             raise HTTP(404)
     user = user or self.auth.db.auth_user(self.auth.user_id)
     form = Form(
         [
             Field(
                 "new_password",
                 type="password",
                 requires=self.auth.db.auth_user.password.requires,
             ),
             Field("new_password_again", type="password", requires=IS_NOT_EMPTY()),
         ]
     )
     if form.submitted:
         new_password = form.post_vars.get("new_password")
         if form.post_vars["new_password_again"] != new_password:
             form.errors["new_password_again"] = "Passwords do not match"
             form.accepted = False
         else:
             res = self.auth.change_password(
                 user, new_password, check=True, check_old_password=False
             )
             form.errors = res.get("errors", {})
             form.accepted = not res.get("errors")
     self._postprocessng("profile", form, user)
     return form
Example #6
def get_job_requirement(conn, job_id):
    requirements = {}
    res = requests.get_ui_return_json(
        conn,
        conn.baseurl + '/rest/api/latest/config/job/' + job_id + '/requirement', None)
    if not res:
        return requirements
    for re in res:
        key = re.get('key')
        if key:
            requirements[key] = {
                'key': key,
                'matchType': re.get('matchType'),
                'matchValue': re.get('matchValue')
            }
    return requirements
Example #7
File: auth.py, Project: karbiv/py4web
 def change_password(self):
     self._check_logged("change_password")
     user = self.auth.db.auth_user(self.auth.user_id)
     form = Form(
         [
             Field("old_password", type="password", requires=IS_NOT_EMPTY()),
             Field(
                 "new_password",
                 type="password",
                 requires=self.auth.db.auth_user.password.requires,
             ),
             Field("new_password_again", type="password", requires=IS_NOT_EMPTY()),
         ]
     )
     if form.submitted:
         old_password = form.post_vars.get("old_password")
         new_password = form.post_vars.get("new_password")
         if form.post_vars["new_password_again"] != new_password:
             form.errors["new_password_again"] = "Passwords do not match"
             form.accepted = False
         else:
             res = self.auth.change_password(
                 user, new_password, old_password, check=True
             )
             form.errors = res.get("errors", {})
             form.accepted = not res.get("errors")
     self._postprocessng("profile", form, user)
     return form
async def main(loop):
    global count
    global seen, useen
    pool = mp.Pool(mp.cpu_count())
    async with aiohttp.ClientSession() as session:
        while count < 1000000:
            tasks = [loop.create_task(fetch(session, url)) for url in useen]
            finished, unfinished = await asyncio.wait(tasks)
            htmls = [r.result() for r in finished]

            parse_jobs = [pool.apply_async(parse, args=(html, )) for html in htmls]
            results = [re.get() for re in parse_jobs]

            seen.update(useen)
            useen.clear()
            for page_urls, poem in results:
                useen.update(page_urls - seen)
                if len(useen) > 3000:
                    useen = set(list(useen)[:3000])

                if poem is not None:   # save poem
                    file_obj.write(poem)
                    count += 1
            print('已经爬取{}首诗 Downloading ......'.format(count))
            seen_file = open('seen_data.txt', 'w')
            seen_file.write(str(seen))
            seen_file.close()
Example #9
def get_time_stamp():
    # list = []
    # if list:
    #     print('============1')
    # else:
    #     print('--------------2')
    ct = time.time()
    local_time = time.localtime(ct)
    today = time.strftime("%Y-%m-%d %H:%M:%S", local_time)
    print(today)
    re = redis.StrictRedis(host='localhost', port=6379, db=0)
    task_five_key = "FLASH_TASK_FIVE_" + str(2)
    task_five_value = "FLASH_TASK_FIVE_:%s:%s" % (2, today)
    data = re.get(task_five_key)
    print(data)
    if data:
        time_value = str(data[-19:])
        print("=========", time_value)
    else:
        re.set(task_five_key, task_five_value)
    print(task_five_value)

    # if today.strftime('%Y-%m-%d %H:%M:%S') == time_value:
    #     print('==========================')

    today = datetime.date.today()
    yesterday = today - datetime.timedelta(days=1)
    print(type(yesterday.strftime('%Y-%m-%d')))
    print(type(yesterday.strftime('%Y-%m-%d')))
Example #10
    def cscmp(l, r):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                assert not d
                d = -1
                break

        return d
Example #11
def get_CreatorCount(df, i):
    url = df["creator_url"][i]
    resp = {"created": "null", "backed": "null"}
    cont = re.get(url)
    content = BeautifulSoup(cont.content, "html.parser")
    try:
        s = content.find("span", {"class": "backed"})
        result = re.search('d (.*) p', s.text)
        resp["backed"] = result.group(1)
    except:
        pass
    try:
        for each in content.find(
                "a",
            {
                "class":
                "nav--subnav__item__link nav--subnav__item__link--gray js-created-link"
            }):
            if ("Created" in str(each)):
                pass
            else:
                sk = BeautifulSoup(str(each), features="lxml")
                for c, e in enumerate(sk.find("span")):

                    if (c == 0):
                        resp["created"] = str(e).strip()
    except:
        pass
    return resp
Example #12
    def test_get_case(self):
        results = re.get(self.url('gets')).json()
        self.assertEqual(len(results), 2)

        self.assertEqual(results[0]['title'], 'java')
        self.assertEqual(results[0]['version'], '1.8.0')

        self.assertEqual(results[1]['title'], 'python')
        self.assertEqual(results[1]['version'], '3.5')
Example #13
 def getLatlng(self, address, countr):
     try:
         jsonLatlng = Util.getRequestsXML(address, countr)
         if jsonLatlng != None:
             if jsonLatlng.get('status') == 'OK':
                 result = jsonLatlng.get('results')
                 for re in result:
                     if re.get('geometry') != None:
                         geometry = re.get('geometry')
                         location = geometry.get('location')
                         lat = location.get('lat')
                         lng = location.get('lng')
                         return (str(lat), str(lng))
             else:
                 return (None, None)
         else:
             return (None, None)
     except Exception as ex:
         return (None, None)
Example #14
def get_job_requirement(conn, job_id):
    requirements = {}
    res = requests.get_ui_return_json(
        conn, conn.baseurl + "/rest/api/latest/config/job/" + job_id + "/requirement", None
    )

    if not res:
        return requirements

    for re in res:
        key = re.get("key")
        if key:
            requirements[key] = {
                "rid": re.get("id"),
                "key": key,
                "matchType": re.get("matchType"),
                "matchValue": re.get("matchValue"),
            }
    return requirements
Example #15
    def run(self):
        # Each clustering result document has a key k and a list of the
        # cluster's values v.
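        # Illustrative document shape only (field names taken from the checks
        # below, not from the original data):
        # {"k": "...", "v": [{"ident": "...", "publisher": "...",
        #                     "extra": {"container_name": "..."}}, ...]}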
        get_key_values = operator.itemgetter("k", "v")
        for i, line in enumerate(self.iterable):
            if i % 20000 == 0 and self.verbose:
                print(i, file=sys.stderr)
            line = line.strip()
            if not line:
                continue
            doc = json.loads(line)
            k, vs = get_key_values(doc)
            if len(vs) < 2:
                self.counter[Reason.SINGULAR_CLUSTER] += 1
                continue
            if len(vs) > self.max_cluster_size:
                self.counter[Reason.MAX_CLUSTER_SIZE_EXCEEDED] += 1
                continue
            for a, b in itertools.combinations(vs, r=2):
                for re in (a, b):
                    container_name = re.get("extra", {}).get(
                        "container_name", "") or ""
                    if container_name.lower().strip(
                    ) in CONTAINER_NAME_BLACKLIST:
                        self.counter[Reason.CONTAINER_NAME_BLACKLIST] += 1
                        continue
                    if re.get("publisher",
                              "").lower().strip() in PUBLISHER_BLACKLIST:
                        self.counter[Reason.PUBLISHER_BLACKLIST] += 1
                        continue
                result, reason = verify(a, b)
                self.counter[reason] += 1
                print("https://fatcat.wiki/release/{}".format(a["ident"]),
                      "https://fatcat.wiki/release/{}".format(b["ident"]),
                      result, reason)

        self.counter["total"] = sum(v for _, v in self.counter.items())
Example #16
    def cscmp(l, r, odd=odd):
        d = sum(l.date) - sum(r.date)
        if d:
            return d

        # detect vendor branches and initial commits on a branch
        le = {}
        for e in l.entries:
            le[e.rcs] = e.revision
        re = {}
        for e in r.entries:
            re[e.rcs] = e.revision

        d = 0
        for e in l.entries:
            if re.get(e.rcs, None) == e.parent:
                assert not d
                d = 1
                break

        for e in r.entries:
            if le.get(e.rcs, None) == e.parent:
                if d:
                    odd.add((l, r))
                d = -1
                break
        # By this point, the changesets are sufficiently compared that
        # we don't really care about ordering. However, this leaves
        # some race conditions in the tests, so we compare on the
        # number of files modified, the files contained in each
        # changeset, and the branchpoints in the change to ensure test
        # output remains stable.

        # recommended replacement for cmp from
        # https://docs.python.org/3.0/whatsnew/3.0.html
        c = lambda x, y: (x > y) - (x < y)
        # Sort bigger changes first.
        if not d:
            d = c(len(l.entries), len(r.entries))
        # Try sorting by filename in the change.
        if not d:
            d = c([e.file for e in l.entries], [e.file for e in r.entries])
        # Try and put changes without a branch point before ones with
        # a branch point.
        if not d:
            d = c(len(l.branchpoints), len(r.branchpoints))
        return d
Example #18
    def get_data(self, key, force_reload=False, ifnotfound=None):
        if key in self.group_data:
            re = self.group_data.get(key, ifnotfound)

            if isinstance(re, dict) and re.get('is_cache', False):
                # this is a cache, load from cache!
                if not force_reload and key in self.cache_env:
                    return self.cache_env.get(key, ifnotfound)

                # load cache
                print('Loading from cache')
                d = from_json(from_file=re['absolute_path'])

                for k, v in d.items():
                    self.cache_env[k] = v
                return self.cache_env.get(key, ifnotfound)

            return re

        return ifnotfound
async def main(loop):
    global count
    global seen, useen
    pool = mp.Pool(mp.cpu_count())
    async with aiohttp.ClientSession() as session:
        while count < 10000:
            tasks = [loop.create_task(fetch(session, url)) for url in useen]
            finished, unfinished = await asyncio.wait(tasks)
            htmls = [r.result() for r in finished]

            parse_jobs = [pool.apply_async(parse, args=(html, )) for html in htmls]
            results = [re.get() for re in parse_jobs]

            seen.update(useen)
            useen.clear()
            for page_urls, add_count in results:
                useen.update(page_urls - seen)
                if len(useen) > 200:
                    useen = set(list(useen)[:200])
                count += add_count
                print('高速爬取到第{}首 Downloading ......'.format(count))
async def main(loop):
    global count, urls
    pool = mp.Pool(mp.cpu_count())

    async with aiohttp.ClientSession() as session:
        while len(urls) > 0:
            urls_to = urls[:5]
            urls = urls[5:]
            tasks = [loop.create_task(fetch(session, url)) for url in urls_to]
            finished, unfinished = await asyncio.wait(tasks)
            htmls = [r.result() for r in finished]

            parse_jobs = [pool.apply_async(parse, args=(html, )) for html in htmls]
            poem_datas = [re.get() for re in parse_jobs]
            for poem_data in poem_datas:
                for i in poem_data:
                    print(i[:10])
                    file_obj.write(i)
                    count += 1
            print('已经爬取{}首诗 Downloading ......'.format(count))
            time.sleep(random.random()*5)
Example #21
 def getCourseInfo(self, keyword):
     re = dict()
     keyword = str(keyword)
     for item in Bot.COURSES:
         courseCode = item['courseCode']
         if keyword in courseCode:
             if re.get(courseCode) is None:
                 re[courseCode] = item.copy()
                 re[courseCode]['termName'] = [re[courseCode]['termName']]
             else:
                 re[courseCode]['termName'].append(item['termName'])
     res = ''
     for item in re.values():
         res += "课程代码:{}\n".format(item['courseCode'])
         res += "课程名称:{} {}\n".format(item['courseName'],
                                      item['courseNameEn'])
         res += "学分:{}\n".format(item['credit'])
         res += "开设时间:"
         for term in set(item['termName']):
             res += "{} ".format(term)
         res += "\n\n"
     return res.strip()
Example #22
def infor(request):
    # Determine whether this is a POST or a GET request
    if request.method == 'POST':
        # Get the form data submitted via POST
        r = request.POST
        a = r.get('my_name')
        # Instantiate the form class
        data = Myinfor(r, request.FILES)
        # Validate the data
        if data.is_valid():
            # Clean and process the data, then return to the profile page
            a1 = data.cleaned_data
            a = my_infor.objects.create(**a1)
            # aa=my_infor()
            a.user_img = a1.get('user_img')
            a.save()
            return redirect('用户:个人中心')  # URL name meaning "user:personal center"
        else:
            # Report the validation errors
            context = {
                'err': data.errors
            }
            return render(request, 'user/infor.html', context)
    else:
        # GET request: read the session
        re = request.session
        c = re.get('username')
        # Look up the registered user's username and primary key
        xr = User.objects.filter(username=c).first()
        aa = xr.pk
        # Check whether the user-info table already has a record for this user
        context = {
            'aaa': my_infor.objects.filter(pk=aa),
        }
        return render(request, 'user/infor.html', context)
Example #23
File: xxx.py, Project: MtucX/study
#programming number 5
#MtucX
import requests , re ,os
re = requests.Session()
vars = []
r = re.get("http://.............../Programming/Prog5.php")
r= r.text
solution = ""
list = r.split(' ')
for i in range(len(list)):
	vars.append(int(list[i]))
for i in range(2,len(vars)):
	if(vars[i-1]>= vars[i]):
		if(vars[i-1] >= vars[i-2]):
			solution+=str(vars[i-1])+" "
cookie = str(re.cookies.get_dict())
cookie = cookie.strip("{ }")[14:40]
cm = 'curl --data "azezaeza'+solution[:-1]+'" --cookie "PHPSESSID='+cookie+'"http://....../Programming/Prog5.php'
os.system(cm)
Example #24
     if page.isRedirectPage():
         rp = page.getRedirectTarget().title()
         with open('tags_dup', 'r') as file:
             data = file.read()
         with open('tags', 'r') as file:
             data_t = file.read()
         with open('tags_dup', 'w') as file:
             data = data.replace('{{' + tag + '}}',
                                 '{{' + rp + '}}|{{' + tag + '}}')
             file.write(data)
         with open('tags', 'w') as file:
             data_t = data_t.replace('{{' + tag + '}}', '{{' + rp + '}}')
             file.write(data_t)
     if not page.isRedirectPage():
         url = 'https://bn.wikipedia.org/w/index.php?title=বিশেষ:সংযোগকারী_পৃষ্ঠাসমূহ/টেমপ্লেট:' + tag + '&hidetrans=1&hidelinks=1'
         response = re.get(url)
         redirs = re.findall(
             r'<li><a.*?>টেমপ্লেট:(.*?)?</a> \(পুনর্নির্দেশ\).*?</li>',
             response.text)
         with open('tags_dup', 'r') as file:
             data = file.read()
         match = re.search(r'{{' + tag + r'}}.*?\n', data)
         rpd = '{{' + tag + '}}|{{' + '}}|{{'.join(redirs) + '}}'
         data = data.replace(rpd, match.group())
         with open('tags_dup', 'w') as file:
             file.write(data)
 if not page.exists():
     with open('tags_dup', 'r') as file:
         data = file.read()
     data = data.replace('{{' + tag + '}}', '')
     with open('tags_dup', 'w') as file:
         file.write(data)
Example #25
#programming number 5
#MtucX
import requests, re, os
re = requests.Session()
vars = []
r = re.get("http://.............../Programming/Prog5.php")
r = r.text
solution = ""
list = r.split(' ')
for i in range(len(list)):
    vars.append(int(list[i]))
for i in range(2, len(vars)):
    if (vars[i - 1] >= vars[i]):
        if (vars[i - 1] >= vars[i - 2]):
            solution += str(vars[i - 1]) + " "
cookie = str(re.cookies.get_dict())
cookie = cookie.strip("{ }")[14:40]
cm = ('curl --data "azezaeza' + solution[:-1] + '" --cookie "PHPSESSID=' +
      cookie + '"http://....../Programming/Prog5.php')
os.system(cm)
Example #26
# df.loc[df['퇴원현황'].str.contains('퇴원'), '퇴원현황'] = '퇴원'
# df.loc[df['퇴원현황'].str.contains('사망'), '퇴원현황'] = '사망'
# df.loc[~df['퇴원현황'].str.contains('퇴원|사망'), '퇴원현황'] = np.nan
# df['퇴원현황'].value_counts()
# ==============================================================
df.loc[0]
update_date = df.loc[0]['확진일'].replace('-', '_')
df.to_csv(f'{dir}seoul_covid19_status_{update_date}.csv', encoding='euc-kr')

### When using requests ###
# html


user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"

### Naver stock quotes ########
### Samsung Electronics -> Inspect -> check the Request URL under the Doc section. ########
# url = 'https://finance.naver.com/item/sise_day.nhn?code=005930&page=1'
# table = pd.read_html(url, encoding='cp949')
# len(table)
# tb[11]
### The Naver site changed - reading the HTML URL directly no longer works. ########

url = 'https://finance.naver.com/item/sise_day.nhn?code=005930&page=1'
r = rq.get(url, headers={"user-agent": user_agent})
wp = r.content.decode('euc-kr')  # 'utf-8') or 'cp949')
tb = pd.read_html(wp)

### When using json_data ###
json_data = json.loads(re.get(url).text)
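
To consolidate the requests + read_html approach shown above, here is a minimal sketch; the function name and its default parameters are illustrative and not part of the original snippet:

import pandas as pd
import requests

USER_AGENT = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
              '(KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36')

def fetch_daily_prices(code='005930', page=1):
    # Request one page of the daily quote table with a browser user-agent,
    # decode the euc-kr payload, and let pandas parse the HTML tables in it.
    url = f'https://finance.naver.com/item/sise_day.nhn?code={code}&page={page}'
    r = requests.get(url, headers={'user-agent': USER_AGENT})
    html = r.content.decode('euc-kr')
    tables = pd.read_html(html)
    # The first parsed table holds the quotes; drop separator rows that parse as NaN.
    return tables[0].dropna()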
Example #27
def sign(req, *opts):
    """
Sign the working document.

:param req: The request
:param opts: Options (unused)
:return: returns the signed working document

Sign expects a single dict with at least a 'key' key and optionally a 'cert' key. The 'key' argument references
either a PKCS#11 uri or the filename containing a PEM-encoded non-password protected private RSA key.
The 'cert' argument may be empty in which case the cert is looked up using the PKCS#11 token, or may point
to a file containing a PEM-encoded X.509 certificate.

**PKCS11 URIs**

A pkcs11 URI has the form

.. code-block:: xml

    pkcs11://<absolute path to SO/DLL>[:slot]/<object label>[?pin=<pin>]

The pin parameter can be used to point to an environment variable containing the pin: "env:<ENV variable>".
By default pin is "env:PYKCS11PIN" which tells sign to use the pin found in the PYKCS11PIN environment
variable. This is also the default for PyKCS11 which is used to communicate with the PKCS#11 module.

**Examples**

.. code-block:: yaml

    - sign:
        key: pkcs11:///usr/lib/libsofthsm.so/signer

This would sign the document using the key with label 'signer' in slot 0 of the /usr/lib/libsofthsm.so module.
Note that you may need to run pyff with env PYKCS11PIN=<pin> .... for this to work. Consult the documentation
of your PKCS#11 module to find out about any other configuration you may need.
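
For illustration, a key that names an explicit slot and takes its pin from an environment variable might look like this (the module path, slot number and label are placeholders composed from the URI form above):

.. code-block:: yaml

    - sign:
        key: pkcs11:///usr/lib/libsofthsm.so:1/signer?pin=env:PYKCS11PIN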

.. code-block:: yaml

    - sign:
        key: signer.key
        cert: signer.crt

This example signs the document using the plain key and cert found in the signer.key and signer.crt files.
    """
    if req.t is None:
        raise PipeException("Your plumbing is missing a select statement.")

    if not type(req.args) is dict:
        raise PipeException("Missing key and cert arguments to sign pipe")

    key_file = req.args.get('key', None)
    cert_file = req.args.get('cert', None)

    if key_file is None:
        raise PipeException("Missing key argument for sign pipe")

    if cert_file is None:
        log.info("Attempting to extract certificate from token...")

    opts = dict()
    re = root(req.t)
    if re.get('ID'):
        opts['reference_uri'] = "#%s" % re.get('ID')
    xmlsec.sign(req.t, key_file, cert_file, **opts)

    return req.t
Example #28
    def getInfoFromBOT(self):
        re = requests.session()
        res = re.get('https://fctc.bot.com.tw/Purchase/WarningPage#')

        soup = BeautifulSoup(res.text, 'html.parser')
        sessionToken = soup.input.get('value')
        payload = {'__RequestVerificationToken': sessionToken}

        res2 = re.post('https://fctc.bot.com.tw/Purchase/SelectCurrencyBank',
                       data=payload)

        soup2 = BeautifulSoup(res2.text, 'html.parser')

        # Map the page's Country_<code> classes to the labels used in exchange_dict.
        currency_labels = {
            'USD': '美金 USD', 'HKD': '港幣 HKD', 'MYR': '馬來幣 MYR',
            'GBP': '英鎊 GBP', 'AUD': '澳幣 AUD', 'CAD': '加拿大幣 CAD',
            'SGD': '新加坡幣 SGD', 'CHF': '瑞士法郎 CHF', 'JPY': '日圓 JPY',
            'SEK': '瑞典幣 SEK', 'NZD': '紐元 NZD', 'THB': '泰幣 THB',
            'PHP': '菲國比索 PHP', 'IDR': '印尼幣 IDR', 'EUR': '歐元 EUR',
            'KRW': '韓元 KRW', 'CNY': '人民幣 CNY', 'VND': '越南盾 VND',
        }
        for i in soup2.find_all('div', {'class': 'Exchange rate'}):
            for code, label in currency_labels.items():
                p = i.find('p', {'class': 'Country_' + code})
                if p:
                    self.exchange_dict[label] = p.findNext(
                        'p', {'class': 'number'}).string
# Start our session.
re = requests.Session()
# Log in
req = re.post('https://mycourses.rit.edu/d2l/lp/auth/login/login.d2l', data={
    'username': args.u,
    'password': password
})

if "Invalid Username" in req.text:
    print("F**k. MyCourses rejected your username and/or password")
    exit()
else:
    print(" Login M'Kay")

r = re.get('https://mycourses.rit.edu/d2l/home')
soup = BeautifulSoup(r.text)

# but first Get the f*****g D2L.LP.Web.Authentication.Xsrf.Init
xsrf = str(soup.findAll("script")[-1]).splitlines()
for line in xsrf:
    if "D2L.LP.Web.Authentication.Xsrf.Init" in line:
        xsrf = line.split("\"")[16][:-1]
        print(" Xsrf is " + xsrf)


# Switch to the current courses.
data = {
    'widgetId': "11",
    "placeholderId$Value": "d2l_1_12_592",
    'selectedRoleId': "604",