Example #1
def get_bd_table(driver):
    # set up the shareholder meeting resolution data
    driver.switch_to.frame(driver.find_element_by_tag_name("iframe"))

    bd_gubun = ''
    bd_kind = ''
    bd_gijun_ymd = ''
    bd_gum = 0
    bd_total = 0

    # dividend table
    bd_tb = driver.find_elements_by_xpath(
        '//*[@id="XFormD1_Form0_Table0"]/tbody/tr')
    for i in range(min(15, len(bd_tb))):  # guard: the table may have fewer than 15 rows
        td_title = bd_tb[i].find_elements_by_tag_name('td')[0].text.replace(
            ' ', '')
        if '배당구분' in td_title:  # dividend category
            bd_gubun = bd_tb[i].find_elements_by_tag_name('td')[1].text
        if '배당종류' in td_title:  # dividend type
            bd_kind = bd_tb[i].find_elements_by_tag_name('td')[1].text
        if '주당배당금' in td_title:  # dividend per share
            bd_gum = get_num(bd_tb[i].find_elements_by_tag_name('td')[2].text)
        if '배당금총액' in td_title:  # total dividend amount
            bd_total = get_num(
                bd_tb[i].find_elements_by_tag_name('td')[1].text)
        if '배당기준일' in td_title:  # dividend record date
            bd_gijun_ymd = get_num(
                bd_tb[i].find_elements_by_tag_name('td')[1].text)

    return bd_gubun, bd_kind, bd_gum, bd_total, bd_gijun_ymd
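A note on the helper: the DART-scraping examples in this list treat get_num as a function that strips every non-digit character from a cell's text and yields the string 'null' when no digits remain (see the == 'null' checks in Examples #2 and #11). A minimal sketch consistent with those call sites, offered as an inference rather than the original helper:

import re

def get_num(text):
    # hypothetical: keep only the digits; return 'null' when none are found
    digits = re.sub(r'[^0-9]', '', str(text))
    return digits if digits else 'null'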
Example #2
def stock_use_ins(arr, meet_seq, rcp_no):
    stock_gijun_yy = get_num(arr[0])
    if stock_gijun_yy == 'null':
        stock_gijun_yy = '0000'
    stock_ymd = arr[1]
    stock_pe_cnt = get_num(arr[2])
    if stock_pe_cnt == 'null':
        stock_pe_cnt = '0'
    stock_give_cnt = get_num(arr[4])
    if stock_give_cnt == 'null':
        stock_give_cnt = '0'
    stock_use_cnt = get_num(arr[5])
    if stock_use_cnt == 'null':
        stock_use_cnt = '0'
    stock_actual_cnt = get_num(arr[6])
    if stock_actual_cnt == 'null':
        stock_actual_cnt = '0'
    stock_extra_cnt = get_num_int(arr[7])
    if stock_extra_cnt == 'null':
        stock_extra_cnt = '0'

    in_qry = """insert into proxy027(meet_seq, rcp_no, stock_gijun_yy, stock_ymd, stock_pe_cnt, 
                                     stock_kind, stock_give_cnt, stock_use_cnt, stock_actual_cnt, stock_extra_cnt, create_dt, modify_dt)
                                 values('{0}', '{1}', '{2}', '{3}', {4}, '{5}', {6}, {7}, {8}, {9}, '{10}', '{11}')
                """.format(meet_seq, rcp_no, stock_gijun_yy, stock_ymd,
                           stock_pe_cnt, arr[3], stock_give_cnt, stock_use_cnt,
                           stock_actual_cnt, stock_extra_cnt, create_dt,
                           create_dt)

    return in_qry
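Because the INSERT above splices values into the SQL string with .format, a quote character in any field would break the query or open it to injection. A safer sketch using DB-API placeholders (the function name and cursor argument are hypothetical; it assumes a pymysql-style driver):

def stock_use_ins_params(cursor, meet_seq, rcp_no, vals, create_dt):
    # hypothetical parameterized variant of stock_use_ins; the driver
    # escapes each value, so no manual quoting is needed
    qry = ("insert into proxy027(meet_seq, rcp_no, stock_gijun_yy, stock_ymd, "
           "stock_pe_cnt, stock_kind, stock_give_cnt, stock_use_cnt, "
           "stock_actual_cnt, stock_extra_cnt, create_dt, modify_dt) "
           "values(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
    cursor.execute(qry, (meet_seq, rcp_no, *vals, create_dt, create_dt))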
Example #3
File: Tsb.py Project: rdkr/pybank
    def main_page(self, s, r):
        
        soup = BeautifulSoup(r.text, 'html.parser')
      
        for accountEntry in soup.find(id = 'lstAccLst').findAll('li', recursive=False):

            # get account details and add to accounts list

            r = s.get('https://secure.tsb.co.uk' + accountEntry.find('h2').a['href'])
            soup = BeautifulSoup(r.text, 'html.parser')

            accountNumbers = soup.find(class_ = 'numbers').get_text().split(', ')
            
            acc = {'bank': 'TSB'}

            acc['name'] = soup.find('h1').get_text()
            acc['sort'] = accountNumbers[0].replace('-', '')
            acc['number'] = accountNumbers[1]

            acc['balance'] = get_num(soup.find(class_ = 'balance').get_text())
            acc['available'] = get_num(soup.find(class_ = 'manageMyAccountsFaShowMeAnchor {bubble : \'fundsAvailable\', pointer : \'top\'}').parent.get_text())
            
            self.accounts.append(acc)

            # download transaction files

            r = s.get('https://secure.tsb.co.uk' + soup.find(id = 'pnlgrpStatement:conS1:lkoverlay')['href'])
            soup = BeautifulSoup(r.text, 'html.parser')

            # get all form fields, put in dictionary d - http://stackoverflow.com/a/32074666
            d = {e['name']: e.get('value', '') for e in soup.find_all('input', {'name': True})}

            now = time.localtime(time.time())
            yearAgo = time.localtime(time.time() - 6570000) # ~2.5 months ago

            # will download current view if 0, past 2.5 months if 1
            d['frmTest:rdoDateRange'] = '0'
            
            d['frmTest:dtSearchFromDate'] = time.strftime('%d', yearAgo) 
            d['frmTest:dtSearchFromDate.month'] = time.strftime('%m', yearAgo) 
            d['frmTest:dtSearchFromDate.year'] = str(time.strftime('%Y', yearAgo)) 

            d['frmTest:dtSearchToDate'] = time.strftime('%d', now) 
            d['frmTest:dtSearchToDate.month'] = time.strftime('%m', now)
            d['frmTest:dtSearchToDate.year'] = str(time.strftime('%Y', now))

            d['frmTest:strExportFormatSelected'] = 'Quicken 98 and 2000 and Money (.QIF)'
            
            r = s.post('https://secure.tsb.co.uk/personal/a/viewproductdetails/m44_exportstatement_fallback.jsp', data=d)

            filename = time.strftime('%Y%m%d') + '-' + acc['sort'] + '-' + acc['number'] + '.qif'
            with open(filename, 'w') as f:
                f.write(r.text)
Example #4
def friends(user_id=None):
    if user_id is None and request.method == 'GET':
        return render_template('new_friend.html')

    if request.method == 'POST':
        user_id1 = utils.get_num(request, 'user_id1', required=True)
        user_id2 = utils.get_num(request, 'user_id2', required=True)

        return users.new_friend(user_id1, user_id2)

    return users.get_friends(user_id)
Example #5
def do_comment():
    u = require_token()

    pid = get_num(request.form.get('pid'))

    post = Post.query.get(pid)
    if not post: abort(404)
    if post.deleted: abort(451)

    content = request.form.get('text')
    content = content.strip() if content else None
    content = '[tmp]\n' + content if u.name[:4] == 'tmp_' else content
    if not content or len(content) > 4096: abort(422)

    c = Comment(
            name_hash = hash_name(u.name),
            content = content,
            )
    post.comments.append(c)
    post.comment_timestamp = c.timestamp
    db.session.commit()

    return {
            'code': 0,
            'data': pid
            }
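In the handlers from this project (Examples #5, #7, #19, #20, #22 through #25), get_num parses a numeric request parameter into an integer. A plausible sketch matching how the call sites use it; this is an assumption, and the project's actual helper may differ in its error handling:

from flask import abort

def get_num(s):
    # hypothetical: parse a form/query parameter as an int; 422 on garbage
    try:
        return int(s)
    except (TypeError, ValueError):
        abort(422)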
Example #6
def users_search():
    if request.method == 'GET':
        return render_template('search.html')

    query = utils.get_field(request, 'query', required=True)
    user_id = utils.get_num(request, 'user_id', required=True)
    return users.search_users(query, user_id)
Example #7
def attention():
    u = require_token()
    if u.name[:4] == 'tmp_': abort(403)

    s = request.form.get('switch')
    if s not in ['0', '1']: abort(422)

    pid = get_num(request.form.get('pid'))
    
    post = Post.query.get(pid)
    if not post: abort(404)

    at = Attention.query.filter_by(name_hash=hash_name(u.name), pid=pid).first()

    if not at:
        at = Attention(name_hash=hash_name(u.name), pid=pid, disabled=True)
        db.session.add(at)

    if at.disabled != (s == '0'):
        at.disabled = (s == '0')
        post.likenum += 1 - 2 * int(s == '0')
        db.session.commit()

    return {
            'code': 0,
            'likenum': post.likenum,
            'attention': (s=='1')
            }
Example #8
def pay_handler():
    if request.method == 'POST':
        data = request_form_to_dict()
        pid = int(data.get("pid"))
        count = int(data.get("amount"))
        period = Period.get(id=pid)
        time_now = datetime.now()
        left = period.total_count - period.join_count
        if left >= count:
            num = get_num(pid, count)
            # create or update the lottery order detail record
            create_or_update_order_detail(current_user.id, pid, {
                "count": count,
                "created_datetime": time_now,
                "num": num
            })
            # update the user's balance
            update_user(current_user.id, current_user.balance - count)
            if left == count:
                kj_time = get_kj_time(time_now)
                update_period(pid, {
                    "join_count": period.join_count + count,
                    "status": 1,
                    "end_time": time_now,
                    "kj_count": create_kj_count(time_now),
                    "kj_time": kj_time,
                })
                create_period(period.product.id, period.total_count, period.number + 1)
            else:
                update_period(pid, {
                    "join_count": period.join_count + count,
                })
            return redirect(url_for('period_detail', pid=pid))
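Note that get_num(pid, count) in this example is a different helper from the numeric parsers elsewhere in this list: judging from the surrounding code, it allocates count lottery numbers for period pid and returns them for the order detail record.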
Example #9
    def main_page(self, s, r):

        soup = BeautifulSoup(r.text, 'html.parser')

        acc = {'bank': 'CapitalOne'}

        acc['original_available'] = get_num(
            soup.find(
                text='Available to spend').parent.parent.find('div').text)
        acc['limit'] = get_num(
            soup.find(text='Credit limit').parent.parent.text)
        acc['balance'] = get_num(
            soup.find(text='Current balance').parent.parent.text)

        acc['available'] = None

        # get available in terms of debit
        # float-precision-safe comparison
        if abs(acc['original_available'] - acc['balance']) <= 0.01:
            acc['available'] = -acc['balance']
        else:
            acc['available'] = acc['original_available'] - acc['limit']

        acc['name'] = 'Capital One'

        self.accounts.append(acc)

        # download transaction files

        r = s.get(
            'https://www.capitaloneonline.co.uk/CapitalOne_Consumer/Transactions.do'
        )
        soup = BeautifulSoup(r.text, 'html.parser')

        d = {}
        d['org.apache.struts.taglib.html.TOKEN'] = \
            soup.find('input',{'name': 'org.apache.struts.taglib.html.TOKEN'})['value']
        d['downloadType'] = 'qif'

        r = s.post(
            'https://www.capitaloneonline.co.uk/CapitalOne_Consumer/DownLoadTransaction.do',
            data=d)

        filename = time.strftime('%Y%m%d') + '-' + 'cap1.qif'
        with open(filename, 'w') as f:
            f.write(r.text)
Example #10
def get_restaurants():
    if request.method == 'GET':
        return render_template('restaurants.html',
                               data=restaurants.filters_object())

    lat = utils.get_field(request, 'lat', required=True)
    lng = utils.get_field(request, 'long', required=True)
    rad = utils.get_num(request, 'radius', 1, 20, required=True)
    cuisines = utils.get_list(request, 'cuisines')
    categories = utils.get_list(request, 'categories')
    price = utils.get_num(request, 'price', required=True)
    user_id = utils.get_num(request, 'user_id', required=True)
    limit = utils.get_num(request, 'limit')
    offset = utils.get_num(request, 'offset')

    return restaurants.get_restaurants(lat, lng, rad, price, limit, offset,
                                       cuisines, categories)
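A plausible sketch of the utils.get_num helper these Flask views share, inferred from call sites such as get_num(request, 'radius', 1, 20, required=True); the implementation below is assumed, not taken from the project:

from flask import abort

def get_num(request, field, min_val=None, max_val=None, required=False):
    # hypothetical: read a numeric field from the request, enforce an
    # optional [min_val, max_val] range, and fail fast when required
    raw = request.values.get(field)
    if raw is None:
        if required:
            abort(400)
        return None
    try:
        num = float(raw)
    except ValueError:
        abort(400)
    if min_val is not None and num < min_val:
        abort(400)
    if max_val is not None and num > max_val:
        abort(400)
    return num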
Example #11
def stock_extra_ins(arr, meet_seq, rcp_no):
    stock_total_cnt = get_num(arr[0])
    if stock_total_cnt == 'null':
        stock_total_cnt = '0'
    stock_avl_cnt = get_num(arr[3])
    if stock_avl_cnt == 'null':
        stock_avl_cnt = '0'
    stock_ext_cnt = get_num(arr[4])
    if stock_ext_cnt == 'null':
        stock_ext_cnt = '0'

    in_qry = """insert into proxy026(meet_seq, rcp_no, stock_total_cnt, stock_avl_range, stock_kind, stock_avl_cnt, stock_ext_cnt, create_dt, modify_dt)
                                 values('{0}', '{1}', '{2}', '{3}', '{4}', {5}, {6}, '{7}', '{8}')
                """.format(meet_seq, rcp_no, stock_total_cnt, arr[1], arr[2],
                           stock_avl_cnt, stock_ext_cnt, create_dt, create_dt)

    return in_qry
Example #12
def search():
    u = require_token()

    page     = get_num(request.args.get('page'))
    pagesize = min(get_num(request.args.get('pagesize')), 200)  # cap page size at 200
    keywords = request.args.get('keywords')

    pids = [tr.pid for tr in TagRecord.query.filter_by(tag=keywords).order_by(db.desc('pid')).paginate(page, pagesize).items]

    posts = [Post.query.get(pid) for pid in pids]
    data = [map_post(post, u.name) for post in posts if post and not post.deleted]

    return {
            'code': 0,
            'count': len(data),
            'data': data
            }
Example #13
    def nextRound(self):
        g.screen.fill((0, 0, 0))
        self.next_button.set_sensitive(False)
        self.journal = True  # set to False if we come in via main()
        self.num = utils.get_num(self.level)
        self.lives, self.offset = utils.get_lives(self.level)
        self.status = None
        self.input = []
        self.attempts_list = []
        self.compare_list = []
Example #14
    def __init__(self, level=3, parent=None):
        self.parent = parent
        self.level = level
        self.journal = True  # set to False if we come in via main()
        self.num = utils.get_num(self.level)
        self.lives, self.offset = utils.get_lives(self.level)
        self.status = None
        self.input = []
        self.compare_list = []
        self.attempts_list = []
        self.score = 0
        self.parent.update_score(self.score)
Example #15
    def change_level(self, level):
        g.screen.fill((0, 0, 0))
        self.level = level
        self.journal = True  # set to False if we come in via main()
        self.num = utils.get_num(self.level)
        self.lives, self.offset = utils.get_lives(self.level)
        self.status = None
        self.input = []
        self.attempts_list = []
        self.compare_list = []
        self.score = 0
        self.parent.update_score(self.score)
        self.next_button.set_sensitive(False)
Example #16
File: CapitalOne.py Project: rdkr/pybank
    def main_page(self, s, r):
        
        soup = BeautifulSoup(r.text, 'html.parser')
     
        acc = {'bank': 'CapitalOne'}

        acc['original_available'] = get_num(soup.find(text = 'Available to spend').parent.parent.find('div').text)
        acc['limit'] = get_num(soup.find(text = 'Credit limit').parent.parent.text)
        acc['balance'] = get_num(soup.find(text = 'Current balance').parent.parent.text)

        acc['available'] = None

        # get available in terms of debit
        # float-precision-safe comparison
        if abs(acc['original_available'] - acc['balance']) <= 0.01:
            acc['available'] = -acc['balance']
        else:
            acc['available'] = acc['original_available'] - acc['limit']

        acc['name'] = 'Capital One'

        self.accounts.append(acc)

        # download transaction files

        r = s.get('https://www.capitaloneonline.co.uk/CapitalOne_Consumer/Transactions.do')
        soup = BeautifulSoup(r.text, 'html.parser')

        d = {}
        d['org.apache.struts.taglib.html.TOKEN'] = \
            soup.find('input',{'name': 'org.apache.struts.taglib.html.TOKEN'})['value']
        d['downloadType'] = 'qif'

        r = s.post('https://www.capitaloneonline.co.uk/CapitalOne_Consumer/DownLoadTransaction.do', data=d)

        filename = time.strftime('%Y%m%d') + '-' + 'cap1.qif'
        with open(filename, 'w') as f:
            f.write(r.text)
Example #17
File: Nationwide.py Project: rdkr/pybank
    def main_page(self, s, r):

        soup = BeautifulSoup(r.text, "html.parser")

        for accountEntry in soup.find_all(class_="account-row"):

            # get account details and add to accounts list

            r = s.get("https://onlinebanking.nationwide.co.uk" + accountEntry.find(class_="acLink")["href"])
            soup = BeautifulSoup(r.text, "html.parser")

            accInfo = soup.find(class_="stage-head-ac-info")
            accNumbers = accInfo.find("h2").get_text()

            acc = {"bank": "Nationwide"}

            acc["name"] = accNumbers.splitlines()[3].lstrip()
            acc["sort"] = accNumbers.splitlines()[4].lstrip().split()[0]
            acc["number"] = accNumbers.splitlines()[4].lstrip().split()[1]

            acc["balance"] = get_num(accInfo.find_all("dd")[0].get_text())
            acc["available"] = get_num(accInfo.find_all("dd")[1].get_text())

            self.accounts.append(acc)

            # download transaction files

            d = {}
            d["__token"] = soup.find(id="transactionsfullstatementdownloadfs")["value"]
            d["downloadType"] = "Ofx"

            r = s.post("https://onlinebanking.nationwide.co.uk/Transactions/FullStatement/DownloadFS", data=d)

            filename = time.strftime("%Y%m%d") + "-" + acc["sort"] + "-" + acc["number"] + ".ofx"
            file = open(filename, "w")
            file.write(r.text)
            file.close()
Example #18
    def main_page(self, s, r):
        
        soup = BeautifulSoup(r.text, 'html.parser')

        for accountEntry in soup.find_all(class_ = 'account-row'):

            # get account details and add to accounts list

            r = s.get('https://onlinebanking.nationwide.co.uk' + accountEntry.find(class_ = 'acLink')['href'])
            soup = BeautifulSoup(r.text, 'html.parser')

            accInfo = soup.find(class_ = 'stage-head-ac-info')
            accNumbers = accInfo.find('h2').get_text()
            
            acc = {'bank': 'Nationwide'}

            acc['name'] = accNumbers.splitlines()[3].lstrip()
            acc['sort'] = accNumbers.splitlines()[4].lstrip().split()[0]
            acc['number'] = accNumbers.splitlines()[4].lstrip().split()[1]
            
            acc['balance'] = get_num(accInfo.find_all('dd')[0].get_text())
            acc['available'] = get_num(accInfo.find_all('dd')[1].get_text())

            self.accounts.append(acc)

            # download transaction files

            d = {}
            d['__token'] = soup.find(id = 'transactionsfullstatementdownloadfs')['value']
            d['downloadType'] = 'Ofx'

            r = s.post('https://onlinebanking.nationwide.co.uk/Transactions/FullStatement/DownloadFS', data=d)

            filename = time.strftime('%Y%m%d') + '-' + acc['sort'] + '-' + acc['number'] + '.ofx'
            with open(filename, 'w') as f:
                f.write(r.text)
Example #19
def report():
    u = require_token()

    pid = get_num(request.form.get('pid'))

    reason = request.form.get('reason', '')

    db.session.add(Syslog(
            log_type='REPORT',
            log_detail=f"pid={pid}\n{reason}",
            name_hash=hash_name(u.name)
        ))
    db.session.commit()

    return {'code': 0}
Example #20
def get_one():
    u = require_token()
    
    pid = get_num(request.args.get('pid'))

    post = Post.query.get(pid)
    if not post: abort(404)
    if post.deleted: abort(451)

    data = map_post(post, u.name)

    return {
            'code': 0,
            'data': data
            }
Example #21
def get_stockoption(driver, meet_seq, rcp_no, cursor):
    try:
        # print('------------------------- stock options -------------------------')
        person_arr, method_arr, extra_stock_arr, use_stock_arr = dis_stockoption(
            driver)
        # names, etc. of the persons granted stock options
        if person_arr:
            for i in range(0, len(person_arr)):
                person_arr[i][4] = get_num(person_arr[i][4])

        stockoption_db(meet_seq, rcp_no, person_arr, method_arr,
                       extra_stock_arr, use_stock_arr, cursor)

        info_logger.info('[10] stock option success.')
    except Exception as e:
        error_logger.error('[10] stock option fail. [{0}] : {1}'.format(
            rcp_no, e))
Example #22
def get_comment():
    u = require_token()

    pid = get_num(request.args.get('pid'))

    post = Post.query.get(pid)
    if not post: abort(404)
    if post.deleted: abort(451)

    data = map_comment(post, u.name)
    
    return {
            'code': 0,
            'attention': check_attention(u.name, pid),
            'likenum': post.likenum,
            'data': data
            }
Example #23
def get_list():
    u = require_token()

    p = get_num(request.args.get('p'))

    posts = Post.query.filter_by(deleted=False)
    if 'no_cw' in request.args:
        posts = posts.filter_by(cw=None)
    posts = posts.order_by(db.desc('comment_timestamp')) if 'by_c' in request.args else posts.order_by(db.desc('id'))
    posts = posts.paginate(p, PER_PAGE)

    data = [map_post(post, u.name) for post in posts.items]

    return {
            'code': 0,
            'tmp_token': tmp_token(),
            'count': len(data),
            'data': data
            }
Example #24
def delete():
    u = require_token()

    obj_type = request.form.get('type')
    obj_id = get_num(request.form.get('id'))
    note = request.form.get('note')

    if note and len(note)>100: abort(422)

    obj = None
    if obj_type == 'pid':
        obj = Post.query.get(obj_id)
    elif obj_type == 'cid':
        obj = Comment.query.get(obj_id)
    if not obj: abort(404)

    if obj.name_hash == hash_name(u.name):
        if obj_type == 'pid':
            if len(obj.comments): abort(403)
            Attention.query.filter_by(pid=obj.id).delete()
            TagRecord.query.filter_by(pid=obj.id).delete()
            db.session.delete(obj)
        else:
            obj.deleted = True
    elif u.name in app.config.get('ADMINS'):
        obj.deleted = True
        db.session.add(Syslog(
            log_type='ADMIN DELETE',
            log_detail=f"{obj_type}={obj_id}\n{note}",
            name_hash=hash_name(u.name)
            ))
        if note and note.startswith('!ban'):
            db.session.add(Syslog(
                log_type='BANNED',
                log_detail=f"=> {obj_type}={obj_id}",
                name_hash=obj.name_hash
                ))
    else:
        abort(403)

    db.session.commit()
    return {'code': 0}
Example #25
def edit_cw():
    u = require_token()

    cw = request.form.get('cw')
    pid = get_num(request.form.get('pid'))

    cw = cw.strip() if cw else None
    if cw and len(cw)>32: abort(422)

    post = Post.query.get(pid)
    if not post: abort(404)
    if post.deleted: abort(451)

    if not (u.name in app.config.get('ADMINS') or hash_name(u.name) == post.name_hash):
        abort(403)

    post.cw = cw
    db.session.commit()

    return {'code': 0}
Example #26
def isa_elect_ins(arr, meet_seq, rcp_no, seq):
    # strip newlines
    for i in range(0, len(arr)):
        arr[i] = str(arr[i]).replace("\n", "")

    # classify the director type: 사외이사 = outside director, 감사 = auditor,
    # 기타비상무 = other non-executive; the default is inside director
    is_out = arr[2]
    if '사외이사' in is_out or is_out in ('예', 'Y', 'y', '해당', 'O', 'o', '○'):
        is_out = '2'
    elif is_out == '감사':
        is_out = '3'
    elif '기타비상무' in is_out or '비상무' in is_out or '기타' in is_out:
        is_out = '4'
    else:
        is_out = '1'

    # determine whether the birth field is a year-month or a full birth date
    birth = arr[1]
    if len(get_num(birth)) < 7:
        birth = make_birth(birth)
    else:
        birth = make_birth_ymd(birth)

    # remove spaces from the name
    pe_nm = str(arr[0]).replace(" ", "")
    pe_nm = get_nm(pe_nm)
    jm_code = meet_seq[:6]
    pe_code = get_pecode(jm_code, pe_nm)

    in_qry = """insert into proxy021(meet_seq, rcp_no, isa_gb, pe_seq, pe_code, pe_nm, pe_birth, is_out, juju_rel, recommender, bigo, create_dt, modify_dt)
                                 values('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}', '{11}', '{12}')
                """.format(meet_seq, rcp_no, arr[5],
                           jm_code + str(seq + 1).zfill(4), pe_code, pe_nm,
                           birth, is_out, arr[3], arr[4], '', create_dt,
                           create_dt)

    return in_qry
Example #27
    def parse(self, response):
        hxs = HtmlXPathSelector(response)
        h_div = get_one(hxs.select('//body/div[@id="bodydiv"]/div[@class="main_content"]/div/div[@class="left_cont"]/div[@id="msnew"]/div[@class="msnew_infobar"]'))
        xpath_list_div = [
            ['start_time_str', 'div[@class="msnew_infotitle"]/p[@class="msinfotitle_left02"]/text()', 'strip', None], 
            ['from_time', 'span[@class="CountDown"]/@fr', 'int', None], 
            ['to_time', 'span[@class="CountDown"]/@to', 'int', None], 
        ]
        if not h_div:
            self.log('no page to parse', level = log.WARNING)
            return
        attr_dict = get_attr(xpath_list_div, h_div)
        start_time_str = str(datetime.datetime.now().date()) + " " + attr_dict['start_time_str'] 
        print 'start_time_str ' + start_time_str.encode('utf8')
        display_time_begin = int(datetime.datetime.strptime(start_time_str, "%Y-%m-%d %H:%M:%S").strftime("%s"))
        print 'display_time_begin ' + str(display_time_begin)
        cost_seconds = ((attr_dict['to_time'] - attr_dict['from_time']) / 1000)
        print 'cost_seconds ' + str(cost_seconds)
        display_time_end = display_time_begin + cost_seconds
        print 'display_time_end ' + str(display_time_end)
        
        h_li_array = hxs.select('//body/div[@id="bodydiv"]/div[@class="main_content"]/div/div[@class="left_cont"]/div[@id="msnew"]/div[@class="msnew_infobar"]/ul[@id="ms_tuanlist"]/li')
        print "len " + str(len(h_li_array))

        xpath_list = [
            ['img_url', 'div[@class="pic"]/a/img/@src', 'string', None],
            ['title', 'div[@class="info"]/span/a[@target="_blank"]/@title', 'string', None],
            ['url', 'div[@class="info"]/span/a[@target="_blank"]/@href', 'string', None],
            ['origin_price', 'div[@class="info"]/span[@class="gray6"]/text()', 'get_float_str_to_fen', None],
            ['current_price', 'div[@class="info"]/span[@class="redf16"]/text()', 'get_float_str_to_fen', None],
            ['sale_info', 'div[@class="info"]/span[@class="red94"]/text()', 'string', None],
        ]
        ret_items = []
        for h_li in h_li_array:
            attr_dict = get_attr(xpath_list, h_li)
            if attr_dict['url'][0] == '/':
                attr_dict['url'] = 'http://tuan.vancl.com' + attr_dict['url'] 

            limit = get_num(attr_dict['sale_info'].split(' ')[0])
            left = get_num(attr_dict['sale_info'].split(' ')[1])
            sale = limit - left
            print 'limit ' + str(limit) + " left " + str(left)
            prod = VanclMiaoshaItem()
            prod['link'] = attr_dict['url']
            prod['id'] = hashlib.md5(prod['link']).hexdigest().upper()
            prod['title'] = attr_dict['title']
            prod['img'] = attr_dict['img_url']
            prod['ori_price'] = attr_dict['origin_price']
            prod['cur_price'] = attr_dict['current_price']
            prod['discount'] = get_discount(attr_dict['origin_price'], attr_dict['current_price'])
            #TODO
            prod['stat'] = utils.BEGIN
            prod['sale'] = sale
            prod['sale_percent'] = sale * 100 / limit
            prod['display_time_begin'] = display_time_begin
            prod['display_time_end'] = display_time_end
            #prod['actual_time_begin'] = start_time
            #prod['actual_time_end'] = start_time
            prod['limit'] = limit
            prod['source'] = self.display_name
            ret_items.append(prod)

        return ret_items
Example #28
File: analyze.py Project: toonhaer/WISC
def exposure(data_path, country, parallel=True):
    """"
    Creation of exposure table of the specified country
    
    Arguments:
        data_path {string} -- string of data path where all data is located.
        country {string} -- ISO2 code of country to consider.
    
    Keyword Arguments:
        parallel {bool} -- calculates all regions within a country parallel. Set to False if you have little capacity on the machine (default: {True})
    
    Returns:
        dataframe -- pandas dataframe with all buildings of the country and potential exposure to wind
    """

    input_ = buildings(country, parallel=False)

    #==============================================================================
    # Fill table
    #==============================================================================

    # Specify Country
    input_["COUNTRY"] = country

    # Calculate area
    input_["AREA_m2"] = input_.geometry.area

    # Determine centroid
    input_["centroid"] = input_.geometry.centroid

    # Get land use
    nuts_eu = gpd.read_file(
        os.path.join(data_path, 'input_data', 'NUTS3_ETRS.shp'))
    nuts_eu.loc[nuts_eu['NUTS_ID'] == country].to_file(
        os.path.join(data_path, country, 'NUTS2_SHAPE',
                     '{}.shp'.format(country)))

    CLC_2012 = os.path.join(data_path, country, 'NUTS2_LANDUSE',
                            '{}_LANDUSE.tif'.format(country))
    clip_landuse(data_path, country, CLC_2012)

    input_['CLC_2012'] = point_query(list(input_['centroid']),
                                     CLC_2012,
                                     nodata=-9999,
                                     interpolate='nearest')

    print('Finished coupling land-use to buildings for {}'.format(country))
    #==============================================================================
    # Loop through storms
    #==============================================================================
    storm_list = get_storm_list(data_path)
    for outrast_storm in storm_list:
        storm_name = str(get_num(outrast_storm[-23:]))
        input_[storm_name] = point_query(list(input_['centroid']),
                                         outrast_storm,
                                         nodata=-9999,
                                         interpolate='nearest')

    print('Finished the exposure table for {}'.format(country))

    return input_
Example #29
arg = parser.parse_args()

conf_thresh = 0.5
duke_resize_scale = 0.1
duke_sample_step = 6
metric_dir = 'metric'
txt_name = 'duke.txt' if arg.task == 'duke' else 'metric.txt'

data_dir = path.join('data', arg.task, 'pt', arg.subtask, metric_dir)
result_metric_dir = path.join('result', arg.task, arg.subtask, arg.exp,
                              arg.model, metric_dir)

pt_names = glob.glob(path.join(result_metric_dir, '*.pt'))  # get filenames
pt_names = [path.basename(pt_name)
            for pt_name in pt_names]  # remove directory name
pt_names.sort(key=lambda f: utils.get_num(f))  # sort filename by number
S = len(pt_names)

pt_tmp = torch.load(path.join(result_metric_dir, pt_names[0]))
N = pt_tmp.size(0)
T = pt_tmp.size(1)
O = pt_tmp.size(2)
assert pt_tmp.size(3) == 5, 'Wrong tensor size.'

# load results from all pt files
res = torch.Tensor(N, S, T, O, 5).zero_()
for s in range(0, S):
    pt = torch.load(path.join(result_metric_dir, pt_names[s]))  # N * T * O * 5
    res[:, s].copy_(pt)
res = res.view(-1, O, 5)  # NST * O * 5, F * O * 5
if arg.task == 'duke':
Example #30
File: analyze.py Project: toonhaer/WISC
def losses(data_path, country, parallel=True, storm_event_set=False):
    """"Estimation of the losses for all buildings in a country to the pre-defined list of storms
    
    Arguments:
        data_path {string} -- string of data path where all data is located.
        country {string} -- ISO2 code of country to consider.
    
    Keyword Arguments:
        parallel {bool} -- calculates all regions within a country parallel. Set to False if you have little capacity on the machine (default: {True})
    
    Returns:
        dataframe -- pandas dataframe with all buildings of the country and their losses for each wind storm

    """

    start = time.time()

    # load storms
    if not storm_event_set:
        storm_list = get_storm_list(data_path)
        storm_name_list = [str(get_num(x[-23:])) for x in storm_list]

    # load max damage values
    max_dam = load_max_dam(data_path)

    # load curves
    curves = load_curves(data_path)

    # load sample
    sample = load_sample(country)

    output_table = exposure(data_path, country, parallel=False)

    no_storm_columns = list(
        set(output_table.columns).difference(list(storm_name_list)))
    write_output = pd.DataFrame(output_table[no_storm_columns])

    for storm in storm_name_list:
        ##==============================================================================
        ## Calculate losses for buildings in this NUTS region
        ##==============================================================================
        max_dam_country = np.asarray(
            max_dam[max_dam['CODE'].str.contains(country)].iloc[:, 1:],
            dtype='int16')

        df_C2 = pd.DataFrame(output_table[['AREA_m2', 'CLC_2012', storm]])
        df_C3 = pd.DataFrame(output_table[['AREA_m2', 'CLC_2012', storm]])
        df_C4 = pd.DataFrame(output_table[['AREA_m2', 'CLC_2012', storm]])

        df_C2[storm + '_curve'] = df_C2[storm].map(curves['C2'])
        df_C3[storm + '_curve'] = df_C3[storm].map(curves['C3'])
        df_C4[storm + '_curve'] = df_C4[storm].map(curves['C4'])

        # specify shares for urban and non-urban areas
        RES_URB = 1 - sample[3] / 100
        IND_URB = sample[3] / 100

        RES_NONURB = 0.5
        IND_NONURB = 0.5

        # Use pandas where to fill new column for losses
        df_C2['Loss'] = np.where(
            df_C2['CLC_2012'].between(0, 12, inclusive=True),
            (df_C2['AREA_m2'] * df_C2[storm + '_curve'] *
             max_dam_country[0, 0] * RES_URB + df_C2['AREA_m2'] *
             df_C2[storm + '_curve'] * max_dam_country[0, 2] * IND_URB) *
            (sample[0] / 100), 0)
        df_C2['Loss'] = np.where(
            df_C2['CLC_2012'].between(13, 23, inclusive=True),
            (df_C2['AREA_m2'] * df_C2[storm + '_curve'] *
             max_dam_country[0, 0] * RES_NONURB + df_C2['AREA_m2'] *
             df_C2[storm + '_curve'] * max_dam_country[0, 2] * IND_NONURB) *
            (sample[0] / 100), df_C2['Loss'])

        df_C3['Loss'] = np.where(
            df_C3['CLC_2012'].between(0, 12, inclusive=True),
            (df_C3['AREA_m2'] * df_C3[storm + '_curve'] *
             max_dam_country[0, 0] * RES_URB + df_C3['AREA_m2'] *
             df_C3[storm + '_curve'] * max_dam_country[0, 2] * IND_URB) *
            (sample[1] / 100), 0)
        df_C3['Loss'] = np.where(
            df_C3['CLC_2012'].between(13, 23, inclusive=True),
            (df_C3['AREA_m2'] * df_C3[storm + '_curve'] *
             max_dam_country[0, 0] * RES_NONURB + df_C3['AREA_m2'] *
             df_C3[storm + '_curve'] * max_dam_country[0, 2] * IND_NONURB) *
            (sample[1] / 100), df_C3['Loss'])

        df_C4['Loss'] = np.where(
            df_C4['CLC_2012'].between(0, 12, inclusive=True),
            (df_C4['AREA_m2'] * df_C4[storm + '_curve'] *
             max_dam_country[0, 0] * RES_URB + df_C4['AREA_m2'] *
             df_C4[storm + '_curve'] * max_dam_country[0, 2] * IND_URB) *
            (sample[2] / 100), 0)
        df_C4['Loss'] = np.where(
            df_C4['CLC_2012'].between(13, 23, inclusive=True),
            (df_C4['AREA_m2'] * df_C4[storm + '_curve'] *
             max_dam_country[0, 0] * RES_NONURB + df_C4['AREA_m2'] *
             df_C4[storm + '_curve'] * max_dam_country[0, 2] * IND_NONURB) *
            (sample[2] / 100), df_C4['Loss'])

        # and write the output
        write_output[storm] = (df_C2['Loss'].fillna(0).astype(int) +
                               df_C3['Loss'].fillna(0).astype(int) +
                               df_C4['Loss'].fillna(0).astype(int))

    print('Finished estimating the losses for {}'.format(country))

    end = time.time()

    print('{} took {} minutes to finish.'.format(
        country, str(np.float16((end - start) / 60))))

    return gpd.GeoDataFrame(write_output)
Example #31
# Get image_names and cam_ids
img_names_total = {'train': [], 'test': []}
cam_ids_total = {'train': [], 'test': []}
rg = range(1, 9) if arg.c == -1 else range(arg.c, arg.c+1)
for cam_id in rg:
    start_bias = start_times[cam_id-1] - 1
    train_start = trainval[0] - start_bias
    train_end = trainval[1] - start_bias
    test_start = test_easy[0] - start_bias
    test_end = test_easy[1] - start_bias
    input_img_dir = path.join(input_dir, 'camera'+str(cam_id))
    img_names = glob.glob(path.join(input_img_dir, '*.jpg')) # get filenames
    img_names = [path.basename(img_name) for img_name in img_names] # remove directory name
    if arg.metric == 0:
        img_names = list(filter(lambda f: train_start <= utils.get_num(f) <= train_end, img_names))
    else:
        img_names = list(filter(lambda f: test_start <= utils.get_num(f) <= test_end, img_names))
    img_names.sort(key=lambda f: utils.get_num(f)) # sort filename by number
    frame_num = len(img_names)
    print('frame number in camera' + str(cam_id) + ': ' + str(frame_num))
    train_frame_num = math.floor(frame_num * train_ratio)
    test_frame_num = math.floor(frame_num * (1 - train_ratio))
    img_names_total['train'] += img_names[0: train_frame_num]
    img_names_total['test']  += img_names[train_frame_num: train_frame_num+test_frame_num]
    cam_ids_total['train']   += [cam_id] * train_frame_num
    cam_ids_total['test']    += [cam_id] * test_frame_num
train_frame_num = len(img_names_total['train'])
test_frame_num = len(img_names_total['test'])
print('train frame number: ' + str(train_frame_num))
print('test frame number: ' + str(test_frame_num))
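Examples #29 and #31 use utils.get_num as a filter and sort key that extracts the frame number embedded in a filename. A minimal sketch of such a helper, assuming each name contains a single run of digits (inferred, not the project's code):

import re

def get_num(filename):
    # hypothetical: pull the integer out of a name like 'frame_000123.jpg'
    match = re.search(r'\d+', filename)
    return int(match.group()) if match else -1

Sorting with img_names.sort(key=get_num) then orders frames numerically rather than lexicographically.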
Example #32
    def main_page(self, s, r):

        soup = BeautifulSoup(r.text, 'html.parser')

        for accountEntry in soup.find(id='lstAccLst').findAll('li',
                                                              recursive=False):

            # get account details and add to accounts list

            r = s.get('https://secure.tsb.co.uk' +
                      accountEntry.find('h2').a['href'])
            soup = BeautifulSoup(r.text, 'html.parser')

            accountNumbers = soup.find(class_='numbers').get_text().split(', ')

            acc = {'bank': 'TSB'}

            acc['name'] = soup.find('h1').get_text()
            acc['sort'] = accountNumbers[0].replace('-', '')
            acc['number'] = accountNumbers[1]

            acc['balance'] = get_num(soup.find(class_='balance').get_text())
            acc['available'] = get_num(
                soup.find(
                    class_=
                    'manageMyAccountsFaShowMeAnchor {bubble : \'fundsAvailable\', pointer : \'top\'}'
                ).parent.get_text())

            self.accounts.append(acc)

            # download transaction files

            r = s.get('https://secure.tsb.co.uk' +
                      soup.find(id='pnlgrpStatement:conS1:lkoverlay')['href'])
            soup = BeautifulSoup(r.text, 'html.parser')

            # get all form fields, put in dictionary d - http://stackoverflow.com/a/32074666
            d = {
                e['name']: e.get('value', '')
                for e in soup.find_all('input', {'name': True})
            }

            now = time.localtime(time.time())
            yearAgo = time.localtime(time.time() -
                                     6570000)  # ~2.5 months ago

            # will download current view if 0, past 2.5 months if 1
            d['frmTest:rdoDateRange'] = '0'

            d['frmTest:dtSearchFromDate'] = time.strftime('%d', yearAgo)
            d['frmTest:dtSearchFromDate.month'] = time.strftime('%m', yearAgo)
            d['frmTest:dtSearchFromDate.year'] = str(
                time.strftime('%Y', yearAgo))

            d['frmTest:dtSearchToDate'] = time.strftime('%d', now)
            d['frmTest:dtSearchToDate.month'] = time.strftime('%m', now)
            d['frmTest:dtSearchToDate.year'] = str(time.strftime('%Y', now))

            d['frmTest:strExportFormatSelected'] = 'Quicken 98 and 2000 and Money (.QIF)'

            r = s.post(
                'https://secure.tsb.co.uk/personal/a/viewproductdetails/m44_exportstatement_fallback.jsp',
                data=d)

            filename = time.strftime(
                '%Y%m%d') + '-' + acc['sort'] + '-' + acc['number'] + '.qif'
            with open(filename, 'w') as f:
                f.write(r.text)