def web010_handler(event, context):
    """Friend tweet-summary API.

    Looks up one twitter user's aggregation row, returns its top-20 codes
    ranked by tweet count, the linked tweet (identifying fields stripped)
    and the time of the last aggregation run.
    """
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    uid = event['uid']
    print('uid = {}'.format(uid))
    tfs = dao.table('twitter_friends_sum').find_by_key(uid)
    # Rank codes by how many tweets ('ds' entries) each has; keep the top 20.
    ranks = sorted(tfs['rank'].items(), key=lambda r: len(r[1]['ds']), reverse=True)[:20]
    ret = [{'cd': cd, 'nm': dat['nm'], 'ct': len(dat['ds']), 'd': dat['ds'][0]['d']}
           for cd, dat in ranks]
    tweet = {}
    if 'tid' in tfs:
        # `or {}` guards against a dangling tid whose tweet row is gone.
        tweet = dao.table('tweet').find_by_key(tfs['tid']) or {}
        # Strip identifying fields. pop() tolerates rows that lack a field;
        # the original `del` raised KeyError on partially-migrated records.
        for field in ('id_str', 'user_id', 'user_name', 'user_screen_name'):
            tweet.pop(field, None)
    cond = dao.table('condition').find_by_key('Task002_tweet_report_update')
    ob = {'d': cond['update_time'], 't': tweet, 'r': ret}
    return response({"v": "1", "tfs": ob})
def run(self, dao: Dao, h1):
    """Rebuild the stock_brands collection from Yahoo and kick follow-up batch jobs."""
    h1('stock_brandsを全件delete')
    self._delete_stock_brands()
    h1('ブランド一覧取得')
    brand_rows = YahooStock().get_brands()
    for row in brand_rows:
        # normalize whitespace on the free-text columns
        row['market'] = row['market'].strip()
        row['info'] = row['info'].strip()
    h1('stock_brandsコレクションに追加')
    dao.table('stock_brands').insert(brand_rows)
    batch = boto3.client('batch')
    h1('Job007をキック')
    batch.submit_job(
        jobName='Job007',
        jobQueue="arn:aws:batch:ap-northeast-1:007575903924:job-queue/Job007_stock_brands_patch_recreate",
        jobDefinition="Job007_stock_brands_patch_recreate:1")
    h1('Job011をキック')
    batch.submit_job(
        jobName='Job011',
        jobQueue="arn:aws:batch:ap-northeast-1:007575903924:job-queue/Job011_populate_stock_detail",
        jobDefinition="Job011_populate_stock_detail:1")
    h1('終了')
def tbl_report():
    """Migrate twitter_friends rows from Mongo into DynamoDB (full replace)."""
    # os.environ["PRODUCTION_DAO"] = "True"
    src = OldDao(DB())
    dst = Dao()
    cursor = src.table("twitter_friends").find({}, {'_id': 0, 'tweet_summary': 0})
    rows = [doc for doc in cursor]
    target = dst.table("twitter_friends")
    target.delete_all()
    target.insert(rows)
def tbl_price_up():
    """One-off migration: load c:/temp/pac_price CSV files into stock_price_history.

    Only directories whose newest file is 201806.csv AND whose final row is
    dated 2018/04/20 are migrated. Numeric columns are normalized to string
    values, with '-' standing in for missing (NaN) numbers.
    """
    os.environ["PRODUCTION_DAO"] = "True"
    mongo = OldDao(DB())  # NOTE(review): unused here, kept for its connection side effect — confirm
    dynamo = Dao()
    # Columns whose values become stringified integers ('-' passes through).
    # Deduplicated from nine copy-pasted conversion lines in the original.
    int_keys = ('o', 'l', 'h', 'c', 'v', 'j', 't', 'y', 'k')
    dirs = os.listdir('c:/temp/pac_price')
    # dynamo.table('stock_price_history').delete_all()
    rows = []
    for ccode in dirs:
        last_file = sorted(os.listdir('c:/temp/pac_price/{}'.format(ccode)))[-1]
        if last_file != '201806.csv':
            continue
        df = pd.read_csv('c:/temp/pac_price/{}/{}'.format(ccode, last_file))
        df['cd'] = ccode
        row = df.to_dict('records')
        if row[-1]['d'].split('_')[0] != '2018/04/20':
            # unexpected final date: report and skip this code entirely
            print(ccode, row[0]['d'])
            continue
        row = row[:-1]  # drop the final (boundary) record
        for r in row:
            # NaN floats -> '-'; isinstance is safe here (bool is not float)
            r = {k: '-' if isinstance(v, float) and math.isnan(v) else v
                 for k, v in r.items()}
            # plain ints -> str; type() (not isinstance) so bools stay untouched
            r = {k: str(v) if type(v) == int else v for k, v in r.items()}
            for k in int_keys:
                # float-ish numerics -> int -> str, keeping '-' markers as-is
                r[k] = str(int(r[k])) if r[k] != '-' else '-'
            r['b'] = str(r['b'])
            r['e'] = str(r['e'])
            # '2018/04/19_xx' -> '20180419'
            r['d'] = r['d'].split('_')[0].replace('/', '')
            rows.append(r)
    # sanity check: nothing should still be a raw float at this point
    for r in rows:
        for k, v in r.items():
            if isinstance(v, float):
                print(k, v, r)
    dynamo.table('stock_price_history').insert(rows)
def web002_handler(event, context):
    """Brand API: return brand master data for one code; attach thema names when present."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    code = event['cd']
    print('cd={}'.format(code))
    dao = Dao()
    brand_row = dao.table("stock_brands").find_by_key(code)
    thema_row = dao.table("stock_thema_ccode").find_by_key(code)
    if thema_row:
        brand_row['thema'] = thema_row['nms']
    return response({"v": "1", "brand": brand_row})
def run(self, dao: Dao, h1):
    """Collect today's limit-up / limit-down brands and append to stock_brands_high_low."""
    ys = YahooStock()
    h1('ストップ高取得')
    high_list = ys.get_dayly_limit(mode='high')
    h1('ストップ安取得')
    low_list = ys.get_dayly_limit(mode='low')
    h1('stock_brands_high_lowコレクションに追加')
    combined = high_list + low_list
    if combined:
        # normalize date objects to strings before the insert
        self.date_to_string(combined)
        dao.table('stock_brands_high_low').insert(combined)
def tbl_report():
    """Copy stock_report_pre (Mongo) into stock_report (Dynamo), keeping only tweet ids."""
    # os.environ["PRODUCTION_DAO"] = "True"
    mongo = OldDao(DB())
    dynamo = Dao()
    cursor = mongo.table("stock_report_pre").find({}, {'_id': 0}).sort([('created_at', -1)])
    repos = []
    for doc in cursor:
        # flatten embedded tweet objects down to their id strings
        doc['tweets'] = [t['id_str'] for t in doc['tweets']]
        repos.append(doc)
    target = dynamo.table("stock_report")
    target.delete_all()
    target.insert(repos)
def tbl_tweet():
    """Migration check: report Mongo tweet ids missing from DynamoDB.

    Scans a 15k-id window (ids[15000:30000]) and appends any id that did not
    come back from the batch fetch to non_exists_tweet_ids8.txt.

    Fixes: the per-id existence test built a list comprehension over all
    results for every id (O(n*m)); a set membership test is O(1) per id.
    Dead commented-out code removed.
    """
    mongo = OldDao(DB())
    os.environ["PRODUCTION_DAO"] = "True"
    dynamo = Dao()
    cur_twt = mongo.table("tweet").find({}).sort([('created_at', -1)])
    ids = [t['id_str'] for t in cur_twt]
    ids_bit = ids[15000:30000]
    rets = dynamo.table("tweet").find_batch(ids_bit)
    existing = {r["id_str"] for r in rets}
    non_exists_ids = [id_str for id_str in ids_bit if id_str not in existing]
    if non_exists_ids:
        with open("non_exists_tweet_ids8.txt", "a") as f:
            f.write("\n".join(non_exists_ids) + "\n")
def run(self, dao: Dao, h1):
    """Download every friend's profile image, base64-encode, upload combined JSON to S3.

    Fixes: the original wrapped the WHOLE loop in a bare ``except:`` — the
    first failing friend aborted all remaining ones even though ``err_list``
    clearly intends to collect failures. The try now covers one friend at a
    time and catches ``Exception`` (KeyboardInterrupt/SystemExit propagate).
    The local ``json`` dict was renamed: it shadowed the json module name.
    """
    h1('twitter_friends取得')
    friends = dao.table('twitter_friends').full_scan()
    s3 = boto3.resource('s3').Bucket("kabupac.com")
    h1('画像取得してBase64化')
    err_list = []
    all_data = {}
    piece_up = False  # when True, additionally upload one object per friend
    for fr in friends:
        try:
            url = fr['profile_image_url']
            data = request.urlopen(url).read()
            str_img = base64.b64encode(data).decode('utf-8')
            img_uri = "data:image/{};base64,{}".format(self.get_ext(url), str_img)
            if piece_up:
                payload = {"i": img_uri}
                s3.put_object(Key="prof/{}".format(fr['id_str']), Body=JSON.dumps(payload))
            all_data[fr['id_str']] = img_uri
        except Exception:
            Util.print_stack_trace()
            err_list.append(fr.get('id_str'))
    h1('s3にアップロード')
    s3.put_object(Key="prof/profimg.json", Body=JSON.dumps(all_data))
    print(err_list)
    h1('終了')
def task_exp_handler(event, context):
    """Experimental report: dump a tab-separated summary of every stock_edinet row."""
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    print(DateTimeUtil.str_now())
    dao = Dao()
    rows = dao.table('stock_edinet').full_scan()
    print('len(edi) = {}'.format(len(rows)))
    line_fmt = '{}\t' * 6
    for row in rows:
        # backfill missing attributes with the 'non' marker
        for field in ('holders', 'holder_rate', 'outstanding_share'):
            if field not in row:
                row[field] = 'non'
        hr = row['holder_rate']
        ot = row['outstanding_share']
        tot = get(hr, 'tot')
        print(line_fmt.format(row['ccode'], row['name'], get(hr, 'unit'),
                              get(tot, 1), get(ot, 'hutuu_hakkou'), get(ot, 'unit')))
    return ''
def run(self, dao: Dao, h1):
    """Prune twitter_friends_sum: drop per-code tweet entries older than 15 days.

    Bug fix: the original iterated ``tfs['rank']`` while deleting keys from
    it, which raises ``RuntimeError: dictionary changed size during
    iteration`` in Python 3. We iterate over a snapshot of the keys instead.
    Rows whose rank empties out entirely are deleted from the table.
    """
    h1('twitter_friends_sum取得')
    tbl_tfs = dao.table('twitter_friends_sum')
    tfs_list = tbl_tfs.full_scan()
    d = DateTimeUtil.now() - timedelta(days=15)
    two_week_ago = d.strftime('%Y/%m/%d_00:00:00')
    h1('集計開始')
    for tfs in tfs_list:
        # list(...) snapshot: we delete keys from tfs['rank'] inside the loop
        for cd in list(tfs['rank'].keys()):
            ds = tfs['rank'][cd]['ds']
            n_ds = [r for r in ds if r['d'] >= two_week_ago]
            if n_ds:
                tfs['rank'][cd]['ds'] = n_ds
            else:
                print("tfs['rank'][cd] = 空. cd={}".format(cd))
                del tfs['rank'][cd]
        if tfs['rank']:
            tbl_tfs.put_item(Item=tfs)
        else:
            print('空。 uid={}'.format(tfs['uid']))
            tbl_tfs.delete_item_silent({"S": tfs['uid']})
    h1('終了')
def task004_handler(event, context):
    """Fan out price collection: split all ccodes across 6 PriceNNN lambdas.

    Robustness fix: the chunk size ``q`` was ``len // 6`` and could be 0
    when there are fewer codes than lambdas, making ``range(..., 0)`` raise
    ValueError. Guarded with ``max(1, ...)``.
    """
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    print(DateTimeUtil.str_now())
    lambda_cnt = 6
    dao = Dao()
    brands = dao.table('stock_brands').full_scan()
    ccode_list = [r['ccode'] for r in brands]
    ccode_list.append('998407')  # Nikkei 225 index
    ccode_list.append('USDJPY')  # USD/JPY rate
    # chunk into roughly equal slices; never allow a zero step
    q = max(1, len(ccode_list) // lambda_cnt)
    chunk_ccode = [ccode_list[i:i + q] for i in range(0, len(ccode_list), q)]
    if len(chunk_ccode) > lambda_cnt:
        # fold the remainder chunk into the last regular one
        remainder = chunk_ccode.pop()
        chunk_ccode[-1].extend(remainder)
    for i, ccodes in enumerate(chunk_ccode, start=1):
        func_name = 'arn:aws:lambda:ap-northeast-1:007575903924:function:Price{0:03d}'.format(i)
        print(func_name)
        boto3.client("lambda").invoke(FunctionName=func_name,
                                      InvocationType="Event",
                                      Payload=json.dumps({"cds": '_'.join(ccodes)}))
    return 0
def task006_handler(event, context):
    """Build the name-suggestion JSON from stock_identify and publish it to S3."""
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    print(DateTimeUtil.str_now())
    dao = Dao()
    ident_list = dao.table('stock_identify').full_scan()
    data = {}
    for ident in ident_list:
        cd = ident['ccode']
        dat = data.get(cd, {})
        # 'ns' is stored underscore-joined; split, append the new name, re-join.
        if 'ns' in dat:
            names = dat['ns'].split('_')
            names.append(ident['nm'])
        else:
            names = [ident['nm']]
        dat['ns'] = names
        if ident['main'] == 'y':
            # the main identifier also becomes the display name
            dat['n'] = ident['nm']
        dat['ns'] = '_'.join(names)
        data[cd] = dat
    ret = []
    for cd, dat in data.items():
        dat['c'] = cd
        ret.append(dat)
    s3 = boto3.resource('s3').Bucket("kabupac.com")
    s3.put_object(Key="suggest/dat.json", Body=JSON.dumps(ret))
    return ''
def run(self, dao: Dao, h1):
    """Fetch weekly rise/fall price-rate rankings and store them in stock_brands_rise_fall."""
    ys = YahooStock()
    h1('値上がり率取得')
    rise_list = ys.get_rise_fall_price_rate(mode='rise', period='w', today=False)
    h1('値下がり率取得')
    fall_list = ys.get_rise_fall_price_rate(mode='fall', period='w', today=False)
    # NOTE(review): this progress message mentions the high_low table but the
    # insert targets stock_brands_rise_fall — looks stale; kept as-is.
    h1('stock_brands_high_lowコレクションに追加')
    rise_list.extend(fall_list)
    if rise_list:
        for dat in rise_list:
            # serialize the date before inserting
            dat['date'] = DateTimeUtil.strf_ymdhms(dat['date'])
        dao.table('stock_brands_rise_fall').insert(rise_list)
def run(self, dao: Dao, h1):
    """Rebuild the twitter_friends collection, then kick Job009 to regenerate profile images."""
    h1('フォロー一覧削除')
    self._delete_twitter_friends()
    h1('フォロー一覧取得')
    inspector = TwitterInspector(Irebaburn)
    friend_rows = inspector.get_friends()
    h1('twitter_friendsコレクションに追加')
    dao.table('twitter_friends').insert(friend_rows)
    h1('Job009をキック')
    queue_arn = "arn:aws:batch:ap-northeast-1:007575903924:job-queue/Job009_twitter_friends_profimg_create"
    boto3.client('batch').submit_job(
        jobName='Job009',
        jobQueue=queue_arn,
        jobDefinition="Job009_twitter_friends_profimg_create:1")
def web001_handler(event, context):
    """Report-list page API: one page of report summaries plus the list update time."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    page = event.get('p', "1")
    print('page = {}'.format(page))
    tbl, uptime = get_available_table(dao)
    if tbl == 'err':
        # no list table is currently readable
        return response({"v": "1", "pages": [], "repos": [], "upt": "err"})
    page_row = dao.table(tbl).find_by_key(page)
    ccodes = page_row["ccodes"].split(',')
    page_row["ccodes"] = ccodes
    repos = dao.table("stock_report").find_batch(ccodes)
    for repo in repos:
        # clients only need the tweet count, not the id list
        repo['tweets'] = str(len(repo['tweets']))
    return response({"v": "1", "pages": page_row, "repos": repos, "upt": uptime})
def web003_handler(event, context):
    """Tweet pagination API for one stock report.

    event: 'cd' = stock code, optional 'lt' = last tweet id the client
    already has, optional 'dr' = paging direction ('new' collects tweets
    newer than 'lt'; anything else collects tweets after/older than it).
    Returns up to 30 tweets, newest first; the tweet matching the feed
    boundary id gets 'e': 1 as an end marker.
    """
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    cd = event['cd']
    last_tweet_id = event['lt'] if 'lt' in event else None
    direction = event['dr'] if 'dr' in event else None
    print('cd = {}, last_tweet_id = {}, direction = {}'.format(cd, last_tweet_id, direction))
    repo = dao.table("stock_report").find_by_key(cd)
    if not repo:
        # unknown code -> empty result rather than an error
        return response({"v": "1", "tweets": []})
    tweet_ids = repo['tweets']
    # presumably the feed's boundary tweet id — TODO confirm ordering of repo['tweets']
    end_id = tweet_ids[0]
    tweets = dao.table("tweet").find_batch(tweet_ids)
    if tweets:
        # newest first, then cap the working window at 30
        tweets = sorted(tweets, key=lambda r: r['created_at'], reverse=True)
        tweets = tweets[:30]
    target_tweets = []
    more_flg = False  # set once the client's last id is seen (older-direction paging)
    for tw in tweets:
        if direction == 'new':
            # collect everything strictly newer than the client's last tweet
            if last_tweet_id == tw['id_str']:
                break
            target_tweets.append(tw)
        else:
            # older direction: skip until the client's last id, then collect
            if last_tweet_id == tw['id_str']:
                more_flg = True
                continue
            elif more_flg:
                target_tweets.append(tw)
    if target_tweets:
        # flag the final tweet so the client knows it reached the boundary
        if target_tweets[-1]["id_str"] == end_id:
            target_tweets[-1]["e"] = 1
    return response({"v": "1", "tweets": target_tweets})
def run(self, dao: Dao, h1):
    """Incrementally fetch historical prices for every brand and store them.

    For each code the last stored date in stock_price_history is looked up,
    and prices from the following day onward are fetched from Yahoo in
    parallel (AsyncWorker with 5 workers), converted, and inserted.
    """
    h1('銘柄一覧取得')
    stock_brands = dao.table('stock_brands').full_scan()
    h1('銘柄ごとに時系列データを取得してDB登録(並列処理)')
    ys = YahooStock()

    # worker: param = (ccode, last_stored_day); fetch prices starting the day after
    def j010_historical(param):
        last_plus_one = param[1] + timedelta(days=1)
        str_last_plus_one_date = last_plus_one.strftime('%Y-%m-%d')
        prices = ys.get_historical_prices(ccode=param[0],
                                          str_start_date=str_last_plus_one_date)
        if not prices:
            # empty fetch is reported as a failure for this ccode
            return AsyncWorkerException(param[0])
        return prices

    # @tracelog
    # callback: convert Yahoo rows to the internal format and insert them
    def c010_historical(results, param):
        if results:
            price_list = PriceLogic().convert_history_yahoo_to_pacpac(results, param[0])
            dao.table('stock_price_history').insert(price_list)

    ccode_list = [r['ccode'] for r in stock_brands]
    ccode_list.append('998407')  # Nikkei 225 index
    ccode_list.append('USDJPY')  # USD/JPY rate
    param_list = []
    db_history = dao.table('stock_price_history')
    for ccode in ccode_list:
        # newest stored row for this code; codes with no history are skipped
        history = db_history.get_query_sorted_max({"cd": ccode}, 1)
        if history and history[0]:
            str_last_day = history[0]['d']
            last_day = DateTimeUtil.str_to_date(str_last_day)
            param_list.append((ccode, last_day))
    result = AsyncWorker(j010_historical, c010_historical, 5).go(param_list)
    h1('終了')
    # result[1] holds the ccodes whose fetch failed — presumably (job, errors); verify AsyncWorker contract
    err_list = result[1]
    if err_list:
        print('[失敗銘柄] len(err) = {}, err_targets = {}'.format(len(err_list), err_list))
def web004_handler(event, context):
    """Batch-fetch twitter_friends rows for an underscore-joined id list."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    id_param = event['ids']
    print('ids = {}'.format(id_param))
    friends = Dao().table("twitter_friends").find_batch(id_param.split('_'))
    return response({"v": "1", "friends": friends})
def web011_handler(event, context):
    """Thema search API: return one 10-item page of reports for a thema keyword."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    key = event['k']
    page_idx = int(event['p']) - 1  # client pages are 1-based
    print('key = {}'.format(key))
    thema = dao.table('stock_thema_nm').find_by_key(key)
    ccodes = []
    repos = []
    if thema:
        ccodes = thema['ccodes'].split(',')
        repos = dao.table('stock_report').find_batch(ccodes)
        start = page_idx * 10
        repos = repos[start:start + 10]
        for repo in repos:
            # clients only need the tweet count, not the id list
            repo['tweets'] = str(len(repo['tweets']))
    return response({"v": "1", "cds": ccodes, "repos": repos})
def run(self, dao: Dao, h1):
    """Aggregate the last two weeks of report tweets into twitter_friends_sum.

    Builds, per twitter user id, a map of stock code -> {'nm': report name,
    'ds': [{'d': created_at, 't': tweet id}, ...]} and bulk-inserts it.

    Fixes: removed leftover debug ``print('hoge')``; one consistent variable
    for the user id (the original mixed ``u_id`` and ``t['user_id']``); the
    cutoff datetime no longer shares its name with the per-tweet date.
    """
    h1('stock_report取得')
    repos = dao.table('stock_report').full_scan()
    cutoff = DateTimeUtil.now() - timedelta(days=15)
    two_week_ago = cutoff.strftime('%Y/%m/%d_00:00:00')
    friend_tweet = {}
    h1('ツイート集計開始')
    for repo in repos:
        cd = repo['ccode']
        tweets = dao.table("tweet").find_batch(repo['tweets'])
        # keep only recent tweets, newest first
        tweets = [t for t in tweets if t['created_at'] > two_week_ago]
        tweets.sort(key=lambda r: r['created_at'], reverse=True)
        for t in tweets:
            u_id = t['user_id']
            entry = {'d': t['created_at'], 't': t['id_str']}
            per_user = friend_tweet.setdefault(u_id, {})
            if cd in per_user:
                per_user[cd]['ds'].append(entry)
            else:
                per_user[cd] = {'nm': repo['name'], 'ds': [entry]}
    ret_list = [{'uid': uid, 'rank': rank} for uid, rank in friend_tweet.items()]
    dao.table('twitter_friends_sum').insert(ret_list)
def task003_handler(event, context):
    """Rebuild the paged report-list tables: newest 1000 unique reports, 10 codes per page."""
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    repos = dao.table('stock_report').full_scan()
    print('len(repos)={}'.format(len(repos)))
    # dedupe by ccode (last occurrence wins), newest first, cap at 1000
    repos = list({v['ccode']: v for v in repos}.values())
    print('unique. len(repos)={}'.format(len(repos)))
    repos = sorted(repos, key=itemgetter('last_updated_at'), reverse=True)[:1000]
    # slice codes into pages of 10; an empty result still yields page "1"
    ccodes = [r["ccode"] for r in repos]
    chunks = [ccodes[i:i + 10] for i in range(0, len(ccodes), 10)] or [[]]
    list_pages = [{"p": str(num), "ccodes": ','.join(chunk)}
                  for num, chunk in enumerate(chunks, start=1)]
    # swap both page tables, flagging availability around each rewrite
    for flag, table_name in (('0,1', 'stock_report_list_pages_1'),
                             ('1,0', 'stock_report_list_pages_2')):
        notify_list_pages_update_to_condition(dao, flag)
        tbl = dao.table(table_name)
        tbl.delete_all()
        tbl.insert(list_pages)
    notify_list_pages_update_to_condition(dao, '1,1')
    return 0
def web005_handler(event, context):
    """Current-price API: batch-fetch stock_price_now rows for '_'-joined codes."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    uptime = PriceLogic().get_now_price_update_time()
    cds = event['cds']
    print('cds = {}'.format(cds))
    prices = dao.table("stock_price_now").find_batch(cds.split('_'))
    return response({
        "v": "1",
        "prices": prices,
        "now_price_uptime": uptime
    })
def run(self, dao: Dao, h1):
    """Attach limit high/low history to every brand and mark recent moves on its report."""
    brands_tbl = dao.table('stock_brands')
    repo_tbl = dao.table('stock_report')
    hl_tbl = dao.table('stock_brands_high_low')
    h1('stock_brands取得')
    for brand in brands_tbl.full_scan():
        cd = brand['ccode']
        # attach limit-high / limit-low history to the brand row
        high_list, low_list = self.collect_high_low_list(cd, hl_tbl)
        brand['hs'] = high_list
        brand['ls'] = low_list
        brands_tbl.put_item(Item=brand)
        # mark reports that hit a limit within the last 3 days
        repo = repo_tbl.find_by_key(cd)
        if not repo:
            continue
        mark = repo['mk'] if 'mk' in repo else {}
        mark = self.mark_high_low('hs', high_list, mark)
        mark = self.mark_high_low('ls', low_list, mark)
        repo['mk'] = mark
        repo_tbl.put_item(Item=repo)
def web009_handler(event, context):
    """High/low and rise/fall history API for one stock code."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    cd = event['cd']
    print('cd = {}'.format(cd))
    result = {}
    high_low = dao.table("stock_brands_high_low").find_query({'ccode': cd}, asc=False)
    if high_low:
        result = {
            'h': [row for row in high_low if row['mode'] == 'high'],
            'l': [row for row in high_low if row['mode'] == 'low'],
        }
    rise_fall = dao.table("stock_brands_rise_fall").find_query({'ccode': cd}, asc=False)
    if rise_fall:
        result['r'] = [row for row in rise_fall if row['mode'] == 'rise']
        result['f'] = [row for row in rise_fall if row['mode'] == 'fall']
    return response({"v": "1", "hl": result})
def web008_handler(event, context):
    """Price-history API: rows newer than 'lt' when given, else the latest 30 rows."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    prices = dao.table("stock_price_history").find_query({'cd': event['cd']})
    if not prices:
        prices = []
    elif 'lt' in event and event['lt']:
        # incremental fetch: only rows after the client's last date
        prices = [row for row in prices if row['d'] > event['lt']]
    else:
        # initial fetch: at most the 30 most recent rows
        prices = prices[-30:] if len(prices) > 30 else prices
    return response({"v": "1", "ps": prices})
def get_stock_brands_from_search_row(self, search_row, dao: Dao):
    """Look up a stock_brands row matching a search-result row's company name.

    Matches the normalized name (and its blank-stripped variant) against the
    brand name by regex, or against the identify list exactly.

    Fix: the names were passed raw into ``re.compile`` — company names
    containing regex metacharacters (e.g. '(', '+', '.') could raise
    ``re.error`` or silently match the wrong brands. ``re.escape`` makes the
    patterns literal substring matches, which is the evident intent.
    """
    name = self.company_name_normarize(search_row['company_name'])
    name_s = self.norm(name, blank_cut=False)
    brand = dao.table('stock_brands').find_one({
        '$or': [{
            'name': re.compile(re.escape(name))
        }, {
            'name': re.compile(re.escape(name_s))
        }, {
            'identify': {
                "$in": [name, name_s]
            }
        }]
    })
    return brand
def task001_handler(event, context):
    """Collect new tweets from the '株' Twitter list and store them.

    Resumes from the last seen tweet id kept in the `condition` table; when
    new tweets arrive they are (re)inserted, the checkpoint is advanced, and
    Task002 is invoked asynchronously with the new ids.
    """
    print(DateTimeUtil.str_now())
    dao = Dao()
    # checkpoint: last tweet id fetched by the previous run
    dat = dao.table('condition').get_item(Key={'key': 'Task001_last_tweet_id'})
    last_tweet_id = dat['val']
    print('last_tweet_id: ', last_tweet_id)
    tw = TwitterInspector(Irebaburn)
    timeline = tw.get_list_timeline_rotate(list_name='株', count=1000,
                                           last_data_id=last_tweet_id)
    print(timeline)
    if timeline:
        ids = [t['id_str'] for t in timeline]
        # Some records may have been fetched twice; delete first, then re-insert.
        dao.table('tweet').delete_batch_silent(ids)
        # ids = []
        # for tweet in timeline:
        #     # delete-and-reinsert one item at a time (superseded by the batch call above)
        #     dao.table('tweet').delete_item_silent({'id_str': tweet['id_str']})
        #
        #     ids.append(tweet['id_str'])
        # insert the fresh timeline
        dao.table('tweet').insert(timeline)
        last_tweet_id = timeline[0]['id_str']  # first element = newest fetched tweet
        print('last_tweet_id for update: ', last_tweet_id)
        # advance the last_tweet_id checkpoint
        dao.table("condition").update_item(
            Key={"key": dat['key']},
            ExpressionAttributeValues={
                ':val': last_tweet_id,
                ':dt': DateTimeUtil.str_now()
            },
            UpdateExpression="set val = :val, update_time = :dt")
        # hand the new tweet ids to Task002
        boto3.client("lambda").invoke(
            FunctionName=
            "arn:aws:lambda:ap-northeast-1:007575903924:function:Task002",
            InvocationType="Event",
            Payload=json.dumps({"ids": ids}))
    return ''
def web007_handler(event, context):
    """EDINET data API: holders / holder_rate / outstanding_share for one code."""
    print(DateTimeUtil.str_now())
    os.environ["PRODUCTION_DAO"] = "True"  # TODO: temporary switch for production testing
    dao = Dao()
    cd = event['cd']
    print('cd = {}'.format(cd))
    edi_dat = dao.table("stock_edinet").find_by_key(cd)
    result = {}
    if edi_dat:
        # missing sections are returned as empty objects
        result = {
            'ho': edi_dat.get('holders', {}),
            'ra': edi_dat.get('holder_rate', {}),
            'os': edi_dat.get('outstanding_share', {}),
        }
    return response({"v": "1", "edi": result})
def run(self, dao: Dao, h1):
    """Merge stock_identify names into the MeCab user dictionary and rebuild it.

    Appends one noun row per identifier to the base dictionary, writes
    PacPac.csv, uploads it to S3 and kicks the Job005 rebuild.

    Fix: ``DataFrame.append`` is deprecated and removed in pandas 2.0;
    replaced with ``pandas.concat``, which yields the same frame on every
    supported pandas version.
    """
    from pandas import concat  # local import: module scope only brings in DataFrame

    h1('s3からmecab_dic.xlsxをダウンロード')
    df_dic: DataFrame = self.load_mecab_dic_dataframe()
    h1('stock_identifyを取得')
    identifies = dao.table('stock_identify').full_scan()
    h1('データフレームにstock_identifyをマージする')
    idx = len(df_dic) + 1
    stock_row = {}
    for ident in identifies:
        nm = ident['nm']
        # MeCab dictionary row: surface, ids, cost 10, noun POS columns, marker
        stock_row[idx] = [
            nm, None, None, 10, '名詞', '一般', '*', '*', '*', '*', '*', '*',
            '*', '株'
        ]
        idx += 1
    df_stock = DataFrame.from_dict(stock_row, orient='index')
    df_stock.columns = df_dic.columns.values
    df_dic = concat([df_dic, df_stock])
    h1('PacPac.csvを出力する')
    # line_terminator kept for the pandas version pinned by this project
    df_dic.to_csv('./PacPac.csv',
                  encoding='utf-8',
                  line_terminator='\n',
                  index=False,
                  header=False,
                  mode='w')
    h1('s3にPacPac.csvをアップロード')
    s3_bucket = boto3.resource('s3').Bucket('kabupac.system')
    s3_bucket.upload_file('./PacPac.csv', 'PacPac.csv')
    h1('Job005をキック')
    boto3.client('batch').submit_job(
        jobName='Job005',
        jobQueue="arn:aws:batch:ap-northeast-1:007575903924:job-queue/Job005_mecab_lambda_build",
        jobDefinition="Job005_mecab_lambda_build:1")
    h1('終了')