class ExtractReserve:
    def __init__(self, in_path, in_char, out_path, out_char, reg_type_path):
        self.count_rec = CountRecord()
        self.file_io = FileIO()
        self.in_path = in_path
        self.in_char = in_char
        self.out_path = out_path
        self.out_char = out_char
        self.reg_type_path = reg_type_path

    def extract(self):
        # Open the input file
        file = self.file_io.open_file_as_pandas(self.in_path, self.in_char)
        # Aggregation 1:
        # count records keyed by 顧客ID, 状況 and 指名区分
        status = self.count_rec.group_size(
            file,
            index_col='顧客ID',
            aggregate_col=['顧客ID', '状況', '指名区分'])
        # Aggregation 2:
        # extract 登録区分 keyed by 顧客ID
        register_type = self.count_rec.drop_duplicates(
            file,
            index_col='顧客ID',
            keep_list=['顧客ID', '登録区分'])
        # Write the results out
        self.file_io.export_csv_from_pandas(status, self.out_path)
        self.file_io.export_csv_from_pandas(register_type, self.reg_type_path)
        # Reopen the output file to attach a header
        out_file = self.file_io.open_file_as_pandas(self.out_path, self.out_char)
        # Attach the header
        out_file.columns = ['顧客ID', '状況', '指名区分', '予約回数']
        # Write the result out again
        self.file_io.export_csv_from_pandas(out_file, self.out_path)
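# Usage sketch (not part of the original source): the paths and encodings below
# are hypothetical, and FileIO / CountRecord are assumed to be the helper classes
# defined elsewhere in this repository.
if __name__ == '__main__':
    extract_reserve = ExtractReserve(
        in_path='./data/in/reserve.csv',                # hypothetical input path
        in_char='utf-8',                                # hypothetical encoding
        out_path='./data/out/status.csv',               # hypothetical output path
        out_char='utf-8',
        reg_type_path='./data/out/register_type.csv')   # hypothetical output path
    extract_reserve.extract()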
class ExtractLog:
    def __init__(self, in_path, in_char, stay_time_path, out_char, pv_sum_path, session_path):
        self.count_rec = CountRecord()
        self.file_io = FileIO()
        self.in_path = in_path
        self.in_char = in_char
        self.stay_time_path = stay_time_path
        self.out_char = out_char
        self.pv_sum_path = pv_sum_path
        self.session_path = session_path

    def extract(self):
        # Open the input file
        file = self.file_io.open_file_as_pandas(self.in_path, self.in_char)
        # Drop columns that are not needed
        file = file.drop(['IPアドレス', 'メソッド', 'パス', 'HTTPバージョン', 'ファイル名',
                          'レスポンスバイト数', 'リファラ', 'ユーザーエージェント', 'レスポンスタイム'], axis=1)
        # Parse the timestamp column as datetime
        file['アクセス日時_unix'] = pd.to_datetime(file['アクセス日時'])
        # Compute the interval (in seconds) between consecutive accesses
        file['アクセス間隔'] = (file['アクセス日時_unix'].shift(-1) - file['アクセス日時_unix']).dt.seconds
        # Check whether consecutive rows have the same 顧客ID
        file['顧客ID同一当否'] = (file['顧客ID'].shift(-1) == file['顧客ID'])
        # Zero out the interval where the 顧客ID differs
        file.loc[~file['顧客ID同一当否'], 'アクセス間隔'] = 0
        # Flag accesses that belong to the same session
        file.loc[file['顧客ID同一当否'], 'セッションフラグ'] = 1
        # Total stay time
        stay_time = self.count_rec.group_sum(file, index_col='顧客ID', aggregate_col='アクセス間隔')
        # Total number of pages viewed (aggregation)
        pv_sum = self.count_rec.count_record(file, '顧客ID')
        # Number of sessions
        same_session = self.count_rec.group_sum(file, index_col='顧客ID', aggregate_col='セッションフラグ')
        # Write the results out
        #self.file_io.export_csv_from_pandas(file, './data/out/log.csv')
        self.file_io.export_csv_from_pandas(stay_time, self.stay_time_path)
        self.file_io.export_csv_from_pandas(pv_sum, self.pv_sum_path)
        self.file_io.export_csv_from_pandas(same_session, self.session_path)
        # Reopen the output files to attach headers
        out_file1 = self.file_io.open_file_as_pandas(self.stay_time_path, self.out_char)
        out_file2 = self.file_io.open_file_as_pandas(self.pv_sum_path, self.out_char)
        out_file3 = self.file_io.open_file_as_pandas(self.session_path, self.out_char)
        # Attach the headers
        out_file1.columns = ['顧客ID', '滞在時間']
        out_file2.columns = ['顧客ID', '閲覧ページ総数']
        out_file3.columns = ['顧客ID', '閲覧ページ数/セッション']
        # Write the results out again
        self.file_io.export_csv_from_pandas(out_file1, self.stay_time_path)
        self.file_io.export_csv_from_pandas(out_file2, self.pv_sum_path)
        self.file_io.export_csv_from_pandas(out_file3, self.session_path)
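# Minimal sketch (not part of the original source) of the access-interval and
# session-flag logic above, run on a toy frame with illustrative data only.
# Note that .dt.seconds takes the seconds component of each Timedelta.
import pandas as pd

toy = pd.DataFrame({
    '顧客ID': [1, 1, 2],
    'アクセス日時': ['2019-01-01 10:00:00', '2019-01-01 10:00:30', '2019-01-01 11:00:00'],
})
toy['アクセス日時_unix'] = pd.to_datetime(toy['アクセス日時'])
toy['アクセス間隔'] = (toy['アクセス日時_unix'].shift(-1) - toy['アクセス日時_unix']).dt.seconds
toy['顧客ID同一当否'] = (toy['顧客ID'].shift(-1) == toy['顧客ID'])
toy.loc[~toy['顧客ID同一当否'], 'アクセス間隔'] = 0
# Row 0 gets a 30-second interval; rows where the customer changes (and the last row) become 0.
print(toy[['顧客ID', 'アクセス間隔', '顧客ID同一当否']])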
    def __init__(self, id_path, con_path, char_type):
        self.file_io = FileIO()
        self.encode = CategoryEncode()
        self.count_rec = CountRecord()
        self.extract_col = ExtractColumns()
        self.bin = Binning()
        self.ss = Scaler()
        # Open the input files
        self.id = self.file_io.open_file_as_pandas(id_path, char_type)
        self.con = self.file_io.open_file_as_pandas(con_path, char_type)
class ExtractCancel:
    def __init__(self, in_path, in_char, out_path, out_char):
        self.count_rec = CountRecord()
        self.file_io = FileIO()
        self.in_path = in_path
        self.in_char = in_char
        self.out_path = out_path
        self.out_char = out_char

    def extract(self):
        # Open the input file
        file = self.file_io.open_file_as_pandas(self.in_path, self.in_char)
        # Aggregation: count records per 顧客ID
        vc = self.count_rec.count_record(file, '顧客ID')
        # Write the result out
        self.file_io.export_csv_from_pandas(vc, self.out_path)
        # Reopen the output file to attach a header
        out_file = self.file_io.open_file_as_pandas(self.out_path, self.out_char)
        # Attach the header
        out_file.columns = ['顧客ID', 'キャンセル回数']
        # Write the result out again
        self.file_io.export_csv_from_pandas(out_file, self.out_path)
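# Usage sketch (not part of the original source): the paths and encodings are
# hypothetical; the input CSV is assumed to contain one row per cancellation
# with a 顧客ID column.
extract_cancel = ExtractCancel(
    in_path='./data/in/cancel.csv',          # hypothetical input path
    in_char='utf-8',
    out_path='./data/out/cancel_count.csv',  # hypothetical output path
    out_char='utf-8')
extract_cancel.extract()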
class ExtractSalesSp:
    def __init__(self, in_path, in_char, payment_path, out_char,
                 cust_attr_path, target_attr_path, average_attr_path):
        self.count_rec = CountRecord()
        self.file_io = FileIO()
        self.in_path = in_path
        self.in_char = in_char
        self.payment_path = payment_path
        self.out_char = out_char
        self.cust_attr_path = cust_attr_path
        self.target_attr_path = target_attr_path
        self.average_attr_path = average_attr_path

    def extract(self):
        # Open the input file
        file = self.file_io.open_file_as_pandas(self.in_path, self.in_char)
        # Customer-attribute preprocessing: keep only the sales rows (明細コード == 1)
        # so that per-product rows are folded into the sales rows
        sales_file = file.query('明細コード == 1')
        # Aggregation: payment information per 顧客ID
        cust_payment = self.count_rec.group_sum(sales_file, index_col='顧客ID',
                                                aggregate_col=['顧客ID', '施術時間'])
        # Customer-attribute aggregation: attribute information per 顧客ID
        ex_id = sales_file['顧客ID']
        ex_nominate = sales_file['指名回数']
        ex_course = sales_file['コース受諾回数']
        ex_card = sales_file['紹介カード受渡回数']
        ex_reception = sales_file['治療送客回数']
        ex_director = sales_file['院長挨拶回数']
        # Additional customer attributes
        #ex_branch = sales_file['店舗']
        #ex_accosiate = sales_file['担当者']
        # Merge
        cust_attr = pd.concat([ex_id, ex_nominate], axis=1)
        cust_attr = pd.concat([cust_attr, ex_course], axis=1)
        cust_attr = pd.concat([cust_attr, ex_card], axis=1)
        cust_attr = pd.concat([cust_attr, ex_reception], axis=1)
        cust_attr = pd.concat([cust_attr, ex_director], axis=1)
        cust_attr = pd.concat([cust_attr, cust_payment], axis=1)
        #cust_attr = self.cont_rec.group_size(sales_file, index_col='顧客ID', keep_list=['顧客ID', '指名回数', 'コース受託回数', '紹介カード受渡回数', '治療送客回数', '院長挨拶回数'])
        # Aggregation 2.2: per-product attribute information per 顧客ID
        ex_id_product = file['顧客ID']
        ex_product_code = file['商品コード']
        ex_price_product = file['売上単価']
        ex_amount_product = file['数量']
        # Merge
        product_attr = pd.concat([ex_id_product, ex_product_code], axis=1)
        product_attr = pd.concat([product_attr, ex_price_product], axis=1)
        product_attr = pd.concat([product_attr, ex_amount_product], axis=1)
        # Add a sales column
        product_attr['売上'] = file['売上単価'] * file['数量']
        # Add a column that serves as the per-product line ID
        product_attr['明細ID'] = file['伝票コード'] * 10 + file['明細コード']
        # Initialise the score column
        product_attr['スコア'] = 0
        # Assign scores
        product_attr.loc[product_attr['商品コード'] == '1A1501', 'スコア'] = 5
        product_attr.loc[product_attr['商品コード'] == '1B2201', 'スコア'] = 4
        product_attr.loc[product_attr['商品コード'] == '1A1601', 'スコア'] = 3
        product_attr.loc[product_attr['商品コード'] == '200071', 'スコア'] = 2
        product_attr.loc[product_attr['商品コード'] == '200006', 'スコア'] = 1
        product_attr['スコア'] = product_attr['スコア'] * product_attr['数量']
        # Drop unneeded rows
        #product_attr = product_attr[(product_attr['商品コード'] == '1A1501') | (product_attr['商品コード'] == '1B2201') | (product_attr['商品コード'] == '1A1601') | (product_attr['商品コード'] == '200071') | (product_attr['商品コード'] == '200006')]
        # Write the results out
        self.file_io.export_csv_from_pandas(cust_payment, self.payment_path)
        self.file_io.export_csv_from_pandas(cust_attr, self.cust_attr_path)
        #self.file_io.export_csv_from_pandas(target_attr, self.target_attr_path)
        self.file_io.export_csv_from_pandas(product_attr, self.average_attr_path)
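# Minimal sketch (not part of the original source) of the scoring step above,
# run on a toy frame with illustrative product codes and quantities only.
import pandas as pd

toy = pd.DataFrame({'商品コード': ['1A1501', '200006', '999999'],
                    '数量': [2, 1, 3]})
toy['スコア'] = 0
toy.loc[toy['商品コード'] == '1A1501', 'スコア'] = 5
toy.loc[toy['商品コード'] == '200006', 'スコア'] = 1
toy['スコア'] = toy['スコア'] * toy['数量']
# -> scores 10, 1, 0: codes outside the score table keep the default of 0.
print(toy)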
class ConcatCsvs:
    def __init__(self, id_path, cust_payment_path, cust_attr_path,
                 target_attr_path, average_attr_path, cust_path, cancel_path,
                 contact_path, cti_path, register_type_path, status_path,
                 stay_time_path, pv_sum_path, session_path, shop_path,
                 pref_path, char_type):
        self.file_io = FileIO()
        self.encode = CategoryEncode()
        self.count_rec = CountRecord()
        self.extract_col = ExtractColumns()
        self.bin = Binning()
        # Open the input files
        self.id = self.file_io.open_file_as_pandas(id_path, char_type)
        self.cust_payment = self.file_io.open_file_as_pandas(cust_payment_path, char_type)
        self.cust_attr = self.file_io.open_file_as_pandas(cust_attr_path, char_type)
        self.target_attr = self.file_io.open_file_as_pandas(target_attr_path, char_type)
        self.average_attr = self.file_io.open_file_as_pandas(average_attr_path, char_type)
        self.cust = self.file_io.open_file_as_pandas(cust_path, char_type)
        self.cancel = self.file_io.open_file_as_pandas(cancel_path, char_type)
        self.contact = self.file_io.open_file_as_pandas(contact_path, char_type)
        self.cti = self.file_io.open_file_as_pandas(cti_path, char_type)
        self.register_type = self.file_io.open_file_as_pandas(register_type_path, char_type)
        self.status = self.file_io.open_file_as_pandas(status_path, char_type)
        self.stay_time = self.file_io.open_file_as_pandas(stay_time_path, char_type)
        self.pv_sum = self.file_io.open_file_as_pandas(pv_sum_path, char_type)
        self.session = self.file_io.open_file_as_pandas(session_path, char_type)
        self.shop = self.file_io.open_file_as_pandas(shop_path, char_type)
        self.pref = self.file_io.open_file_as_pandas(pref_path, char_type)

    def concat(self, out_path, out_path2):
        # Feature extraction
        # cust_payment
        # no categorical data
        # --- check ---
        #print("--- cust_payment shape ---\n {}\n".format(self.cust_payment.shape))
        #print(self.cust_payment.head())

        # cust_attr
        cust_attr_col_list = []
        cust_attr_tg_list = ['指名回数', 'コース受諾回数', '紹介カード受渡回数',
                             '治療送客回数', '院長挨拶回数']
        # Extract the categorical columns
        cust_attr_category_col = self.extract_col.extract(
            self.cust_attr, self.cust_attr['顧客ID'],
            extract_col=cust_attr_tg_list)
        # Extract the non-categorical columns
        cust_attr_non_category_col = self.extract_col.exclude(
            self.cust_attr, exclude_col=cust_attr_tg_list)
        # Feature extraction
        org_cust_attr = self.encode.transform_feature(
            cust_attr_category_col, aggregate_col=cust_attr_tg_list)
        org_cust_attr = org_cust_attr.fillna(0)
        #org_cust_attr = org_cust_attr.drop('Unnamed: 0', axis=1)
        # Attach labels
        for col in cust_attr_tg_list:
            cust_attr_col_list += self.encode.transform_label(self.cust_attr[col], col)
        else:
            cust_attr_col_list += ['顧客ID']
        # Set the labels
        org_cust_attr.columns = cust_attr_col_list
        # Aggregation
        feat_cust_attr = self.count_rec.group_sum(
            org_cust_attr, index_col='顧客ID', aggregate_col=cust_attr_col_list)
        # Join the categorical and non-categorical columns
        feat_cust_attr = pd.merge(feat_cust_attr, cust_attr_non_category_col,
                                  on='顧客ID', how='left')
        feat_cust_attr = feat_cust_attr.drop('Unnamed: 0', axis=1)
        # --- check ---
        #print("--- feat_cust_attr shape ---\n {}\n".format(feat_cust_attr.shape))
        #print(feat_cust_attr.head())
        #self.file_io.export_csv_from_pandas(feat_cust_attr, './data/out/mid_feat_cust_attr.csv')

        # product_attr
        '''
        product_attr_col_list = []
        product_attr_tg_list = ['商品コード']
        # Extract the categorical columns
        product_attr_category_col = self.extract_col.extract(self.target_attr, self.target_attr['明細ID'], extract_col=product_attr_tg_list)
        # Extract the non-categorical columns by removing the categorical ones from the original dataset
        product_attr_non_category_col = self.extract_col.exclude(self.target_attr, exclude_col=product_attr_tg_list)
        # Feature extraction
        org_product_attr = self.encode.transform_feature(product_attr_category_col, aggregate_col=product_attr_tg_list)
        org_product_attr = org_product_attr.fillna(0)
        #org_product_attr = org_product_attr.drop('Unnamed: 0', axis=1)
        #print(org_product_attr)
        # Attach labels
        for col in product_attr_tg_list:
            product_attr_col_list += self.encode.transform_label(self.target_attr[col], col)
        else:
            product_attr_col_list += ['明細ID']
        # Set the labels
        org_product_attr.columns = product_attr_col_list
        # Join the categorical and non-categorical columns
        feat_product_attr = pd.merge(org_product_attr, product_attr_non_category_col, on='明細ID', how='left')
        feat_product_attr = feat_product_attr.drop('Unnamed: 0', axis=1)
        '''
        # product_attr
        feat_product_attr = self.average_attr
        # --- check ---
        #print("--- feat_product_attr shape ---\n {}\n".format(feat_cust_attr.shape))
        #print(feat_product_attr.head())
        #self.file_io.export_csv_from_pandas(feat_product_attr, './data/out/mid_feat_product_attr.csv')

        # cust
        cust_col_list = []
        cust_tg_list = ['性別', '携帯TEL', '自宅TEL', '携帯メール', 'PCメール', '職業']
        # Drop outliers
        new_cust = self.cust.drop(
            self.cust[self.cust['生年月日'].str.contains(r'\*', na=True)].index)
        today = int(pd.to_datetime('today').strftime('%Y%m%d'))
        new_cust['生年月日'] = pd.to_datetime(new_cust['生年月日']).dt.strftime('%Y%m%d').astype(np.int64)
        new_cust['生年月日'] = ((today - new_cust['生年月日']) / 10000).astype(np.int64)
        new_cust['生年月日'] = self.bin.list_divide(new_cust['生年月日'],
                                               [0, 10, 20, 30, 40, 50],
                                               ['10', '20', '30', '40', '50'])
        # Extract the categorical columns
        cust_category_col = self.extract_col.extract(new_cust, new_cust['顧客ID'],
                                                     extract_col=cust_tg_list)
        # Extract the non-categorical columns
        cust_non_category_col = self.extract_col.exclude(new_cust, exclude_col=cust_tg_list)
        # Feature extraction
        feat_cust = self.encode.transform_feature(cust_category_col, aggregate_col=cust_tg_list)
        feat_cust = feat_cust.fillna(0)
        #feat_cust = feat_cust.drop('Unnamed: 0', axis=1)
        feat_cust = feat_cust[feat_cust.columns.drop(list(feat_cust.filter(regex='Unnamed:')))]
        # Attach labels
        for col in cust_tg_list:
            cust_col_list += self.encode.transform_label(new_cust[col], col)
        else:
            cust_col_list += ['顧客ID']
        # Set the labels
        feat_cust.columns = cust_col_list
        # Join the categorical and non-categorical columns
        feat_cust = pd.merge(feat_cust, cust_non_category_col, on='顧客ID', how='left')
        #feat_cust = feat_cust.drop('Unnamed: 0', axis=1)
        # --- check ---
        #print("--- feat_cust shape ---\n {}\n".format(feat_cust.shape))
        #print(feat_cust.head())
        #self.file_io.export_csv_from_pandas(feat_cust, './data/out/mid_feat_cust.csv')

        # shop
        shop_col_list = []
        shop_tg_list = ['担当店舗']
        # Extract the categorical columns
        shop_category_col = self.extract_col.extract(self.shop, self.shop['顧客ID'],
                                                     extract_col=shop_tg_list)
        # Feature extraction
        feat_shop = self.encode.transform_feature(shop_category_col, aggregate_col=shop_tg_list)
        feat_shop = feat_shop.fillna(0)
        #feat_shop = feat_cust.drop('Unnamed: 0', axis=1)
        feat_shop = feat_shop[feat_shop.columns.drop(list(feat_shop.filter(regex='Unnamed:')))]
        # Attach labels
        for col in shop_tg_list:
            shop_col_list += self.encode.transform_label(self.shop[col], col)
        else:
            shop_col_list += ['顧客ID']
        # Set the labels
        feat_shop.columns = shop_col_list
        #feat_shop = feat_shop.drop('Unnamed: 0', axis=1)
        # --- check ---
        #print("--- feat_shop shape ---\n {}\n".format(feat_shop.shape))
        #print(feat_shop.head())
        #self.file_io.export_csv_from_pandas(feat_shop, './data/out/mid_feat_shop.csv')

        # pref
        pref_col_list = []
        pref_tg_list = ['町域']
        new_pref = self.pref.drop(self.pref[self.pref['町域'] == 0].index)
        # Extract the categorical columns
        pref_category_col = self.extract_col.extract(new_pref, new_pref['顧客ID'],
                                                     extract_col=pref_tg_list)
        # Feature extraction
        feat_pref = self.encode.transform_feature(pref_category_col, aggregate_col=pref_tg_list)
        feat_pref = feat_pref.fillna(0)
        #feat_pref = feat_cust.drop('Unnamed: 0', axis=1)
        feat_pref = feat_pref[feat_pref.columns.drop(list(feat_pref.filter(regex='Unnamed:')))]
        # Attach labels
        for col in pref_tg_list:
            pref_col_list += self.encode.transform_label(self.pref[col], col)
        else:
            pref_col_list += ['顧客ID']
        # Set the labels
        feat_pref.columns = pref_col_list
        #feat_pref = feat_pref.drop('Unnamed: 0', axis=1)
        # --- check ---
        #print("--- feat_pref shape ---\n {}\n".format(feat_pref.shape))
        #print(feat_pref.head())
        #self.file_io.export_csv_from_pandas(feat_pref, './data/out/mid_feat_pref.csv')

        # cancel
        # no categorical data
        # --- check ---
        #print("--- cancel shape ---\n {}\n".format(cancel.shape))
        #print(cancel.head())

        # contact
        # no categorical data
        # --- check ---
        #print("--- contact shape ---\n {}\n".format(contact.shape))
        #print(contact.head())

        # cti
        # no categorical data
        # --- check ---
        #print("--- cti shape ---\n {}\n".format(cti.shape))
        #print(cti.head())

        # stay_time
        new_stay_time = self.stay_time
        new_stay_time['滞在時間'] = self.bin.quant_divide(
            new_stay_time['滞在時間'], 6, ['1', '2', '3', '4', '5'])
        bin_stay_time = new_stay_time.drop('Unnamed: 0', axis=1)

        # pv_sum
        new_pv_sum = self.pv_sum
        new_pv_sum['閲覧ページ総数'] = self.bin.quant_divide(
            new_pv_sum['閲覧ページ総数'], 11,
            ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'])
        bin_pv_sum = new_pv_sum.drop('Unnamed: 0', axis=1)

        # session
        new_session = self.session
        new_session['閲覧ページ数/セッション'] = self.bin.quant_divide(
            new_session['閲覧ページ数/セッション'], 6, ['1', '2', '3', '4', '5'])
        bin_session = new_session.drop('Unnamed: 0', axis=1)

        # register_type
        reg_col_list = []
        reg_tg_list = ['登録区分']
        # Extract the categorical columns
        reg_category_col = self.extract_col.extract(self.register_type,
                                                    self.register_type['顧客ID'],
                                                    extract_col=reg_tg_list)
        # Extract the non-categorical columns
        reg_non_category_col = self.extract_col.exclude(self.register_type,
                                                        exclude_col=reg_tg_list)
        # Feature extraction
        feat_register_type = self.encode.transform_feature(
            reg_category_col, aggregate_col=reg_tg_list)
        feat_register_type = feat_register_type.fillna(0)
        #feat_register_type = feat_register_type.drop('Unnamed: 0', axis=1)
        # Attach labels
        for col in reg_tg_list:
            reg_col_list += self.encode.transform_label(self.register_type[col], col)
        else:
            reg_col_list += ['顧客ID']
        # Set the labels
        feat_register_type.columns = reg_col_list
        # Join the categorical and non-categorical columns
        feat_register_type = pd.merge(feat_register_type, reg_non_category_col,
                                      on='顧客ID', how='left')
        feat_register_type = feat_register_type.drop('Unnamed: 0', axis=1)
        # --- check ---
        #print("--- feat_register_type shape ---\n {}\n".format(feat_register_type.shape))
        #print(feat_register_type.head())
        #self.file_io.export_csv_from_pandas(feat_register_type, './data/out/mid_feat_register_type.csv')

        # status
        stat_col_list = []
        stat_tg_list = ['状況', '指名区分']
        # Extract the categorical columns
        stat_category_col = self.extract_col.extract(self.status, self.status['顧客ID'],
                                                     extract_col=stat_tg_list)
        # Extract the non-categorical columns
        stat_non_category_col = self.extract_col.exclude(self.status,
                                                         exclude_col=stat_tg_list)
        # Feature extraction
        feat_status = self.encode.transform_feature(stat_category_col,
                                                    aggregate_col=stat_tg_list)
        feat_status = feat_status.fillna(0)
        #feat_status = feat_status.drop('Unnamed: 0', axis=1)
        # Attach labels
        for col in stat_tg_list:
            stat_col_list += self.encode.transform_label(self.status[col], col)
        else:
            stat_col_list += ['顧客ID']
        # Set the labels
        feat_status.columns = stat_col_list
        # Join the categorical and non-categorical columns
        feat_status = pd.merge(feat_status, stat_non_category_col, on='顧客ID', how='left')
        feat_status = feat_status.drop('Unnamed: 0', axis=1)
        #feat_status = feat_status.drop('Unnamed: 0', axis=1)
        # --- check ---
        #print("--- feat_status shape ---\n {}\n".format(feat_status.shape))
        #print(feat_status.head())
        #self.file_io.export_csv_from_pandas(feat_status, './data/out/mid_feat_status.csv')

        # Merge everything into one frame
        con_file = pd.merge(feat_product_attr, self.cust_payment, on='顧客ID', how='left')
        #print("1.1: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, self.cancel, on='顧客ID', how='left')
        #print("1.2: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, self.contact, on='顧客ID', how='left')
        #print("1.3: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, self.cti, on='顧客ID', how='left')
        #print("1.4: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, bin_stay_time, on='顧客ID', how='left')
        #print("1.5: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, bin_pv_sum, on='顧客ID', how='left')
        #print("1.6: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, bin_session, on='顧客ID', how='left')
        #print("1.7: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, feat_cust_attr, on='顧客ID', how='left')
        #print("1.8: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, feat_cust, on='顧客ID', how='left')
        #print("1.9: shape is {}".format(con_file.shape))
        con_file = pd.merge(con_file, feat_register_type, on='顧客ID', how='left')
        #print("1.10: shape is {}".format(con_file.shape))
        #con_file = pd.merge(con_file, feat_status, on='顧客ID', how='left')
        #print("1.11: shape is {}".format(con_file.shape))
        '''con_file = pd.concat([
            self.cust_payment, feat_cust_attr, feat_cust, self.cancel,
            self.contact, self.cti, feat_register_type, feat_status,
            self.stay_time, self.pv_sum, self.session], axis=1, join_axes=['顧客ID'])'''
        # --- check ---
        #print("--- con_file shape ---\n {}\n".format(con_file.shape))
        #print(con_file.head())

        # Merge the per-product frame
        con_product_file = pd.merge(self.id, self.cust_payment, on='顧客ID', how='left')
        con_product_file = pd.merge(con_product_file, feat_product_attr, on='顧客ID', how='left')
        #print("2.1: shape is {}".format(con_file.shape))
        # Drop duplicates if any
        con_file = con_file.drop_duplicates()
        con_product_file = con_product_file.drop_duplicates()
        con_product_file = con_product_file.drop(['施術時間', '売上単価', '数量'], axis=1)
        # Write the results out
        self.file_io.export_csv_from_pandas(con_file, out_path)
        self.file_io.export_csv_from_pandas(con_product_file, out_path2)
        self.file_io.export_csv_from_pandas(feat_shop, './data/out/feat_shop.csv')
        self.file_io.export_csv_from_pandas(feat_pref, './data/out/feat_pref.csv')