def read_dataset(is_merged):
    debug = DEBUG
    if debug:
        filename_train = '../input/debug{}/{}_debug{}.feather'.format(
            debug, 'train_translated', debug)
        filename_test = '../input/debug{}/{}_debug{}.feather'.format(
            debug, 'test_translated', debug)
    else:
        filename_train = '../input/{}.feather'.format('train_translated')
        filename_test = '../input/{}.feather'.format('test_translated')

    print_doing('reading train, test and merge')
    if is_merged:
        df = read_train_test(filename_train, filename_test, '.feather',
                             is_merged=True)
        if debug:
            print(df.head())
    else:
        train_df, test_df = read_train_test(filename_train, filename_test,
                                            '.feather', is_merged=False)
        if debug:
            print(train_df.head())
            print(test_df.head())
    print_memory()

    if is_merged:
        return df
    return train_df, test_df
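# Minimal, self-contained sketch (illustrative, not from the original repo)
# of the feather round-trip that read_train_test() is assumed to perform on
# the paths built above; requires pyarrow.
def _feather_roundtrip_demo():
    import pandas as pd
    df_toy = pd.DataFrame({'item_id': ['a1', 'a2'], 'price': [10.0, 20.0]})
    df_toy.to_feather('train_toy.feather')        # write a feather file
    print(pd.read_feather('train_toy.feather'))   # read it back, dtypes intact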
def predict_sub(model_lgb, testdex, test, subfilename):
    print_doing('predicting')
    lgpred = model_lgb.predict(test)
    lgsub = pd.DataFrame(lgpred, columns=['deal_probability'], index=testdex)
    # deal_probability is a probability, so clip predictions into [0, 1]
    # (assignment instead of the deprecated inplace clip)
    lgsub['deal_probability'] = lgsub['deal_probability'].clip(0.0, 1.0)
    print('saving submission file to', subfilename)
    lgsub.to_csv(subfilename, index=True, header=True)
    print('done')
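# Hypothetical usage sketch for predict_sub (illustrative, not part of the
# original pipeline): any object exposing .predict() works, so a dummy model
# stands in for a trained lgb.Booster.
def _predict_sub_smoke_test():
    import numpy as np
    import pandas as pd

    class DummyModel:
        def predict(self, X):
            # constant prediction, just to exercise the submission path
            return np.full(X.shape[0], 0.5)

    testdex = pd.Index(['a1', 'a2', 'a3'], name='item_id')
    # pandas infers gzip compression from the .csv.gz extension
    predict_sub(DummyModel(), testdex, np.zeros((3, 4)), 'toy_submission.csv.gz')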
def read_dataset_origin(dataset):
    # NOTE: the 'dataset' argument is currently unused; both CSVs are read
    # and merged via read_train_test
    filename_train = '../input/train.csv'
    filename_test = '../input/test.csv'
    print_doing('reading train, test and merge')
    df = read_train_test(filename_train, filename_test, '.feather',
                         is_merged=True)
    print_memory()
    print(df.head())
    return df
def prepare_training(mat_filename, dir_feature, predictors, is_textadded):
    print_header('Load features')
    df, y, len_train, traindex, testdex = load_train_test(
        ['item_id'], TARGET, DEBUG)
    del len_train
    gc.collect()
    df = drop_col(df, REMOVED_LIST)

    # add tabular features, one pickled feature file per predictor
    print_doing('add tabular features')
    for feature in predictors:
        dir_feature_file = dir_feature + feature + '.pickle'
        if not os.path.exists(dir_feature_file):
            print('cannot find {}. Please check'.format(dir_feature_file))
        elif feature in df:
            print('{} already added'.format(feature))
        else:
            print_doing_in_task('adding {}'.format(feature))
            df = add_feature(df, dir_feature_file)
    print_memory()

    if is_textadded:
        # add text features and stack them next to the tabular ones
        print_doing_in_task('add text features')
        ready_df, tfvocab = get_text_matrix(mat_filename, 'all', 2, 0)

        print_doing_in_task('stack')
        X = hstack([
            csr_matrix(df.loc[traindex, :].values),
            ready_df[0:traindex.shape[0]]
        ])  # sparse matrix
        testing = hstack([
            csr_matrix(df.loc[testdex, :].values),
            ready_df[traindex.shape[0]:]
        ])
        print_memory()

        print_doing_in_task('prepare vocab')
        tfvocab = df.columns.tolist() + tfvocab
        for mat in [X, testing]:
            print('{} Rows and {} Cols'.format(*mat.shape))
        print('Feature Names Length: ', len(tfvocab))
    else:
        tfvocab = df.columns.tolist()
        testing = hstack([csr_matrix(df.loc[testdex, :].values)])  # sparse matrix
        X = hstack([csr_matrix(df.loc[traindex, :].values)])

    return X, y, testing, tfvocab, df.columns.tolist(), testdex
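# Toy sketch of the sparse stacking step in prepare_training (names below are
# illustrative): tabular columns are converted to CSR and hstack-ed with a
# stand-in for the TF-IDF matrix, keeping everything sparse end to end.
def _stack_demo():
    import numpy as np
    import pandas as pd
    from scipy.sparse import csr_matrix, hstack

    df_toy = pd.DataFrame({'price': [1.0, 2.0, 3.0], 'views': [10, 20, 30]})
    text_mat = csr_matrix(np.random.rand(3, 5))  # stands in for ready_df
    X_toy = hstack([csr_matrix(df_toy.values), text_mat]).tocsr()
    vocab_toy = df_toy.columns.tolist() + ['tfidf_{}'.format(i) for i in range(5)]
    print(X_toy.shape, len(vocab_toy))  # (3, 7) 7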
def do_dataset(dataset):
    train_df, test_df = read_dataset(False)  # unmerged train/test
    len_train = len(train_df)
    if dataset == 'train':
        df = train_df
        del test_df
        gc.collect()
    else:
        df = test_df
        del train_df
        gc.collect()

    if DEBUG:
        storename = '../processed_features_debug{}/{}_debug{}.h5'.format(
            DEBUG, dataset, DEBUG)
        featuredir = '../processed_features_debug{}/'.format(DEBUG)
    else:
        storename = '../processed_features/{}.h5'.format(dataset)
        featuredir = '../processed_features/'

    temp = add_dataset_to_hdf5(storename, df)
    if DEBUG:
        print(temp.isnull().sum(axis=0))

    files = glob.glob(featuredir + '*.pickle')
    for file in files:
        if 'text_feature_kernel' in file:
            continue
        print(file)
        filename = file
        print('\n>> doing', filename)
        df = load_pickle(filename)
        print_doing('extract')
        if DEBUG:
            print(df.head())
            print(df.tail())
        # slice the stacked frame back into its train/test halves
        if dataset == 'train':
            df_new = df.iloc[:len_train]
            if DEBUG:
                print('train: ', df.head())
                print(df_new.isnull().sum(axis=0))
        else:
            df_new = df.iloc[len_train:]
            if DEBUG:
                print('test: ', df.tail())
                print(df_new.isnull().sum(axis=0))
        print('merging...')
        temp = add_dataset_to_hdf5(storename, df_new)
        if DEBUG:
            print(temp.isnull().sum(axis=0))
        print_memory()
def read_dataset():
    debug = DEBUG
    if debug:
        filename_train = '../input/debug{}/{}_debug{}.feather'.format(
            debug, 'train', debug)
        filename_test = '../input/debug{}/{}_debug{}.feather'.format(
            debug, 'test', debug)
    else:
        filename_train = '../input/{}.feather'.format('train')
        filename_test = '../input/{}.feather'.format('test')
    print_doing('reading train, test and merge')
    df = read_train_test(filename_train, filename_test, '.feather',
                         is_merged=True)
    print_memory()
    print(df.head())
    return df
def read_dataset_deal_probability(seed):
    debug = DEBUG
    if debug:
        filename_train = '../input/debug{}/{}_debug{}.feather'.format(
            debug, 'train', debug)
        filename_test = '../input/debug{}/{}_debug{}.feather'.format(
            debug, 'test', debug)
    else:
        filename_train = '../input/{}.feather'.format('train')
        filename_test = '../input/{}.feather'.format('test')
    print_doing('reading train, test and merge')
    train_df, test_df = read_train_test(filename_train, filename_test,
                                        '.feather', is_merged=False)
    df = find_df_local_valid_and_make_deal_prob_nan(train_df, test_df, seed)
    print_memory()
    print(df.head())
    return df
def get_svdtruncated_vectorizer(todir):
    print_doing('doing svdtruncated text feature')
    filename = todir + 'text_feature_kernel.pickle'
    savename = todir + 'truncated_text_feature_kernel.pickle'
    if os.path.exists(savename):
        print('done already...')
        with open(savename, 'rb') as f:
            svd_matrix, vocab = pickle.load(f)
        with open(filename, 'rb') as f:
            tfid_matrix, tfvocab = pickle.load(f)
    else:
        with open(filename, 'rb') as f:
            tfid_matrix, tfvocab = pickle.load(f)
        svdT = TruncatedSVD(n_components=400)
        print_doing_in_task('truncated svd')
        svd_matrix = svdT.fit_transform(tfid_matrix)
        print_doing_in_task('convert to sparse')
        svd_matrix = sparse.csr_matrix(svd_matrix, dtype=np.float32)
        vocab = ['lsa' + str(i + 1) for i in range(svd_matrix.shape[1])]
        with open(savename, 'wb') as f:
            pickle.dump((svd_matrix, vocab), f,
                        protocol=pickle.HIGHEST_PROTOCOL)

    print('---- before truncate')
    print(tfid_matrix.shape)
    print('len of feature:', len(tfvocab))
    print('---- after truncate')
    print(svd_matrix.shape)
    print('len of feature:', len(vocab))
    if DEBUG:
        print(tfid_matrix)
        print('\n')
        print(svd_matrix)
    del svd_matrix, vocab, tfid_matrix, tfvocab
    gc.collect()
    print_memory()
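# Toy sketch of the LSA step above (corpus and n_components are illustrative):
# TruncatedSVD compresses a sparse TF-IDF matrix into a few dense components.
def _lsa_demo():
    from sklearn.decomposition import TruncatedSVD
    from sklearn.feature_extraction.text import TfidfVectorizer

    corpus = ['red car for sale', 'blue car cheap', 'sofa for sale', 'cheap sofa']
    tfidf = TfidfVectorizer().fit_transform(corpus)  # sparse, (4, n_terms)
    svd = TruncatedSVD(n_components=2, random_state=0)
    lsa = svd.fit_transform(tfidf)                   # dense, (4, 2)
    print(lsa.shape, svd.explained_variance_ratio_)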
def do_dataset(dataset):
    train_df, test_df = read_dataset(False)
    len_train = len(train_df)
    if dataset == 'train':
        df = train_df
        del test_df
        gc.collect()
    else:
        df = test_df
        del train_df
        gc.collect()

    if DEBUG:
        storename = '../processed_features_debug{}/{}_debug{}.h5'.format(
            DEBUG, dataset, DEBUG)
        featuredir = '../processed_features_debug{}/'.format(DEBUG)
    else:
        storename = '../processed_features/{}.h5'.format(dataset)
        featuredir = '../processed_features/'

    add_dataset_to_hdf5(storename, df)

    files = glob.glob(featuredir + '*.pickle')
    for file in files:
        # skip the raw train/test dumps; only feature pickles are merged
        if 'train' in file or 'test' in file:
            continue
        if 'text_feature_kernel' in file:
            print(file)
            filename = file
            print('\n>> doing', filename)
            if DEBUG:
                if '_en' in file:
                    savename = '../processed_features_debug{}/{}_text_dense_en_debug{}.pickle'.format(
                        DEBUG, dataset, DEBUG)
                else:
                    savename = '../processed_features_debug{}/{}_text_dense_debug{}.pickle'.format(
                        DEBUG, dataset, DEBUG)
            else:
                if '_en' in file:
                    savename = '../processed_features/{}_text_dense_en.h5'.format(
                        dataset)
                else:
                    savename = '../processed_features/{}_text_dense.h5'.format(
                        dataset)
            if os.path.exists(savename):
                print('done already')
            else:
                mat = load_pickle(filename)
                mat = mat.todense()
                if DEBUG:
                    print(mat.shape, np.sum(mat))
                print_doing('extract')
                if DEBUG:
                    print(mat[0:5, 0:7])
                    print(mat[-5:, 0:7])
                # slice the stacked matrix back into its train/test halves
                if dataset == 'train':
                    mat = mat[:len_train, :]
                    if DEBUG:
                        print(mat.shape, np.sum(mat))
                        print('train: ')
                        print(mat[0:5, 0:7])
                else:
                    mat = mat[len_train:, :]
                    if DEBUG:
                        print(mat.shape, np.sum(mat))
                        print('test: ')
                        print(mat[-5:, 0:7])
                print('merging...')
                save_file(mat, savename, '.pickle')
                print_memory()
        else:
            print(file)
            filename = file
            print('\n>> doing', filename)
            df = load_pickle(filename)
            print_doing('extract')
            if DEBUG:
                print(df.head())
                print(df.tail())
            if dataset == 'train':
                df = df.iloc[:len_train]
                if DEBUG:
                    print('train: ', df.head())
            else:
                df = df.iloc[len_train:]
                if DEBUG:
                    print('test: ', df.tail())
            print('merging...')
            add_dataset_to_hdf5(storename, df)
            print_memory()
def DO(mat_filename, storename, num_leaves, max_depth, option, boosting_type):
    frac = FRAC
    print('------------------------------------------------')
    print('start...')
    print('fraction:', frac)
    print('prepare predictors, categorical and target...')
    predictors = PREDICTORS
    print(predictors)
    categorical = get_categorical(predictors)
    target = TARGET

    subfilename = yearmonthdate_string + '_' + str(len(predictors)) + \
        'features_' + boosting_type + '_cv_' + str(int(100 * frac)) + \
        'percent_full_%d_%d' % (num_leaves, max_depth) + \
        '_OPTION' + str(option) + '.csv.gz'
    modelfilename = yearmonthdate_string + '_' + str(len(predictors)) + \
        'features_' + boosting_type + '_cv_' + str(int(100 * frac)) + \
        'percent_full_%d_%d' % (num_leaves, max_depth) + \
        '_OPTION' + str(option)

    print('----------------------------------------------------------')
    print('SUMMARY:')
    print('----------------------------------------------------------')
    print('predictors:', predictors)
    print('number of predictors: {} \n'.format(len(predictors)))
    print('categorical:', categorical)
    print('number of categorical: {} \n'.format(len(categorical)))
    print('target: {} \n'.format(target))
    print('submission file name: {} \n'.format(subfilename))
    print('model file name: {} \n'.format(modelfilename))
    print('----------------------------------------------------------')

    train_df = read_processed_h5(storename, predictors + target, categorical)
    print(train_df.info())
    print(train_df.head())
    # log-transform price; the small offset keeps log() defined at zero
    train_df['price'] = np.log(train_df['price'] + 0.001)
    train_df['price'].fillna(-999, inplace=True)
    # train_df['image_top_1'].fillna(-999, inplace=True)
    print(train_df.head())
    print(train_df.info())
    # optionally subsample the training data:
    # train_df = train_df.sample(frac=frac, random_state=SEED)
    print_memory('after reading train:')
    print(train_df.head())
    print('train size: ', len(train_df))
    gc.collect()

    print_doing('cleaning train...')
    train_df_array = train_df[predictors].values
    # deal_probability is continuous in [0, 1]; casting to int would
    # truncate it to zeros, so keep the labels as floats
    train_df_labels = train_df[target].values.astype('float32').flatten()
    del train_df
    gc.collect()
    print_memory()

    print_doing('reading text matrix')
    train_mat_text, tfvocab = get_text_matrix(mat_filename, 'train', DEBUG,
                                              train_df_array.shape[0])
    print_memory()

    print_doing('stacking the two matrices')
    train_df_array = hstack([csr_matrix(train_df_array), train_mat_text])
    print_memory()
    new_predictors = tfvocab
    predictors = predictors + new_predictors
    del train_mat_text
    gc.collect()

    print('----------------------------------------------------------')
    print('Training...')
    start_time = time.time()

    params = {
        'boosting_type': boosting_type,
        'objective': 'regression',
        'metric': 'rmse',
        'learning_rate': 0.02,
        'num_leaves': num_leaves,  # should stay below 2^(max_depth)
        'max_depth': max_depth,    # -1 means no limit
        'subsample': 0.9,          # subsample ratio of the training instances
        'subsample_freq': 1,       # frequency of subsample; <=0 disables it
        'feature_fraction': 0.9,   # subsample ratio of columns per tree
        # 'min_child_weight': 0,       # minimum sum of instance weight (hessian) in a leaf
        # 'subsample_for_bin': 200000, # number of samples for constructing bins
        # 'min_split_gain': 0,         # lambda_l1, lambda_l2 and min_gain_to_split regularization
        # 'reg_alpha': 10,             # L1 regularization term on weights
        # 'reg_lambda': 0,             # L2 regularization term on weights
        'nthread': 4,
        'verbose': 0
    }

    print('>> prepare dataset...')
    dtrain_lgb = lgb.Dataset(train_df_array, label=train_df_labels,
                             feature_name=predictors,
                             categorical_feature=categorical)
    del train_df_array, train_df_labels
    gc.collect()
    print_memory()
    print(params)

    print('>> start cv...')
    cv_results = lgb.cv(params,
                        dtrain_lgb,
                        categorical_feature=categorical,
                        num_boost_round=20000,
                        metrics='rmse',
                        seed=SEED,
                        shuffle=False,
                        nfold=10,
                        show_stdv=True,
                        early_stopping_rounds=100,
                        verbose_eval=50)
    print('[{}]: model training time'.format(time.time() - start_time))
    print_memory()
    # print(cv_results)

    print('--------------------------------------------------------------------')
    # train for the number of rounds cv selected via early stopping
    num_boost_rounds_lgb = len(cv_results['rmse-mean'])
    print('num_boost_rounds_lgb=' + str(num_boost_rounds_lgb))

    print('>> start training... ')
    model_lgb = lgb.train(params,
                          dtrain_lgb,
                          num_boost_round=num_boost_rounds_lgb,
                          feature_name=predictors,
                          categorical_feature=categorical)
    del dtrain_lgb
    gc.collect()

    print('--------------------------------------------------------------------')
    print('>> save model...')
    # save model to file
    model_lgb.save_model(modelfilename + '.txt')
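# Hedged end-to-end sketch (assumed wiring, not from the original repo):
# reload the model that DO() saved and write a submission with predict_sub().
# mat_filename/dir_feature are assumed to match the artifacts built earlier
# in the pipeline; lgb.Booster(model_file=...) is the standard LightGBM
# loading API.
def make_submission(modelfilename, mat_filename, dir_feature, subfilename):
    X, y, testing, tfvocab, tabular_cols, testdex = prepare_training(
        mat_filename, dir_feature, PREDICTORS, is_textadded=True)
    model_lgb = lgb.Booster(model_file=modelfilename + '.txt')
    predict_sub(model_lgb, testdex, testing, subfilename)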