def transform_datetime(df: pd.DataFrame, config: Config):
    date_parts = ["year", "weekday", "month", "day", "hour"]

    if "date_columns" not in config:
        config["date_columns"] = {}

        for c in [c for c in df if c.startswith("datetime_")]:
            config["date_columns"][c] = []

            for part in date_parts:
                part_col = c + "_" + part
                df[part_col] = getattr(df[c].dt, part).astype(np.uint16 if part == "year" else np.uint8).values

                if not (df[part_col] != df[part_col].iloc[0]).any():
                    Log.print(part_col + " is constant")
                    df.drop(part_col, axis=1, inplace=True)
                else:
                    config["date_columns"][c].append(part)

            df.drop(c, axis=1, inplace=True)
    else:
        for c, parts in config["date_columns"].items():
            for part in parts:
                part_col = c + "_" + part
                df[part_col] = getattr(df[c].dt, part)

            df.drop(c, axis=1, inplace=True)
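# Quick illustration (synthetic data, not part of the pipeline) of the .dt
# accessor parts extracted above.
def _dt_parts_sketch():
    import pandas as pd

    s = pd.Series(pd.to_datetime(["2018-11-03 15:20", "2019-01-07 08:05"]))
    # year=[2018, 2019], weekday=[5, 0] (Monday=0), month=[11, 1],
    # day=[3, 7], hour=[15, 8]
    return {p: getattr(s.dt, p).tolist() for p in ["year", "weekday", "month", "day", "hour"]}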
def transform_categorical(df: pd.DataFrame, config: Config):
    if "categorical_columns" not in config:
        config["categorical_columns"] = []

        # https://www.kaggle.com/ogrellier/python-target-encoding-for-categorical-features
        prior = config["categorical_prior"] = df["target"].mean()
        min_samples_leaf = 10
        smoothing = 5

        config["categorical_columns_string"] = {}
        for c in [c for c in df if c.startswith("string_")]:
            Log.print(c)
            config["categorical_columns"].append(c)

            averages = df[[c, "target"]].groupby(c)["target"].agg(["mean", "count"])
            smooth = 1 / (1 + np.exp(-(averages["count"] - min_samples_leaf) / smoothing))
            averages["target"] = prior * (1 - smooth) + averages["mean"] * smooth
            config["categorical_columns_string"][c] = averages["target"].to_dict()

        config["categorical_columns_id"] = {}
        for c in [c for c in df if c.startswith("id_")]:
            Log.print(c)
            config["categorical_columns"].append(c)

            if df[c].dtype == str or df[c].dtype == object:
                config["categorical_columns_id"][c] = {v: i for i, v in enumerate(df[c].unique())}

    for c, values in config["categorical_columns_string"].items():
        df.loc[:, c] = df[c].apply(lambda x: values[x] if x in values else config["categorical_prior"])

    for c, values in config["categorical_columns_id"].items():
        df.loc[:, c] = df[c].apply(lambda x: values[x] if x in values else -1)
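# A minimal sketch (not from this repo; toy data and names are assumptions) of
# the smoothed target encoding above: rare categories stay near the global
# prior, frequent ones move toward their own mean.
def _target_encoding_sketch():
    import numpy as np
    import pandas as pd

    toy = pd.DataFrame({"string_0": ["a"] * 12 + ["b"], "target": [1, 0] * 6 + [1]})
    prior = toy["target"].mean()
    averages = toy.groupby("string_0")["target"].agg(["mean", "count"])
    # With min_samples_leaf=10 and smoothing=5: count=12 gives smooth~0.60,
    # count=1 gives smooth~0.14, so the rare category "b" is pulled toward the prior.
    smooth = 1 / (1 + np.exp(-(averages["count"] - 10) / 5))
    return prior * (1 - smooth) + averages["mean"] * smooth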
def train_lightgbm(X: pd.DataFrame, y: pd.Series, config: Config):
    params = {
        "objective": "regression" if config.is_regression() else "binary",
        "metric": "rmse" if config.is_regression() else "auc",
        "verbosity": -1,
        "seed": 1,
    }

    X_sample, y_sample = data_sample(X, y, config, nrows=20000)
    hyperparams = hyperopt_lightgbm(X_sample, y_sample, params, config)

    X_train, X_val, y_train, y_val = data_split(X, y, config)

    # First pass: train with early stopping on a holdout to find best_iteration.
    config["model"] = lgb.train(
        {**params, **hyperparams},
        lgb.Dataset(X_train, label=y_train),
        5000,
        lgb.Dataset(X_val, label=y_val),
        early_stopping_rounds=100,
        verbose_eval=100,
    )
    config.save()

    # Second pass: retrain on all data for 1.2x the best iteration count,
    # bounded by the remaining time budget.
    try:
        with time_limit(config.time_left() - 10):
            config["model"] = lgb.train(
                {**params, **hyperparams},
                lgb.Dataset(X, label=y),
                int(1.2 * config["model"].best_iteration),
            )
    except TimeoutException:
        Log.print("Timed out!")
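# time_limit and TimeoutException are used above but not defined in this
# section. A minimal signal-based sketch (an assumption; Unix-only, main
# thread only) that matches how they are used:
import signal
from contextlib import contextmanager


class TimeoutException(Exception):
    pass


@contextmanager
def time_limit(seconds):
    def handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.signal(signal.SIGALRM, handler)
    signal.alarm(max(1, int(seconds)))
    try:
        yield
    finally:
        signal.alarm(0)  # always cancel the pending alarm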
def drop_constant_columns(df: pd.DataFrame, config: Config):
    if "constant_columns" not in config:
        config["constant_columns"] = [c for c in df
                                      if c.startswith("number_") and not (df[c] != df[c].iloc[0]).any()]
        Log.print("Constant columns: {}".format(config["constant_columns"]))

    if len(config["constant_columns"]) > 0:
        df.drop(config["constant_columns"], axis=1, inplace=True)
def validate_dataset(alias: str, mode: str, train_limit: int) -> np.float64:
    Log.print(alias)

    automl = AutoML("models/check_{}".format(alias))
    automl.config["time_limit"] = train_limit
    automl.train("data/check_{}/train.csv".format(alias), mode)

    automl.config["time_limit"] = 300
    automl.config["start_time"] = time.time()
    _, score = automl.predict("data/check_{}/test.csv".format(alias), "predictions/check_{}.csv".format(alias))

    return score
def subsample(df: pd.DataFrame, config: Config, max_size_mb: float = 2.0):
    if config.is_train():
        df_size_mb = df.memory_usage(deep=True).sum() / 1024 / 1024
        if df_size_mb > max_size_mb:
            mem_per_row = df_size_mb / len(df)
            sample_rows = int(max_size_mb / mem_per_row)

            Log.print("Size limit exceeded: {:0.2f} Mb. Dataset rows: {}. Subsample to {} rows.".format(
                df_size_mb, len(df), sample_rows))
            _, df_drop = train_test_split(df, train_size=sample_rows, random_state=1)
            df.drop(df_drop.index, inplace=True)

            config["nrows"] = sample_rows
        else:
            config["nrows"] = len(df)
def to_int8(df: pd.DataFrame, config: Config):
    if "int8_columns" not in config:
        config["int8_columns"] = []
        vals = [-1, 0, 1]

        for c in [c for c in df if c.startswith("number_")]:
            if (~df[c].isin(vals)).any():
                continue
            config["int8_columns"].append(c)

        Log.print("Num columns: {}".format(len(config["int8_columns"])))

    if len(config["int8_columns"]) > 0:
        df.loc[:, config["int8_columns"]] = df.loc[:, config["int8_columns"]].astype(np.int8)
def hyperopt_lightgbm(X: pd.DataFrame, y: pd.Series, params: Dict, config: Config):
    X_train, X_val, y_train, y_val = data_split(X, y, config, test_size=0.5)
    train_data = lgb.Dataset(X_train, label=y_train)
    valid_data = lgb.Dataset(X_val, label=y_val)

    space = {
        "learning_rate": hp.choice("learning_rate", np.arange(0.01, 0.05, 0.01)),
        "boost_from_average": hp.choice("boost_from_average", [True, False]),
        "is_unbalance": hp.choice("is_unbalance", [True, False]),
        "zero_as_missing": hp.choice("zero_as_missing", [True, False]),
        "max_depth": hp.choice("max_depth", [-1, 2, 3, 4, 5, 6, 7]),
        "num_leaves": hp.choice("num_leaves", [11, 31, 51, 101, 151, 201]),
        "feature_fraction": hp.choice("feature_fraction", np.arange(0.5, 1.0, 0.1)),
        "bagging_fraction": hp.choice("bagging_fraction", np.arange(0.5, 1.0, 0.1)),
        "bagging_freq": hp.choice("bagging_freq", [1, 3, 5, 10, 20, 50]),
        "reg_alpha": hp.uniform("reg_alpha", 0, 10),
        "reg_lambda": hp.uniform("reg_lambda", 0, 10),
        "min_child_weight": hp.uniform("min_child_weight", 0, 10),
    }

    config.limit_time_fraction(0.15)

    def objective(hyperparams):
        if config.is_time_fraction_limit():
            score = np.inf if config.is_regression() else 0
            return {'loss': score, 'status': STATUS_OK}

        model = lgb.train({**params, **hyperparams}, train_data, 300, valid_data,
                          early_stopping_rounds=100, verbose_eval=False)
        score = model.best_score["valid_0"][params["metric"]]
        Log.print(score)

        # hyperopt minimizes the loss, so negate AUC for classification.
        if config.is_classification():
            score = -score

        return {'loss': score, 'status': STATUS_OK}

    trials = Trials()
    best = hyperopt.fmin(fn=objective, space=space, trials=trials, algo=tpe.suggest,
                         max_evals=100, verbose=1, rstate=np.random.RandomState(1))
    hyperparams = space_eval(space, best)
    Log.print("{:0.4f} {}".format(trials.best_trial['result']['loss'], hyperparams))

    return hyperparams
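# A minimal sketch (an assumption, not from this repo) of the fmin/space_eval
# round-trip used above: fmin returns *indices* for hp.choice parameters, and
# space_eval maps them back to the actual values.
def _hyperopt_roundtrip_sketch():
    import numpy as np
    from hyperopt import fmin, hp, tpe, Trials, space_eval, STATUS_OK

    space = {"x": hp.choice("x", [0.1, 0.2, 0.3])}
    trials = Trials()
    best = fmin(fn=lambda p: {"loss": (p["x"] - 0.2) ** 2, "status": STATUS_OK},
                space=space, algo=tpe.suggest, max_evals=10,
                trials=trials, rstate=np.random.RandomState(1))
    return space_eval(space, best)  # e.g. {"x": 0.2}, not an index like {"x": 1}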
def feature_selection(df: pd.DataFrame, config: Config):
    if config.is_train():
        df_size_mb = df.memory_usage(deep=True).sum() / 1024 / 1024
        # Only run feature selection for datasets larger than 2 Gb.
        if df_size_mb < 2 * 1024:
            return

        selected_columns = []
        config_sample = copy.deepcopy(config)
        config.limit_time_fraction(0.1)

        for i in range(20):
            if config.is_time_fraction_limit():
                break

            df_sample = df.sample(min(3000, len(df)), random_state=i).copy()
            transform(df_sample, config_sample)
            y = df_sample["target"]
            X = df_sample.drop("target", axis=1)

            if len(selected_columns) > 0:
                X = X.drop(selected_columns, axis=1)

            if len(X.columns) > 0:
                selected_columns += select_features(X, y, config["mode"])
            else:
                break

        Log.print("Selected columns: {}".format(selected_columns))

        drop_number_columns = [c for c in df if c.startswith("number_") and c not in selected_columns]
        if len(drop_number_columns) > 0:
            config["drop_number_columns"] = drop_number_columns

        config["date_columns"] = {}
        for c in [c for c in selected_columns if c.startswith("datetime_")]:
            d = c.split("_")
            date_col = d[0] + "_" + d[1]
            date_part = d[2]

            if date_col not in config["date_columns"]:
                config["date_columns"][date_col] = []

            config["date_columns"][date_col].append(date_part)

        drop_datetime_columns = [c for c in df if c.startswith("datetime_") and c not in config["date_columns"]]
        if len(drop_datetime_columns) > 0:
            config["drop_datetime_columns"] = drop_datetime_columns

    if "drop_number_columns" in config:
        Log.print("Drop number columns: {}".format(config["drop_number_columns"]))
        df.drop(config["drop_number_columns"], axis=1, inplace=True)

    if "drop_datetime_columns" in config:
        Log.print("Drop datetime columns: {}".format(config["drop_datetime_columns"]))
        df.drop(config["drop_datetime_columns"], axis=1, inplace=True)
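# select_features is referenced above but not defined in this section. A rough
# sketch of one way it could work (an assumption, not the repo's actual helper):
# fit a quick LightGBM model and keep columns with positive gain importance.
def _select_features_sketch(X, y, mode):
    import lightgbm as lgb
    import pandas as pd

    params = {"objective": "regression" if mode == "regression" else "binary",
              "verbosity": -1, "seed": 1}
    model = lgb.train(params, lgb.Dataset(X, label=y), 30)
    fi = pd.Series(model.feature_importance(importance_type="gain"), index=X.columns)
    return fi[fi > 0].index.tolist()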
pre="sz" else: pre="sh" symbol=pre+strNum formatDate=time.strftime("%Y%m%d",time.strptime(date,"%Y%m%d")) url=detailSource.get("qq2") %(symbol, formatDate) return url def handleDetail(code,date,parser , conf): url=getUrl(num = code,date = date,conf=conf) fetchData=Fetcher.fetch(url) parseData=parser.parse(fetchData) path = Dumper.getPath(code = code , date = date ,dataType = "detail" ,conf = conf) Dumper.dump(path = path,data = parseData) return True if __name__ == '__main__': import doctest SYS_HOME=os.path.join(__dir__,"..","..","..") sys.path.insert(0,SYS_HOME) from lib.util import Conf,Log conf=Conf.load(\ os.path.join(SYS_HOME,"conf","stock.yaml"), os.path.join(SYS_HOME,"conf","downloader.yaml")) Fetcher.DEBUG=True logPath=os.path.join(SYS_HOME , conf.get("LOG_PATH","")) Log.set(logPath = logPath , printLevel=logging.DEBUG) conf["SYS_HOME"]=SYS_HOME conf["DEBUG"]=True doctest.testmod()
""" """ init """ import os,sys,logging from lib.stk.element import Stock,Date,Dumper from lib.util import Log,Conf __dir__ = os.path.realpath(os.path.dirname(__file__)) APP_HOME=os.path.join(__dir__,"..") SYS_HOME=os.path.join(APP_HOME,"..") SYS_LIB_HOME=os.path.join(SYS_HOME,"lib") sys.path.insert(0,SYS_HOME) conf=Conf.load(os.path.join(SYS_HOME,"conf","stock.yaml"),os.path.join(SYS_HOME,"conf","downloader.yaml")) conf["SYS_HOME"] = SYS_HOME logPath=os.path.join(SYS_HOME , conf.get("LOG_PATH","")) Log.set(logPath = logPath,printLevel = logging.ERROR) stock = Stock(conf) ###################### TRAIN """ 训练 整体 statistics 和 strategic likelihood 用 element/statistics来载入这些数据 """ TRAIN_DATA_PATH = os.path.join(SYS_HOME,"data","train") #train all days data and gaussian data from lib.stk.statistics.Gaussian import Gaussian subject = [[-0.1,-0.09],[-0.09,-0.08],[-0.08,-0.07],[-0.07,-0.06],[-0.06,-0.05],[-0.05,-0.04],[-0.04,-0.03],[-0.03,-0.02],[-0.02,-0.01],[-0.01,0],[0,0.01],[0.01,0.02],[0.02,0.03],[0.03,0.04],[0.04,0.05],[0.05,0.06],[0.06,0.07],[0.07,0.08],[0.08,0.09],[0.09,0.1]] # strategic likelihood from lib.stk.element.Strategic import Strategic
def preview_df(train_csv: str, config: Config, nrows: int = 3000):
    with open(train_csv) as f:
        num_rows = sum(1 for line in f) - 1
    Log.print("Rows in train: {}".format(num_rows))

    df = pd.read_csv(train_csv, encoding="utf-8", low_memory=False, nrows=nrows)
    mem_per_row = df.memory_usage(deep=True).sum() / nrows
    Log.print("Memory per row: {:0.2f} Kb".format(mem_per_row / 1024))

    df_size = (num_rows * mem_per_row) / 1024 / 1024
    Log.print("Approximate dataset size: {:0.2f} Mb".format(df_size))

    config["parse_dates"] = []
    config["dtype"] = {
        "line_id": int,
    }

    counters = {
        "id": 0,
        "number": 0,
        "string": 0,
        "datetime": 0,
    }

    for c in df:
        if c.startswith("number_"):
            counters["number"] += 1
        elif c.startswith("string_"):
            counters["string"] += 1
            config["dtype"][c] = str
        elif c.startswith("datetime_"):
            counters["datetime"] += 1
            config["dtype"][c] = str
            config["parse_dates"].append(c)
        elif c.startswith("id_"):
            counters["id"] += 1

    Log.print("Number columns: {}".format(counters["number"]))
    Log.print("String columns: {}".format(counters["string"]))
    Log.print("Datetime columns: {}".format(counters["datetime"]))

    config["counters"] = counters
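# A plausible consumer of the config assembled above (an assumption; the actual
# read happens elsewhere in this pipeline): pandas applies the collected dtypes
# and parses the detected datetime columns at load time.
def _read_with_preview_config_sketch(train_csv, config):
    import pandas as pd

    return pd.read_csv(train_csv, encoding="utf-8", low_memory=False,
                       dtype=config["dtype"], parse_dates=config["parse_dates"])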
def time_series_detect(df: pd.DataFrame, config: Config):
    sample_size = 10000
    model_params = {
        "objective": "regression" if config["mode"] == "regression" else "binary",
        "metric": "rmse" if config["mode"] == "regression" else "auc",
        "learning_rate": 0.01,
        "verbosity": -1,
        "seed": 1,
        "max_depth": -1,
    }

    if config.is_train():
        datetime_columns = [c for c in df if c.startswith("datetime_")]
        id_columns = [c for c in df if c.startswith("id_")]

        sort_columns = []
        for dc in datetime_columns:
            sort_columns.append([dc])
            for ic in id_columns:
                sort_columns.append([ic, dc])
        else:
            # for-else: runs once the loop over datetime columns completes,
            # so id-only sort candidates are always added as well.
            for ic in id_columns:
                sort_columns.append([ic])

        scores = []
        config.limit_time_fraction(0.1)

        for sc in sort_columns:
            if config.is_time_fraction_limit():
                break

            Log.silent(True)
            df.sort_values(sc, inplace=True)

            config_sample = copy.deepcopy(config)
            df_sample = df.iloc[-sample_size:].copy() if len(df) > sample_size else df.copy()
            df_sample = df_sample[[c for c in df_sample if c.startswith("number_") or c == "target" or c in sc]]
            shift_columns(df_sample, group=sc[0] if len(sc) > 1 else None)
            transform(df_sample, config_sample)

            y = df_sample["target"]
            X = df_sample.drop("target", axis=1)
            X_train, X_test, y_train, y_test = ts_split(X, y, test_size=0.5)

            # Model with lag features on the sorted data...
            model_sorted = lgb.train(model_params, lgb.Dataset(X_train, label=y_train), 3000,
                                     lgb.Dataset(X_test, label=y_test),
                                     early_stopping_rounds=100, verbose_eval=False)
            score_sorted = model_sorted.best_score["valid_0"][model_params["metric"]]

            # ...versus the same model without the lag features.
            sampled_columns = [c for c in X if "_shift" not in c]
            model_sampled = lgb.train(model_params, lgb.Dataset(X_train[sampled_columns], label=y_train), 3000,
                                      lgb.Dataset(X_test[sampled_columns], label=y_test),
                                      early_stopping_rounds=100, verbose_eval=False)
            score_sampled = model_sampled.best_score["valid_0"][model_params["metric"]]

            if config.is_classification():
                score_sorted = -score_sorted
                score_sampled = -score_sampled

            Log.silent(False)
            Log.print("Sort: {}. Score sorted: {:0.4f}. Score sampled: {:0.4f}".format(sc, score_sorted, score_sampled))

            score_ratio = score_sampled / score_sorted if config.is_regression() else abs(score_sorted / score_sampled)
            if score_ratio >= 1.03:
                Log.print(score_ratio)
                scores.append((score_sorted, sc))

        if len(scores) > 0:
            scores = sorted(scores, key=lambda x: x[0])
            Log.print("Scores: {}".format(scores))
            config["sort_values"] = scores[0][1]
            df.sort_values(config["sort_values"], inplace=True)

            config_sample = copy.deepcopy(config)
            df_sample = df.iloc[-sample_size:].copy() if len(df) > sample_size else df.copy()
            shift_columns(df_sample, group=config["sort_values"][0] if len(config["sort_values"]) > 1 else None)
            transform(df_sample, config_sample)

            y = df_sample["target"]
            X = df_sample.drop("target", axis=1)

            model = lgb.train(model_params, lgb.Dataset(X, label=y), 1000)
            fi = pd.Series(model.feature_importance(importance_type="gain"), index=X.columns)
            fi = fi[fi > 0].sort_values()
            selected_columns = fi[fi >= fi.quantile(0.75)].index.tolist()

            selected_shift_columns = [c.replace("_shift", "") for c in selected_columns if "_shift" in c]
            if len(selected_shift_columns) > 0:
                Log.print("Shift columns: {}".format(selected_shift_columns))
                config["shift_columns"] = selected_shift_columns

    if "shift_columns" in config:
        shift_columns(df, group=config["sort_values"][0] if len(config["sort_values"]) > 1 else None,
                      number_columns=config["shift_columns"])
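# shift_columns is used above but not defined in this section. A minimal sketch
# of what such a helper might do (an assumption): append one-step-lagged copies
# of the number_ columns under a "_shift" suffix, lagging within `group` when
# one is given. The suffix matches the "_shift" checks above.
def _shift_columns_sketch(df, group=None, number_columns=None):
    cols = number_columns or [c for c in df if c.startswith("number_")]
    for c in cols:
        shifted = df.groupby(group)[c].shift(1) if group is not None else df[c].shift(1)
        df[c + "_shift"] = shifted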
automl.train("data/check_{}/train.csv".format(alias), mode) automl.config["time_limit"] = 300 automl.config["start_time"] = time.time() _, score = automl.predict("data/check_{}/test.csv".format(alias), "predictions/check_{}.csv".format(alias)) return score if __name__ == '__main__': scores = { "dataset": [], "score": [], "time": [], } for i, mode, train_limit in DATASETS: alias = "{}_{}".format(i, mode[0]) start_time = time.time() score = validate_dataset(alias, mode, train_limit) end_time = time.time() scores["dataset"].append(alias) scores["score"].append(score) scores["time"].append(end_time - start_time) scores = pd.DataFrame(scores) scores.to_csv("scores/{}.csv".format(int(time.time()))) Log.print(scores, nesting=False)
logging.error("No %s commands." % cmd_name) else: cmd = imp.load_module(cmd_name, fp, pathname, description) cmd.run(conf = conf) finally: if fp: fp.close() if __name__=="__main__" : if sys.argv[1:]: conf=Conf.load(\ os.path.join(SYS_HOME,"conf","stock.yaml"), os.path.join(SYS_HOME,"conf","downloader.yaml")) conf["SYS_HOME"]=SYS_HOME conf.update(parseArgs(conf)) Log.set(os.path.join(SYS_HOME,conf.get("LOG_PATH"))) action=conf.get("action","run") dates=conf.get("dates") #如果指定多个日期,则按顺序启动实例一个一个运行 stockCache = {} #@todo add stockCache if not dates or len(dates)==0: conf['date']=Date.getDate() execute(cmd_name = action,conf=conf) elif len(dates)>=1: for sDate in dates: conf["date"]=sDate if stockCache: conf['stock']=stockCache execute(cmd_name = action,conf=conf) else: print __doc__
            m = item
        else:
            m = (item - expection) / (deviation * (2 ** 0.5))

        # Normal CDF via the error function: Phi(x) = 0.5 * (1 + erf((x - mu) / (sigma * sqrt(2)))).
        # res = 0.5 * (1 + norm.cdf(m))
        res = 0.5 * (1 + special.erf(m))

        if preValue is not None:
            ret = res - preValue
        else:
            preValue = res
            ret = res

        return ret


if __name__ == '__main__':
    import doctest

    SYS_HOME = os.path.join(LIB_HOME, "..", "..")
    sys.path.insert(0, SYS_HOME)
    from lib.util import Log, Conf

    conf = Conf.load(
        os.path.join(SYS_HOME, "conf", "stock.yaml"),
        os.path.join(SYS_HOME, "conf", "downloader.yaml"))
    conf["SYS_HOME"] = SYS_HOME
    logPath = os.path.join(SYS_HOME, conf.get("LOG_PATH", ""))
    Log.set(logPath=logPath)

    gaussian = Gaussian()
    stock = Stock(conf)
    stock.date = '20111024'
    doctest.testmod()
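# Sanity-check sketch (an assumption, not repo code): the erf expression above
# is the standard normal-CDF identity, verified here against scipy.stats.norm.
def _cdf_identity_sketch():
    from scipy import special
    from scipy.stats import norm

    item, expection, deviation = 1.3, 1.0, 0.5  # names mirror the code above
    m = (item - expection) / (deviation * (2 ** 0.5))
    assert abs(0.5 * (1 + special.erf(m)) - norm.cdf(item, loc=expection, scale=deviation)) < 1e-12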
def validate(preds: pd.DataFrame, target_csv: str, mode: str) -> np.float64:
    # pandas does not allow combining on= with left_index=, so merge on the key only.
    df = pd.merge(preds, pd.read_csv(target_csv), on="line_id")

    score = roc_auc_score(df.target.values, df.prediction.values) if mode == "classification" else \
        np.sqrt(mean_squared_error(df.target.values, df.prediction.values))
    Log.print("Score: {:0.4f}".format(score))

    return score