def traditional_surv_analysis(datas, opts):
    """Train and score classical survival-analysis baselines.

    Parameters
    ----------
    datas : dict
        Maps split names ("train", "test", and optionally "val") to dataset
        objects exposing tensor attributes ``xs`` (features) and ``ys``
        (assumed columns: [time, event] — TODO confirm against the loader)
        that support ``.numpy()`` conversion.
    opts : object
        Options namespace; only ``opts.random_seed`` is read here, to seed
        the stochastic estimators.

    Returns
    -------
    tuple[dict, dict]
        ``(train_scores, test_scores)`` — estimator name mapped to its
        ``score`` (concordance index for sksurv estimators) on each split.
    """
    # Tidy data as ndarrays.
    train_X, train_Y = datas["train"].xs.numpy(), datas["train"].ys.numpy()
    test_X, test_Y = datas["test"].xs.numpy(), datas["test"].ys.numpy()
    if "val" in datas:
        # Fold the validation split into training data for these baselines.
        # FIX: convert the val tensors with .numpy() like every other split,
        # instead of mixing raw tensors into np.concatenate.
        train_X = np.concatenate([train_X, datas["val"].xs.numpy()])
        train_Y = np.concatenate([train_Y, datas["val"].ys.numpy()])

    # Build sksurv structured arrays: (event indicator, observed time).
    train_Y = Surv.from_arrays(train_Y[:, 1].astype("bool"), train_Y[:, 0])
    test_Y = Surv.from_arrays(test_Y[:, 1].astype("bool"), test_Y[:, 0])

    # Construct the baseline estimators.
    estimators = {
        "CoxPH": CoxPHSurvivalAnalysis(),
        "CGBSA": CGBSA(n_estimators=500, random_state=opts.random_seed),
        "GBSA": GBSA(n_estimators=500, random_state=opts.random_seed),
        "FKSVM": FKSVM(random_state=opts.random_seed),
        "FSVM": FSVM(random_state=opts.random_seed),
    }

    # Training.
    for name, estimator in estimators.items():
        print("%s training." % name)
        estimator.fit(train_X, train_Y)

    # Evaluation on both splits.
    train_scores = {}
    test_scores = {}
    for name, estimator in estimators.items():
        print("%s evaluation." % name)
        train_scores[name] = estimator.score(train_X, train_Y)
        test_scores[name] = estimator.score(test_X, test_Y)

    return train_scores, test_scores
verbose=args.verbose, minibatch_frac=1.0, Base=base_name_to_learner[args.base], Score=eval(args.score), ) train_losses = ngb.fit(X_train, Y_train, E_train) forecast = ngb.pred_dist(X_test) train_forecast = ngb.pred_dist(X_train) print("NGB score: %.4f (val), %.4f (train)" % ( concordance_index_censored(E_test.astype(bool), Y_test, -forecast.mean())[0], concordance_index_censored(E_train.astype(bool), Y_train, -train_forecast.mean())[0], )) ## ## sksurv ## gbsa = GBSA( n_estimators=args.n_est, learning_rate=args.lr, subsample=args.minibatch_frac, verbose=args.verbose, ) gbsa.fit(X_train, Y_join(Y_train, E_train)) print("GBSA score: %.4f (val), %.4f (train)" % ( gbsa.score(X_test, Y_join(Y_test, E_test)), gbsa.score(X_train, Y_join(Y_train, E_train)), ))