def main():
    """Load the configured feature sets and run training or inference per data type.

    NOTE(review): `args` and `phase` are read as module-level globals — neither is
    defined in this chunk; confirm they are set before main() is called.
    """
    log_args(args)
    feature_loader = Loader(
        deep_feature_dir=args["deep_feature_dir"],
        texture_feature_dir=args["texture_feature_dir"],
        clinical_feature_file=args["snuh_brmh_clinic_feature_file"],
        oneside=args["oneside"],
        label_file=args["label_file"],
    )
    for data_type in args["data_type"]:
        if phase == "train":
            features, labels = feature_loader.get_data(data_type)
            # Shallow copy so the Trainer cannot mutate the shared args dict.
            trainer = Trainer(dict(**args))
            trainer.run(features, labels)
        elif phase == "test":
            features, subject_ids = feature_loader.get_data(data_type)
            inferencer = Inferencer(
                dict(subjects=subject_ids, input_x=features, test_type=data_type, **args)
            )
            inferencer.run()
def run(*, src: str, schema: str) -> None:
    """Validate the YAML document at *src* against the JSON-schema file *schema*.

    For every validation error, print the error, the YAML source position of the
    offending key, and an annotated listing of *src* with an arrow at that line.

    Fixes vs. original: `yaml.safe_load` instead of bare `yaml.load` (which is
    deprecated without an explicit Loader and can construct arbitrary objects);
    explicit check instead of `assert` (stripped under ``-O``); the loaded schema
    no longer shadows the *schema* path parameter.
    """
    with open(schema) as rf:
        # The schema file is plain data, so the safe loader is sufficient.
        schema_data = yaml.safe_load(rf)

    with open(src) as rf:
        loader = Loader(rf)
        try:
            # Raise the same exception type the original `assert` produced,
            # but survive python -O.
            if not loader.check_data():
                raise AssertionError("loader.check_data() failed")
            data = loader.get_data()
        finally:
            loader.dispose()

    jsonschema.Draft4Validator.check_schema(schema_data)
    validator = jsonschema.Draft4Validator(schema_data)
    for err in validator.iter_errors(data):
        print("E", err)
        a = Accessor()
        path = list(err.path)
        ob = a.access(data, path[:-1])
        # NOTE(review): `mem` appears to be a module-level map from id(node) to
        # YAML node events, populated elsewhere — confirm it is in scope here.
        ev = mem[id(ob)]
        for kev, vev in ev.value:
            if kev.value == path[-1]:
                print("----------------------------------------")
                print(str(vev.start_mark).lstrip())
                lineno = vev.start_mark.line + 1
                # Re-read the source to show the error in context.
                with open(src) as rf:
                    for i, line in enumerate(rf, 1):
                        if lineno == i:
                            print(f" {i:02d}: -> {line}", end="")
                        else:
                            print(f" {i:02d}: {line}", end="")
                break
def encode_and_decode(mode_auto, exp_condition):
    """Train an autoencoder ("AE" or "CAE") on loaded images, then encode them.

    Parameters
    ----------
    mode_auto : str
        "CAE" (convolutional, keeps H×W×1 shape) or "AE" (dense, flattens to
        H*W). Any other value raises Exception.
    exp_condition : dict
        Must contain "load_dir", "train_rate", "weights_dir", "batch_size",
        "epochs".
    """
    # TF1-style session setup with on-demand GPU memory growth.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=config)
    keras.backend.set_session(sess)
    # NOTE(review): old_session is captured AFTER set_session(sess), so it is
    # `sess`, not the session that was active on entry — confirm this is the
    # intended session to restore at the end.
    old_session = KTF.get_session()
    session = tf.compat.v1.Session('')
    KTF.set_session(session)
    KTF.set_learning_phase(1)
    loader_ins = Loader(exp_condition["load_dir"])
    loader_ins.load(gray=True, size=(196, 136))  # size is (width, height)
    data = loader_ins.get_data(norm=True)  # shape (None, Height, Width)
    if mode_auto == "CAE":
        # Convolutional AE: add a trailing channel axis.
        input_shape = (data.shape[1], data.shape[2], 1)
        data = np.reshape(data, (data.shape[0], data.shape[1], data.shape[2], 1))  # (None, Height, Width, 1)
    elif mode_auto == "AE":
        # Dense AE: flatten each image to a vector.
        input_shape = (data.shape[1]*data.shape[2],)
        data = np.reshape(data, (data.shape[0], data.shape[1]*data.shape[2],))  # (None, Height*Width)
    else:
        raise Exception
    # Plain head/tail split (no shuffling) by train_rate.
    x_train = data[:int(len(data) * exp_condition["train_rate"])]
    x_val = data[int(len(data) * exp_condition["train_rate"]):]
    train_auto(mode_auto=mode_auto,
               x_train=x_train,
               x_val=x_val,
               input_shape=input_shape,
               weights_dir=exp_condition["weights_dir"],
               batch_size=exp_condition["batch_size"],
               verbose=1,
               epochs=exp_condition["epochs"],
               num_compare=2
               )
    # Reload (unreshaped) data and encode it with the newest trained weights.
    data = loader_ins.get_data(norm=True)
    model_name = get_latest_modified_file_path(exp_condition["weights_dir"])
    print(model_name, "をモデルとして分散表現化します.")
    img2vec(data, model_name, mode_auto=mode_auto, mode_out="hwf")
    KTF.set_session(old_session)
# --- script fragment: begins mid-flow; `data`, `args`, `proc`, `X`, and `y`
#     are defined earlier in the full file (not visible in this chunk). ---
y = proc.get_yvals(data, args.YCOL)  # processor xfolds
# Under-sampled variant of the dataset to counter class imbalance.
Xu, yu = proc.under_sample(data, args.YCOL)
Xu_train, Xu_test, yu_train, yu_test = proc.cross_validation_sets(
    Xu, yu, .3, 0)
X_train, X_test, y_train, y_test = proc.cross_validation_sets(X, y, .3, 0)
if args.LR_DRIVE:
    lin = LogReg()
    # under sampled data
    c = lin.printing_Kfold_scores(Xu_train, yu_train)
    lin.logistic_regression(Xu_train, Xu_test, yu_train, yu_test, c)
    # NOTE(review): mixes under-sampled train with the full test split —
    # confirm this cross-evaluation is intentional.
    lin.logistic_regression(Xu_train, X_test, yu_train, y_test, c)
    lin.get_roc_curve(Xu_train, Xu_test, yu_train, yu_test, c)
    # regular data
    c = lin.printing_Kfold_scores(X_train, y_train)
    lin.logistic_regression(X_train, X_test, y_train, y_test, c)
    lin.get_roc_curve(X_train, X_test, y_train, y_test, c)
if args.SVM_DRIVE:
    sv = SVM()
    sv.svm_run(Xu_train, Xu_test, yu_train, yu_test)
if args.SVML_DRIVE:
    sv = SVM()
    sv.svm_run(X_train, X_test, y_train, y_test)
loader = Loader('AAPL', '2016-11-01', '2016-11-30')
aapl = loader.get_data('AAPL')
# Fix: Python-2 `print aapl.data` statement converted to a print() call for
# consistency with the Python-3 syntax used elsewhere in this codebase.
print(aapl.data)
"""Smoke-test script for the Logging, Symbol, and Loader helpers.

Fix: Python-2 print statements converted to print() calls for consistency
with the Python-3 syntax used elsewhere in this codebase (output unchanged).
"""
from logger import Logging
from symbol import Symbol
from loader import Loader

print("***** logging test *****")
log = Logging()
log.error("missing symbol")
log.info("missing symbol")
log.refresh("missing symbol")
log.buy("missing symbol")
log.profit("missing symbol")
log.terminate("missing symbol")

print("***** symbol test *****")
s = Symbol('AMD')
s.market_cap()
print(s.market_cap)
s.earnings_per_share()
print(s.eps)

print("***** loader test *****")
load = Loader('AMD', '2016-11-01', '2016-11-21')
amd = load.get_data('AMD')
amd.book_value()
print(amd.book)
print(load.data_to_csv('AMD'))
# --- fragment: the lines below continue a function body whose `def` is above
#     this chunk; the indentation here is reconstructed, not original. ---
        # "hwf" path: flatten each (H, W) feature map pair into one vector.
        out = out.reshape((out.shape[0], out.shape[1] * out.shape[2]))
        np.save("distributed/{}.npy".format(mode_auto), out)
        print("分散表現のサイズは{}です.".format(out.shape))
        print("分散表現をnpy形式で保存しました.")
        return out
    else:
        raise Exception
else:
    raise Exception

# To inspect each layer's output (kept for debugging):
# for i, activation in enumerate(activations):
#     print("{}: {}".format(i, str(activation.shape)))

if __name__ == "__main__":
    # Choose the autoencoder variant; "AE" = dense, "CAE" = convolutional.
    mode_auto = "AE"
    # mode_auto = "CAE"
    mode_out = "hwf"
    load_dir = os.path.join(os.getcwd(), "imgs_param/ALL")
    model_name = "MODEL/auto/model_{}_auto.hdf5".format(mode_auto)
    loader_ins = Loader(load_dir)
    loader_ins.load(gray=True, size=(196, 136))  # size is (width, height)
    data = loader_ins.get_data(norm=True)
    output = img2vec(data, model_name, mode_out=mode_out, mode_auto=mode_auto)
    print(output.shape)
"""Smoke-test script for the Logging, Symbol, and Loader helpers.

Fix: Python-2 print statements converted to print() calls for consistency
with the Python-3 syntax used elsewhere in this codebase (output unchanged).
"""
from logger import Logging
from symbol import Symbol
from loader import Loader

print("***** logging test *****")
log = Logging()
log.error("missing symbol")
log.info("missing symbol")
log.refresh("missing symbol")
log.buy("missing symbol")
log.profit("missing symbol")
log.terminate("missing symbol")

print("***** symbol test *****")
s = Symbol('AMD')
s.market_cap()
print(s.market_cap)
s.earnings_per_share()
print(s.eps)

print("***** loader test *****")
load = Loader('AMD', '2016-11-01', '2016-11-21')
amd = load.get_data('AMD')
amd.book_value()
print(amd.book)
print(load.data_to_csv('AMD'))