# **Parameters**: # # - model_fn: 학습 및 예측에 사용할 모델 # - n_classes: label에 해당하는 클래스 수 (0: prediction, 1이상: classification) 확인필요 # - verbose: 과정 출력 # - steps: 학습 스텝 # - optimizer: 최적화 기법 ("SGD", "Adam", "Adagrad") # - learning_rate: learning rate # - batch_size: batch size # # # In[ ]: X, y = generate_data(np.sin, np.linspace(0, 100, 10000), TIMESTEPS, seperate=False) # create a lstm instance and validation monitor validation_monitor = learn.monitors.ValidationMonitor( X['val'], y['val'], every_n_steps=PRINT_STEPS, early_stopping_rounds=1000) # ## Generate a dataset # # 1. generate_data: 학습에 사용될 데이터를 특정 함수를 이용하여 만듭니다. # - fct: 데이터를 생성할 함수 # - x: 함수값을 관측할 위치 # - time_steps: 관측(observation) # - seperate: check multimodal # 1. ValidationMonitor: training 이후, validation 과정을 모니터링 # - x
# --- Hyperparameters / experiment configuration ---
LOG_DIR = 'resources/logs/'          # TensorBoard / checkpoint directory (currently unused, see commented model_dir below)
TIMESTEPS = 1                        # length of each input sequence window
RNN_LAYERS = [{'num_units': 400}]    # one LSTM layer with 400 units
DENSE_LAYERS = None                  # no dense layers after the RNN
TRAINING_STEPS = 500
PRINT_STEPS = TRAINING_STEPS # / 10
BATCH_SIZE = 100

# Wrap the Estimator in SKCompat for a scikit-learn-style fit/predict API.
# lstm_model(...) is defined elsewhere in the file and returns a model_fn.
regressor = SKCompat(
    learn.Estimator(model_fn=lstm_model(TIMESTEPS, RNN_LAYERS, DENSE_LAYERS), )) # model_dir=LOG_DIR)

# Regenerate the sine dataset, this time forcing float32 inputs.
# NOTE(review): this rebinds the X, y created earlier in the file.
X, y = generate_data(np.sin, np.linspace(0, 100, 10000, dtype=np.float32), TIMESTEPS, seperate=False)

# Gaussian noise (mean 0, std 0.2), one value per target sample.
# np.asmatrix yields a 1xN matrix; it is transposed to Nx1 below so it
# broadcasts against the target column vectors.
noise_train = np.asmatrix(np.random.normal(0, 0.2, len(y['train'])), dtype=np.float32)
noise_val = np.asmatrix(np.random.normal(0, 0.2, len(y['val'])), dtype=np.float32)
noise_test = np.asmatrix(np.random.normal(0, 0.2, len(y['test'])), dtype=np.float32)

#asmatrix
# Reshape 1xN -> Nx1 to match the shape of the target arrays.
noise_train = np.transpose(noise_train)
noise_val = np.transpose(noise_val)
noise_test = np.transpose(noise_test)

# Corrupt the targets with the noise (noise_test is presumably added to
# y['test'] just past this chunk -- confirm in the continuation).
y['train'] = np.add(y['train'], noise_train)
y['val'] = np.add(y['val'], noise_val)