# NOTE(review): this line looks like a whitespace-mangled paste — the original
# newlines were lost, so everything after the first '#' below is one huge comment
# that contains what used to be many lines of real code: log.info() calls,
# how_many_trains = 3 if train_time < 10 else 1, a training loop with
# tf.reset_default_graph() / tf.set_random_seed(7942), a build_graph(...) call,
# and an opening 'try:' whose except/finally is not visible here. None of that
# executes in this form. Recover the original multi-line file before editing;
# do NOT treat the trailing text as documentation.
# The only statement that actually executes on this line:
#   train_time = 10 * 60  -> training time budget, presumably in seconds
#                            (10 minutes) — TODO confirm the unit from the
#                            training loop that consumes it (not visible here).
train_time = 10 * 60 # 6. 끝까지 학습 (cost: 4-5) (6분) # # # 7. 좀더 줄이려면 어떻게 해야 할까요? (decay) log.info('%s -> %s -> %s -> %s -> %s' % (x_train.shape[1], n_hiddens, activation.__name__, n_hiddens, 1)) log.info('weights_initializer: %s' % weights_initializer.__name__) log.info('learning_rate: %.4f' % learning_rate) log.info('train_time: %s' % train_time) how_many_trains = 3 if train_time < 10 else 1 log.info('how_many_trains: %s' % how_many_trains) for _ in range(how_many_trains): time.sleep(1) tf.reset_default_graph() # Clears the default graph stack and resets the global default graph. tf.set_random_seed(7942) # 3. 결과를 규칙적으로 만들자. (cost: 600-700) scope_name = '%s.%s' % (func.__name__, DateUtil.current_yyyymmdd_hhmmss()) x, y, y_hat, cost, rsme, train_step, summary = build_graph(scope_name, n_features, n_hiddens, n_classes, learning_rate, activation=activation, weights_initializer=weights_initializer, bias_value=bias_value) try: watch = WatchUtil() model_file_saved = False model_file = os.path.join(MODELS_DIR, '%s_%s/model' % (os.path.basename(__file__.replace('.py', '')), func.__name__)) model_dir = os.path.dirname(model_file) # log.info('model_file: %s' % model_file) if not os.path.exists(model_dir): # log.info('model_dir: %s' % model_dir) os.makedirs(model_dir) config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) saver = tf.train.Saver()
# NOTE(review): same mangling as the line above — this is an alternate revision
# of the same training-loop fragment (print() instead of log.info(), a
# cwd-relative model path via os.getcwd(), 'train_time <= 1' instead of
# '< 10' for how_many_trains) collapsed onto one physical line. Everything
# after the first '#' below is a single comment containing dead code text,
# including an 'try:' with no visible except/finally. Recover the original
# multi-line file and reconcile the two revisions before editing.
# The only statement that actually executes on this line:
#   learning_rate = 0.001  -> optimizer learning rate; the trailing Korean note
#                             suggests Adam with 1e-3 or smaller was intended —
#                             TODO confirm against the optimizer in build_graph.
learning_rate = 0.001 # Adeam은 3승 이하 추천 / 590 -> 670으로 증가 | best/total 체크가 필요 : 431/431인 경우로 러닝 시간이 부족한 상황 train_time = 10 * 60 # best 10분 / cpu에서는 10배 정도 늦음 ### 최적화 테스트 : 끝 print('%s -> %s -> %s -> %s -> %s' % (x_train.shape[1], n_hiddens, activation.__name__, n_hiddens, 1)) print('weights_initializer: %s' % weights_initializer.__name__) print('learning_rate: %.4f' % learning_rate) print('train_time: %s' % train_time) how_many_trains = 3 if train_time <= 1 else 1 # 1초 실행하는 경우, 3번 실험 그 외에는 1번 실험. for _ in range(how_many_trains) : # time.sleep(1) tf.reset_default_graph() # 기존 session을 초기화 tf.set_random_seed(7942) # tf.random_normal_initializer 사용하기 때문에 설정 필요 scope_name = '%s.%s' % (func.__name__,DateUtil.current_yyyymmdd_hhmmss()) # graph 겹치지 않게 하기 위해서, func + 날짜 이름으로 설정하는 것을 추천 x, y, y_hat, cost, rsme, train_step, summary = build_graph(scope_name, n_features, n_hiddens, n_classes, learning_rate, activation=activation, weights_initializer=weights_initializer, bias_value=bias_value) try : watch = WatchUtil() model_file_saved = False model_file = os.path.join('%s/workspace/nlp4kor/models/%s_%s/model' % (os.getcwd(), os.path.basename(__name__.replace('.py', '')), func.__name__)) model_dir = os.path.dirname(model_file) # print('model_file: %s' % model_file) if not os.path.exists(model_dir): # print('model_dir: %s' % model_dir) os.makedirs(model_dir) config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)) saver = tf.train.Saver() # 최근 5개만 남개 되어서 max_to_keep=None 해야함