# TensorBoard output: one writer for training metrics, one for validation.
    board_train = tf.summary.create_file_writer(
        logdir=os.path.join(args.tensorboard_dir, args.exper_name))
    board_valid = tf.summary.create_file_writer(
        logdir=os.path.join(args.tensorboard_dir, args.exper_name + "_valid"))
    # Make the training writer the default target for tf.summary ops.
    board_train.set_as_default()
    #board_valid.set_as_default()

    #================================
    # Load the dataset
    #================================
    # Set up the training and validation datasets.
    ds_train, ds_valid, n_trains, n_valids = load_dataset(
        args.dataset_dir,
        image_height=args.image_height,
        image_width=args.image_width,
        n_channels=3,
        batch_size=args.batch_size,
        use_tfrecord=args.use_tfrecord,
        seed=args.seed)
    if (args.debug):
        print("n_trains : ", n_trains)
        print("n_valids : ", n_valids)
        print("ds_train : ", ds_train)
        print("ds_valid : ", ds_valid)

    #================================
    # Define the model structure.
    #================================
    # NOTE(review): TempleteNetworks presumably produces a 3-channel (RGB)
    # output given out_dim=3 — confirm against its definition.
    model_G = TempleteNetworks(out_dim=3)
    if (args.debug):
        model_G(
        # NOTE(review): fragment truncated here — the rest of this call is
        # missing, and an unrelated code fragment follows below.
 # Load the dataset
 #================================
 # Set up the training and test datasets: either stream batches through a
 # data generator, or eagerly load all image arrays into memory.
 if (args.use_datagen):
     datagen_train = TempleteDataGen(
         dataset_dir=args.dataset_dir,
         datamode="train",
         image_height=args.image_height,
         image_width=args.image_width,
         batch_size=args.batch_size,
     )
 else:
     # Eager path: source (s) / target (t) image arrays for train and valid.
     image_s_trains, image_t_trains, image_s_valids, image_t_valids = load_dataset(
         args.dataset_dir,
         image_height=args.image_height,
         image_width=args.image_width,
         n_channels=3,
         batch_size=args.batch_size,
         seed=args.seed)
     if (args.debug):
         # NOTE(review): assumes the loaded values are numpy-like arrays
         # (they expose .shape/.dtype and work with np.min/np.max) — confirm.
         print("[image_s_trains] shape={}, dtype={}, min={}, max={}".format(
             image_s_trains.shape, image_s_trains.dtype,
             np.min(image_s_trains), np.max(image_s_trains)))
         print("[image_t_trains] shape={}, dtype={}, min={}, max={}".format(
             image_t_trains.shape, image_t_trains.dtype,
             np.min(image_t_trains), np.max(image_t_trains)))
         print("[image_s_valids] shape={}, dtype={}, min={}, max={}".format(
             image_s_valids.shape, image_s_valids.dtype,
             np.min(image_s_valids), np.max(image_s_valids)))
         print("[image_t_valids] shape={}, dtype={}, min={}, max={}".format(
             image_t_valids.shape, image_t_valids.dtype,
             # NOTE(review): fragment truncated here — the remainder of this
             # print call is missing from this view.
# Esempio n. 3  (scraper artifact: separator between unrelated code fragments)
# 0
# fetch model
# Resolve the agent class for the requested model name; every agent class
# shares the (params, logger, gpu) constructor signature.
_agent_classes = {
    'dae': DAE,
    'protonet': SupportQueryAgent,
    'dve': DVE,
    'daie': DAIE,
}
if args.model not in _agent_classes:
    raise NotImplementedError
agent = _agent_classes[args.model](params, logger, args.gpu)

# get dataset

# Load the dataset and unpack it according to the number of configured
# splits: 1 -> train only, 2 -> train/test, 3 -> train/valid/test.
dataset = load_dataset(params)

valid, test = None, None

n_splits = len(params['data']['split'])
if n_splits == 1:
    train = dataset['train']
elif n_splits == 2:
    train, test = dataset['train'], dataset['test']
elif n_splits == 3:
    train, valid, test = dataset['train'], dataset['valid'], dataset['test']
else:
    # Fail fast: previously an unexpected split count silently left `train`
    # unbound, which surfaced later as a confusing NameError.
    raise ValueError("unsupported number of dataset splits: {}".format(n_splits))

# start training
# Dispatch on the requested run type; 'train' optionally resumes from a
# pretrained checkpoint directory.
if args.type == 'train':
    if args.pretrain_dir is not None:
        # Presumably resumes/fine-tunes from weights in pretrain_dir — confirm
        # against the agent's start() implementation.
        agent.start(train, args.pretrain_dir)
    else:
        # NOTE(review): fragment truncated here — the body of the no-pretrain
        # branch is not visible in this view.