                                       epsilon=1e-08, use_locking=False).minimize(
    cost, var_list=train_params)

# initialize all variables
sess.run(tf.initialize_all_variables())

# optionally warm-start from a pre-trained model
if args.pre_model_path is not None:
    load_params = tl.files.load_npz(path=args.pre_model_path, name=args.pre_model_name)
    tl.files.assign_params(sess, load_params, network)
    print 'set done'

nf.customfit(sess=sess, network=network, cost=cost, train_op=train_op,
             tra_provider=tra_provider, x=x, y_=y_, acc=None,
             n_epoch=args.epoch, print_freq=1, val_provider=val_provider,
             save_model=1, tra_kwag=tra_kwag, val_kwag=val_kwag,
             save_path=args.model_path + args.model_name,
             epoch_identifier=None)

if args.ouroboros_e > 0:
    nf.Ouroborosfit(sess=sess, network=network, cost=cost, train_op=train_op,
                    x=x, y_=y_,
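# --- Sketch, not part of the original script: the truncated call above is the tail of a
# --- TF 1.x Adam optimizer definition over the TensorLayer network's trainable parameters.
# --- The toy network, cost and learning rate (1e-3) below are assumptions made only so the
# --- pattern is runnable end-to-end.
import tensorflow as tf
import tensorlayer as tl

demo_x = tf.placeholder(tf.float32, shape=[None, 16], name='demo_x')
demo_y = tf.placeholder(tf.float32, shape=[None, 1], name='demo_y')
demo_net = tl.layers.InputLayer(demo_x, name='demo_in')
demo_net = tl.layers.DenseLayer(demo_net, n_units=1, name='demo_out')
demo_cost = tf.reduce_mean(tf.squared_difference(demo_net.outputs, demo_y))
demo_params = demo_net.all_params
demo_train_op = tf.train.AdamOptimizer(learning_rate=1e-3, beta1=0.9, beta2=0.999,
                                       epsilon=1e-08, use_locking=False).minimize(
    demo_cost, var_list=demo_params)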
test_val = {'inputs': test_set_y,
            'targets': test_set_y,
            'special': downscale == 'mix',
            'keepdims': True}

# pick the zipper (upscaling) network that matches the requested downscale factor
if args.downscale == 2:
    from up2 import zipper
if args.downscale == 4 or args.downscale == 'mix':
    from up4 import zipper
if args.downscale == 10:
    from up10 import zipper

x = tf.placeholder(tf.float32,
                   shape=[None, observations, args.input_x, args.input_y],
                   name='x')
network = zipper(x, downscale, args.input_x, args.input_y,
                 is_train=True, observation=args.observations)

# restore pre-trained parameters
if args.pre_model_path is not None:
    load_params = tl.files.load_npz(path=args.pre_model_path, name=args.pre_model_name)
    tl.files.assign_params(sess, load_params, network)
    print 'set done'

prediction = nf.custompredict(sess=sess, network=network, output_provider=test_provider,
                              x=x, fragment_size=1, output_length=6400,
                              y_op=None, out_kwag=test_val)

# de-normalize and save the predicted frames
prediction = prediction[0] * args.std + args.mean
np.save(args.prediction_file, prediction)
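# --- Sketch, not part of the original script: running one window through the restored
# --- zipper network without the nf provider machinery, assuming network.outputs is the
# --- TensorLayer output tensor and that inputs are z-score normalized. The random batch
# --- is a stand-in for a single test fragment.
import numpy as np

demo_batch = np.random.rand(1, args.observations, args.input_x, args.input_y).astype(np.float32)
demo_out = sess.run(network.outputs, feed_dict={x: demo_batch})  # normalized prediction
demo_frames = demo_out * args.std + args.mean                    # undo the normalization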
for e in range(joint_epoch):
    print 'Joint epoch', e + 1

    # train the discriminator longer on the first joint epoch (pre-training)
    if e == 0:
        d_epoch = args.d_pretrain
    else:
        d_epoch = args.d_step

    # discriminator update
    nf.customfit(sess=sess, network=D_generate, cost=D_lost, train_op=D_op,
                 tra_provider=tra_provider, x=g_x, y_=d_x, acc=None,
                 n_epoch=d_epoch, print_freq=1, val_provider=None,
                 save_model=args.save_d, tra_kwag=tra_kwag, val_kwag=None,
                 save_path=args.save_model_path + args.save_d_name,
                 epoch_identifier=None)

    # generator update
    nf.customfit(sess=sess, network=G, cost=G_lost, train_op=G_op,
                 tra_provider=tra_provider, x=g_x, y_=g_y,
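# --- Sketch, not part of the original script: persisting the generator outside of
# --- customfit's save_model mechanism, assuming G, sess and tl come from the surrounding
# --- script and that G is a TensorLayer network; 'generator_final.npz' is a placeholder name.
tl.files.save_npz(G.all_params, name=args.save_model_path + 'generator_final.npz', sess=sess)
# It can later be restored the same way the pre-trained models are loaded elsewhere:
#   load_params = tl.files.load_npz(path=args.save_model_path, name='generator_final.npz')
#   tl.files.assign_params(sess, load_params, G)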
test_kwag = {
    'inputs': test_set[:, :, args.start_t - args.observations - 1:args.start_t],
    'framebatch': framebatch,
    'mean': args.mean,
    'std': args.std,
    'norm_tar': True}

dstn_predictions = nf.out_futurepredictor(sess=sess, network=network, network2=network_ots,
                                          timestamp=args.period_t, season=history,
                                          output_provider=test_provider, x=x,
                                          mean=args.mean, std=args.std,
                                          fragment_size=args.fragment_size, output_length=1,
                                          y_op=None, out_kwag=test_kwag,
                                          frameshape=frameshape, future=args.step,
                                          w=args.w, bias=args.b, low_weight=args.delta,
                                          weight_decay=1.0 / args.observations)

# reshape to (step, H, W) frames and de-normalize before saving
prediction = dstn_predictions[0].reshape(-1, frameshape[0], frameshape[1]) * args.std + args.mean
np.save(args.save_file, prediction)
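# --- Sketch, not part of the original script: what the temporal slice in test_kwag picks
# --- out, shown on a toy array and assuming the last axis of test_set indexes time. With
# --- observations=3 and start_t=10 it keeps time indices 6..9, i.e. observations + 1
# --- consecutive frames ending just before start_t.
import numpy as np

demo_set = np.arange(2 * 4 * 20).reshape(2, 4, 20)   # (rows, cols, time)
demo_obs, demo_start_t = 3, 10
demo_window = demo_set[:, :, demo_start_t - demo_obs - 1:demo_start_t]
assert demo_window.shape == (2, 4, demo_obs + 1)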