# ---- Optimization hyper-parameters -----------------------------------
LR = 1e-3        # Learning rate; dropped at 400k and 500k by sqrt(10.) each time
VALFREQ = 1e3    # Iterations between validation passes
SAVEFREQ = 5e3   # Iterations between checkpoint saves
MAXITER = 60e4   # Total training iterations

# Checkpoints and training log will be saved under this directory.
wts = 'wts/nstd_%d/train' % (opts.nstd)
if not os.path.exists(wts):
    os.makedirs(wts)

#########################################################################
# Check for saved weights & optimizer states; resume from the latest.
msave = ut.ckpter(wts + '/iter_*.model.npz')
ssave = ut.ckpter(wts + '/iter_*.state.npz')
ut.logopen(wts + '/train.log')
niter = msave.iter  # 0 when no checkpoint exists (presumably — see ut.ckpter)

#########################################################################
# Setup Graphs
net.toTest()
model = net.Net()
CSZ = net.csz
PSZ = net.psz
# Window size must be odd; crop/patch sizes must be even.
assert WSZ % 2 == 1
assert CSZ % 2 == 0 and PSZ % 2 == 0
import dops
import os.path
import sys

# ---- Params ----------------------------------------------------------
BSZ = 4          # Batch size
LR = 1e-4        # Base learning rate
WD = 1e-5        # Weight decay
MAXITER = 500e3  # First-stage iteration budget
SAVEITER = 1e4
DISPITER = 10
VALITER = 1000
VALREP = 2

# Staged LR schedule on resume: if training already passed a stage's
# budget, extend MAXITER and lower LR.  Note the second test compares
# against the *updated* MAXITER, so the stages cascade in order.
saver = ut.ckpter('wts/model*.npz')
if saver.iter >= MAXITER:
    MAXITER = 550e3
    LR = 1e-5
if saver.iter >= MAXITER:
    MAXITER = 600e3
    LR = 1e-6

#### Build Graph
# Build phase2
d = data.dataset(BSZ)
net = model.Net()
output = net.predict(d.limgs, d.cv, d.lrl)
['{} {}'.format(k.upper(), v) for k, v in vars(prm).items()]) print(prm_str + '\n') ######################################################################### # Create exp dir if does not exist exp_dir = 'wts/{}/visibnet'.format(prm.input_attr) os.system('mkdir -p {}'.format(exp_dir)) # redirect stdout and stderr to log files if prm.log_file: sys.stdout = open(exp_dir + '/train.log', 'a') sys.stderr = open(exp_dir + '/info.log', 'a') # Check for saved weights & find iter vsave = ut.ckpter(exp_dir + '/iter_*.vmodel.npz') osave = ut.ckpter(exp_dir + '/iter_*.opt.npz') vpath = lambda itr: '%s/iter_%07d.vmodel.npz' % (exp_dir, itr) opath = lambda itr: '%s/iter_%07d.opt.npz' % (exp_dir, itr) niter = vsave.iter # Load annotations ut.mprint("Loading annotations") tbchr = ut.batcher(prm.trn_anns, prm.batch_size, niter) vbchr = ut.batcher(prm.val_anns, prm.batch_size, niter) ut.mprint("Done!") ######################################################################### # Set up data fetch
parser.add_argument( '-m', default=None, help='Path to saved model.npz file, default to the latest model') parser.add_argument('-w', type=int, default=31, help='Window size') opts = parser.parse_args() ######################################################################### IMSZ = 304 # Sizes of original images to crop niter = 100 mfile = opts.m # Get the latest model if mfile is None if mfile is None: msave = ut.ckpter('wts/nstd_%d/%s/iter_*.model.npz' % (opts.nstd, opts.type)) mfile = msave.latest if opts.type == 'train': tlist = 'data/train.txt' bsz = 512 wsz = opts.w nstd = opts.nstd / 255. net.toTest() net.base = baseBn model = net.Net() tset = TrainDataset(tlist, bsz, IMSZ, wsz, net.csz, net.psz, nstd, 0) _ = model.encode(tset.pnzbatch) elif opts.type == 'regress':
# ---- Config options --------------------------------------------------
KEEPLAST = 50        # How many checkpoints to retain
SAVE_FREQ = 1000
DISP_FREQ = 100
BSZ = 64
WEIGHT_DECAY = 0.
LR = 0.0001
MOM = 0.9
MAX_ITER = int(1.2e7)
# val
VAL_FREQ = 100
VAL_numBatches = 20

# Check for saved weights and pick up where we left off.
saved = ut.ckpter(WTS_DIR + 'iter_*.model.npz')
iter = saved.iter  # NOTE(review): shadows the builtin `iter`; kept since later code uses it

# Set up batching (seeded with iter so a resumed run continues in place).
batcher = ut.batcher(LIST, sourcePath, BSZ, iter)
batcher_val = ut.batcher(VAL, sourcePath, BSZ, iter)

# Set up data prep.
data = ldr.trainload(BSZ)
labels = tf.placeholder(shape=(BSZ, ), dtype=tf.int32)
labels_val = tf.placeholder(shape=(BSZ, ), dtype=tf.int32)

# Load model-def; iftrain toggles train/eval behavior inside the graph.
iftrain = tf.placeholder(tf.bool, shape=[])
net = md.model(data.batch, iftrain)
#########################################################################
# Get params from command line
#########################################################################
if len(sys.argv) != 5:
    sys.exit("USAGE: train.py nBits1 nBits2 nHidden wtsdir")

nBits1 = int(sys.argv[1])
nBits2 = int(sys.argv[2])
nHidden = int(sys.argv[3])
wtdir = sys.argv[4]
#########################################################################

#########################################################################
# Model save & logging setup
ockp = ut.ckpter(wtdir+'/iter_*.state.npz')   # Optimization state
ockp_s = wtdir+'/iter_%d.state.npz'
mckp = ut.ckpter(wtdir+'/iter_*.model.npz')   # Model Files
mckp_s = wtdir+'/iter_%d.model.npz'
# Kept open for the life of the process; closed implicitly at exit.
log = open(wtdir+'/train.log','a')

def mprint(s):
    """Write *s* to stdout and the train log, prefixed with a timestamp.

    Fix: build the timestamped line once and write it to both sinks.
    The original called time.strftime separately for each write, which
    could straddle a second boundary and record mismatched timestamps.
    """
    line = time.strftime("%Y-%m-%d %H:%M:%S ") + s + "\n"
    sys.stdout.write(line)
    log.write(line)
    sys.stdout.flush()
    log.flush()
#########################################################################
# Set path to visibnet wts for demo.
# Fix: use identity comparison `is None` instead of `== None` (PEP 8);
# behavior is the same here but `==` can be hijacked by custom __eq__.
if prm.vnet_model is None:
    prm.vnet_model = 'wts/pretrained/{}/visibnet.model.npz'.format(
        prm.input_attr)
if prm.cnet_model is None:
    prm.cnet_model = 'wts/pretrained/{}/coarsenet.model.npz'.format(
        prm.input_attr)

# Redirect stdout and stderr to log files when requested.
# NOTE(review): exp_dir is assumed to be defined earlier in this file.
if prm.log_file:
    sys.stdout = open(exp_dir + '/train.log', 'a')
    sys.stderr = open(exp_dir + '/info.log', 'a')

# Check for saved weights & find the iteration to resume from.
rsave = ut.ckpter(exp_dir + '/iter_*.rmodel.npz')
dsave = ut.ckpter(exp_dir + '/iter_*.dmodel.npz')
osave = ut.ckpter(exp_dir + '/iter_*.opt.npz')
rpath = lambda itr: '%s/iter_%07d.rmodel.npz' % (exp_dir, itr)
dpath = lambda itr: '%s/iter_%07d.dmodel.npz' % (exp_dir, itr)
opath = lambda itr: '%s/iter_%07d.opt.npz' % (exp_dir, itr)
niter = rsave.iter

# Load annotations; batchers are seeded with niter so a resumed run
# continues from the same position in the data stream (presumably).
ut.mprint("Loading annotations")
tbchr = ut.batcher(prm.trn_anns, prm.batch_size, niter)
vbchr = ut.batcher(prm.val_anns, prm.batch_size, niter)
ut.mprint("Done!")

#########################################################################
def arrtoimg(img): img = tf.depth_to_space( tf.reshape(img, [1, args.BSIZE, args.BSIZE, args.SIZE * args.SIZE * 3]), args.SIZE) img = tf.reshape(img, [args.SIZE * args.BSIZE, args.SIZE * args.BSIZE, 3]) img = sess.run(img) img = np.clip(img, 0., 1.) * 255 img = np.uint8(img) img = Image.fromarray(img) return img saved = ut.ckpter(args.WTS + 'iter_*.model.npz') iter = saved.iter std_num = len(args.STD.split(' ')) stds = [float(i) for i in args.STD.split(' ')] # Start session sess = tf.Session() batcher = ut.batcher(args.LIST, 1) data = tldr.testload(args.SIZE, args.BSIZE) std_img = tf.placeholder(shape=[args.BSIZE * args.BSIZE], dtype=tf.float32) std_denoise = tf.placeholder(shape=[args.BSIZE * args.BSIZE], dtype=tf.float32) rnd = tf.placeholder(shape=[], dtype=tf.float32) comp = tf.placeholder(shape=[], dtype=tf.float32) if args.MD == 1: