Example #1
        print(
            util.toMagenta("loading pretrained ({0}) to fine-tune...".format(
                opt.load)))
        summaryWriter.add_graph(sess.graph)
    print(util.toMagenta("start training..."))

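    # resume/stop chunk indices: each chunk covers opt.itPerChunk training iterations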
    chunkResumeN, chunkMaxN = opt.fromIt // opt.itPerChunk, opt.toIt // opt.itPerChunk
    # training loop
    for c in range(chunkResumeN, chunkMaxN):
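        # hand the prefetched chunk to training, then load the next chunk in the background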
        dataloader.shipChunk()
        dataloader.thread = threading.Thread(target=dataloader.loadChunk,
                                             args=[opt])
        dataloader.thread.start()
        for i in range(c * opt.itPerChunk, (c + 1) * opt.itPerChunk):
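            # piecewise-constant decay: lr drops by a factor of opt.lrDecay every opt.lrStep iterations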
            lr = opt.lr * opt.lrDecay**(i // opt.lrStep)
            # make training batch
            batch = data.makeBatch(opt, dataloader, PH)
            batch[lr_PH] = lr
            # run one step
            runList = [optim, loss, loss_depth, loss_mask, maskLogit]
            _, l, ld, lm, ml = sess.run(runList, feed_dict=batch)
            if (i + 1) % 20 == 0:
                print(
                    "it. {0}/{1}, lr={2}, loss={4} ({5},{6}), time={3}".format(
                        util.toCyan("{0}".format(i + 1)), opt.toIt,
                        util.toYellow("{0:.0e}".format(lr)),
                        util.toGreen("{0:.2f}".format(time.time() -
                                                      timeStart)),
                        util.toRed("{0:.2f}".format(l)),
                        util.toRed("{0:.2f}".format(ld)),
                        util.toRed("{0:.2f}".format(lm))))
            if (i + 1) % 100 == 0:
                pass  # body truncated in the original snippet
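
A note on the loop above: shipChunk() hands over the chunk that the previous background thread finished loading, and a new thread immediately starts loading the next one, so disk I/O overlaps with the sess.run steps. A minimal double-buffering sketch of that pattern (this ChunkedLoader is illustrative, not the original data module):

import threading
import numpy as np

class ChunkedLoader:
    def __init__(self):
        self.pending = None  # chunk being filled by the background thread
        self.active = None   # chunk the training loop currently reads
        self.thread = None

    def loadChunk(self, itPerChunk, batchSize):
        # stand-in for disk I/O: one random batch per iteration in the chunk
        self.pending = [np.random.rand(batchSize, 64, 64, 3)
                        for _ in range(itPerChunk)]

    def shipChunk(self):
        # promote the prefetched chunk to the active slot
        self.active, self.pending = self.pending, None

loader = ChunkedLoader()
loader.loadChunk(itPerChunk=4, batchSize=2)   # first chunk loads in the foreground
for c in range(3):
    loader.shipChunk()
    loader.thread = threading.Thread(target=loader.loadChunk, args=[4, 2])
    loader.thread.start()      # prefetch chunk c+1 while training on chunk c
    for batch in loader.active:
        pass                   # one training step per batch would run here
    loader.thread.join()       # ensure the next chunk is ready before shipping it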
Example #2
with tf.Session(config=tfConfig) as sess:
	sess.run(tf.global_variables_initializer())
	summaryWriter.add_graph(sess.graph)
	if opt.fromIt!=0:
		util.restoreModelFromIt(opt,sess,saver_D,"D",opt.fromIt)
		print(util.toMagenta("resuming from iteration {0}...".format(opt.fromIt)))
	elif opt.loadD:
		util.restoreModel(opt,sess,saver_D,opt.loadD,"D")
		print(util.toMagenta("loading pretrained D {0}...".format(opt.loadD)))
	print(util.toMagenta("start training..."))

	# training loop
	for i in range(opt.fromIt,opt.toIt):
		lrD = opt.lrD*opt.lrDdecay**(i//opt.lrDstep)
		# make training batch
		batch = data.makeBatch(opt,trainData,PH)
		batch[lrD_PH] = lrD
		# update discriminator
		runList = [optimD,loss_D,grad_D_norm_mean]
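		# take opt.updateD discriminator steps per iteration (multiple critic updates per batch)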
		for u in range(opt.updateD):
			_,ld,gdn = sess.run(runList,feed_dict=batch)
		if (i+1)%10==0:
			print("it.{0}/{1}  lr={3}(GP),{4}(D)  loss={5}(GP),{6}(D)  norm={7}  time={2}"
				.format(util.toCyan("{0}".format(i+1)),
						opt.toIt,
						util.toGreen("{0:.2f}".format(time.time()-timeStart)),
						util.toYellow("X"),
						util.toYellow("{0:.0e}".format(lrD)),
						util.toRed("X"),
						util.toRed("{0:.4f}".format(ld)),
						util.toBlue("{0:.4f}".format(gdn))))
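
Example #2 resumes or warm-starts the discriminator through util.restoreModelFromIt / util.restoreModel, whose bodies are not shown. Under TF 1.x these presumably wrap tf.train.Saver.restore; a minimal sketch assuming a simple checkpoint naming convention (the paths, opt fields, and signatures below are guesses, not the original util module):

import tensorflow as tf

def restoreModelFromIt(opt, sess, saver, part, it):
    # assumed layout: one checkpoint per saved iteration and sub-network
    saver.restore(sess, "models_{0}/{1}_{2}_it{3}.ckpt".format(opt.group, opt.model, part, it))

def restoreModel(opt, sess, saver, path, part):
    # warm-start one sub-network (here part="D") from a pretrained checkpoint
    saver.restore(sess, "{0}_{1}.ckpt".format(path, part))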
Example #3
print(util.toYellow("======= TRAINING START ======="))
timeStart = time.time()
# start session
tfConfig = tf.ConfigProto(allow_soft_placement=True)
tfConfig.gpu_options.allow_growth = True

with tf.Session(config=tfConfig) as sess:
	sess.run(tf.global_variables_initializer())
	
	summaryWriter.add_graph(sess.graph)

	# training loop
	lrGPinit = lrGP
	for i in range(toIt):
		# piecewise-constant decay from the initial rate (re-using lrGP on the
		# right-hand side would compound the decay every iteration)
		lrGP = lrGPinit*lrGPdecay**(i//lrGPstep)
		batch = data.makeBatch(batchSize,trainData,PH)
		batch[lrGP_PH] = lrGP
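		# two-stage schedule: warm up with optimGP1, then switch to optimGP2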
		if i < 1200:
			optim = optimGP1
		else:
			optim = optimGP2
		'''
		if(i<2000):
			optim = optimGP1
		elif(i<10000):
			optim = optimGP2
		else:
			optim = optimGP3
		'''
		runList = [optim,loss_GP,vars_all,summary_op,imageWarped,StandardData]
		_,lg,var,summary_res,image,stimage = sess.run(runList,feed_dict=batch)
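
All three examples share the same piecewise-constant learning-rate schedule: the rate is multiplied by a fixed decay factor once per window of lrStep iterations, i.e. lr(i) = lr0 * decay**(i // step). A quick pure-Python check (names are illustrative):

def steppedLR(lr0, decay, step, i):
    # constant within each window of `step` iterations, then drops by `decay`
    return lr0 * decay ** (i // step)

assert steppedLR(1e-4, 0.5, 1000, 999) == 1e-4     # last iteration of window 0
assert steppedLR(1e-4, 0.5, 1000, 1000) == 5e-5    # halved at the boundary
assert steppedLR(1e-4, 0.5, 1000, 2500) == 2.5e-5  # two decays after 2000 iterations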