Ejemplo n.º 1
0
	# --- WGAN-GP training-graph fragment (upstream placeholder defs not shown) ---
	# Placeholder for fake foreground images; 4 channels — presumably RGB + alpha
	# matte consumed by graph.composite (TODO confirm).
	imageFGfake = tf.placeholder(tf.float32,shape=[opt.batchSize,opt.H,opt.W,4])
	# Collect placeholders so the training loop can build feed dicts.
	PH = [imageBGfakeData,imageRealData,imageFGfake]
	# ------ generate perturbation ------
	# Apply the same background perturbation (augmentation) to real and fake BG.
	imageReal = data.perturbBG(opt,imageRealData)
	imageBGfake = data.perturbBG(opt,imageBGfakeData)
	# Gaussian noise on the foreground warp parameters, scaled by opt.pertFG.
	pPertFG = opt.pertFG*tf.random_normal([opt.batchSize,opt.warpDim])
	# ------ define GP and D ------
	geometric = graph.geometric_multires   # geometric predictor (multi-resolution)
	discriminator = graph.discriminator
	# ------ geometric predictor ------
	# Returns the warped foreground at every stage plus per-stage warp parameters.
	imageFGwarpAll,pAll,_ = geometric(opt,imageBGfake,imageFGfake,pPertFG)
	pWarp = pAll[-1]   # final-stage warp parameters
	# ------ composite image ------
	summaryImageTrain = []
	summaryImageTest = []
	summaryImageTrain.append(util.imageSummary(opt,imageReal,"TRAIN_real",opt.H,opt.W))
	summaryImageTest.append(util.imageSummary(opt,imageReal,"TEST_real",opt.H,opt.W))
	# NOTE(review): composites from stage 0, not the final stage — verify intent.
	imageFGwarp = imageFGwarpAll[0]
	imageComp = graph.composite(opt,imageBGfake,imageFGwarp)
	summaryImageTrain.append(util.imageSummary(opt,imageComp,"TRAIN_compST{0}".format(0),opt.H,opt.W))
	summaryImageTest.append(util.imageSummary(opt,imageComp,"TEST_compST{0}".format(0),opt.H,opt.W))
	# Random per-sample interpolation between real and composite images,
	# used below for the WGAN-GP gradient penalty.
	alpha = tf.random_uniform(shape=[opt.batchSize,1,1,1])
	imageIntp = alpha*imageReal+(1-alpha)*imageComp
	# ------ discriminator ------
	outComps,outIntps = [],[]
	outReal = discriminator(opt,imageReal)
	outComp = discriminator(opt,imageComp,reuse=True)   # reuse=True shares D weights
	outIntp = discriminator(opt,imageIntp,reuse=True)
	# ------ discriminator gradient ------
	# Gradient of D's output w.r.t. the interpolated images; its per-sample L2
	# norm feeds the gradient-penalty term.
	grad_D_fake = tf.gradients(outIntp,imageIntp)[0]
	# NOTE(review): 1e-8 is added per element inside the sum (total offset scales
	# with H*W*C) before sqrt, for numerical stability — confirm this is intended.
	grad_D_norm = tf.sqrt(tf.reduce_sum(grad_D_fake**2+1e-8,reduction_indices=[1,2,3]))
Ejemplo n.º 2
0
 # --- 3D fuse/render loss fragment (XYZ, maskLogit, depthGT, maskGT defined above this view) ---
 # Normalize the fusion transform rows (quaternions, presumably — TODO confirm
 # against transform.fuse3D).
 fuseTrans = tf.nn.l2_normalize(opt.fuseTrans, dim=1)
 # Fuse per-view predictions into a single 3D point set. Shape per trailing
 # comment: [B,1,VHW].
 XYZid, ML = transform.fuse3D(opt, XYZ, maskLogit, fuseTrans)  # [B,1,VHW]
 # Re-project the fused points into opt.novelN novel views. Shape per trailing
 # comment: [B,N,H,W,1].
 newDepth, newMaskLogit, collision = transform.render2D(
     opt, XYZid, ML, renderTrans)  # [B,N,H,W,1]
 # ------ define loss ------
 # Depth L1 loss, masked to pixels where exactly one point projected
 # (collision == 1); averaged over batch and novel views.
 loss_depth = graph.masked_l1_loss(newDepth - depthGT, tf.equal(
     collision, 1)) / (opt.batchSize * opt.novelN)
 loss_mask = graph.cross_entropy_loss(newMaskLogit,
                                      maskGT) / (opt.batchSize * opt.novelN)
 # Total loss: mask term plus depth term weighted by lambdaDepth.
 loss = loss_mask + opt.lambdaDepth * loss_depth
 # ------ optimizer ------
 lr_PH = tf.placeholder(tf.float32, shape=[])  # learning rate fed per step
 optim = tf.train.AdamOptimizer(learning_rate=lr_PH).minimize(loss)
 # ------ generate summaries ------
 # Depth summaries use (1 - depth) so nearer surfaces render brighter
 # — presumably; confirm against util.imageSummary.
 summaryImage = [
     util.imageSummary(opt, "image_RGB", inputImage, opt.inH, opt.inW),
     # Predicted depth of the first novel view, zeroed where no point hit.
     util.imageSummary(opt, "image_depth/pred",
                       ((1 - newDepth) *
                        tf.to_float(tf.equal(collision, 1)))[:, 0, :, :,
                                                             0:1], opt.H,
                       opt.W),
     util.imageSummary(opt, "image_depth/GT",
                       (1 - depthGT)[:, 0, :, :, 0:1], opt.H, opt.W),
     util.imageSummary(opt, "image_mask/new",
                       tf.sigmoid(newMaskLogit[:, 0, :, :, 0:1]), opt.H,
                       opt.W),
     util.imageSummary(opt, "image_mask", tf.sigmoid(maskLogit[:, :, :,
                                                               0:1]),
                       opt.outH, opt.outW),
     util.imageSummary(opt, "image_mask/GT", maskGT[:, 0, :, :, 0:1], opt.H,
                       opt.W)
Ejemplo n.º 3
0
 # --- GAN graph fragment: variant of the compositing setup WITHOUT background
 # perturbation (real/fake BG tensors are used as-is). Upstream defs not shown. ---
 PH = [imageBGfakeData, imageRealData, imageFGfake]
 # ------ generate perturbation ------
 # No BG augmentation here — pass data tensors straight through.
 imageReal = imageRealData
 imageBGfake = imageBGfakeData
 # Gaussian noise on the foreground warp parameters, scaled by opt.pertFG.
 pPertFG = opt.pertFG * tf.random_normal([opt.batchSize, opt.warpDim])
 # ------ define GP and D ------
 geometric = graph.geometric_multires   # geometric predictor (multi-resolution)
 discriminator = graph.discriminator
 # ------ geometric predictor ------
 # Warped foreground at every stage plus per-stage warp parameters.
 imageFGwarpAll, pAll, _ = geometric(opt, imageBGfake, imageFGfake, pPertFG)
 pWarp = pAll[-1]   # final-stage warp parameters
 # ------ composite image ------
 summaryImageTrain = []
 summaryImageTest = []
 summaryImageTrain.append(
     util.imageSummary(opt, imageReal, "TRAIN_real", opt.H, opt.W))
 summaryImageTest.append(
     util.imageSummary(opt, imageReal, "TEST_real", opt.H, opt.W))
 # NOTE(review): composites from stage 0, not the last stage — verify intent.
 imageFGwarp = imageFGwarpAll[0]
 imageComp = graph.composite(opt, imageBGfake, imageFGwarp)
 summaryImageTrain.append(
     util.imageSummary(opt, imageComp, "TRAIN_compST{0}".format(0), opt.H,
                       opt.W))
 summaryImageTest.append(
     util.imageSummary(opt, imageComp, "TEST_compST{0}".format(0), opt.H,
                       opt.W))
 # Random per-sample interpolation between real and composite for the
 # WGAN-GP gradient penalty (computed below, past this fragment).
 alpha = tf.random_uniform(shape=[opt.batchSize, 1, 1, 1])
 imageIntp = alpha * imageReal + (1 - alpha) * imageComp
 # ------ discriminator ------
 outComps, outIntps = [], []
 outReal = discriminator(opt, imageReal)
Ejemplo n.º 4
0
 # NOTE(review): fragment starts mid-expression — the lines below close an
 # np.concatenate(...) call whose opening is above this view.
 ],
                       axis=0)  # [V,H,W]
 # Reorder GT coordinate maps to channel-last and add a batch axis: [1,H,W,2V].
 XYGT = np.expand_dims(np.transpose(XYGT, axes=[1, 2, 0]),
                       axis=0)  # [1,H,W,2V]
 # First 2V channels of the prediction are the per-view XY coordinate maps.
 XY = XYZ[:, :, :, :opt.outViewN * 2]
 # ------ define loss ------
 loss_XYZ = graph.l1_loss(XY - XYGT) / opt.batchSize
 # Depth L1, restricted to pixels the predicted mask marks foreground
 # (maskLogit > 0).
 loss_XYZ += graph.masked_l1_loss(depth - depthGT,
                                  maskLogit > 0) / opt.batchSize
 loss_mask = graph.cross_entropy_loss(maskLogit, maskGT) / opt.batchSize
 # Total loss: mask term plus XYZ/depth term weighted by lambdaDepth.
 loss = loss_mask + opt.lambdaDepth * loss_XYZ
 # ------ optimizer ------
 lr_PH = tf.placeholder(tf.float32, shape=[])  # learning rate fed per step
 optim = tf.train.AdamOptimizer(learning_rate=lr_PH).minimize(loss)
 # ------ generate summaries ------
 # Depth summaries use (1 - depth) so nearer surfaces render brighter
 # — presumably; confirm against util.imageSummary.
 summaryImage = [
     util.imageSummary(opt, "image_RGB", inputImage, opt.inH, opt.inW),
     util.imageSummary(opt, "image_depth/pred", (1 - depth)[:, :, :, 0:1],
                       opt.outH, opt.outW),
     # Predicted depth zeroed outside the (binary) mask.
     util.imageSummary(opt, "image_depth/valid",
                       ((1 - depth) * mask)[:, :, :,
                                            0:1], opt.outH, opt.outW),
     util.imageSummary(opt, "image_depth/GT", (1 - depthGT)[:, :, :, 0:1],
                       opt.outH, opt.outW),
     util.imageSummary(opt, "image_mask", tf.sigmoid(maskLogit[:, :, :,
                                                               0:1]),
                       opt.outH, opt.outW),
     util.imageSummary(opt, "image_mask/GT", maskGT[:, :, :, 0:1], opt.outH,
                       opt.outW)
 ]
 summaryImage = tf.summary.merge(summaryImage)
 # Scalar loss summaries follow (list continues past this fragment).
 summaryLoss = [
Ejemplo n.º 5
0
		# --- STN/IC-STN classifier graph fragment (imageWarp, imageWarpAll, label
		# defined above this view; this line sits inside an enclosing scope). ---
		output = graph.CNN(opt,imageWarp)   # classifier logits on the warped image
	softmax = tf.nn.softmax(output)
	labelOnehot = tf.one_hot(label,opt.labelN)
	# Per-sample correctness: argmax over classes vs. integer label.
	prediction = tf.equal(tf.argmax(softmax,1),label)
	# ------ define loss ------
	# NOTE: cross-entropy is computed from the raw logits, not the softmax above.
	softmaxLoss = tf.nn.softmax_cross_entropy_with_logits(logits=output,labels=labelOnehot)
	loss = tf.reduce_mean(softmaxLoss)
	# ------ optimizer ------
	# Separate learning rates for the geometric predictor (GP) and classifier (C).
	lrGP_PH,lrC_PH = tf.placeholder(tf.float32,shape=[]),tf.placeholder(tf.float32,shape=[])
	optim = util.setOptimizer(opt,loss,lrGP_PH,lrC_PH)
	# ------ generate summaries ------
	summaryImageTrain = []
	summaryImageTest = []
	# Only STN variants produce intermediate warps worth visualizing.
	if opt.netType=="STN" or opt.netType=="IC-STN":
		# warpN warp steps plus the initial image.
		for l in range(opt.warpN+1):
			summaryImageTrain.append(util.imageSummary(opt,imageWarpAll[l],"TRAIN_warp{0}".format(l),opt.H,opt.W))
			summaryImageTest.append(util.imageSummary(opt,imageWarpAll[l],"TEST_warp{0}".format(l),opt.H,opt.W))
		summaryImageTrain = tf.summary.merge(summaryImageTrain)
		summaryImageTest = tf.summary.merge(summaryImageTest)
	summaryLossTrain = tf.summary.scalar("TRAIN_loss",loss)
	testErrorPH = tf.placeholder(tf.float32,shape=[])
	# One image per class, single channel (grayscale MNIST).
	testImagePH = tf.placeholder(tf.float32,shape=[opt.labelN,opt.H,opt.W,1])
	summaryErrorTest = tf.summary.scalar("TEST_error",testErrorPH)
	if opt.netType=="STN" or opt.netType=="IC-STN":
		# Mean/variance visualizations of test images before and after warping.
		# NOTE(review): the *3 scaling on the variance summaries — presumably for
		# display contrast; confirm against util.imageSummaryMeanVar.
		summaryMeanTest0 = util.imageSummaryMeanVar(opt,testImagePH,"TEST_mean_init",opt.H,opt.W)
		summaryMeanTest1 = util.imageSummaryMeanVar(opt,testImagePH,"TEST_mean_warped",opt.H,opt.W)
		summaryVarTest0 = util.imageSummaryMeanVar(opt,testImagePH*3,"TEST_var_init",opt.H,opt.W)
		summaryVarTest1 = util.imageSummaryMeanVar(opt,testImagePH*3,"TEST_var_warped",opt.H,opt.W)

# load data
print(util.toMagenta("loading MNIST dataset..."))
Ejemplo n.º 6
0
		# --- STN/IC-STN classifier graph fragment, RGB variant (imageWarp, imageVar,
		# imageMean, output, imageWarpAll, label defined above this view). ---
		# Undo per-dataset normalization for visualization: x*std + mean.
		imageWarpRescale = imageWarp*tf.sqrt(imageVar)+imageMean
	softmax = tf.nn.softmax(output)
	labelOnehot = tf.one_hot(label,opt.labelN)
	# Per-sample correctness: argmax over classes vs. integer label.
	prediction = tf.equal(tf.argmax(softmax,1),label)
	# ------ define loss ------
	# NOTE: cross-entropy is computed from the raw logits, not the softmax above.
	softmaxLoss = tf.nn.softmax_cross_entropy_with_logits(logits=output,labels=labelOnehot)
	loss = tf.reduce_mean(softmaxLoss)
	# ------ optimizer ------
	# Separate learning rates for the geometric predictor (GP) and classifier (C).
	lrGP_PH,lrC_PH = tf.placeholder(tf.float32,shape=[]),tf.placeholder(tf.float32,shape=[])
	optim = util.setOptimizer(opt,loss,lrGP_PH,lrC_PH)
	# ------ generate summaries ------
	summaryImageTrain = []
	summaryImageTest = []
	# Only STN variants produce intermediate warps worth visualizing.
	if opt.netType=="STN" or opt.netType=="IC-STN":
		# warpN warp steps plus the initial image; de-normalized for display.
		for l in range(opt.warpN+1):
			summaryImageTrain.append(util.imageSummary(opt,imageWarpAll[l]*tf.sqrt(imageVar)+imageMean,"TRAIN_warp{0}".format(l),opt.H,opt.W))
			summaryImageTest.append(util.imageSummary(opt,imageWarpAll[l]*tf.sqrt(imageVar)+imageMean,"TEST_warp{0}".format(l),opt.H,opt.W))
		summaryImageTrain = tf.summary.merge(summaryImageTrain)
		summaryImageTest = tf.summary.merge(summaryImageTest)
	summaryLossTrain = tf.summary.scalar("TRAIN_loss",loss)
	testErrorPH = tf.placeholder(tf.float32,shape=[])
	# One image per class, 3 channels (RGB — GTSRB traffic signs).
	testImagePH = tf.placeholder(tf.float32,shape=[opt.labelN,opt.H,opt.W,3])
	summaryErrorTest = tf.summary.scalar("TEST_error",testErrorPH)
	if opt.netType=="STN" or opt.netType=="IC-STN":
		# Mean/variance visualizations of test images before and after warping.
		summaryMeanTest0 = util.imageSummaryMeanVar(opt,testImagePH,"TEST_mean_init",opt.H,opt.W)
		summaryMeanTest1 = util.imageSummaryMeanVar(opt,testImagePH,"TEST_mean_warped",opt.H,opt.W)
		summaryVarTest0 = util.imageSummaryMeanVar(opt,testImagePH,"TEST_var_init",opt.H,opt.W)
		summaryVarTest1 = util.imageSummaryMeanVar(opt,testImagePH,"TEST_var_warped",opt.H,opt.W)

# load data
print(util.toMagenta("loading GTSRB dataset..."))