Example #1
def ICSTN(opt,imageFull,p):
	def conv2Layer(opt,feat,outDim):
		weight,bias = createVariable(opt,[7,7,int(feat.shape[-1]),outDim],stddev=opt.stdGP)
		conv = tf.nn.conv2d(feat,weight,strides=[1,1,1,1],padding="VALID")+bias
		return conv
	def linearLayer(opt,feat,outDim):
		weight,bias = createVariable(opt,[int(feat.shape[-1]),outDim],stddev=opt.stdGP)
		fc = tf.matmul(feat,weight)+bias
		return fc
	imageWarpAll = []
	# define recurrent spatial transformations
	for l in range(opt.warpN):
		with tf.variable_scope("geometric",reuse=l>0):
			pMtrx = warp.vec2mtrx(opt,p)
			imageWarp = warp.transformCropImage(opt,imageFull,pMtrx)
			imageWarpAll.append(imageWarp)
			feat = imageWarp
			with tf.variable_scope("conv1"):
				feat = conv2Layer(opt,feat,6)
				feat = tf.nn.relu(feat)
			with tf.variable_scope("conv2"):
				feat = conv2Layer(opt,feat,24)
				feat = tf.nn.relu(feat)
			feat = tf.reshape(feat,[opt.batchSize,-1])
			with tf.variable_scope("fc3"):
				feat = linearLayer(opt,feat,opt.warpDim)
			dp = feat
		p = warp.compose(opt,p,dp)
	# warp image with the final composed p
	pMtrx = warp.vec2mtrx(opt,p)
	imageWarp = warp.transformCropImage(opt,imageFull,pMtrx)
	imageWarpAll.append(imageWarp)
	return imageWarpAll
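Examples #1, #7 and #11 call a createVariable(opt, shape, stddev=...) helper that is not shown in these snippets. Below is a minimal sketch of what such a helper might look like, assuming a normal-distribution weight initializer and a zero-initialized bias; the actual helper in the source codebase may differ.

import tensorflow as tf

def createVariable(opt, weightShape, biasShape=None, stddev=None):
    # Hypothetical sketch: one weight tensor plus a matching bias vector.
    # opt is accepted only to match the call sites above; it is unused here.
    if biasShape is None:
        biasShape = [weightShape[-1]]
    # tf.get_variable cooperates with the variable_scope(reuse=...) pattern of Example #1.
    weight = tf.get_variable("weight", shape=weightShape,
                             initializer=tf.random_normal_initializer(stddev=stddev))
    bias = tf.get_variable("bias", shape=biasShape,
                           initializer=tf.constant_initializer(0.0))
    return weight, bias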
Example #2
def cSTrecur_depth2_CF(imageInput, p, STlayerN, dimShape, stddev, params):
    [STconv1dim] = dimShape
    STconv1fcDim = (params.H - 8) * (params.W - 8) * STconv1dim
    with tf.name_scope("cSTrecur"):
        with tf.variable_scope("conv1"):
            weight1, bias1 = createVariable([9, 9, 1, STconv1dim], stddev)
        with tf.variable_scope("fc2"):
            weight2, bias2 = createVariable([STconv1fcDim, params.pDim], 0,
                                            True)
    for l in range(STlayerN):
        with tf.name_scope("cSTrecur{0}".format(l)):
            warpMtrx = warp.vec2mtrxBatch(p, params)
            ImWarp = data.imageWarpIm(imageInput, warpMtrx, params)
            makeImageSummary("imageST{0}".format(l), ImWarp, params)
            with tf.variable_scope("conv1"):
                STconv1 = tf.nn.conv2d(ImWarp, weight1, strides=[1, 1, 1, 1],
                                       padding="VALID") + bias1
                STrelu1 = tf.nn.relu(STconv1)
            STrelu1vec = tf.reshape(STrelu1, [-1, STconv1fcDim])
            with tf.variable_scope("fc2"):
                STfc2 = tf.matmul(STrelu1vec, weight2) + bias2
            p = warp.compose(p, STfc2, params)
    warpMtrx = warp.vec2mtrxBatch(p, params)
    ImWarp = data.imageWarpIm(imageInput, warpMtrx, params)
    makeImageSummary("imageST{0}".format(STlayerN), ImWarp, params)
    return ImWarp, p
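Examples #2, #3, #8, #9 and #10 use a different helper signature, createVariable(shape, stddev), sometimes with a third positional argument (e.g. createVariable([...], 0, True)) for the final regression layer. The sketch below is one plausible reading, assuming the flag requests zero initialization so that the first predicted warp update is the identity; the real helper may differ.

import tensorflow as tf

def createVariable(shape, stddev, zeroInit=False):
    # Hypothetical sketch: zeroInit=True zero-initializes the layer so the
    # network initially predicts a (near-)identity warp update.
    if zeroInit:
        weight = tf.Variable(tf.zeros(shape), name="weight")
        bias = tf.Variable(tf.zeros([shape[-1]]), name="bias")
    else:
        weight = tf.Variable(tf.truncated_normal(shape, stddev=stddev), name="weight")
        bias = tf.Variable(tf.truncated_normal([shape[-1]], stddev=stddev), name="bias")
    return weight, bias

Note that the recurrent variants above create these variables once before the warp loop and reuse them in every iteration, while the non-recurrent variants create a fresh set per layer.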
Example #3
def ST_depth1_F(ImWarp, p, STlayerN, dimShape, stddev, params):
    makeImageSummary("image", ImWarp, params)
    for l in range(STlayerN):
        with tf.name_scope("ST{0}".format(l)):
            ImWarpVec = tf.reshape(ImWarp, [-1, params.H * params.W])
            with tf.variable_scope("fc1"):
                weight, bias = createVariable(
                    [params.H * params.W, params.pDim], 0, True)
                STfc1 = tf.matmul(ImWarpVec, weight) + bias
            warpMtrx = warp.vec2mtrxBatch(STfc1, params)
            ImWarp = data.ImWarpIm(ImWarp, warpMtrx, params)
            makeImageSummary("imageST{0}".format(l), ImWarp, params)
            p = warp.compose(p, STfc1, params)
    return ImWarp, p
def cSTrecur_depth4_CCFF(imageInput, p, STlayerN, dimShape, stddev, params):
    [STconv1dim, STconv2dim, STfc3dim] = dimShape
    STconv2fcDim = (params.H - 12) // 2 * (params.W - 12) // 2 * STconv2dim
    with tf.name_scope("cSTrecur"):
        with tf.variable_scope("conv1"):
            weight1, bias1 = createVariable([7, 7, 1, STconv1dim], stddev)
        with tf.variable_scope("conv2"):
            weight2, bias2 = createVariable([7, 7, STconv1dim, STconv2dim],
                                            stddev)
        with tf.variable_scope("fc3"):
            weight3, bias3 = createVariable([STconv2fcDim, STfc3dim], stddev)
        with tf.variable_scope("fc4"):
            weight4, bias4 = createVariable([STfc3dim, params.pDim], 0, True)
        ImWarp = imageInput
    for l in range(STlayerN):
        with tf.name_scope("cSTrecur{0}".format(l)):
            with tf.variable_scope("conv1"):
                STconv1 = tf.nn.conv2d(ImWarp, weight1, strides=[1, 1, 1, 1],
                                       padding="VALID") + bias1
                STrelu1 = tf.nn.relu(STconv1)
            with tf.variable_scope("conv2"):
                STconv2 = tf.nn.conv2d(STrelu1, weight2, strides=[1, 1, 1, 1],
                                       padding="VALID") + bias2
                STrelu2 = tf.nn.relu(STconv2)
                STmaxpool2 = tf.nn.max_pool(STrelu2,
                                            ksize=[1, 2, 2, 1],
                                            strides=[1, 2, 2, 1],
                                            padding="VALID")
            STmaxpool2vec = tf.reshape(STmaxpool2, [-1, STconv2fcDim])
            with tf.variable_scope("fc3"):
                STfc3 = tf.matmul(STmaxpool2vec, weight3) + bias3
                STrelu3 = tf.nn.relu(STfc3)
            with tf.variable_scope("fc4"):
                STfc4 = tf.matmul(STrelu3, weight4) + bias4
            if l == 0:
                p = STfc4
            else:
                p = warp.compose(p, STfc4, params)
            warpMtrx = warp.vec2mtrxBatch(p, params)
            ImWarp = data.imageWarpIm(imageInput, warpMtrx, params)
            makeImageSummary("imageST{0}".format(l), ImWarp, params)
    warpMtrx = warp.vec2mtrxBatch(p, params)
    ImWarp = data.imageWarpIm(imageInput, warpMtrx, params)
    makeImageSummary("imageST{0}".format(STlayerN), ImWarp, params)
    return ImWarp, p
Example #5
def forward(self, opt, image, p):
    imageWarpAll = []
    # recurrent spatial transformations: predict a warp update dp and compose it with p
    for l in range(opt.warpN):
        pMtrx = warp.vec2mtrx(opt, p)
        imageWarp = warp.transformImage(opt, image, pMtrx)
        imageWarpAll.append(imageWarp)
        feat = imageWarp
        feat = self.conv2Layers(feat).view(opt.batchSize, -1)
        feat = self.linearLayers(feat)
        dp = feat
        p = warp.compose(opt, p, dp)
    # warp image with the final composed p
    pMtrx = warp.vec2mtrx(opt, p)
    imageWarp = warp.transformImage(opt, image, pMtrx)
    imageWarpAll.append(imageWarp)
    return imageWarpAll
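Example #5 is the forward method of a PyTorch module whose class definition is not included; self.conv2Layers and self.linearLayers are assumed to exist. A minimal, hypothetical container is sketched below; the channel counts and the (H-12)x(W-12) feature size mirror Example #1 but are illustrative only.

import torch.nn as nn

class GeometricPredictor(nn.Module):
    # Hypothetical wrapper for the forward() above; layer sizes are illustrative.
    def __init__(self, opt):
        super().__init__()
        self.conv2Layers = nn.Sequential(
            nn.Conv2d(1, 6, kernel_size=7), nn.ReLU(),
            nn.Conv2d(6, 24, kernel_size=7), nn.ReLU(),
        )
        # two 7x7 valid convolutions shrink each spatial dimension by 12
        featDim = 24 * (opt.H - 12) * (opt.W - 12)
        self.linearLayers = nn.Sequential(nn.Linear(featDim, opt.warpDim))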
Example #6
def PT_STN(image, p, stdGP, warpN, batchSize, dataH, dataW):
    shape = image.shape
    # append a constant channel of ones; it is warped along with the image and
    # later used as the foreground mask (see the colorFG/maskFG split below)
    appendarr = tf.ones([shape[0], shape[1], shape[2], 1])
    image = tf.concat((image, appendarr), axis=3)
    with tf.variable_scope("pt"):
        dp = None
        # define recurrent spatial transformations
        for l in range(warpN):
            with tf.variable_scope("warp{0}".format(l)):
                pMtrx = warp.vec2mtrx(batchSize, p)
                imagewarp = warp.transformImage(batchSize, image, pMtrx, dataH,
                                                dataW)
                # geometric predictor
                imageConcat = imagewarp
                feat = imageConcat
                with tf.variable_scope("conv1"):
                    feat, imageConcat = conv2Layer(feat, imageConcat, 32,
                                                   stdGP)  # 72x72
                with tf.variable_scope("conv2"):
                    feat, imageConcat = conv2Layer(feat, imageConcat, 64,
                                                   stdGP)  # 36x36
                with tf.variable_scope("conv3"):
                    feat, imageConcat = conv2Layer(feat, imageConcat, 128,
                                                   stdGP)  # 18x18
                with tf.variable_scope("conv4"):
                    feat, imageConcat = conv2Layer(feat, imageConcat, 256,
                                                   stdGP)  # 9x9
                with tf.variable_scope("conv5"):
                    feat, imageConcat = conv2Layer(feat, imageConcat, 512,
                                                   stdGP)  # 5x5
                feat = tf.reshape(feat, [batchSize, -1])
                with tf.variable_scope("fc6"):
                    feat = linearLayer(feat, 256, stdGP)
                with tf.variable_scope("fc7"):
                    feat = linearLayer(feat, 8, stdGP, final=True)
                dp = feat
                p = warp.compose(p, dp)
        # warp the original (mask-augmented) image with the final composed p
        pMtrx = warp.vec2mtrx(batchSize, p)
        imagewarp = warp.transformImage(batchSize, image, pMtrx, dataH, dataW)
        colorFG, maskFG = imagewarp[:, :, :, :3], imagewarp[:, :, :, 3:]
        imagewarp = colorFG * maskFG
    return imagewarp
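A hedged usage sketch for PT_STN: the placeholder shapes, the 8-dimensional warp parameterization (matching the 8-way fc7 output) and the zero vector as the identity warp are all assumptions, not taken from the source.

import tensorflow as tf

batchSize, dataH, dataW = 20, 144, 144  # illustrative sizes
image = tf.placeholder(tf.float32, [batchSize, dataH, dataW, 3])
pInit = tf.zeros([batchSize, 8])  # assumed: zero parameters == identity warp
composite = PT_STN(image, pInit, stdGP=0.01, warpN=4,
                   batchSize=batchSize, dataH=dataH, dataW=dataW)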
Example #7
def geometric_multires(opt,imageBG,imageFG,p):
	def downsample(x):
		padH,padW = int(x.shape[1])%2,int(x.shape[2])%2
		if padH!=0 or padW!=0: x = tf.pad(x,[[0,0],[0,padH],[0,padW],[0,0]])
		return tf.nn.avg_pool(x,[1,2,2,1],[1,2,2,1],"VALID")
	def conv2Layer(opt,feat,imageConcat,outDim,final=False):
		weight,bias = createVariable(opt,[4,4,int(feat.shape[-1]),outDim],stddev=opt.stdGP)
		conv = tf.nn.conv2d(feat,weight,strides=[1,2,2,1],padding="SAME")+bias
		feat = tf.nn.relu(conv)
		imageConcat = downsample(imageConcat)
		feat = tf.concat([feat,imageConcat],axis=3)
		return (feat if not final else conv),imageConcat
	def linearLayer(opt,feat,outDim,final=False):
		weight,bias = createVariable(opt,[int(feat.shape[-1]),outDim],stddev=opt.stdGP)
		fc = tf.matmul(feat,weight)+bias
		feat = tf.nn.relu(fc)
		return feat if not final else fc
	with tf.variable_scope("geometric"):
		imageFGwarpAll,pAll = [],[p]
		dp = None
		# define recurrent spatial transformations
		for l in range(opt.warpN):
			with tf.variable_scope("warp{0}".format(l)):
				pMtrx = warp.vec2mtrx(opt,p)
				imageFGwarp = warp.transformImage(opt,imageFG,pMtrx)
				imageFGwarpAll.append(imageFGwarp)
				# geometric predictor
				imageConcat = tf.concat([imageBG,imageFGwarp],axis=3)
				feat = imageConcat # 144x144
				with tf.variable_scope("conv1"): feat,imageConcat = conv2Layer(opt,feat,imageConcat,32) # 72x72
				with tf.variable_scope("conv2"): feat,imageConcat = conv2Layer(opt,feat,imageConcat,64) # 36x36
				with tf.variable_scope("conv3"): feat,imageConcat = conv2Layer(opt,feat,imageConcat,128) # 18x18
				with tf.variable_scope("conv4"): feat,imageConcat = conv2Layer(opt,feat,imageConcat,256) # 9x9
				with tf.variable_scope("conv5"): feat,imageConcat = conv2Layer(opt,feat,imageConcat,512) # 5x5
				feat = tf.reshape(feat,[opt.batchSize,-1])
				with tf.variable_scope("fc6"): feat = linearLayer(opt,feat,256)
				with tf.variable_scope("fc7"): feat = linearLayer(opt,feat,opt.warpDim,final=True)
				dp = feat
				p = warp.compose(opt,p,dp)
				pAll.append(p)
		# warp image with final p
		pMtrx = warp.vec2mtrx(opt,p)
		imageFGwarp = warp.transformImage(opt,imageFG,pMtrx)
		imageFGwarpAll.append(imageFGwarp)
	return imageFGwarpAll,pAll,dp
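These examples compose warp updates through a warp module that is not shown. Below is a minimal sketch of vec2mtrx, mtrx2vec and compose for the (opt, p) signature used in Examples #1, #5, #7 and #11, assuming p holds the 6 residual entries of an affine matrix (opt.warpDim == 6, p = 0 is the identity); the actual parameterization in the source may differ (e.g. a homography).

import tensorflow as tf

def vec2mtrx(opt, p):
    # p: [batchSize, 6] residual affine parameters -> [batchSize, 3, 3] warp matrices
    O = tf.zeros([opt.batchSize])
    I = tf.ones([opt.batchSize])
    p1, p2, p3, p4, p5, p6 = tf.unstack(p, axis=1)
    pMtrx = tf.stack([tf.stack([I + p1, p2, p3], axis=1),
                      tf.stack([p4, I + p5, p6], axis=1),
                      tf.stack([O, O, I], axis=1)], axis=1)
    return pMtrx

def mtrx2vec(opt, pMtrx):
    # inverse of vec2mtrx: [batchSize, 3, 3] -> [batchSize, 6]
    row0, row1, _ = tf.unstack(pMtrx, axis=1)
    e00, e01, e02 = tf.unstack(row0, axis=1)
    e10, e11, e12 = tf.unstack(row1, axis=1)
    return tf.stack([e00 - 1, e01, e02, e10, e11 - 1, e12], axis=1)

def compose(opt, p, dp):
    # compose two warps by multiplying their matrices (dp applied on top of p)
    pMtrxNew = tf.matmul(vec2mtrx(opt, dp), vec2mtrx(opt, p))
    return mtrx2vec(opt, pMtrxNew)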
Example #8
def cSTrecur_depth1_F(imageInput, p, STlayerN, dimShape, stddev, params):
    with tf.name_scope("cSTrecur"):
        with tf.variable_scope("fc1"):
            weight, bias = createVariable([params.H * params.W, params.pDim],
                                          0, True)
    for l in range(STlayerN):
        with tf.name_scope("cSTrecur{0}".format(l)):
            warpMtrx = warp.vec2mtrxBatch(p, params)
            ImWarp = data.imageWarpIm(imageInput, warpMtrx, params)
            makeImageSummary("imageST{0}".format(l), ImWarp, params)
            imageWarpVec = tf.reshape(ImWarp, [-1, params.H * params.W])
            with tf.variable_scope("fc1"):
                STfc1 = tf.matmul(imageWarpVec, weight) + bias
            p = warp.compose(p, STfc1, params)
    warpMtrx = warp.vec2mtrxBatch(p, params)
    ImWarp = data.imageWarpIm(imageInput, warpMtrx, params)
    makeImageSummary("imageST{0}".format(STlayerN), ImWarp, params)
    return ImWarp, p
Example #9
def cSTN(opt, imageInput, p, STlayerN, dimShape, stddev):
    for l in range(STlayerN):
        with tf.name_scope("cST{0}".format(l)):
            warpMtrx = warp.vec2mtrxBatch(p, opt)
            ImWarp = data.imageWarpIm(imageInput, warpMtrx, opt)
            util.makeImageSummary("imageST{0}".format(l), ImWarp, opt)
            [STconv1dim, STconv2dim, STfc3dim] = dimShape
            STconv2fcDim = (opt.H - 12) // 2 * (opt.W - 12) // 2 * STconv2dim
            with tf.variable_scope("conv1"):
                weight, bias = createVariable([7, 7, 1, STconv1dim], stddev)
                STconv1 = tf.nn.conv2d(ImWarp, weight, strides=[1, 1, 1, 1],
                                       padding="VALID") + bias
                STrelu1 = tf.nn.relu(STconv1)
            with tf.variable_scope("conv2"):
                weight, bias = createVariable([7, 7, STconv1dim, STconv2dim],
                                              stddev)
                STconv2 = tf.nn.conv2d(STrelu1, weight, strides=[1, 1, 1, 1],
                                       padding="VALID") + bias
                STrelu2 = tf.nn.relu(STconv2)
                STmaxpool2 = tf.nn.max_pool(STrelu2,
                                            ksize=[1, 2, 2, 1],
                                            strides=[1, 2, 2, 1],
                                            padding="VALID")
            STmaxpool2vec = tf.reshape(STmaxpool2, [-1, STconv2fcDim])
            with tf.variable_scope("fc3"):
                weight, bias = createVariable([STconv2fcDim, STfc3dim], stddev)
                STfc3 = tf.matmul(STmaxpool2vec, weight) + bias
                STrelu3 = tf.nn.relu(STfc3)
            with tf.variable_scope("fc4"):
                weight, bias = createVariable([STfc3dim, opt.pDim], 0, True)
                STfc4 = tf.matmul(STrelu3, weight) + bias
            p = warp.compose(p, STfc4, opt)
    warpMtrx = warp.vec2mtrxBatch(p, opt)
    ImWarp = data.imageWarpIm(imageInput, warpMtrx, opt)
    util.makeImageSummary("imageST{0}".format(STlayerN), ImWarp, opt)
    return ImWarp, p
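makeImageSummary / util.makeImageSummary is not defined in these snippets; it logs the intermediate warped images to TensorBoard. A minimal sketch, assuming the warped image is already an [N, H, W, C] tensor (the real helper may reshape or tile the batch before logging):

import tensorflow as tf

def makeImageSummary(tag, image, params):
    # Hypothetical sketch: log the first few images in the batch under `tag`.
    # params is accepted only for signature compatibility with the call sites.
    with tf.name_scope("imageSummary"):
        tf.summary.image(tag, image, max_outputs=3)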
Example #10
def ST_depth4_CCFF(ImWarp, p, STlayerN, dimShape, stddev, params):
    makeImageSummary("image", ImWarp, params)
    for l in range(STlayerN):
        with tf.name_scope("ST{0}".format(l)):
            [STconv1dim, STconv2dim, STfc3dim] = dimShape
            STconv2fcDim = (params.H - 12) // 2 * (params.W - 12) // 2 * STconv2dim
            with tf.variable_scope("conv1"):
                weight, bias = createVariable([7, 7, 1, STconv1dim], stddev)
                STconv1 = tf.nn.conv2d(ImWarp, weight, strides=[1, 1, 1, 1],
                                       padding="VALID") + bias
                STrelu1 = tf.nn.relu(STconv1)
            with tf.variable_scope("conv2"):
                weight, bias = createVariable([7, 7, STconv1dim, STconv2dim],
                                              stddev)
                STconv2 = tf.nn.conv2d(STrelu1, weight, strides=[1, 1, 1, 1],
                                       padding="VALID") + bias
                STrelu2 = tf.nn.relu(STconv2)
                STmaxpool2 = tf.nn.max_pool(STrelu2,
                                            ksize=[1, 2, 2, 1],
                                            strides=[1, 2, 2, 1],
                                            padding="VALID")
            STmaxpool2vec = tf.reshape(STmaxpool2, [-1, STconv2fcDim])
            with tf.variable_scope("fc3"):
                weight, bias = createVariable([STconv2fcDim, STfc3dim], stddev)
                STfc3 = tf.matmul(STmaxpool2vec, weight) + bias
                STrelu3 = tf.nn.relu(STfc3)
            with tf.variable_scope("fc4"):
                weight, bias = createVariable([STfc3dim, params.pDim], 0, True)
                STfc4 = tf.matmul(STrelu3, weight) + bias
            warpMtrx = warp.vec2mtrxBatch(STfc4, params)
            ImWarp = data.ImWarpIm(ImWarp, warpMtrx, params)
            makeImageSummary("imageST{0}".format(l), ImWarp, params)
            p = warp.compose(p, STfc4, params)
    return ImWarp, p
Example #11
def geometric_multires(opt, imageBG, imageFG, p):
    """
	build geometric predictor
	기하학적 예측자(생성자) 설계하는 함수
	:param opt: 옵션들
	:param imageBG: 백그라운드(인물) 이미지 데이터셋
	:param imageFG: 포그라운드(안경) 이미지 데이터셋
	:param p: warp 파라미터
	:return imageFGwarpAll:
	        pAll:
	        dp:
	"""
    def downsample(x):
        """
		차원을 줄이는 함수(레이어)
		:param x: 차원을 줄일 대상, input
		:return: 평균 풀링을 수행한 중간 output matrix
		"""
        # 패딩할 높이와 너비를 구함
        # 2의 나머지로 구하는 이유는 패딩 크기가 2*2이고 stride가 2이기 때문에
        # 높이 너비가 짝수여야 원할함 => 짝수로 맞춰주기 위해 패딩 수행
        padH, padW = int(x.shape[1]) % 2, int(x.shape[2]) % 2

        # If any padding is needed, pad the bottom/right edges.
        if padH != 0 or padW != 0:
            x = tf.pad(x, [[0, 0], [0, padH], [0, padW], [0, 0]])

        # Average-pool the (padded) tensor and return it.
        return tf.nn.avg_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], "VALID")

    # convolutional layer: strided conv + ReLU, concatenated with the downsampled input image
    def conv2Layer(opt, feat, imageConcat, outDim):
        weight, bias = createVariable(opt,
                                      [4, 4, int(feat.shape[-1]), outDim],
                                      stddev=opt.stdGP)
        conv = tf.nn.conv2d(feat, weight, strides=[1, 2, 2, 1],
                            padding="SAME") + bias
        feat = tf.nn.relu(conv)
        imageConcat = downsample(imageConcat)
        feat = tf.concat([feat, imageConcat], axis=3)
        return feat, imageConcat

    def linearLayer(opt, feat, outDim, final=False):
        weight, bias = createVariable(opt, [int(feat.shape[-1]), outDim],
                                      stddev=opt.stdGP)
        fc = tf.matmul(feat, weight) + bias
        feat = tf.nn.relu(fc)
        return feat if not final else fc

    with tf.variable_scope("geometric"):
        imageFGwarpAll, pAll = [], [p]
        dp = None
        # define spatial transformations
        for l in range(opt.warpN):
            with tf.variable_scope("warp{0}".format(l)):
                pMtrx = warp.vec2mtrx(opt, p)
                imageFGwarp = warp.transformImage(opt, imageFG, pMtrx)
                imageFGwarpAll.append(imageFGwarp)
                # geometric predictor
                imageConcat = tf.concat([imageBG, imageFGwarp], axis=3)
                feat = imageConcat  # 144x144
                with tf.variable_scope("conv1"):
                    feat, imageConcat = conv2Layer(opt, feat, imageConcat,
                                                   32)  # 72x72
                with tf.variable_scope("conv2"):
                    feat, imageConcat = conv2Layer(opt, feat, imageConcat,
                                                   64)  # 36x36
                with tf.variable_scope("conv3"):
                    feat, imageConcat = conv2Layer(opt, feat, imageConcat,
                                                   128)  # 18x18
                with tf.variable_scope("conv4"):
                    feat, imageConcat = conv2Layer(opt, feat, imageConcat,
                                                   256)  # 9x9
                with tf.variable_scope("conv5"):
                    feat, imageConcat = conv2Layer(opt, feat, imageConcat,
                                                   512)  # 5x5
                feat = tf.reshape(feat, [opt.batchSize, -1])
                with tf.variable_scope("fc6"):
                    feat = linearLayer(opt, feat, 256)
                with tf.variable_scope("fc7"):
                    feat = linearLayer(opt, feat, opt.warpDim, final=True)
                dp = feat
                p = warp.compose(opt, p, dp)
                pAll.append(p)
        # warp image with final p
        pMtrx = warp.vec2mtrx(opt, p)
        imageFGwarp = warp.transformImage(opt, imageFG, pMtrx)
        imageFGwarpAll.append(imageFGwarp)

    return imageFGwarpAll, pAll, dp
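Finally, a hedged usage sketch for geometric_multires: the 144x144 spatial size matches the "# 144x144" annotation in the code, while the channel counts, the placeholder inputs and the zero initial warp are assumptions; opt is the same options object used throughout.

import tensorflow as tf

imageBG = tf.placeholder(tf.float32, [opt.batchSize, 144, 144, 3])  # background (portrait)
imageFG = tf.placeholder(tf.float32, [opt.batchSize, 144, 144, 3])  # foreground (glasses)
pInit = tf.zeros([opt.batchSize, opt.warpDim])  # assumed: zero parameters == identity warp
imageFGwarpAll, pAll, dp = geometric_multires(opt, imageBG, imageFG, pInit)
finalWarp = imageFGwarpAll[-1]  # foreground warped with the final composed parameters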