# loss function
lossDict = {}
# objective function => intermediate tensors used to build the losses
objDict = {}

#   X0, X1: Gray frames
#   Y0, Y1: Ground truth color frames
#   C0, C1: Colorized frames
with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope()):
    X0, X1 = input_i[:,:,:,0:1], input_i[:,:,:,1:2]
    Y0, Y1 = input_target[:,:,:,0:3], input_target[:,:,:,3:6]
    
    # Build the colorization network. VCN emits div_num diverse colorizations
    # stacked along the channel axis, so C0/C1 have 3*div_num channels.
    with tf.compat.v1.variable_scope('individual'):
        C0 = net.VCN(utils.build(tf.tile(X0, [1, 1, 1, 3])), reuse=False, div_num=4)
        C1 = net.VCN(utils.build(tf.tile(X1, [1, 1, 1, 3])), reuse=True, div_num=4)

    objDict["mask"],_=occlusion_mask(Y0,Y1,input_flow_backward[:,:,:,0:2])
#     objDict["warped"]=flow_warp_op(C0,input_flow_backward[:,:,:,0:2])
#     objDict["warped"] = tfa.image.dense_image_warp(C0, input_flow_backward[:,:,:,0:2], name="op_warp")

    # When storing "warped" in objDict, it is C0 that gets warped, so the static
    # shape must be reset to match C0's output channels (3*div_num).
    # If dense_image_warp turns out to require exactly 3 channels, or if this
    # seems to affect training, warp each 3-channel slice in a loop of div_num
    # iterations, concat(axis=3), and set_shape (see the sketch below).
    temp_warped = tfa.image.dense_image_warp(C0, input_flow_backward[:,:,:,0:2], name="op_warp")
    temp_warped.set_shape([None, None, None, 3*div_num])
    objDict["warped"]= temp_warped

    lossDict["RankDiv_im1"]=loss.RankDiverse_loss(C0, tf.tile(input_target[:,:,:,0:3], [1,1,1,div_num]),div_num)
    lossDict["RankDiv_im2"]=loss.RankDiverse_loss(C1, tf.tile(input_target[:,:,:,3:6], [1,1,1,div_num]),div_num)
    lossDict["RankDiv"]=lossDict["RankDiv_im1"]+lossDict["RankDiv_im2"]
    # Tail of the VCN generator (earlier layers are not shown in this section):
    # a final 1x1 projection with linear output. The input `conv8`, channel
    # count, and padding are reconstruction assumptions; the initializer and
    # layer name are original.
    conv9 = tf.compat.v1.layers.conv2d(
        conv8, 3 * div_num, [1, 1], padding='same',
        activation=None,
        kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(
            scale=1.0, mode="fan_avg", distribution="uniform"),
        name=ext + 'g_conv9_2')
    return conv9


config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
# Two stacked grayscale input frames (one channel each)
input_i = tf.compat.v1.placeholder(tf.float32, shape=[1, None, None, 2])
# The two matching ground-truth color frames (three channels each)
input_target = tf.compat.v1.placeholder(tf.float32, shape=[1, None, None, 6])

with tf.compat.v1.variable_scope(tf.compat.v1.get_variable_scope()):
    with tf.compat.v1.variable_scope('individual'):
        g0 = VCN(utils.build(tf.tile(input_i[:, :, :, 0:1], [1, 1, 1, 3])),
                 reuse=False)
        g1 = VCN(utils.build(tf.tile(input_i[:, :, :, 1:2], [1, 1, 1, 3])),
                 reuse=True)

saver = tf.compat.v1.train.Saver(max_to_keep=1000)

# +
sess.run([tf.compat.v1.global_variables_initializer()])

var_restore = tf.compat.v1.trainable_variables()
saver_restore = tf.compat.v1.train.Saver(var_restore)
# `model` (defined elsewhere) is the directory holding the trained checkpoint.
ckpt = tf.train.get_checkpoint_state(model)
saver_restore.restore(sess, ckpt.model_checkpoint_path)
# -
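
# +
# Minimal smoke test of the restored graph: a hedged sketch using dummy frames
# (the real test loop that reads video frames is not part of this section).
import numpy as np

dummy_gray = np.zeros([1, 64, 64, 2], dtype=np.float32)  # two gray frames in [0, 1]
out0, out1 = sess.run([g0, g1], feed_dict={input_i: dummy_gray})
print(out0.shape, out1.shape)  # stacked colorizations for each frame
# -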