def __init__(self, nJoints, is_training, batch_size, img_size=256,
             loss_weight_heatmap=1.0, loss_weight_volume=1.0,
             loss_weight_rank=1.0, loss_weight_2d=1000.0):
    self.loss_weight_heatmap = loss_weight_heatmap
    self.loss_weight_volume = loss_weight_volume  # loss weight for no-ground-truth training
    self.loss_weight_rank = loss_weight_rank
    self.loss_weight_2d = loss_weight_2d

    self.nJoints = nJoints
    self.img_size = img_size
    self.is_use_bias = True
    self.is_tiny = False
    self.is_training = is_training
    self.res_utils = mResidualUtils(is_training=self.is_training,
                                    is_use_bias=self.is_use_bias,
                                    is_tiny=self.is_tiny)
    self.batch_size = batch_size
    self.feature_size = 64
def __init__(self, nJoints, is_training, batch_size, img_size=256,
             coords_scale=1000.0, coords_2d_scale=255.0, coords_2d_offset=127.5,
             keyp_loss_weight=100.0, rank_loss_weight=1.0, coords_loss_weight=1000.0):
    self.nJoints = nJoints
    self.img_size = img_size
    self.is_use_bias = True
    self.is_tiny = False
    self.is_training = is_training
    self.res_utils = mResidualUtils(is_training=self.is_training,
                                    is_use_bias=self.is_use_bias,
                                    is_tiny=self.is_tiny)
    self.batch_size = batch_size

    self.coords_scale = coords_scale
    self.coords_2d_scale = coords_2d_scale
    self.coords_2d_offset = coords_2d_offset
    self.coords_loss_weight = coords_loss_weight
    self.keyp_loss_weight = keyp_loss_weight
    self.rank_loss_weight = rank_loss_weight
def __init__(self, nJoints, is_training, batch_size, img_size=256,
             loss_weight_heatmap=1.0, loss_weight_xyzmap=1.0,
             joints_2d_scale=4.0, joints_3d_scale=1000.0):
    self.loss_weight_heatmap = loss_weight_heatmap
    self.loss_weight_xyzmap = loss_weight_xyzmap
    self.joints_2d_scale = joints_2d_scale
    self.joints_3d_scale = joints_3d_scale

    self.nJoints = nJoints
    self.img_size = img_size
    self.is_use_bias = True
    self.is_tiny = False
    self.is_training = is_training
    self.res_utils = mResidualUtils(is_training=self.is_training,
                                    is_use_bias=self.is_use_bias,
                                    is_tiny=self.is_tiny)
    self.batch_size = batch_size
    self.feature_size = 64
def build_hourglass(inputs, nOut=256, nPooling=4, name='hourglass', is_training=True, res_utils=None):
    if res_utils is None:
        print("Use the default resblock settings!")
        res_utils = mResidualUtils(is_training=is_training, is_tiny=False, is_use_bias=True)

    with tf.variable_scope(name):
        # encoding branch kept at the input resolution (skip connection)
        up1 = inputs
        with tf.variable_scope("up_1"):
            for i in range(3):
                up1 = res_utils.residual_block(up1, nOut, name="res{}".format(i))

        # encoding branch after down-sampling
        with tf.variable_scope("low_1"):
            low1 = tf.layers.max_pooling2d(inputs, pool_size=2, strides=2, padding="VALID", name="down_sampling")
            for i in range(3):
                low1 = res_utils.residual_block(low1, nOut, name="res{}".format(i))

        if nPooling > 1:
            # recurse to build the nested hourglass
            low2 = build_hourglass(low1, nOut, nPooling - 1, name="inner_hg", res_utils=res_utils)
        else:
            # TODO: the 2017 version in
            # https://github.com/geopavlakos/c2f-vol-train/blob/master/src/models/hg-stacked-no-int.lua
            # contains only one residual block here, but the paper describes three.
            low2 = low1
            with tf.variable_scope("mid"):
                for i in range(3):
                    low2 = res_utils.residual_block(low2, nOut, name="res{}".format(i))

        with tf.variable_scope("low_2"):
            low3 = res_utils.residual_block(low2, nOut, name="res")

        # decoding: up-sample back to the input resolution and merge with the skip branch
        with tf.variable_scope("up_2"):
            cur_shape = low3.get_shape()[1:3].as_list()
            up2 = tf.image.resize_nearest_neighbor(low3, [cur_shape[0] * 2, cur_shape[1] * 2], name="up_sampling")

        return tf.add_n([up1, up2], name="out_hg")
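# Usage sketch (added for illustration, not part of the original model code): how
# build_hourglass is typically applied to a 64x64 feature map. The placeholder shape
# and the scope name "hg_example" are assumptions; `tf` is TensorFlow 1.x, imported
# at module level as in the rest of this file.
def _example_build_hourglass():
    features = tf.placeholder(tf.float32, shape=[None, 64, 64, 256], name="example_features")
    # The hourglass keeps the spatial resolution of its input (64x64 here) and returns
    # nOut feature channels; a 1x1 convolution usually follows to predict per-joint heatmaps.
    heatmap_features = build_hourglass(features, nOut=256, nPooling=4,
                                       name="hg_example", is_training=True)
    return heatmap_features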
def __init__(self, nJoints, is_training, batch_size, img_size=256,
             depth_scale=1000.0, loss_weight=1000):
    self.nJoints = nJoints
    self.img_size = img_size
    self.is_use_bias = True
    self.is_tiny = False
    self.is_training = is_training
    self.res_utils = mResidualUtils(is_training=self.is_training,
                                    is_use_bias=self.is_use_bias,
                                    is_tiny=self.is_tiny)
    self.batch_size = batch_size
    self.depth_scale = depth_scale
    self.loss_weight = loss_weight
def __init__(self, nJoints, is_training, batch_size, img_size=256,
             loss_weight_volume=1.0, rank_loss_weight=1.0, hm_loss_weight=100.0):
    self.loss_weight_volume = loss_weight_volume
    self.nJoints = nJoints
    self.img_size = img_size
    self.is_use_bias = True
    self.is_tiny = False
    self.is_training = is_training
    self.res_utils = mResidualUtils(is_training=self.is_training,
                                    is_use_bias=self.is_use_bias,
                                    is_tiny=self.is_tiny)
    self.batch_size = batch_size
    self.feature_size = 64
    self.rank_loss_weight = rank_loss_weight
    self.hm_loss_weight = hm_loss_weight
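# Sketch (assumption, not taken from the original training code): the weights stored
# above are usually combined into a single training objective along these lines.
# `heatmap_loss`, `volume_loss` and `rank_loss` are hypothetical per-term losses that
# would be computed elsewhere in the model.
def _example_weighted_loss(self, heatmap_loss, volume_loss, rank_loss):
    return (self.hm_loss_weight * heatmap_loss
            + self.loss_weight_volume * volume_loss
            + self.rank_loss_weight * rank_loss)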