Example #1
def test_procedure(net, test_records):
    # Load data
    reader = SketchReader(tfrecord_list=test_records,
                          raw_size=[256, 256, 17],
                          shuffle=False,
                          num_threads=hyper_params['nbThreads'],
                          batch_size=1,
                          nb_epoch=1)
    raw_input = reader.next_batch()

    user_strokes, _, context_normal, context_depth, face_heatm, _, _, _, _, _, _, \
    _, _, _, line_reg = net.cook_raw_inputs(raw_input)

    # Network forward
    logit_face, logit_curve, _ = net.load_addSub_reg_net(
        user_strokes,
        context_normal,
        context_depth,
        hyper_params['rootFt'],
        is_training=False)

    # Loss
    test_loss, test_fh_loss, test_real_fh_loss, test_line_loss, test_real_line_loss, \
    test_gt_face, test_pred_face, test_gt_line, test_pred_curve, test_userStroke, \
    test_strokeMask = loss(logit_face,
                           logit_curve,
                           face_heatm,
                           line_reg,
                           user_strokes,
                           scope='test_loss')

    return test_loss, test_fh_loss, test_real_fh_loss, test_line_loss, test_real_line_loss, test_gt_face, \
           test_pred_face, test_gt_line, test_pred_curve, test_userStroke, test_strokeMask, \
           [user_strokes, context_normal, context_depth, face_heatm, line_reg]
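A minimal sketch of how the graph built by test_procedure above might be driven, assuming TensorFlow 1.x and that SketchReader feeds through input queues (so queue runners must be started); the session loop, checkpoint path, and the helper name run_test are illustrative assumptions, not code from the source.

import tensorflow as tf

def run_test(net, test_records, ckpt_path):
    # Build the evaluation graph once (ckpt_path is a hypothetical checkpoint).
    fetches = test_procedure(net, test_records)
    with tf.Session() as sess:
        tf.train.Saver().restore(sess, ckpt_path)
        # SketchReader feeds via input queues, so start the queue runners.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        results = []
        try:
            while not coord.should_stop():
                # nb_epoch=1 closes the queue after one pass over the records.
                results.append(sess.run(fetches))
        except tf.errors.OutOfRangeError:
            pass  # input exhausted
        finally:
            coord.request_stop()
            coord.join(threads)
        return results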
Example #2
def test_procedure(net, test_records):
    # load data
    reader = SketchReader(tfrecord_list=test_records,
                          raw_size=[256, 256, 17],
                          shuffle=False,
                          num_threads=hyper_params['nbThreads'],
                          batch_size=1,
                          nb_epoch=1)
    raw_input = reader.next_batch()

    user_strokes, _, context_normal, context_depth, gt_face, gt_bCurve, gt_pCurve, gt_oCurve, \
    _, _, _, _, opt_label, _, _ = net.cook_raw_inputs(raw_input)

    # network forward
    logit_prob, _ = net.load_cls_opt_tiny_net(
        user_strokes,
        context_normal,
        context_depth,
        num_classes=hyper_params['nb_cls'],
        is_training=False)

    # Loss
    val_loss = loss(logit_prob, opt_label, scope='test_loss')
    val_acc = cal_accuracy(logit_prob, opt_label, scope='test_acc')

    return val_loss, val_acc, \
           [user_strokes, context_normal, context_depth, gt_face, gt_bCurve, gt_pCurve, gt_oCurve]
Example #3
def test_procedure(net, test_records):
    # load data
    reader = SketchReader(tfrecord_list=test_records, raw_size=[256, 256, 17], shuffle=False,
                          num_threads=hyper_params['nbThreads'], batch_size=1, nb_epoch=1)
    raw_input = reader.next_batch()

    user_strokes, _, context_normal, context_depth, face_heatm, base_curve, _, _, _, _, _, _, _, _, _ \
        = net.cook_raw_inputs(raw_input)

    # Network Forward
    logit_fh, logit_bc, _ = net.load_bevel_reg_net(user_strokes,
                                                   context_normal,
                                                   context_depth,
                                                   hyper_params['rootFt'],
                                                   is_training=False)

    test_loss, fh_loss, bc_loss, real_fh_loss, real_bc_loss, pred_fh, pred_bc, gt_fh, gt_bc, userStroke, \
    cnormal, cdepth, cstrokeMask = loss(logit_fh,
                                        logit_bc,
                                        face_heatm,
                                        base_curve,
                                        user_strokes,
                                        context_normal,
                                        context_depth,
                                        scope='test')

    return test_loss, fh_loss, bc_loss, real_fh_loss, real_bc_loss, pred_fh, pred_bc, gt_fh, gt_bc, \
           userStroke, cnormal, cdepth, cstrokeMask, \
           [user_strokes, context_normal, context_depth, face_heatm, base_curve]
Example #4
def validation_procedure(net, val_records):
    # Load data
    with tf.name_scope('eval_inputs') as _:
        reader = SketchReader(tfrecord_list=val_records,
                              raw_size=[256, 256, 17],
                              shuffle=False,
                              num_threads=hyper_params['nbThreads'],
                              batch_size=hyper_params['batchSize'])
        raw_input = reader.next_batch()

        user_strokes, _, context_normal, context_depth, _, _, _, _, _, _, _, _, opt_label, _, _ = \
            net.cook_raw_inputs(raw_input)

    # network forward
    logit_prob, _ = net.load_cls_opt_tiny_net(
        user_strokes,
        context_normal,
        context_depth,
        num_classes=hyper_params['nb_cls'],
        is_training=False,
        reuse=True)

    # Loss
    val_loss = loss(logit_prob, opt_label, scope='val_loss')
    val_acc = cal_accuracy(logit_prob, opt_label, scope='val_acc')

    # TensorBoard
    proto_list = collect_vis_img(user_strokes,
                                 context_normal,
                                 context_depth,
                                 scope='collect_val_imgs')

    merged_val = tf.summary.merge(proto_list)

    return merged_val, val_loss, val_acc
Example #5
def train_procedure(net, train_records):
    nb_gpus = hyper_params['nb_gpus']

    # Load data
    with tf.name_scope('train_input') as _:
        bSize = hyper_params['batchSize'] * nb_gpus
        nbThreads = hyper_params['nbThreads'] * nb_gpus
        reader = SketchReader(tfrecord_list=train_records,
                              raw_size=[256, 256, 17],
                              shuffle=True,
                              num_threads=nbThreads,
                              batch_size=bSize)
        raw_input = reader.next_batch()

        user_strokes, _, context_normal, context_depth, _, _, _, _, _, _, _, _, opt_label, _, _ = \
            net.cook_raw_inputs(raw_input)

    # network forward
    logit_prob, _ = net.load_cls_opt_tiny_net(
        user_strokes,
        context_normal,
        context_depth,
        num_classes=hyper_params['nb_cls'],
        is_training=True)

    # loss
    train_loss = loss(logit_prob, opt_label, scope='train_loss')
    train_acc = cal_accuracy(logit_prob, opt_label, scope='train_acc')

    # TensorBoard: visualization
    train_diff_cls_proto = tf.summary.scalar('Training_ClsLoss', train_loss)
    train_diff_acc_proto = tf.summary.scalar('Training_Accuracy', train_acc)

    # Solver
    with tf.name_scope('solve') as _:
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_step = tf.train.AdamOptimizer().minimize(train_loss)

    proto_list = collect_vis_img(user_strokes,
                                 context_normal,
                                 context_depth,
                                 scope='collect_train_imgs')

    proto_list.append(train_diff_cls_proto)
    proto_list.append(train_diff_acc_proto)

    merged_train = tf.summary.merge(proto_list)

    return merged_train, train_step, train_loss, train_acc
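One plausible driver loop for train_procedure above, again assuming TF 1.x queue-based input; the step count, logging cadence, and log_dir are assumptions for illustration.

import tensorflow as tf

def run_training(net, train_records, log_dir, nb_steps=10000):
    # Build the training graph from the procedure above.
    merged, step_op, loss_op, acc_op = train_procedure(net, train_records)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter(log_dir, sess.graph)
        # Start the SketchReader input queue threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            for step in range(nb_steps):
                summary, _, cur_loss, cur_acc = sess.run(
                    [merged, step_op, loss_op, acc_op])
                writer.add_summary(summary, step)
                if step % 100 == 0:
                    print('step %d: loss=%.4f acc=%.4f'
                          % (step, cur_loss, cur_acc))
        finally:
            coord.request_stop()
            coord.join(threads)
            writer.close()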
Example #6
def validation_procedure(net, val_records):
    # Load data
    with tf.name_scope('eval_inputs') as _:
        reader = SketchReader(tfrecord_list=val_records,
                              raw_size=[256, 256, 17],
                              shuffle=False,
                              num_threads=hyper_params['nbThreads'],
                              batch_size=hyper_params['batchSize'])
        raw_input = reader.next_batch()

        user_strokes, _, context_normal, context_depth, face_heatm, _, _, _, _, _, _, \
        _, _, _, line_reg = net.cook_raw_inputs(raw_input)

    # Network forward
    logit_face, logit_curve, _ = net.load_addSub_reg_net(
        user_strokes,
        context_normal,
        context_depth,
        hyper_params['rootFt'],
        is_training=False,
        reuse=True)

    # Loss
    val_loss, val_fh_loss, val_real_fh_loss, val_line_loss, val_real_line_loss \
        = loss(logit_face,
               logit_curve,
               face_heatm,
               line_reg,
               user_strokes,
               scope='val_loss')

    # TensorBoard
    proto_list = collect_vis_img(logit_face,
                                 logit_curve,
                                 user_strokes,
                                 context_normal,
                                 context_depth,
                                 face_heatm,
                                 line_reg,
                                 scope='collect_val_imgs',
                                 is_training=False)

    merged_val = tf.summary.merge(proto_list)

    return merged_val, val_loss, val_fh_loss, val_real_fh_loss, val_line_loss, val_real_line_loss, \
           [user_strokes, context_normal, context_depth, face_heatm, line_reg]
Example #7
def test_procedure(net, test_records):
    # load data
    reader = SketchReader(tfrecord_list=test_records, raw_size=[256, 256, 17], shuffle=False,
                          num_threads=hyper_params['nbThreads'], batch_size=1, nb_epoch=1)
    raw_input = reader.next_batch()

    user_strokes, _, context_normal, context_depth, face_heatm, base_curve, _, off_curve, _, _, _, \
    _, opt_label, _, line_reg = net.cook_raw_inputs(raw_input)

    # cls network forward
    _, cls_var = net.load_cls_opt_tiny_net(user_strokes,
                                           context_normal,
                                           context_depth,
                                           num_classes=hyper_params['nb_cls'],
                                           is_training=False)

    # addSub network forward
    _, _, add_var = net.load_addSub_reg_net(user_strokes,
                                            context_normal,
                                            context_depth,
                                            hyper_params['rootFt'],
                                            is_training=False)

    # extrusion network forward
    logit_fh_e, _, ext_var = net.load_extrusion_reg_net(user_strokes,
                                                        context_normal,
                                                        context_depth,
                                                        hyper_params['rootFt'],
                                                        is_training=False)
    # bevel network forward
    _, _, bel_var = net.load_bevel_reg_net(user_strokes,
                                           context_normal,
                                           context_depth,
                                           hyper_params['rootFt'],
                                           is_training=False)

    # sweep network forward
    _, _, swp_var = net.load_sweep_reg_net(user_strokes,
                                           context_normal,
                                           context_depth,
                                           hyper_params['rootFt'],
                                           is_training=False)

    return logit_fh_e, cls_var, ext_var, add_var, bel_var, swp_var, \
           [user_strokes, context_normal, context_depth]
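The per-branch variable lists returned here (cls_var, add_var, ext_var, bel_var, swp_var) suggest each sub-network is restored from its own checkpoint; below is a minimal sketch under that assumption, with hypothetical checkpoint paths.

import tensorflow as tf

def restore_branches(sess, branch_vars, ckpt_paths):
    # branch_vars: {name: variable list returned by test_procedure above}
    # ckpt_paths:  {name: checkpoint file path} -- hypothetical paths
    for name, var_list in branch_vars.items():
        saver = tf.train.Saver(var_list=var_list)
        saver.restore(sess, ckpt_paths[name])

# Hypothetical usage:
# restore_branches(sess,
#                  {'cls': cls_var, 'addSub': add_var, 'ext': ext_var,
#                   'bevel': bel_var, 'sweep': swp_var},
#                  {'cls': 'ckpt/cls.ckpt', 'addSub': 'ckpt/addSub.ckpt',
#                   'ext': 'ckpt/ext.ckpt', 'bevel': 'ckpt/bevel.ckpt',
#                   'sweep': 'ckpt/sweep.ckpt'})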
Example #8
def train_procedure(net, train_records):
    nb_gpus = hyper_params['nb_gpus']

    # Load data
    with tf.name_scope('train_input') as _:
        bSize = hyper_params['batchSize'] * nb_gpus
        nbThreads = hyper_params['nbThreads'] * nb_gpus
        reader = SketchReader(tfrecord_list=train_records,
                              raw_size=[256, 256, 17],
                              shuffle=True,
                              num_threads=nbThreads,
                              batch_size=bSize)
        raw_input = reader.next_batch()

        user_strokes, _, context_normal, context_depth, face_heatm, base_curve, _, _, _, _, _, _, _, _, _ \
            = net.cook_raw_inputs(raw_input)

    # initialize optimizer
    opt = tf.train.AdamOptimizer()

    # split data
    with tf.name_scope('divide_data'):
        gpu_user_strokes = tf.split(user_strokes, nb_gpus, axis=0)
        gpu_cnormal = tf.split(context_normal, nb_gpus, axis=0)
        gpu_cdepth = tf.split(context_depth, nb_gpus, axis=0)
        gpu_fh = tf.split(face_heatm, nb_gpus, axis=0)
        gpu_bc = tf.split(base_curve, nb_gpus, axis=0)

    tower_grads = []
    tower_loss_collected = []
    tower_total_losses = []
    tower_fh_losses = []
    tower_bc_losses = []
    tower_abs_fh_losses = []
    tower_abs_bc_losses = []

    # TensorBoard images
    gpu0_logit_fh = None
    gpu0_logit_bc = None
    gpu0_userStroke = None
    gpu0_normal = None
    gpu0_depth = None
    gpu0_faceMap = None
    gpu0_baseCurve = None

    with tf.variable_scope(tf.get_variable_scope()):
        for gpu_id in range(nb_gpus):
            with tf.device('/gpu:%d' % gpu_id):
                with tf.name_scope('tower_%s' % gpu_id) as _:
                    # network forward
                    logit_fh, logit_bc, _ = net.load_bevel_reg_net(
                        gpu_user_strokes[gpu_id],
                        gpu_cnormal[gpu_id],
                        gpu_cdepth[gpu_id],
                        hyper_params['rootFt'],
                        is_training=True)

                    # training loss
                    train_loss, train_fh_loss, train_bc_loss, train_real_fh_loss, \
                    train_real_bc_loss = loss(logit_fh,
                                              logit_bc,
                                              gpu_fh[gpu_id],
                                              gpu_bc[gpu_id],
                                              gpu_user_strokes[gpu_id],
                                              scope='train_loss')

                    # reuse variables
                    tf.get_variable_scope().reuse_variables()

                    # collect gradients and every loss
                    tower_grads.append(opt.compute_gradients(train_loss))
                    tower_total_losses.append(train_loss)
                    tower_fh_losses.append(train_fh_loss)
                    tower_bc_losses.append(train_bc_loss)
                    tower_abs_fh_losses.append(train_real_fh_loss)
                    tower_abs_bc_losses.append(train_real_bc_loss)

                    # TensorBoard: collect images from GPU 0
                    if gpu_id == 0:
                        gpu0_logit_fh = logit_fh
                        gpu0_logit_bc = logit_bc
                        gpu0_userStroke = gpu_user_strokes[gpu_id]
                        gpu0_normal = gpu_cnormal[gpu_id]
                        gpu0_depth = gpu_cdepth[gpu_id]
                        gpu0_faceMap = gpu_fh[gpu_id]
                        gpu0_baseCurve = gpu_bc[gpu_id]

        tower_loss_collected.append(tower_total_losses)
        tower_loss_collected.append(tower_fh_losses)
        tower_loss_collected.append(tower_bc_losses)
        tower_loss_collected.append(tower_abs_fh_losses)
        tower_loss_collected.append(tower_abs_bc_losses)

    # Solver
    with tf.name_scope('solve') as _:
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            grads = average_gradient(tower_grads)
            averaged_losses = average_losses(tower_loss_collected)
            apply_gradient_op = opt.apply_gradients(grads)
            train_op = tf.group(apply_gradient_op)

    # TensorBoard: visualization
    train_diff_proto = tf.summary.scalar('Training_TotalLoss',
                                         averaged_losses[0])
    train_diff_fh_proto = tf.summary.scalar('Training_FhL2Loss',
                                            averaged_losses[1])
    train_diff_bc_proto = tf.summary.scalar('Training_BcL2Loss',
                                            averaged_losses[2])
    train_diff_real_fn_proto = tf.summary.scalar('Training_FhL1Loss',
                                                 averaged_losses[3])
    train_diff_real_bc_proto = tf.summary.scalar('Training_BcL1Loss',
                                                 averaged_losses[4])

    proto_list = collect_vis_img(gpu0_logit_fh,
                                 gpu0_logit_bc,
                                 gpu0_userStroke,
                                 gpu0_normal,
                                 gpu0_depth,
                                 gpu0_faceMap,
                                 gpu0_baseCurve,
                                 scope='collect_train_imgs')

    proto_list.append(train_diff_proto)
    proto_list.append(train_diff_fh_proto)
    proto_list.append(train_diff_bc_proto)
    proto_list.append(train_diff_real_fn_proto)
    proto_list.append(train_diff_real_bc_proto)

    merged_train = tf.summary.merge(proto_list)

    return merged_train, train_op, averaged_losses[0], \
           [user_strokes, context_normal, context_depth, face_heatm, base_curve]
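Example #8 calls an average_gradient helper that is not shown. Below is a minimal sketch of the standard TF 1.x multi-tower gradient-averaging pattern, assuming each entry of tower_grads is the (gradient, variable) list returned by opt.compute_gradients; the repository's actual implementation may differ.

import tensorflow as tf

def average_gradient(tower_grads):
    # tower_grads: one opt.compute_gradients() result per GPU tower.
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars: the same variable's (gradient, variable) pair
        # from every tower.
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        # The variable is shared across towers; keep the first tower's handle.
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads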