Example #1
    def __init__(self,
                 input_size,
                 model_path,
                 depth_multiplier=4.0,
                 depth_gamma=1.0,
                 normalizer_fn=None,
                 normalizer_params={},
                 batch_size=8):
        self.input_size = input_size
        self.batch_size = batch_size

        # with slim.arg_scope(net.arg_scope(weight_decay=0.0005)):
        with tf.Graph().as_default():
            self.sess = tf.Session()
            # Build a Graph that computes the logits predictions from the
            # inference model.
            self.images_pholder = tf.placeholder(
                tf.float32, [self.batch_size, input_size, input_size, 3])

            with tf.variable_scope('model') as scope:
                self.landmarks, _ = net.lannet(
                    self.images_pholder,
                    normalizer_fn=normalizer_fn,
                    normalizer_params=normalizer_params,
                    depth_mul=depth_multiplier,
                    depth_gamma=depth_gamma)

            saver = tf.train.Saver()
            saver.restore(self.sess, model_path)
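The constructor above only builds the inference graph and restores the checkpoint. A minimal usage sketch follows; the class name LandmarkDetector and the checkpoint path are assumptions made for illustration, and only sess, images_pholder and landmarks come from the snippet itself.

import numpy as np

# Hypothetical usage; 'LandmarkDetector' is an assumed name for the class
# whose __init__ is shown above.
detector = LandmarkDetector(input_size=56,
                            model_path='./train/trained.ckpt',
                            batch_size=8)

# Dummy batch in the [batch, H, W, 3] float32 layout the placeholder expects.
batch = np.zeros((detector.batch_size, 56, 56, 3), dtype=np.float32)

# The session keeps a reference to the private graph, so the tensors created
# in __init__ can be evaluated directly.
points = detector.sess.run(detector.landmarks,
                           feed_dict={detector.images_pholder: batch})
print(points.shape)  # (batch_size, num_landmark_points * 2)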
Example #2
def restore_and_save(ckpt_path):
    # ckpt to pb & tflite
    # load train_setting
    settings = load_settings(ckpt_path)
    normalizer_fn = settings['normalizer_fn']
    normalizer_params = settings['normalizer_params']
    depth_multiplier = settings['depth_multiplier']
    depth_gamma = settings['depth_gamma']
    is_color = settings['is_color']

    # count_records = data.get_tfr_record_count(tfr_path)
    # dataset = data.load_tfrecord(tfr_path, batch_size=64, num_parallel_calls=16, is_color=is_color)
    # iterator = dataset.make_initializable_iterator()

    # BATCH_WIDTH = 8
    # BATCH_SIZE = BATCH_WIDTH*BATCH_WIDTH
    # NUM_ITER = int(count_records/BATCH_SIZE)
    dir_path = os.path.dirname(ckpt_path)
    pb_path = os.path.join(dir_path, 'frozen_model.pb')
    tflite_float_path = os.path.join(dir_path, 'landmark.float.tflite')
    # tflite_qint8_path = os.path.join(dir_path, 'landmark.qint8.tflite')

    with tf.Session() as sess:

        # image, points = iterator.get_next()
        dph = tf.placeholder(tf.float32, (1, 56, 56, 3), 'input')

        with tf.variable_scope('model') as scope:
            predicts, _ = net.lannet(dph,
                                     depth_mul=depth_multiplier,
                                     depth_gamma=depth_gamma,
                                     normalizer_fn=normalizer_fn,
                                     normalizer_params=normalizer_params)
            saver = tf.train.Saver(tf.global_variables())

            # Load weights
            saver.restore(sess, ckpt_path)

            # Freeze the graph
            frozen_graph_def = tf.graph_util.convert_variables_to_constants(
                sess, sess.graph_def,
                ['model/lannet/fc7/Relu'])  # output_node_names

            # Save the frozen graph
            with open(pb_path, 'wb') as f:
                f.write(frozen_graph_def.SerializeToString())

    # if tf.__version__[:4] == "1.13":
    converter = tf.contrib.lite.TFLiteConverter.from_frozen_graph(
        pb_path, ['input'], ['model/lannet/fc7/Relu'],
        input_shapes={'input': [1, 56, 56, 3]})
    # pb_path, input_node_names, output_node_names,
    # input_shapes=input_shapes)
    # else:
    #     converter = tf.contrib.lite.TFLiteConverter.from_frozen_graph(
    #         pb_path, input_node_names, output_node_names,
    #         input_shapes=input_shapes)

    tflite_model = converter.convert()
    with open(tflite_float_path, 'wb') as f:
        f.write(tflite_model)
    print('>> %s' % tflite_float_path)
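A quick sanity check of the converted file is to push a dummy frame through the TFLite interpreter. This is only a sketch: the interpreter lives under tf.lite in recent 1.x releases (tf.contrib.lite in older ones), and the random input merely stands in for a preprocessed 56x56 crop.

import numpy as np
import tensorflow as tf

# Sketch: load the float model written by restore_and_save().
interpreter = tf.lite.Interpreter(model_path='landmark.float.tflite')
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Random stand-in for a preprocessed 1x56x56x3 input crop.
dummy = np.random.uniform(-1.0, 1.0, size=(1, 56, 56, 3)).astype(np.float32)
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()

landmarks = interpreter.get_tensor(output_details[0]['index'])
print(landmarks.shape)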
Example #3
def train(list2train,
          max_epoch=16,
          batch_size=32,
          num_threads=4,
          save_path='./train/model.ckpt'):

    num_samples = len(list2train)

    with slim.arg_scope(net.arg_scope()):

        data_ph = tf.placeholder(tf.float32,
                                 [None, INPUT_SIZE, INPUT_SIZE, CH],
                                 name='input')
        ans_ph = tf.placeholder(tf.float32, [None, NUM_LANDMARK_POINTS * 2])

        estims, _ = net.lannet(data_ph, is_training=True)

        global_step = tf.Variable(0, trainable=False)
        starter_learning_rate = 0.005

        learning_rate = tf.train.exponential_decay(
            starter_learning_rate,
            global_step,
            len(list2train),  #len(list2train)/batch_size*4,
            0.998,
            staircase=True)
        # loss = tf.losses.mean_squared_error(ans_ph, estims, scope='mse')
        # loss = tf.losses.huber_loss(ans_ph, estims, delta=0.01, scope='mse')
        loss = tf.losses.absolute_difference(ans_ph, estims)
        tf.summary.scalar('loss', loss)
        # train_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss, global_step=global_step)
        # Note: Adam runs with its default learning rate; the decayed
        # learning_rate above is only evaluated for logging. global_step is
        # advanced by batch_size so it counts samples, not iterations.
        train_op = tf.group(tf.train.AdamOptimizer().minimize(loss),
                            tf.assign_add(global_step, batch_size))
        # ema = tf.train.ExponentialMovingAverage(decay=0.999)

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        sess = tf.Session()
        sess.run(init_op)

        num_iter = 0
        epoch = 0
        pos = 0

        while epoch < max_epoch:

            input_batch, ans_batch, pos = read_data(list2train,
                                                    pos,
                                                    batch_size=batch_size)

            steps, lr, val_loss, ans_pred, _ = sess.run(
                [global_step, learning_rate, loss, estims, train_op],
                feed_dict={
                    data_ph: input_batch,
                    ans_ph: ans_batch
                })
            num_iter += 1

            steps = num_iter * batch_size
            epoch = int(steps / num_samples)
            print('(pos %4d) Epoch %d, iter %d : loss=%f. lr=%f' %
                  (pos, epoch, num_iter, val_loss, lr))

            # if epoch % int(max_epoch/48) == 0:
            #     patch = np.asarray((input_batch[0, :, :, :]+1.0)*255.0).astype('uint8').reshape((48, 48))
            #     img = Image.fromarray(patch)
            #     pts = np.asarray(ans_batch[0, :]).reshape((68, 2))
            #
            #     draw = ImageDraw.Draw(img)
            #     w, h = img.width, img.height
            #     for p in pts:
            #         l, t, r, b = int(p[0] * w) - 1, int(p[1] * h) - 1, int(p[0] * w) + 1, int(p[1] * h) + 1
            #         draw.ellipse((l, t, r, b))
            #
            #     del draw
            #
            #     for proc in psutil.process_iter():
            #         if proc.name() == 'display':
            #             proc.kill()
            #
            #     img.show()

        # write save code here
        if save_path:
            path_to_save = save_path
            if os.path.exists(path_to_save):
                shutil.rmtree(path_to_save)

            os.makedirs(path_to_save)

            saver = tf.train.Saver()
            save_path = saver.save(sess,
                                   os.path.join(path_to_save, 'trained.ckpt'))
            tf.train.write_graph(sess.graph.as_graph_def(),
                                 path_to_save,
                                 'trained.pbtxt',
                                 as_text=True)
            tf.train.write_graph(sess.graph.as_graph_def(),
                                 path_to_save,
                                 'trained.pb',
                                 as_text=False)
            print('model saved: %s' % save_path)

        sess.close()
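read_data is not part of this listing; the sketch below shows one way it could behave, based only on its call in the training loop (it returns an image batch, a flattened landmark batch, and the updated position, wrapping around the list). The assumed item layout, a tuple of a preprocessed image and its flattened points, is an assumption, not the original implementation.

import numpy as np

def read_data(list2train, pos, batch_size=32):
    # Hypothetical batch reader matching the call in train().
    # Assumes each item is (image, points): image is float32 of shape
    # (INPUT_SIZE, INPUT_SIZE, CH), points is float32 of length
    # NUM_LANDMARK_POINTS * 2.
    images, answers = [], []
    for _ in range(batch_size):
        image, points = list2train[pos]
        images.append(image)
        answers.append(points)
        pos = (pos + 1) % len(list2train)  # wrap at the end of the list
    return np.stack(images), np.stack(answers), pos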
Example #4
def evaluate(ckpt_path, tfr_path):

    # load train_setting
    settings = load_landmark_settings(ckpt_path)
    normalizer_fn = settings['normalizer_fn']
    normalizer_params = settings['normalizer_params']
    depth_multiplier = settings['depth_multiplier']
    depth_gamma = settings['depth_gamma']
    is_color = settings['is_color'].strip() == 'True'

    count_records = data.get_tfr_record_count(tfr_path)
    dataset = data.load_tfrecord(tfr_path,
                                 batch_size=64,
                                 num_parallel_calls=16,
                                 is_color=is_color)
    iterator = dataset.make_initializable_iterator()

    BATCH_WIDTH = 8
    BATCH_SIZE = BATCH_WIDTH * BATCH_WIDTH
    NUM_ITER = int(count_records / BATCH_SIZE)

    KEEP_WIDTH = 10
    MAX_KEEP = KEEP_WIDTH * KEEP_WIDTH

    CH = 3 if is_color else 1

    bests = []
    worsts = []

    image, points = iterator.get_next()

    with tf.variable_scope('model') as scope:
        predicts, _ = net.lannet(image,
                                 depth_mul=depth_multiplier,
                                 depth_gamma=depth_gamma,
                                 normalizer_fn=normalizer_fn,
                                 normalizer_params=normalizer_params)

    with tf.Session() as sess:
        init = [tf.global_variables_initializer(), iterator.initializer]
        sess.run(init)

        saver = tf.train.Saver()
        saver.restore(sess, ckpt_path)

        errs = []

        with open(os.path.join(os.path.dirname(ckpt_path), 'err.csv'),
                  'w') as ewf:
            for i in range(68):
                ewf.write('x%d, y%d' % (i, i))
                if i < 68 - 1:
                    ewf.write(', ')
                else:
                    ewf.write('\n')

            for i in range(NUM_ITER):
                img, pts, prs = sess.run([image, points, predicts])
                img = np.asarray((img + 1.0) * 255.0 / 2.0, dtype=np.uint8)
                mosaic = np.zeros((56 * BATCH_WIDTH, 56 * BATCH_WIDTH, 3),
                                  dtype=np.uint8)

                perr = np.subtract(pts, prs)
                for pes in perr:
                    for j, pe in enumerate(pes):
                        ewf.write('%f' % abs(pe))
                        if j < len(pes) - 1:
                            ewf.write(', ')
                        else:
                            ewf.write('\n')

                for y in range(BATCH_WIDTH):
                    for x in range(BATCH_WIDTH):
                        pos = y * BATCH_WIDTH + x
                        cur_img = img[pos, :, :, :]

                        if not is_color:
                            cur_img = cv2.cvtColor(cur_img, cv2.COLOR_GRAY2BGR)

                        cur_pts = pts[pos]
                        cur_prs = prs[pos]

                        diff = cur_pts - cur_prs
                        err = 0

                        for p in range(68):
                            ix, iy = p * 2, p * 2 + 1
                            e = np.sqrt(diff[ix] * diff[ix] +
                                        diff[iy] * diff[iy])
                            err += e

                        err /= 68

                        paste_mosaic_patch(mosaic, pos, cur_img, cur_pts,
                                           cur_prs, err)
                        errs.append(err)

                        bests.append({
                            'err': err,
                            'img': copy(cur_img),
                            'pts': copy(pts[pos]),
                            'prs': copy(prs[pos])
                        })
                        worsts.append({
                            'err': err,
                            'img': copy(cur_img),
                            'pts': copy(pts[pos]),
                            'prs': copy(prs[pos])
                        })

                        if len(bests) > MAX_KEEP:
                            bests = sorted(bests,
                                           key=itemgetter('err'),
                                           reverse=False)[:MAX_KEEP]

                        if len(worsts) > MAX_KEEP:
                            worsts = sorted(worsts,
                                            key=itemgetter('err'),
                                            reverse=True)[:MAX_KEEP]

                cv2.imshow("mosaic", mosaic)
                img_save_path = ('%s_%03d.jpg' % (ckpt_path, i))
                cv2.imwrite(img_save_path, mosaic)
                cv2.waitKey(1000)

        err_total = np.mean(errs)
        cv2.imshow("mosaic", mosaic)
        img_save_path = ('%s_%03d.jpg' % (ckpt_path, i))
        cv2.imwrite(img_save_path, mosaic)
        cv2.waitKey(1000)

        # make mosaic images for best & worst
        img_bests = np.zeros((56 * KEEP_WIDTH, 56 * KEEP_WIDTH, 3),
                             dtype=np.uint8)
        img_worsts = np.zeros((56 * KEEP_WIDTH, 56 * KEEP_WIDTH, 3),
                              dtype=np.uint8)

        for i in range(MAX_KEEP):
            paste_mosaic_patch(img_bests, i, bests[i]['img'], bests[i]['pts'],
                               bests[i]['prs'], bests[i]['err'], KEEP_WIDTH)
            paste_mosaic_patch(img_worsts, i, worsts[i]['img'],
                               worsts[i]['pts'], worsts[i]['prs'],
                               worsts[i]['err'], KEEP_WIDTH)

        cv2.imshow('bests', img_bests)
        cv2.imshow('worsts', img_worsts)
        cv2.imwrite('%s_bests.jpg' % ckpt_path, img_bests)
        cv2.imwrite('%s_worsts.jpg' % ckpt_path, img_worsts)

        cv2.waitKey(100)

        return err_total
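paste_mosaic_patch is another helper missing from the listing. A plausible sketch, consistent with how it is called above (56x56 uint8 patches, coordinates normalized to [0, 1], an optional mosaic width), is shown below; the colors and text placement are assumptions.

import cv2
import numpy as np

def paste_mosaic_patch(mosaic, pos, img, pts, prs, err, width=8):
    # Hypothetical helper: annotate one 56x56 patch and paste it into the
    # mosaic grid. pts/prs are flat arrays of normalized (x, y) pairs.
    patch = img.copy()
    h, w = patch.shape[:2]

    for k in range(len(pts) // 2):
        gt = (int(pts[2 * k] * w), int(pts[2 * k + 1] * h))
        pr = (int(prs[2 * k] * w), int(prs[2 * k + 1] * h))
        cv2.circle(patch, gt, 1, (0, 255, 0), -1)  # ground truth in green
        cv2.circle(patch, pr, 1, (0, 0, 255), -1)  # prediction in red

    cv2.putText(patch, '%.3f' % err, (2, 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)

    row, col = divmod(pos, width)
    mosaic[row * h:(row + 1) * h, col * w:(col + 1) * w, :] = patch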
Example #5
            if FLAGS.regularizer in ('l1', 'l2'):
                regularizer = _config_weights_regularizer(
                    FLAGS.regularizer, FLAGS.regularizer_lambda)
            elif FLAGS.regularizer == 'None':
                regularizer = None
            else:
                regularizer = _config_weights_regularizer(
                    FLAGS.regularizer, FLAGS.regularizer_lambda,
                    FLAGS.regularizer_lambda_2)

        with tf.variable_scope('model') as scope:
            intensor = tf.identity(image, 'input')
            predictions, _ = net.lannet(intensor,
                                        is_training=True,
                                        normalizer_fn=norm_fn,
                                        normalizer_params=norm_params,
                                        regularizer=regularizer,
                                        depth_mul=FLAGS.depth_multiplier,
                                        depth_gamma=FLAGS.depth_gamma)
            val_pred, _ = net.lannet(val_imgs,
                                     is_training=False,
                                     normalizer_fn=norm_fn,
                                     normalizer_params=norm_params,
                                     regularizer=regularizer,
                                     depth_mul=FLAGS.depth_multiplier,
                                     depth_gamma=FLAGS.depth_gamma)

        loss = _config_loss_function(points, predictions)
        total_loss = slim.losses.get_total_loss()
        val_loss = l2_loss(val_pts, val_pred)
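_config_weights_regularizer is defined elsewhere in the module. The sketch below shows one plausible implementation consistent with how it is called above (a single scale for 'l1'/'l2', two scales for the combined case), built on the tf.contrib.layers regularizers; it is an assumption, not the original code.

import tensorflow as tf

def _config_weights_regularizer(kind, scale, scale_2=None):
    # Hypothetical factory matching the three call sites above.
    if kind == 'l1':
        return tf.contrib.layers.l1_regularizer(scale)
    if kind == 'l2':
        return tf.contrib.layers.l2_regularizer(scale)
    # Any other value is treated as a combined L1/L2 penalty.
    return tf.contrib.layers.l1_l2_regularizer(scale, scale_2)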