# NOTE: these snippets come from a TF 1.x SiamFC implementation. The imports
# they rely on would look roughly like the following; the project-local helpers
# (siamese_model, configuration, save_cfgs / load_cfgs / have_cfgs,
# auto_select_gpu, mkdir_p) and the CURRENT_DIR / PARENT_DIR constants are
# assumed to be provided by the surrounding repo.
import logging
import os
import os.path as osp
import random
import time
from datetime import datetime

import numpy as np
import scipy.io as sio
import tensorflow as tf
from scipy.misc import imread  # assumption: the tests appear to use scipy's imread


def main(model_config, train_config, track_config):
  # Create training directory
  train_dir = train_config['train_dir']
  if not tf.gfile.IsDirectory(train_dir):
    tf.logging.info('Creating training directory: %s', train_dir)
    tf.gfile.MakeDirs(train_dir)

  # Build the Tensorflow graph
  g = tf.Graph()
  with g.as_default():
    # Set fixed seed
    np.random.seed(train_config['seed'])
    tf.set_random_seed(train_config['seed'])

    # Build the model
    model = siamese_model.SiameseModel(model_config, train_config, mode='inference')
    model.build()

    # Save configurations for future reference
    save_cfgs(train_dir, model_config, train_config, track_config)

    saver = tf.train.Saver(tf.global_variables(),
                           max_to_keep=train_config['max_checkpoints_to_keep'])

    # Dynamically allocate GPU memory
    gpu_options = tf.GPUOptions(allow_growth=True)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)

    sess = tf.Session(config=sess_config)
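    # Resume from the newest checkpoint in train_dir if one exists; otherwise
    # initialize all variables (optionally warm-starting the embedding below).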
    model_path = tf.train.latest_checkpoint(train_config['train_dir'])

    if not model_path:
      # Initialize all variables
      sess.run(tf.global_variables_initializer())
      sess.run(tf.local_variables_initializer())
      start_step = 0

      # Load pretrained embedding model if needed
      if model_config['embed_config']['embedding_checkpoint_file']:
        model.init_fn(sess)

    else:
      logging.info('Restore from last checkpoint: {}'.format(model_path))
      sess.run(tf.local_variables_initializer())
      saver.restore(sess, model_path)
      start_step = tf.train.global_step(sess, model.global_step.name) + 1

    checkpoint_path = osp.join(train_config['train_dir'], 'model.ckpt')
    saver.save(sess, checkpoint_path, global_step=start_step)
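# A minimal usage sketch (an assumption, not repo-confirmed: `configuration`
# exposes MODEL_CONFIG / TRAIN_CONFIG / TRACK_CONFIG; the first two appear in
# the tests below, TRACK_CONFIG is inferred by analogy):
#
#   import configuration
#   main(configuration.MODEL_CONFIG,
#        configuration.TRAIN_CONFIG,
#        configuration.TRACK_CONFIG)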
def test_load_embedding_from_converted_TF_model():
    """Test if the embedding model loaded from converted TensorFlow checkpoint
     produces the same features as the original implementation"""
    checkpoint = osp.join(
        PARENT_DIR,
        'Logs/SiamFC/track_model_checkpoints/SiamFC-3s-color-pretrained')
    test_im = osp.join(CURRENT_DIR, '01.jpg')
    gt_feat = osp.join(CURRENT_DIR, 'result.mat')

    if not osp.exists(checkpoint):
        raise Exception('SiamFC-3s-color-pretrained is not generated yet.')
    model_config, train_config, track_config = load_cfgs(checkpoint)

    # Build the model
    g = tf.Graph()
    with g.as_default():
        model = siamese_model.SiameseModel(model_config,
                                           train_config,
                                           mode='inference')
        model.build()

        with tf.Session() as sess:
            # Load model here
            saver = tf.train.Saver(tf.global_variables())
            if osp.isdir(checkpoint):
                model_path = tf.train.latest_checkpoint(checkpoint)
            else:
                model_path = checkpoint

            saver.restore(sess, model_path)

            # Load image
            im = imread(test_im)
            im_batch = np.expand_dims(im, 0)

            # Feed image
            feature = sess.run([model.exemplar_embeds],
                               feed_dict={model.examplar_feed: im_batch})

            # Compare with features computed from original source code
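            # (scipy.io.loadmat returns MATLAB structs as nested object arrays,
            # hence the [0][0] indexing needed to reach the z_features entry)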
            ideal_feature = sio.loadmat(gt_feat)['r']['z_features'][0][0]
            diff = feature - ideal_feature
            diff = np.sqrt(np.mean(np.square(diff)))
            print('Feature computation difference: {}'.format(diff))
            print('You should get something like: 0.00892720464617')
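            # The test only prints the RMSE; a minimal sketch of turning it
            # into an assertion (threshold chosen from the ~0.0089 expected
            # above) would be:
            #   assert diff < 1e-2, 'embedding features diverge from reference'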
def test_load_embedding_from_mat():
    """Test if the embedding model loaded from .mat
     produces the same features as the original MATLAB implementation"""
    matpath = osp.join(PARENT_DIR, 'assets/2016-08-17.net.mat')
    test_im = osp.join(CURRENT_DIR, '01.jpg')
    gt_feat = osp.join(CURRENT_DIR, 'result.mat')

    model_config = configuration.MODEL_CONFIG
    model_config['embed_config']['embedding_name'] = 'convolutional_alexnet'
    model_config['embed_config'][
        'embedding_checkpoint_file'] = matpath  # For SiameseFC
    model_config['embed_config']['train_embedding'] = False

    g = tf.Graph()
    with g.as_default():
        model = siamese_model.SiameseModel(model_config,
                                           configuration.TRAIN_CONFIG,
                                           mode='inference')
        model.build()

        with tf.Session() as sess:
            # Initialize models
            init = tf.global_variables_initializer()
            sess.run(init)

            # Load model here
            model.init_fn(sess)

            # Load image
            im = imread(test_im)
            im_batch = np.expand_dims(im, 0)

            # Feed image
            feature = sess.run([model.exemplar_embeds],
                               feed_dict={model.examplar_feed: im_batch})

            # Compare with features computed from original source code
            ideal_feature = sio.loadmat(gt_feat)['r']['z_features'][0][0]
            diff = feature - ideal_feature
            diff = np.sqrt(np.mean(np.square(diff)))
            print('Feature computation difference: {}'.format(diff))
            print('You should get something like: 0.00892720464617')
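# Both tests above are plain functions without test-framework hooks; a minimal
# sketch for invoking them directly (assuming the checkpoint and .mat assets
# described above are in place):
#
#   if __name__ == '__main__':
#       test_load_embedding_from_converted_TF_model()
#       test_load_embedding_from_mat()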
def main(model_config, train_config, track_config):
  os.environ['CUDA_VISIBLE_DEVICES'] = auto_select_gpu()

  # Create the training directory used to save configurations, model files, and TensorBoard logs
  train_dir = train_config['train_dir']
  if not osp.isdir(train_dir):
    logging.info('Creating training directory: %s', train_dir)
    mkdir_p(train_dir)

  if have_cfgs(train_dir):
    model_config, train_config, track_config = load_cfgs(train_dir)
    print("=================== load cfg ")
  else:
    save_cfgs(train_dir, model_config, train_config, track_config)
    print("=================== save default cfg, please modify files in {}".format(train_dir))
    return

  g = tf.Graph()
  with g.as_default():
    # Set fixed seed for reproducible experiments
    random.seed(train_config['seed'])
    np.random.seed(train_config['seed'])
    tf.set_random_seed(train_config['seed'])

    # Build the training and validation model
    model = siamese_model.SiameseModel(model_config, train_config, track_config, mode='train')
    model.build()
    model_va = siamese_model.SiameseModel(model_config, train_config, track_config, mode='validation')
    model_va.build(reuse=True)

    learning_rate = _configure_learning_rate(train_config, model.global_step)
    optimizer = _configure_optimizer(train_config, learning_rate)
    tf.summary.scalar('learning_rate', learning_rate)

    # a general pattern for running training loops: https://qiita.com/horiem/items/00ec6488b23895cc4fe2
    # TensorFlow 2.1 custom-training walkthrough: https://www.tensorflow.org/tutorials/customization/custom_training_walkthrough
    # Set up the training ops
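    # (tf.contrib.layers.optimize_loss computes the gradients, clips them by
    # global norm when clip_gradients is set, and applies the optimizer as a
    # single op.)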
    opt_op = tf.contrib.layers.optimize_loss(
      loss=model.total_loss,
      global_step=model.global_step,
      learning_rate=learning_rate,
      optimizer=optimizer,
      clip_gradients=train_config['clip_gradients'],
      learning_rate_decay_fn=None,
      summaries=['learning_rate'])

    with tf.control_dependencies([opt_op]):
      train_op = tf.no_op(name='train')

    saver = tf.train.Saver(tf.global_variables(),
                           max_to_keep=train_config['max_checkpoints_to_keep'])

    summary_writer = tf.summary.FileWriter(train_dir, g)
    summary_op = tf.summary.merge_all()

    global_variables_init_op = tf.global_variables_initializer()
    local_variables_init_op = tf.local_variables_initializer()
    g.finalize()  # Finalize graph to avoid adding ops by mistake

    # Dynamically allocate GPU memory
    gpu_options = tf.GPUOptions(allow_growth=True)
    sess_config = tf.ConfigProto(gpu_options=gpu_options)

    sess = tf.Session(config=sess_config)
    model_path = tf.train.latest_checkpoint(train_config['train_dir'])

    if not model_path:
      sess.run(global_variables_init_op)
      sess.run(local_variables_init_op)
      start_step = 0

      if model_config['embed_config']['embedding_checkpoint_file']:
        model.init_fn(sess)
    else:
      logging.info('Restore from last checkpoint: {}'.format(model_path))
      sess.run(local_variables_init_op)
      saver.restore(sess, model_path)
      start_step = tf.train.global_step(sess, model.global_step.name) + 1

    # export
    if train_config["export"]:
      # still debugging
      '''
      frozen_graph_def = tf.graph_util.convert_variables_to_constants(sess, tf.get_default_graph().as_graph_def(), ["train/detection/add"])
      frozen_graph = tf.Graph()
      with frozen_graph.as_default():
        tf.import_graph_def(frozen_graph_def)
        save_model_dir = osp.join(train_config['train_dir'], 'models')
        tf.train.write_graph(frozen_graph_def, save_model_dir, 'quantized_frozen_graph.pb', as_text=False)
        tf.train.write_graph(frozen_graph_def, save_model_dir, 'quantized_frozen_graph.pbtxt', as_text=True)

        output_op = sess.graph.get_tensor_by_name("validation/detection/add:0")
        input1_op = sess.graph.get_tensor_by_name("validation/template_image:0")
        input2_op = sess.graph.get_tensor_by_name("validation/input_image:0")

        converter = tf.lite.TFLiteConverter.from_session(sess, [input1_op, input2_op], [output_op])
        converter.inference_type = tf.lite.constants.QUANTIZED_UINT8
        input_arrays = converter.get_input_arrays()
        converter.quantized_input_stats = {input_arrays[0] : (0., 1.), input_arrays[1] : (0., 1.)}  # mean, std_dev
        converter.default_ranges_stats = (0, 255)
        tflite_model = converter.convert()
        open(osp.join(save_model_dir, 'quantized_frozen_graph.tflite'), "wb").write(tflite_model)
      '''
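      # The quoted block above sketches freezing the graph and converting it to
      # a quantized TFLite model; it is left disabled until the input/output
      # node names are confirmed.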
      return

    # Training loop
    data_config = train_config['train_data_config']
    total_steps = int(data_config['epoch'] *
                      data_config['num_examples_per_epoch'] /
                      data_config['batch_size'])
    logging.info('Train for {} steps'.format(total_steps))
    save_step = int(data_config['num_examples_per_epoch'] / data_config['batch_size'])
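    # save_step equals the number of batches in one epoch, so a checkpoint is
    # written roughly once per epoch (and again at the final step).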
    print("=========== save_step: {}".format(save_step))
    for step in range(start_step, total_steps):
      start_time = time.time()
      # no "feed_dict"
      # has "feed_dict" exmaple (mnist): https://qiita.com/SwitchBlade/items/6677c283b2402d060cd0
      _, loss, batch_loss, instances, response = sess.run([train_op, model.total_loss, model.batch_loss, model.instances, model.response])
      duration = time.time() - start_time
      if step % 10 == 0:
        examples_per_sec = data_config['batch_size'] / float(duration)
        time_remain = data_config['batch_size'] * (total_steps - step) / examples_per_sec
        m, s = divmod(time_remain, 60)
        h, m = divmod(m, 60)
        format_str = ('%s: step %d, total loss = %.2f, batch loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch; %dh:%02dm:%02ds remains)')
        logging.info(format_str % (datetime.now(), step, loss, batch_loss,
                                   examples_per_sec, duration, h, m, s))

      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)

      if step % save_step == 0 or (step + 1) == total_steps:
        checkpoint_path = osp.join(train_config['train_dir'], 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
def main(model_config, train_config, track_config):
    os.environ['CUDA_VISIBLE_DEVICES'] = auto_select_gpu()

    # Create the training directory used to save configurations, model files, and TensorBoard logs
    train_dir = train_config['train_dir']
    if not osp.isdir(train_dir):
        logging.info('Creating training directory: %s', train_dir)
        mkdir_p(train_dir)

    g = tf.Graph()
    with g.as_default():
        # Set fixed seed for reproducible experiments
        random.seed(train_config['seed'])
        np.random.seed(train_config['seed'])
        tf.set_random_seed(train_config['seed'])

        # Build the training and validation model
        model = siamese_model.SiameseModel(model_config,
                                           train_config,
                                           mode='train')
        model.build()
        model_va = siamese_model.SiameseModel(model_config,
                                              train_config,
                                              mode='validation')
        model_va.build(reuse=True)

        # Save configurations for future reference
        save_cfgs(train_dir, model_config, train_config, track_config)

        learning_rate = _configure_learning_rate(train_config,
                                                 model.global_step)
        optimizer = _configure_optimizer(train_config, learning_rate)
        tf.summary.scalar('learning_rate', learning_rate)

        # Set up the training ops
        opt_op = tf.contrib.layers.optimize_loss(
            loss=model.total_loss,
            global_step=model.global_step,
            learning_rate=learning_rate,
            optimizer=optimizer,
            clip_gradients=train_config['clip_gradients'],
            learning_rate_decay_fn=None,
            summaries=['learning_rate'])

        with tf.control_dependencies([opt_op]):
            train_op = tf.no_op(name='train')

        saver = tf.train.Saver(
            tf.global_variables(),
            max_to_keep=train_config['max_checkpoints_to_keep'])

        summary_writer = tf.summary.FileWriter(train_dir, g)
        summary_op = tf.summary.merge_all()

        global_variables_init_op = tf.global_variables_initializer()
        local_variables_init_op = tf.local_variables_initializer()
        g.finalize()  # Finalize graph to avoid adding ops by mistake

        # Dynamically allocate GPU memory
        gpu_options = tf.GPUOptions(allow_growth=True)
        sess_config = tf.ConfigProto(gpu_options=gpu_options)

        sess = tf.Session(config=sess_config)
        model_path = tf.train.latest_checkpoint(train_config['train_dir'])

        if not model_path:
            sess.run(global_variables_init_op)
            sess.run(local_variables_init_op)
            start_step = 0

            if model_config['embed_config']['embedding_checkpoint_file']:
                model.init_fn(sess)
        else:
            logging.info('Restore from last checkpoint: {}'.format(model_path))
            sess.run(local_variables_init_op)
            saver.restore(sess, model_path)
            start_step = tf.train.global_step(sess, model.global_step.name) + 1

        # Training loop
        data_config = train_config['train_data_config']
        total_steps = int(data_config['epoch'] *
                          data_config['num_examples_per_epoch'] /
                          data_config['batch_size'])
        logging.info('Train for {} steps'.format(total_steps))
        for step in range(start_step, total_steps):
            start_time = time.time()
            _, loss, batch_loss = sess.run(
                [train_op, model.total_loss, model.batch_loss])
            duration = time.time() - start_time

            if step % 10 == 0:
                examples_per_sec = data_config['batch_size'] / float(duration)
                time_remain = data_config['batch_size'] * (
                    total_steps - step) / examples_per_sec
                m, s = divmod(time_remain, 60)
                h, m = divmod(m, 60)
                format_str = (
                    '%s: step %d, total loss = %.2f, batch loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch; %dh:%02dm:%02ds remains)')
                logging.info(format_str %
                             (datetime.now(), step, loss, batch_loss,
                              examples_per_sec, duration, h, m, s))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            if step % train_config['save_model_every_n_step'] == 0 or (
                    step + 1) == total_steps:
                checkpoint_path = osp.join(train_config['train_dir'],
                                           'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
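# This variant differs from the previous main() mainly in that it saves every
# train_config['save_model_every_n_step'] steps instead of deriving a per-epoch
# save interval, and it omits the export path.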