def setup(self, bottom, top):

    if self.phase == 0:  # train phase
        import train_config
        config = train_config.Config()
    else:  # val or test phase
        import test_config
        config = test_config.Config()

    self.N = config.N
    self.context_dim = config.context_dim
    self.spatial_dim = config.spatial_dim
    self.HW = config.spatial_pool_map * config.spatial_pool_map
    self.T = config.T
    self.key_word_thresh = config.key_word_thresh
    self.hard_word_att_idx = []

    # query-aware context features for every image location
    top[0].reshape(self.N, self.context_dim + self.spatial_dim, self.HW)
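This setup() pulls every hyper-parameter from a phase-specific Config object. The train_config and test_config modules are not shown on this page; a minimal sketch of the attributes the layer assumes might look like the following (all values are illustrative placeholders, not the originals):

class Config(object):
    def __init__(self):
        self.N = 25                  # batch size
        self.context_dim = 500       # visual context feature dimension
        self.spatial_dim = 8         # spatial position feature dimension
        self.spatial_pool_map = 7    # pooled map side; HW = 7 * 7 locations
        self.T = 20                  # max query length in words
        self.key_word_thresh = 0.5   # attention threshold for key words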
Example #2
def setup(self, bottom, top):

    if self.phase == 0:  # train phase
        import train_config
        config = train_config.Config()
    else:  # val or test phase
        import test_config
        config = test_config.Config()

    self.N = config.N
    self.context_dim = config.context_dim
    self.spatial_dim = config.spatial_dim
    self.HW = config.spatial_pool_map * config.spatial_pool_map
    self.T = config.T
    self.key_word_thresh = config.key_word_thresh
    self.hard_word_att_idx = []

    # query-aware appearance pool for every word
    top[0].reshape(self.N, self.context_dim, self.T)
    # query-aware spatial position pool for every word
    top[1].reshape(self.N, self.spatial_dim, self.T)
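Both examples above are fragments of caffe Python layers: setup() is one of the four methods the pycaffe layer interface expects. A minimal sketch of the surrounding skeleton (the class name and blob contents here are hypothetical, not from the original repo):

import caffe

class QueryAwarePoolLayer(caffe.Layer):  # hypothetical name
    def setup(self, bottom, top):
        # read the phase-specific config and size the top blobs once,
        # as in the examples above
        pass

    def reshape(self, bottom, top):
        # top shapes were fixed in setup, so nothing to do per batch
        pass

    def forward(self, bottom, top):
        # a real layer would fill top[0] with attention-pooled features
        top[0].data[...] = 0

    def backward(self, top, propagate_down, bottom):
        # this sketch propagates no gradient to bottom
        pass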
Example #3
from __future__ import absolute_import, division, print_function

import caffe
import numpy as np
import os
import skimage.io
import threading
try:
    import Queue as queue  # Python 2
except ImportError:
    import queue  # Python 3
import ast

import train_config
import test_config
import track_model_train as track_model
from glob import glob, iglob

config = train_config.Config()
test_config = test_config.Config()  # note: rebinds the module name to a Config instance

np.random.seed(1000)


def load_and_process_imgs(im_tuple):
    """Load an image pair, expand grayscale to RGB, and subtract the channel mean."""
    im1 = skimage.io.imread(im_tuple[0])
    im2 = skimage.io.imread(im_tuple[1])
    # tile single-channel (grayscale) images to 3 channels
    if im1.ndim == 2:
        im1 = np.tile(im1[:, :, np.newaxis], (1, 1, 3))
    if im2.ndim == 2:
        im2 = np.tile(im2[:, :, np.newaxis], (1, 1, 3))
    # mean-subtract in float32, as the network expects
    imcrop1 = im1.astype(np.float32) - track_model.channel_mean
    imcrop2 = im2.astype(np.float32) - track_model.channel_mean
    return (imcrop1, imcrop2)
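A quick usage sketch of the helper above (the file paths are hypothetical):

im_tuple = ('frames/00001.jpg', 'frames/00002.jpg')  # hypothetical paths
imcrop1, imcrop2 = load_and_process_imgs(im_tuple)
# both are float32 H x W x 3 arrays with track_model.channel_mean removed,
# ready to be transposed to C x H x W for the caffe net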
Example #4
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import callbacks

# project-local modules this snippet also assumes: train_config, load_data,
# create_model, adamw, progress_logger, plus the DummyScopeStrategy and
# Validate helpers


def main():
  tf.config.optimizer.set_jit(True)  # enable XLA JIT compilation

  strategy = tf.distribute.MirroredStrategy()
  replicas = strategy.num_replicas_in_sync
  print('replicas:', replicas)
  if replicas == 1:
    strategy = DummyScopeStrategy()
  config = train_config.Config(replicas)
  idx2class, train_steps, val_steps, test_steps, \
      train, val, val_noaugment, test, test_ids = load_data.load_data(config)

  with strategy.scope():
    reduce_lr = callbacks.ReduceLROnPlateau(
      monitor='val_categorical_accuracy',
      factor=0.5,
      patience=6,
      cooldown=0,
      min_lr=3e-6,
      min_delta=0.002,
      verbose=1
    )
    model = create_model.create_model(config)
    optimizer = adamw.AdamW(lr=1e-3, weight_decay=3e-5,
                            steps_per_epoch=train_steps)
    model.compile(
      optimizer=optimizer,
      loss='categorical_crossentropy',
      weighted_metrics=['categorical_accuracy'],
      metrics=[tfa.metrics.F1Score(config.classes, 'macro')]
    )

  prob_test = np.zeros((len(test_ids), config.classes), dtype=np.float16)

  callback_list = [reduce_lr, Validate(val, val_steps)]
  """
    Horizontal voting motivated by Horizontal and Vertical Ensemble
    with Deep Representation for Classification [arxiv.org/abs/1306.2759]
  """
  # 10 voting rounds: the first trains the model fully, later rounds
  # fine-tune briefly before predicting
  for test_sample in range(10):
    initial_epoch = 0
    epochs = 100 + test_sample
    validation_data = val_noaugment
    validation_steps = val_steps
    steps_per_epoch = train_steps
    testing = test_sample > 0
    if testing:
      # voting rounds: a single 1/5-length epoch, no validation or callbacks
      initial_epoch = epochs - 1
      validation_data = None
      validation_steps = None
      steps_per_epoch = train_steps // 5
      callback_list = []
    try:
      model.fit(
        train,
        validation_data=validation_data,
        epochs=epochs,
        initial_epoch=initial_epoch,
        steps_per_epoch=steps_per_epoch,
        validation_steps=validation_steps,
        verbose=1,
        callbacks=callback_list,
        workers=0,
        max_queue_size=1
      )
    except KeyboardInterrupt:
      pass
    try:
      # accumulate class probabilities across voting rounds
      logger = progress_logger.ProgressLogger('predict', test_steps)
      for i, batch in zip(range(test_steps), test):
        prob_test[i * config.bs_inf:(i + 1) * config.bs_inf] += \
            model.predict_on_batch(batch[0])
        logger.update_log()
    except KeyboardInterrupt:
      break
    print()

  y_test = np.argmax(prob_test, axis=1)
  pred = [idx2class[i] for i in y_test]
  df_pred = pd.DataFrame({'Id': test_ids, 'Predicted': pred})
  df_pred.to_csv('submission.csv', index=False)
  
  # dump the raw float16 probabilities, bs_inf_mem inference batches at a time
  with open('predicted_probs', 'wb') as fout:
    for megabatch in range(0, test_steps, config.bs_inf_mem):
      fout.write(prob_test[megabatch * config.bs_inf:
          (megabatch + config.bs_inf_mem) * config.bs_inf].tobytes())
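Since predicted_probs holds the raw float16 bytes of prob_test, it can be read back with numpy; a minimal sketch, assuming config.classes matches what was written:

import numpy as np

prob_test = np.fromfile('predicted_probs', dtype=np.float16)
prob_test = prob_test.reshape(-1, config.classes)  # one row per test sample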