Ejemplo n.º 1
0
 def init_cmd_logger(self):
   """Set up self.log, optionally writing raw logs under a model folder."""
   log_root = self.opt['logs']
   if log_root is None:
     # No log folder configured: plain logger only.
     self.log = logger.get()
   else:
     self.logs_folder = os.path.join(log_root, self.model_id)
     self.log = logger.get(os.path.join(self.logs_folder, 'raw'))
Ejemplo n.º 2
0
    def __init__(self, filename, labels, name, buffer_size=1, restore_step=0):
        """Create a time-series logger that appends labeled values to a file.

        Args:
            filename: string, path of the output log file.
            labels: string or list of string, series labels.
            name: string or None; defaults to the first label when None.
            buffer_size: int, number of records buffered before a flush.
            restore_step: int, step to resume from (stored by callers;
                unused in this constructor).
        """
        self.filename = filename
        self.folder = os.path.dirname(filename)
        self.written_catalog = False
        self.log = logger.get()

        # Accept a single label by wrapping it in a list.
        if not isinstance(labels, list):
            labels = [labels]
        self.name = labels[0] if name is None else name

        self.labels = labels
        self.buffer_size = buffer_size
        self.buffer = []
        # Map label -> column index.
        self.label_table = {label: ll for ll, label in enumerate(labels)}
        self.log.info('Time series data "{}" log to "{}"'.format(
            labels, filename))
        self._has_init = False
Ejemplo n.º 3
0
 def __init__(self,
              sess,
              model,
              dataset,
              train_opt,
              model_opt,
              step=StepCounter(0),
              loggers=None,
              steps_per_log=10):
   """Trainer that runs `steps_per_log` training steps per logging cycle.

   Args:
     sess: TF session.
     model: dict of model tensors; 'global_step' is reset when finetuning,
       and 'loss'/'train_step' are fetched every step.
     dataset: dataset object, passed through to the base runner.
     train_opt: training options dict, passed through.
     model_opt: model options dict; 'finetune' triggers a step reset.
     step: step counter.  NOTE(review): StepCounter(0) is a mutable default
       shared across calls — confirm callers always pass their own.
     loggers: optional loggers, passed through.
     steps_per_log: number of batches run between logging cycles.
   """
   # These two outputs are fetched by the base runner on every step.
   outputs = ['loss', 'train_step']
   num_batch = steps_per_log
   self.log = logger.get()
   if model_opt['finetune']:
     self.log.warning('Finetuning')
     # Restart global step counting for the finetuning run.
     sess.run(tf.assign(model['global_step'], 0))
   super(Trainer, self).__init__(
       sess,
       model,
       dataset,
       num_batch,
       train_opt,
       model_opt,
       outputs,
       step=step,
       loggers=loggers,
       phase_train=True,
       increment_step=True)
Ejemplo n.º 4
0
 def __init__(self,
              sess,
              model,
              dataset,
              num_batch,
              train_opt,
              model_opt,
              outputs,
              step=StepCounter(0),
              loggers=None,
              phase_train=True,
              increment_step=False):
     """Runner that feeds shuffled, cycling batches from `dataset`.

     Args:
       sess: TF session.
       model: model tensors, passed through to the base runner.
       dataset: dataset object; must provide get_dataset_size().
       num_batch: number of batches to run per invocation.
       train_opt: training options dict; 'batch_size' is read here.
       model_opt: model options dict (stored only).
       outputs: list of output names fetched each step.
       step: step counter.  NOTE(review): StepCounter(0) is a mutable
         default shared across calls — confirm callers pass their own.
       loggers: optional loggers.
       phase_train: whether to run in training phase.
       increment_step: whether the base runner advances `step`.
     """
     self.dataset = dataset
     self.loggers = loggers
     self.log = logger.get()
     self.model_opt = model_opt
     self.train_opt = train_opt
     # Subclass hook: maps model inputs to placeholders.
     self.input_variables = self.get_input_variables()
     num_ex = dataset.get_dataset_size()
     # Cycle forever with reshuffling; per-epoch logging disabled (-1).
     batch_iter = BatchIterator(num_ex,
                                batch_size=train_opt['batch_size'],
                                get_fn=self.get_batch,
                                cycle=True,
                                shuffle=True,
                                log_epoch=-1)
     super(Runner, self).__init__(sess,
                                  model,
                                  batch_iter,
                                  outputs,
                                  num_batch=num_batch,
                                  step=step,
                                  phase_train=phase_train,
                                  increment_step=increment_step)
def main(reset_process, initialize_db, experiment_name, remove=None):
    """Populate db with experiments to run."""
    main_config = config.Config()
    log = logger.get(os.path.join(main_config.log_dir, 'prepare_experiments'))

    if reset_process:
        db.reset_in_process()
        log.info('Reset experiment progress counter in DB.')

    if initialize_db:
        db.initialize_database()
        log.info('Initialized DB.')

    if experiment_name is not None:  # TODO: add capability for bayesian opt.
        db_config = credentials.postgresql_connection()
        experiment_dict = experiments()[experiment_name]()
        # Hyperparameter optimization takes precedence over grid search.
        if experiment_dict.get('hp_optim') is not None:
            combos = hp_optim_parameters(experiment_dict, log)
            log.info('Preparing an hp-optimization experiment.')
        else:
            combos = package_parameters(experiment_dict, log)
            log.info('Preparing a grid-search experiment.')
        with db.db(db_config) as conn:
            conn.populate_db(combos)
            conn.return_status('CREATE')
        log.info('Added new experiments.')

    if remove is not None:
        db_config = credentials.postgresql_connection()
        with db.db(db_config) as conn:
            conn.remove_experiment(remove)
        log.info('Removed %s.' % remove)
 def __init__(self, q, batch_iter):
   """Daemon thread that produces batches from `batch_iter` into queue `q`.

   Args:
     q: Queue that fetched batches are pushed into.
     batch_iter: iterator yielding batches.
   """
   super(BatchProducer, self).__init__()
   # NOTE(review): Thread.__init__ appears to run twice (once via the super
   # call above); the explicit call below is redundant if BatchProducer
   # subclasses Thread directly — confirm and drop one.
   threading.Thread.__init__(self)
   self.q = q
   self.batch_iter = batch_iter
   self.log = logger.get()
   # NOTE(review): `_stop` shadows threading.Thread's private `_stop`
   # attribute on some Python versions — verify join()/is_alive() behavior.
   self._stop = threading.Event()
   self.daemon = True
Ejemplo n.º 7
0
    def __init__(self,
                 sess,
                 model,
                 dataset,
                 opt,
                 model_opt,
                 outputs,
                 start_idx=-1,
                 end_idx=-1):
        """One-pass evaluator over `dataset`, optionally over a sub-range.

        Args:
            sess: TF session.
            model: model tensors, passed through to the base runner.
            dataset: dataset object; must provide get_dataset_size().
            opt: evaluation options; reads 'batch_size', 'prefetch',
                'queue_size', 'num_worker'.
            model_opt: model options dict (stored only).
            outputs: list of output names to fetch.
            start_idx: first example index, or -1 to run the whole dataset.
            end_idx: one-past-last example index, or -1 for the whole set.
        """
        self.dataset = dataset
        self.log = logger.get()
        self.model_opt = model_opt
        self.opt = opt
        # Subclass hook: maps model inputs to placeholders.
        self.input_variables = self.get_input_variables()
        if start_idx != -1 and end_idx != -1:
            # Partial run over [start_idx, end_idx), clipped to dataset size.
            if start_idx < 0 or end_idx < 0:
                self.log.fatal('Indices must be non-negative.')
            elif start_idx >= end_idx:
                # NOTE(review): assumes log.fatal aborts execution; otherwise
                # num_ex below can be non-positive — confirm.
                self.log.fatal('End index must be greater than start index.')
            num_ex = end_idx - start_idx
            if end_idx > dataset.get_dataset_size():
                self.log.warning('End index exceeds dataset size.')
                end_idx = dataset.get_dataset_size()
                num_ex = end_idx - start_idx
            self.log.info('Running partial dataset: start {} end {}'.format(
                start_idx, end_idx))
            self.all_idx = np.arange(start_idx, end_idx)
        else:
            self.log.info('Running through entire dataset.')
            num_ex = dataset.get_dataset_size()
            self.all_idx = np.arange(num_ex)
        if num_ex == -1:
            # Defensive fallback; num_ex should already be set above.
            num_ex = dataset.get_dataset_size()

        self.log.info('\nnum_ex: {:d} -- opt batch_size: {:d}\n'.format(
            num_ex, opt['batch_size']))

        # Single pass, fixed order (no cycling, no shuffling).
        batch_iter = BatchIterator(num_ex,
                                   batch_size=opt['batch_size'],
                                   get_fn=self.get_batch,
                                   cycle=False,
                                   shuffle=False)
        if opt['prefetch']:
            # Wrap with a threaded prefetcher; queue logging disabled (-1).
            batch_iter = ConcurrentBatchIterator(
                batch_iter,
                max_queue_size=opt['queue_size'],
                num_threads=opt['num_worker'],
                log_queue=-1)
        super(OneTimeEvalBase, self).__init__(sess,
                                              model,
                                              batch_iter,
                                              outputs,
                                              num_batch=1,
                                              phase_train=False,
                                              increment_step=False)
        pass
Ejemplo n.º 8
0
def main(initialize_database, start_exp=None, end_exp=None):
    """Main function to process Allen data."""
    config = Config()
    boc = BrainObservatoryCache(manifest_file=config.manifest_file)
    # One timestamp names both the log file and this processing run.
    timestamp = py_utils.timestamp()
    log = logger.get(os.path.join(config.log_dir, timestamp))
    if initialize_database:
        data_db.initialize_database()
        log.info('Initialized DB.')
    populate_db(config, boc, log, timestamp, start_exp, end_exp)
Ejemplo n.º 9
0
    def __init__(self, opt, output_fname):
        """Prepare an h5 dataset writer with a deterministically shuffled
        image ID order.

        Args:
            opt: options dict (stored only).
            output_fname: path of the output h5 dataset.
        """
        self.opt = opt
        self.log = logger.get()
        self.output_fname = output_fname

        self.log.info('Output h5 dataset: {}'.format(self.output_fname))
        self.log.info('Reading image IDs')
        self.img_ids = self.read_ids()

        # Deterministic shuffle of the ID order (fixed seed 2).
        rng = np.random.RandomState(2)
        order = rng.permutation(len(self.img_ids))
        self.img_ids = [self.img_ids[idx] for idx in order]
Ejemplo n.º 10
0
 def __init__(self,
              sess,
              model,
              batch_iter,
              outputs,
              num_batch=1,
              step=StepCounter(0),
              phase_train=True,
              increment_step=False):
     """Store the configuration for a batched execution loop.

     Args:
       sess: TF session.
       model: model tensors.
       batch_iter: iterator yielding input batches.
       outputs: list of output names fetched each step.
       num_batch: number of batches per invocation.
       step: step counter object.
       phase_train: whether to run in training phase.
       increment_step: whether to advance `step` after each run.
     """
     self._log = logger.get()
     self._sess = sess
     self._model = model
     self._batch_iter = batch_iter
     self._outputs = outputs
     self._num_batch = num_batch
     self._step = step
     self._phase_train = phase_train
     self._increment_step = increment_step
     # Scratch space for the batch currently being processed.
     self._current_batch = {}
Ejemplo n.º 11
0
 def __init__(self,
              sess,
              model,
              dataset,
              num_batch,
              train_opt,
              model_opt,
              outputs,
              step=StepCounter(0),
              loggers=None,
              phase_train=True,
              increment_step=False):
     """Training runner with optional orientation outputs and prefetching.

     Args:
       sess: TF session.
       model: model tensors, passed through to the base runner.
       dataset: dataset object; must provide get_dataset_size().
       num_batch: number of batches to run per invocation.
       train_opt: training options; reads 'batch_size', 'prefetch',
         'queue_size', 'num_worker'.
       model_opt: model options; reads 'add_orientation' and
         'num_orientation_classes'.
       outputs: list of output names fetched each step.
       step: step counter.  NOTE(review): StepCounter(0) is a mutable
         default shared across calls — confirm callers pass their own.
       loggers: optional loggers.
       phase_train: whether to run in training phase.
       increment_step: whether the base runner advances `step`.
     """
     self.dataset = dataset
     self.log = logger.get()
     self.loggers = loggers
     self.add_orientation = model_opt['add_orientation']
     self.num_orientation_classes = model_opt['num_orientation_classes']
     # Subclass hook: maps model inputs to placeholders.
     self.input_variables = self.get_input_variables()
     num_ex = dataset.get_dataset_size()
     # Cycle forever with reshuffling; per-epoch logging disabled (-1).
     batch_iter = BatchIterator(num_ex,
                                batch_size=train_opt['batch_size'],
                                get_fn=self.get_batch,
                                cycle=True,
                                progress_bar=False,
                                shuffle=True,
                                log_epoch=-1)
     if train_opt['prefetch']:
         # Wrap with a threaded prefetcher; queue logging disabled (-1).
         batch_iter = ConcurrentBatchIterator(
             batch_iter,
             max_queue_size=train_opt['queue_size'],
             num_threads=train_opt['num_worker'],
             log_queue=-1)
     super(Runner, self).__init__(sess,
                                  model,
                                  batch_iter,
                                  outputs,
                                  num_batch=num_batch,
                                  step=step,
                                  phase_train=phase_train,
                                  increment_step=increment_step)
Ejemplo n.º 12
0
def weight_variable(shape,
                    initializer=None,
                    init_val=None,
                    wd=None,
                    name=None,
                    trainable=True):
    """Create a weight variable, optionally registering weight decay.

    Args:
        shape: shape of the weights, list of int.
        initializer: TF initializer; defaults to truncated normal, std 0.01.
        init_val: explicit initial value; overrides the initializer.
        wd: weight decay coefficient; when truthy, an L2 penalty on the
            variable is added to the 'losses' collection.
        name: variable name.
        trainable: whether the variable is trainable.

    Returns:
        A tf.Variable.
    """
    log = logger.get()
    if initializer is None:
        initializer = tf.truncated_normal_initializer(stddev=0.01)
    initial = initializer(shape) if init_val is None else init_val
    var = tf.Variable(initial, name=name, trainable=trainable)
    if wd:
        weight_decay = tf.mul(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
 def __init__(self,
              batch_iter,
              max_queue_size=10,
              num_threads=5,
              log_queue=20,
              name=None):
   """
   Data provider wrapper that supports concurrent data fetching.

   Args:
     batch_iter: underlying batch iterator to wrap.
     max_queue_size: max number of prefetched batches held in the queue.
     num_threads: number of fetcher threads to launch.
     log_queue: how often to log queue state (-1 presumably disables;
       confirm against the logging code).
     name: optional name used in log messages.
   """
   super(ConcurrentBatchIterator, self).__init__()
   self.max_queue_size = max_queue_size
   self.num_threads = num_threads
   self.q = Queue.Queue(maxsize=max_queue_size)
   self.log = logger.get()
   self.batch_iter = batch_iter
   self.fetchers = []
   # NOTE(review): fetcher threads start here, before counter/relaunch/
   # log_queue/name are assigned — confirm fetchers do not read these
   # attributes immediately after launch.
   self.init_fetchers()
   self.counter = 0
   self.relaunch = True
   self.log_queue = log_queue
   self.name = name
   pass
Ejemplo n.º 14
0
def gru(inp_dim, hid_dim, wd=None, scope='gru'):
    """Adds a GRU component.

    Creates the GRU parameters under `scope` and returns a one-step
    transition function.

    Args:
        inp_dim: Input data dim
        hid_dim: Hidden state dim
        wd: Weight decay
        scope: Prefix

    Returns:
        unroll: function (inp, state) -> updated hidden state.
    """
    log = logger.get()
    log.info('GRU: {}'.format(scope))
    log.info('Input dim: {}'.format(inp_dim))
    log.info('Hidden dim: {}'.format(hid_dim))

    # Parameters for the input (i), update (u) and reset (r) transforms.
    with tf.variable_scope(scope):
        w_xi = weight_variable([inp_dim, hid_dim], wd=wd, name='w_xi')
        w_hi = weight_variable([hid_dim, hid_dim], wd=wd, name='w_hi')
        b_i = weight_variable([hid_dim], name='b_i')

        w_xu = weight_variable([inp_dim, hid_dim], wd=wd, name='w_xu')
        w_hu = weight_variable([hid_dim, hid_dim], wd=wd, name='w_hu')
        b_u = weight_variable([hid_dim], name='b_u')

        w_xr = weight_variable([inp_dim, hid_dim], wd=wd, name='w_xr')
        w_hr = weight_variable([hid_dim, hid_dim], wd=wd, name='w_hr')
        b_r = weight_variable([hid_dim], name='b_r')

    def unroll(inp, state):
        """One GRU step: mixes the old state with a gated candidate."""
        gate_i = tf.sigmoid(
            tf.matmul(inp, w_xi) + tf.matmul(state, w_hi) + b_i)
        gate_r = tf.sigmoid(
            tf.matmul(inp, w_xr) + tf.matmul(state, w_hr) + b_r)
        cand = tf.tanh(
            tf.matmul(inp, w_xu) + gate_r * tf.matmul(state, w_hu) + b_u)
        return state * (1 - gate_i) + cand * gate_i

    return unroll
Ejemplo n.º 15
0
def main(experiment_name, list_experiments=False, gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB.

    Args:
        experiment_name: name of the experiment to run; when None, one is
            pulled from the DB.
        list_experiments: when True, print the initialized experiments in
            the DB and return without training.
        gpu_device: TF device string the model graph is placed on.
    """
    # Python 2 print statements throughout (consistent with this codebase).
    if list_experiments:
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        print 'You can add to the DB with: '\
            'python prepare_experiments.py --experiment=%s' % \
            exps[0].values()[0]
        return
    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()

    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name, log=log,
        config=config)  # Update config w/ DB params
    dataset_module = py_utils.import_module(model_dir=config.dataset_info,
                                            dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    # Fold [1] = training split, fold [0] = validation split (by convention
    # here; see TODO below).
    train_data, train_means = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[1],  # TODO: SEARCH FOR INDEX.
        log=log)
    val_data, val_means = get_data_pointers(dataset=config.dataset,
                                            base_dir=config.tf_records,
                                            cv=dataset_module.folds.keys()[0],
                                            log=log)

    # Initialize output folders
    dir_list = {
        'checkpoints':
        os.path.join(config.checkpoints, condition_label),
        'summaries':
        os.path.join(config.summaries, condition_label),
        'condition_evaluations':
        os.path.join(config.condition_evaluations, condition_label),
        'experiment_evaluations':
        os.path.join(  # DEPRECIATED
            config.experiment_evaluations, experiment_label),
        'visualization':
        os.path.join(config.visualizations, condition_label),
        'weights':
        os.path.join(config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]

    # Prepare data loaders on the cpu
    config.data_augmentations = py_utils.flatten_list(
        config.data_augmentations, log)
    with tf.device('/cpu:0'):
        train_images, train_labels = data_loader.inputs(
            dataset=train_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=config.data_augmentations,
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle)
        val_images, val_labels = data_loader.inputs(
            dataset=val_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=config.data_augmentations,
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle)
    log.info('Created tfrecord dataloader tensors.')

    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join('models', 'structs',
                                   experiment_name).replace(os.path.sep, '.'))
    except IOError:
        # NOTE(review): only prints; model_dict stays unbound here, so the
        # next statement raises NameError — confirm intent (re-raise?).
        print 'Could not find the model structure: %s' % experiment_name

    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure, exp_params=exp_params)

    # Prepare model on GPU
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:

            # Training model
            if len(dataset_module.output_size) > 1:
                log.warning('Found > 1 dimension for your output size.'
                            'Converting to a scalar.')
                dataset_module.output_size = np.prod(
                    dataset_module.output_size)

            if hasattr(model_dict, 'output_structure'):
                # Use specified output layer
                output_structure = model_dict.output_structure
            else:
                output_structure = None
            model = model_utils.model_class(
                mean=train_means,
                training=True,
                output_size=dataset_module.output_size)
            train_scores, model_summary = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            log.info('Built training model.')
            log.debug(json.dumps(model_summary, indent=4), verbose=0)
            print_model_architecture(model_summary)

            # Prepare the loss function
            train_loss, _ = loss_utils.loss_interpreter(
                logits=train_scores,
                labels=train_labels,
                loss_type=config.loss_function,
                dataset_module=dataset_module)

            # Add weight decay if requested
            if len(model.regularizations) > 0:
                train_loss = loss_utils.wd_loss(
                    regularizations=model.regularizations,
                    loss=train_loss,
                    wd_penalty=config.regularization_strength)
            train_op = loss_utils.optimizer_interpreter(
                loss=train_loss,
                lr=config.lr,
                optimizer=config.optimizer,
                constraints=config.optimizer_constraints,
                model=model)
            log.info('Built training loss function.')

            train_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,
                pred=train_scores,
                labels=train_labels)  # training accuracy
            # Only image-like tensors (<= 3 channels) get image summaries.
            if int(train_images.get_shape()[-1]) <= 3:
                tf.summary.image('train images', train_images)
            tf.summary.scalar('training loss', train_loss)
            tf.summary.scalar('training accuracy', train_accuracy)
            log.info('Added training summaries.')

            # Validation model
            # Reuse the training weights for the validation tower.
            scope.reuse_variables()
            # NOTE(review): validation model is built with training=True —
            # confirm this is intended (e.g. batch-norm behavior).
            val_model = model_utils.model_class(
                mean=val_means,
                training=True,
                output_size=dataset_module.output_size)
            val_scores, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            log.info('Built validation model.')

            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_scores,
                labels=val_labels,
                loss_type=config.loss_function,
                dataset_module=dataset_module)
            val_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,
                pred=val_scores,
                labels=val_labels)  # training accuracy
            if int(train_images.get_shape()[-1]) <= 3:
                tf.summary.image('val images', val_images)
            tf.summary.scalar('validation loss', val_loss)
            tf.summary.scalar('validation accuracy', val_accuracy)
            log.info('Added validation summaries.')

    # Set up summaries and saver
    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(tf.global_variables_initializer(),
                 tf.local_variables_initializer()))
    summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph)

    # Set up exemplar threading
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_accuracy': train_accuracy,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores
    }
    val_dict = {
        'val_loss': val_loss,
        'val_accuracy': val_accuracy,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
    }

    # Start training loop
    np.save(
        os.path.join(dir_list['condition_evaluations'],
                     'training_config_file'), config)
    log.info('Starting training')
    output_dict = training.training_loop(
        config=config,
        db=db,
        coord=coord,
        sess=sess,
        summary_op=summary_op,
        summary_writer=summary_writer,
        saver=saver,
        threads=threads,
        summary_dir=dir_list['summaries'],
        checkpoint_dir=dir_list['checkpoints'],
        weight_dir=dir_list['weights'],
        train_dict=train_dict,
        val_dict=val_dict,
        train_model=model,
        val_model=val_model,
        exp_params=exp_params)
    log.info('Finished training.')

    # Persist the outputs of the run for later evaluation.
    model_name = config.model_struct.replace('/', '_')
    py_utils.save_npys(data=output_dict,
                       model_name=model_name,
                       output_string=dir_list['experiment_evaluations'])
Ejemplo n.º 16
0
def dcnn(f,
         ch,
         pool,
         act,
         use_bn,
         skip_ch=None,
         phase_train=None,
         wd=None,
         scope='dcnn',
         model=None,
         init_weights=None,
         frozen=None):
    """Add DCNN. N = number of layers.
  Args:
    f: filter size, list of size N  int
    ch: number of channels, list of (N + 1) int
    pool: pooling ratio, list of N int
    act: activation function, list of N function
    use_bn: whether to use batch normalization, list of N bool
    skip_ch: skip connection, list of N int
    phase_train: whether in training phase, tf bool variable
    wd: weight decay
    scope: variable scope prefix
    model: optional dict; per-layer weights are registered into it
    init_weights: optional list of per-layer dicts with 'w'/'b' (and
      'beta_*'/'gamma_*' for BN) initial values
    frozen: optional list of N bool; frozen layers are non-trainable
  Returns:
    run_dcnn: a function that runs the DCNN
  """
    log = logger.get()

    nlayers = len(f)
    w = [None] * nlayers
    b = [None] * nlayers
    # NOTE(review): `bn` is allocated but never written or read below.
    bn = [None] * nlayers

    log.info('DCNN: {}'.format(scope))
    log.info('Channels: {}'.format(ch))
    log.info('Activation: {}'.format(act))
    log.info('Unpool: {}'.format(pool))
    log.info('Skip channels: {}'.format(skip_ch))
    log.info('BN: {}'.format(use_bn))

    with tf.variable_scope(scope):
        in_ch = ch[0]
        for ii in range(nlayers):
            with tf.variable_scope('layer_{}'.format(ii)):
                out_ch = ch[ii + 1]
                # Skip connections widen the input channel count.
                if skip_ch is not None:
                    if skip_ch[ii] is not None:
                        in_ch += skip_ch[ii]

                if init_weights is not None and init_weights[ii] is not None:
                    init_val_w = init_weights[ii]['w']
                    init_val_b = init_weights[ii]['b']
                else:
                    init_val_w = None
                    init_val_b = None

                if frozen is not None and frozen[ii]:
                    trainable = False
                else:
                    trainable = True

                # conv2d_transpose filter layout: [h, w, out_ch, in_ch].
                w[ii] = weight_variable([f[ii], f[ii], out_ch, in_ch],
                                        name='w',
                                        init_val=init_val_w,
                                        wd=wd,
                                        trainable=trainable)
                b[ii] = weight_variable([out_ch],
                                        init_val=init_val_b,
                                        name='b',
                                        trainable=trainable)
                log.info('Filter: {}, Trainable: {}'.format(
                    [f[ii], f[ii], out_ch, in_ch], trainable))

                in_ch = out_ch

                if model is not None:
                    model['{}_w_{}'.format(scope, ii)] = w[ii]
                    model['{}_b_{}'.format(scope, ii)] = b[ii]

    # Mutable counter closed over by run_dcnn: distinguishes repeated
    # unrollings (BN statistics get a per-copy scope suffix).
    copy = [0]

    def run_dcnn(x, skip=None):
        """Run DCNN on an input.
    Args:
      x: input image, [B, H, W, D]
      skip: skip connection activation map, list of 4-D tensor
    """
        with tf.variable_scope(scope):
            h = [None] * nlayers
            out_shape = [None] * nlayers
            batch = tf.shape(x)[0:1]
            inp_size = tf.shape(x)[1:3]
            cum_pool = 1

            for ii in range(nlayers):
                with tf.variable_scope('layer_{}'.format(ii)):
                    cum_pool *= pool[ii]
                    out_ch = ch[ii + 1]

                    if ii == 0:
                        prev_inp = x
                    else:
                        prev_inp = h[ii - 1]

                    # Concatenate skip activations on the channel axis.
                    # NOTE: old TF concat API (axis first); both branches
                    # below are identical.
                    if skip is not None:
                        if skip[ii] is not None:
                            if ii == 0:
                                prev_inp = tf.concat(3, [prev_inp, skip[ii]])
                            else:
                                prev_inp = tf.concat(3, [prev_inp, skip[ii]])

                    # Output spatial size grows by the cumulative unpool.
                    out_shape[ii] = tf.concat(
                        0, [batch, inp_size * cum_pool,
                            tf.constant([out_ch])])

                    h[ii] = tf.nn.conv2d_transpose(
                        prev_inp,
                        w[ii],
                        out_shape[ii],
                        strides=[1, pool[ii], pool[ii], 1]) + b[ii]

                    if use_bn[ii]:
                        # NOTE(review): bn_frozen is computed but never used.
                        if frozen is not None and frozen[ii]:
                            bn_frozen = True
                        else:
                            bn_frozen = False

                        if init_weights is not None and \
                                init_weights[ii] is not None:
                            init_beta = init_weights[ii]['beta_{}'.format(
                                copy[0])]
                            init_gamma = init_weights[ii]['gamma_{}'.format(
                                copy[0])]
                        else:
                            init_beta = None
                            init_gamma = None
                        h[ii] = batch_norm(h[ii],
                                           out_ch,
                                           phase_train,
                                           scope2='{}_{}_{}'.format(
                                               scope, ii, copy[0]),
                                           init_beta=init_beta,
                                           init_gamma=init_gamma,
                                           model=model)
                    if act[ii] is not None:
                        h[ii] = act[ii](h[ii])
        copy[0] += 1
        return h

    return run_dcnn
Ejemplo n.º 17
0
def cnn(f,
        ch,
        pool,
        act,
        use_bn,
        phase_train=None,
        wd=None,
        scope='cnn',
        model=None,
        init_weights=None,
        frozen=None,
        shared_weights=None):
    """Add CNN. N = number of layers.

    Args:
        f: filter size, list of N int
        ch: number of channels, list of (N + 1) int
        pool: pooling ratio, list of N int
        act: activation function, list of N function
        use_bn: whether to use batch normalization, list of N bool
        phase_train: whether in training phase, tf bool variable
        wd: weight decay
        scope: variable scope prefix
        model: optional dict; per-layer weights are registered into it
        init_weights: optional list of per-layer dicts with 'w'/'b' (and
            'beta_*'/'gamma_*' for BN) initial values
        frozen: optional list of N bool; frozen layers are non-trainable
        shared_weights: optional list of per-layer dicts with 'w'/'b'
            variables to reuse instead of creating new ones

    Returns:
        run_cnn: a function that runs the CNN
    """
    log = logger.get()

    nlayers = len(f)
    w = [None] * nlayers
    b = [None] * nlayers
    log.info('CNN: {}'.format(scope))
    log.info('Channels: {}'.format(ch))
    log.info('Activation: {}'.format(act))
    log.info('Pool: {}'.format(pool))
    log.info('BN: {}'.format(use_bn))
    log.info('Shared weights: {}'.format(shared_weights))
    # NOTE(review): net_scope and layer_scope are assigned but never used.
    net_scope = None
    layer_scope = [None] * nlayers
    with tf.variable_scope(scope):
        for ii in range(nlayers):
            with tf.variable_scope('layer_{}'.format(ii)):
                # NOTE(review): `init` is assigned but never used below.
                if init_weights:
                    init = tf.constant_initializer
                else:
                    init = None

                if init_weights is not None and init_weights[ii] is not None:
                    init_val_w = init_weights[ii]['w']
                    init_val_b = init_weights[ii]['b']
                else:
                    init_val_w = None
                    init_val_b = None

                if frozen is not None and frozen[ii]:
                    trainable = False
                else:
                    trainable = True

                if shared_weights:
                    # Reuse externally provided variables.
                    w[ii] = shared_weights[ii]['w']
                    b[ii] = shared_weights[ii]['b']
                else:
                    w[ii] = weight_variable([f[ii], f[ii], ch[ii], ch[ii + 1]],
                                            name='w',
                                            init_val=init_val_w,
                                            wd=wd,
                                            trainable=trainable)
                    b[ii] = weight_variable([ch[ii + 1]],
                                            init_val=init_val_b,
                                            name='b',
                                            trainable=trainable)

                log.info('Filter: {}, Trainable: {}'.format(
                    [f[ii], f[ii], ch[ii], ch[ii + 1]], trainable))

                if model is not None:
                    # Register weights under unique keys; duplicate keys are
                    # a caller error.
                    for name, param in zip(['w', 'b'], [w[ii], b[ii]]):
                        key = '{}_{}_{}'.format(scope, name, ii)
                        if key in model:
                            raise Exception('Key exists: {}'.format(key))
                        model[key] = param
    # Mutable counter closed over by run_cnn: distinguishes repeated
    # unrollings (BN statistics get a per-copy scope suffix).
    copy = [0]

    def run_cnn(x):
        """
    Run CNN on an input.
    Args:
      x: input image, [B, H, W, D]
    """
        h = [None] * nlayers
        with tf.variable_scope(scope):
            for ii in range(nlayers):
                with tf.variable_scope('layer_{}'.format(ii)):
                    out_ch = ch[ii + 1]
                    if ii == 0:
                        prev_inp = x
                    else:
                        prev_inp = h[ii - 1]
                    h[ii] = conv2d(prev_inp, w[ii]) + b[ii]
                    if use_bn[ii]:
                        # NOTE(review): bn_frozen is computed but never used.
                        if frozen is not None and frozen[ii]:
                            bn_frozen = True
                        else:
                            bn_frozen = False
                        if init_weights is not None and \
                                init_weights[ii] is not None:
                            init_beta = init_weights[ii]['beta_{}'.format(
                                copy[0])]
                            init_gamma = init_weights[ii]['gamma_{}'.format(
                                copy[0])]
                        else:
                            init_beta = None
                            init_gamma = None
                        h[ii] = batch_norm(h[ii],
                                           out_ch,
                                           phase_train,
                                           scope2='{}_{}_{}'.format(
                                               scope, ii, copy[0]),
                                           init_beta=init_beta,
                                           init_gamma=init_gamma,
                                           model=model)
                    if act[ii] is not None:
                        h[ii] = act[ii](h[ii])
                    if pool[ii] > 1:
                        h[ii] = max_pool(h[ii], pool[ii])
        copy[0] += 1
        return h

    return run_cnn
Ejemplo n.º 18
0
from utils.step_counter import StepCounter
from utils.time_series_logger import TimeSeriesLogger

import os
# Pin CUDA device enumeration to PCI bus order so the IDs below are stable,
# and expose only GPUs 0 and 1 to this process.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

from cmd_args_parser import TrainArgsParser, DataArgsParser, CmdArgsParser
from experiment import TrainingExperimentBase
from runner import RunnerBase
# from box_model_old import get_model
from box_model import get_model

import tensorflow as tf

# Module-level logger. NOTE(review): ``logger`` is not imported in this
# fragment -- presumably ``from utils import logger`` elsewhere; confirm.
log = logger.get()


class Runner(RunnerBase):
    def __init__(self,
                 sess,
                 model,
                 dataset,
                 num_batch,
                 train_opt,
                 model_opt,
                 outputs,
                 step=StepCounter(0),
                 loggers=None,
                 phase_train=True,
                 increment_step=False):
Ejemplo n.º 19
0
                                   -out boxes
"""

from __future__ import print_function
from utils import list_reader
from utils import logger
from utils.sharded_hdf5 import ShardedFile, ShardedFileWriter
import argparse
import cv2
import numpy
import os
import paths
import selective_search
import sys

# Module-level logger for this script.
log = logger.get()


def patch(original_boxes, error_image_list, full_image_list, batch_size=10):
    """Patches the search boxes for some of the images.

    Args:
        original_boxes: numpy.ndarray, boxes to be patched.
        error_image_list: list, list of image filenames to be patched.
        full_image_list: list, full image list describing each row of the array.
        batch_size: number, number of images to run in one batch.
    Returns:
        original_boxes: numpy.ndarray, updated search boxes.
    Raises:
        NotImplementedError: always; this routine has not been written yet.
    """
    # Use the standard exception type for unimplemented functionality so
    # callers can distinguish it from genuine runtime failures.
    raise NotImplementedError('Not implemented')
    # TODO: re-run selective search on the failed images, e.g.
    # boxes, processed, error = run_selective_search(
Ejemplo n.º 20
0
import numpy as np
import os
import tensorflow as tf
import time

from utils import logger
from utils.batch_iter import BatchIterator
from utils.lazy_registerer import LazyRegisterer
from utils.log_manager import LogManager
from utils.saver import Saver
from utils.time_series_logger import TimeSeriesLogger

import ris_box_model as box_model
import ris_train_base as trainer

# Module-level logger for this training script.
log = logger.get()


def add_model_args(parser):
    """Register the box model's command-line options on *parser*.

    Args:
        parser: an argparse.ArgumentParser (or compatible) instance.
    """
    # (flag, add_argument kwargs) in registration order.
    flag_specs = [
        ('--padding', dict(default=16, type=int)),
        ('--filter_height', dict(default=48, type=int)),
        ('--filter_width', dict(default=48, type=int)),
        ('--ctrl_cnn_filter_size', dict(default='3,3,3,3,3,3,3,3')),
        ('--ctrl_cnn_depth', dict(default='4,4,8,8,16,16,32,64')),
        ('--ctrl_cnn_pool', dict(default='1,2,1,2,1,2,2,2')),
        ('--box_loss_fn', dict(default='iou')),
        ('--fixed_order', dict(action='store_true')),
        ('--pretrain_cnn', dict(default=None)),
        ('--ctrl_rnn_hid_dim', dict(default=256, type=int)),
        ('--num_ctrl_mlp_layers', dict(default=2, type=int)),
        ('--ctrl_mlp_dim', dict(default=256, type=int)),
    ]
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)
Ejemplo n.º 21
0
import numpy as np
import os
import tensorflow as tf
import time

from utils import logger
from utils.batch_iter import BatchIterator
from utils.lazy_registerer import LazyRegisterer
from utils.log_manager import LogManager
from utils.saver import Saver
from utils.time_series_logger import TimeSeriesLogger

import ris_box_model as box_model
import ris_train_base as trainer

log = logger.get()


def add_model_args(parser):
    """Register the box model's command-line options on *parser*.

    Args:
        parser: an argparse.ArgumentParser (or compatible) instance.
    """
    # (flag, add_argument kwargs) in registration order.
    flag_specs = [
        ('--padding', dict(default=16, type=int)),
        ('--filter_height', dict(default=48, type=int)),
        ('--filter_width', dict(default=48, type=int)),
        ('--ctrl_cnn_filter_size', dict(default='3,3,3,3,3,3,3,3')),
        ('--ctrl_cnn_depth', dict(default='4,4,8,8,16,16,32,64')),
        ('--ctrl_cnn_pool', dict(default='1,2,1,2,1,2,2,2')),
        ('--box_loss_fn', dict(default='iou')),
        ('--fixed_order', dict(action='store_true')),
        ('--pretrain_cnn', dict(default=None)),
        ('--ctrl_rnn_hid_dim', dict(default=256, type=int)),
        ('--num_ctrl_mlp_layers', dict(default=2, type=int)),
        ('--ctrl_mlp_dim', dict(default=256, type=int)),
    ]
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, **kwargs)
Ejemplo n.º 22
0
def mlp(dims,
        act,
        add_bias=True,
        dropout_keep=None,
        phase_train=None,
        wd=None,
        scope='mlp',
        model=None,
        init_weights=None,
        frozen=None):
    """Build an MLP and return a function that applies it. N = number of layers.

    Args:
      dims: layer-wise dimensions, list of N + 1 int.
      act: activation function per layer, list of N function (or None).
      add_bias: whether to add a bias term to each layer.
      dropout_keep: keep prob of dropout, list of N float (or None).
      phase_train: whether in training phase, tf bool variable.
      wd: weight decay.
      scope: variable scope name.
      model: optional dict; created variables are registered under
        '{scope}_w_{i}' / '{scope}_b_{i}' keys.
      init_weights: optional per-layer dict of initial values ('w', 'b').
      frozen: optional per-layer bool; frozen layers are not trainable.

    Returns:
      run_mlp: function mapping an input tensor to the list of per-layer
        activations (the last element is the network output).
    """
    log = logger.get()

    nlayers = len(dims) - 1
    w = [None] * nlayers
    b = [None] * nlayers

    log.info('MLP: {}'.format(scope))
    log.info('Dimensions: {}'.format(dims))
    log.info('Activation: {}'.format(act))
    log.info('Dropout: {}'.format(dropout_keep))
    log.info('Add bias: {}'.format(add_bias))

    with tf.variable_scope(scope):
        for ii in range(nlayers):
            with tf.variable_scope('layer_{}'.format(ii)):
                nin = dims[ii]
                nout = dims[ii + 1]
                if init_weights is not None and init_weights[ii] is not None:
                    init_val_w = init_weights[ii]['w']
                    init_val_b = init_weights[ii]['b']
                else:
                    init_val_w = None
                    init_val_b = None
                # Frozen layers keep their (possibly pretrained) values.
                trainable = not (frozen is not None and frozen[ii])
                w[ii] = weight_variable([nin, nout],
                                        init_val=init_val_w,
                                        wd=wd,
                                        name='w',
                                        trainable=trainable)
                log.info('Weights: {} Trainable: {}'.format([nin, nout],
                                                            trainable))
                if add_bias:
                    b[ii] = weight_variable([nout],
                                            init_val=init_val_b,
                                            name='b',
                                            trainable=trainable)
                    log.info('Bias: {} Trainable: {}'.format([nout],
                                                             trainable))

                if model is not None:
                    model['{}_w_{}'.format(scope, ii)] = w[ii]
                    if add_bias:
                        model['{}_b_{}'.format(scope, ii)] = b[ii]

    def run_mlp(x):
        """Apply the MLP to input x; returns the list of layer outputs."""
        h = [None] * nlayers
        with tf.variable_scope(scope):
            for ii in range(nlayers):
                with tf.variable_scope('layer_{}'.format(ii)):
                    prev_inp = x if ii == 0 else h[ii - 1]
                    # Dropout is applied to the layer input when configured.
                    if dropout_keep is not None and \
                            dropout_keep[ii] is not None:
                        prev_inp = dropout(prev_inp, dropout_keep[ii],
                                           phase_train)
                    h[ii] = tf.matmul(prev_inp, w[ii])
                    if add_bias:
                        h[ii] += b[ii]
                    # Explicit None check (consistent with the CNN builder)
                    # so any callable activation is honoured.
                    if act[ii] is not None:
                        h[ii] = act[ii](h[ii])
        return h

    return run_mlp
Ejemplo n.º 23
0
def get_model(opt, is_training=True):
    """The attention model"""
    log = logger.get()
    model = {}

    timespan = opt['timespan']
    inp_height = opt['inp_height']
    inp_width = opt['inp_width']
    inp_depth = opt['inp_depth']
    padding = opt['padding']
    filter_height = opt['filter_height']
    filter_width = opt['filter_width']

    ctrl_cnn_filter_size = opt['ctrl_cnn_filter_size']
    ctrl_cnn_depth = opt['ctrl_cnn_depth']
    ctrl_cnn_pool = opt['ctrl_cnn_pool']
    ctrl_rnn_hid_dim = opt['ctrl_rnn_hid_dim']

    num_ctrl_mlp_layers = opt['num_ctrl_mlp_layers']
    ctrl_mlp_dim = opt['ctrl_mlp_dim']

    attn_cnn_filter_size = opt['attn_cnn_filter_size']
    attn_cnn_depth = opt['attn_cnn_depth']
    attn_cnn_pool = opt['attn_cnn_pool']
    attn_dcnn_filter_size = opt['attn_dcnn_filter_size']
    attn_dcnn_depth = opt['attn_dcnn_depth']
    attn_dcnn_pool = opt['attn_dcnn_pool']

    mlp_dropout_ratio = opt['mlp_dropout']
    attn_box_padding_ratio = opt['attn_box_padding_ratio']

    wd = opt['weight_decay']
    use_bn = opt['use_bn']
    segm_loss_fn = opt['segm_loss_fn']
    box_loss_fn = opt['box_loss_fn']
    loss_mix_ratio = opt['loss_mix_ratio']
    base_learn_rate = opt['base_learn_rate']
    learn_rate_decay = opt['learn_rate_decay']
    steps_per_learn_rate_decay = opt['steps_per_learn_rate_decay']
    use_knob = opt['use_knob']
    knob_base = opt['knob_base']
    knob_decay = opt['knob_decay']
    steps_per_knob_decay = opt['steps_per_knob_decay']
    knob_box_offset = opt['knob_box_offset']
    knob_segm_offset = opt['knob_segm_offset']
    knob_use_timescale = opt['knob_use_timescale']
    gt_box_ctr_noise = opt['gt_box_ctr_noise']
    gt_box_pad_noise = opt['gt_box_pad_noise']
    gt_segm_noise = opt['gt_segm_noise']

    squash_ctrl_params = opt['squash_ctrl_params']
    fixed_order = opt['fixed_order']
    clip_gradient = opt['clip_gradient']
    fixed_gamma = opt['fixed_gamma']
    num_ctrl_rnn_iter = opt['num_ctrl_rnn_iter']
    num_glimpse_mlp_layers = opt['num_glimpse_mlp_layers']
    pretrain_ctrl_net = opt['pretrain_ctrl_net']
    pretrain_attn_net = opt['pretrain_attn_net']
    pretrain_net = opt['pretrain_net']

    if 'freeze_ctrl_cnn' in opt:
        freeze_ctrl_cnn = opt['freeze_ctrl_cnn']
        freeze_ctrl_rnn = opt['freeze_ctrl_rnn']
        freeze_attn_net = opt['freeze_attn_net']
    else:
        freeze_ctrl_cnn = True
        freeze_ctrl_rnn = True
        freeze_attn_net = True

    if 'freeze_ctrl_mlp' in opt:
        freeze_ctrl_mlp = opt['freeze_ctrl_mlp']
    else:
        freeze_ctrl_mlp = freeze_ctrl_rnn

    if 'fixed_var' in opt:
        fixed_var = opt['fixed_var']
    else:
        fixed_var = False

    if 'dynamic_var' in opt:
        dynamic_var = opt['dynamic_var']
    else:
        dynamic_var = False

    if 'use_iou_box' in opt:
        use_iou_box = opt['use_iou_box']
    else:
        use_iou_box = False

    if 'stop_canvas_grad' in opt:
        stop_canvas_grad = opt['stop_canvas_grad']
    else:
        stop_canvas_grad = True

    if 'add_skip_conn' in opt:
        add_skip_conn = opt['add_skip_conn']
    else:
        add_skip_conn = True

    if 'attn_cnn_skip' in opt:
        attn_cnn_skip = opt['attn_cnn_skip']
    else:
        attn_cnn_skip = [add_skip_conn] * len(attn_cnn_filter_size)

    if 'disable_overwrite' in opt:
        disable_overwrite = opt['disable_overwrite']
    else:
        disable_overwrite = True

    if 'add_d_out' in opt:
        add_d_out = opt['add_d_out']
        add_y_out = opt['add_y_out']
    else:
        add_d_out = False
        add_y_out = False

    if 'attn_add_d_out' in opt:
        attn_add_d_out = opt['attn_add_d_out']
        attn_add_y_out = opt['attn_add_y_out']
        attn_add_inp = opt['attn_add_inp']
        attn_add_canvas = opt['attn_add_canvas']
    else:
        attn_add_d_out = add_d_out
        attn_add_y_out = add_y_out
        attn_add_inp = True
        attn_add_canvas = True

    if 'ctrl_add_d_out' in opt:
        ctrl_add_d_out = opt['ctrl_add_d_out']
        ctrl_add_y_out = opt['ctrl_add_y_out']
        ctrl_add_inp = opt['ctrl_add_inp']
        ctrl_add_canvas = opt['ctrl_add_canvas']
    else:
        ctrl_add_d_out = add_d_out
        ctrl_add_y_out = add_y_out
        ctrl_add_inp = not ctrl_add_d_out
        ctrl_add_canvas = not ctrl_add_d_out

    if 'num_semantic_classes' in opt:
        num_semantic_classes = opt['num_semantic_classes']
    else:
        num_semantic_classes = 1

    rnd_hflip = opt['rnd_hflip']
    rnd_vflip = opt['rnd_vflip']
    rnd_transpose = opt['rnd_transpose']
    rnd_colour = opt['rnd_colour']

    ############################
    # Input definition
    ############################
    # Input image, [B, H, W, D]
    x = tf.placeholder('float', [None, inp_height, inp_width, inp_depth],
                       name='x')
    x_shape = tf.shape(x)
    num_ex = x_shape[0]

    # Groundtruth segmentation, [B, T, H, W]
    y_gt = tf.placeholder('float', [None, timespan, inp_height, inp_width],
                          name='y_gt')

    # Groundtruth confidence score, [B, T]
    s_gt = tf.placeholder('float', [None, timespan], name='s_gt')

    if add_d_out:
        d_in = tf.placeholder('float', [None, inp_height, inp_width, 8],
                              name='d_in')
        model['d_in'] = d_in
    if add_y_out:
        y_in = tf.placeholder(
            'float', [None, inp_height, inp_width, num_semantic_classes],
            name='y_in')
        model['y_in'] = y_in

    # Whether in training stage.
    phase_train = tf.placeholder('bool', name='phase_train')
    phase_train_f = tf.to_float(phase_train)

    model['x'] = x
    model['y_gt'] = y_gt
    model['s_gt'] = s_gt
    model['phase_train'] = phase_train

    # Global step
    if 'freeze_ctrl_cnn' in opt:
        global_step = tf.Variable(0.0, name='global_step')
    else:
        global_step = tf.Variable(0.0)

    ###############################
    # Random input transformation
    ###############################
    # Either add both or add nothing.
    assert (add_d_out and add_y_out) or (not add_d_out and not add_y_out)
    if not add_d_out:
        results = img.random_transformation(x,
                                            padding,
                                            phase_train,
                                            rnd_hflip=rnd_hflip,
                                            rnd_vflip=rnd_vflip,
                                            rnd_transpose=rnd_transpose,
                                            rnd_colour=rnd_colour,
                                            y=y_gt)
        x, y_gt = results['x'], results['y']
    else:
        results = img.random_transformation(x,
                                            padding,
                                            phase_train,
                                            rnd_hflip=rnd_hflip,
                                            rnd_vflip=rnd_vflip,
                                            rnd_transpose=rnd_transpose,
                                            rnd_colour=rnd_colour,
                                            y=y_gt,
                                            d=d_in,
                                            c=y_in)
        x, y_gt, d_in, y_in = results['x'], results['y'], results[
            'd'], results['c']
        model['d_in_trans'] = d_in
        model['y_in_trans'] = y_in
    model['x_trans'] = x
    model['y_gt_trans'] = y_gt

    ############################
    # Canvas: external memory
    ############################
    canvas = tf.zeros(tf.pack([num_ex, inp_height, inp_width, 1]))
    ccnn_inp_depth = 0
    acnn_inp_depth = 0
    if ctrl_add_inp:
        ccnn_inp_depth += inp_depth
    if ctrl_add_canvas:
        ccnn_inp_depth += 1
    if attn_add_inp:
        acnn_inp_depth += inp_depth
    if attn_add_canvas:
        acnn_inp_depth += 1

    if ctrl_add_d_out:
        ccnn_inp_depth += 8
    if ctrl_add_y_out:
        ccnn_inp_depth += num_semantic_classes
    if attn_add_d_out:
        acnn_inp_depth += 8
    if attn_add_y_out:
        acnn_inp_depth += num_semantic_classes

    #############################
    # Controller CNN definition
    #############################
    ccnn_filters = ctrl_cnn_filter_size
    ccnn_nlayers = len(ccnn_filters)
    acnn_nlayers = len(attn_cnn_filter_size)
    ccnn_channels = [ccnn_inp_depth] + ctrl_cnn_depth
    ccnn_pool = ctrl_cnn_pool
    ccnn_act = [tf.nn.relu] * ccnn_nlayers
    ccnn_use_bn = [use_bn] * ccnn_nlayers

    pt = pretrain_net or pretrain_ctrl_net
    if pt:
        log.info(
            'Loading pretrained controller CNN weights from {}'.format(pt))
        with h5py.File(pt, 'r') as h5f:
            ccnn_init_w = [{
                'w': h5f['ctrl_cnn_w_{}'.format(ii)][:],
                'b': h5f['ctrl_cnn_b_{}'.format(ii)][:]
            } for ii in range(ccnn_nlayers)]
            for ii in range(ccnn_nlayers):
                for tt in range(timespan):
                    for w in ['beta', 'gamma']:
                        ccnn_init_w[ii]['{}_{}'.format(
                            w,
                            tt)] = h5f['ctrl_cnn_{}_{}_{}'.format(ii, tt,
                                                                  w)][:]
        ccnn_frozen = [freeze_ctrl_cnn] * ccnn_nlayers
    else:
        ccnn_init_w = None
        ccnn_frozen = [freeze_ctrl_cnn] * ccnn_nlayers

    ccnn = nn.cnn(ccnn_filters,
                  ccnn_channels,
                  ccnn_pool,
                  ccnn_act,
                  ccnn_use_bn,
                  phase_train=phase_train,
                  wd=wd,
                  scope='ctrl_cnn',
                  model=model,
                  init_weights=ccnn_init_w,
                  frozen=ccnn_frozen)
    h_ccnn = [None] * timespan

    ############################
    # Controller RNN definition
    ############################
    ccnn_subsample = np.array(ccnn_pool).prod()
    crnn_h = inp_height / ccnn_subsample
    crnn_w = inp_width / ccnn_subsample
    crnn_dim = ctrl_rnn_hid_dim
    canvas_dim = inp_height * inp_width / (ccnn_subsample**2)

    glimpse_map_dim = crnn_h * crnn_w
    glimpse_feat_dim = ccnn_channels[-1]
    crnn_inp_dim = glimpse_feat_dim

    pt = pretrain_net or pretrain_ctrl_net
    if pt:
        log.info(
            'Loading pretrained controller RNN weights from {}'.format(pt))
        with h5py.File(pt, 'r') as h5f:
            crnn_init_w = {}
            for w in [
                    'w_xi', 'w_hi', 'b_i', 'w_xf', 'w_hf', 'b_f', 'w_xu',
                    'w_hu', 'b_u', 'w_xo', 'w_ho', 'b_o'
            ]:
                key = 'ctrl_lstm_{}'.format(w)
                crnn_init_w[w] = h5f[key][:]
            crnn_frozen = freeze_ctrl_rnn
    else:
        crnn_init_w = None
        crnn_frozen = freeze_ctrl_rnn

    crnn_state = [None] * (timespan + 1)
    crnn_glimpse_map = [None] * timespan
    crnn_g_i = [None] * timespan
    crnn_g_f = [None] * timespan
    crnn_g_o = [None] * timespan
    h_crnn = [None] * timespan
    crnn_state[-1] = tf.zeros(tf.pack([num_ex, crnn_dim * 2]))
    crnn_cell = nn.lstm(crnn_inp_dim,
                        crnn_dim,
                        wd=wd,
                        scope='ctrl_lstm',
                        init_weights=crnn_init_w,
                        frozen=crnn_frozen,
                        model=model)

    ############################
    # Glimpse MLP definition
    ############################
    gmlp_dims = [crnn_dim] * num_glimpse_mlp_layers + [glimpse_map_dim]
    gmlp_act = [tf.nn.relu] * \
        (num_glimpse_mlp_layers - 1) + [tf.nn.softmax]
    gmlp_dropout = None

    pt = pretrain_net or pretrain_ctrl_net
    if pt:
        log.info('Loading pretrained glimpse MLP weights from {}'.format(pt))
        with h5py.File(pt, 'r') as h5f:
            gmlp_init_w = [{
                'w': h5f['glimpse_mlp_w_{}'.format(ii)][:],
                'b': h5f['glimpse_mlp_b_{}'.format(ii)][:]
            } for ii in range(num_glimpse_mlp_layers)]
            gmlp_frozen = [freeze_ctrl_rnn] * num_glimpse_mlp_layers
    else:
        gmlp_init_w = None
        gmlp_frozen = [freeze_ctrl_rnn] * num_glimpse_mlp_layers

    gmlp = nn.mlp(gmlp_dims,
                  gmlp_act,
                  add_bias=True,
                  dropout_keep=gmlp_dropout,
                  phase_train=phase_train,
                  wd=wd,
                  scope='glimpse_mlp',
                  init_weights=gmlp_init_w,
                  frozen=gmlp_frozen,
                  model=model)

    ############################
    # Controller MLP definition
    ############################
    cmlp_dims = [crnn_dim] + [ctrl_mlp_dim] * \
        (num_ctrl_mlp_layers - 1) + [9]
    cmlp_act = [tf.nn.relu] * (num_ctrl_mlp_layers - 1) + [None]
    cmlp_dropout = None

    pt = pretrain_net or pretrain_ctrl_net
    if pt:
        log.info(
            'Loading pretrained controller MLP weights from {}'.format(pt))
        with h5py.File(pt, 'r') as h5f:
            cmlp_init_w = [{
                'w': h5f['ctrl_mlp_w_{}'.format(ii)][:],
                'b': h5f['ctrl_mlp_b_{}'.format(ii)][:]
            } for ii in range(num_ctrl_mlp_layers)]
            cmlp_frozen = [freeze_ctrl_mlp] * num_ctrl_mlp_layers
    else:
        cmlp_init_w = None
        cmlp_frozen = [freeze_ctrl_mlp] * num_ctrl_mlp_layers

    cmlp = nn.mlp(cmlp_dims,
                  cmlp_act,
                  add_bias=True,
                  dropout_keep=cmlp_dropout,
                  phase_train=phase_train,
                  wd=wd,
                  scope='ctrl_mlp',
                  init_weights=cmlp_init_w,
                  frozen=cmlp_frozen,
                  model=model)

    ###########################
    # Attention CNN definition
    ###########################
    acnn_filters = attn_cnn_filter_size
    acnn_nlayers = len(acnn_filters)
    acnn_channels = [acnn_inp_depth] + attn_cnn_depth
    acnn_pool = attn_cnn_pool
    acnn_act = [tf.nn.relu] * acnn_nlayers
    acnn_use_bn = [use_bn] * acnn_nlayers

    pt = pretrain_net or pretrain_attn_net
    if pt:
        log.info('Loading pretrained attention CNN weights from {}'.format(pt))
        with h5py.File(pt, 'r') as h5f:
            acnn_init_w = [{
                'w': h5f['attn_cnn_w_{}'.format(ii)][:],
                'b': h5f['attn_cnn_b_{}'.format(ii)][:]
            } for ii in range(acnn_nlayers)]
            for ii in range(acnn_nlayers):
                for tt in range(timespan):
                    for w in ['beta', 'gamma']:
                        key = 'attn_cnn_{}_{}_{}'.format(ii, tt, w)
                        acnn_init_w[ii]['{}_{}'.format(w, tt)] = h5f[key][:]
        acnn_frozen = [freeze_attn_net] * acnn_nlayers
    else:
        acnn_init_w = None
        acnn_frozen = [freeze_attn_net] * acnn_nlayers

    acnn = nn.cnn(acnn_filters,
                  acnn_channels,
                  acnn_pool,
                  acnn_act,
                  acnn_use_bn,
                  phase_train=phase_train,
                  wd=wd,
                  scope='attn_cnn',
                  model=model,
                  init_weights=acnn_init_w,
                  frozen=acnn_frozen)

    x_patch = [None] * timespan
    h_acnn = [None] * timespan
    h_acnn_last = [None] * timespan

    acnn_subsample = np.array(acnn_pool).prod()
    acnn_h = filter_height / acnn_subsample
    acnn_w = filter_width / acnn_subsample
    core_depth = acnn_channels[-1]
    core_dim = acnn_h * acnn_w * core_depth

    ##########################
    # Score MLP definition
    ##########################
    pt = pretrain_net
    if pt:
        log.info('Loading score mlp weights from {}'.format(pt))
        with h5py.File(pt, 'r') as h5f:
            smlp_init_w = [{
                'w': h5f['score_mlp_w_{}'.format(ii)][:],
                'b': h5f['score_mlp_b_{}'.format(ii)][:]
            } for ii in range(1)]
    else:
        smlp_init_w = None
    smlp = nn.mlp([crnn_dim + core_dim, 1], [tf.sigmoid],
                  wd=wd,
                  scope='score_mlp',
                  init_weights=smlp_init_w,
                  model=model)
    s_out = [None] * timespan

    #############################
    # Attention DCNN definition
    #############################
    adcnn_filters = attn_dcnn_filter_size
    adcnn_nlayers = len(adcnn_filters)
    adcnn_unpool = attn_dcnn_pool
    adcnn_act = [tf.nn.relu] * adcnn_nlayers
    adcnn_channels = [core_depth] + attn_dcnn_depth

    adcnn_bn_nlayers = adcnn_nlayers
    adcnn_use_bn = [use_bn] * adcnn_bn_nlayers + \
        [False] * (adcnn_nlayers - adcnn_bn_nlayers)

    if add_skip_conn:
        adcnn_skip_ch = [0]
        adcnn_channels_rev = acnn_channels[::-1][1:] + [acnn_inp_depth]
        adcnn_skip_rev = attn_cnn_skip[::-1]
        for sk, ch in zip(adcnn_skip_rev, adcnn_channels_rev):
            adcnn_skip_ch.append(ch if sk else 0)
            pass
    else:
        adcnn_skip_ch = None

    pt = pretrain_net or pretrain_attn_net
    if pt:
        log.info(
            'Loading pretrained attention DCNN weights from {}'.format(pt))
        with h5py.File(pt, 'r') as h5f:
            adcnn_init_w = [{
                'w': h5f['attn_dcnn_w_{}'.format(ii)][:],
                'b': h5f['attn_dcnn_b_{}'.format(ii)][:]
            } for ii in range(adcnn_nlayers)]
            for ii in range(adcnn_bn_nlayers):
                for tt in range(timespan):
                    for w in ['beta', 'gamma']:
                        key = 'attn_dcnn_{}_{}_{}'.format(ii, tt, w)
                        adcnn_init_w[ii]['{}_{}'.format(w, tt)] = h5f[key][:]

        adcnn_frozen = [freeze_attn_net] * adcnn_nlayers
    else:
        adcnn_init_w = None
        adcnn_frozen = [freeze_attn_net] * adcnn_nlayers

    adcnn = nn.dcnn(adcnn_filters,
                    adcnn_channels,
                    adcnn_unpool,
                    adcnn_act,
                    use_bn=adcnn_use_bn,
                    skip_ch=adcnn_skip_ch,
                    phase_train=phase_train,
                    wd=wd,
                    model=model,
                    init_weights=adcnn_init_w,
                    frozen=adcnn_frozen,
                    scope='attn_dcnn')
    h_adcnn = [None] * timespan

    ##########################
    # Attention box
    ##########################
    attn_ctr_norm = [None] * timespan
    attn_lg_size = [None] * timespan
    attn_ctr = [None] * timespan
    attn_size = [None] * timespan
    attn_lg_var = [None] * timespan
    attn_lg_gamma = [None] * timespan
    attn_gamma = [None] * timespan
    attn_box_lg_gamma = [None] * timespan
    attn_top_left = [None] * timespan
    attn_bot_right = [None] * timespan
    attn_box = [None] * timespan
    iou_soft_box = [None] * timespan
    const_ones = tf.ones(tf.pack([num_ex, filter_height, filter_width, 1]))
    attn_box_beta = tf.constant([-5.0])
    attn_box_gamma = [None] * timespan

    #############################
    # Groundtruth attention box
    #############################
    # [B, T, 2]
    attn_ctr_gt, attn_size_gt, attn_lg_var_gt, attn_lg_gamma_gt, \
        attn_box_gt, \
        attn_top_left_gt, attn_bot_right_gt = \
        modellib.get_gt_attn(y_gt, filter_height, filter_width,
                         padding_ratio=attn_box_padding_ratio,
                         center_shift_ratio=0.0,
                         min_padding=padding + 4)
    attn_ctr_gt_noise, attn_size_gt_noise, attn_lg_var_gt_noise, \
        attn_lg_gamma_gt_noise, \
        attn_box_gt_noise, \
        attn_top_left_gt_noise, attn_bot_right_gt_noise = \
        modellib.get_gt_attn(y_gt, filter_height, filter_width,
                         padding_ratio=tf.random_uniform(
                             tf.pack([num_ex, timespan, 1]),
                             attn_box_padding_ratio - gt_box_pad_noise,
                             attn_box_padding_ratio + gt_box_pad_noise),
                         center_shift_ratio=tf.random_uniform(
                             tf.pack([num_ex, timespan, 2]),
                             -gt_box_ctr_noise, gt_box_ctr_noise),
                         min_padding=padding + 4)
    attn_ctr_norm_gt = modellib.get_normalized_center(attn_ctr_gt, inp_height,
                                                      inp_width)
    attn_lg_size_gt = modellib.get_normalized_size(attn_size_gt, inp_height,
                                                   inp_width)

    ##########################
    # Groundtruth mix
    ##########################
    grd_match_cum = tf.zeros(tf.pack([num_ex, timespan]))

    # Scale mix ratio on different timesteps.
    if knob_use_timescale:
        gt_knob_time_scale = tf.reshape(
            1.0 + tf.log(1.0 + tf.to_float(tf.range(timespan)) * 3.0),
            [1, timespan, 1])
    else:
        gt_knob_time_scale = tf.ones([1, timespan, 1])

    # Mix in groundtruth box.
    global_step_box = tf.maximum(0.0, global_step - knob_box_offset)
    gt_knob_prob_box = tf.train.exponential_decay(knob_base,
                                                  global_step_box,
                                                  steps_per_knob_decay,
                                                  knob_decay,
                                                  staircase=False)
    gt_knob_prob_box = tf.minimum(1.0, gt_knob_prob_box * gt_knob_time_scale)
    gt_knob_box = tf.to_float(
        tf.random_uniform(tf.pack([num_ex, timespan, 1]), 0, 1.0) <=
        gt_knob_prob_box)
    model['gt_knob_prob_box'] = gt_knob_prob_box[0, 0, 0]

    # Mix in groundtruth segmentation.
    global_step_segm = tf.maximum(0.0, global_step - knob_segm_offset)
    gt_knob_prob_segm = tf.train.exponential_decay(knob_base,
                                                   global_step_segm,
                                                   steps_per_knob_decay,
                                                   knob_decay,
                                                   staircase=False)
    gt_knob_prob_segm = tf.minimum(1.0, gt_knob_prob_segm * gt_knob_time_scale)
    gt_knob_segm = tf.to_float(
        tf.random_uniform(tf.pack([num_ex, timespan, 1]), 0, 1.0) <=
        gt_knob_prob_segm)
    model['gt_knob_prob_segm'] = gt_knob_prob_segm[0, 0, 0]

    ##########################
    # Segmentation output
    ##########################
    y_out_patch = [None] * timespan
    y_out = [None] * timespan
    y_out_lg_gamma = [None] * timespan
    y_out_beta = tf.constant([-5.0])

    ##########################
    # Computation graph
    ##########################
    for tt in range(timespan):
        # Controller CNN
        ccnn_inp_list = []
        acnn_inp_list = []

        if ctrl_add_inp:
            ccnn_inp_list.append(x)
        if attn_add_inp:
            acnn_inp_list.append(x)
        if ctrl_add_canvas:
            ccnn_inp_list.append(canvas)
        if attn_add_canvas:
            acnn_inp_list.append(canvas)
        if ctrl_add_d_out:
            ccnn_inp_list.append(d_in)
        if attn_add_d_out:
            acnn_inp_list.append(d_in)
        if ctrl_add_y_out:
            ccnn_inp_list.append(y_in)
        if attn_add_y_out:
            acnn_inp_list.append(y_in)

        acnn_inp = tf.concat(3, acnn_inp_list)
        ccnn_inp = tf.concat(3, ccnn_inp_list)

        h_ccnn[tt] = ccnn(ccnn_inp)
        _h_ccnn = h_ccnn[tt]
        h_ccnn_last = _h_ccnn[-1]

        # Controller RNN [B, R1]
        crnn_inp = tf.reshape(h_ccnn_last,
                              [-1, glimpse_map_dim, glimpse_feat_dim])
        crnn_state[tt] = [None] * (num_ctrl_rnn_iter + 1)
        crnn_g_i[tt] = [None] * num_ctrl_rnn_iter
        crnn_g_f[tt] = [None] * num_ctrl_rnn_iter
        crnn_g_o[tt] = [None] * num_ctrl_rnn_iter
        h_crnn[tt] = [None] * num_ctrl_rnn_iter
        crnn_state[tt][-1] = tf.zeros(tf.pack([num_ex, crnn_dim * 2]))
        crnn_glimpse_map[tt] = [None] * num_ctrl_rnn_iter
        crnn_glimpse_map[tt][0] = tf.ones(tf.pack([num_ex, glimpse_map_dim, 1
                                                   ])) / glimpse_map_dim
        # Inner glimpse RNN
        for tt2 in range(num_ctrl_rnn_iter):
            crnn_glimpse = tf.reduce_sum(crnn_inp * crnn_glimpse_map[tt][tt2],
                                         [1])
            crnn_state[tt][tt2], crnn_g_i[tt][tt2], crnn_g_f[tt][tt2], \
                crnn_g_o[tt][tt2] = crnn_cell(
                    crnn_glimpse, crnn_state[tt][tt2 - 1])
            h_crnn[tt][tt2] = tf.slice(crnn_state[tt][tt2], [0, crnn_dim],
                                       [-1, crnn_dim])
            h_gmlp = gmlp(h_crnn[tt][tt2])
            if tt2 < num_ctrl_rnn_iter - 1:
                crnn_glimpse_map[tt][tt2 + 1] = tf.expand_dims(h_gmlp[-1], 2)
        ctrl_out = cmlp(h_crnn[tt][-1])[-1]

        attn_ctr_norm[tt] = tf.slice(ctrl_out, [0, 0], [-1, 2])
        attn_lg_size[tt] = tf.slice(ctrl_out, [0, 2], [-1, 2])

        # Restrict to (-1, 1), (-inf, 0)
        if squash_ctrl_params:
            attn_ctr_norm[tt] = tf.tanh(attn_ctr_norm[tt])
            attn_lg_size[tt] = -tf.nn.softplus(attn_lg_size[tt])

        attn_ctr[tt], attn_size[tt] = modellib.get_unnormalized_attn(
            attn_ctr_norm[tt], attn_lg_size[tt], inp_height, inp_width)

        if fixed_var:
            attn_lg_var[tt] = tf.zeros(tf.pack([num_ex, 2]))
        else:
            attn_lg_var[tt] = modellib.get_normalized_var(
                attn_size[tt], filter_height, filter_width)

        if dynamic_var:
            attn_lg_var[tt] = tf.slice(ctrl_out, [0, 4], [-1, 2])

        if fixed_gamma:
            attn_lg_gamma[tt] = tf.constant([0.0])
            y_out_lg_gamma[tt] = tf.constant([2.0])
        else:
            attn_lg_gamma[tt] = tf.slice(ctrl_out, [0, 6], [-1, 1])
            y_out_lg_gamma[tt] = tf.slice(ctrl_out, [0, 8], [-1, 1])

        attn_box_lg_gamma[tt] = tf.slice(ctrl_out, [0, 7], [-1, 1])
        attn_gamma[tt] = tf.reshape(tf.exp(attn_lg_gamma[tt]), [-1, 1, 1, 1])
        attn_box_gamma[tt] = tf.reshape(tf.exp(attn_box_lg_gamma[tt]),
                                        [-1, 1, 1, 1])
        y_out_lg_gamma[tt] = tf.reshape(y_out_lg_gamma[tt], [-1, 1, 1, 1])

        attn_top_left[tt], attn_bot_right[tt] = modellib.get_box_coord(
            attn_ctr[tt], attn_size[tt])

        # Initial filters (predicted)
        filter_y = modellib.get_gaussian_filter(attn_ctr[tt][:, 0],
                                                attn_size[tt][:, 0],
                                                attn_lg_var[tt][:, 0],
                                                inp_height, filter_height)
        filter_x = modellib.get_gaussian_filter(attn_ctr[tt][:, 1],
                                                attn_size[tt][:, 1],
                                                attn_lg_var[tt][:, 1],
                                                inp_width, filter_width)
        filter_y_inv = tf.transpose(filter_y, [0, 2, 1])
        filter_x_inv = tf.transpose(filter_x, [0, 2, 1])

        # Attention box
        attn_box[tt] = modellib.extract_patch(const_ones * attn_box_gamma[tt],
                                              filter_y_inv, filter_x_inv, 1)
        attn_box[tt] = tf.sigmoid(attn_box[tt] + attn_box_beta)
        attn_box[tt] = tf.reshape(attn_box[tt], [-1, 1, inp_height, inp_width])

        # Kick in GT bbox.
        if use_knob:
            if fixed_order:
                attn_ctr_gtm = attn_ctr_gt_noise[:, tt, :]
                # attn_delta_gtm = attn_delta_gt_noise[:, tt, :]
                attn_size_gtm = attn_size_gt_noise[:, tt, :]
            else:
                if use_iou_box:
                    iou_soft_box[tt] = modellib.f_iou_box(
                        tf.expand_dims(attn_top_left[tt], 1),
                        tf.expand_dims(attn_bot_right[tt], 1),
                        attn_top_left_gt, attn_bot_right_gt)
                else:
                    iou_soft_box[tt] = modellib.f_inter(
                        attn_box[tt], attn_box_gt) / \
                        modellib.f_union(attn_box[tt], attn_box_gt, eps=1e-5)
                grd_match = modellib.f_greedy_match(iou_soft_box[tt],
                                                    grd_match_cum)

                # [B, T, 1]
                grd_match = tf.expand_dims(grd_match, 2)
                attn_ctr_gtm = tf.reduce_sum(grd_match * attn_ctr_gt_noise, 1)
                attn_size_gtm = tf.reduce_sum(grd_match * attn_size_gt_noise,
                                              1)

            attn_ctr[tt] = phase_train_f * gt_knob_box[:, tt, 0: 1] * \
                attn_ctr_gtm + \
                (1 - phase_train_f * gt_knob_box[:, tt, 0: 1]) * \
                attn_ctr[tt]
            attn_size[tt] = phase_train_f * gt_knob_box[:, tt, 0: 1] * \
                attn_size_gtm + \
                (1 - phase_train_f * gt_knob_box[:, tt, 0: 1]) * \
                attn_size[tt]

        attn_top_left[tt], attn_bot_right[tt] = modellib.get_box_coord(
            attn_ctr[tt], attn_size[tt])

        filter_y = modellib.get_gaussian_filter(attn_ctr[tt][:, 0],
                                                attn_size[tt][:, 0],
                                                attn_lg_var[tt][:, 0],
                                                inp_height, filter_height)
        filter_x = modellib.get_gaussian_filter(attn_ctr[tt][:, 1],
                                                attn_size[tt][:, 1],
                                                attn_lg_var[tt][:, 1],
                                                inp_width, filter_width)
        filter_y_inv = tf.transpose(filter_y, [0, 2, 1])
        filter_x_inv = tf.transpose(filter_x, [0, 2, 1])

        # Attended patch [B, A, A, D]
        x_patch[tt] = attn_gamma[tt] * modellib.extract_patch(
            acnn_inp, filter_y, filter_x, acnn_inp_depth)

        # CNN [B, A, A, D] => [B, RH2, RW2, RD2]
        h_acnn[tt] = acnn(x_patch[tt])
        h_acnn_last[tt] = h_acnn[tt][-1]
        h_core = tf.reshape(h_acnn_last[tt], [-1, core_dim])
        h_core_img = h_acnn_last[tt]

        # DCNN
        if add_skip_conn:
            h_acnn_rev = h_acnn[tt][::-1][1:] + [x_patch[tt]]
            adcnn_skip = [None]
            for sk, hcnn in zip(adcnn_skip_rev, h_acnn_rev):
                adcnn_skip.append(hcnn if sk else None)
                pass
        else:
            adcnn_skip = None
        h_adcnn[tt] = adcnn(h_core_img, skip=adcnn_skip)
        y_out_patch[tt] = tf.expand_dims(h_adcnn[tt][-1], 1)

        # Output
        y_out[tt] = modellib.extract_patch(h_adcnn[tt][-1], filter_y_inv,
                                           filter_x_inv, 1)
        y_out[tt] = tf.exp(y_out_lg_gamma[tt]) * y_out[tt] + y_out_beta
        y_out[tt] = tf.sigmoid(y_out[tt])
        y_out[tt] = tf.reshape(y_out[tt], [-1, 1, inp_height, inp_width])

        if disable_overwrite:
            y_out[tt] = tf.reshape(1 - canvas,
                                   [-1, 1, inp_height, inp_width]) * y_out[tt]

        # Scoring network
        smlp_inp = tf.concat(1, [h_crnn[tt][-1], h_core])
        s_out[tt] = smlp(smlp_inp)[-1]

        # Here is the knob kick in GT segmentations at this timestep.
        # [B, N, 1, 1]
        if use_knob:
            _gt_knob_segm = tf.expand_dims(
                tf.expand_dims(gt_knob_segm[:, tt, 0:1], 2), 3)

            if fixed_order:
                _y_out = tf.expand_dims(y_gt[:, tt, :, :], 3)
            else:
                grd_match = tf.expand_dims(grd_match, 3)
                _y_out = tf.expand_dims(tf.reduce_sum(grd_match * y_gt, 1), 3)
            # Add independent uniform noise to groundtruth.
            _noise = tf.random_uniform(
                tf.pack([num_ex, inp_height, inp_width, 1]), 0, gt_segm_noise)
            _y_out = _y_out - _y_out * _noise
            _y_out = phase_train_f * _gt_knob_segm * _y_out + \
                (1 - phase_train_f * _gt_knob_segm) * \
                tf.reshape(y_out[tt], [-1, inp_height, inp_width, 1])
        else:
            _y_out = tf.reshape(y_out[tt], [-1, inp_height, inp_width, 1])
        y_out_last = _y_out
        canvas = tf.maximum(_y_out, canvas)
        if stop_canvas_grad:
            canvas = tf.stop_gradient(canvas)
            y_out_last = tf.stop_gradient(y_out_last)

    #########################
    # Model outputs
    #########################
    s_out = tf.concat(1, s_out)
    model['s_out'] = s_out
    y_out = tf.concat(1, y_out)
    model['y_out'] = y_out
    y_out_patch = tf.concat(1, y_out_patch)
    model['y_out_patch'] = y_out_patch
    attn_box = tf.concat(1, attn_box)
    model['attn_box'] = attn_box
    x_patch = tf.concat(
        1, [tf.expand_dims(x_patch[tt], 1) for tt in range(timespan)])
    model['x_patch'] = x_patch

    attn_top_left = tf.concat(
        1, [tf.expand_dims(tmp, 1) for tmp in attn_top_left])
    attn_bot_right = tf.concat(
        1, [tf.expand_dims(tmp, 1) for tmp in attn_bot_right])
    attn_ctr = tf.concat(1, [tf.expand_dims(tmp, 1) for tmp in attn_ctr])
    attn_size = tf.concat(1, [tf.expand_dims(tmp, 1) for tmp in attn_size])
    attn_lg_gamma = tf.concat(
        1, [tf.expand_dims(tmp, 1) for tmp in attn_lg_gamma])
    attn_box_lg_gamma = tf.concat(
        1, [tf.expand_dims(tmp, 1) for tmp in attn_box_lg_gamma])
    y_out_lg_gamma = tf.concat(
        1, [tf.expand_dims(tmp, 1) for tmp in y_out_lg_gamma])
    model['attn_ctr'] = attn_ctr
    model['attn_size'] = attn_size
    model['attn_top_left'] = attn_top_left
    model['attn_bot_right'] = attn_bot_right
    model['attn_ctr_gt'] = attn_ctr_gt
    model['attn_size_gt'] = attn_size_gt
    model['attn_top_left_gt'] = attn_top_left_gt
    model['attn_bot_right_gt'] = attn_bot_right_gt
    model['attn_box_gt'] = attn_box_gt
    attn_ctr_norm = tf.concat(
        1, [tf.expand_dims(tmp, 1) for tmp in attn_ctr_norm])
    attn_lg_size = tf.concat(1,
                             [tf.expand_dims(tmp, 1) for tmp in attn_lg_size])
    model['attn_ctr_norm'] = attn_ctr_norm
    model['attn_lg_size'] = attn_lg_size
    attn_params = tf.concat(2, [attn_ctr_norm, attn_lg_size])
    attn_params_gt = tf.concat(2, [attn_ctr_norm_gt, attn_lg_size_gt])

    ####################
    # Glimpse
    ####################
    # T * T2 * [H', W'] => [T, T2, H', W']
    crnn_glimpse_map = tf.concat(1, [
        tf.expand_dims(
            tf.concat(1, [
                tf.expand_dims(crnn_glimpse_map[tt][tt2], 1)
                for tt2 in range(num_ctrl_rnn_iter)
            ]), 1) for tt in range(timespan)
    ])
    crnn_glimpse_map = tf.reshape(
        crnn_glimpse_map, [-1, timespan, num_ctrl_rnn_iter, crnn_h, crnn_w])
    model['ctrl_rnn_glimpse_map'] = crnn_glimpse_map

    model['global_step'] = global_step
    if not is_training:
        return model

    #########################
    # Loss function
    #########################
    num_ex_f = tf.to_float(x_shape[0])
    max_num_obj = tf.to_float(timespan)

    ############################
    # Box loss
    ############################
    if fixed_order:
        # [B, T] for fixed order.
        iou_soft_box = modellib.f_iou(attn_box, attn_box_gt, pairwise=False)
    else:
        if use_knob:
            # [B, T, T] for matching.
            iou_soft_box = tf.concat(1, [
                tf.expand_dims(iou_soft_box[tt], 1) for tt in range(timespan)
            ])
        else:
            iou_soft_box = modellib.f_iou(attn_box,
                                          attn_box_gt,
                                          timespan,
                                          pairwise=True)
        # iou_soft_box = modellib.f_iou_pair_new(attn_box, attn_box_gt)

    identity_match = modellib.get_identity_match(num_ex, timespan, s_gt)
    if fixed_order:
        match_box = identity_match
    else:
        match_box = modellib.f_segm_match(iou_soft_box, s_gt)

    model['match_box'] = match_box
    match_sum_box = tf.reduce_sum(match_box, reduction_indices=[2])
    match_count_box = tf.reduce_sum(match_sum_box, reduction_indices=[1])
    match_count_box = tf.maximum(1.0, match_count_box)

    # [B] if fixed order, [B, T] if matching.
    if fixed_order:
        iou_soft_box_mask = iou_soft_box
    else:
        iou_soft_box_mask = tf.reduce_sum(iou_soft_box * match_box, [1])
    iou_soft_box = tf.reduce_sum(iou_soft_box_mask, [1])
    iou_soft_box = tf.reduce_sum(iou_soft_box / match_count_box) / num_ex_f

    if box_loss_fn == 'mse':
        box_loss = modellib.f_match_loss(attn_params,
                                         attn_params_gt,
                                         match_box,
                                         timespan,
                                         modellib.f_squared_err,
                                         model=model)
    elif box_loss_fn == 'huber':
        box_loss = modellib.f_match_loss(attn_params, attn_params_gt,
                                         match_box, timespan, modellib.f_huber)
    elif box_loss_fn == 'iou':
        box_loss = -iou_soft_box
    elif box_loss_fn == 'wt_cov':
        box_loss = -modellib.f_weighted_coverage(iou_soft_box, attn_box_gt)
    elif box_loss_fn == 'bce':
        box_loss_fn = modellib.f_match_loss(y_out, y_gt, match_box, timespan,
                                            f_bce)
    else:
        raise Exception('Unknown box_loss_fn: {}'.format(box_loss_fn))
    model['box_loss'] = box_loss

    box_loss_coeff = tf.constant(1.0)
    model['box_loss_coeff'] = box_loss_coeff
    tf.add_to_collection('losses', box_loss_coeff * box_loss)

    ##############################
    # Segmentation loss
    ##############################
    # IoU (soft)
    iou_soft_pairwise = modellib.f_iou(y_out, y_gt, timespan, pairwise=True)
    real_match = modellib.f_segm_match(iou_soft_pairwise, s_gt)
    if fixed_order:
        iou_soft = modellib.f_iou(y_out, y_gt, pairwise=False)
        match = identity_match
    else:
        iou_soft = iou_soft_pairwise
        match = real_match
    model['match'] = match
    match_sum = tf.reduce_sum(match, reduction_indices=[2])
    match_count = tf.reduce_sum(match_sum, reduction_indices=[1])
    match_count = tf.maximum(1.0, match_count)

    # Weighted coverage (soft)
    wt_cov_soft = modellib.f_weighted_coverage(iou_soft_pairwise, y_gt)
    model['wt_cov_soft'] = wt_cov_soft
    unwt_cov_soft = modellib.f_unweighted_coverage(iou_soft_pairwise,
                                                   match_count)
    model['unwt_cov_soft'] = unwt_cov_soft

    # [B] if fixed order, [B, T] if matching.
    if fixed_order:
        iou_soft_mask = iou_soft
    else:
        iou_soft_mask = tf.reduce_sum(iou_soft * match, [1])
    iou_soft = tf.reduce_sum(iou_soft_mask, [1])
    iou_soft = tf.reduce_sum(iou_soft / match_count) / num_ex_f
    model['iou_soft'] = iou_soft

    if segm_loss_fn == 'iou':
        segm_loss = -iou_soft
    elif segm_loss_fn == 'wt_cov':
        segm_loss = -wt_cov_soft
    elif segm_loss_fn == 'bce':
        segm_loss = f_match_bce(y_out, y_gt, match, timespan)
    else:
        raise Exception('Unknown segm_loss_fn: {}'.format(segm_loss_fn))
    model['segm_loss'] = segm_loss
    segm_loss_coeff = tf.constant(1.0)
    tf.add_to_collection('losses', segm_loss_coeff * segm_loss)

    ####################
    # Score loss
    ####################
    conf_loss = modellib.f_conf_loss(s_out, match, timespan, use_cum_min=True)
    model['conf_loss'] = conf_loss
    tf.add_to_collection('losses', loss_mix_ratio * conf_loss)

    ####################
    # Total loss
    ####################
    total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    model['loss'] = total_loss

    ####################
    # Optimizer
    ####################
    learn_rate = tf.train.exponential_decay(base_learn_rate,
                                            global_step,
                                            steps_per_learn_rate_decay,
                                            learn_rate_decay,
                                            staircase=True)
    model['learn_rate'] = learn_rate
    eps = 1e-7

    optimizer = tf.train.AdamOptimizer(learn_rate, epsilon=eps)
    gvs = optimizer.compute_gradients(total_loss)
    capped_gvs = []
    for grad, var in gvs:
        if grad is not None:
            capped_gvs.append((tf.clip_by_value(grad, -1, 1), var))
        else:
            capped_gvs.append((grad, var))
    train_step = optimizer.apply_gradients(capped_gvs, global_step=global_step)
    model['train_step'] = train_step

    ####################
    # Statistics
    ####################
    # Here statistics (hard measures) is always using matching.
    y_out_hard = tf.to_float(y_out > 0.5)
    iou_hard = modellib.f_iou(y_out_hard, y_gt, timespan, pairwise=True)
    wt_cov_hard = modellib.f_weighted_coverage(iou_hard, y_gt)
    model['wt_cov_hard'] = wt_cov_hard
    unwt_cov_hard = modellib.f_unweighted_coverage(iou_hard, match_count)
    model['unwt_cov_hard'] = unwt_cov_hard
    iou_hard_mask = tf.reduce_sum(iou_hard * real_match, [1])
    iou_hard = tf.reduce_sum(
        tf.reduce_sum(iou_hard_mask, [1]) / match_count) / num_ex_f
    model['iou_hard'] = iou_hard

    dice = modellib.f_dice(y_out_hard, y_gt, timespan, pairwise=True)
    dice = tf.reduce_sum(tf.reduce_sum(
        dice * real_match, reduction_indices=[1, 2]) / match_count) / \
        num_ex_f
    model['dice'] = dice
    model['count_acc'] = modellib.f_count_acc(s_out, s_gt)
    model['dic'] = modellib.f_dic(s_out, s_gt, abs=False)
    model['dic_abs'] = modellib.f_dic(s_out, s_gt, abs=True)

    ################################
    # Controller output statistics
    ################################
    if fixed_gamma:
        attn_lg_gamma_mean = tf.constant([0.0])
        attn_box_lg_gamma_mean = tf.constant([2.0])
        y_out_lg_gamma_mean = tf.constant([2.0])
    else:
        attn_lg_gamma_mean = tf.reduce_sum(attn_lg_gamma) / num_ex_f / timespan
        attn_box_lg_gamma_mean = tf.reduce_sum(
            attn_box_lg_gamma) / num_ex_f / timespan
        y_out_lg_gamma_mean = tf.reduce_sum(
            y_out_lg_gamma) / num_ex_f / timespan
    model['attn_lg_gamma_mean'] = attn_lg_gamma_mean
    model['attn_box_lg_gamma_mean'] = attn_box_lg_gamma_mean
    model['y_out_lg_gamma_mean'] = y_out_lg_gamma_mean

    return model
Ejemplo n.º 24
0
 def __init__(self, h5_fname):
     """Initialize the reader and pre-load image IDs from an HDF5 file.

     Args:
         h5_fname: string, path to the HDF5 file backing this reader.
     """
     self.log = logger.get()
     self.h5_fname = h5_fname
     self.log.info('Reading image IDs')
     # Cache the ID list eagerly; _read_ids is expected to be defined on
     # the enclosing class (not visible in this chunk — confirm).
     self.img_ids = self._read_ids()
Ejemplo n.º 25
0
def get_model(opt):
  """The box model"""
  log = logger.get()
  model = {}

  timespan = opt['timespan']
  inp_height = opt['inp_height']
  inp_width = opt['inp_width']
  inp_depth = opt['inp_depth']
  padding = opt['padding']
  filter_height = opt['filter_height']
  filter_width = opt['filter_width']

  ctrl_cnn_filter_size = opt['ctrl_cnn_filter_size']
  ctrl_cnn_depth = opt['ctrl_cnn_depth']
  ctrl_cnn_pool = opt['ctrl_cnn_pool']
  ctrl_rnn_hid_dim = opt['ctrl_rnn_hid_dim']

  num_ctrl_mlp_layers = opt['num_ctrl_mlp_layers']
  ctrl_mlp_dim = opt['ctrl_mlp_dim']

  attn_box_padding_ratio = opt['attn_box_padding_ratio']

  wd = opt['weight_decay']
  use_bn = opt['use_bn']
  box_loss_fn = opt['box_loss_fn']
  base_learn_rate = opt['base_learn_rate']
  learn_rate_decay = opt['learn_rate_decay']
  steps_per_learn_rate_decay = opt['steps_per_learn_rate_decay']
  pretrain_cnn = opt['pretrain_cnn']

  if 'pretrain_net' in opt:
    pretrain_net = opt['pretrain_net']
  else:
    pretrain_net = None

  if 'freeze_pretrain_cnn' in opt:
    freeze_pretrain_cnn = opt['freeze_pretrain_cnn']
  else:
    freeze_pretrain_cnn = True

  squash_ctrl_params = opt['squash_ctrl_params']
  clip_gradient = opt['clip_gradient']
  fixed_order = opt['fixed_order']
  num_ctrl_rnn_iter = opt['num_ctrl_rnn_iter']
  num_glimpse_mlp_layers = opt['num_glimpse_mlp_layers']

  if 'fixed_var' in opt:
    fixed_var = opt['fixed_var']
  else:
    fixed_var = True

  if 'use_iou_box' in opt:
    use_iou_box = opt['use_iou_box']
  else:
    use_iou_box = False

  if 'dynamic_var' in opt:
    dynamic_var = opt['dynamic_var']
  else:
    dynamic_var = False

  if 'num_semantic_classes' in opt:
    num_semantic_classes = opt['num_semantic_classes']
  else:
    num_semantic_classes = 1

  if 'add_d_out' in opt:
    add_d_out = opt['add_d_out']
    add_y_out = opt['add_y_out']
  else:
    add_d_out = False
    add_y_out = False

  rnd_hflip = opt['rnd_hflip']
  rnd_vflip = opt['rnd_vflip']
  rnd_transpose = opt['rnd_transpose']
  rnd_colour = opt['rnd_colour']

  ############################
  # Input definition
  ############################
  # Input image, [B, H, W, D]
  x = tf.placeholder(
      'float', [None, inp_height, inp_width, inp_depth], name='x')
  x_shape = tf.shape(x)
  num_ex = x_shape[0]

  # Groundtruth segmentation, [B, T, H, W]
  y_gt = tf.placeholder(
      'float', [None, timespan, inp_height, inp_width], name='y_gt')

  # Groundtruth confidence score, [B, T]
  s_gt = tf.placeholder('float', [None, timespan], name='s_gt')

  if add_d_out:
    d_in = tf.placeholder(
        'float', [None, inp_height, inp_width, 8], name='d_in')
    model['d_in'] = d_in
  if add_y_out:
    y_in = tf.placeholder(
        'float', [None, inp_height, inp_width, num_semantic_classes],
        name='y_in')
    model['y_in'] = y_in
  # Whether in training stage.
  phase_train = tf.placeholder('bool', name='phase_train')
  phase_train_f = tf.to_float(phase_train)
  model['x'] = x
  model['y_gt'] = y_gt
  model['s_gt'] = s_gt
  model['phase_train'] = phase_train

  # Global step
  global_step = tf.Variable(0.0, name='global_step')

  ###############################
  # Random input transformation
  ###############################
  # Either add both or add nothing.
  assert (add_d_out and add_y_out) or (not add_d_out and not add_y_out)
  if not add_d_out:
    results = img.random_transformation(
        x,
        padding,
        phase_train,
        rnd_hflip=rnd_hflip,
        rnd_vflip=rnd_vflip,
        rnd_transpose=rnd_transpose,
        rnd_colour=rnd_colour,
        y=y_gt)
    x, y_gt = results['x'], results['y']
  else:
    results = img.random_transformation(
        x,
        padding,
        phase_train,
        rnd_hflip=rnd_hflip,
        rnd_vflip=rnd_vflip,
        rnd_transpose=rnd_transpose,
        rnd_colour=rnd_colour,
        y=y_gt,
        d=d_in,
        c=y_in)
    x, y_gt, d_in, y_in = results['x'], results['y'], results['d'], results['c']
    model['d_in_trans'] = d_in
    model['y_in_trans'] = y_in
  model['x_trans'] = x
  model['y_gt_trans'] = y_gt

  ############################
  # Canvas: external memory
  ############################
  canvas = tf.zeros(tf.pack([num_ex, inp_height, inp_width, 1]))
  ccnn_inp_depth = inp_depth + 1
  acnn_inp_depth = inp_depth + 1
  if add_d_out:
    ccnn_inp_depth += 8
    acnn_inp_depth += 8
  if add_y_out:
    ccnn_inp_depth += num_semantic_classes
    acnn_inp_depth += num_semantic_classes

  ############################
  # Controller CNN definition
  ############################
  ccnn_filters = ctrl_cnn_filter_size
  ccnn_nlayers = len(ccnn_filters)
  ccnn_channels = [ccnn_inp_depth] + ctrl_cnn_depth
  ccnn_pool = ctrl_cnn_pool
  ccnn_act = [tf.nn.relu] * ccnn_nlayers
  ccnn_use_bn = [use_bn] * ccnn_nlayers
  pt = pretrain_net or pretrain_cnn

  if pt:
    log.info('Loading pretrained weights from {}'.format(pt))
    with h5py.File(pt, 'r') as h5f:
      pt_cnn_nlayers = 0
      # Assuming pt_cnn_nlayers is smaller than or equal to
      # ccnn_nlayers.
      for ii in range(ccnn_nlayers):
        if 'attn_cnn_w_{}'.format(ii) in h5f:
          cnn_prefix = 'attn_'
          log.info('Loading attn_cnn_w_{}'.format(ii))
          log.info('Loading attn_cnn_b_{}'.format(ii))
          pt_cnn_nlayers += 1
        elif 'cnn_w_{}'.format(ii) in h5f:
          cnn_prefix = ''
          log.info('Loading cnn_w_{}'.format(ii))
          log.info('Loading cnn_b_{}'.format(ii))
          pt_cnn_nlayers += 1
        elif 'ctrl_cnn_w_{}'.format(ii) in h5f:
          cnn_prefix = 'ctrl_'
          log.info('Loading ctrl_cnn_w_{}'.format(ii))
          log.info('Loading ctrl_cnn_b_{}'.format(ii))
          pt_cnn_nlayers += 1

      ccnn_init_w = [{
          'w': h5f['{}cnn_w_{}'.format(cnn_prefix, ii)][:],
          'b': h5f['{}cnn_b_{}'.format(cnn_prefix, ii)][:]
      } for ii in range(pt_cnn_nlayers)]
      for ii in range(pt_cnn_nlayers):
        for tt in range(timespan):
          for w in ['beta', 'gamma']:
            ccnn_init_w[ii]['{}_{}'.format(w, tt)] = h5f[
                '{}cnn_{}_{}_{}'.format(cnn_prefix, ii, tt, w)][:]
      ccnn_frozen = [freeze_pretrain_cnn] * pt_cnn_nlayers
      for ii in range(pt_cnn_nlayers, ccnn_nlayers):
        ccnn_init_w.append(None)
        ccnn_frozen.append(False)
  else:
    ccnn_init_w = None
    ccnn_frozen = None

  ccnn = nn.cnn(ccnn_filters,
                ccnn_channels,
                ccnn_pool,
                ccnn_act,
                ccnn_use_bn,
                phase_train=phase_train,
                wd=wd,
                scope='ctrl_cnn',
                model=model,
                init_weights=ccnn_init_w,
                frozen=ccnn_frozen)
  h_ccnn = [None] * timespan

  ############################
  # Controller RNN definition
  ############################
  ccnn_subsample = np.array(ccnn_pool).prod()
  crnn_h = inp_height / ccnn_subsample
  crnn_w = inp_width / ccnn_subsample
  crnn_dim = ctrl_rnn_hid_dim
  canvas_dim = inp_height * inp_width / (ccnn_subsample**2)

  glimpse_map_dim = crnn_h * crnn_w
  glimpse_feat_dim = ccnn_channels[-1]
  crnn_inp_dim = glimpse_feat_dim

  pt = pretrain_net
  if pt:
    log.info('Loading pretrained controller RNN weights from {}'.format(pt))
    h5f = h5py.File(pt, 'r')
    crnn_init_w = {}
    for w in [
        'w_xi', 'w_hi', 'b_i', 'w_xf', 'w_hf', 'b_f', 'w_xu', 'w_hu', 'b_u',
        'w_xo', 'w_ho', 'b_o'
    ]:
      key = 'ctrl_lstm_{}'.format(w)
      crnn_init_w[w] = h5f[key][:]
    crnn_frozen = None
  else:
    crnn_init_w = None
    crnn_frozen = None

  crnn_state = [None] * (timespan + 1)
  crnn_glimpse_map = [None] * timespan
  crnn_g_i = [None] * timespan
  crnn_g_f = [None] * timespan
  crnn_g_o = [None] * timespan
  h_crnn = [None] * timespan
  crnn_state[-1] = tf.zeros(tf.pack([num_ex, crnn_dim * 2]))
  crnn_cell = nn.lstm(
      crnn_inp_dim,
      crnn_dim,
      wd=wd,
      scope='ctrl_lstm',
      init_weights=crnn_init_w,
      frozen=crnn_frozen,
      model=model)

  ############################
  # Glimpse MLP definition
  ############################
  gmlp_dims = [crnn_dim] * num_glimpse_mlp_layers + [glimpse_map_dim]
  gmlp_act = [tf.nn.relu] * \
      (num_glimpse_mlp_layers - 1) + [tf.nn.softmax]
  gmlp_dropout = None

  pt = pretrain_net
  if pt:
    log.info('Loading pretrained glimpse MLP weights from {}'.format(pt))
    h5f = h5py.File(pt, 'r')
    gmlp_init_w = [{
        'w': h5f['glimpse_mlp_w_{}'.format(ii)][:],
        'b': h5f['glimpse_mlp_b_{}'.format(ii)][:]
    } for ii in range(num_glimpse_mlp_layers)]
    gmlp_frozen = None
  else:
    gmlp_init_w = None
    gmlp_frozen = None

  gmlp = nn.mlp(gmlp_dims,
                gmlp_act,
                add_bias=True,
                dropout_keep=gmlp_dropout,
                phase_train=phase_train,
                wd=wd,
                scope='glimpse_mlp',
                init_weights=gmlp_init_w,
                frozen=gmlp_frozen,
                model=model)

  ############################
  # Controller MLP definition
  ############################
  cmlp_dims = [crnn_dim] + [ctrl_mlp_dim] * \
      (num_ctrl_mlp_layers - 1) + [9]
  cmlp_act = [tf.nn.relu] * (num_ctrl_mlp_layers - 1) + [None]
  cmlp_dropout = None

  pt = pretrain_net
  if pt:
    log.info('Loading pretrained controller MLP weights from {}'.format(pt))
    h5f = h5py.File(pt, 'r')
    cmlp_init_w = [{
        'w': h5f['ctrl_mlp_w_{}'.format(ii)][:],
        'b': h5f['ctrl_mlp_b_{}'.format(ii)][:]
    } for ii in range(num_ctrl_mlp_layers)]
    cmlp_frozen = None
  else:
    cmlp_init_w = None
    cmlp_frozen = None

  cmlp = nn.mlp(cmlp_dims,
                cmlp_act,
                add_bias=True,
                dropout_keep=cmlp_dropout,
                phase_train=phase_train,
                wd=wd,
                scope='ctrl_mlp',
                init_weights=cmlp_init_w,
                frozen=cmlp_frozen,
                model=model)

  ##########################
  # Score MLP definition
  ##########################
  pt = pretrain_net
  if pt:
    log.info('Loading score mlp weights from {}'.format(pt))
    h5f = h5py.File(pt, 'r')
    smlp_init_w = [{
        'w': h5f['score_mlp_w_{}'.format(ii)][:],
        'b': h5f['score_mlp_b_{}'.format(ii)][:]
    } for ii in range(1)]
  else:
    smlp_init_w = None
  smlp = nn.mlp([crnn_dim, num_semantic_classes], [None],
                wd=wd,
                scope='score_mlp',
                init_weights=smlp_init_w,
                model=model)
  s_out = [None] * timespan

  ##########################
  # Attention box
  ##########################
  attn_ctr_norm = [None] * timespan
  attn_lg_size = [None] * timespan
  attn_lg_var = [None] * timespan
  attn_ctr = [None] * timespan
  attn_size = [None] * timespan
  attn_top_left = [None] * timespan
  attn_bot_right = [None] * timespan
  attn_box = [None] * timespan
  attn_box_lg_gamma = [None] * timespan
  attn_box_gamma = [None] * timespan
  const_ones = tf.ones(tf.pack([num_ex, filter_height, filter_width, 1]))
  attn_box_beta = tf.constant([-5.0])
  iou_soft_box = [None] * timespan

  #############################
  # Groundtruth attention box
  #############################
  attn_top_left_gt, attn_bot_right_gt, attn_box_gt = modellib.get_gt_box(
      y_gt, padding_ratio=attn_box_padding_ratio, center_shift_ratio=0.0)
  attn_ctr_gt, attn_size_gt = modellib.get_box_ctr_size(attn_top_left_gt,
                                                        attn_bot_right_gt)
  attn_ctr_norm_gt = modellib.get_normalized_center(attn_ctr_gt, inp_height,
                                                    inp_width)
  attn_lg_size_gt = modellib.get_normalized_size(attn_size_gt, inp_height,
                                                 inp_width)

  ##########################
  # Groundtruth mix
  ##########################
  grd_match_cum = tf.zeros(tf.pack([num_ex, timespan]))

  ##########################
  # Computation graph
  ##########################
  for tt in range(timespan):
    # Controller CNN
    ccnn_inp_list = [x, canvas]
    if add_d_out:
      ccnn_inp_list.append(d_in)
    if add_y_out:
      ccnn_inp_list.append(y_in)
    ccnn_inp = tf.concat(3, ccnn_inp_list)
    acnn_inp = ccnn_inp
    h_ccnn[tt] = ccnn(ccnn_inp)
    _h_ccnn = h_ccnn[tt]
    h_ccnn_last = _h_ccnn[-1]

    # Controller RNN [B, R1]
    crnn_inp = tf.reshape(h_ccnn_last, [-1, glimpse_map_dim, glimpse_feat_dim])
    crnn_state[tt] = [None] * (num_ctrl_rnn_iter + 1)
    crnn_g_i[tt] = [None] * num_ctrl_rnn_iter
    crnn_g_f[tt] = [None] * num_ctrl_rnn_iter
    crnn_g_o[tt] = [None] * num_ctrl_rnn_iter
    h_crnn[tt] = [None] * num_ctrl_rnn_iter

    crnn_state[tt][-1] = tf.zeros(tf.pack([num_ex, crnn_dim * 2]))

    crnn_glimpse_map[tt] = [None] * num_ctrl_rnn_iter
    crnn_glimpse_map[tt][0] = tf.ones(tf.pack([num_ex, glimpse_map_dim, 1
                                              ])) / glimpse_map_dim

    # Inner glimpse RNN
    for tt2 in range(num_ctrl_rnn_iter):
      crnn_glimpse = tf.reduce_sum(crnn_inp * crnn_glimpse_map[tt][tt2], [1])
      crnn_state[tt][tt2], crnn_g_i[tt][tt2], crnn_g_f[tt][tt2], \
          crnn_g_o[tt][tt2] = \
          crnn_cell(crnn_glimpse, crnn_state[tt][tt2 - 1])
      h_crnn[tt][tt2] = tf.slice(crnn_state[tt][tt2], [0, crnn_dim],
                                 [-1, crnn_dim])
      h_gmlp = gmlp(h_crnn[tt][tt2])
      if tt2 < num_ctrl_rnn_iter - 1:
        crnn_glimpse_map[tt][tt2 + 1] = tf.expand_dims(h_gmlp[-1], 2)

    ctrl_out = cmlp(h_crnn[tt][-1])[-1]

    attn_ctr_norm[tt] = tf.slice(ctrl_out, [0, 0], [-1, 2])
    attn_lg_size[tt] = tf.slice(ctrl_out, [0, 2], [-1, 2])

    # Restrict to (-1, 1), (-inf, 0)
    if squash_ctrl_params:
      attn_ctr_norm[tt] = tf.tanh(attn_ctr_norm[tt])
      attn_lg_size[tt] = -tf.nn.softplus(attn_lg_size[tt])

    attn_ctr[tt], attn_size[tt] = modellib.get_unnormalized_attn(
        attn_ctr_norm[tt], attn_lg_size[tt], inp_height, inp_width)
    attn_box_lg_gamma[tt] = tf.slice(ctrl_out, [0, 7], [-1, 1])

    if fixed_var:
      attn_lg_var[tt] = tf.zeros(tf.pack([num_ex, 2]))
    else:
      attn_lg_var[tt] = modellib.get_normalized_var(attn_size[tt],
                                                    filter_height, filter_width)
    if dynamic_var:
      attn_lg_var[tt] = tf.slice(ctrl_out, [0, 4], [-1, 2])
    attn_box_gamma[tt] = tf.reshape(
        tf.exp(attn_box_lg_gamma[tt]), [-1, 1, 1, 1])
    attn_top_left[tt], attn_bot_right[tt] = modellib.get_box_coord(
        attn_ctr[tt], attn_size[tt])

    # Initial filters (predicted)
    filter_y = modellib.get_gaussian_filter(
        attn_ctr[tt][:, 0], attn_size[tt][:, 0], attn_lg_var[tt][:, 0],
        inp_height, filter_height)
    filter_x = modellib.get_gaussian_filter(
        attn_ctr[tt][:, 1], attn_size[tt][:, 1], attn_lg_var[tt][:, 1],
        inp_width, filter_width)
    filter_y_inv = tf.transpose(filter_y, [0, 2, 1])
    filter_x_inv = tf.transpose(filter_x, [0, 2, 1])

    # Attention box
    attn_box[tt] = attn_box_gamma[tt] * modellib.extract_patch(
        const_ones, filter_y_inv, filter_x_inv, 1)
    attn_box[tt] = tf.sigmoid(attn_box[tt] + attn_box_beta)
    attn_box[tt] = tf.reshape(attn_box[tt], [-1, 1, inp_height, inp_width])

    if fixed_order:
      _y_out = tf.expand_dims(y_gt[:, tt, :, :], 3)
    else:
      if use_iou_box:
        iou_soft_box[tt] = modellib.f_iou_box(
            tf.expand_dims(attn_top_left[tt], 1),
            tf.expand_dims(attn_bot_right[tt], 1), attn_top_left_gt,
            attn_bot_right_gt)
      else:
        iou_soft_box[tt] = modellib.f_inter(
            attn_box[tt], attn_box_gt) / \
            modellib.f_union(attn_box[tt], attn_box_gt, eps=1e-5)
      grd_match = modellib.f_greedy_match(iou_soft_box[tt], grd_match_cum)
      grd_match = tf.expand_dims(tf.expand_dims(grd_match, 2), 3)
      _y_out = tf.expand_dims(tf.reduce_sum(grd_match * y_gt, 1), 3)

    # Add independent uniform noise to groundtruth.
    _noise = tf.random_uniform(
        tf.pack([num_ex, inp_height, inp_width, 1]), 0, 0.3)
    _y_out = _y_out - _y_out * _noise
    canvas = tf.stop_gradient(tf.maximum(_y_out, canvas))
    # canvas += tf.stop_gradient(_y_out)

    # Scoring network
    s_out[tt] = smlp(h_crnn[tt][-1])[-1]

    if num_semantic_classes == 1:
      s_out[tt] = tf.sigmoid(s_out[tt])
    else:
      s_out[tt] = tf.nn.softmax(s_out[tt])

  #########################
  # Model outputs
  #########################
  s_out = tf.concat(1, [tf.expand_dims(tmp, 1) for tmp in s_out])
  if num_semantic_classes == 1:
    s_out = s_out[:, :, 0]
  model['s_out'] = s_out
  attn_box = tf.concat(1, attn_box)
  model['attn_box'] = attn_box
  attn_top_left = tf.concat(1,
                            [tf.expand_dims(tmp, 1) for tmp in attn_top_left])
  attn_bot_right = tf.concat(1,
                             [tf.expand_dims(tmp, 1) for tmp in attn_bot_right])
  attn_ctr = tf.concat(1, [tf.expand_dims(tmp, 1) for tmp in attn_ctr])
  attn_size = tf.concat(1, [tf.expand_dims(tmp, 1) for tmp in attn_size])
  model['attn_top_left'] = attn_top_left
  model['attn_bot_right'] = attn_bot_right
  model['attn_ctr'] = attn_ctr
  model['attn_size'] = attn_size
  model['attn_ctr_norm_gt'] = attn_ctr_norm_gt
  model['attn_lg_size_gt'] = attn_lg_size_gt
  model['attn_top_left_gt'] = attn_top_left_gt
  model['attn_bot_right_gt'] = attn_bot_right_gt
  model['attn_box_gt'] = attn_box_gt
  attn_ctr_norm = tf.concat(1,
                            [tf.expand_dims(tmp, 1) for tmp in attn_ctr_norm])
  attn_lg_size = tf.concat(1, [tf.expand_dims(tmp, 1) for tmp in attn_lg_size])
  model['attn_ctr_norm'] = attn_ctr_norm
  model['attn_lg_size'] = attn_lg_size

  attn_params = tf.concat(2, [attn_ctr_norm, attn_lg_size])
  attn_params_gt = tf.concat(2, [attn_ctr_norm_gt, attn_lg_size_gt])

  #########################
  # Loss function
  #########################
  y_gt_shape = tf.shape(y_gt)
  num_ex_f = tf.to_float(y_gt_shape[0])
  max_num_obj = tf.to_float(y_gt_shape[1])

  ############################
  # Box loss
  ############################
  if fixed_order:
    # [B, T] for fixed order.
    iou_soft_box = modellib.f_iou(attn_box, attn_box_gt, pairwise=False)
  else:
    # [B, T, T] for matching.
    iou_soft_box = tf.concat(
        1, [tf.expand_dims(iou_soft_box[tt], 1) for tt in range(timespan)])

  identity_match = modellib.get_identity_match(num_ex, timespan, s_gt)
  if fixed_order:
    match_box = identity_match
  else:
    match_box = modellib.f_segm_match(iou_soft_box, s_gt)

  model['match_box'] = match_box
  match_sum_box = tf.reduce_sum(match_box, reduction_indices=[2])
  match_count_box = tf.reduce_sum(match_sum_box, reduction_indices=[1])
  match_count_box = tf.maximum(1.0, match_count_box)

  # [B] if fixed order, [B, T] if matching.
  if fixed_order:
    iou_soft_box_mask = iou_soft_box
  else:
    iou_soft_box_mask = tf.reduce_sum(iou_soft_box * match_box, [1])
  iou_soft_box = tf.reduce_sum(iou_soft_box_mask, [1])
  iou_soft_box = tf.reduce_sum(iou_soft_box / match_count_box) / num_ex_f

  if box_loss_fn == 'mse':
    box_loss = modellib.f_match_loss(
        attn_params,
        attn_params_gt,
        match_box,
        timespan,
        modellib.f_squared_err,
        model=model)
  elif box_loss_fn == 'huber':
    box_loss = modellib.f_match_loss(attn_params, attn_params_gt, match_box,
                                     timespan, modellib.f_huber)
  if box_loss_fn == 'iou':
    box_loss = -iou_soft_box
  elif box_loss_fn == 'wt_iou':
    box_loss = -wt_iou_soft_box
  elif box_loss_fn == 'wt_cov':
    box_loss = -modellib.f_weighted_coverage(iou_soft_box, box_map_gt)
  elif box_loss_fn == 'bce':
    box_loss = modellib.f_match_loss(box_map, box_map_gt, match_box, timespan,
                                     modellib.f_bce)
  else:
    raise Exception('Unknown box_loss_fn: {}'.format(box_loss_fn))
  model['box_loss'] = box_loss

  box_loss_coeff = tf.constant(1.0)
  model['box_loss_coeff'] = box_loss_coeff
  tf.add_to_collection('losses', box_loss_coeff * box_loss)

  ####################
  # Score loss
  ####################
  if num_semantic_classes == 1:
    conf_loss = modellib.f_conf_loss(
        s_out, match_box, timespan, use_cum_min=True)
  else:
    conf_loss = modellib.f_conf_loss(
        1 - s_out[:, :, 0], match_box, timespan, use_cum_min=True)
  model['conf_loss'] = conf_loss
  conf_loss_coeff = tf.constant(1.0)
  tf.add_to_collection('losses', conf_loss_coeff * conf_loss)

  ####################
  # Total loss
  ####################
  total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
  model['loss'] = total_loss

  ####################
  # Optimizer
  ####################
  learn_rate = tf.train.exponential_decay(
      base_learn_rate,
      global_step,
      steps_per_learn_rate_decay,
      learn_rate_decay,
      staircase=True)
  model['learn_rate'] = learn_rate
  eps = 1e-7
  optim = tf.train.AdamOptimizer(learn_rate, epsilon=eps)
  gvs = optim.compute_gradients(total_loss)
  capped_gvs = []
  for grad, var in gvs:
    if grad is not None:
      capped_gvs.append((tf.clip_by_value(grad, -1, 1), var))
    else:
      capped_gvs.append((grad, var))
  train_step = optim.apply_gradients(capped_gvs, global_step=global_step)
  model['train_step'] = train_step

  ####################
  # Glimpse
  ####################
  # T * T2 * [B, H' * W'] => [B, T, T2, H', W']
  crnn_glimpse_map = tf.concat(1, [
      tf.expand_dims(
          tf.concat(1, [
              tf.expand_dims(crnn_glimpse_map[tt][tt2], 1)
              for tt2 in range(num_ctrl_rnn_iter)
          ]), 1) for tt in range(timespan)
  ])
  crnn_glimpse_map = tf.reshape(
      crnn_glimpse_map, [-1, timespan, num_ctrl_rnn_iter, crnn_h, crnn_w])
  model['ctrl_rnn_glimpse_map'] = crnn_glimpse_map

  return model
Ejemplo n.º 26
0
def main(experiment,
         model,
         train,
         val,
         checkpoint,
         use_db=True,
         test=False,
         reduction=0,
         random=True,
         add_config=None,
         gpu_device=None,
         cpu_device='/cpu:0',
         num_gpus=False,
         transfer=False,
         placeholders=False,
         save_test_npz=True,
         num_batches=None,
         map_out='test_maps',
         out_dir=None):
    """Interpret and run a model.

    Resolves experiment parameters (from the DB or a local experiments
    module), builds/runs the model via ``model_tools.build_model``, and
    optionally saves test results as an .npz file.

    Args:
        experiment: experiment name to look up (DB key when ``use_db``,
            otherwise a module under ``experiments/``).
        model: optional model name overriding the experiment default.
        train: optional training-dataset name override.
        val: optional validation-dataset name override.
        checkpoint: checkpoint identifier forwarded to the model builder.
        use_db: pull experiment parameters from the DB instead of a module.
        test: run in test mode; combined with ``save_test_npz`` this
            requires ``out_dir``.
        reduction: unused here (kept for interface compatibility with the
            commented-out fine-tune path).
        random: forwarded to ``db.get_parameters``.
        add_config: extra config forwarded to ``model_tools.build_model``.
        gpu_device: list of TF device strings; defaults to ``['/gpu:0']``.
            Overridden when ``num_gpus`` is truthy.
        cpu_device: TF device string for CPU-pinned ops.
        num_gpus: when truthy, use devices ``/gpu:0 .. /gpu:num_gpus-1``.
        transfer: unused here (kept for interface compatibility).
        placeholders: forwarded to ``model_tools.build_model``.
        save_test_npz: in test mode, save results under ``out_dir``.
        num_batches: forwarded to ``model_tools.build_model``.
        map_out: output map directory name, forwarded to the builder.
        out_dir: directory for saved test results.

    Raises:
        RuntimeError: if test results should be saved but ``out_dir`` is
            not given.
    """
    # Fix for the previous mutable default argument (gpu_device=['/gpu:0']):
    # a shared list default persists across calls and could be mutated
    # downstream. Use a None sentinel and build a fresh list per call.
    if gpu_device is None:
        gpu_device = ['/gpu:0']
    main_config = Config()
    dt_string = py_utils.get_dt_stamp()
    log = logger.get(
        os.path.join(main_config.log_dir, '%s_%s' % (experiment, dt_string)))
    if num_gpus:
        gpu_device = ['/gpu:%d' % i for i in range(num_gpus)]
    # Fail fast before any expensive work if results cannot be saved.
    if test and save_test_npz and out_dir is None:
        raise RuntimeError('You must specify an out_dir.')
    if use_db:
        # Pull one (possibly randomly selected) parameter combination.
        exp_params = db.get_parameters(log=log,
                                       experiment=experiment,
                                       random=random)[0]
    else:
        # Load parameters from a local experiments module instead.
        exp = py_utils.import_module(experiment, pre_path='experiments')
        exp_params = exp.experiment_params()
        exp_params['_id'] = -1
        exp_params['experiment'] = experiment
        if model is not None:
            exp_params['model'] = model
        else:
            # NOTE(review): this rejects a single-element model list even
            # though one model name is available; '> 0' may be intended.
            # Behavior preserved pending confirmation.
            assert len(exp_params['model']) > 1, 'No model name supplied.'
            exp_params['model'] = exp_params['model'][0]
        if train is not None:
            exp_params['train_dataset'] = train
        if val is not None:
            exp_params['val_dataset'] = val
    # NOTE: a fine-tune parameter path keyed on reduction/out_dir/transfer
    # was previously sketched here and is intentionally disabled.
    results = model_tools.build_model(exp_params=exp_params,
                                      dt_string=dt_string,
                                      log=log,
                                      test=test,
                                      config=main_config,
                                      use_db=use_db,
                                      num_batches=num_batches,
                                      map_out=map_out,
                                      placeholders=placeholders,
                                      add_config=add_config,
                                      gpu_device=gpu_device,
                                      cpu_device=cpu_device,
                                      checkpoint=checkpoint)
    if test and save_test_npz:
        # Save results somewhere safe, tagged with run provenance.
        py_utils.make_dir(out_dir)
        results['checkpoint'] = checkpoint
        results['model'] = model
        results['experiment'] = experiment
        np.savez(os.path.join(out_dir, results['exp_label']), **results)
    log.info('Finished.')
Ejemplo n.º 27
0
def main(
        experiment_name,
        list_experiments=False,
        load_and_evaluate_ckpt=None,
        placeholder_data=None,
        grad_images=False,
        gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB."""
    if list_experiments:
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        if len(exps) == 0:
            print 'No experiments found.'
        else:
            print 'You can add to the DB with: '\
                'python prepare_experiments.py --experiment=%s' % \
                exps[0].values()[0]
        return

    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()

    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    assert experiment_name is not None, 'Empty experiment name.'
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config.load_and_evaluate_ckpt = load_and_evaluate_ckpt
    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        from ops import evaluation
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name,
        log=log,
        config=config)  # Update config w/ DB params
    dataset_module = py_utils.import_module(
        model_dir=config.dataset_info,
        dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    train_key = [k for k in dataset_module.folds.keys() if 'train' in k]
    if not len(train_key):
        train_key = 'train'
    else:
        train_key = train_key[0]
    train_data, train_means_image, train_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=train_key,
        log=log)
    val_key = [k for k in dataset_module.folds.keys() if 'val' in k]
    if not len(val_key):
        val_key = 'train'
    else:
        val_key = val_key[0]
    val_data, val_means_image, val_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=val_key,
        log=log)

    # Initialize output folders
    dir_list = {
        'checkpoints': os.path.join(
            config.checkpoints, condition_label),
        'summaries': os.path.join(
            config.summaries, condition_label),
        'condition_evaluations': os.path.join(
            config.condition_evaluations, condition_label),
        'experiment_evaluations': os.path.join(  # DEPRECIATED
            config.experiment_evaluations, experiment_label),
        'visualization': os.path.join(
            config.visualizations, condition_label),
        'weights': os.path.join(
            config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]

    # Prepare data loaders on the cpu
    if all(isinstance(i, list) for i in config.data_augmentations):
        if config.data_augmentations:
            config.data_augmentations = py_utils.flatten_list(
                config.data_augmentations,
                log)
    if load_and_evaluate_ckpt is not None:
        config.epochs = 1
        config.train_shuffle = False
        config.val_shuffle = False
    with tf.device('/cpu:0'):
        if placeholder_data:
            placeholder_shape = placeholder_data['train_image_shape']
            placeholder_dtype = placeholder_data['train_image_dtype']
            original_train_images = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='train_images')
            placeholder_shape = placeholder_data['train_label_shape']
            placeholder_dtype = placeholder_data['train_label_dtype']
            original_train_labels = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='train_labels')
            placeholder_shape = placeholder_data['val_image_shape']
            placeholder_dtype = placeholder_data['val_image_dtype']
            original_val_images = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='val_images')
            placeholder_shape = placeholder_data['val_label_shape']
            placeholder_dtype = placeholder_data['val_label_dtype']
            original_val_labels = tf.placeholder(
                dtype=placeholder_dtype,
                shape=placeholder_shape,
                name='val_labels')

            # Apply augmentations
            (
                train_images,
                train_labels
            ) = data_loader.placeholder_image_augmentations(
                images=original_train_images,
                model_input_image_size=dataset_module.model_input_image_size,
                labels=original_train_labels,
                data_augmentations=config.data_augmentations,
                batch_size=config.batch_size)
            (
                val_images,
                val_labels
            ) = data_loader.placeholder_image_augmentations(
                images=original_val_images,
                model_input_image_size=dataset_module.model_input_image_size,
                labels=original_val_labels,
                data_augmentations=config.data_augmentations,
                batch_size=config.batch_size)

            # Store in the placeholder dict
            placeholder_data['train_images'] = original_train_images
            placeholder_data['train_labels'] = original_train_labels
            placeholder_data['val_images'] = original_val_images
            placeholder_data['val_labels'] = original_val_labels
        else:
            train_images, train_labels = data_loader.inputs(
                dataset=train_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=config.data_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_train,
                resize_output=config.resize_output)
            if hasattr(config, 'val_augmentations'):
                val_augmentations = config.val_augmentations
            else:
                val_augmentations = config.data_augmentations
            val_images, val_labels = data_loader.inputs(
                dataset=val_data,
                batch_size=config.batch_size,
                model_input_image_size=dataset_module.model_input_image_size,
                tf_dict=dataset_module.tf_dict,
                data_augmentations=val_augmentations,
                num_epochs=config.epochs,
                tf_reader_settings=dataset_module.tf_reader,
                shuffle=config.shuffle_val,
                resize_output=config.resize_output)
    log.info('Created tfrecord dataloader tensors.')

    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join(
                'models',
                'structs',
                experiment_name).replace(os.path.sep, '.')
            )
    except IOError:
        print 'Could not find the model structure: %s in folder %s' % (
            struct_name,
            experiment_name)

    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure,
        exp_params=exp_params)

    # Prepare variables for the models
    if len(dataset_module.output_size) == 2:
        log.warning(
            'Found > 1 dimension for your output size.'
            'Converting to a scalar.')
        dataset_module.output_size = np.prod(
            dataset_module.output_size)

    if hasattr(model_dict, 'output_structure'):
        # Use specified output layer
        output_structure = model_dict.output_structure
    else:
        output_structure = None

    # Correct number of output neurons if needed
    if config.dataloader_override and\
            'weights' in output_structure[-1].keys():
        output_neurons = output_structure[-1]['weights'][0]
        size_check = output_neurons != dataset_module.output_size
        fc_check = output_structure[-1]['layers'][0] == 'fc'
        if size_check and fc_check:
            output_structure[-1]['weights'][0] = dataset_module.output_size
            log.warning('Adjusted output neurons from %s to %s.' % (
                output_neurons,
                dataset_module.output_size))

    # Prepare model on GPU
    if not hasattr(dataset_module, 'input_normalization'):
        dataset_module.input_normalization = None
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:
            # Training model
            model = model_utils.model_class(
                mean=train_means_image,
                training=True,
                output_size=dataset_module.output_size,
                input_normalization=dataset_module.input_normalization)
            train_scores, model_summary, _ = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            if grad_images:
                oh_dims = int(train_scores.get_shape()[-1])
                target_scores = tf.one_hot(train_labels, oh_dims) * train_scores
                train_gradients = tf.gradients(target_scores, train_images)[0]
            log.info('Built training model.')
            log.debug(
                json.dumps(model_summary, indent=4),
                verbose=0)
            print_model_architecture(model_summary)

            # Normalize labels on GPU if needed
            if 'normalize_labels' in exp_params.keys():
                if exp_params['normalize_labels'] == 'zscore':
                    train_labels -= train_means_label['mean']
                    train_labels /= train_means_label['std']
                    val_labels -= train_means_label['mean']
                    val_labels /= train_means_label['std']
                    log.info('Z-scoring labels.')
                elif exp_params['normalize_labels'] == 'mean':
                    train_labels -= train_means_label['mean']
                    val_labels -= val_means_label['mean']
                    log.info('Mean-centering labels.')

            # Check the shapes of labels and scores
            if not isinstance(train_scores, list):
                if len(
                        train_scores.get_shape()) != len(
                            train_labels.get_shape()):
                    train_shape = train_scores.get_shape().as_list()
                    label_shape = train_labels.get_shape().as_list()
                    val_shape = val_scores.get_shape().as_list()
                    val_label_shape = val_labels.get_shape().as_list()

                    if len(
                        train_shape) == 2 and len(
                            label_shape) == 1 and train_shape[-1] == 1:
                        train_labels = tf.expand_dims(train_labels, axis=-1)
                        val_labels = tf.expand_dims(val_labels, axis=-1)
                    elif len(
                        train_shape) == 2 and len(
                            label_shape) == 1 and train_shape[-1] == 1:
                        train_scores = tf.expand_dims(train_scores, axis=-1)
                        val_scores = tf.expand_dims(val_scores, axis=-1)

            # Prepare the loss function
            train_loss, _ = loss_utils.loss_interpreter(
                logits=train_scores,  # TODO
                labels=train_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(train_loss, list):
                for lidx, tl in enumerate(train_loss):
                    tf.summary.scalar('training_loss_%s' % lidx, tl)
                train_loss = tf.add_n(train_loss)
            else:
                tf.summary.scalar('training_loss', train_loss)

            # Add weight decay if requested
            if len(model.regularizations) > 0:
                train_loss = loss_utils.wd_loss(
                    regularizations=model.regularizations,
                    loss=train_loss,
                    wd_penalty=config.regularization_strength)
            assert config.lr is not None, 'No learning rate.'  # TODO: Make a QC function 
            if config.lr > 1:
                old_lr = config.lr
                config.lr = loss_utils.create_lr_schedule(
                    train_batch=config.batch_size,
                    num_training=config.lr)
                config.optimizer = 'momentum'
                log.info('Forcing momentum classifier.')
            else:
                old_lr = None
            train_op = loss_utils.optimizer_interpreter(
                loss=train_loss,
                lr=config.lr,
                optimizer=config.optimizer,
                constraints=config.optimizer_constraints,
                model=model)
            log.info('Built training loss function.')

            # Add a score for the training set
            train_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO: Attach to exp cnfg
                pred=train_scores,  # TODO
                labels=train_labels)

            # Add aux scores if requested
            train_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    train_aux[m] = eval_metrics.metric_interpreter(
                        metric=m,
                        pred=train_scores,
                        labels=train_labels)  # [0]  # TODO: Fix for multiloss

            # Prepare remaining tensorboard summaries
            if config.tensorboard_images:
                if len(train_images.get_shape()) == 4:
                    tf_fun.image_summaries(train_images, tag='Training images')
                if (np.asarray(
                        train_labels.get_shape().as_list()) > 1).sum() > 2:
                    tf_fun.image_summaries(
                        train_labels,
                        tag='Training_targets')
                    tf_fun.image_summaries(
                        train_scores,
                        tag='Training_predictions')
            if isinstance(train_accuracy, list):
                for tidx, ta in enumerate(train_accuracy):
                    tf.summary.scalar('training_accuracy_%s' % tidx, ta)
            else:
                tf.summary.scalar('training_accuracy', train_accuracy)
            if config.pr_curve:
                if isinstance(train_scores, list):
                    for pidx, train_score in enumerate(train_scores):
                        train_label = train_labels[:, pidx]
                        pr_summary.op(
                            tag='training_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(
                                    train_score,
                                    axis=-1),
                                tf.float32),
                            labels=tf.cast(train_label, tf.bool),
                            display_name='training_precision_recall_%s' % pidx)
                else:
                    pr_summary.op(
                        tag='training_pr',
                        predictions=tf.cast(
                            tf.argmax(
                                train_scores,
                                axis=-1),
                            tf.float32),
                        labels=tf.cast(train_labels, tf.bool),
                        display_name='training_precision_recall')
            log.info('Added training summaries.')

        with tf.variable_scope('cnn', tf.AUTO_REUSE) as scope:
            # Validation model
            scope.reuse_variables()
            val_model = model_utils.model_class(
                mean=train_means_image,  # Normalize with train data
                training=False,
                output_size=dataset_module.output_size,
                input_normalization=dataset_module.input_normalization)
            val_scores, _, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            if grad_images:
                oh_dims = int(val_scores.get_shape()[-1])
                target_scores = tf.one_hot(val_labels, oh_dims) * val_scores
                val_gradients = tf.gradients(target_scores, val_images)[0]
            log.info('Built validation model.')

            # Check the shapes of labels and scores
            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_scores,
                labels=val_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(val_loss, list):
                for lidx, tl in enumerate(val_loss):
                    tf.summary.scalar('validation_loss_%s' % lidx, tl)
                val_loss = tf.add_n(val_loss)
            else:
                tf.summary.scalar('validation_loss', val_loss)

            # Add a score for the validation set
            val_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO
                pred=val_scores,
                labels=val_labels)

            # Add aux scores if requested
            val_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    val_aux[m] = eval_metrics.metric_interpreter(
                        metric=m,
                        pred=val_scores,
                        labels=val_labels)  # [0]  # TODO: Fix for multiloss

            # Prepare tensorboard summaries
            if config.tensorboard_images:
                if len(val_images.get_shape()) == 4:
                    tf_fun.image_summaries(
                        val_images,
                        tag='Validation')
                if (np.asarray(
                        val_labels.get_shape().as_list()) > 1).sum() > 2:
                    tf_fun.image_summaries(
                        val_labels,
                        tag='Validation_targets')
                    tf_fun.image_summaries(
                        val_scores,
                        tag='Validation_predictions')
            if isinstance(val_accuracy, list):
                for vidx, va in enumerate(val_accuracy):
                    tf.summary.scalar('validation_accuracy_%s' % vidx, va)
            else:
                tf.summary.scalar('validation_accuracy', val_accuracy)
            if config.pr_curve:
                if isinstance(val_scores, list):
                    for pidx, val_score in enumerate(val_scores):
                        val_label = val_labels[:, pidx]
                        pr_summary.op(
                            tag='validation_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(
                                    val_score,
                                    axis=-1),
                                tf.float32),
                            labels=tf.cast(val_label, tf.bool),
                            display_name='validation_precision_recall_%s' %
                            pidx)
                else:
                    pr_summary.op(
                        tag='validation_pr',
                        predictions=tf.cast(
                            tf.argmax(
                                val_scores,
                                axis=-1),
                            tf.float32),
                        labels=tf.cast(val_labels, tf.bool),
                        display_name='validation_precision_recall')
            log.info('Added validation summaries.')

    # Set up summaries and saver
    if not hasattr(config, 'max_to_keep'):
        config.max_to_keep = None
    saver = tf.train.Saver(
        var_list=tf.global_variables(),
        max_to_keep=config.max_to_keep)
    summary_op = tf.summary.merge_all()

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(
            tf.global_variables_initializer(),
            tf.local_variables_initializer())
        )
    summary_writer = tf.summary.FileWriter(dir_list['summaries'], sess.graph)

    # Set up exemplar threading
    if placeholder_data:
        coord, threads = None, None
    else:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Create dictionaries of important training and validation information
    train_dict = {
        'train_loss': train_loss,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores
    }
    val_dict = {
        'val_loss': val_loss,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
    }

    if grad_images:
        train_dict['train_gradients'] = train_gradients
        val_dict['val_gradients'] = val_gradients

    if isinstance(train_accuracy, list):
        for tidx, (ta, va) in enumerate(zip(train_accuracy, val_accuracy)):
            train_dict['train_accuracy_%s' % tidx] = ta
            val_dict['val_accuracy_%s' % tidx] = va
    else:
        train_dict['train_accuracy_0'] = train_accuracy
        val_dict['val_accuracy_0'] = val_accuracy

    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        del train_dict['train_op']

    if hasattr(dataset_module, 'aux_score'):
        # Attach auxillary scores to tensor dicts
        for m in dataset_module.aux_scores:
            train_dict['train_aux_%s' % m] = train_aux[m]
            val_dict['val_aux_%s' % m] = val_aux[m]

    # Start training loop
    if old_lr is not None:
        config.lr = old_lr
    np.save(
        os.path.join(
            dir_list['condition_evaluations'], 'training_config_file'),
        config)
    log.info('Starting training')
    if load_and_evaluate_ckpt is not None:
        return evaluation.evaluation_loop(
            config=config,
            db=db,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            threads=threads,
            summary_dir=dir_list['summaries'],
            checkpoint_dir=dir_list['checkpoints'],
            weight_dir=dir_list['weights'],
            train_dict=train_dict,
            val_dict=val_dict,
            train_model=model,
            val_model=val_model,
            exp_params=exp_params,
            placeholder_data=placeholder_data)
    else:
        output_dict = training.training_loop(
            config=config,
            db=db,
            coord=coord,
            sess=sess,
            summary_op=summary_op,
            summary_writer=summary_writer,
            saver=saver,
            threads=threads,
            summary_dir=dir_list['summaries'],
            checkpoint_dir=dir_list['checkpoints'],
            weight_dir=dir_list['weights'],
            train_dict=train_dict,
            val_dict=val_dict,
            train_model=model,
            val_model=val_model,
            exp_params=exp_params)

    log.info('Finished training.')
    model_name = config.model_struct.replace('/', '_')
    if output_dict is not None:
        py_utils.save_npys(
            data=output_dict,
            model_name=model_name,
            output_string=dir_list['experiment_evaluations'])
Ejemplo n.º 28
0
import os
from setuptools import setup, find_packages
from db import credentials
from utils import logger
from config import Config


"""e.g. python setup.py install"""

setup(
    name="cluttered_nist",
    version="0.1",
    packages=find_packages(),
)

config = Config()
log = logger.get(os.path.join(config.log_dir, 'setup'))
log.info('Installed required packages and created paths.')

params = credentials.postgresql_connection()
sys_password = credentials.machine_credentials()['password']
os.popen(
    'sudo -u postgres createuser -sdlP %s' % params['user'], 'w').write(
    sys_password)
os.popen(
    'sudo -u postgres createdb %s -O %s' % (
        params['database'],
        params['user']), 'w').write(sys_password)
log.info('Created DB.')
Ejemplo n.º 29
0
def lstm(inp_dim,
         hid_dim,
         wd=None,
         scope='lstm',
         model=None,
         init_weights=None,
         frozen=False):
    """Build an LSTM component and return its single-step unroll function.

    Args:
        inp_dim: Input data dim
        hid_dim: Hidden state dim
        wd: Weight decay
        scope: Prefix
        model: Optional dict; when given, receives every created variable
            plus mean-magnitude diagnostics under scoped keys.
        init_weights: Optional dict mapping weight names to initial values.
        frozen: When True, variables are created as non-trainable.

    Returns:
        unroll: callable (inp, state) -> (state, g_i, g_f, g_o); state packs
            [cell, hidden] along axis 1.
    """
    log = logger.get()
    log.info('LSTM: {}'.format(scope))
    log.info('Input dim: {}'.format(inp_dim))
    log.info('Hidden dim: {}'.format(hid_dim))

    # Gate codes in creation order: input, forget, update (candidate),
    # output. Each gate owns an input weight, a hidden weight, and a bias.
    gate_codes = ['i', 'f', 'u', 'o']
    all_names = []
    for g in gate_codes:
        all_names += ['w_x' + g, 'w_h' + g, 'b_' + g]
    if init_weights is None:
        init_weights = {name: None for name in all_names}

    trainable = not frozen
    log.info('Trainable: {}'.format(trainable))

    params = {}
    with tf.variable_scope(scope):
        for g in gate_codes:
            # Input-to-hidden weights (weight decay applied).
            params['w_x' + g] = weight_variable(
                [inp_dim, hid_dim],
                init_val=init_weights['w_x' + g],
                wd=wd,
                name='w_x' + g,
                trainable=trainable)
            # Hidden-to-hidden weights (weight decay applied).
            params['w_h' + g] = weight_variable(
                [hid_dim, hid_dim],
                init_val=init_weights['w_h' + g],
                wd=wd,
                name='w_h' + g,
                trainable=trainable)
            # Biases: the forget gate starts at 1.0 so memory stays open
            # early in training; every other bias starts at 0.0.
            params['b_' + g] = weight_variable(
                [hid_dim],
                init_val=init_weights['b_' + g],
                initializer=tf.constant_initializer(
                    1.0 if g == 'f' else 0.0),
                name='b_' + g,
                trainable=trainable)

        # Bind locals for the closure below.
        w_xi, w_hi, b_i = params['w_xi'], params['w_hi'], params['b_i']
        w_xf, w_hf, b_f = params['w_xf'], params['w_hf'], params['b_f']
        w_xu, w_hu, b_u = params['w_xu'], params['w_hu'], params['b_u']
        w_xo, w_ho, b_o = params['w_xo'], params['w_ho'], params['b_o']

        if model is not None:
            # Expose every variable under a scope-prefixed key.
            for name in all_names:
                model['{}_{}'.format(scope, name)] = params[name]

            def _abs_sum(t):
                # Sum of absolute values, used for magnitude diagnostics.
                return tf.reduce_sum(tf.abs(t))

            model['{}_w_x_mean'.format(scope)] = (
                _abs_sum(w_xi) + _abs_sum(w_xf) + _abs_sum(w_xu) +
                _abs_sum(w_xo)) / inp_dim / hid_dim / 4
            model['{}_w_h_mean'.format(scope)] = (
                _abs_sum(w_hi) + _abs_sum(w_hf) + _abs_sum(w_hu) +
                _abs_sum(w_ho)) / hid_dim / hid_dim / 4
            model['{}_b_mean'.format(scope)] = (
                _abs_sum(b_i) + _abs_sum(b_f) + _abs_sum(b_u) +
                _abs_sum(b_o)) / hid_dim / 4

    def unroll(inp, state):
        """Advance the LSTM one step; returns new state and gate values."""
        with tf.variable_scope(scope):
            c = tf.slice(state, [0, 0], [-1, hid_dim])
            h = tf.slice(state, [0, hid_dim], [-1, hid_dim])
            g_i = tf.sigmoid(tf.matmul(inp, w_xi) + tf.matmul(h, w_hi) + b_i)
            g_f = tf.sigmoid(tf.matmul(inp, w_xf) + tf.matmul(h, w_hf) + b_f)
            g_o = tf.sigmoid(tf.matmul(inp, w_xo) + tf.matmul(h, w_ho) + b_o)
            u = tf.tanh(tf.matmul(inp, w_xu) + tf.matmul(h, w_hu) + b_u)
            c = g_f * c + g_i * u
            h = g_o * tf.tanh(c)
            # Old-style TF concat signature (axis first) kept to match the
            # TensorFlow version this project targets.
            state = tf.concat(1, [c, h])

        return state, g_i, g_f, g_o

    return unroll
Ejemplo n.º 30
0
def main(reset_process,
         initialize_db,
         experiment_name,
         remove=None,
         force_repeat=None):
    """Populate db with experiments to run.

    Args:
        reset_process: If True, reset the in-process counter in the DB.
        initialize_db: If True, (re)initialize the database schema.
        experiment_name: Experiment name, or comma-delimited list of names,
            to expand into parameter combinations and insert into the DB.
        remove: Optional experiment name to delete from the DB.
        force_repeat: Optional int; overrides each experiment's 'repeat'.
    """
    main_config = config.Config()
    log = logger.get(os.path.join(main_config.log_dir, 'prepare_experiments'))
    if reset_process:
        db.reset_in_process()
        log.info('Reset experiment progress counter in DB.')
    if initialize_db:
        db.initialize_database()
        log.info('Initialized DB.')
    if remove is not None:
        db_config = credentials.postgresql_connection()
        with db.db(db_config) as db_conn:
            db_conn.remove_experiment(remove)
        log.info('Removed %s.' % remove)
    if experiment_name is not None:  # TODO: add capability for bayesian opt.
        if ',' in experiment_name:
            # Parse a comma-delimeted string of experiments
            experiment_name = experiment_name.split(',')
        else:
            experiment_name = [experiment_name]
        db_config = credentials.postgresql_connection()
        for exp in experiment_name:
            experiment_dict = py_utils.import_module(
                module=exp, pre_path=main_config.experiment_classes)
            experiment_dict = experiment_dict.experiment_params()
            # BUG FIX: experiment_dict is a dict, so hasattr() never finds
            # keys -- the original unconditionally clobbered a user-supplied
            # 'get_map'. Test key membership instead.
            if 'get_map' not in experiment_dict:
                experiment_dict['get_map'] = [False]
            train_loss_function = experiment_dict.get('train_loss_function',
                                                      None)
            if train_loss_function is None:
                # Fall back: reuse the generic loss for both phases.
                experiment_dict['train_loss_function'] = experiment_dict[
                    'loss_function']
                experiment_dict['val_loss_function'] = experiment_dict[
                    'loss_function']
                experiment_dict.pop('loss_function', None)
            exp_combos = package_parameters(experiment_dict, log)
            log.info('Preparing experiment.')
            assert exp_combos is not None, 'Experiment is empty.'

            # Repeat if requested
            repeats = experiment_dict.get('repeat', 0)
            if force_repeat is not None:
                repeats = force_repeat
            if repeats:
                dcs = []
                for _ in range(repeats):
                    # Need deep copies so each repeat can be mutated
                    # independently later.
                    dcs += deepcopy(exp_combos)
                exp_combos = dcs
                # BUG FIX: log the effective repeat count. The original read
                # experiment_dict['repeat'], which raises KeyError when
                # force_repeat is used and the dict has no 'repeat' key.
                log.info('Expanded %sx to %s combinations.' %
                         (repeats, len(exp_combos)))

            # Serialize augmentation lists to JSON for DB storage.
            json_combos = []
            for combo in exp_combos:
                combo['train_augmentations'] = json.dumps(
                    deepcopy(combo['train_augmentations']))
                combo['val_augmentations'] = json.dumps(
                    deepcopy(combo['val_augmentations']))
                json_combos += [combo]

            # Add data to the DB
            with db.db(db_config) as db_conn:
                db_conn.populate_db(json_combos)
                db_conn.return_status('CREATE')
            log.info('Added new experiments.')
Ejemplo n.º 31
0
def training_loop(config,
                  sess,
                  summary_op,
                  summary_writer,
                  saver,
                  summary_dir,
                  checkpoint_dir,
                  prediction_dir,
                  train_dict,
                  test_dict,
                  exp_label,
                  lr,
                  row_id,
                  data_structure,
                  coord,
                  threads,
                  top_test=5):
    """Run the model training loop.

    Trains until the coordinator stops (or an exception is raised),
    evaluating on the test set every config.test_iters steps, checkpointing
    whenever the test loss beats one of the top_test best losses so far, and
    mirroring progress into the DB (row_id) and the data_structure saver.

    Args:
        config: Experiment configuration (lr, test_iters, batch sizes, ...).
        sess: Active tf session.
        summary_op / summary_writer / saver: TF summary and checkpoint hooks.
        summary_dir / checkpoint_dir / prediction_dir: Output directories.
        train_dict / test_dict: Tensor dicts to sess.run each phase.
        exp_label: Experiment label (unused here; kept for interface compat).
        lr: Learning-rate placeholder fed each step.
        row_id: Optional DB row to update with progress/errors.
        data_structure: Saver object recording train/test curves.
        coord / threads: Queue-runner coordinator and threads.
        top_test: Number of best test losses to track for checkpointing.

    Returns:
        output_dict: dict of train/test loss and PR traces plus per-step
            durations.
    """
    log = logger.get(
        os.path.join(config.log_dir,
                     summary_dir.split(os.path.sep)[-1]))
    step = 0
    train_losses, train_prs, timesteps = ([], [], [])
    test_losses, test_prs = ([], [])

    # Set starting lr
    it_lr = config.lr
    lr_info = None

    # Point the DB row at this run's summary directory.
    if row_id is not None:
        db.update_results(results=summary_dir, row_id=row_id)

    # Start loop
    em = None  # terminating exception, if any, for the saver record below
    test_perf = np.ones(top_test) * np.inf  # top-k best test losses
    try:
        while not coord.should_stop():
            start_time = time.time()
            feed_dict = {lr: it_lr}
            it_train_dict = sess.run(train_dict, feed_dict=feed_dict)
            # BUG FIX: removed a leftover `import ipdb; ipdb.set_trace()`
            # debug breakpoint that halted every training iteration.
            duration = time.time() - start_time
            train_losses += [it_train_dict['train_loss']]
            train_prs += [it_train_dict['train_pr']]
            timesteps += [duration]
            try:
                data_structure.update_training(
                    train_pr=it_train_dict['train_pr'],
                    train_loss=it_train_dict['train_loss'],
                    train_step=step)
                data_structure.save()
            except Exception as e:
                log.warning('Failed to update saver class: %s' % e)
            if step % config.test_iters == 0:
                # Test metrics are averaged over config['test_evals'] batches.
                it_test_loss = []
                it_test_scores = []
                it_test_labels = []
                it_test_volumes = []
                it_test_pr = []
                for num_vals in range(config['test_evals']):
                    log.info('Testing %s...' % num_vals)

                    # Test accuracy as the average of n batches
                    it_test_dict = sess.run(test_dict)
                    it_test_pr += [it_test_dict['test_pr']]
                    it_test_loss += [it_test_dict['test_loss']]
                    it_test_labels += [it_test_dict['test_labels']]
                    it_test_scores += [it_test_dict['test_logits']]
                    it_test_volumes += [it_test_dict['test_images']]
                test_lo = np.mean(it_test_loss)
                test_pr = np.mean(it_test_pr)
                test_losses += [test_lo]
                test_prs += [test_pr]

                # Update data structure
                try:
                    data_structure.update_test(
                        test_pr=test_pr,
                        test_loss=test_lo,
                        test_step=step,
                        test_lr_info=lr_info,
                        test_lr=it_lr)
                    data_structure.save()
                except Exception as e:
                    log.warning('Failed to update saver class: %s' % e)

                # Mirror the step counter into the DB.
                try:
                    if row_id is not None:
                        db.update_step(step=step, row_id=row_id)
                except Exception as e:
                    log.warning('Failed to update step count: %s' % e)

                # Save a checkpoint when this loss beats one of the tracked
                # top_test best losses.
                ckpt_path = os.path.join(checkpoint_dir,
                                         'model_%s.ckpt' % step)
                try:
                    test_check = np.where(test_lo < test_perf)[0]
                    if len(test_check):
                        saver.save(sess, ckpt_path, global_step=step)
                        # Replace the first beaten slot with the new loss.
                        test_perf[test_check[0]] = test_lo
                        log.info('Saved checkpoint to: %s' % ckpt_path)

                        # Save predictions alongside the checkpoint.
                        pred_path = os.path.join(prediction_dir,
                                                 'model_%s' % step)
                        np.savez(pred_path,
                                 volumes=it_test_volumes,
                                 predictions=it_test_scores,
                                 labels=it_test_labels)

                except Exception as e:
                    # BUG FIX: include the exception so save failures are
                    # diagnosable instead of silently swallowed.
                    log.info('Failed to save checkpoint: %s' % e)

                # Update LR per the configured schedule.
                it_lr, lr_info = tf_fun.update_lr(it_lr=it_lr,
                                                  test_losses=test_losses,
                                                  alg=config.training_routine,
                                                  lr_info=lr_info)

                # Training status and test accuracy
                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; '
                    '%.3f sec/batch) | Training accuracy = %s | '
                    'Test accuracy = %s | Test pr = %s | logdir = %s')
                log.info(format_str % (
                    datetime.now(),
                    step,
                    it_train_dict['train_loss'],
                    config.test_batch_size / duration,
                    float(duration),
                    0.,
                    0.,
                    test_pr,
                    summary_dir))
            else:
                # Training status
                format_str = ('%s: step %d, loss = %.5f (%.1f examples/sec; '
                              '%.3f sec/batch) | Training accuracy = %s | '
                              'Training pr = %s')
                log.info(format_str % (
                    datetime.now(),
                    step,
                    it_train_dict['train_loss'],
                    config.train_batch_size / duration,
                    float(duration),
                    0.,
                    it_train_dict['train_pr']))

            # End iteration
            step += 1
    except Exception as e:
        # BUG FIX: bind the exception to an outer name. Under Python 3 the
        # `as` target is deleted when the except block exits, so the
        # original `except Exception as em` left `em` unusable below.
        em = e
        log.warning('Failed training: %s' % em)
        if row_id is not None:
            db.update_error(error=True, row_id=row_id)
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()  # BUG FIX: original closed the session twice

    try:
        data_structure.update_error(msg=em)
        data_structure.save()
    except Exception as e:
        log.warning('Failed to update saver class: %s' % e)
    log.info('Done training for %d epochs, %d steps.' % (config.epochs, step))
    log.info('Saved to: %s' % checkpoint_dir)

    # Package output variables into a dictionary
    output_dict = {
        'train_losses': train_losses,
        'test_losses': test_losses,
        'train_prs': train_prs,
        'test_prs': test_prs,
        'timesteps': timesteps,
    }
    return output_dict
Ejemplo n.º 32
0
def main(experiment_name,
         list_experiments=False,
         load_and_evaluate_ckpt=None,
         config_file=None,
         ckpt_file=None,
         gpu_device='/gpu:0'):
    """Create a tensorflow worker to run experiments in your DB."""
    if list_experiments:
        exps = db.list_experiments()
        print '_' * 30
        print 'Initialized experiments:'
        print '_' * 30
        for l in exps:
            print l.values()[0]
        print '_' * 30
        if len(exps) == 0:
            print 'No experiments found.'
        else:
            print 'You can add to the DB with: '\
                'python prepare_experiments.py --experiment=%s' % \
                exps[0].values()[0]
        return

    if experiment_name is None:
        print 'No experiment specified. Pulling one out of the DB.'
        experiment_name = db.get_experiment_name()

    # Prepare to run the model
    config = Config()
    condition_label = '%s_%s' % (experiment_name, py_utils.get_dt_stamp())
    experiment_label = '%s' % (experiment_name)
    log = logger.get(os.path.join(config.log_dir, condition_label))
    experiment_dict = experiments.experiments()[experiment_name]()
    config = add_to_config(d=experiment_dict, config=config)  # Globals
    config.load_and_evaluate_ckpt = load_and_evaluate_ckpt
    config, exp_params = process_DB_exps(
        experiment_name=experiment_name, log=log,
        config=config)  # Update config w/ DB params
    config = np.load(config_file).item()
    dataset_module = py_utils.import_module(model_dir=config.dataset_info,
                                            dataset=config.dataset)
    dataset_module = dataset_module.data_processing()  # hardcoded class name
    train_data, train_means_image, train_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[1],  # TODO: SEARCH FOR INDEX.
        log=log)
    val_data, val_means_image, val_means_label = get_data_pointers(
        dataset=config.dataset,
        base_dir=config.tf_records,
        cv=dataset_module.folds.keys()[0],
        log=log)

    # Initialize output folders
    dir_list = {
        'checkpoints':
        os.path.join(config.checkpoints, condition_label),
        'summaries':
        os.path.join(config.summaries, condition_label),
        'condition_evaluations':
        os.path.join(config.condition_evaluations, condition_label),
        'experiment_evaluations':
        os.path.join(  # DEPRECIATED
            config.experiment_evaluations, experiment_label),
        'visualization':
        os.path.join(config.visualizations, condition_label),
        'weights':
        os.path.join(config.condition_evaluations, condition_label, 'weights')
    }
    [py_utils.make_dir(v) for v in dir_list.values()]

    # Prepare data loaders on the cpu
    if all(isinstance(i, list) for i in config.data_augmentations):
        if config.data_augmentations:
            config.data_augmentations = py_utils.flatten_list(
                config.data_augmentations, log)
    config.epochs = 1
    config.shuffle = False
    with tf.device('/cpu:0'):
        train_images, train_labels = data_loader.inputs(
            dataset=train_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=config.data_augmentations,
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle_train,
            resize_output=config.resize_output)
        if hasattr(config, 'val_augmentations'):
            val_augmentations = config.val_augmentations
        else:
            val_augmentations = config.data_augmentations
        val_images, val_labels = data_loader.inputs(
            dataset=val_data,
            batch_size=config.batch_size,
            model_input_image_size=dataset_module.model_input_image_size,
            tf_dict=dataset_module.tf_dict,
            data_augmentations=['resize_and_crop'],
            num_epochs=config.epochs,
            tf_reader_settings=dataset_module.tf_reader,
            shuffle=config.shuffle_val,
            resize_output=config.resize_output)
    log.info('Created tfrecord dataloader tensors.')

    # Load model specification
    struct_name = config.model_struct.split(os.path.sep)[-1]
    try:
        model_dict = py_utils.import_module(
            dataset=struct_name,
            model_dir=os.path.join('models', 'structs',
                                   experiment_name).replace(os.path.sep, '.'))
    except IOError:
        print 'Could not find the model structure: %s in folder %s' % (
            struct_name, experiment_name)

    # Inject model_dict with hyperparameters if requested
    model_dict.layer_structure = hp_opt_utils.inject_model_with_hps(
        layer_structure=model_dict.layer_structure, exp_params=exp_params)

    # Prepare model on GPU
    with tf.device(gpu_device):
        with tf.variable_scope('cnn') as scope:
            # Normalize labels if needed
            if 'normalize_labels' in exp_params.keys():
                if exp_params['normalize_labels'] == 'zscore':
                    train_labels -= train_means_label['mean']
                    train_labels /= train_means_label['std']
                    log.info('Z-scoring labels.')
                elif exp_params['normalize_labels'] == 'mean':
                    train_labels -= train_means_label['mean']
                    log.info('Mean-centering labels.')

            # Training model
            if len(dataset_module.output_size) == 2:
                log.warning('Found > 1 dimension for your output size.'
                            'Converting to a scalar.')
                dataset_module.output_size = np.prod(
                    dataset_module.output_size)

            if hasattr(model_dict, 'output_structure'):
                # Use specified output layer
                output_structure = model_dict.output_structure
            else:
                output_structure = None
            model = model_utils.model_class(
                mean=train_means_image,
                training=True,
                output_size=dataset_module.output_size)
            train_scores, model_summary = model.build(
                data=train_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            eval_graph = tf.Graph()
            with eval_graph.as_default():
                with eval_graph.gradient_override_map({'selu': 'GradLRP'}):
                    train_grad_images = tf.gradients(
                        train_scores[0] * tf.cast(train_labels, tf.float32),
                        train_images)[0]
            log.info('Built training model.')
            log.debug(json.dumps(model_summary, indent=4), verbose=0)
            print_model_architecture(model_summary)

            # Check the shapes of labels and scores
            if not isinstance(train_scores, list):
                if len(train_scores.get_shape()) != len(
                        train_labels.get_shape()):
                    train_shape = train_scores.get_shape().as_list()
                    label_shape = train_labels.get_shape().as_list()
                    if len(train_shape) == 2 and len(
                            label_shape) == 1 and train_shape[-1] == 1:
                        train_labels = tf.expand_dims(train_labels, axis=-1)
                    elif len(train_shape) == 2 and len(
                            label_shape) == 1 and train_shape[-1] == 1:
                        train_scores = tf.expand_dims(train_scores, axis=-1)

            # Prepare the loss function
            train_loss, _ = loss_utils.loss_interpreter(
                logits=train_scores,  # TODO
                labels=train_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(train_loss, list):
                for lidx, tl in enumerate(train_loss):
                    tf.summary.scalar('training_loss_%s' % lidx, tl)
                train_loss = tf.add_n(train_loss)
            else:
                tf.summary.scalar('training_loss', train_loss)

            # Add weight decay if requested (only when the model declared
            # regularized variables).
            if len(model.regularizations) > 0:
                train_loss = loss_utils.wd_loss(
                    regularizations=model.regularizations,
                    loss=train_loss,
                    wd_penalty=config.regularization_strength)
            # Build the training op from the configured optimizer; optional
            # constraints are forwarded to the interpreter.
            train_op = loss_utils.optimizer_interpreter(
                loss=train_loss,
                lr=config.lr,
                optimizer=config.optimizer,
                constraints=config.optimizer_constraints,
                model=model)
            log.info('Built training loss function.')

            # Add a score for the training set
            train_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO: Attach to exp cnfg
                pred=train_scores,  # TODO
                labels=train_labels)

            # Add aux scores if requested
            # (one extra metric tensor per name in dataset_module.aux_scores)
            train_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    train_aux[m] = eval_metrics.metric_interpreter(
                        metric=m, pred=train_scores,
                        labels=train_labels)[0]  # TODO: Fix for multiloss

            # Prepare remaining tensorboard summaries
            # Rank-4 tensors are assumed to be image batches (NHWC) —
            # TODO confirm layout against the input pipeline.
            if len(train_images.get_shape()) == 4:
                tf_fun.image_summaries(train_images, tag='Training images')
            # Rank > 2 labels imply dense (per-pixel) targets: also render
            # targets and predictions as images.
            if len(train_labels.get_shape()) > 2:
                tf_fun.image_summaries(train_labels, tag='Training_targets')
                tf_fun.image_summaries(train_scores,
                                       tag='Training_predictions')
            if isinstance(train_accuracy, list):
                # One scalar per accuracy head in the multi-metric case.
                for tidx, ta in enumerate(train_accuracy):
                    tf.summary.scalar('training_accuracy_%s' % tidx, ta)
            else:
                tf.summary.scalar('training_accuracy', train_accuracy)
            if config.pr_curve:
                if isinstance(train_scores, list):
                    # Multi-head: one PR curve per head, labels taken
                    # column-wise from train_labels.
                    for pidx, train_score in enumerate(train_scores):
                        train_label = train_labels[:, pidx]
                        pr_summary.op(
                            tag='training_pr_%s' % pidx,
                            predictions=tf.cast(
                                tf.argmax(train_score, axis=-1), tf.float32),
                            labels=tf.cast(train_label, tf.bool),
                            display_name='training_precision_recall_%s' % pidx)
                else:
                    pr_summary.op(tag='training_pr',
                                  predictions=tf.cast(
                                      tf.argmax(train_scores, axis=-1),
                                      tf.float32),
                                  labels=tf.cast(train_labels, tf.bool),
                                  display_name='training_precision_recall')
            log.info('Added training summaries.')

            # Validation model: rebuilt in inference mode, sharing weights
            # with the training tower via variable reuse.
            scope.reuse_variables()
            val_model = model_utils.model_class(
                mean=train_means_image,  # Normalize with train data
                training=False,  # False,
                output_size=dataset_module.output_size)
            val_scores, _ = val_model.build(  # Ignore summary
                data=val_images,
                layer_structure=model_dict.layer_structure,
                output_structure=output_structure,
                log=log,
                tower_name='cnn')
            # Gradient of (score * label) w.r.t. the input images — an
            # LRP-style attribution map via the 'selu' gradient override.
            # NOTE(review): tf.gradients here operates on tensors that live
            # in the *default* graph while a fresh eval_graph is current, so
            # the override map likely has no effect (or this errors at
            # runtime) — confirm against the TF version in use.
            eval_graph = tf.Graph()
            with eval_graph.as_default():
                with eval_graph.gradient_override_map({'selu': 'GradLRP'}):
                    val_grad_images = tf.gradients(
                        val_scores[0] * tf.cast(val_labels, tf.float32),
                        val_images)[0]
            log.info('Built validation model.')

            # Check the shapes of labels and scores: when scores are (N, 1)
            # and labels are (N,), expand labels so ranks match before the
            # loss is built.
            if not isinstance(train_scores, list):
                if len(val_scores.get_shape()) != len(val_labels.get_shape()):
                    val_shape = val_scores.get_shape().as_list()
                    val_label_shape = val_labels.get_shape().as_list()
                    if len(val_shape) == 2 and len(
                            val_label_shape) == 1 and val_shape[-1] == 1:
                        val_labels = tf.expand_dims(val_labels, axis=-1)
                    # Fix: this was a second independent 'if' with the
                    # identical condition, so whenever it fired BOTH labels
                    # and scores were expanded, leaving val_scores one rank
                    # higher than val_labels. 'elif' mirrors the training
                    # path's structure. NOTE(review): as written this branch
                    # is unreachable — the intended second condition should
                    # be confirmed against the training-side shape fixup.
                    elif len(val_shape) == 2 and len(
                            val_label_shape) == 1 and val_shape[-1] == 1:
                        val_scores = tf.expand_dims(val_scores, axis=-1)
            # Validation loss, mirroring the training-loss construction.
            val_loss, _ = loss_utils.loss_interpreter(
                logits=val_scores,
                labels=val_labels,
                loss_type=config.loss_function,
                weights=config.loss_weights,
                dataset_module=dataset_module)

            # Add loss tensorboard tracking
            if isinstance(val_loss, list):
                # Multi-loss: log each head, then report the sum.
                for lidx, tl in enumerate(val_loss):
                    tf.summary.scalar('validation_loss_%s' % lidx, tl)
                val_loss = tf.add_n(val_loss)
            else:
                tf.summary.scalar('validation_loss', val_loss)

            # Add a score for the validation set
            val_accuracy = eval_metrics.metric_interpreter(
                metric=dataset_module.score_metric,  # TODO
                pred=val_scores,
                labels=val_labels)

            # Add aux scores if requested (same metric names as the
            # training-side aux scores).
            val_aux = {}
            if hasattr(dataset_module, 'aux_scores'):
                for m in dataset_module.aux_scores:
                    val_aux[m] = eval_metrics.metric_interpreter(
                        metric=m, pred=val_scores,
                        labels=val_labels)[0]  # TODO: Fix for multiloss

            # Prepare tensorboard summaries (validation mirror of the
            # training summaries above).
            if len(val_images.get_shape()) == 4:
                tf_fun.image_summaries(val_images, tag='Validation')
            # Rank > 2 labels imply dense targets: render them as images.
            if len(val_labels.get_shape()) > 2:
                tf_fun.image_summaries(val_labels, tag='Validation_targets')
                tf_fun.image_summaries(val_scores,
                                       tag='Validation_predictions')
            if isinstance(val_accuracy, list):
                for vidx, va in enumerate(val_accuracy):
                    tf.summary.scalar('validation_accuracy_%s' % vidx, va)
            else:
                tf.summary.scalar('validation_accuracy', val_accuracy)
            if config.pr_curve:
                if isinstance(val_scores, list):
                    # Multi-head: one PR curve per head, labels column-wise.
                    for pidx, val_score in enumerate(val_scores):
                        val_label = val_labels[:, pidx]
                        pr_summary.op(
                            tag='validation_pr_%s' % pidx,
                            predictions=tf.cast(tf.argmax(val_score, axis=-1),
                                                tf.float32),
                            labels=tf.cast(val_label, tf.bool),
                            display_name='validation_precision_recall_%s' %
                            pidx)
                else:
                    pr_summary.op(tag='validation_pr',
                                  predictions=tf.cast(
                                      tf.argmax(val_scores, axis=-1),
                                      tf.float32),
                                  labels=tf.cast(val_labels, tf.bool),
                                  display_name='validation_precision_recall')
            log.info('Added validation summaries.')

    # Set up summaries and saver
    saver = tf.train.Saver(tf.global_variables())

    # Initialize the graph
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # Need to initialize both of these if supplying num_epochs to inputs
    sess.run(
        tf.group(tf.global_variables_initializer(),
                 tf.local_variables_initializer()))

    # Set up exemplar threading (queue-runner input pipeline).
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    # Create dictionaries of important training and validation information.
    # Keys here define the names used when fetched results are re-zipped in
    # the evaluation loop below.
    # NOTE(review): train_grad_images is defined above this section —
    # presumably the training-side attribution map; verify.
    train_dict = {
        'train_loss': train_loss,
        'train_images': train_images,
        'train_labels': train_labels,
        'train_op': train_op,
        'train_scores': train_scores,
        'train_grad_images': train_grad_images
    }
    val_dict = {
        'val_loss': val_loss,
        'val_images': val_images,
        'val_labels': val_labels,
        'val_scores': val_scores,
        'val_grad_images': val_grad_images
    }
    if isinstance(train_accuracy, list):
        # Multi-metric case: paired train/val accuracy heads.
        for tidx, (ta, va) in enumerate(zip(train_accuracy, val_accuracy)):
            train_dict['train_accuracy_%s' % tidx] = ta
            val_dict['val_accuracy_%s' % tidx] = va
    else:
        train_dict['train_accuracy_0'] = train_accuracy
        val_dict['val_accuracy_0'] = val_accuracy

    if load_and_evaluate_ckpt is not None:
        # Remove the train operation and add a ckpt pointer
        # NOTE(review): only the deletion happens here — no "ckpt pointer"
        # is actually added despite the comment; confirm intent.
        del train_dict['train_op']

    if hasattr(dataset_module, 'aux_scores'):
        # Attach auxiliary scores to tensor dicts.
        # Fix: the guard checked for 'aux_score' (singular) while the aux
        # dicts are built under 'aux_scores' and the loop below iterates
        # dataset_module.aux_scores — so the aux tensors were never attached
        # (or this would raise AttributeError if only the singular existed).
        for m in dataset_module.aux_scores:
            train_dict['train_aux_%s' % m] = train_aux[m]
            val_dict['val_aux_%s' % m] = val_aux[m]

    # Start training loop
    checkpoint_dir = dir_list['checkpoints']
    step = 0
    # Per-step accumulators, keyed by global step index.
    train_losses, train_accs, train_aux, timesteps = {}, {}, {}, {}
    val_scores, val_aux, val_labels, val_grads = {}, {}, {}, {}
    train_images, val_images = {}, {}
    train_scores, train_labels = {}, {}
    # Fix: aux entries are keyed 'train_aux_<metric>' / 'val_aux_<metric>'
    # (see the tensor-dict construction above); the previous substring
    # 'aux_score' could never match, so both checks were always False and
    # aux results were silently dropped.
    train_aux_check = np.any(['train_aux' in k for k in train_dict.keys()])
    val_aux_check = np.any(['val_aux' in k for k in val_dict.keys()])

    # Restore model weights before evaluating.
    saver.restore(sess, ckpt_file)

    # Start evaluation
    try:
        while not coord.should_stop():
            start_time = time.time()
            train_vars = sess.run(train_dict.values())
            it_train_dict = {
                k: v
                for k, v in zip(train_dict.keys(), train_vars)
            }
            duration = time.time() - start_time
            train_losses[step] = it_train_dict['train_loss']
            train_accs[step] = it_train_dict['train_accuracy_0']
            train_images[step] = it_train_dict['train_images']
            train_labels[step] = it_train_dict['train_labels']
            train_scores[step] = it_train_dict['train_scores']
            timesteps[step] = duration
            if train_aux_check:
                # Loop through to find aux scores
                it_train_aux = {
                    itk: itv
                    for itk, itv in it_train_dict.iteritems()
                    if 'aux_score' in itk
                }
                train_aux[step] = it_train_aux
            assert not np.isnan(it_train_dict['train_loss']).any(
            ), 'Model diverged with loss = NaN'
            if step % config.validation_iters == 0:
                # Validation accuracy as the average of
                # config.num_validation_evals batches.
                it_val_scores, it_val_labels, it_val_aux, it_val_grads, it_val_ims = [], [], [], [], []
                for num_vals in range(config.num_validation_evals):
                    val_vars = sess.run(val_dict.values())
                    it_val_dict = {
                        k: v
                        for k, v in zip(val_dict.keys(), val_vars)
                    }
                    it_val_labels += [it_val_dict['val_labels']]
                    it_val_scores += [it_val_dict['val_scores']]
                    it_val_grads += [it_val_dict['val_grad_images']]
                    it_val_ims += [it_val_dict['val_images']]
                    if val_aux_check:
                        # Fix: aux keys have the form 'val_aux_<metric>';
                        # the old substring 'aux_score' never matched.
                        iva = {
                            itk: itv
                            for itk, itv in it_val_dict.iteritems()
                            if 'val_aux' in itk
                        }
                        it_val_aux += [iva]
                val_scores[step] = it_val_scores
                val_labels[step] = it_val_labels
                val_aux[step] = it_val_aux
                # Fix: images and gradients were assigned to each other's
                # accumulators (val_images got it_val_grads and vice versa),
                # so the saved npz had the two arrays swapped — confirmed by
                # the keyword comments at the np.savez call below.
                val_images[step] = it_val_ims
                val_grads[step] = it_val_grads

            # End iteration
            step += 1

    # OutOfRangeError signals the input queues are exhausted — the normal
    # end of evaluation, not a failure.
    # NOTE: Python 2 print statements, consistent with iteritems() above.
    except tf.errors.OutOfRangeError:
        print 'Done with evaluation for %d epochs, %d steps.' % (config.epochs,
                                                                 step)
        print 'Saved to: %s' % checkpoint_dir
    finally:
        # Always stop the queue-runner threads, even on error.
        coord.request_stop()
    coord.join(threads)
    sess.close()

    # Fix: removed a leftover debugger breakpoint (import ipdb;
    # ipdb.set_trace()) that halted every run at this point.
    # Persist the collected validation tensors for offline analysis.
    np.savez(
        'val_imgs_grads',
        val_images=val_images,  # it_val_dict['val_images'],
        val_grads=val_grads,  # it_val_dict['val_grad_images'],
        val_labels=val_labels,  # it_val_dict['val_labels'],
        val_scores=val_scores)  # it_val_dict['val_scores'][0])