Example #1
    def inference(self, input_):
        self.loader = BatchLoader(
            self.config.data_dir, self.config.dataset_name,
            self.config.batch_size, self.config.num_classes,
            self.config.preprocessor, self.config.epoch,
            self.config.specialist, self.config.forward_only)

        content, filename = self.loader.prepare_inference()

        #with tf.control_dependencies([self.loader.enqueue]):
        logits = self.build_model(content)
        softmax = tf.nn.softmax(logits)

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        self.sess.run(init_op)

        self.saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(self.config.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        else:
            print('no checkpoint found...')

        self.sess.run(self.loader.enqueue,
                      feed_dict={self.loader.filenames: input_})

        m_logits, m_softmax, m_filename = self.sess.run(
            [logits, softmax, filename])

        print(m_softmax, m_filename)
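
The final sess.run fetches both the logits and their softmax in one pass. As a plain-numpy reminder of what tf.nn.softmax computes per row (a self-contained check, not part of the original snippet):

import numpy as np

def softmax(logits):
    # subtract the row max for numerical stability, then normalize
    e = np.exp(logits - logits.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

print(softmax(np.array([[1.0, 2.0, 3.0]])))  # each row sums to 1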
Example #2
class LandmarkDataLayer(caffe.Layer):
    def setup(self, bottom, top):
        param = eval(self.param_str)
        self.batch = int(param['batch'])
        self.img_size = config.IMG_SIZE

        self.batch_loader = BatchLoader(param, "train")
        self.ohem_batch_loader = BatchLoader(param, 'ohem')
        self.train_ratio = 1.
        self.ohem_ratio = 0
        if self.ohem_batch_loader.is_loaded():
            self.train_ratio = 7 / 8.
            self.ohem_ratio = 1. - self.train_ratio
        top[0].reshape(self.batch, 3, self.img_size, self.img_size)  # data
        top[1].reshape(self.batch, config.LANDMARK_SIZE * 2)  # landmark
        top[2].reshape(self.batch, 1)  # eye_dist

    def reshape(self, bottom, top):
        pass

    def forward(self, bottom, top):
        batch_data = self.batch_loader.next_batch(
            int(self.batch * self.train_ratio), '')
        batch_data += self.ohem_batch_loader.next_batch(
            int(self.batch * self.ohem_ratio), '')
        for i, datum in enumerate(batch_data):
            img, pts, eye_dist = datum
            top[0].data[i, ...] = img
            top[1].data[i, ...] = pts
            top[2].data[i, ...] = eye_dist

    def backward(self, bottom, top):
        pass
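
One caveat on the int() casts above: int(batch * 7/8.) + int(batch * 1/8.) equals batch only when the batch size is divisible by 8. A minimal sketch of a split that always adds up (the helper name is illustrative):

def split_batch(batch, train_ratio=7 / 8.):
    # integer split between the train and OHEM loaders that sums to batch
    n_train = int(round(batch * train_ratio))
    return n_train, batch - n_train

assert sum(split_batch(64)) == 64  # 56 + 8
assert sum(split_batch(30)) == 30  # 26 + 4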
Example #3
class LandmarkDataLayer(caffe.Layer):
    def setup(self, bottom, top):
        param = eval(self.param_str)
        self.batch = int(param['batch'])
        self.net = param['net']
        self.img_size = config.NET_IMG_SIZES[self.net]

        self.batch_loader = BatchLoader(param, "train")
        self.ohem_batch_loader = BatchLoader(param, 'ohem')
        self.train_ratio = 1.
        self.ohem_ratio = 0.
        if self.ohem_batch_loader.is_loaded():
            self.train_ratio = 7 / 8.
            self.ohem_ratio = 1. - self.train_ratio
        # tops must match forward() below: data, label, bbox, and landmarks
        top[0].reshape(self.batch, 3, self.img_size, self.img_size)  # data
        top[1].reshape(self.batch, 1)  # label
        top[2].reshape(self.batch, 4)  # bbox
        if self.net != 'pnet':
            top[3].reshape(self.batch, config.LANDMARK_SIZE * 2)  # landmark

    def reshape(self, bottom, top):
        pass

    def forward(self, bottom, top):
        batch_data = self.batch_loader.next_batch(
            int(self.batch * self.train_ratio), '')
        batch_data += self.ohem_batch_loader.next_batch(
            int(self.batch * self.ohem_ratio), '')
        random.shuffle(batch_data)
        for i, datum in enumerate(batch_data):
            img, label, bbox, landm5 = datum
            top[0].data[i, ...] = img
            top[1].data[i, ...] = label
            top[2].data[i, ...] = bbox
            if self.net != 'pnet':
                top[3].data[i, ...] = landm5

    def backward(self, bottom, top):
        pass
Example #4
    def setup(self, bottom, top):
        param = eval(self.param_str)
        self.batch = int(param['batch'])
        # img_size is used below but never set in this snippet;
        # config.IMG_SIZE (as in Example #2) is an assumption
        self.img_size = config.IMG_SIZE

        self.batch_loader = BatchLoader(param, "train")
        self.ohem_batch_loader = BatchLoader(param, 'ohem')
        self.train_ratio = 1.
        self.ohem_ratio = 0
        if self.ohem_batch_loader.is_loaded():
            self.train_ratio = 7 / 8.
            self.ohem_ratio = 1. - self.train_ratio
        top[0].reshape(self.batch, 3, self.img_size, self.img_size)  # data
        top[1].reshape(self.batch, 72)  # landmark
Example #5
    def __init__(self,
                 batch_size=params.BATCH_SIZE,
                 nb_epochs=params.NB_EPOCHS,
                 mask=None,
                 experiment_path=params.EXPERIMENT_PATH,
                 use_adversarial_loss=params.USE_ADVERSARIAL_LOSS,
                 lambda_decay=params.LAMBDA_DECAY,
                 lambda_adversarial=params.LAMBDA_ADVERSARIAL,
                 patience=params.PATIENCE,
                 discr_whole_image=params.DISCR_WHOLE_IMAGE,
                 discr_loss_limit=params.DISCR_LOSS_LIMIT,
                 use_dropout=params.USE_DROPOUT):
        self.batch_size = batch_size
        self.nb_epochs = nb_epochs
        self.experiment_path = experiment_path
        self.save_path = os.path.join(self.experiment_path, "model/")
        self.save_best_path = os.path.join(self.experiment_path, "best_model/")
        self.logs_path = os.path.join(self.experiment_path, "logs")
        create_dir(self.save_path)
        create_dir(self.logs_path)

        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.phase = tf.placeholder(tf.bool, name='phase')
        self.patience = patience

        # parameters for adversarial loss
        self.use_adversarial_loss = use_adversarial_loss
        self.lambda_adversarial = lambda_adversarial
        if lambda_decay:
            self.lambda_adversarial = 1 - tf.train.exponential_decay(
                .1, self.global_step, 10000, .5, staircase=True)
        self.discr_whole_image = discr_whole_image
        self.discr_loss_limit = discr_loss_limit
        self.num_discr_trained = tf.Variable(tf.constant(0, dtype=tf.int32),
                                             trainable=False)

        self.use_dropout = use_dropout

        self.batch_loader = BatchLoader(self.batch_size)

        if mask is not None:
            self.np_mask = mask
        else:
            self.np_mask = np.zeros((1, 64, 64, 1))
            self.np_mask[:, 16:48, 16:48, :] = 1

        self._sess = tf.Session()
        tf.summary.scalar("lambda_adversarial", self.lambda_adversarial)
        tf.summary.scalar("num discr trained", self.num_discr_trained)
Example #6
class GanNetTrain(caffe.Layer):
    """Data layer for training"""   
    def setup(self, bottom, top):
        layer_params = yaml.safe_load(self.param_str)
        self.batch_size = layer_params['batch_size']
        self.image_file = layer_params['image_file']
        self.batch_loader = BatchLoader(self.image_file, self.batch_size)

    def forward(self, bottom, top):
        # assign output
        top[0].data[...] = self.images_a
        top[1].data[...] = self.images_b
        top[2].data[...] = self.label_true
        top[3].data[...] = self.label_false

    def backward(self, top, propagate_down, bottom):
        """This layer does not propagate gradients."""
        pass

    def reshape(self, bottom, top):
        images_A, images_B = self.batch_loader.next_batch()
        images_A = np.array(images_A)
        images_B = np.array(images_B)
        images_A = images_A.transpose((0, 3, 1, 2))
        images_B = images_B.transpose((0, 3, 1, 2))

        self.images_a = images_A
        self.images_b = images_B
        self.label_true = np.ones((self.batch_size, 1), dtype='float32')
        self.label_false = np.zeros((self.batch_size, 1), dtype='float32')

        top[0].reshape(*self.images_a.shape)
        top[1].reshape(*self.images_b.shape)
        top[2].reshape(*self.label_true.shape)
        top[3].reshape(*self.label_false.shape)
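
Caffe invokes a Python layer's reshape() before every forward(), which is why this layer pulls the next batch and resizes its tops in reshape() and only copies the cached arrays in forward(). A sketch of the param_str the setup above expects (the file name is illustrative):

# set via python_param { param_str: ... } in the prototxt; parsed by yaml above
param_str = "{'batch_size': 16, 'image_file': 'train_pairs.txt'}"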
Example #7
def pretrain_generator(gen, gen_opt, epochs):
    global data_df
    n_iter = 0
    loader = BatchLoader(data_df)
    for epoch in range(epochs):
        # print(f'epoch = {epoch}\n --------------------------------')
        total_loss = 0
        n_iter += 1
        for batch in tqdm(loader.load_action_batch(MAX_SEQ_LEN, BATCH_SIZE,
                                                   CUDA),
                          total=int(NUM_SAMPLES / BATCH_SIZE / MAX_SEQ_LEN)):
            gen_opt.zero_grad()
            loss = gen.batchNLLLoss(batch)
            loss.backward()
            gen_opt.step()
            total_loss += loss.data.item()
        total_loss /= NUM_SAMPLES / BATCH_SIZE / MAX_SEQ_LEN
        print('iteration = %d, NLL loss = %.4f' % (n_iter, total_loss))
Example #8
    def setup(self, bottom, top):
        param = eval(self.param_str)
        self.batch = int(param['batch'])
        self.net = param['net']
        self.img_size = config.NET_IMG_SIZES[self.net]

        self.batch_loader = BatchLoader(param, "train")
        self.ohem_batch_loader = BatchLoader(param, 'ohem')
        self.train_ratio = 1.
        self.ohem_ratio = 0
        if self.ohem_batch_loader.is_loaded():
            self.train_ratio = 7 / 8.
            self.ohem_ratio = 1 / 8.
        top[0].reshape(self.batch, 3, self.img_size, self.img_size)  # data
        top[1].reshape(self.batch, 1)  # label
        top[2].reshape(self.batch, 4)  # bbox
        if self.net != 'pnet':
            top[3].reshape(self.batch, config.LANDMARK_SIZE * 2)
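
A sketch of the config values this layer reads; the numbers follow the usual MTCNN convention and are assumptions, not taken from the original config module:

# config.py (assumed values)
NET_IMG_SIZES = {'pnet': 12, 'rnet': 24, 'onet': 48}  # input side length per net
LANDMARK_SIZE = 5  # 5 facial points -> LANDMARK_SIZE * 2 = 10 regression targets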
Example #9
def train(args):
    yaml_file = yaml.safe_load(args.cfg)
    print(yaml.dump(yaml_file, default_flow_style=False))
    cfg = TrainingConfig(yaml_file)

    output_dir = cfg.output
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    chunks = get_all_chunks(cfg.input)
    print("Found {} files".format(len(chunks)))
    num_train_chunks = int(len(chunks) * cfg.train_ratio)
    training_chunks = chunks[:num_train_chunks]
    test_chunks = chunks[num_train_chunks:]
    print("Chunks Training({}) Testing({})".format(len(training_chunks),
                                                   len(test_chunks)))
    train_loader = BatchLoader(training_chunks, cfg)
    test_loader = BatchLoader(test_chunks, cfg)
    worker = TensorWorker(cfg, train_loader, test_loader)
    print()
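
get_all_chunks is not shown in this snippet; a plausible stand-in (an assumption) that collects chunk files from the input directory in a stable order, so the train/test split above is reproducible:

import glob
import os

def get_all_chunks(input_dir):
    # every file under input_dir, sorted for a deterministic split
    return sorted(glob.glob(os.path.join(input_dir, '*')))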
Example #10
class DataLayer(caffe.Layer):
    def setup(self, bottom, top):
        param = eval(self.param_str)
        self.batch = int(param['batch'])
        self.net = param['net']
        self.img_size = config.NET_IMG_SIZES[self.net]

        self.batch_loader = BatchLoader(param, "train")
        self.ohem_batch_loader = BatchLoader(param, 'ohem')
        self.train_ratio = 1.
        self.ohem_ratio = 0
        if self.ohem_batch_loader.is_loaded():
            self.train_ratio = 7 / 8.
            self.ohem_ratio = 1 / 8.
        top[0].reshape(self.batch, 3, self.img_size, self.img_size)  # data
        top[1].reshape(self.batch, 1)  # label
        top[2].reshape(self.batch, 4)  # bbox
        if self.net != 'pnet':
            top[3].reshape(self.batch, config.LANDMARK_SIZE * 2)

    def reshape(self, bottom, top):
        pass

    def forward(self, bottom, top):
        task = random.choice(config.TRAIN_TASKS[self.net])
        batch_data = self.batch_loader.next_batch(
            int(self.batch * self.train_ratio), task)
        batch_data += self.ohem_batch_loader.next_batch(
            int(self.batch * self.ohem_ratio), task)
        random.shuffle(batch_data)
        for i, datum in enumerate(batch_data):
            img, label, bbox, landm5 = datum
            top[0].data[i, ...] = img
            top[1].data[i, ...] = label
            top[2].data[i, ...] = bbox
            if self.net != 'pnet':
                top[3].data[i, ...] = landm5

    def backward(self, bottom, top):
        pass
Example #11
    def __init__(self, vocab_files,
                 generator_file,
                 batch_size=32, load_generator=True):

        self.batch_size = batch_size
        self.generator_data = None

        if generator_file:
            with open(generator_file, 'rb') as f:
                self.generator_data = [pkl.load(f)]

        self.gen_batch_loader = BatchLoader(self.generator_data,
                                            vocab_files,
                                            sentence_array=True)
        self.vocab_size = self.gen_batch_loader.words_vocab_size
Example #12
def main():
    np.random.seed(233)
    torch.manual_seed(233)
    data_loader = BatchLoader(10000)
    model = Seq2SeqModel(10, NUM_CLASSES, WINDOW_SIZE)
    trainval(model, data_loader)

    # Should print 'polo '
    inference(model, "nor marco I", "polo ")
    inference(model, "marco nor I", "polo ")
    inference(model, "nor I marco", "polo ")

    # Should print ' '
    inference(model, "nor I neither", " ")

    # More difficult task
    inference(model, "nor ma rco I", " ")
    inference(model, "ma rco nor I", " ")

Example #13
    def train(self,
              train_image_indices,
              batch_size,
              num_epochs=50,
              train_method='normal',
              lambda_1=0,
              lambda_2=0,
              start_from_pretrained_model=True,
              learning_rate=0.01,
              optimizer='SGD'):

        if os.path.exists(self.checkpoint_path):
            os.remove(self.checkpoint_path)

        model = self.initialize_model(
            start_from_pretrained_model=start_from_pretrained_model)

        model = model.to(self.device)
        criterion = nn.CrossEntropyLoss()

        if optimizer == 'SGD':
            optimizer = optim.SGD(model.parameters(),
                                  lr=learning_rate,
                                  momentum=0.9,
                                  weight_decay=5e-4)

        elif optimizer == 'Adam':
            optimizer = optim.Adam(model.parameters(),
                                   lr=learning_rate,
                                   weight_decay=5e-4)

        else:
            optimizer = optim.SGD(model.parameters(),
                                  lr=learning_rate,
                                  momentum=0.9,
                                  weight_decay=5e-4)

        train_batch_loader = BatchLoader(self.train_folder_path,
                                         train_image_indices)

        n_images = len(train_image_indices)
        if n_images % batch_size == 0:
            num_batches = n_images // batch_size
        else:
            num_batches = (n_images // batch_size) + 1

        penalty_inside_list = []
        penalty_outside_list = []
        train_acc_list = []
        train_loss_list = []
        val_loss_list = []
        val_acc_list = []
        best_acc = 0.0

        for epoch in range(num_epochs):
            model.train()
            train_batch_loader.reset()
            print('Epoch: {}/{}'.format(epoch + 1, num_epochs))
            print('-' * 50)

            train_correct = 0.0
            train_loss = 0.0
            penalty_inside = 0.0
            penalty_outside = 0.0

            for batch in range(num_batches):
                batch_indices = train_batch_loader.get_batch_indices(
                    batch_size)
                inputs = self.x_train[batch_indices]
                labels = self.y_train[batch_indices]
                inputs, labels = inputs.to(self.device), labels.to(self.device)

                if train_method == 'bbox':
                    inputs.requires_grad_()
                    outputs = model(inputs)
                    preds = torch.argmax(outputs, dim=1)

                    # cross entropy loss
                    loss = criterion(outputs, labels)
                    input_gradient = torch.autograd.grad(loss,
                                                         inputs,
                                                         create_graph=True)[0]
                    penalty_inside_box, penalty_outside_box = self.calculate_penalty_box(
                        batch_indices, input_gradient)
                    new_loss = loss + lambda_1 * penalty_inside_box + lambda_2 * penalty_outside_box
                    optimizer.zero_grad()
                    new_loss.backward()
                    optimizer.step()

                else:
                    outputs = model(inputs)
                    preds = torch.argmax(outputs, dim=1)

                    # cross entropy loss
                    loss = criterion(outputs, labels)
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    penalty_inside_box = torch.tensor(0).to(self.device)
                    penalty_outside_box = torch.tensor(0).to(self.device)

                train_loss += loss.item()
                train_correct += torch.sum(preds == labels).float().item()
                penalty_inside += penalty_inside_box.item() * lambda_1
                penalty_outside += penalty_outside_box.item() * lambda_2

            train_loss = train_loss / self.train_dataset_length
            train_loss_list.append(train_loss)
            train_acc = (train_correct / self.train_dataset_length) * 100.0
            train_acc_list.append(train_acc)
            penalty_inside = penalty_inside / self.train_dataset_length
            penalty_outside = penalty_outside / self.train_dataset_length
            penalty_inside_list.append(penalty_inside)
            penalty_outside_list.append(penalty_outside)

            print('Train Loss: {:.4f} Acc: {:.4f} % '.format(
                train_loss, train_acc))
            print(f'Penalty Inside Box: {round(penalty_inside, 4)}')
            print(f'Penalty Outside Box: {round(penalty_outside, 4)}')

            # validate after each epoch
            val_correct = 0.0
            val_loss = 0.0
            model.eval()
            with torch.no_grad():
                for inputs_val, labels_val in self.val_loader:
                    inputs_val, labels_val = inputs_val.to(
                        self.device), labels_val.to(self.device)
                    outputs_val = model(inputs_val)
                    preds_val = torch.argmax(outputs_val, dim=1)
                    loss_test = criterion(outputs_val, labels_val)

                    val_loss += loss_test.item()
                    val_correct += torch.sum(
                        preds_val == labels_val).float().item()

            val_loss = val_loss / self.val_dataset_length
            val_loss_list.append(val_loss)
            val_acc = (val_correct / self.val_dataset_length) * 100.0
            val_acc_list.append(val_acc)
            print('Val Loss: {:.4f} Acc: {:.4f} % \n'.format(
                val_loss, val_acc))

            # save the best model
            if val_acc > best_acc:
                best_acc = val_acc
                if os.path.exists(self.checkpoint_path):
                    os.remove(self.checkpoint_path)

                torch.save(model.state_dict(), self.checkpoint_path)

        return_dict = {
            'train_acc_list': train_acc_list,
            'train_loss_list': train_loss_list,
            'penalty_inside_list': penalty_inside_list,
            'penalty_outside_list': penalty_outside_list,
            'val_loss_list': val_loss_list,
            'val_acc_list': val_acc_list,
            'best_acc': best_acc
        }

        return return_dict
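
calculate_penalty_box is not shown; a hedged sketch of the semantics the 'bbox' branch implies, with squared input gradients accumulated separately inside and outside per-image box masks (the masks argument is an assumption):

import torch

def penalty_box(input_gradient, masks):
    # masks: float tensor, 1 inside the annotated box, 0 outside,
    # broadcastable to input_gradient's shape
    grad_sq = input_gradient ** 2
    penalty_inside = (grad_sq * masks).sum()
    penalty_outside = (grad_sq * (1 - masks)).sum()
    return penalty_inside, penalty_outside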
Example #14
# ## Import modules

# In[2]:

import os
import cv2
import numpy as np
import tensorflow as tf
import tflearn
from batch_loader import BatchLoader

slim = tf.contrib.slim
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

train_batch_loader = BatchLoader("./train.txt", 120)
test_batch_loader = BatchLoader("./test.txt", 100)
# ## Construct network

# In[3]:

with tf.name_scope('input'):
    input_images = tf.placeholder(tf.float32,
                                  shape=(None, 100, 100, 3),
                                  name='input_images')
    labels = tf.placeholder(tf.int64, shape=(None), name='labels')

global_step = tf.Variable(0, trainable=False, name='global_step')

# In[4]:
Example #15
  def __init__(self, sess,
               batch_size=100, rnn_size=650, layer_depth=2,
               word_embed_dim=650, char_embed_dim=15,
               feature_maps=[50, 100, 150, 200, 200, 200, 200],
               kernels=[1,2,3,4,5,6,7], seq_length=35, max_word_length=65,
               use_word=False, use_char=True, hsm=0, max_grad_norm=5,
               highway_layers=2, dropout_prob=0.5, use_batch_norm=True,
               checkpoint_dir="checkpoint", forward_only=False,
               data_dir="data", dataset_name="pdb", use_progressbar=False):
    """
    Initialize the parameters for LSTM TDNN

    Args:
      rnn_size: the dimensionality of hidden layers
      layer_depth: # of depth in LSTM layers
      batch_size: size of batch per epoch
      word_embed_dim: the dimensionality of word embeddings
      char_embed_dim: the dimensionality of character embeddings
      feature_maps: list of feature maps (for each kernel width)
      kernels: list of kernel widths
      seq_length: max length of a word
      use_word: whether to use word embeddings or not
      use_char: whether to use character embeddings or not
      highway_layers: # of highway layers to use
      dropout_prob: the probability of dropout
      use_batch_norm: whether to use batch normalization or not
      hsm: whether to use hierarchical softmax
    """
    self.sess = sess

    self.batch_size = batch_size
    self.seq_length = seq_length

    # RNN
    self.rnn_size = rnn_size
    self.layer_depth = layer_depth

    # CNN
    self.use_word = use_word
    self.use_char = use_char
    self.word_embed_dim = word_embed_dim
    self.char_embed_dim = char_embed_dim
    self.feature_maps = feature_maps
    self.kernels = kernels

    # General
    self.highway_layers = highway_layers
    self.dropout_prob = dropout_prob
    self.use_batch_norm = use_batch_norm

    # Training
    self.max_grad_norm = max_grad_norm
    self.max_word_length = max_word_length
    self.hsm = hsm

    self.data_dir = data_dir
    self.dataset_name = dataset_name
    self.checkpoint_dir = checkpoint_dir

    self.forward_only = forward_only
    self.use_progressbar = use_progressbar

    self.loader = BatchLoader(self.data_dir, self.dataset_name, self.batch_size, self.seq_length, self.max_word_length)
    print('Word vocab size: %d, Char vocab size: %d, Max word length (incl. padding): %d' % \
        (len(self.loader.idx2word), len(self.loader.idx2char), self.loader.max_word_length))

    self.max_word_length = self.loader.max_word_length
    self.char_vocab_size = len(self.loader.idx2char)
    self.word_vocab_size = len(self.loader.idx2word)

    # build LSTMTDNN model
    self.prepare_model()

    # load checkpoints
    if self.forward_only:
      if self.load(self.checkpoint_dir, self.dataset_name):
        print("[*] SUCCESS to load model for %s." % self.dataset_name)
      else:
        print("[!] Failed to load model for %s." % self.dataset_name)
        sys.exit(1)
Example #16
class LSTMTDNN(Model):
  """
  Time-delayed Neural Network (cf. http://arxiv.org/abs/1508.06615v4)
  """
  def __init__(self, sess,
               batch_size=100, rnn_size=650, layer_depth=2,
               word_embed_dim=650, char_embed_dim=15,
               feature_maps=[50, 100, 150, 200, 200, 200, 200],
               kernels=[1,2,3,4,5,6,7], seq_length=35, max_word_length=65,
               use_word=False, use_char=True, hsm=0, max_grad_norm=5,
               highway_layers=2, dropout_prob=0.5, use_batch_norm=True,
               checkpoint_dir="checkpoint", forward_only=False,
               data_dir="data", dataset_name="pdb", use_progressbar=False):
    """
    Initialize the parameters for LSTM TDNN

    Args:
      rnn_size: the dimensionality of hidden layers
      layer_depth: # of depth in LSTM layers
      batch_size: size of batch per epoch
      word_embed_dim: the dimensionality of word embeddings
      char_embed_dim: the dimensionality of character embeddings
      feature_maps: list of feature maps (for each kernel width)
      kernels: list of kernel widths
      seq_length: max length of a word
      use_word: whether to use word embeddings or not
      use_char: whether to use character embeddings or not
      highway_layers: # of highway layers to use
      dropout_prob: the probability of dropout
      use_batch_norm: whether to use batch normalization or not
      hsm: whether to use hierarchical softmax
    """
    self.sess = sess

    self.batch_size = batch_size
    self.seq_length = seq_length

    # RNN
    self.rnn_size = rnn_size
    self.layer_depth = layer_depth

    # CNN
    self.use_word = use_word
    self.use_char = use_char
    self.word_embed_dim = word_embed_dim
    self.char_embed_dim = char_embed_dim
    self.feature_maps = feature_maps
    self.kernels = kernels

    # General
    self.highway_layers = highway_layers
    self.dropout_prob = dropout_prob
    self.use_batch_norm = use_batch_norm

    # Training
    self.max_grad_norm = max_grad_norm
    self.max_word_length = max_word_length
    self.hsm = hsm

    self.data_dir = data_dir
    self.dataset_name = dataset_name
    self.checkpoint_dir = checkpoint_dir

    self.forward_only = forward_only
    self.use_progressbar = use_progressbar

    self.loader = BatchLoader(self.data_dir, self.dataset_name, self.batch_size, self.seq_length, self.max_word_length)
    print('Word vocab size: %d, Char vocab size: %d, Max word length (incl. padding): %d' % \
        (len(self.loader.idx2word), len(self.loader.idx2char), self.loader.max_word_length))

    self.max_word_length = self.loader.max_word_length
    self.char_vocab_size = len(self.loader.idx2char)
    self.word_vocab_size = len(self.loader.idx2word)

    # build LSTMTDNN model
    self.prepare_model()

    # load checkpoints
    if self.forward_only:
      if self.load(self.checkpoint_dir, self.dataset_name):
        print("[*] SUCCESS to load model for %s." % self.dataset_name)
      else:
        print("[!] Failed to load model for %s." % self.dataset_name)
        sys.exit(1)

  def prepare_model(self):
    with tf.variable_scope("LSTMTDNN"):
      self.char_inputs = []
      self.word_inputs = []
      self.cnn_outputs = []

      if self.use_char:
        char_W = tf.get_variable("char_embed",
            [self.char_vocab_size, self.char_embed_dim])
      if self.use_word:
        word_W = tf.get_variable("word_embed",
            [self.word_vocab_size, self.word_embed_dim])

      with tf.variable_scope("CNN") as scope:
        self.char_inputs = tf.placeholder(tf.int32, [self.batch_size, self.seq_length, self.max_word_length])
        self.word_inputs = tf.placeholder(tf.int32, [self.batch_size, self.seq_length])

        char_indices = tf.split(1, self.seq_length, self.char_inputs)
        word_indices = tf.split(1, self.seq_length, tf.expand_dims(self.word_inputs, -1))

        for idx in xrange(self.seq_length):
          char_index = tf.reshape(char_indices[idx], [-1, self.max_word_length])
          word_index = tf.reshape(word_indices[idx], [-1, 1])

          if idx != 0:
            scope.reuse_variables()

          if self.use_char:
            # [batch_size x word_max_length, char_embed]
            char_embed = tf.nn.embedding_lookup(char_W, char_index)

            char_cnn = TDNN(char_embed, self.char_embed_dim, self.feature_maps, self.kernels)

            if self.use_word:
              word_embed = tf.nn.embedding_lookup(word_W, word_index)
              cnn_output = tf.concat(1, [char_cnn.output, tf.squeeze(word_embed, [1])])
            else:
              cnn_output = char_cnn.output
          else:
            cnn_output = tf.squeeze(tf.nn.embedding_lookup(word_W, word_index))

          if self.use_batch_norm:
            bn = batch_norm()
            norm_output = bn(tf.expand_dims(tf.expand_dims(cnn_output, 1), 1))
            cnn_output = tf.squeeze(norm_output)

          if highway:
            #cnn_output = highway(input_, input_dim_length, self.highway_layers, 0)
            cnn_output = highway(cnn_output, cnn_output.get_shape()[1], self.highway_layers, 0)

          self.cnn_outputs.append(cnn_output)

      with tf.variable_scope("LSTM") as scope:
        self.cell = tf.nn.rnn_cell.BasicLSTMCell(self.rnn_size)
        self.stacked_cell = tf.nn.rnn_cell.MultiRNNCell([self.cell] * self.layer_depth)

        outputs, _ = tf.nn.rnn(self.stacked_cell,
                               self.cnn_outputs,
                               dtype=tf.float32)

        self.lstm_outputs = []
        self.true_outputs = tf.placeholder(tf.int64,
            [self.batch_size, self.seq_length])

        loss = 0
        true_outputs = tf.split(1, self.seq_length, self.true_outputs)

        for idx, (top_h, true_output) in enumerate(zip(outputs, true_outputs)):
          if self.dropout_prob > 0:
            top_h = tf.nn.dropout(top_h, self.dropout_prob)

          if self.hsm > 0:
            self.lstm_outputs.append(top_h)
          else:
            if idx != 0:
              scope.reuse_variables()
            proj = tf.nn.rnn_cell._linear(top_h, self.word_vocab_size, 0)
            self.lstm_outputs.append(proj)

          loss += tf.nn.sparse_softmax_cross_entropy_with_logits(self.lstm_outputs[idx], tf.squeeze(true_output))

        self.loss = tf.reduce_mean(loss) / self.seq_length

        tf.scalar_summary("loss", self.loss)
        tf.scalar_summary("perplexity", tf.exp(self.loss))

  def train(self, epoch):
    cost = 0
    target = np.zeros([self.batch_size, self.seq_length]) 

    N = self.loader.sizes[0]
    for idx in xrange(N):
      target.fill(0)
      x, y, x_char = self.loader.next_batch(0)
      for b in xrange(self.batch_size):
        for t, w in enumerate(y[b]):
          target[b][t] = w

      feed_dict = {
          self.word_inputs: x,
          self.char_inputs: x_char,
          self.true_outputs: target,
      }

      _, loss, step, summary_str = self.sess.run(
          [self.optim, self.loss, self.global_step, self.merged_summary], feed_dict=feed_dict)

      self.writer.add_summary(summary_str, step)

      if idx % 50 == 0:
        if self.use_progressbar:
          progress(idx/N, "epoch: [%2d] [%4d/%4d] loss: %2.6f" % (epoch, idx, N, loss))
        else:
          print("epoch: [%2d] [%4d/%4d] loss: %2.6f" % (epoch, idx, N, loss))

      cost += loss
    return cost / N

  def test(self, split_idx, max_batches=None):
    if split_idx == 1:
      set_name = 'Valid'
    else:
      set_name = 'Test'

    N = self.loader.sizes[split_idx]
    if max_batches is not None:
      N = min(max_batches, N)

    self.loader.reset_batch_pointer(split_idx)
    target = np.zeros([self.batch_size, self.seq_length]) 

    cost = 0
    for idx in xrange(N):
      target.fill(0)

      x, y, x_char = self.loader.next_batch(split_idx)
      for b in xrange(self.batch_size):
        for t, w in enumerate(y[b]):
          target[b][t] = w

      feed_dict = {
          self.word_inputs: x,
          self.char_inputs: x_char,
          self.true_outputs: target,
      }

      loss = self.sess.run(self.loss, feed_dict=feed_dict)

      if idx % 50 == 0:
        if self.use_progressbar:
          progress(idx/N, "> %s: loss: %2.6f, perplexity: %2.6f" % (set_name, loss, np.exp(loss)))
        else:
          print(" > %s: loss: %2.6f, perplexity: %2.6f" % (set_name, loss, np.exp(loss)))

      cost += loss

    cost = cost / N
    return cost

  def run(self, epoch=25, 
          learning_rate=1, learning_rate_decay=0.5):
    self.current_lr = learning_rate

    self.lr = tf.Variable(learning_rate, trainable=False)
    self.opt = tf.train.GradientDescentOptimizer(self.lr)
    #self.opt = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(self.loss)

    # clip gradients
    params = tf.trainable_variables()
    grads = []
    for grad in tf.gradients(self.loss, params):
      if grad is not None:
        grads.append(tf.clip_by_norm(grad, self.max_grad_norm))
      else:
        grads.append(grad)

    self.global_step = tf.Variable(0, name="global_step", trainable=False)
    self.optim = self.opt.apply_gradients(zip(grads, params),
                                          global_step=self.global_step)

    # ready for train
    tf.initialize_all_variables().run()

    if self.load(self.checkpoint_dir, self.dataset_name):
      print("[*] SUCCESS to load model for %s." % self.dataset_name)
    else:
      print("[!] Failed to load model for %s." % self.dataset_name)

    self.saver = tf.train.Saver()
    self.merged_summary = tf.merge_all_summaries()
    self.writer = tf.train.SummaryWriter("./logs", self.sess.graph_def)

    self.log_loss = []
    self.log_perp = []

    if not self.forward_only:
      for idx in xrange(epoch):
        train_loss = self.train(idx)
        valid_loss = self.test(1)

        # Logging
        self.log_loss.append([train_loss, valid_loss])
        self.log_perp.append([np.exp(train_loss), np.exp(valid_loss)])

        state = {
          'perplexity': np.exp(train_loss),
          'epoch': idx,
          'learning_rate': self.current_lr,
          'valid_perplexity': np.exp(valid_loss)
        }
        print(state)

        # Learning rate annealing
        if len(self.log_loss) > 1 and self.log_loss[idx][1] > self.log_loss[idx-1][1] * 0.9999:
          self.current_lr = self.current_lr * learning_rate_decay
          self.lr.assign(self.current_lr).eval()
        if self.current_lr < 1e-5: break

        if idx % 2 == 0:
          self.save(self.checkpoint_dir, self.dataset_name)

    test_loss = self.test(2)
    print("[*] Test loss: %2.6f, perplexity: %2.6f" % (test_loss, np.exp(test_loss)))
Example #17
    def setup(self, bottom, top):
        layer_params = yaml.safe_load(self.param_str)
        self.batch_size = layer_params['batch_size']
        self.image_file = layer_params['image_file']
        self.batch_loader = BatchLoader(self.image_file, self.batch_size)
Example #18
def main():
    # setting parameters
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--num_factors',
                        type=float,
                        default=10,
                        help='embedding size')
    parser.add_argument('--model', type=str, default='mlp_bpr', help='model')
    parser.add_argument('--epoches', type=int, default=1, help='number of epochs')
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.01,
                        help='learning rate')
    parser.add_argument('--reg_lambda',
                        type=float,
                        default=1.0,
                        help='l2_regularizer lambda')
    parser.add_argument('--layers',
                        nargs='?',
                        default='[10,1]',
                        help="Size of each layer.")
    parser.add_argument('--batch_size',
                        type=int,
                        default=10000,
                        help='minibatch size')
    parser.add_argument(
        '--recom_mode',
        type=str,
        default='p_u',
        help='recommendation mode, u_p: users to items, p_u: items to users')
    parser.add_argument('--decay_rate',
                        type=float,
                        default=0.99,
                        help='decay rate for Adam')
    parser.add_argument('--keep_prob',
                        type=float,
                        default=0.2,
                        help='dropout probability')
    parser.add_argument(
        '--uti_k',
        type=int,
        default=30,
        help='top-k recommendation for recommending items to user')
    parser.add_argument(
        '--itu_k',
        type=int,
        default=100,
        help='top-k recommendation for recommending users to item')
    parser.add_argument('--log_dir',
                        type=str,
                        default='logs',
                        help='directory to store tensorboard logs')
    parser.add_argument(
        '--mode',
        type=str,
        default='validation',
        help=
        'train: only train the model, validation: train the model and test it with test data, predict: predict new data'
    )
    parser.add_argument(
        '--dev',
        type=str,
        default='cpu',
        help=
        'training by CPU or GPU, input cpu or gpu:0 or gpu:1 or gpu:2 or gpu:3'
    )
    parser.add_argument(
        '--pIter',
        type=int,
        default=2,
        help='how many rounds of iterations show the effect on the test set')
    args = parser.parse_args()

    if args.mode == 'validation':
        # read data from file
        train_file = 'data/train/train_u.csv'
        test_file = 'data/test/test_u.csv'
        test_count1_file = 'data/test/test_u_count1.csv'

        train_X, train_y, test_X, test_y, test_count1_X, test_count1_y, user_pro_dict, pro_user_dict, user_index_map = utils.read_data_val(
            train_file, test_file, test_count1_file)
        print(test_count1_X)
        # read feature data from file
        user_file = 'data/mid/user_features.csv'
        item_file = 'data/mid/item_features.csv'
        user_feature_df = utils.read_user_data(user_file, user_index_map)
        item_feature_df = utils.read_item_data(item_file)

        # generate batches
        batch_loader = BatchLoader(args.batch_size, train_X, train_y)

        args.num_users = np.max([np.max(train_X['uid'])]) + 1
        args.num_items = np.max([np.max(train_X['pid'])]) + 1

        model = Model(args, args.recom_mode, batch_loader)

        model.train_val(test_X, user_pro_dict, pro_user_dict, user_feature_df,
                        item_feature_df)
        #pred_dict = model.test(test_X, user_pro_dict, pro_user_dict, user_feature_df, item_feature_df)
        #if args.recom_mode=='u_p':
        #	print(model.cal_MRR(pred_dict, test_X))
        #elif args.recom_mode=='p_u':
        #	print(model.cal_pre_k(pred_dict, test_X))

        pred_dict = model.cold_start_test(test_count1_X, user_feature_df,
                                          item_feature_df)
        print(model.cal_pre_k(pred_dict, test_count1_X))
    elif args.mode == 'train':
        # read data from file
        train_file = 'data/train/train_u.csv'
        train_X, train_y, user_pro_dict, pro_user_dict, user_index_map = utils.read_data(
            train_file)
        # read feature data from file
        user_file = 'data/mid/user_features.csv'
        item_file = 'data/mid/item_features.csv'
        user_feature_df = utils.read_user_data(user_file, user_index_map)
        item_feature_df = utils.read_item_data(item_file)

        # generate batches
        batch_loader = BatchLoader(args.batch_size, train_X, train_y)

        args.num_users = np.max(train_X['uid']) + 1
        args.num_items = len(item_feature_df) + 1
        model = Model(args, args.recom_mode, batch_loader)

        # train and save models
        model.train(user_pro_dict, pro_user_dict, user_feature_df,
                    item_feature_df)
        # save the user and index
        utils.write_file(user_index_map['user'], "save/user_index_map")

    elif args.mode == 'predict':
        user_index_file = 'save/user_index_map'
        user_index_map = pd.read_csv(user_index_file, names=['user'])
        args.num_users = len(user_index_map)
        user_file = 'data/data_pred/user_file.txt'
        #user_file = 'data/mid/user_features.csv.liaoning'
        user_df = pd.read_csv(user_file, names=['user'])
        #user_df = pd.read_csv(user_file,header=None)
        #user_df.rename(columns={0: 'user'}, inplace=True)
        #user_df = user_df['user']

        item_file = 'data/data_pred/item_file.txt'
        item_df = pd.read_csv(item_file, names=['pid'])

        user_index_map = user_index_map.append(
            user_df[['user']]).drop_duplicates(['user']).reset_index(drop=True)
        user_index_map['uid'] = user_index_map.index

        user_df = pd.merge(user_df,
                           user_index_map,
                           left_on='user',
                           right_on='user',
                           how='left')
        #del user_df['user']

        # read feature data from file
        user_file = 'data/mid/user_features.csv'
        item_file = 'data/mid/item_features.csv'
        user_feature_df = utils.read_user_data(user_file, user_index_map)
        item_feature_df = utils.read_item_data(item_file)
        args.num_items = len(item_feature_df) + 1

        out_file = 'data/predict/predictions'
        model = Model(args, args.recom_mode)
        result_list = model.predict(user_df['uid'].values.tolist(),
                                    item_df['pid'].values.tolist(),
                                    user_feature_df, item_feature_df, out_file)
        user_list = user_df['user'].values.tolist()
        head_line = "cate_id,"
        head_line += (',').join(user_list)
        result_list.insert(0, head_line)
        with codecs.open(out_file, 'w', encoding='utf-8') as f:
            for val in result_list:
                f.write(val + "\n")
    else:
        print('incorrect mode input...')
Example #19
from batch_loader import BatchLoader

import config
import random

if __name__ == '__main__':
    param = {"net": "pnet", "batch": 64}
    batch_loader = BatchLoader(param)
    task = random.choice(config.TRAIN_TASKS[param['net']])
    data = batch_loader.next_batch(64, task)
    for datum in data:
        print(datum)
Example #20
def main():
    params = args()
    model_prefix = params.model_prefix
    load_epoch = params.load_epoch
    batch_size = params.batch_size
    img1_path = params.image1
    img2_path = params.image2
    img_shape = params.image_shape
    img_dir = params.img_dir
    txt_path = params.file_txt
    # in out files
    file_ = open(txt_path, 'r')
    lines_ = file_.readlines()
    result_ = open("result.txt", 'w')
    #
    if Test_Img == 'True':
        img1 = cv2.imread(img1_path)
        img2 = cv2.imread(img2_path)
        img1 = cv2.resize(img1, (img_shape, img_shape))
        img2 = cv2.resize(img2, (img_shape, img_shape))
        img1 = np.expand_dims(img1, 0)
        img2 = np.expand_dims(img2, 0)

    test_batch_loader = BatchLoader("../data/facescrub_val.list", batch_size,
                                    img_shape)

    tf.reset_default_graph()
    with tf.name_scope('input'):
        input_images = tf.placeholder(tf.float32,
                                      shape=(batch_size, img_shape, img_shape,
                                             3),
                                      name='input_images')
        labels = tf.placeholder(tf.int32, shape=(batch_size), name='labels')
    features, accuracy, pred_class, res1 = build_network(
        input_images, labels, 526, 'test')
    check_ckpt(model_prefix + '-' + str(load_epoch))
    #detect
    detector = face_dect()
    #
    with tf.Session() as sess:
        restore_model = tf.train.Saver()
        #restore_model = tf.train.import_meta_graph(model_prefix+'-'+str(load_epoch) +'.meta')
        restore_model.restore(sess, model_prefix + '-' + str(load_epoch))
        print("face model restore over")
        '''
        all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        key_list = []
        var_dic = dict()
        for v_name in tf.global_variables():
            i=0
            print("name : ",v_name.name[:-2],v_name.shape) 
            print("shape",all_vars[i])
            key_list.append(v_name.name[:-2])
            i+=1
            #print(tf.get_variable_scope())
        #all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        vas= sess.run([all_vars])
        print(len(vas))
        for i in range(len(vas)):
            cur_name = key_list[i]
            cur_var = vas[i]
            print("name ,shape : ",cur_name,np.shape(cur_var))
            var_dic[cur_name] = cur_var
        '''
        #restore_model = tf.train.import_meta_graph(model_prefix+'-'+str(load_epoch)+'.meta')
        #restore_model.restore(sess,model_prefix+'-'+str(load_epoch))
        if Test_Img == 'False':
            iter_num = 0
            accuracy_sum = 0
            while iter_num < test_batch_loader.batch_num:
                batch_images, batch_labels = test_batch_loader.next_batch()
                images_in = (batch_images - 127.5) * 0.0078125
                feat, batch_accuracy = sess.run([features, accuracy],
                                                feed_dict={
                                                    input_images: images_in,
                                                    labels: batch_labels
                                                })
                accuracy_sum += batch_accuracy
                iter_num += 1
                if iter_num % 10 == 0:
                    print("step ", iter_num, batch_accuracy)
            print("image num: ", test_batch_loader.data_num,
                  "the test accuracy: ",
                  accuracy_sum / (test_batch_loader.batch_num))
        elif Test_Img == 'True':
            with tf.name_scope('valdata'):
                label_t = np.zeros([1])
                feat1 = sess.run([features],
                                 feed_dict={
                                     input_images: img1,
                                     labels: label_t
                                 })
                feat2 = sess.run([features],
                                 feed_dict={
                                     input_images: img2,
                                     labels: label_t
                                 })
                distance = L2_distance(feat1, feat2, 512)
            print("the 2 image dis: ", distance)
            for rot, fdir, fname in os.walk(img_dir):
                if len(fname) != 0:
                    break
            img_list = []
            print(fname)
            for i in range(len(fname)):
                org_img = cv2.imread(os.path.join(rot, fname[i]))
                img_org = cv2.resize(org_img, (img_shape, img_shape))
                img_org = np.expand_dims(img_org, 0)
                img_list.append(img_org)
            for i in range(len(fname)):
                img1 = img_list[i]
                feat1 = sess.run([features],
                                 feed_dict={
                                     input_images: img1,
                                     labels: label_t
                                 })
                j = i + 1
                while j < len(fname):
                    img2 = img_list[j]
                    t1 = time.time()
                    feat2 = sess.run([features],
                                     feed_dict={
                                         input_images: img2,
                                         labels: label_t
                                     })
                    t2 = time.time()
                    print("one image time ", t2 - t1)
                    distance = L2_distance(feat1, feat2, 512)
                    t3 = time.time()
                    print("compere time ", t3 - t2)
                    print(i, j, distance)
                    j += 1
        elif Test_Img == 'File':
            label_t = np.ones([1])
            for i in range(len(lines_)):
                feat_vec = []
                feat1_fg = 0
                feat2_fg = 0
                line_1 = lines_[i]
                line_1 = line_1.strip()
                line_s = line_1.split(',')
                dir_path_save = line_s[0][:-4]
                dir_path_save = "../cropface/" + dir_path_save
                mkdir_(dir_path_save)
                for j in range(len(line_s)):
                    feat_vec2 = []
                    if j == 0:
                        #print("line ",line_s)
                        img1_pic = line_s[0]
                        img1_path = os.path.join(img_dir, img1_pic)
                        img1 = cv2.imread(img1_path)
                        bboxes_1 = detector.get_face(img1)
                        if bboxes_1 is not None:
                            for k in range(bboxes_1.shape[0]):
                                crop_img1 = get_img(img1, bboxes_1[k],
                                                    img_shape)
                                if k == 0 and SV_IMG:
                                    img_save_path = dir_path_save + '/' + line_s[
                                        0][:-4] + ".jpg"
                                    save_image(img_save_path, crop_img1)
                                crop_img1 = (crop_img1 - 127.5) * 0.0078125
                                crop_img1 = np.expand_dims(crop_img1, 0)
                                feat1 = sess.run([features],
                                                 feed_dict={
                                                     input_images: crop_img1,
                                                     labels: label_t
                                                 })
                                print("a feature shape ", np.shape(feat1))
                                feat_vec.append(feat1)
                                feat1_fg = 1
                        else:
                            print("a no face detect ")
                            break
                    else:
                        img2_pic = line_s[j]
                        img2_path = os.path.join(img_dir, img2_pic)
                        img2 = cv2.imread(img2_path)
                        bboxes_2 = detector.get_face(img2)
                        if bboxes_2 is not None:
                            for k in range(bboxes_2.shape[0]):
                                crop_img2 = get_img(img2, bboxes_2[k],
                                                    img_shape)
                                if SV_IMG:
                                    img_save_path = dir_path_save + '/' + line_s[
                                        j][:-4] + "-" + str(k) + ".jpg"
                                    save_image(img_save_path, crop_img2)
                                crop_img2 = (crop_img2 - 127.5) * 0.0078125
                                crop_img2 = np.expand_dims(crop_img2, 0)
                                feat2 = sess.run([features],
                                                 feed_dict={
                                                     input_images: crop_img2,
                                                     labels: label_t
                                                 })
                                print("b feature shape ", np.shape(feat2))
                                feat_vec2.append(feat2)
                                feat2_fg = 1
                        else:
                            print("b no face detect ")
                            continue
                    if j > 0:
                        t2 = time.time()
                        distance = L2_distance(feat_vec[0], feat_vec2[0], 512)
                        print("distance is ", distance)
                        t3 = time.time()
                        print("compere time ", t3 - t2)
                        result_.write("{} ".format(distance))
                result_.write("\n")
                print(feat2)
    file_.close()
    result_.close()
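
L2_distance is not shown; a plausible stand-in (an assumption) over the 512-dimensional embeddings returned by sess.run above:

import numpy as np

def L2_distance(feat1, feat2, dim):
    # sess.run([features], ...) returns a one-element list; flatten to (dim,)
    a = np.reshape(np.asarray(feat1), (dim,))
    b = np.reshape(np.asarray(feat2), (dim,))
    return np.sqrt(np.sum((a - b) ** 2))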
Example #21
class ContextEncoder_adv(object):
    def __init__(self,
                 batch_size=params.BATCH_SIZE,
                 nb_epochs=params.NB_EPOCHS,
                 mask=None,
                 experiment_path=params.EXPERIMENT_PATH,
                 use_adversarial_loss=params.USE_ADVERSARIAL_LOSS,
                 lambda_decay=params.LAMBDA_DECAY,
                 lambda_adversarial=params.LAMBDA_ADVERSARIAL,
                 patience=params.PATIENCE,
                 discr_whole_image=params.DISCR_WHOLE_IMAGE,
                 discr_loss_limit=params.DISCR_LOSS_LIMIT,
                 use_dropout=params.USE_DROPOUT):
        self.batch_size = batch_size
        self.nb_epochs = nb_epochs
        self.experiment_path = experiment_path
        self.save_path = os.path.join(self.experiment_path, "model/")
        self.save_best_path = os.path.join(self.experiment_path, "best_model/")
        self.logs_path = os.path.join(self.experiment_path, "logs")
        create_dir(self.save_path)
        create_dir(self.logs_path)

        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.phase = tf.placeholder(tf.bool, name='phase')
        self.patience = patience

        # parameters for adversarial loss
        self.use_adversarial_loss = use_adversarial_loss
        self.lambda_adversarial = lambda_adversarial
        if lambda_decay:
            self.lambda_adversarial = 1 - tf.train.exponential_decay(
                .1, self.global_step, 10000, .5, staircase=True)
        self.discr_whole_image = discr_whole_image
        self.discr_loss_limit = discr_loss_limit
        self.num_discr_trained = tf.Variable(tf.constant(0, dtype=tf.int32),
                                             trainable=False)

        self.use_dropout = use_dropout

        self.batch_loader = BatchLoader(self.batch_size)

        if mask is not None:
            self.np_mask = mask
        else:
            self.np_mask = np.zeros((1, 64, 64, 1))
            self.np_mask[:, 16:48, 16:48, :] = 1

        self._sess = tf.Session()
        tf.summary.scalar("lambda_adversarial", self.lambda_adversarial)
        tf.summary.scalar("num discr trained", self.num_discr_trained)

    def build_model(self):
        # x : input
        self.x = tf.placeholder(tf.float32, shape=[self.batch_size, 64, 64, 3])
        # self.x_float = 2 * tf.image.convert_image_dtype(self.x, dtype=tf.float32) - 1
        self.x_float = self.x / 255 * 2 - 1

        self.mask = tf.placeholder(tf.float32, shape=[1, 64, 64, 1])
        self.x_masked = self.x_float * (1 - self.mask)

        self._encode()
        self._channel_wise()
        self._decode()
        self._generate_image()
        self._reconstruction_loss()

        # adversarial loss
        self._init_discriminator_variables()
        self._adversarial_loss()

        self._optimize()
        self.merged_summary = tf.summary.merge_all()

    def _encode(self):
        with tf.name_scope("encode"):
            with tf.name_scope('weights'):
                self._W_conv1 = weight_variable([5, 5, 3, 128])
                self._W_conv2 = weight_variable([5, 5, 128, 256])
                self._W_conv3 = weight_variable([5, 5, 256, 512])
                self._W_conv4 = weight_variable([5, 5, 512, 512])
                self._W_conv5 = weight_variable([3, 3, 512, 512])
                variable_summaries(self._W_conv1)
                variable_summaries(self._W_conv2)
                variable_summaries(self._W_conv3)
                variable_summaries(self._W_conv4)
                variable_summaries(self._W_conv5)
            with tf.name_scope('biases'):
                self._b_conv1 = bias_variable([128])
                self._b_conv2 = bias_variable([256])
                self._b_conv3 = bias_variable([512])
                self._b_conv4 = bias_variable([512])
                self._b_conv5 = bias_variable([512])
                variable_summaries(self._b_conv1)
                variable_summaries(self._b_conv2)
                variable_summaries(self._b_conv3)
                variable_summaries(self._b_conv4)
                variable_summaries(self._b_conv5)

            # 64 64 3
            self.h_conv1 = activation_function(
                conv2d(self.x_masked,
                       self._W_conv1,
                       stride=1,
                       is_training=self.phase) + self._b_conv1)
            self.h_pool1 = avg_pool_2x2(self.h_conv1)

            # 32 32 128
            self.h_conv2 = activation_function(
                conv2d(self.h_pool1,
                       self._W_conv2,
                       stride=1,
                       is_training=self.phase) + self._b_conv2)
            self.h_pool2 = avg_pool_2x2(self.h_conv2)

            # 16 16 256
            self.h_conv3 = activation_function(
                conv2d(self.h_pool2,
                       self._W_conv3,
                       stride=1,
                       is_training=self.phase) + self._b_conv3)
            self.h_pool3 = avg_pool_2x2(self.h_conv3)

            # 8 8 512
            self.h_conv4 = activation_function(
                conv2d(self.h_pool3,
                       self._W_conv4,
                       stride=1,
                       is_training=self.phase) + self._b_conv4)
            self.h_pool4 = avg_pool_2x2(self.h_conv4)

            # 4 4 512
            self.h_conv5 = activation_function(
                conv2d(self.h_pool4,
                       self._W_conv5,
                       stride=1,
                       is_training=self.phase) + self._b_conv5)

            # 4 4 512
            if self.use_dropout:
                keep_prob = tf.cond(self.phase, lambda: tf.constant(.5),
                                    lambda: tf.constant(1.))
                self.h_conv5_drop = tf.nn.dropout(self.h_conv5, keep_prob)
            else:
                self.h_conv5_drop = self.h_conv5

    def _channel_wise(self):
        with tf.name_scope('channel_wise'):
            with tf.name_scope('weights'):
                self._W_fc1 = weight_variable([512, 4 * 4, 4 * 4])
                variable_summaries(self._W_fc1)
            with tf.name_scope('biases'):
                self._b_fc1 = bias_variable([512])
                variable_summaries(self._b_fc1)
            # channel-wise fully connected: each of the 512 feature maps
            # gets its own 16x16 dense transform (a transpose is required
            # here; a raw reshape would scramble the channels)
            self.h_conv5_flat_img = tf.transpose(
                tf.reshape(self.h_conv5_drop, [self.batch_size, 4 * 4, 512]),
                [2, 0, 1])
            self.h_fc1 = activation_function(
                tf.transpose(tf.matmul(self.h_conv5_flat_img, self._W_fc1),
                             [1, 2, 0]) + self._b_fc1)

            self.h_fc1_img = tf.reshape(self.h_fc1,
                                        [self.batch_size, 4, 4, 512])

    def _decode(self):
        with tf.name_scope('decode'):
            with tf.name_scope('weights'):
                self._W_uconv1 = weight_variable([5, 5, 512, 512])
                self._W_uconv2 = weight_variable([5, 5, 256, 512])
                self._W_uconv3 = weight_variable([5, 5, 128, 256])
            with tf.name_scope('biases'):
                self._b_uconv1 = bias_variable([512])
                self._b_uconv2 = bias_variable([256])
                self._b_uconv3 = bias_variable([128])
            self.h_uconv1 = activation_function(
                uconv2d(self.h_fc1_img,
                        self._W_uconv1,
                        output_shape=[self.batch_size, 8, 8, 512],
                        stride=2,
                        is_training=self.phase) + self._b_uconv1)

            # 8 8 512
            self.h_uconv2 = activation_function(
                uconv2d(self.h_uconv1,
                        self._W_uconv2,
                        output_shape=[self.batch_size, 16, 16, 256],
                        stride=2,
                        is_training=self.phase) + self._b_uconv2)

            # 16 16 256
            self.h_uconv3 = activation_function(
                uconv2d(self.h_uconv2,
                        self._W_uconv3,
                        output_shape=[self.batch_size, 32, 32, 128],
                        stride=2,
                        is_training=self.phase) + self._b_uconv3)

            # 32 32 128
            if self.use_dropout:
                keep_prob = tf.cond(self.phase, lambda: tf.constant(.5),
                                    lambda: tf.constant(1.))
                self.h_uconv3_drop = tf.nn.dropout(self.h_uconv3, keep_prob)
            else:
                self.h_uconv3_drop = self.h_uconv3

    def _generate_image(self):
        with tf.name_scope('generated_image'):
            self._W_uconv4 = weight_variable([5, 5, 3, 128])
            self._b_uconv4 = bias_variable([3])
            self.y = tf.nn.tanh(
                uconv2d(self.h_uconv3_drop,
                        self._W_uconv4,
                        output_shape=[self.batch_size, 32, 32, 3],
                        stride=1,
                        is_training=self.phase) + self._b_uconv4)
            # 32 32 3
            self.y_padded = tf.pad(self.y,
                                   [[0, 0], [16, 16], [16, 16], [0, 0]])
            self.generated_image = self.y_padded + self.x_masked
            # 64 64 3
            tf.summary.image("original_image", self.x_float, max_outputs=12)
            tf.summary.image("generated_image",
                             self.generated_image,
                             max_outputs=12)

    def _build_reconstruction_loss(self):
        with tf.name_scope('reconstruction_loss'):
            self._reconstruction_loss = tf.nn.l2_loss(
                self.mask * (self.x_float - self.y_padded)) / self.batch_size
            tf.summary.scalar('reconstruction_loss', self._reconstruction_loss)

    def _init_discriminator_variables(self):
        with tf.name_scope('discriminator'):
            with tf.name_scope('weights'):
                self._W_discr1 = weight_variable([5, 5, 3, 128])
                self._W_discr2 = weight_variable([5, 5, 128, 256])
                self._W_discr3 = weight_variable([5, 5, 256, 512])
                if self.discr_whole_image:
                    self._W_discr4 = weight_variable([5, 5, 512, 512])
                self._W_dfc = weight_variable([4 * 4 * 512, 1])

            with tf.name_scope('biases'):
                self._b_discr1 = bias_variable([128])
                self._b_discr2 = bias_variable([256])
                self._b_discr3 = bias_variable([512])
                if self.discr_whole_image:
                    self._b_discr4 = bias_variable([512])
                self._b_dfc = bias_variable([1])

    def _discriminator_encoder(self, image):
        with tf.name_scope('discriminator_encoder'):
            # image is 32 32 3 OR 64 64 3 (if whole image)
            h_d1 = activation_function(
                conv2d(image, self._W_discr1, stride=1, is_training=self.phase)
                + self._b_discr1)
            h_dpool1 = avg_pool_2x2(h_d1)

            # 16 16 128 OR 32 32 128 (if whole image)
            h_d2 = activation_function(
                conv2d(
                    h_dpool1, self._W_discr2, stride=1, is_training=self.phase)
                + self._b_discr2)
            h_dpool2 = avg_pool_2x2(h_d2)

            # 8 8 256 OR 16 16 256 (if whole image)
            h_d3 = activation_function(
                conv2d(
                    h_dpool2, self._W_discr3, stride=1, is_training=self.phase)
                + self._b_discr3)
            h_dpool3 = avg_pool_2x2(h_d3)

            if self.discr_whole_image:
                # 8 8 512 (if whole image)
                h_d4 = activation_function(
                    conv2d(h_dpool3,
                           self._W_discr4,
                           stride=1,
                           is_training=self.phase) + self._b_discr4)
                h_dfinal = avg_pool_2x2(h_d4)
            else:
                h_dfinal = h_dpool3

            # 4 4 512
            h_dfinal_flat = tf.reshape(h_dfinal,
                                       [self.batch_size, 4 * 4 * 512])
            if self.use_dropout:
                keep_prob = tf.cond(self.phase, lambda: tf.constant(.5),
                                    lambda: tf.constant(1.))
                h_dfinal_drop = tf.nn.dropout(h_dfinal_flat, keep_prob)
            else:
                h_dfinal_drop = h_dfinal_flat
            discr = tf.matmul(h_dfinal_drop, self._W_dfc) + self._b_dfc
            return discr

    def _adversarial_loss(self):
        with tf.name_scope('adversarial_loss'):
            self._discr_variables = [
                v for v in tf.trainable_variables()
                if v.name.startswith('discriminator')
            ]
            self._gen_variables = [
                v for v in tf.trainable_variables()
                if not v.name.startswith('discriminator')
            ]
            print(len(self._discr_variables), "DISCR VARIABLES ",
                  [v.name for v in self._discr_variables])
            print(len(self._gen_variables), "GEN VARIABLES",
                  [v.name for v in self._gen_variables])

            if self.discr_whole_image:
                # D(real img)
                real_discr = self._discriminator_encoder(self.x_float)
                # D(G(img))
                fake_discr = self._discriminator_encoder(self.generated_image)

            else:
                # discriminate the center of the image only
                self.real_img = tf.slice(self.x_float, [0, 16, 16, 0],
                                         [self.batch_size, 32, 32, 3])
                # D(real img)
                real_discr = self._discriminator_encoder(self.real_img)
                # D(G(img))
                fake_discr = self._discriminator_encoder(self.y)

            # one-sided label smoothing: real labels are 0.9 rather than 1.0,
            # a common trick to keep the discriminator from saturating
            real_discr_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=real_discr, labels=.9 * tf.ones_like(real_discr)))
            fake_discr_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=fake_discr, labels=tf.zeros_like(fake_discr)))

            self._discr_adversarial_loss = (real_discr_loss +
                                            fake_discr_loss) / 2
            self._gen_adversarial_loss = tf.reduce_mean(
                tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=fake_discr, labels=tf.ones_like(fake_discr)))

            # Discriminator loss
            self._discr_loss = self._discr_adversarial_loss
            # Generator loss (combination of reconstruction and adversarial loss)
            self._gen_loss = self.lambda_adversarial * self._gen_adversarial_loss + \
                             (1 - self.lambda_adversarial) * self._reconstruction_loss

            tf.summary.scalar("discr loss", self._discr_loss)
            tf.summary.scalar("gen full loss (adversarial and reconstruction)",
                              self._gen_loss)
            tf.summary.scalar("gen adversarial loss",
                              self._gen_adversarial_loss)

    def _optimize(self):
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.variable_scope("optimizer"):
            optimizer = tf.train.AdamOptimizer()

            with tf.control_dependencies(update_ops):
                # Context-encoder trained on reconstruction loss only
                grads = optimizer.compute_gradients(self._reconstruction_loss)
                self.train_fn = optimizer.apply_gradients(
                    grads, global_step=self.global_step)

                # Context-encoder with adversarial loss; only the generator
                # step advances global_step, so one iteration (discriminator
                # step + generator step) is counted once
                grads_discr = optimizer.compute_gradients(
                    loss=self._discr_loss, var_list=self._discr_variables)
                grads_gen = optimizer.compute_gradients(
                    loss=self._gen_loss, var_list=self._gen_variables)
                self.train_discr = optimizer.apply_gradients(grads_discr)
                self.train_gen = optimizer.apply_gradients(
                    grads_gen, global_step=self.global_step)

    def _compute_val_loss(self):
        n_val_batches = self.batch_loader.n_valid_batches // 2
        val_loss = 0
        for _ in tqdm(range(n_val_batches)):
            batch = self.batch_loader.load_batch(train=False)
            loss, summary_str = self._sess.run(
                [self._reconstruction_loss, self.merged_summary],
                feed_dict={
                    self.x: batch,
                    self.mask: self.np_mask,
                    self.phase: 0
                })
            val_loss += loss
        val_loss /= n_val_batches
        return val_loss, summary_str

    def _restore(self):
        """
        Retrieve last model saved if possible
        Create a main Saver object
        Create a SummaryWriter object
        Init variables
        :param save_name: string (default : model)
            Name of the model
        :return:
        """
        saver = tf.train.Saver(max_to_keep=2)
        # Try to restore an old model
        last_saved_model = tf.train.latest_checkpoint(self.save_path)

        self._sess.run(tf.global_variables_initializer())

        train_writer = tf.summary.FileWriter(os.path.join(
            self.logs_path, "train"),
                                             graph=self._sess.graph,
                                             flush_secs=20)
        val_writer = tf.summary.FileWriter(os.path.join(self.logs_path, "val"),
                                           graph=self._sess.graph,
                                           flush_secs=20)

        if last_saved_model is not None:
            saver.restore(self._sess, last_saved_model)
            print("[*] Restoring model {}".format(last_saved_model))
        else:
            print("[*] New model created")
        return saver, train_writer, val_writer

    def train(self):
        """
        Train the model
        :return:
        """
        # Retrieve a model or create a new
        saver, train_writer, val_writer = self._restore()

        epoch = 0
        n_train_batches = self.batch_loader.n_train_batches

        # Retrieve current global step
        last_step = self._sess.run(self.global_step)
        epoch += last_step // n_train_batches
        last_iter = last_step - n_train_batches * epoch
        print("last iter {}".format(last_iter))
        print("last step {}".format(last_step))
        print("epoch {}".format(epoch))
        # Iterate over epochs

        is_not_restart = False
        patience_count = 0
        best_val_loss = 1e10

        while epoch < self.nb_epochs:

            for i in tqdm(range(n_train_batches)):
                # when resuming mid-epoch, skip the batches that were already
                # processed before the restart
                if i < last_iter and not is_not_restart:
                    continue
                is_not_restart = True
                batch = self.batch_loader.load_batch(train=True)

                if self.use_adversarial_loss:
                    # no discr_loss_limit
                    if self.discr_loss_limit >= 1:
                        _ = self._sess.run(self.train_discr,
                                           feed_dict={
                                               self.x: batch,
                                               self.mask: self.np_mask,
                                               self.phase: 1
                                           })
                    # there is a discr_loss_limit
                    # train the discriminator only if its loss is higher than discr_loss_limit
                    else:
                        discr_loss = self._sess.run(self._discr_loss,
                                                    feed_dict={
                                                        self.x: batch,
                                                        self.mask:
                                                        self.np_mask,
                                                        self.phase: 1
                                                    })
                        if discr_loss >= self.discr_loss_limit:
                            self.num_discr_trained += 1
                            _ = self._sess.run(self.train_discr,
                                               feed_dict={
                                                   self.x: batch,
                                                   self.mask: self.np_mask,
                                                   self.phase: 1
                                               })

                if self.use_adversarial_loss:
                    ops = [self.train_gen, self.global_step]
                else:
                    ops = [self.train_fn, self.global_step]
                if i % 200 == 0:
                    ops.append(self.merged_summary)
                output = self._sess.run(ops,
                                        feed_dict={
                                            self.x: batch,
                                            self.mask: self.np_mask,
                                            self.phase: 1
                                        })

                if i % 200 == 0:
                    # print("nb of black and white images so far : {}".format(self.nb_bw_img))
                    train_writer.add_summary(output[-1], global_step=output[1])

            saver.save(self._sess,
                       global_step=output[1],
                       save_path=self.save_path)
            val_loss, summary_str = self._compute_val_loss()
            val_writer.add_summary(summary_str, global_step=output[1])
            val_writer.add_summary(
                tf.Summary(value=[
                    tf.Summary.Value(tag="val_loss", simple_value=val_loss)
                ]),
                global_step=output[1])
            cprint("Epoch {}".format(epoch), color="yellow")

            # early stopping
            if val_loss < best_val_loss:
                best_val_loss = val_loss
                patience_count = 0
            else:
                patience_count += 1
                if patience_count >= self.patience:
                    break

            epoch += 1

        cprint("Training done.", "green", attrs=["bold"])

        train_writer.flush()
        val_writer.flush()
        train_writer.close()
        val_writer.close()
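
The context-encoder example above calls several helpers that are not part of the snippet: weight_variable, bias_variable, conv2d, uconv2d, avg_pool_2x2, activation_function and variable_summaries. The following is a minimal sketch of definitions consistent with the shapes used above; the original repository's versions may differ (the otherwise unused is_training argument suggests conv2d and uconv2d may also wrap batch normalization).

import tensorflow as tf

def weight_variable(shape):
    # small truncated-normal initialization for conv / deconv / fc weights
    return tf.Variable(tf.truncated_normal(shape, stddev=0.02))

def bias_variable(shape):
    return tf.Variable(tf.constant(0.1, shape=shape))

def conv2d(x, W, stride=1, is_training=None):
    # is_training is accepted only for signature compatibility in this sketch
    return tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding='SAME')

def uconv2d(x, W, output_shape, stride=2, is_training=None):
    # transposed convolution; W is [k, k, out_channels, in_channels],
    # matching the W_uconv* shapes declared in _decode
    return tf.nn.conv2d_transpose(x, W, output_shape=output_shape,
                                  strides=[1, stride, stride, 1],
                                  padding='SAME')

def avg_pool_2x2(x):
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def activation_function(x):
    return tf.nn.relu(x)

def variable_summaries(var):
    # standard TensorBoard summary helper from the TensorFlow tutorials
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        tf.summary.scalar('stddev',
                          tf.sqrt(tf.reduce_mean(tf.square(var - mean))))
        tf.summary.histogram('histogram', var)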
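
The trainer also assumes a BatchLoader whose interface can only be inferred from the call sites: a constructor taking a batch size, n_train_batches / n_valid_batches counts, and load_batch(train=...) returning a [batch, 64, 64, 3] array of pixels in [0, 255]. A stub that satisfies this assumed contract:

import numpy as np

class BatchLoader(object):
    # interface sketch only; a real loader would index an image dataset
    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.n_train_batches = 1000   # placeholder counts
        self.n_valid_batches = 100

    def load_batch(self, train=True):
        # returns pixel values in [0, 255]; the model itself rescales
        # them to [-1, 1]
        return np.random.randint(
            0, 256, size=(self.batch_size, 64, 64, 3)).astype(np.float32)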
Beispiel #22
0
def main():
    LAMBDA = 1e-8
    center_alpha = 0.9
    num_class = 10000
    embd_size = 512
    args = argument()
    checkpoint_dir = args.save_model_name
    lr = args.lr
    batch_size = args.batch_size
    epoch_num = args.epoch_num
    sta = args.sta
    img_shape = args.img_size
    train_file = args.train_file
    val_file = args.val_file
    train_batch_loader = BatchLoader(train_file, batch_size, img_shape)
    test_batch_loader = BatchLoader(val_file, batch_size, img_shape)
    #(Height,Width) = (train_batch_loader.height,train_batch_loader.width)
    #train_batch_loader = mnist_data(batch_size)
    print("img shape", img_shape)
    with tf.name_scope('input'):
        input_images = tf.placeholder(tf.float32,
                                      shape=(batch_size, img_shape[0],
                                             img_shape[1], 3),
                                      name='input_images')
        labels = tf.placeholder(tf.int32, shape=(batch_size,), name='labels')
        learn_rate = tf.placeholder(tf.float32,
                                    shape=(None),
                                    name='learn_rate')
    with tf.name_scope('var'):
        global_step = tf.Variable(0, trainable=False, name='global_step')
    loss_op = CenterLoss(center_alpha, num_class, embd_size)
    #with tf.device('/gpu:0'):
    total_loss, accuracy, centers_update_op, center_loss, softmax_loss, pred_class, res1 = build_network(
        input_images, labels, num_class, sta, loss_op, ratio=LAMBDA)
    optimizer = tf.train.AdamOptimizer(learn_rate)
    #optimizer = tf.train.GradientDescentOptimizer(learn_rate)
    with tf.control_dependencies([centers_update_op]):
        train_op = optimizer.minimize(total_loss, global_step=global_step)
    #train_op = optimizer.minimize(total_loss, global_step=global_step)
    summary_op = tf.summary.merge_all()
    with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as sess:
        #sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('../tmp/face_log', sess.graph)
        saver = tf.train.Saver()
        if args.pretrained is not None:
            model_path = args.save_model_name + '-' + str(args.pretrained)
            #saver.restore(sess,'./face_model/high_score-60')
            saver.restore(sess, model_path)
        else:
            init = tf.global_variables_initializer()
            sess.run(init)
        step = sess.run(global_step)
        epoch_idx = 0
        graph_step = 0
        while epoch_idx <= epoch_num:
            step = 0
            ckpt_fg = True
            ps_loss = 0.0
            pc_loss = 0.0
            acc_sum = 0.0
            while step < train_batch_loader.batch_num:
                batch_images, batch_labels = train_batch_loader.next_batch()
                #batch_images, batch_labels = train_batch_loader.get_batchdata()
                # 0.0078125 = 1/128, so pixels are mapped from [0, 255]
                # to roughly [-1, 1]
                in_imgs = (batch_images - 127.5) * 0.0078125
                #print("data in ",in_img[0,:2,:2,0])
                _, summary_str, train_acc, Center_loss, Softmax_loss, Pred_class, res1_o = sess.run(
                    [
                        train_op, summary_op, accuracy, center_loss,
                        softmax_loss, pred_class, res1
                    ],
                    feed_dict={
                        input_images: in_imgs,
                        labels: batch_labels,
                        learn_rate: lr
                    })
                step += 1
                #print("step",step, str(Softmax_loss),str(Center_loss))
                #print("step label",step, str(batch_labels))
                graph_step += 1
                if step % 100 == 0:
                    writer.add_summary(summary_str, global_step=graph_step)
                pc_loss += Center_loss
                ps_loss += Softmax_loss
                acc_sum += train_acc
                if step % 1000 == 0:
                    #lr = lr*0.1
                    #c_loss+=c_loss
                    #s_loss+=s_loss
                    print("****** Epoch {} Step {}: ***********".format(
                        str(epoch_idx), str(step)))
                    print("center loss: {}".format(pc_loss / 1000.0))
                    print("softmax_loss: {}".format(ps_loss / 1000.0))
                    print("train_acc: {}".format(acc_sum / 1000.0))
                    print("centers", res1_o[0, :5])
                    print("*******************************")
                    #if (acc_sum/100.0) >= 0.98 and (pc_loss/100.0)<40 and (ps_loss/100.0) <0.1 and ckpt_fg=='True':
                    if ckpt_fg:
                        print(
                            "******************************************************************************"
                        )
                        saver.save(sess, checkpoint_dir, global_step=epoch_idx)
                        ckpt_fg = False
                    ps_loss = 0.0
                    pc_loss = 0.0
                    acc_sum = 0.0

            epoch_idx += 1

            if epoch_idx % 10 == 0:
                print(
                    "******************************************************************************"
                )
                saver.save(sess, checkpoint_dir, global_step=epoch_idx)

            #writer.add_summary(summary_str, global_step=step)
            if epoch_idx % 5 == 0:
                lr = lr * 0.5

            # epoch_idx is always >= 1 after the increment above, so this
            # validation runs at the end of every epoch
            if epoch_idx:
                batch_images, batch_labels = test_batch_loader.next_batch()
                #batch_images,batch_labels = train_batch_loader.get_valdata()
                vali_image = (batch_images - 127.5) * 0.0078125
                vali_acc = sess.run(accuracy,
                                    feed_dict={
                                        input_images: vali_image,
                                        labels: batch_labels
                                    })
                print(("epoch: {}, train_acc:{:.4f}, vali_acc:{:.4f}".format(
                    epoch_idx, train_acc, vali_acc)))
        sess.close()
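
This example and Beispiel #26 below both rely on a CenterLoss / build_network pair that is not included. The center-loss op such code usually wraps (Wen et al., 2016) looks roughly like the sketch below; the function name and signature are assumptions, not the original API.

import tensorflow as tf

def center_loss(features, labels, alpha, num_classes):
    # features: [batch, embd_size] embeddings; labels: [batch] int class ids
    embd_size = int(features.get_shape()[1])
    centers = tf.get_variable('centers', [num_classes, embd_size],
                              initializer=tf.constant_initializer(0),
                              trainable=False)
    centers_batch = tf.gather(centers, labels)
    # pull each embedding toward its class center
    loss = tf.nn.l2_loss(features - centers_batch)

    # update op: move the selected centers toward their class embeddings,
    # scaled by alpha and by how often each class appears in the batch
    diff = centers_batch - features
    unique_labels, unique_idx, unique_count = tf.unique_with_counts(labels)
    appear_times = tf.reshape(tf.gather(unique_count, unique_idx), [-1, 1])
    diff = alpha * diff / tf.cast(appear_times, tf.float32)
    centers_update_op = tf.scatter_sub(centers, labels, diff)
    return loss, centers_update_op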
Beispiel #23
0
class TDNN(Model):
    def __init__(self, sess, config):

        self.sess = sess
        self.config = config
        self.tb_dir = "/data/tensorboard_log_dict/TDNN_white"
        self.loader = BatchLoader(self.config)

    def build_model(self, input_):
        with tf.variable_scope("TDNN"):
            with tf.variable_scope("conv") as scope:
                maps = []

                for idx, kernel_dim in enumerate(self.config.kernels):
                    #if idx < 3:
                    #    gpu_num = 0
                    #else:
                    #gpu_num = idx-2
                    #    gpu_num = 1
                    #with tf.device('/gpu:%d' % gpu_num):
                    reduced_length = input_.get_shape()[1] - kernel_dim + 1

                    # [batch_size x seq_length x embed_dim x feature_map_dim]
                    conv = layers.conv2d(
                        input_,
                        self.config.feature_maps[idx],
                        [kernel_dim, self.config.binary_embed_width],
                        1,
                        padding='VALID',
                        scope='conv' + str(idx),
                        weights_initializer=layers.xavier_initializer_conv2d())

                    # [batch_size x 1 x 1 x feature_map_dim]
                    pool = layers.max_pool2d(conv, [reduced_length, 1],
                                             1,
                                             scope='pool' + str(idx))

                    maps.append(tf.squeeze(pool))

                fc = tf.concat(axis=1, values=maps)

            with tf.variable_scope("fully"):

                fc = tf.nn.dropout(fc, self.config.dropout_prob)

                flat = tf.reshape(
                    fc,
                    [self.config.batch_size,
                     sum(self.config.feature_maps)])

                fc1 = layers.fully_connected(
                    flat,
                    2048,
                    activation_fn=tf.nn.relu,
                    scope='fc1',
                    weights_initializer=layers.xavier_initializer(),
                    biases_initializer=tf.constant_initializer(0.01))

                fc1 = tf.contrib.layers.batch_norm(fc1,
                                                   decay=0.9,
                                                   center=True,
                                                   scale=True,
                                                   epsilon=1e-5,
                                                   activation_fn=tf.nn.relu)

                fc2 = layers.fully_connected(
                    fc1,
                    1024,
                    activation_fn=tf.nn.relu,
                    scope='fc2',
                    weights_initializer=layers.xavier_initializer(),
                    biases_initializer=tf.constant_initializer(0.01))

                fc2 = tf.contrib.layers.batch_norm(fc2,
                                                   decay=0.9,
                                                   center=True,
                                                   scale=True,
                                                   epsilon=1e-5,
                                                   activation_fn=tf.nn.relu)

                logits = layers.fully_connected(
                    fc2,
                    self.config.num_classes,
                    activation_fn=None,
                    scope='logits',
                    weights_initializer=layers.xavier_initializer(),
                    biases_initializer=tf.constant_initializer(0.01))

            return logits

    def build_loss(self, logits, labels):
        weight_decay_rate = 0.0001

        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='cross_entropy_per_example')
        cross_entropy_mean = tf.reduce_mean(cross_entropy,
                                            name='cross_entropy')

        tf.add_to_collection(tf.GraphKeys.LOSSES, cross_entropy_mean)
        tf.summary.scalar('loss_', cross_entropy_mean)

        # tf.contrib.layers names fully-connected / conv kernels 'weights',
        # so match that suffix; 'w:0' would match none of them
        weights_only = [x for x in tf.trainable_variables()
                        if x.name.endswith('weights:0')]
        weight_decay = tf.reduce_sum(
            tf.stack([tf.nn.l2_loss(x)
                      for x in weights_only])) * weight_decay_rate
        cross_entropy_mean += weight_decay

        return cross_entropy_mean

    def inference(self, input_):
        self.loader = BatchLoader(
            self.config.data_dir, self.config.dataset_name,
            self.config.batch_size, self.config.num_classes,
            self.config.preprocessor, self.config.epoch,
            self.config.specialist, self.config.forward_only)

        content, filename = self.loader.prepare_inference()

        #with tf.control_dependencies([self.loader.enqueue]):
        logits = self.build_model(content)
        softmax = tf.nn.softmax(logits)

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        self.sess.run(init_op)

        self.saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(self.config.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        else:
            print('no checkpoint found...')

        self.sess.run(self.loader.enqueue,
                      feed_dict={self.loader.filenames: input_})

        m_logits, m_softmax, m_filename = self.sess.run(
            [logits, softmax, filename])

        print(m_softmax, m_filename)

    def tower_loss(self, scope, content, label, gpu_num):

        #self.label = label
        #self.path = path

        logits = self.build_model(content)
        loss = self.build_loss(logits, label)

        self.accuracy = self.build_accuracy(logits, label)

        tf.add_to_collection('losses', loss)

        losses = tf.get_collection('losses', scope)
        total_loss = tf.add_n(losses, name='total_loss')

        for l in losses + [total_loss]:
            # strip the tower prefix so every GPU logs under the same tag
            loss_name = re.sub('%s_[0-9]*/' % 'tower', '', l.op.name)
            tf.summary.scalar(loss_name, l)

        return total_loss

    def average_gradients(self, tower_grads):
        """Average gradients across GPU towers, variable by variable."""
        average_grads = []
        # tower_grads is a list (one entry per tower) of lists of
        # (gradient, variable) pairs; zip(*...) regroups them per variable
        for grad_and_vars in zip(*tower_grads):

            grads = []
            for g, _ in grad_and_vars:
                # add a leading "tower" dimension so the per-tower gradients
                # can be concatenated and averaged below
                expanded_g = tf.expand_dims(g, 0)

                grads.append(expanded_g)

            grad = tf.concat(axis=0, values=grads)
            grad = tf.reduce_mean(grad, 0)

            # variables are shared across towers, so the copy from the
            # first tower suffices
            v = grad_and_vars[0][1]
            grad_and_var = (grad, v)
            average_grads.append(grad_and_var)

        return average_grads

    def run(self):

        self.global_step = tf.Variable(0, name="global_step", trainable=False)

        if not self.config.forward_only:

            self.opt = tf.train.AdamOptimizer(self.config.learning_rate)
            tower_grads = []

            content, label = self.loader.data_type_dict[self.config.data_type](
                self.config.forward_only)
            self.label = label

            with tf.variable_scope(tf.get_variable_scope()):
                for i in range(2):  # hard-coded for two GPU towers
                    with tf.device('/gpu:%d' % i):
                        with tf.name_scope('%s_%d' % ('tower', i)) as scope:
                            self.loss = self.tower_loss(
                                scope, content, label, i)

                            tf.get_variable_scope().reuse_variables()

                            grads = self.opt.compute_gradients(self.loss)

                            tower_grads.append(grads)

            grads = self.average_gradients(tower_grads)

            apply_gradient_op = self.opt.apply_gradients(
                grads, global_step=self.global_step)

            self.train_op = apply_gradient_op

        else:
            content, label = self.loader.data_type_dict[self.config.data_type](
                self.config.forward_only)
            self.label = label
            logits = self.build_model(content)
            self.accuracy = self.build_accuracy(logits, label)

        # ready for train
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        self.sess.run(init_op)

        self.saver = tf.train.Saver()
        self.merged_summary = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(
            "/data/tensorboard_log/dict/TDNN_0404/", self.sess.graph)

        self.coord = tf.train.Coordinator()
        self.threads = tf.train.start_queue_runners(self.sess, self.coord)

        ckpt = tf.train.get_checkpoint_state(self.config.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            self.saver.restore(self.sess, ckpt.model_checkpoint_path)
        else:
            print('no checkpoint found...')

        if not self.config.forward_only:
            self.train()
            #self.get_logits()
            print("train")
        else:
            self.test()
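
build_accuracy is called in tower_loss and run but never defined in this snippet. A plausible sketch, consistent with how its result is fed to sess.run elsewhere:

    def build_accuracy(self, logits, labels):
        # fraction of the batch whose argmax prediction matches the label
        correct = tf.equal(tf.argmax(logits, 1), tf.cast(labels, tf.int64))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        tf.summary.scalar('accuracy_', accuracy)
        return accuracy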
Beispiel #24
0
    def __init__(self, sess, config):

        self.sess = sess
        self.config = config
        self.tb_dir = "/data/tensorboard_log_dict/TDNN_white"
        self.loader = BatchLoader(self.config)
Beispiel #25
0
class LSTMTDNN(Model):
    """Time-delayed Nueral Network (cf. http://arxiv.org/abs/1508.06615v4)
  """
    def __init__(self,
                 sess,
                 batch_size=100,
                 rnn_size=650,
                 layer_depth=2,
                 word_embed_dim=650,
                 char_embed_dim=15,
                 feature_maps=[50, 100, 150, 200, 200, 200, 200],
                 kernels=[1, 2, 3, 4, 5, 6, 7],
                 seq_length=35,
                 max_word_length=65,
                 use_word=False,
                 use_char=True,
                 hsm=0,
                 max_grad_norm=5,
                 highway_layers=2,
                 dropout_prob=0.5,
                 use_batch_norm=True,
                 checkpoint_dir="checkpoint",
                 forward_only=False,
                 data_dir="data",
                 dataset_name="pdb",
                 use_progressbar=False):
        """Initialize the parameters for LSTM TDNN

    Args:
      rnn_size: the dimensionality of hidden layers
      layer_depth: # of depth in LSTM layers
      batch_size: size of batch per epoch
      word_embed_dim: the dimensionality of word embeddings
      char_embed_dim: the dimensionality of character embeddings
      feature_maps: list of feature maps (for each kernel width)
      kernels: list of kernel widths
      seq_length: number of time steps to unroll for
      max_word_length: maximum word length in characters
      use_word: whether to use word embeddings or not
      use_char: whether to use character embeddings or not
      highway_layers: # of highway layers to use
      dropout_prob: the probability of dropout
      use_batch_norm: whether to use batch normalization or not
      hsm: whether to use hierarchical softmax
    """
        self.sess = sess

        self.batch_size = batch_size
        self.seq_length = seq_length

        # RNN
        self.rnn_size = rnn_size
        self.layer_depth = layer_depth

        # CNN
        self.use_word = use_word
        self.use_char = use_char
        self.word_embed_dim = word_embed_dim
        self.char_embed_dim = char_embed_dim
        self.feature_maps = feature_maps
        self.kernels = kernels

        # General
        self.highway_layers = highway_layers
        self.dropout_prob = dropout_prob
        self.use_batch_norm = use_batch_norm

        # Training
        self.max_grad_norm = max_grad_norm
        self.max_word_length = max_word_length
        self.hsm = hsm

        self.data_dir = data_dir
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir

        self.forward_only = forward_only
        self.use_progressbar = use_progressbar

        self.loader = BatchLoader(self.data_dir, self.dataset_name,
                                  self.batch_size, self.seq_length,
                                  self.max_word_length)
        print('Word vocab size: %d, Char vocab size: %d, Max word length (incl. padding): %d' % \
            (len(self.loader.idx2word), len(self.loader.idx2char), self.loader.max_word_length))

        self.max_word_length = self.loader.max_word_length
        self.char_vocab_size = len(self.loader.idx2char)
        self.word_vocab_size = len(self.loader.idx2word)

        # build LSTMTDNN model
        self.prepare_model()

        # load checkpoints
        if self.forward_only:
            if self.load(self.checkpoint_dir, self.dataset_name):
                print(" [*] SUCCESS to load model for %s." % self.dataset_name)
            else:
                print(" [!] Failed to load model for %s." % self.dataset_name)
                sys.exit(1)

    def prepare_model(self):
        with tf.variable_scope("LSTMTDNN"):
            self.char_inputs = []
            self.word_inputs = []
            self.cnn_outputs = []

            if self.use_char:
                char_W = tf.get_variable(
                    "char_embed", [self.char_vocab_size, self.char_embed_dim])
            else:
                word_W = tf.get_variable(
                    "word_embed", [self.word_vocab_size, self.word_embed_dim])

            with tf.variable_scope("CNN") as scope:
                self.char_inputs = tf.placeholder(
                    tf.int32,
                    [self.batch_size, self.seq_length, self.max_word_length])
                self.word_inputs = tf.placeholder(
                    tf.int32, [self.batch_size, self.seq_length])

                char_indices = tf.split(1, self.seq_length, self.char_inputs)
                word_indices = tf.split(1, self.seq_length,
                                        tf.expand_dims(self.word_inputs, -1))

                for idx in xrange(self.seq_length):
                    char_index = tf.reshape(char_indices[idx],
                                            [-1, self.max_word_length])
                    word_index = tf.reshape(word_indices[idx], [-1, 1])

                    if idx != 0:
                        scope.reuse_variables()

                    if self.use_char:
                        # [batch_size x word_max_length, char_embed]
                        char_embed = tf.nn.embedding_lookup(char_W, char_index)

                        char_cnn = TDNN(char_embed, self.char_embed_dim,
                                        self.feature_maps, self.kernels)

                        if self.use_word:
                            word_embed = tf.nn.embedding_lookup(
                                word_W, word_index)
                            # old-style tf.concat takes (axis, value_list)
                            cnn_output = tf.concat(
                                1, [char_cnn.output, word_embed])
                        else:
                            cnn_output = char_cnn.output
                    else:
                        cnn_output = tf.squeeze(
                            tf.nn.embedding_lookup(word_W, word_index))

                    if self.use_batch_norm:
                        bn = batch_norm()
                        norm_output = bn(
                            tf.expand_dims(tf.expand_dims(cnn_output, 1), 1))
                        cnn_output = tf.squeeze(norm_output)

                    if self.highway_layers > 0:
                        #cnn_output = highway(input_, input_dim_length, self.highway_layers, 0)
                        cnn_output = highway(cnn_output,
                                             cnn_output.get_shape()[1],
                                             self.highway_layers, 0)

                    self.cnn_outputs.append(cnn_output)

            with tf.variable_scope("LSTM") as scope:
                self.cell = rnn_cell.BasicLSTMCell(self.rnn_size)
                self.stacked_cell = rnn_cell.MultiRNNCell([self.cell] *
                                                          self.layer_depth)

                outputs, _ = rnn.rnn(self.stacked_cell,
                                     self.cnn_outputs,
                                     dtype=tf.float32)

                self.lstm_outputs = []
                self.true_outputs = tf.placeholder(
                    tf.float32,
                    [self.batch_size, self.seq_length, self.word_vocab_size])

                loss = 0
                true_outputs = tf.split(1, self.seq_length, self.true_outputs)

                for idx, (top_h,
                          true_output) in enumerate(zip(outputs,
                                                        true_outputs)):
                    if self.dropout_prob > 0:
                        top_h = tf.nn.dropout(top_h, self.dropout_prob)

                    if self.hsm > 0:
                        self.lstm_outputs.append(top_h)
                        loss += tf.nn.softmax_cross_entropy_with_logits(
                            self.lstm_outputs[idx], tf.squeeze(true_output))
                    else:
                        if idx != 0:
                            scope.reuse_variables()
                        proj = rnn_cell.linear(top_h, self.word_vocab_size, 0)
                        log_softmax = tf.log(tf.nn.softmax(proj))
                        self.lstm_outputs.append(log_softmax)
                        # the cross-entropy op applies softmax internally, so
                        # it gets the raw projection; feeding log_softmax
                        # would normalize twice
                        loss += tf.nn.softmax_cross_entropy_with_logits(
                            proj, tf.squeeze(true_output))

                self.loss = tf.reduce_mean(loss) / self.seq_length

                tf.scalar_summary("loss", self.loss)
                tf.scalar_summary("perplexity", tf.exp(self.loss))

    def train(self, epoch):
        cost = 0
        target = np.zeros(
            [self.batch_size, self.seq_length, self.word_vocab_size])

        N = self.loader.sizes[0]
        for idx in xrange(N):
            target.fill(0)
            x, y, x_char = self.loader.next_batch(0)
            for b in xrange(self.batch_size):
                for t, w in enumerate(y[b]):
                    target[b][t][w] = 1

            feed_dict = {
                self.word_inputs: x,
                self.char_inputs: x_char,
                self.true_outputs: target,
            }

            _, loss, step, summary_str = self.sess.run(
                [self.optim, self.loss, self.global_step, self.merged_summary],
                feed_dict=feed_dict)

            self.writer.add_summary(summary_str, step)

            if idx % 50 == 0:
                if self.use_progressbar:
                    progress(
                        idx / N, "epoch: [%2d] [%4d/%4d] loss: %2.6f" %
                        (epoch, idx, N, loss))
                else:
                    print("epoch: [%2d] [%4d/%4d] loss: %2.6f" %
                          (epoch, idx, N, loss))

            cost += loss
        return cost / N

    def test(self, split_idx, max_batches=None):
        if split_idx == 1:
            set_name = 'Valid'
        else:
            set_name = 'Test'

        N = self.loader.sizes[split_idx]
        if max_batches is not None:
            N = min(max_batches, N)

        self.loader.reset_batch_pointer(split_idx)
        target = np.zeros(
            [self.batch_size, self.seq_length, self.word_vocab_size])

        cost = 0
        for idx in xrange(N):
            target.fill(0)

            x, y, x_char = self.loader.next_batch(split_idx)
            for b in xrange(self.batch_size):
                for t, w in enumerate(y[b]):
                    target[b][t][w] = 1

            feed_dict = {
                self.word_inputs: x,
                self.char_inputs: x_char,
                self.true_outputs: target,
            }

            loss = self.sess.run(self.loss, feed_dict=feed_dict)

            if idx % 50 == 0:
                if self.use_progressbar:
                    progress(
                        idx / N, "> %s: loss: %2.6f, perplexity: %2.6f" %
                        (set_name, loss, np.exp(loss)))
                else:
                    print(" > %s: loss: %2.6f, perplexity: %2.6f" %
                          (set_name, loss, np.exp(loss)))

            cost += loss

        cost = cost / N
        return cost

    def run(self, epoch=25, learning_rate=1, learning_rate_decay=0.5):
        self.current_lr = learning_rate

        self.lr = tf.Variable(learning_rate, trainable=False)
        self.opt = tf.train.GradientDescentOptimizer(self.lr)
        #self.opt = tf.train.AdamOptimizer(learning_rate, beta1=0.5).minimize(self.loss)

        # clip gradients
        params = tf.trainable_variables()
        grads = []
        for grad in tf.gradients(self.loss, params):
            # tf.gradients returns None for variables not connected to the
            # loss, and a Tensor itself cannot be used as a boolean
            if grad is not None:
                grads.append(tf.clip_by_norm(grad, self.max_grad_norm))
            else:
                grads.append(grad)

        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        self.optim = self.opt.apply_gradients(zip(grads, params),
                                              global_step=self.global_step)

        # ready for train
        tf.initialize_all_variables().run()

        if self.load(self.checkpoint_dir, self.dataset_name):
            print(" [*] SUCCESS to load model for %s." % self.dataset_name)
        else:
            print(" [!] Failed to load model for %s." % self.dataset_name)

        self.saver = tf.train.Saver()
        self.merged_summary = tf.merge_all_summaries()
        self.writer = tf.train.SummaryWriter("./logs", self.sess.graph_def)

        self.log_loss = []
        self.log_perp = []

        if not self.forward_only:
            for idx in xrange(epoch):
                train_loss = self.train(idx)
                valid_loss = self.test(1)

                # Logging
                self.log_loss.append([train_loss, valid_loss])
                self.log_perp.append([np.exp(train_loss), np.exp(valid_loss)])

                state = {
                    'perplexity': np.exp(train_loss),
                    'epoch': idx,
                    'learning_rate': self.current_lr,
                    'valid_perplexity': np.exp(valid_loss)
                }
                print(state)

                # Learning rate annealing
                if len(self.log_loss) > 1 and \
                        self.log_loss[idx][1] > self.log_loss[idx - 1][1] * 0.9999:
                    self.current_lr = self.current_lr * learning_rate_decay
                    self.lr.assign(self.current_lr).eval()
                if self.current_lr < 1e-5: break

                if idx % 2 == 0:
                    self.save(self.checkpoint_dir, self.dataset_name)

        test_loss = self.test(2)
        print(" [*] Test loss: %2.6f, perplexity: %2.6f" %
              (test_loss, np.exp(test_loss)))
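
prepare_model above constructs char_cnn = TDNN(char_embed, self.char_embed_dim, self.feature_maps, self.kernels) and reads char_cnn.output, but that module is not included (and is distinct from the TDNN class of Beispiel #23). A sketch of the usual character CNN it denotes, one convolution per kernel width followed by max-over-time pooling, written against the same old TF API as the snippet; treat the details as assumptions.

class TDNN(object):
    def __init__(self, input_, embed_dim, feature_maps, kernels):
        # input_: [batch, word_length, embed_dim] character embeddings
        input_ = tf.expand_dims(input_, -1)  # -> [batch, length, embed, 1]
        length = int(input_.get_shape()[1])
        outputs = []
        for n_maps, width in zip(feature_maps, kernels):
            W = tf.get_variable('kernel_%d' % width,
                                [width, embed_dim, 1, n_maps])
            conv = tf.nn.conv2d(input_, W, [1, 1, 1, 1], 'VALID')
            # max-over-time pooling collapses the length dimension
            pool = tf.nn.max_pool(tf.tanh(conv),
                                  [1, length - width + 1, 1, 1],
                                  [1, 1, 1, 1], 'VALID')
            outputs.append(tf.squeeze(pool, [1, 2]))
        # concatenated per-width features: [batch, sum(feature_maps)]
        self.output = tf.concat(1, outputs)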
Beispiel #26
0
def main():
    LAMBDA = 0.0
    num_class = 526
    checkpoint_dir = "../model/"
    with tf.name_scope('input'):
        input_images = tf.placeholder(tf.float32,
                                      shape=(None, 100, 100, 3),
                                      name='input_images')
        labels = tf.placeholder(tf.int64, shape=(None), name='labels')
    logits, features, total_loss, accuracy, centers_update_op, center_loss, softmax_loss = build_network(
        input_images, labels, num_class, ratio=LAMBDA)
    global_step = tf.Variable(0, trainable=False, name='global_step')
    train_batch_loader = BatchLoader("../data/facescrub_train.list", 16)
    test_batch_loader = BatchLoader("../data/facescrub_val.list", 16)
    optimizer = tf.train.AdamOptimizer(0.001)
    with tf.control_dependencies([centers_update_op]):
        train_op = optimizer.minimize(total_loss, global_step=global_step)
    summary_op = tf.summary.merge_all()

    with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
        sess.run(tf.global_variables_initializer())
        writer = tf.summary.FileWriter('../tmp/face_log', sess.graph)
        saver = tf.train.Saver()
        step = sess.run(global_step)
        while step <= 80000:
            batch_images, batch_labels = train_batch_loader.next_batch()
            # print batch_images.shape
            # print batch_labels.shape
            _, summary_str, train_acc, Center_loss, Softmax_loss = sess.run(
                [train_op, summary_op, accuracy, center_loss, softmax_loss],
                feed_dict={
                    input_images:
                    (batch_images - 127.5) * 0.0078125,  # - mean_data,
                    labels: batch_labels,
                })
            step += 1
            print("step", step)
            if step % 100 == 0:
                print("********* Step %s: ***********" % str(step))
                print("center loss: %s" % str(Center_loss))
                print("softmax_loss: %s" % str(Softmax_loss))
                print("train_acc: %s" % str(train_acc))
                print("*******************************")

            if step % 10000 == 0:
                saver.save(sess,
                           checkpoint_dir + 'model.ckpt',
                           global_step=step)

            writer.add_summary(summary_str, global_step=step)

            if step % 2000 == 0:
                batch_images, batch_labels = test_batch_loader.next_batch()
                vali_image = (batch_images - 127.5) * 0.0078125
                vali_acc = sess.run(accuracy,
                                    feed_dict={
                                        input_images: vali_image,
                                        labels: batch_labels
                                    })
                print(("step: {}, train_acc:{:.4f}, vali_acc:{:.4f}".format(
                    step, train_acc, vali_acc)))
        sess.close()
Beispiel #27
0
    def __init__(self,
                 sess,
                 batch_size=100,
                 rnn_size=650,
                 layer_depth=2,
                 word_embed_dim=650,
                 char_embed_dim=15,
                 feature_maps=[50, 100, 150, 200, 200, 200, 200],
                 kernels=[1, 2, 3, 4, 5, 6, 7],
                 seq_length=35,
                 max_word_length=65,
                 use_word=False,
                 use_char=True,
                 hsm=0,
                 max_grad_norm=5,
                 highway_layers=2,
                 dropout_prob=0.5,
                 use_batch_norm=True,
                 checkpoint_dir="checkpoint",
                 forward_only=False,
                 data_dir="data",
                 dataset_name="pdb",
                 use_progressbar=False):
        """Initialize the parameters for LSTM TDNN

    Args:
      rnn_size: the dimensionality of hidden layers
      layer_depth: # of depth in LSTM layers
      batch_size: size of batch per epoch
      word_embed_dim: the dimensionality of word embeddings
      char_embed_dim: the dimensionality of character embeddings
      feature_maps: list of feature maps (for each kernel width)
      kernels: list of kernel widths
      seq_length: number of time steps to unroll for
      max_word_length: maximum word length in characters
      use_word: whether to use word embeddings or not
      use_char: whether to use character embeddings or not
      highway_layers: # of highway layers to use
      dropout_prob: the probability of dropout
      use_batch_norm: whether to use batch normalization or not
      hsm: whether to use hierarchical softmax
    """
        self.sess = sess

        self.batch_size = batch_size
        self.seq_length = seq_length

        # RNN
        self.rnn_size = rnn_size
        self.layer_depth = layer_depth

        # CNN
        self.use_word = use_word
        self.use_char = use_char
        self.word_embed_dim = word_embed_dim
        self.char_embed_dim = char_embed_dim
        self.feature_maps = feature_maps
        self.kernels = kernels

        # General
        self.highway_layers = highway_layers
        self.dropout_prob = dropout_prob
        self.use_batch_norm = use_batch_norm

        # Training
        self.max_grad_norm = max_grad_norm
        self.max_word_length = max_word_length
        self.hsm = hsm

        self.data_dir = data_dir
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir

        self.forward_only = forward_only
        self.use_progressbar = use_progressbar

        self.loader = BatchLoader(self.data_dir, self.dataset_name,
                                  self.batch_size, self.seq_length,
                                  self.max_word_length)
        print('Word vocab size: %d, Char vocab size: %d, Max word length (incl. padding): %d' % \
            (len(self.loader.idx2word), len(self.loader.idx2char), self.loader.max_word_length))

        self.max_word_length = self.loader.max_word_length
        self.char_vocab_size = len(self.loader.idx2char)
        self.word_vocab_size = len(self.loader.idx2word)

        # build LSTMTDNN model
        self.prepare_model()

        # load checkpoints
        if self.forward_only:
            if self.load(self.checkpoint_dir, self.dataset_name):
                print(" [*] SUCCESS to load model for %s." % self.dataset_name)
            else:
                print(" [!] Failed to load model for %s." % self.dataset_name)
                sys.exit(1)
Beispiel #28
0

if __name__ == '__main__':
    # Use GPUs if available
    args.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # Set the random seed for reproducible experiments
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    # Set the logger
    utils.set_logger(os.path.join(args.model_dir, 'train.log'))
    logging.info('device: {}'.format(args.device))
    logging.info('Hyper params:%r' % args.__dict__)

    # Create the input data pipeline
    logging.info('Loading the datasets...')
    bl = BatchLoader(args)
    ## Load train and dev data
    train_data = bl.load_data('train.json')
    dev_data = bl.load_data('dev.json')
    ## Train data
    ner_train_data, re_train_data = bl.build_data(train_data, is_train=True)
    train_bls = bl.batch_loader(ner_train_data,
                                re_train_data,
                                args.ner_max_len,
                                args.re_max_len,
                                args.batch_size,
                                is_train=True)
    num_batchs_per_task = [len(train_bl) for train_bl in train_bls]
    logging.info(
        'num of batch per task for train: {}'.format(num_batchs_per_task))
    train_task_ids = sum([[i] * num_batchs_per_task[i]