# Example #1
def get_cycle_decoding_model(inputs, outputs, params, is_training, reuse):
  """Attach cycle-consistency future decoding results to `outputs`.

  Runs the future decoder on the cycle embedding and stores the decoded
  keypoints under 'cycle_fut_landmarks'. Only valid at training time.
  Note: the 'fut_dec' scope is entered with reuse=True, so the decoder
  variables must already have been created elsewhere in the graph.
  """
  assert is_training
  cell_ctor = NAME_TO_RNNCELL[params.dec_model]

  # Dropout is only active during training; keep_prob 1.0 otherwise.
  keep_prob = params.recurrent_dropout_prob if is_training else 1.0

  dec_cell = get_rnn_cell(
    cell_ctor, params.dec_rnn_size,
    use_dropout=is_training and params.use_recurrent_dropout,
    keep_prob=keep_prob, is_bidir=False)

  fut_decoder = _get_network(FUT_DEC_FN)

  # Decode future frames starting from the cycle embedding; an optional
  # style code is passed when the model is configured with dec_style > 0.
  init_state = outputs['cycle_dec_embedding']
  cycle_style = outputs['cycle_style'] if getattr(params, 'dec_style', 0) > 0 else None

  with tf.variable_scope('fut_dec', reuse=True):
    dec_out = fut_decoder(
      init_state, cycle_style, dec_cell, params.max_length, params, is_training)
  outputs['cycle_fut_landmarks'] = dec_out['keypoint_output']
  return outputs
# Example #2
def get_seq_decoding_model(inputs, outputs, params, is_training, reuse, output_length=None):
  """Decode future landmark sequences from the sequence embedding.

  Reads 'dec_embedding' (and optionally 'dec_style') from `outputs` and
  stores the decoded keypoints under 'fut_landmarks'. When
  `output_length` is None, params.max_length frames are decoded.
  """
  cell_ctor = NAME_TO_RNNCELL[params.dec_model]

  # Recurrent dropout is disabled (keep_prob 1.0) outside of training.
  keep_prob = params.recurrent_dropout_prob if is_training else 1.0

  dec_cell = get_rnn_cell(
    cell_ctor, params.dec_rnn_size,
    use_dropout=is_training and params.use_recurrent_dropout,
    keep_prob=keep_prob, is_bidir=False)

  fut_decoder = _get_network(FUT_DEC_FN)
  length = params.max_length if output_length is None else output_length

  init_state = outputs['dec_embedding']
  # Optional style code, only present when the model uses dec_style > 0.
  dec_style = outputs['dec_style'] if getattr(params, 'dec_style', 0) > 0 else None

  with tf.variable_scope('fut_dec', reuse=reuse):
    dec_out = fut_decoder(
      init_state, dec_style, dec_cell, length, params, is_training=is_training)
  outputs['fut_landmarks'] = dec_out['keypoint_output']
  return outputs
# Example #3
def analogy_seq_encoding_model(inputs, params, is_training, reuse):
    """Factory function to retrieve analogy-making model.

    Encodes the three analogy sequences A, B and C with one shared
    sequence encoder and returns a dict with '<prefix>_features',
    '<prefix>_content' and '<prefix>_style' entries for each prefix.

    Fix: the encoder invocation was copy-pasted three times (once per
    sequence); the three identical passes are collapsed into a loop.
    Behavior and output keys are unchanged.
    """
    enc_cell_fn = NAME_TO_RNNCELL[params.enc_model]
    # Recurrent dropout only applies during training.
    recurrent_dropout_prob = params.recurrent_dropout_prob if is_training else 1.0

    rnn_cell = get_rnn_cell(enc_cell_fn,
                            params.enc_rnn_size,
                            use_dropout=is_training
                            and params.use_recurrent_dropout,
                            keep_prob=recurrent_dropout_prob,
                            is_bidir=params.use_bidirection_lstm)
    if params.use_bidirection_lstm:
        enc_cell_fw, enc_cell_bw = rnn_cell[0], rnn_cell[1]
    else:
        enc_cell_fw = rnn_cell
        enc_cell_bw = None
    #########################
    ## Network Declaration ##
    #########################
    seq_encoder = _get_network(ENC_FN)

    outputs = dict()
    ##############################
    ## Encoding T = f(b) - f(a) ##
    ##############################
    # All three sequences share the 'seq_enc' variables: the first scope
    # entry honors the caller's `reuse` flag, later entries always reuse.
    for idx, prefix in enumerate(('A', 'B', 'C')):
        with tf.variable_scope('seq_enc', reuse=reuse if idx == 0 else True):
            tmp_outputs = seq_encoder(inputs[prefix + '_landmarks'],
                                      inputs[prefix + '_lens'],
                                      enc_cell_fw,
                                      enc_cell_bw,
                                      params,
                                      is_training=is_training)
        outputs[prefix + '_features'] = tmp_outputs['features']
        outputs[prefix + '_content'] = tmp_outputs['content']
        outputs[prefix + '_style'] = tmp_outputs['style']
    return outputs
# Example #4
def analogy_singleseq_encoding_model(inputs, params, is_training, reuse):
    """Encode the three analogy sequences A, B, C with one shared
    single-sequence encoder.

    A is encoded from a fresh state and its final state seeds B's
    encoding; C is encoded from a fresh state and its final state is
    exported as 'C_enc_state'. Content/style splits are emitted only
    when params defines `content_dim`.
    """
    cell_ctor = NAME_TO_RNNCELL[params.enc_model]
    # Dropout is only active during training.
    keep_prob = params.recurrent_dropout_prob if is_training else 1.0

    assert (not params.use_bidirection_lstm)
    enc_cell = get_rnn_cell(cell_ctor,
                            params.enc_rnn_size,
                            use_dropout=is_training
                            and params.use_recurrent_dropout,
                            keep_prob=keep_prob,
                            is_bidir=False)
    encoder = _get_network(SINGLESEQ_ENC_FN)

    outputs = dict()
    split_content_style = hasattr(params, 'content_dim')

    # Sequence A: fresh initial state; keep its final state for B.
    with tf.variable_scope('seq_enc', reuse=reuse):
        enc_out = encoder(None,
                          inputs['A_landmarks'],
                          inputs['A_lens'],
                          enc_cell,
                          params,
                          is_training=is_training)
    a_final_state = enc_out['states']
    if split_content_style:
        outputs['A_content'] = enc_out['content']
        outputs['A_style'] = enc_out['style']

    # Sequence B continues from A's final state.
    with tf.variable_scope('seq_enc', reuse=True):
        enc_out = encoder(a_final_state,
                          inputs['B_landmarks'],
                          inputs['B_lens'],
                          enc_cell,
                          params,
                          is_training=is_training)
    if split_content_style:
        outputs['B_content'] = enc_out['content']
        outputs['B_style'] = enc_out['style']

    # Sequence C: fresh state; final state exported for decoding.
    with tf.variable_scope('seq_enc', reuse=True):
        enc_out = encoder(None,
                          inputs['C_landmarks'],
                          inputs['C_lens'],
                          enc_cell,
                          params,
                          is_training=is_training)
    outputs['C_enc_state'] = enc_out['states']
    if split_content_style:
        outputs['C_content'] = enc_out['content']
        outputs['C_style'] = enc_out['style']
    return outputs
# Example #5
def get_seq_encoding_model(inputs, params, is_training, reuse):
  """Factory function to retrieve encoder network model.

  Encodes the history ('his_*') and future ('fut_*') sequences with one
  shared sequence encoder and returns '<prefix>_features' (plus
  '<prefix>_content' / '<prefix>_style' when params has `content_dim`).

  Fix: the his/fut encoding passes were copy-pasted; they are collapsed
  into a loop. Behavior and output keys are unchanged.
  """
  enc_cell_fn = NAME_TO_RNNCELL[params.enc_model]

  # Recurrent dropout only applies during training.
  recurrent_dropout_prob = params.recurrent_dropout_prob if is_training else 1.0

  rnn_cell = get_rnn_cell(
    enc_cell_fn, params.enc_rnn_size,
    use_dropout=is_training and params.use_recurrent_dropout,
    keep_prob=recurrent_dropout_prob, is_bidir=params.use_bidirection_lstm)

  if params.use_bidirection_lstm:
    enc_cell_fw, enc_cell_bw = rnn_cell[0], rnn_cell[1]
  else:
    enc_cell_fw = rnn_cell
    enc_cell_bw = None
  seq_encoder = _get_network(ENC_FN)

  outputs = dict()
  #######################
  ## Encoding function ##
  #######################
  # Both passes share the 'seq_enc' variables: the first scope entry
  # honors the caller's `reuse` flag, the second always reuses.
  for prefix, scope_reuse in (('his', reuse), ('fut', True)):
    with tf.variable_scope('seq_enc', reuse=scope_reuse):
      tmp_outputs = seq_encoder(
        inputs[prefix + '_landmarks'], inputs[prefix + '_lens'],
        enc_cell_fw, enc_cell_bw, params, is_training=is_training)
    outputs[prefix + '_features'] = tmp_outputs['features']
    if hasattr(params, 'content_dim'):
      outputs[prefix + '_content'] = tmp_outputs['content']
      outputs[prefix + '_style'] = tmp_outputs['style']
  return outputs
# Example #6
def get_singleseq_encoding_model(inputs, params, is_training, reuse):
  """Factory function to retrieve encoder network model.

  Encodes the history sequence from a fresh state, then encodes the
  future sequence continuing from the history's final RNN state.
  Content/style outputs are emitted only when params has `content_dim`.
  """
  cell_ctor = NAME_TO_RNNCELL[params.enc_model]

  # Dropout is only active during training.
  keep_prob = params.recurrent_dropout_prob if is_training else 1.0

  assert (not params.use_bidirection_lstm)
  enc_cell = get_rnn_cell(
    cell_ctor, params.enc_rnn_size,
    use_dropout=is_training and params.use_recurrent_dropout,
    keep_prob=keep_prob, is_bidir=False)

  encoder = _get_network(SINGLESEQ_ENC_FN)

  outputs = dict()
  split_content_style = hasattr(params, 'content_dim')

  # History pass: fresh initial state.
  with tf.variable_scope('seq_enc', reuse=reuse):
    his_out = encoder(
      None, inputs['his_landmarks'], inputs['his_lens'],
      enc_cell, params, is_training=is_training)
  if split_content_style:
    outputs['his_content'] = his_out['content']
    outputs['his_style'] = his_out['style']

  # Future pass continues from the history's final state.
  with tf.variable_scope('seq_enc', reuse=True):
    fut_out = encoder(
      his_out['states'], inputs['fut_landmarks'], inputs['fut_lens'],
      enc_cell, params, is_training=is_training)
  if split_content_style:
    outputs['fut_content'] = fut_out['content']
    outputs['fut_style'] = fut_out['style']
  return outputs