Example #1

# Note: these snippets rely on the surrounding project for CaptionGenerator,
# SentenceLocalizer, ANCHOR_LIST, param_refine and the saver object; sys is
# imported here because every variant flushes stdout around its printout.
import sys


def construct_model(params, saver, logger):
    if params['model'] != "CaptionGenerator":
        raise NotImplementedError('model %s is not implemented' % params['model'])

    if params['checkpoint'] is None:
        raise ValueError('a checkpoint is required to construct the model')
    logger.warning('use checkpoint: %s', params['checkpoint'])
    state_dict, params_ = saver.load_model(params['checkpoint'])
    # Restore the checkpoint's hyperparameters, but keep the caller's
    # data-related settings (dataset paths, splits, etc.).
    for key in params_:
        if 'data' in key:
            continue
        params[key] = params_[key]

    model = CaptionGenerator(
        params['hidden_dim'], params['rnn_layer'], params['rnn_cell'],
        params['rnn_dropout'], params['bidirectional'],
        params['attention_type'], params['context_type'],
        params['softmask_scale'], params['vocab_size'],
        params['sent_embedding_dim'], params['video_feature_dim'],
        params['video_use_residual'], params['max_cap_length'])

    model.load_state_dict(state_dict)
    logger.info('*' * 100)
    sys.stdout.flush()
    print(model)
    sys.stdout.flush()
    logger.info('*' * 100)

    return model
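
# A minimal sketch (an assumption, not the project's actual saver) of the
# load_model contract the snippet above relies on: the checkpoint file is
# assumed to bundle the weights with the hyperparameters used at training
# time, so evaluation can rebuild an identical architecture.
import torch

class Saver:
    def load_model(self, path):
        # Assumed checkpoint layout: {'state_dict': ..., 'params': ...}.
        ckpt = torch.load(path, map_location='cpu')
        return ckpt['state_dict'], ckpt['params']
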
Example #2

def construct_model(params, saver, logger):
    if params['checkpoint'] is None:
        logger.error('checkpoints are required for evaluation')
        sys.exit(1)
    logger.warning('use checkpoint: %s', params['checkpoint'])
    state_dict_sl, state_dict_cg, params_ = saver.load_model_slcg(
        params['checkpoint'])

    # Restore the checkpoint's hyperparameters, but keep the caller's
    # data-related settings (dataset paths, splits, etc.).
    for key in params_:
        if 'data' in key:
            continue
        params[key] = params_[key]
    # Fixed evaluation batch size, overriding whatever the checkpoint stored.
    params['batch_size'] = 8
    model_sl = SentenceLocalizer(
        params['hidden_dim'], params['rnn_layer'], params['rnn_cell'],
        params['rnn_dropout'], params['bidirectional'],
        params['attention_type_sl'], params['regressor_scale'],
        params['vocab_size'], params['sent_embedding_dim'],
        params['video_feature_dim'], params['fc_dropout'],
        params['anchor_list'], params['feature_mixer_type'],
        params['video_use_residual'], params['sent_use_residual'])

    model_cg = CaptionGenerator(
        params['hidden_dim'], params['rnn_layer'], params['rnn_cell'],
        params['rnn_dropout'], params['bidirectional'],
        params['attention_type_cg'], params['context_type'],
        params['softmask_scale'], params['vocab_size'],
        params['sent_embedding_dim'], params['video_feature_dim'],
        params['video_use_residual'], params['max_cap_length'])

    logger.info('*' * 100)
    sys.stdout.flush()
    print('caption generator' + '*' * 90)
    print(model_cg)
    print('sentence localizer' + '*' * 90)
    print(model_sl)
    sys.stdout.flush()
    logger.info('*' * 100)
    model_sl.load_state_dict(state_dict_sl)
    model_cg.load_state_dict(state_dict_cg)

    return model_sl, model_cg
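
# Hedged sketch of the paired loader used above. The source only shows that
# load_model_slcg returns separate state dicts for the sentence localizer
# and the caption generator plus the stored hyperparameters; the checkpoint
# key names below are assumptions for illustration.
import torch

def load_model_slcg(path):
    ckpt = torch.load(path, map_location='cpu')
    return ckpt['state_dict_sl'], ckpt['state_dict_cg'], ckpt['params']
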
Example #3
def construct_model(params, saver, logger):
    if params['model'] != "CaptionGenerator":
        raise NotImplementedError('model %s is not implemented' % params['model'])

    # def __init__(self, hidden_dim, rnn_layer, rnn_cell, rnn_dropout, bidirectional, attention_type, context_type, scale,
    #              sent_vocab_size, sent_embedding_dim, video_feature_dim, video_use_residual, max_cap_length):

    model = CaptionGenerator(
        params['hidden_dim'], params['rnn_layer'], params['rnn_cell'],
        params['rnn_dropout'], params['bidirectional'],
        params['attention_type'], params['context_type'],
        params['softmask_scale'], params['vocab_size'],
        params['sent_embedding_dim'], params['video_feature_dim'],
        params['video_use_residual'], params['max_cap_length'])

    logger.info('*' * 100)
    sys.stdout.flush()
    print(model)
    sys.stdout.flush()
    logger.info('*' * 100)
    if params['checkpoint'] is not None:
        logger.warning('use checkpoint: %s', params['checkpoint'])
        state_dict, params_ = saver.load_model(params['checkpoint'])
        param_refine(params, params_)
        model.load_state_dict(state_dict)

    return model
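
# param_refine is not shown in this listing; judging from the explicit loops
# in examples #1 and #2, it presumably merges the checkpoint's
# hyperparameters into the live params dict while preserving data-related
# keys. A minimal sketch under that assumption:
def param_refine(params, params_):
    for key in params_:
        if 'data' in key:
            continue  # keep the caller's dataset paths and settings
        params[key] = params_[key]
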
Example #4
def construct_model(params, saver, logger):
    # Default anchors come from the module-level ANCHOR_LIST; overridden
    # below when the checkpoint carries its own anchor configuration.
    params['anchor_list'] = ANCHOR_LIST

    if params['checkpoint'] is not None:
        state_dict_sl, state_dict_cg, params_ = saver.load_model_slcg(
            params['checkpoint'])
        params['anchor_list'] = params_['anchor_list']

    model_sl = SentenceLocalizer(
        params['hidden_dim'], params['rnn_layer'], params['rnn_cell'],
        params['rnn_dropout'], params['bidirectional'],
        params['attention_type_sl'], params['regressor_scale'],
        params['vocab_size'], params['sent_embedding_dim'],
        params['video_feature_dim'], params['fc_dropout'],
        params['anchor_list'], params['feature_mixer_type'],
        params['video_use_residual'], params['sent_use_residual'],
        params['pe_video'], params['pe_sent'])

    model_cg = CaptionGenerator(
        params['hidden_dim'], params['rnn_layer'], params['rnn_cell'],
        params['rnn_dropout'], params['bidirectional'],
        params['attention_type_cg'], params['context_type'],
        params['softmask_scale'], params['vocab_size'],
        params['sent_embedding_dim'], params['video_feature_dim'],
        params['video_use_residual'], params['max_cap_length'])

    logger.info('*' * 100)
    sys.stdout.flush()
    print('caption generator' + '*' * 90)
    print(model_cg)
    print('sentence localizer' + '*' * 90)
    print(model_sl)
    sys.stdout.flush()
    logger.info('*' * 100)
    if params['checkpoint'] is not None:
        logger.warning('use checkpoint: %s', params['checkpoint'])
        model_sl.load_state_dict(state_dict_sl)
        model_cg.load_state_dict(state_dict_cg)
    # Per-sub-model checkpoints take precedence over the joint checkpoint.
    if params['checkpoint_cg'] is not None:
        state_dict_cg, _ = saver.load_model(params['checkpoint_cg'])
        logger.warning('use checkpoint: %s', params['checkpoint_cg'])
        model_cg.load_state_dict(state_dict_cg)
    if params['checkpoint_sl'] is not None:
        state_dict_sl, _ = saver.load_model(params['checkpoint_sl'])
        logger.warning('use checkpoint: %s', params['checkpoint_sl'])
        model_sl.load_state_dict(state_dict_sl)

    return model_sl, model_cg
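
# Usage sketch for the variant above. params is the evaluation config
# consumed by construct_model; switching to eval mode is the usual PyTorch
# inference step, though the source does not show it.
model_sl, model_cg = construct_model(params, saver, logger)
model_sl.eval()
model_cg.eval()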