Example #1
                      vocab=vocab, max_history_length=args.max_history_length, 
                      merge_source=args.merge_source)
 logging.info('Loading validation data from ' + args.valid_set)
 valid_data = dh.load(args.fea_type, args.valid_path, args.valid_set, 
                      include_caption=args.include_caption, separate_caption=args.separate_caption, 
                      vocab=vocab, max_history_length=args.max_history_length, 
                      merge_source=args.merge_source)
 if args.fea_type[0] == 'none':
     feature_dims = 0
 else:
     feature_dims = dh.feature_shape(train_data)
 logging.info("Detected feature dims: {}".format(feature_dims));
 # report data summary
 logging.info('#vocab = %d' % len(vocab))
 # make batchset for training
 train_indices, train_samples = dh.make_batch_indices(train_data, args.batch_size,
                                                      max_length=args.max_length, separate_caption=args.separate_caption)
 logging.info('#train sample = %d' % train_samples)
 logging.info('#train batch = %d' % len(train_indices))
 # make batchset for validation
 valid_indices, valid_samples = dh.make_batch_indices(valid_data, args.batch_size,
                                                  max_length=args.max_length, separate_caption=args.separate_caption)
 logging.info('#validation sample = %d' % valid_samples)
 logging.info('#validation batch = %d' % len(valid_indices))
 # create_model
 model = make_model(len(vocab), len(vocab), 
   N=args.nb_blocks, d_model=args.d_model, d_ff=args.d_ff, 
   h=args.att_h, dropout=args.dropout,  
   separate_his_embed=args.separate_his_embed, 
   separate_cap_embed=args.separate_cap_embed, 
   ft_sizes=feature_dims, 
   diff_encoder=args.diff_encoder, 
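The listing above stops right after make_model builds the Transformer. Below is a minimal sketch of how the train_indices produced by dh.make_batch_indices might be consumed in an epoch loop; dh.make_batch, model.loss, optimizer, and args.num_epochs are assumptions for illustration, not part of the original code.

import random

for epoch in range(args.num_epochs):            # args.num_epochs is assumed
    random.shuffle(train_indices)
    train_loss = 0.0
    for idx in train_indices:
        batch = dh.make_batch(train_data, idx)  # assumed batching helper
        loss = model.loss(batch)                # assumed loss interface
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    logging.info('epoch %d: train loss = %f' % (epoch + 1, train_loss / len(train_indices)))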
Example #2
    if train_args.dictmap != '':
        dictmap = json.load(open(train_args.dictmap, 'r'))
    else:
        dictmap = None
    # report data summary
    logging.info('#vocab = %d' % len(vocab))
    # prepare test data
    logging.info('Loading test data from ' + args.test_set)
    test_data = dh.load(train_args.fea_type,
                        args.test_path,
                        args.test_set,
                        vocab=vocab,
                        dictmap=dictmap,
                        include_caption=train_args.include_caption)
    test_indices, test_samples = dh.make_batch_indices(test_data, 1)
    logging.info('#test sample = %d' % test_samples)
    # generate sentences
    logging.info('-----------------------generate--------------------------')
    start_time = time.time()
    result = generate_response(model,
                               test_data,
                               test_indices,
                               vocab,
                               maxlen=args.maxlen,
                               beam=args.beam,
                               penalty=args.penalty,
                               nbest=args.nbest)
    logging.info('----------------')
    logging.info('wall time = %f' % (time.time() - start_time))
    if args.output:
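Example #2 is cut off at the if args.output: check. A plausible continuation, shown here only as a sketch, would write the generated n-best responses to the path in args.output; the exact layout of result is an assumption, not taken from the original script.

if args.output:
    logging.info('writing results to ' + args.output)
    with open(args.output, 'w') as f:
        json.dump(result, f, indent=4)  # layout of result is an assumption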
Example #3
                                  dec_hsize=args.dec_hsize,
                                  att_size=args.att_size,
                                  sos=1,
                                  eos=2,
                                  ignore_label=3)
        else:
            print("Unknonw model type '{}' is specified.".format(args.type))
            sys.exit(1)

    # report data summary
    print('#vocab =', len(vocab))

    # make batchset for training
    print('Making mini batches for training data')
    train_indices = [None] * len(data)
    train_indices[0], train_samples = dh.make_batch_indices(
        data[0], args.train, batchsize, max_length=args.max_length)
    for n in six.moves.range(1, len(data)):
        train_indices[n], _ = dh.make_batch_indices(data[n],
                                                    args.train,
                                                    batchsize,
                                                    max_length=args.max_length,
                                                    reference=train_indices[0])
    print('#train sample =', train_samples, ' #train batch =',
          len(train_indices[0]))

    if args.valid != '':
        print('Making mini batches for validation data')
        valid_indices = [None] * len(data)
        valid_indices[0], valid_samples = dh.make_batch_indices(
            data[0], args.valid, batchsize, max_length=args.max_length)
        for n in six.moves.range(1, len(data)):
Example #4
     HLSTMDecoder(args.dec_layers,
                  len(vocab),
                  len(vocab),
                  args.embed_size,
                  args.hist_out_size + args.in_enc_hsize,
                  args.dec_hsize,
                  args.dec_psize,
                  independent=False,
                  dropout=dropout,
                  embed=embed_model))
 initialize_model_weights(model, "he", "xavier")
 # report data summary
 logging.info('#vocab = %d' % len(vocab))
 # make batchset for training
 logging.info('Making mini batches for training data')
 train_indices, train_samples = dh.make_batch_indices(
     train_data, args.batch_size, max_length=args.max_length)
 logging.info('#train sample = %d' % train_samples)
 logging.info('#train batch = %d' % len(train_indices))
 # make batchset for validation
 logging.info('Making mini batches for validation data')
 valid_indices, valid_samples = dh.make_batch_indices(
     valid_data, args.batch_size, max_length=args.max_length)
 logging.info('#validation sample = %d' % valid_samples)
 logging.info('#validation batch = %d' % len(valid_indices))
 # copy model to gpu
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 model.to(device)
 # save meta parameters
 path = args.model + '.conf'
 with open(path, 'wb') as f:
     pickle.dump((vocab, args), f, -1)
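Example #4 ends by pickling (vocab, args) into args.model + '.conf'. The test-time scripts in Examples #2 and #6 read a configuration of this kind back as train_args; a minimal sketch of that load step, assuming the same '.conf' naming convention, is:

import pickle

# restore the vocabulary and training configuration saved by the training script
with open(args.model + '.conf', 'rb') as f:
    vocab, train_args = pickle.load(f)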
Example #5
        model = nn.DataParallel(model)
    model.to(device)

    # prepare test data
    print('Making mini batches for test data from', args.test)
    data = []

    for n, feafile in enumerate(args.feafile):
        feature_data = dh.load(feafile, args.capfile, vocab=vocab)
        data.extend(dh.check_feature_shape(feature_data))

    test_indices = [None] * len(args.feafile)

    for n, feafile in enumerate(args.feafile):
        test_indices[n], test_samples = dh.make_batch_indices(data[n],
                                                              args.test,
                                                              1,
                                                              test=True)
        print('Feature[%d]: #test sample = %d  #test batch = %d' % (
            n, test_samples, len(test_indices[n])))

    print('#vocab =', len(vocab))
    # generate sentences
    print('-----------------------generate--------------------------')
    start_time = time.time()
    result = generate_caption(model,
                              data,
                              test_indices,
                              vocab,
                              train_args.in_size,
                              stride=train_args.frame_stride,
                              maxlen=args.maxlen,
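Example #5 wraps the model in nn.DataParallel before moving it to the selected device. A common, generic version of that setup (a sketch, not the original script's exact lines) is:

import torch
import torch.nn as nn

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.device_count() > 1:
    model = nn.DataParallel(model)  # replicate the forward pass across all visible GPUs
model.to(device)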
Example #6
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 model.to(device)
 # report data summary
 logging.info('#vocab = %d' % len(vocab))
 # prepare test data
 logging.info('Loading test data from ' + args.test_set)
 test_data = dh.load(train_args.fea_type,
                     args.test_path,
                     args.test_set,
                     vocab=vocab,
                     include_caption=train_args.include_caption,
                     separate_caption=train_args.separate_caption,
                     max_history_length=train_args.max_history_length,
                     merge_source=train_args.merge_source,
                     undisclosed_only=args.undisclosed_only)
 test_indices, test_samples = dh.make_batch_indices(
     test_data, 1, separate_caption=train_args.separate_caption)
 logging.info('#test sample = %d' % test_samples)
 # generate sentences
 logging.info('-----------------------generate--------------------------')
 start_time = time.time()
 labeled_test = None
 if args.undisclosed_only and args.labeled_test is not None:
     labeled_test = json.load(open(args.labeled_test, 'r'))
 result = generate_response(model,
                            test_data,
                            test_indices,
                            vocab,
                            maxlen=args.maxlen,
                            beam=args.beam,
                            penalty=args.penalty,
                            nbest=args.nbest,
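The generate_response call in Example #6 is truncated here. Whatever the remaining keyword arguments are, decoding of this kind is normally run with dropout disabled and gradient tracking switched off; a generic wrapper (an assumption, not shown in the original) looks like:

model.eval()               # disable dropout for deterministic decoding
with torch.no_grad():      # gradients are not needed at inference time
    result = generate_response(model, test_data, test_indices, vocab,
                               maxlen=args.maxlen, beam=args.beam,
                               penalty=args.penalty, nbest=args.nbest)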