Code example #1
def train(label2index, index2label):
    print("begin of training")
    # list of train videos
    with open('data/split1.train', 'r') as f:
        video_list = f.read().split('\n')[0:-1]
    # read train set
    dataset = Dataset('data', video_list, label2index)
    # train the network
    trainer = Trainer(dataset)
    print(" Traing trainer\n")
    trainer.train(batch_size=512, n_epochs=2, learning_rate=0.1)
    #trainer.train(batch_size = 512, n_epochs = 6, learning_rate = 0.01)
    # save training model
    trainer.save_model('results/net.model')
    print("Traing Done")

    # estimate prior, loss-based lengths, and monte-carlo grammar
    print("Preparing Prior")
    prior = estimate_prior(dataset)
    mean_lengths = loss_based_lengths(dataset)
    grammar = monte_carlo_grammar(dataset, mean_lengths, index2label)
    print("Grammar Done")

    np.savetxt('results/prior', prior)
    np.savetxt('results/mean_lengths', mean_lengths, fmt='%.3f')
    with open('results/grammar', 'w') as f:
        f.write('\n'.join(grammar) + '\n')
    print('All Done!')
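
A minimal usage sketch for this train(...) function: the label2index/index2label mappings are built from data/mapping.txt as in code example #2 below (the two-token "<index> <label>" line format is an assumption carried over from there):

import numpy as np  # train(...) above relies on numpy being imported in its module

label2index = dict()
index2label = dict()
with open('data/mapping.txt', 'r') as f:
    for line in f.read().split('\n')[0:-1]:
        index, label = line.split()  # assumes "<index> <label>" lines
        label2index[label] = int(index)
        index2label[int(index)] = label

train(label2index, index2label)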
Code example #2
def main(data_root, result_root, split, seed):

    result_root += "-s-%d-%d" % (split, seed)

    os.makedirs(result_root, exist_ok=True)

    ### read label2index mapping and index2label mapping ###########################
    label2index = dict()
    index2label = dict()
    # with open('data/mapping.txt', 'r') as f:
    with open(os.path.join(data_root, 'mapping.txt'), 'r') as f:
        content = f.read().split('\n')[0:-1]
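        # each line of mapping.txt has the form "<index> <label>" (format inferred from the parsing below)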
        for line in content:
            label2index[line.split()[1]] = int(line.split()[0])
            index2label[int(line.split()[0])] = line.split()[1]

    ### read training data #########################################################
    print('read data...')
    # with open('data/split1.train', 'r') as f:
    with open(os.path.join(data_root, 'split%d.train' % split), 'r') as f:
        video_list = f.read().split('\n')[0:-1]
    dataset = Dataset(data_root, video_list, label2index, shuffle = True)
    print('done')

    ### generate path grammar for inference ########################################
    paths = set()
    for _, transcript in dataset:
        paths.add( ' '.join([index2label[index] for index in transcript]) )
    #with open('results/grammar.txt', 'w') as f:
    with open(os.path.join(result_root, 'grammar.txt'), 'w') as f:
        f.write('\n'.join(paths) + '\n')

    ### actual nn-viterbi training #################################################
    decoder = Viterbi(None, None, frame_sampling = 30, max_hypotheses = np.inf) # (None, None): transcript-grammar and length-model are set for each training sequence separately, see trainer.train(...)
    trainer = Trainer(decoder, dataset.input_dimension, dataset.n_classes, buffer_size = len(dataset), buffered_frame_ratio = 25)
    learning_rate = 0.01

    # train for 10000 iterations
    for i in tqdm(range(NUM_ITERS)):
        sequence, transcript = dataset.get()
        loss = trainer.train(sequence, transcript, batch_size = 512, learning_rate = learning_rate)
        # print some progress information
        if (i+1) % 100 == 0:
            print('Iteration %d, loss: %f' % (i+1, loss))
        # save model every 1000 iterations
        if (i+1) % 1000 == 0:
            print('save snapshot ' + str(i+1))
            # network_file = 'results/network.iter-' + str(i+1) + '.net'
            network_file = os.path.join(result_root, 'network.iter-' + str(i + 1) + '.net')
            # length_file = 'results/lengths.iter-' + str(i+1) + '.txt'
            length_file = os.path.join(result_root, 'lengths.iter-' + str(i + 1) + '.txt')
            # prior_file = 'results/prior.iter-' + str(i+1) + '.txt'
            prior_file = os.path.join(result_root, 'prior.iter-' + str(i + 1) + '.txt')
            trainer.save_model(network_file, length_file, prior_file)
        # adjust learning rate after 2500 iterations
        if (i+1) == 2500:
            learning_rate = learning_rate * 0.1
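
Code example #2 references NUM_ITERS without defining it. A minimal entry-point sketch (NUM_ITERS taken from the "train for 10000 iterations" comment; the concrete argument values, especially seed, are assumptions):

NUM_ITERS = 10000  # matches the "# train for 10000 iterations" comment above

if __name__ == '__main__':
    main(data_root='data', result_root='results', split=1, seed=0)  # seed=0 is an assumption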
Code example #3
def train(label2index, index2label):
    # list of train videos
    with open('data/split1.train', 'r') as f:
        video_list = f.read().split('\n')[0:-1]
    # read train set
    print('read data...')
    dataset = Dataset('data', video_list, label2index)
    print('done')
    # train the network
    trainer = Trainer(dataset)
    trainer.train(batch_size=512, n_epochs=2, learning_rate=0.1)
    trainer.save_model('results/net.model')
    # estimate prior, loss-based lengths, and monte-carlo grammar
    prior = estimate_prior(dataset)
    mean_lengths = loss_based_lengths(dataset)
    grammar = monte_carlo_grammar(dataset, mean_lengths, index2label)
    np.savetxt('results/prior', prior)
    np.savetxt('results/mean_lengths', mean_lengths, fmt='%.3f')
    with open('results/grammar', 'w') as f:
        f.write('\n'.join(grammar) + '\n')
Code example #4
File: train.py  Project: iliasprc/CNN-RNN-Viterbi
#     paths.add(' '.join(path))  # tail of a commented-out grammar-generation block; the excerpt begins mid-block
print(len(paths), count)
#with open(results_path+'grammar.txt', 'w') as f:
#    f.write('\n'.join(paths) + '\n')

### actual nn-viterbi training #################################################
decoder = Viterbi(None, None, frame_sampling = 5, max_hypotheses = np.inf) # (None, None): transcript-grammar and length-model are set for each training sequence separately, see trainer.train(...)
trainer = Trainer(decoder,  dataset.n_classes, buffer_size = len(dataset), buffered_frame_ratio = 1)
learning_rate = 0.00001
avg_loss = 0
# train for 100000 iterations
for i in range(100000):
    sequence, transcript = dataset.get()
    #print('training',i)
    loss = trainer.train(sequence, transcript, batch_size = 1, learning_rate = learning_rate)
    avg_loss += loss
    # print some progress information
    if (i+1) % 10 == 0:
        print('Iteration %d, loss: %f  Average Loss %f' % (i + 1, loss, avg_loss / (i + 1)))
    # save model every 100 iterations
    if (i+1) % 100 == 0:
        print('save snapshot ' + str(i+1))
        network_file = results_path+'network.iter.net'
        length_file = results_path+'lengths.iter.txt'
        prior_file = results_path+'prior.iter.txt'
        trainer.save_model(network_file, length_file, prior_file)
    # adjust learning rate after 5000 iterations
    if (i+1) == 5000:
        learning_rate = learning_rate * 0.1
Code example #5
decoder = Viterbi(
    None, None,  # (None, None): transcript-grammar and length-model are set for each training sequence separately, see trainer.train(...)
    # remaining Viterbi(...) arguments are truncated in this excerpt
)
trainer = Trainer(decoder,
                  dataset.input_dimension,
                  dataset.n_classes,
                  buffer_size=len(dataset),
                  buffered_frame_ratio=25)
learning_rate = 0.01
window = 20
step = 5

# train for 100000 iterations
for i in range(100000):
    sequence, transcript = dataset.get()
    loss1, loss2 = trainer.train(sequence,
                                 transcript,
                                 batch_size=512,
                                 learning_rate=learning_rate,
                                 window=window,
                                 step=step)
    # print some progress information
    if (i + 1) % 100 == 0:
        print('Iteration %d, loss1: %f, loss2: %f, loss: %f' %
              (i + 1, loss1, loss2, loss1 - loss2))
    # save model every 1000 iterations
    if (i + 1) % 1000 == 0:
        network_file = 'results/network.iter-' + str(i + 1) + '.net'
        length_file = 'results/lengths.iter-' + str(i + 1) + '.txt'
        prior_file = 'results/prior.iter-' + str(i + 1) + '.txt'
        trainer.save_model(network_file, length_file, prior_file)
    # adjust learning rate after 80000 iterations
    if (i + 1) == 80000:
        learning_rate = learning_rate * 0.1
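
The window/step pair passed to trainer.train(...) in code example #5 suggests sliding-window processing of each frame sequence. A standalone sketch of that slicing for window=20, step=5 (an assumed interpretation, not code from the project):

def sliding_windows(n_frames, window=20, step=5):
    # start indices 0, step, 2*step, ... such that each full window fits inside the sequence
    return [(start, start + window) for start in range(0, n_frames - window + 1, step)]

print(sliding_windows(30))  # [(0, 20), (5, 25), (10, 30)]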