Example #1
0
            break
    kaldiIn.close()

    input_shape_train = conv_configs[0]['input_shape']
    input_shape_1 = (input_shape_train[1], input_shape_train[2], input_shape_train[3])
    num_utt = len(feat_mats_np)
    feat_mats = []
    for i in xrange(num_utt):
        feat_mats.append(numpy.reshape(feat_mats_np[i], (feat_rows[i],) + input_shape_1))

    rng = numpy.random.RandomState(123)
    theano_rng = RandomStreams(rng.randint(2 ** 30))

    cnn = CNN_Forward(numpy_rng = rng, theano_rng=theano_rng,
                 conv_layer_configs = conv_configs, use_fast = use_fast)
    _file2cnn(cnn.conv_layers, filename=conv_net_file)
    out_function = cnn.build_out_function(feat_mats)

    log('> ... processing the data')

    kaldiOut = KaldiWriteOut(output_scp,output_ark)
    kaldiOut.open()
    for i in xrange(num_utt):
        feat_out = out_function(feat_mats[i])
        kaldiOut.write(uttIDs[i], feat_out)
    kaldiOut.close()

    end_time = time.clock()
    print >> sys.stderr, ('The code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))
Example #2
0
    numpy_rng = numpy.random.RandomState(89677)
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... building the model')
    # construct the cnn architecture
    cnn = CNN_SAT(numpy_rng=numpy_rng, theano_rng = theano_rng,
              batch_size = batch_size, n_outs=n_outs,
              conv_layer_configs = conv_layer_configs,
              hidden_layers_sizes = hidden_layers_sizes,
              ivec_layers_sizes = ivec_layers_sizes,
              conv_activation = conv_activation, 
              full_activation = full_activation,
              use_fast = use_fast, update_part = update_part, ivec_dim = ivec_dim)

    if arguments.has_key('conv_input_file'):
        _file2cnn(cnn.conv_layers, filename=arguments['conv_input_file'])
    if arguments.has_key('full_input_file'):
        _file2nnet(cnn.full_layers, filename = arguments['full_input_file'])
    if arguments.has_key('ivec_input_file'):
        _file2nnet(cnn.ivec_layers, set_layer_num = len(ivec_layers_sizes) + 1, filename = arguments['ivec_input_file'], withfinal=False)

    # get the training, validation and testing function for the model
    log('> ... getting the finetuning functions')
    train_fn, valid_fn = cnn.build_finetune_functions(
                (train_x, train_y), (valid_x, valid_y),
                batch_size=batch_size)

    log('> ... finetunning the model')
    start_time = time.clock()

    while (lrate.get_rate() != 0):
Example #3
0
    theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
    log('> ... building the model')
    # construct the cnn architecture
    cnn = CNN(numpy_rng=numpy_rng, theano_rng = theano_rng,
              batch_size = batch_size, n_outs=n_outs,
              conv_layer_configs = conv_layer_configs,
              hidden_layers_sizes = hidden_layers_sizes,
              conv_activation = conv_activation, 
              full_activation = full_activation,
              use_fast = use_fast, update_layers = update_layers)

    total_layer_number = len(cnn.layers)
    if full_ptr_layer_number > 0:
        _file2nnet(cnn.layers[len(conv_layer_configs):total_layer_number], set_layer_num = full_ptr_layer_number, filename = full_ptr_file,  withfinal=False)
    if conv_ptr_layer_number > 0:
        _file2cnn(cnn.layers[0:len(conv_layer_configs)], filename=conv_ptr_file)
    # get the training, validation and testing function for the model
    log('> ... getting the finetuning functions')
    train_fn, valid_fn = cnn.build_finetune_functions(
                (train_x, train_y), (valid_x, valid_y),
                batch_size=batch_size)

    log('> ... finetunning the model')
    start_time = time.clock()

    while (lrate.get_rate() != 0):
        train_error = []
        while (not train_sets.is_finish()):
            train_sets.load_next_partition(train_xy)
            for batch_index in xrange(train_sets.cur_frame_num / batch_size):  # loop over mini-batches
	        train_error.append(train_fn(index=batch_index, learning_rate = lrate.get_rate(), momentum = momentum))