Example No. 1
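Cuts the feature matrix into overlapping segments (hop = half the window), expands each frame with temporal context via batch.make_context, and returns the raw network decisions; the mask returned by make_batch is unused in this variant.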
def do_classification(feature_data, predict, params):
    length = params[0]['max_length']
    x, m = batch.make_batch(feature_data, length, length / 2)
    x = batch.make_context(x, 15)
    #decision = predict(np.expand_dims(feature_data,axis=0).astype('float32'), np.ones(shape=(1,feature_data.shape[0])))
    decision = predict(x)
    return decision
Example No. 3
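Same idea, but with non-overlapping segments (hop = window) and the mask passed to predict; the decisions are summed over segments and frames before the argmax, and the winning index is looked up in batch.labels.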
def do_classification(feature_data, predict, params):
    length = params[0]['max_length']
    x, m = batch.make_batch(feature_data, length, length)
    x = batch.make_context(x, 15)
    #decision = predict(np.expand_dims(feature_data,axis=0).astype('float32'), np.ones(shape=(1,feature_data.shape[0])))
    decision = predict(x, m)
    pred_label = np.argmax(np.sum(decision, axis=(0, 1)), axis=-1)
    return batch.labels[pred_label]
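A hypothetical call site; predict (a compiled Theano function) and params (the (classifier_parameters, weights) tuple returned by the do_train examples below) are assumptions, as is the fake input shape:

# Hypothetical usage sketch; none of these names are shown on this page.
features = np.random.rand(500, 40).astype('float32')  # fake (frames, dims) matrix
label = do_classification(features, predict, params)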
Example No. 6
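Computes the misclassification count and cross-entropy cost over a batch iterator, with one target label per segment (onehot), both normalized by the number of items in b.index_bkup.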
def calc_error(b, predict):
    '''Return (error, cost) averaged over the batch iterator b.'''

    eps = 1e-10
    err = 0
    cost_val = 0
    for (x, y_lab, m) in b:
        x = batch.make_context(x, 15)
        y = onehot(y_lab)
        decision = predict(x.astype('float32')) + eps
        pred_label = np.argmax(decision, axis=-1)

        cost_val += -np.sum(y * np.log(decision))
        #pdb.set_trace()
        err += np.sum(np.expand_dims(pred_label, axis=1) != y_lab)
    err = err / float(len(b.index_bkup))
    cost_val = cost_val / len(b.index_bkup)
    return err, cost_val
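The onehot helper is not defined on this page. A minimal sketch of what it plausibly does, assuming integer class labels and float32 targets (the signature and class-count handling are assumptions):

import numpy as np

def onehot(labels, n_classes=None):
    # assumed behavior: map integer labels to a (n_samples, n_classes)
    # one-hot float32 matrix
    labels = np.asarray(labels).ravel().astype('int64')
    if n_classes is None:
        n_classes = labels.max() + 1
    out = np.zeros((len(labels), n_classes), dtype='float32')
    out[np.arange(len(labels)), labels] = 1.0
    return out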
Example No. 7
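Framewise variant of calc_error: targets are expanded to one label per frame (framewise_onehot), the mask is passed to predict, and errors are counted per frame rather than per segment.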
def calc_error(b, predict):
    '''Return (error, cost) averaged over the batch iterator b.'''

    eps = 1e-10
    err = 0
    cost_val = 0
    for (x, y, m) in b:
        x = batch.make_context(x, 15)
        y = framewise_onehot(y, x.shape[1])
        decision = predict(x.astype('float32'), m.astype('float32')) + eps
        pred_label = np.argmax(decision, axis=2)
        y_lab = np.argmax(y, axis=2)

        cost_val += -np.sum(y * np.log(decision))
        #pdb.set_trace()
        err += np.sum((pred_label != y_lab))
    err = err / len(b.index_bkup)
    cost_val = cost_val / len(b.index_bkup)
    return err, cost_val
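framewise_onehot is likewise not shown. Its call sites pass x.shape[1] (the frame count of a (batch, frames, dims) tensor), so it plausibly repeats a per-segment one-hot target across all frames; a sketch under that assumption, reusing the onehot sketch above:

def framewise_onehot(labels, n_frames, n_classes=None):
    # assumed behavior: (batch,) labels -> (batch, n_frames, n_classes) targets
    y = onehot(labels, n_classes)
    return np.repeat(y[:, np.newaxis, :], n_frames, axis=1)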
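Example No. 9

Trains a dense network on segment-level one-hot targets with Adadelta on a cross-entropy cost, evaluates on the validation and test sets after every epoch, and stops early after 70 epochs without improvement in validation cost.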
def do_train(data, data_val, data_test, **classifier_parameters):
    ''' input
        -----
        data: {label: np.array(features)}
        classifier_parameters: {n_layers: 3, ...}

        output
        ------
        (classifier_parameters, best_params), where best_params is the
        result of lasagne.layers.get_all_param_values(network) at the
        best validation epoch
    '''
    import time
    batch_maker = batch.Batch(data,
                              isShuffle=True,
                              seg_window=classifier_parameters['max_length'],
                              seg_hop=classifier_parameters['max_length'] / 2)
    b_v = batch.Batch(data_val,
                      isShuffle=True,
                      seg_window=classifier_parameters['max_length'],
                      seg_hop=classifier_parameters['max_length'] / 2)
    b_t = batch.Batch(data_test,
                      isShuffle=True,
                      seg_window=classifier_parameters['max_length'],
                      seg_hop=classifier_parameters['max_length'] / 2)

    input_var = T.tensor3('input')
    mask = T.matrix('mask')  # declared but unused in this dense variant
    target_output = T.matrix('target_output')
    network, layers = build(input_var, **classifier_parameters)

    eps = 1e-10
    loss_train = cost(
        lasagne.layers.get_output(network, deterministic=False) + eps,
        target_output)
    loss_eval = cost(
        lasagne.layers.get_output(network, deterministic=True) + eps,
        target_output)
    all_params = lasagne.layers.get_all_params(network)
    updates = lasagne.updates.adadelta(loss_train, all_params,
                                       learning_rate=1.0)
    #updates = lasagne.updates.momentum(loss_train, all_params,
    #                                   learning_rate, momentum)
    pred_fun = lasagne.layers.get_output(network, deterministic=True)
    train = theano.function([input_var, target_output], loss_train,
                            updates=updates)
    #compute_cost = theano.function([input_var, target_output, mask], loss_eval)
    predict = theano.function([input_var], pred_fun)


    #theano.config.warn_float64='pdb'
    print "start training"

    #err, cost_test = calc_error(data_val,predict)
    epoch = 0
    no_best = 70  # early-stopping patience, in epochs
    best_cost = np.inf
    best_epoch = epoch
    model_params = []
    # TO REMOVE
    #model_params.append(lasagne.layers.get_all_param_values(network))
    while epoch < 500:

        start_time = time.time()
        cost_train = 0
        for _, (x, y, m) in enumerate(batch_maker):
            x = batch.make_context(x, 15)
            x = x.astype('float32')
            m = m.astype('float32')
            y = onehot(y)
            y = y.astype('float32')

            assert (not np.any(np.isnan(x)))
            cost_train += train(x, y) * x.shape[0]  #* x.shape[1]
            assert (not np.isnan(cost_train))
        cost_train = cost_train / len(batch_maker.index_bkup)
        err_val, cost_val = calc_error(b_v, predict)
        err_test, cost_test = calc_error(b_t, predict)
        #cost_val, err_val = 0, 0
        #pdb.set_trace()
        end_time = time.time()

        is_better = False
        if cost_val < best_cost:
            best_cost = cost_val
            best_epoch = epoch
            is_better = True

        if is_better:
            print "epoch: {} ({}s), training cost: {}, val cost: {}, val err: {}, test cost {}, test err: {}, New best.".format(
                epoch, end_time - start_time, cost_train, cost_val, err_val,
                cost_test, err_test)
        else:
            print "epoch: {} ({}s), training cost: {}, val cost: {}, val err: {}, test cost {}, test err: {}".format(
                epoch, end_time - start_time, cost_train, cost_val, err_val,
                cost_test, err_test)

        sys.stdout.flush()
        model_params.append(lasagne.layers.get_all_param_values(network))
        #check_path('dnn')
        #save_data('dnn/epoch_{}.autosave'.format(epoch), (classifier_parameters, model_params[best_epoch]))
        #savename = os.path.join(modelDir,'epoch_{}.npz'.format(epoch))
        #files.save_model(savename,structureDic,lasagne.layers.get_all_param_values(network))
        if epoch - best_epoch >= no_best:
            ## Early stopping
            print "Training stops, best epoch is {}".format(best_epoch)
            break
        epoch += 1
    return (classifier_parameters, model_params[best_epoch])
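A hypothetical invocation; max_length is the only key do_train reads directly, and n_layers/n_dense stand in for whatever the (unshown) build() actually expects:

# Hypothetical call: data dicts map label -> np.array of features, as the
# docstring states; n_layers and n_dense are placeholders for build()'s kwargs.
structure, weights = do_train(train_data, val_data, test_data,
                              max_length=100, n_layers=3, n_dense=512)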
Example No. 10
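Framewise variant (the autosave path suggests a recurrent/LSTM network): targets are a 3-D tensor with one label per frame via framewise_onehot, the mask enters the Theano graph, validation and test batches are capped at 500 items, and the best parameters so far are autosaved under lstm/ after every epoch.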
def do_train(data, data_val, data_test, **classifier_parameters):
    ''' input
        -----
        data: {label: np.array(features)}
        classifier_parameters: {n_layers: 3, ...}

        output
        ------
        (classifier_parameters, best_params), where best_params is the
        result of lasagne.layers.get_all_param_values(nnet) at the
        best validation epoch
    '''
    import time
    import pdb
    batch_maker = batch.Batch(data,
                              isShuffle=True,
                              seg_window=classifier_parameters['max_length'],
                              seg_hop=classifier_parameters['max_length'] / 2)
    b_v = batch.Batch(data_val,
                      max_batchsize=500,
                      seg_window=classifier_parameters['max_length'],
                      seg_hop=classifier_parameters['max_length'] / 2)
    b_t = batch.Batch(data_test,
                      max_batchsize=500,
                      seg_window=classifier_parameters['max_length'],
                      seg_hop=classifier_parameters['max_length'] / 2)

    input_var = T.tensor3('input')
    mask = T.matrix('mask')
    target_output = T.tensor3('target_output')
    nnet, layers = build(input_var, mask, **classifier_parameters)

    eps = 1e-10
    loss_train = cost(
        lasagne.layers.get_output(nnet, deterministic=False) + eps,
        target_output, mask)
    loss_eval = cost(
        lasagne.layers.get_output(nnet, deterministic=True) + eps,
        target_output, mask)
    all_params = lasagne.layers.get_all_params(nnet, trainable=True)
    updates = lasagne.updates.adadelta(loss_train,
                                       all_params,
                                       learning_rate=1.0)
    #updates = lasagne.updates.momentum(loss_train , all_params,
    #learning_rate, momentum)
    pred_fun = lasagne.layers.get_output(nnet, deterministic=True)
    train = theano.function([input_var, target_output, mask],
                            loss_train,
                            updates=updates)
    #compute_cost = theano.function([input_var, target_output, mask],loss_eval)
    predict = theano.function([input_var, mask], pred_fun)

    #theano.config.warn_float64='pdb'
    print "start training"

    #err, cost_test = calc_error(data_val,predict)
    epoch = 0
    no_best = 70
    best_cost = np.inf
    best_epoch = epoch
    model_params = []
    # TO REMOVE
    #model_params.append(lasagne.layers.get_all_param_values(nnet))
    while epoch < 10000:

        start_time = time.time()
        cost_train = 0
        for _, (x, y, m) in enumerate(batch_maker):
            x = x.astype('float32')
            x = batch.make_context(x, 15)
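            # NOTE: the mask from the batch is discarded and replaced with all
            # ones, so no frames are actually masked out during training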
            m = np.ones_like(m)
            m = m.astype('float32')
            y = framewise_onehot(y, x.shape[1])
            y = y.astype('float32')

            assert (not np.any(np.isnan(x)))
            cost_train += train(x, y, m) * x.shape[0]  #*x .shape[1]
            assert (not np.isnan(cost_train))
        cost_train = cost_train / len(batch_maker.index_bkup)
        err_val, cost_val = calc_error(b_v, predict)

        err_test, cost_test = calc_error(b_t, predict)
        #cost_val, err_val = 0, 0
        #pdb.set_trace()
        end_time = time.time()

        print "epoch: {} ({}s), training cost: {}, val cost: {}, val err: {}, test cost {}, test err: {}".format(
            epoch, end_time - start_time, cost_train, cost_val, err_val,
            cost_test, err_test)
        is_better = False
        if cost_val < best_cost:
            best_cost = cost_val
            best_epoch = epoch
            is_better = True
        # track the best epoch before autosaving, so the saved parameters
        # correspond to the best validation cost seen so far
        model_params.append(lasagne.layers.get_all_param_values(nnet))
        check_path('lstm')
        save_data('lstm/epoch_{}.autosave'.format(epoch),
                  (classifier_parameters, model_params[best_epoch]))
        #savename = os.path.join(modelDir,'epoch_{}.npz'.format(epoch))
        #files.save_model(savename,structureDic,lasagne.layers.get_all_param_values(nnet))
        if epoch - best_epoch >= no_best:
            ## Early stopping
            break
        epoch += 1
    return (classifier_parameters, model_params[best_epoch])
Example No. 11
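Like the first do_train variant (Example No. 9), but with explicit batch-size caps, a much shorter early-stopping patience (10 epochs), and a try/except guard so an interrupted run still returns the best model collected so far.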
def do_train(data, data_val, data_test, **classifier_parameters):
    ''' input
        -----
        data: {label: np.array(features)}
        classifier_parameters: {n_layers: 3, ...}

        output
        ------
        (classifier_parameters, best_params), where best_params is the
        result of lasagne.layers.get_all_param_values(network) at the
        best validation epoch
    '''
    import time
    batch_maker = batch.Batch(data,
                              isShuffle=True,
                              seg_window=classifier_parameters['max_length'],
                              seg_hop=classifier_parameters['max_length'] / 2,
                              max_batchsize=800)
    b_v = batch.Batch(data_val,
                      isShuffle=True,
                      seg_window=classifier_parameters['max_length'],
                      seg_hop=classifier_parameters['max_length'] / 2,
                      max_batchsize=400)
    b_t = batch.Batch(data_test,
                      isShuffle=True,
                      seg_window=classifier_parameters['max_length'],
                      seg_hop=classifier_parameters['max_length'] / 2,
                      max_batchsize=400)

    input_var = T.tensor3('input')
    mask = T.matrix('mask')
    target_output = T.matrix('target_output')
    network, layers = build(input_var, **classifier_parameters)

    eps = 1e-10
    loss_train = cost(
        lasagne.layers.get_output(network, deterministic=False) + eps,
        target_output)
    loss_eval = cost(
        lasagne.layers.get_output(network, deterministic=True) + eps,
        target_output)
    all_params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adadelta(loss_train,
                                       all_params,
                                       learning_rate=1.0)
    #updates = lasagne.updates.momentum(loss_train , all_params,
    #0.001, 0.9)
    pred_fun = lasagne.layers.get_output(network, deterministic=True)
    train = theano.function([input_var, target_output],
                            loss_train,
                            updates=updates)
    #compute_cost = theano.function([input_var, target_output, mask],loss_eval)
    predict = theano.function([input_var], pred_fun)

    #theano.config.warn_float64='pdb'
    print "start training"

    #err, cost_test = calc_error(data_val,predict)
    epoch = 0
    no_best = 10
    best_cost = np.inf
    best_epoch = epoch
    model_params = []
    try:
        while epoch < 500:

            start_time = time.time()
            cost_train = 0
            for _, (x, y, m) in enumerate(batch_maker):
                x = batch.make_context(x, 15)
                x = x.astype('float32')
                m = m.astype('float32')
                y = onehot(y)
                y = y.astype('float32')

                assert (not np.any(np.isnan(x)))
                cost_train += train(x, y) * x.shape[0]  #*x .shape[1]
                assert (not np.isnan(cost_train))
            cost_train = cost_train / len(batch_maker.index_bkup)
            err_val, cost_val = calc_error(b_v, predict)

            err_test, cost_test = calc_error(b_t, predict)
            #cost_val, err_val = 0, 0
            #pdb.set_trace()
            end_time = time.time()

            is_better = False
            if cost_val < best_cost:
                best_cost = cost_val
                best_epoch = epoch
                is_better = True

            if is_better:
                print "epoch: {} ({}s), training cost: {}, val cost: {}, val err: {}, test cost {}, test err: {}, New best.".format(
                    epoch, end_time - start_time, cost_train, cost_val,
                    err_val, cost_test, err_test)
            else:
                print "epoch: {} ({}s), training cost: {}, val cost: {}, val err: {}, test cost {}, test err: {}".format(
                    epoch, end_time - start_time, cost_train, cost_val,
                    err_val, cost_test, err_test)

            sys.stdout.flush()
            model_params.append(lasagne.layers.get_all_param_values(network))
            #check_path('dnn')
            #save_data('dnn/epoch_{}.autosave'.format(epoch), (classifier_parameters, model_params[best_epoch]))
            #savename = os.path.join(modelDir,'epoch_{}.npz'.format(epoch))
            #files.save_model(savename,structureDic,lasagne.layers.get_all_param_values(network))
            if epoch - best_epoch >= no_best:
                ## Early stopping
                print "Training stops, best epoch is {}".format(best_epoch)
                break
            epoch += 1
    except:
        # a bare except also catches KeyboardInterrupt, so an interrupted run
        # still returns the best model collected so far
        print "Unexpectedly stopped, returning model %d" % best_epoch
        if not model_params:
            raise  # nothing trained yet, re-raise instead of indexing an empty list
        return (classifier_parameters, model_params[best_epoch])
    return (classifier_parameters, model_params[best_epoch])
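To reuse a returned model, one would presumably rebuild the graph from the stored hyperparameters and load the weights back with the standard Lasagne call set_all_param_values; build() is assumed to be the same constructor used in the training code:

# Sketch: rebuild the network from the stored structure, then restore weights.
structure, weights = do_train(train_data, val_data, test_data, max_length=100)
input_var = T.tensor3('input')
network, layers = build(input_var, **structure)
lasagne.layers.set_all_param_values(network, weights)
predict = theano.function([input_var],
                          lasagne.layers.get_output(network, deterministic=True))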