Example #1
def evaluate_model(NNInput):

    datasets, datasetsTry, G_MEAN, G_SD, RDataOrig, yDataOrig, yDataDiatOrig = load_data(NNInput)

    if (NNInput.Model=='ModPIP') or (NNInput.Model=='PIP'):
        NNInput.NLayers = NNInput.NHid
        NNInput.NLayers.insert(0,NNInput.NIn)
        NNInput.NLayers.append(NNInput.NOut)

    InputVar = T.dmatrix('Inputs')
    Layers   = create_nn(NNInput, InputVar, 1, G_MEAN, G_SD)


    if (NNInput.TryNNFlg > 0):
        for i, Ang in enumerate(NNInput.AngVector):
            RSetTry, GSetTry, ySetTry, ySetTryDiat, ySetTryTriat = datasetsTry[i]
            if (NNInput.Model == 'ModPIP') or (NNInput.Model == 'ModPIPPol'):
                xSetTry = RSetTry
            elif (NNInput.Model == 'PIP'):
                xSetTry = GSetTry
            NTry                  = xSetTry.get_value(borrow=True).shape[0]
            NBatchTry             = NTry // NNInput.NMiniBatch
            yPredTry = lasagne.layers.get_output(Layers[-1], inputs=xSetTry) 
            PathToTryLabels = NNInput.PathToOutputFldr + '/REBestDet.csv.' + str(Ang)
            yPredTry = T.cast(yPredTry, 'float64')
            yPredTry = yPredTry.eval() 
            yPredTry = InverseTransformation(NNInput, yPredTry, ySetTryDiat.get_value())
            ySetTry  = T.cast(ySetTry, 'float64')
            ySetTry  = ySetTry.eval()
            ySetTry  = InverseTransformation(NNInput, ySetTry, ySetTryDiat.get_value())
            save_to_plot(PathToTryLabels, 'Evaluated', numpy.concatenate((RSetTry.get_value(), ySetTry, yPredTry), axis=1))


    for iLayer in range(len(NNInput.NLayers)-1):
        PathToFldr = NNInput.PathToOutputFldr + Layers[iLayer].name + '/'
        if not os.path.exists(PathToFldr):
            os.makedirs(PathToFldr)
        if (NNInput.Model == 'ModPIP'):
            if (iLayer == 0) and (NNInput.BondOrderStr != 'DiatPotFun'):
                save_parameters_PIP(PathToFldr, Layers[iLayer].Lambda.get_value(), Layers[iLayer].re.get_value())
            elif (iLayer > 1):
                if (NNInput.BiasesFlg):
                    save_parameters(PathToFldr, Layers[iLayer].W.get_value(), Layers[iLayer].b.get_value())
                else:
                    save_parameters_NoBiases(PathToFldr, Layers[iLayer].W.get_value())
        elif (NNInput.Model == 'ModPIPPol'):
            if (iLayer == 0) and (NNInput.BondOrderStr != 'DiatPotFun'):
                save_parameters_PIP(PathToFldr, Layers[iLayer].Lambda.get_value(), Layers[iLayer].re.get_value())
            elif (iLayer==1):
                save_parameters_NoBiases(PathToFldr, Layers[iLayer].W.get_value())
        elif (NNInput.Model == 'PIP'):
            if (NNInput.BiasesFlg):
                save_parameters(PathToFldr, Layers[iLayer].W.get_value(), Layers[iLayer].b.get_value())
            else:
                save_parameters_NoBiases(PathToFldr, Layers[iLayer].W.get_value())


    compute_cuts(NNInput, NNInput.AnglesCuts, NNInput.RCuts, Layers, G_MEAN, G_SD)
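A minimal standalone sketch of the evaluation idiom used throughout this example (build a Lasagne network, then evaluate its symbolic output on a fixed input via eval()); the one-layer network here is illustrative, not the create_nn architecture:

import numpy
import theano
import lasagne

l_in  = lasagne.layers.InputLayer(shape=(None, 3))
l_out = lasagne.layers.DenseLayer(l_in, num_units=1, nonlinearity=None)

xSet  = theano.shared(numpy.random.rand(5, 3))          # plays the role of xSetTry
yPred = lasagne.layers.get_output(l_out, inputs=xSet)   # symbolic expression
print(yPred.eval())                                     # concrete (5, 1) predictions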
Example #2
def evaluation(model, supervisor, num_label):
    teX, teY, num_te_batch = load_data(cfg.dataset,
                                       cfg.batch_size,
                                       is_training=False)
    fd_test_acc = save_to()
    with supervisor.managed_session(config=tf.ConfigProto(
            allow_soft_placement=True)) as sess:
        supervisor.saver.restore(sess, tf.train.latest_checkpoint(cfg.logdir))
        tf.logging.info('Model restored!')

        test_acc = 0
        prob = np.zeros((num_te_batch * cfg.batch_size, num_label))
        for i in tqdm(range(num_te_batch),
                      total=num_te_batch,
                      ncols=70,
                      leave=False,
                      unit='b'):
            start = i * cfg.batch_size
            end = start + cfg.batch_size
            acc, prob[start:end, :] = sess.run(
                [model.accuracy, model.activation], {
                    model.X: teX[start:end],
                    model.labels: teY[start:end]
                })
            test_acc += acc
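        # model.accuracy is assumed to return the count of correct predictions in a batch, so this yields the fraction correct over all test examples.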
        test_acc = test_acc / (cfg.batch_size * num_te_batch)
        np.savetxt(cfg.results + '/prob_test.txt', prob, fmt='%1.2f')
        print(
            'Classification probability for each category has been saved to ' +
            cfg.results + '/prob_test.txt')
        fd_test_acc.write(str(test_acc))
        fd_test_acc.close()
        print('Test accuracy has been saved to ' + cfg.results +
              '/test_accuracy.txt')
Example #3
def load(q, y_type):
    main = load_data(
        lag_year=5,
        sql_version=False)  # main = entire dataset before standardization/qcut
    col = main.columns[2:-2]
    # dfs = sample_from_main(main, y_type=y_type, part=1, q=q)  # part=1: i.e. test over entire 150k records
    # x, y = dfs[0]
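    # assumes a module-level "import datetime as dt" elsewhere in this file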
    x, X_test, y, Y_test = sample_from_datacqtr(main,
                                                y_type=y_type,
                                                testing_period=dt.datetime(
                                                    2008, 3, 31),
                                                q=q)
    return x, y
Example #4
from keras.models import Model, load_model
import numpy as np
from LoadData import load_data
import gc
import random
from keras.applications.mobilenet import relu6,DepthwiseConv2D
import datetime
from keras.preprocessing import image
gc.collect()
from skimage import io, transform, img_as_ubyte
import os
lst = ['鸟','荷','竹','马','菊','兰','柳','梅','山']  # class labels: bird, lotus, bamboo, horse, chrysanthemum, orchid, willow, plum, mountain
starttime = datetime.datetime.now()
X_test, Y_test = load_data('E:\\Qianrushi\\test')

image_size = (224, 224)

# MobileNet preprocessing: scale pixel values from [0, 255] to [-1, 1]
X_test = X_test.astype('float32')
X_test /= 255.
X_test -= 0.5
X_test *= 2.


model = load_model('mobilenet1.hdf5',custom_objects={
                  'relu6': relu6,
                  'DepthwiseConv2D': DepthwiseConv2D})

# model = load_model('finalvgg16.hdf5')
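A plausible continuation (not part of the original snippet): mapping the predicted class probabilities back to the character labels in lst, assuming the label order matches the training classes:

preds  = model.predict(X_test)
labels = [lst[i] for i in preds.argmax(axis=1)]   # predicted label per test image
print(labels[:10])
print('Elapsed:', datetime.datetime.now() - starttime)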
Example #5
def sgd_optimization(NNInput):
    """
    Demonstrate stochastic gradient descent optimization of a log-linear model

    :type LearningRate: float
    :param LearningRate: learning rate used (factor for the stochastic gradient)

    :type NEpoch: int
    :param NEpoch: maximal number of epochs to run the optimizer

    :type PathToData: string
    :param PathToData: the path of the dataset file

    """

    def normalized_squared_error(a, b, expon):
        """Computes the element-wise squared normalized difference between two tensors.
        .. math:: L = ( (p - t) / |t|^{expon} )^2
        Parameters
        ----------
        a, b : Theano tensor
            The tensors to compute the squared difference between.
        expon : float
            Exponent applied to the magnitude of the target in the normalization.
        Returns
        -------
        Theano tensor
            An expression for the item-wise squared normalized difference.
        """
        a, b = align_targets(a, b)
        return T.square((a - b) / T.abs_(b)**expon)



    def weighted_squared_error(a, b, Shift, Power):
        """Computes the element-wise weighted squared difference between two tensors.
        .. math:: L = ( Shift / t )^{Power} (p - t)^2
        Parameters
        ----------
        a, b : Theano tensor
            The tensors to compute the squared difference between.
        Shift, Power : float
            Parameters of the target-dependent weight.
        Returns
        -------
        Theano tensor
            An expression for the item-wise weighted squared difference.
        """
        a, b = align_targets(a, b)
        w    = T.power(Shift/b, Power)
        return w * T.square(a - b)



    def align_targets(predictions, targets):
        """Helper function turning a target 1D vector into a column if needed.
        This way, combining a network of a single output unit with a target vector
        works as expected by most users, not broadcasting outputs against targets.
        Parameters
        ----------
        predictions : Theano tensor
            Expression for the predictions of a neural network.
        targets : Theano tensor
            Expression or variable for corresponding targets.
        Returns
        -------
        predictions : Theano tensor
            The predictions unchanged.
        targets : Theano tensor
            If `predictions` is a column vector and `targets` is a 1D vector,
            returns `targets` turned into a column vector. Otherwise, returns
            `targets` unchanged.
        """
        if (getattr(predictions, 'broadcastable', None) == (False, True) and
                getattr(targets, 'ndim', None) == 1):
            targets = as_theano_expression(targets).dimshuffle(0, 'x')
        return predictions, targets
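    # Illustration of why the alignment matters: with broadcasting, an (N, 1)
    # column of predictions minus an (N,) target vector silently produces an
    # (N, N) matrix of pairwise differences instead of N elementwise ones, e.g.
    #   numpy.ones((3, 1)) - numpy.ones(3)   ->   shape (3, 3)
    # Promoting the targets to a column vector restores elementwise behavior.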


    ##################################################################################################################################
    ### LOADING DATA
    ##################################################################################################################################
    print('\nLoading Data ... \n')

    if (NNInput.TryNNFlg > 0):
        datasets, datasetsTry, G_MEAN, G_SD, RDataOrig, yDataOrig, yDataDiatOrig = load_data(NNInput)
    else:
        datasets, G_MEAN, G_SD, RDataOrig, yDataOrig, yDataDiatOrig = load_data(NNInput)

    RSetTrain, GSetTrain, ySetTrain, ySetTrainDiat, ySetTrainTriat = datasets[0]
    RSetValid, GSetValid, ySetValid, ySetValidDiat, ySetValidTriat = datasets[1]
    #RSetTest,  GSetTest,  ySetTest,  ySetTestDiat,  ySetTestTriat  = datasets[2]


    #plot_set(NNInput, RSetTrain.get_value(), ySetTrainDiat.get_value(), RSetValid.get_value(), ySetValidDiat.get_value(), RSetTest.get_value(), ySetTestDiat.get_value())
    
    NNInput.NIn  = RSetTrain.get_value(borrow=True).shape[1]
    NNInput.NOut = ySetTrain.get_value(borrow=True).shape[1] 
    print(('    Nb of Input:  %i')    % NNInput.NIn)
    print(('    Nb of Output: %i \n') % NNInput.NOut)
    if (NNInput.Model=='ModPIP') or (NNInput.Model=='PIP'):
        NNInput.NLayers = NNInput.NHid
        NNInput.NLayers.insert(0,NNInput.NIn)
        NNInput.NLayers.append(NNInput.NOut)

    NTrain      = RSetTrain.get_value(borrow=True).shape[0]
    NBatchTrain = NTrain // NNInput.NMiniBatch
    NValid      = RSetValid.get_value(borrow=True).shape[0]
    #NTest       = RSetTest.get_value(borrow=True).shape[0]
    print(('    Nb of Training   Examples: %i')    % NTrain)
    print(('    Nb of Training   Batches:  %i') % NBatchTrain)
    print(('    Nb of Validation Examples: %i')    % NValid)
    #print(('    Nb of Test       Examples: %i \n') % NTest)



    ######################
    # BUILD ACTUAL MODEL #
    ######################  
    InputVar  = T.dmatrix('Inputs')
    #InputVar.tag.test_value  = numpy.random.randint(100,size=(100,3))
    InputVar.tag.test_value  = numpy.array([[1.0,2.0,7.0],[3.0,5.0,11.0]]) * 0.529177
    TargetVar = T.dmatrix('Targets')
    #TargetVar.tag.test_value = numpy.random.randint(100,size=(100,1))


    Layers = create_nn(NNInput, InputVar, TargetVar)

    TrainPrediction  = lasagne.layers.get_output(Layers[-1])
    if (NNInput.LossFunction == 'squared_error'):
        TrainError       = T.sqr(TrainPrediction - TargetVar)
        TrainLoss        = lasagne.objectives.squared_error(TrainPrediction, TargetVar)
    elif (NNInput.LossFunction == 'normalized_squared_error'):
        TrainError       = T.abs_( (TrainPrediction - TargetVar) / T.abs_(TargetVar)**NNInput.OutputExpon)
        TrainLoss        = normalized_squared_error(TrainPrediction, TargetVar, NNInput.OutputExpon)
    elif (NNInput.LossFunction == 'huber_loss'):
        TrainError       = T.abs_( (TrainPrediction - TargetVar) )
        TrainLoss        = lasagne.objectives.huber_loss(TrainPrediction, TargetVar, delta=5)
    elif (NNInput.LossFunction == 'weighted_squared_error'):
        TrainError       = T.abs_( (TrainPrediction - TargetVar) )
        TrainLoss        = weighted_squared_error(TrainPrediction, TargetVar, NNInput.Shift, NNInput.Power)

    if (NNInput.Model == 'ModPIP'):
        LayersK          = {Layers[2]: 1.0, Layers[3]: 1.0}
    elif (NNInput.Model=='ModPIPPol'):
        LayersK          = {Layers[1]: 1.0}
    elif (NNInput.Model=='PIP'):
        LayersK          = {Layers[0]: 1.0, Layers[1]: 1.0}
    L2Penalty        = regularize_layer_params_weighted(LayersK, l2)
    L1Penalty        = regularize_layer_params_weighted(LayersK, l1)
    TrainLoss        = TrainLoss.mean() + NNInput.kWeightDecay[0] * L1Penalty + NNInput.kWeightDecay[1] * L2Penalty

    params           = lasagne.layers.get_all_params(Layers[-1], trainable=True)
    if (NNInput.Method == 'nesterov'):
        updates          = lasagne.updates.nesterov_momentum(TrainLoss, params, learning_rate=NNInput.LearningRate, momentum=NNInput.kMomentum)
    elif (NNInput.Method == 'rmsprop'):
        updates          = lasagne.updates.rmsprop(TrainLoss, params, learning_rate=NNInput.LearningRate, rho=NNInput.RMSProp[0], epsilon=1e-06)
    elif (NNInput.Method == 'adamax'):
        updates          = lasagne.updates.adamax(TrainLoss, params, learning_rate=NNInput.LearningRate, beta1=0.9, beta2=0.999, epsilon=1e-08)
    elif (NNInput.Method == 'amsgrad'):
        updates          = lasagne.updates.amsgrad(TrainLoss, params, learning_rate=NNInput.LearningRate, beta1=0.9, beta2=0.999, epsilon=1e-08)
    elif (NNInput.Method == 'adam'):
        updates          = lasagne.updates.adam(TrainLoss, params, learning_rate=NNInput.LearningRate, beta1=0.9, beta2=0.999, epsilon=1e-08)
    elif (NNInput.Method == 'adadelta'):
        updates          = lasagne.updates.adadelta(TrainLoss, params, learning_rate=NNInput.LearningRate, rho=0.95, epsilon=1e-08)
    TrainFn = theano.function(inputs=[InputVar, TargetVar], outputs=[TrainError, TrainLoss], updates=updates)


    ValidPrediction = lasagne.layers.get_output(Layers[-1], deterministic=True)

    if (NNInput.LossFunction == 'squared_error'):
        ValidError      = T.sqr(ValidPrediction - TargetVar)
    elif (NNInput.LossFunction == 'normalized_squared_error'):
        ValidError      = T.sqr((ValidPrediction - TargetVar) / TargetVar)
    elif (NNInput.LossFunction == 'huber_loss'):
        ValidError      = T.sqr(ValidPrediction - TargetVar)
    elif (NNInput.LossFunction == 'weighted_squared_error'):
        w               = T.power(NNInput.Shift/TargetVar, NNInput.Power)
        ValidError      = w * T.sqr(ValidPrediction - TargetVar)
    # element-wise errors in every branch; the RMSE over the collected vector is taken in the training loop
    ValFn   = theano.function(inputs=[InputVar, TargetVar], outputs=ValidError)


    ###############
    # TRAIN MODEL #
    ###############
    print('\n\nTRAINING ... ')

    if (NNInput.fvalid < 0):
        fValid = NBatchTrain * numpy.absolute(NNInput.fvalid)
    else:
        fValid = NNInput.fvalid
    BestValidError        = numpy.inf
    BestIter              = 0
    TestScore             = 0.
    tStart                = timeit.default_timer()
    iEpoch                = 0
    LoopingFlg            = True
    iIterTot              = 0
    Train                 = []
    TrainEpochVec         = []
    Valid                 = []
    ValidEpochVec         = []
    iTry                  = 0

   
    if (NNInput.Model=='ModPIP') or (NNInput.Model == 'ModPIPPol'):
        xSetTrain = RSetTrain
        xSetValid = RSetValid
        #xSetTest  = RSetTest
        xDataOrig = RDataOrig
    elif (NNInput.Model == 'PIP'):
        xSetTrain = GSetTrain
        xSetValid = GSetValid
        #xSetTest  = GSetTest
        #xDataOrig = GDataOrig
    # print(xSetTrain)
    # print(xSetValid)
    # print(xSetTest)
    # print(xDataOrig)
    # print(ySetTrain.get_value())
    # print(ySetValid.get_value())
    # print(ySetTest.get_value())
    # print(yDataOrig)
    # time.sleep(5)

    ThisTrainError = 0.0
    while (iEpoch < NNInput.NEpoch) and (LoopingFlg):
        iEpoch += 1

        iMiniBatch    = 0
        TrainErrorVec = [] 
        for TrainBatch in iterate_minibatches(xSetTrain, ySetTrain, NNInput.NMiniBatch, shuffle=True):
            iMiniBatch += 1
            iIterTot    = (iEpoch - 1) * NBatchTrain + iMiniBatch
            TrainInputs, TrainTargets          = TrainBatch
            [TrainErrorTemp, MiniBatchAvgCost] = TrainFn(TrainInputs, TrainTargets)
            TrainErrorVec                      = numpy.append(TrainErrorVec, TrainErrorTemp)


            if (iIterTot + 1) % fValid == 0:

                ValidErrorVec = []
                for ValidBatch in iterate_minibatches(xSetValid, ySetValid, NValid, shuffle=False):
                    ValidInputs, ValidTargets = ValidBatch
                    ValidErrorVec             = numpy.append(ValidErrorVec, ValFn(ValidInputs, ValidTargets))

                ThisValidError = numpy.sqrt( numpy.mean(ValidErrorVec) )
                ValidEpochVec  = numpy.append(ValidEpochVec, iEpoch)
                Valid          = numpy.append(Valid, ThisValidError)

                # fig = plt.figure()
                # plt.plot(ValidErorrVec, color='lightblue', linewidth=3)
                # #ax.set_xlim(,)
                # plt.show()

                print( '\n    iEpoch %i, minibatch %i/%i, training error %f, validation error %f' % (iEpoch, iMiniBatch + 1, NBatchTrain, ThisTrainError, ThisValidError) )

                # if we got the best validation score until now
                if ThisValidError < BestValidError:
                    #improve patience if loss improvement is good enough
                    #if (ThisValidError < BestValidError * NNInput.ImpThold):
                        # NNInput.NPatience = max(NNInput.NPatience, iIterTot * NNInput.NDeltaPatience)

                    BestValidError = ThisValidError
                    BestIter       = iIterTot

                    # # test it on the test set
                    # TestErrorVec = []
                    # for TestBatch in iterate_minibatches(xSetTest, ySetTest, NTest, shuffle=False):
                    #     TestInputs, TestTargets = TestBatch
                    #     TestErrorVec = numpy.append(TestErrorVec, ValFn(TestInputs, TestTargets))
                    # TestScore  = numpy.mean(TestErrorVec)

                    # print(('        iEpoch %i, minibatch %i/%i, test error of best model %f') % (iEpoch, iMiniBatch + 1, NBatchTrain, TestScore))
                    print(('        iEpoch %i, minibatch %i/%i, Best so far') % (iEpoch, iMiniBatch + 1, NBatchTrain))


                    if (NNInput.WriteFinalFlg > 0):
                        
                        for iLayer in range(len(NNInput.NLayers)-1):

                            PathToFldr = NNInput.PathToOutputFldr + Layers[iLayer].name + '/'
                            if not os.path.exists(PathToFldr):
                                os.makedirs(PathToFldr)
                            PathToFile = PathToFldr + 'Weights.npz'
                            numpy.savez(PathToFile, *lasagne.layers.get_all_param_values(Layers[iLayer]))

                            if (NNInput.WriteFinalFlg > 1):
                                if (NNInput.Model == 'ModPIP'):
                                    if (iLayer == 0) and (NNInput.BondOrderStr != 'DiatPotFun'):
                                        save_parameters_PIP(PathToFldr, Layers[iLayer].Lambda.get_value(), Layers[iLayer].re.get_value())
                                    elif (iLayer > 1):
                                        if (NNInput.BiasesFlg):
                                            save_parameters(PathToFldr, Layers[iLayer].W.get_value(), Layers[iLayer].b.get_value())
                                        else:
                                            save_parameters_NoBiases(PathToFldr, Layers[iLayer].W.get_value())
                                elif (NNInput.Model == 'ModPIPPol'):
                                    if (iLayer == 0) and (NNInput.BondOrderStr != 'DiatPotFun'):
                                        save_parameters_PIP(PathToFldr, Layers[iLayer].Lambda.get_value(), Layers[iLayer].re.get_value())
                                    elif (iLayer==1):
                                        save_parameters_NoBiases(PathToFldr, Layers[iLayer].W.get_value())
                                elif (NNInput.Model == 'PIP'):
                                    if (NNInput.BiasesFlg):
                                        save_parameters(PathToFldr, Layers[iLayer].W.get_value(), Layers[iLayer].b.get_value())
                                    else:
                                        save_parameters_NoBiases(PathToFldr, Layers[iLayer].W.get_value())


                        if (NNInput.TryNNFlg > 1):
                            for i, Ang in enumerate(NNInput.AngVector):
                                iTry = iTry + 1
                                RSetTry, GSetTry, ySetTry, ySetTryDiat, ySetTryTriat = datasetsTry[i]
                                if (NNInput.Model == 'ModPIP') or (NNInput.Model == 'ModPIPPol'):
                                    xSetTry = RSetTry
                                elif (NNInput.Model == 'PIP'):
                                    xSetTry = GSetTry
                                NTry                  = xSetTry.get_value(borrow=True).shape[0]
                                NBatchTry             = NTry // NNInput.NMiniBatch
                                yPredTry = lasagne.layers.get_output(Layers[-1], inputs=xSetTry) 
                                if  (NNInput.TryNNFlg > 2):
                                    PathToTryLabels = NNInput.PathToOutputFldr + '/REBestDet.csv.' + str(iTry)
                                else:
                                    PathToTryLabels = NNInput.PathToOutputFldr + '/REBestDet.csv.' + str(Ang)
                                yPredTry = T.cast(yPredTry, 'float64')
                                yPredTry = yPredTry.eval()
                                yPredTry = InverseTransformation(NNInput, yPredTry, ySetTryDiat.get_value())
                                ySetTry = T.cast(ySetTry, 'float64')
                                ySetTry = ySetTry.eval()
                                ySetTry = InverseTransformation(NNInput, ySetTry, ySetTryDiat.get_value())
                                save_to_plot(PathToTryLabels, 'Evaluated', numpy.concatenate((RSetTry.get_value(), ySetTry, yPredTry), axis=1))
                    
        TrainEpochVec   = numpy.append(TrainEpochVec, iEpoch)
        ThisTrainError  = numpy.sqrt( numpy.mean(TrainErrorVec) )
        Train           = numpy.append(Train, ThisTrainError)


    #############################################################################################################
    ### LOADING THE OPTIMAL PARAMETERS
    for iLayer in range(len(NNInput.NLayers)-1):

        PathToFldr = NNInput.PathToWeightFldr + Layers[iLayer].name + '/'
        print(' Loading Parameters for Layer ', iLayer, ' from Folder ', PathToFldr)
        if (NNInput.Model == 'ModPIP'):
            if (iLayer == 0) and (NNInput.BondOrderStr != 'DiatPotFun'):
                ParamsPIP = load_parameters_PIP(PathToFldr)
                Layers[iLayer].Lambda.set_value(ParamsPIP[0])
                Layers[iLayer].re.set_value(ParamsPIP[1])
            elif (iLayer > 1):
                Params = load_parameters(PathToFldr)
                Layers[iLayer].W.set_value(Params[0])
                if (NNInput.BiasesFlg):
                    Layers[iLayer].b.set_value(Params[1])
        elif (NNInput.Model == 'ModPIPPol'):
            if (iLayer == 0) and (NNInput.BondOrderStr != 'DiatPotFun'):
                ParamsPIP = load_parameters_PIP(PathToFldr)
                Layers[iLayer].Lambda.set_value(ParamsPIP[0])
                Layers[iLayer].re.set_value(ParamsPIP[1])
            elif (iLayer==1):
                Layers[iLayer].W.set_value(load_parameters(PathToFldr)[0])
        elif (NNInput.Model == 'PIP'):
            Params = load_parameters(PathToFldr)
            Layers[iLayer].W.set_value(Params[0])
            Layers[iLayer].b.set_value(Params[1])


    #############################################################################################################
    ### Evaluating Model for a Particular Data-Set
    if (NNInput.TryNNFlg > 0):
        for i, Ang in enumerate(NNInput.AngVector):
            RSetTry, GSetTry, ySetTry, ySetTryDiat, ySetTryTriat = datasetsTry[i]
            if (NNInput.Model == 'ModPIP') or (NNInput.Model == 'ModPIPPol'):
                xSetTry = RSetTry
            elif (NNInput.Model == 'PIP'):
                xSetTry = GSetTry
            NTry                  = xSetTry.get_value(borrow=True).shape[0]
            NBatchTry             = NTry // NNInput.NMiniBatch
            yPredTry = lasagne.layers.get_output(Layers[-1], inputs=xSetTry) 
            PathToTryLabels = NNInput.PathToOutputFldr + '/REBestDet.csv.' + str(Ang)
            yPredTry = T.cast(yPredTry, 'float64')
            yPredTry = yPredTry.eval()
            yPredTry = InverseTransformation(NNInput, yPredTry, ySetTryDiat.get_value())
            ySetTry = T.cast(ySetTry, 'float64')
            ySetTry = ySetTry.eval()
            ySetTry = InverseTransformation(NNInput, ySetTry, ySetTryDiat.get_value())
            save_to_plot(PathToTryLabels, 'Evaluated', numpy.concatenate((RSetTry.get_value(), ySetTry, yPredTry), axis=1))


    #############################################################################################################
    ### COMPUTING ERRORS
    ySetTrain = InverseTransformation(NNInput, ySetTrain.get_value(), ySetTrainDiat.get_value())
    ySetValid = InverseTransformation(NNInput, ySetValid.get_value(), ySetValidDiat.get_value())
    #ySetTest  = InverseTransformation(NNInput, ySetTest.get_value(),  ySetTestDiat.get_value())

    yPredTrain  = lasagne.layers.get_output(Layers[-1], inputs=xSetTrain) 
    yPredTrain  = T.cast(yPredTrain, 'float64')
    yPredTrain  = yPredTrain.eval()
    yPredTrain  = InverseTransformation(NNInput, yPredTrain, ySetTrainDiat.get_value())
    error_Train = ySetTrain - yPredTrain 
    plot_error(NNInput, error_Train, 'Train')

    yPredValid  = lasagne.layers.get_output(Layers[-1], inputs=xSetValid) 
    yPredValid  = T.cast(yPredValid, 'float64')
    yPredValid  = yPredValid.eval()
    yPredValid  = InverseTransformation(NNInput, yPredValid, ySetValidDiat.get_value())
    error_Valid = ySetValid - yPredValid
    plot_error(NNInput, error_Valid, 'Valid') 

    # yPredTest   = lasagne.layers.get_output(Layers[-1], inputs=xSetTest) 
    # yPredTest   = T.cast(yPredTest, 'float64')
    # yPredTest   = yPredTest.eval()
    # yPredTest   = InverseTransformation(NNInput, yPredTest, ySetTestDiat.get_value())
    # error_Test  = ySetTest - yPredTest
    # plot_error(NNInput, error_Test, 'Test')

    # plot_set(NNInput, RSetTrain.get_value(), ySetTrain, RSetValid.get_value(), ySetValid, RSetTest.get_value(), ySetTest)


    yPredOrig   = lasagne.layers.get_output(Layers[-1], inputs=xDataOrig) 
    yPredOrig   = T.cast(yPredOrig, 'float64')
    yPredOrig   = yPredOrig.eval()
    yPredOrig   = InverseTransformation(NNInput, yPredOrig, yDataDiatOrig)
    plot_scatter(NNInput, yPredOrig, yDataOrig)
    #plot_overall_error(NNInput, yPredOrig, yDataOrig)

    plot_history(NNInput, TrainEpochVec, Train, ValidEpochVec, Valid)


    tEnd = timeit.default_timer()
    print(('\nOptimization complete. Best validation score of %f obtained at iteration %i, with test performance %f') % (BestValidError, BestIter + 1, TestScore))
    print(('\nThe code for file ' + os.path.split(__file__)[1] + ' ran for %.2fm' % ((tEnd - tStart) / 60.)), file=sys.stderr)
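For intuition, a standalone NumPy sketch of the weighting used in weighted_squared_error above (the target values here are hypothetical): targets far above Shift contribute progressively less to the loss, while targets below it are emphasized.

import numpy as np

Shift, Power = 1.0, 1.0
t = np.array([0.5, 1.0, 5.0, 50.0])    # hypothetical target energies
p = t + 0.1                            # predictions offset by a constant 0.1
w = np.power(Shift / t, Power)         # same weight as in weighted_squared_error
print(w * np.square(p - t))            # [0.02, 0.01, 0.002, 0.0002]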
Example #6
from jmodel.util import *
from jmodel.Optimizers.BiascOptimizer import *
from jmodel.Models.Rnnlm import *
from jmodel.Trainers.RnnTrainer import *
# Set hyperparameters
batch_size = 32
wordvec_size = 100
hidden_size = 100  # number of elements in the RNN hidden-state vector
time_size = 35  # number of time steps the RNN is unrolled for
lr = 20.0
max_epoch = 4
max_grad = 0.25

# Load the training data
corpus, word_to_id, id_to_word = load_data("ptb.train.txt")
vocab_size = len(word_to_id)
xs = corpus[:-1]
ts = corpus[1:]

# Create the model
model = Rnnlm(vocab_size, wordvec_size, hidden_size)
optimizer = SGD(lr)
trainer = RnnlmTrainer(model, optimizer)

# Train with gradient clipping applied
trainer.fit(xs,
            ts,
            max_epoch,
            batch_size,
            time_size,
            max_grad)
Example #7
from LoadData import load_data
from Pca import pca
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines

if __name__ == '__main__':

    Path = 'iris.data'
    X, Y = load_data(Path)


    K = 2        # reduce the data to 2 dimensions
    finaldata = pca(X, K)

    # store the projected data separately for each iris class
    red_x = []
    red_y = []

    blue_x = []
    blue_y = []

    green_x = []
    green_y = []

    finaldata = np.array(finaldata)  # convert the matrix to an array for easier indexing

    for i in range(0, finaldata.shape[0]):    # iterate over rows
        if Y[i] == 0:
            red_x.append(finaldata[i][0])
            red_y.append(finaldata[i][1])
        elif Y[i] == 1:
            blue_x.append(finaldata[i][0])
            blue_y.append(finaldata[i][1])
        else:
            green_x.append(finaldata[i][0])
            green_y.append(finaldata[i][1])
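    # A plausible completion of the truncated plotting step (markers and
    # colors are illustrative choices, not from the original snippet):
    plt.scatter(red_x, red_y, c='r', marker='x')
    plt.scatter(blue_x, blue_y, c='b', marker='D')
    plt.scatter(green_x, green_y, c='g', marker='.')
    plt.show()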
Example #8
def train(model, supervisor, num_label):
    trX, trY, num_tr_batch, valX, valY, num_val_batch = load_data(
        cfg.dataset, cfg.batch_size, is_training=True)
    Y = valY[:num_val_batch * cfg.batch_size].reshape((-1, 1))

    fd_train_acc, fd_loss, fd_val_acc = save_to()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with supervisor.managed_session(config=config) as sess:
        print("\nNote: all of results will be saved to directory: " +
              cfg.results)
        for epoch in range(cfg.epoch):
            sys.stdout.write('Training for epoch ' + str(epoch) + '/' +
                             str(cfg.epoch) + ':')
            sys.stdout.flush()
            if supervisor.should_stop():
                print('supervisor stopped!')
                break
            for step in tqdm(range(num_tr_batch),
                             total=num_tr_batch,
                             ncols=70,
                             leave=False,
                             unit='b'):
                start = step * cfg.batch_size
                end = start + cfg.batch_size
                global_step = epoch * num_tr_batch + step

                if global_step % cfg.train_sum_freq == 0:
                    _, loss, train_acc, summary_str = sess.run([
                        model.train_op, model.loss, model.accuracy,
                        model.train_summary
                    ])

                    supervisor.summary_writer.add_summary(
                        summary_str, global_step)

                    fd_loss.write(str(global_step) + ',' + str(loss) + "\n")
                    fd_loss.flush()
                    fd_train_acc.write(
                        str(global_step) + ',' +
                        str(train_acc / cfg.batch_size) + "\n")
                    fd_train_acc.flush()
                else:
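                    # no feed_dict here: training inputs are assumed to be wired into the graph (e.g. via an input queue)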
                    sess.run(model.train_op)

                if cfg.val_sum_freq != 0 and global_step % cfg.val_sum_freq == 0:
                    val_acc = 0
                    prob = np.zeros(
                        (num_val_batch * cfg.batch_size, num_label))
                    for i in range(num_val_batch):
                        start = i * cfg.batch_size
                        end = start + cfg.batch_size
                        acc, prob[start:end, :] = sess.run(
                            [model.accuracy, model.activation], {
                                model.X: valX[start:end],
                                model.labels: valY[start:end]
                            })
                        val_acc += acc
                    val_acc = val_acc / (cfg.batch_size * num_val_batch)
                    np.savetxt(cfg.results + '/activations_step_' +
                               str(global_step) + '.txt',
                               np.hstack((prob, Y)),
                               fmt='%1.2f')
                    fd_val_acc.write(
                        str(global_step) + ',' + str(val_acc) + '\n')
                    fd_val_acc.flush()

            if (epoch + 1) % cfg.save_freq == 0:
                supervisor.saver.save(
                    sess, cfg.logdir + '/model_epoch_%04d_step_%02d' %
                    (epoch, global_step))

        fd_val_acc.close()
        fd_train_acc.close()
        fd_loss.close()
Example #9
def sgd_optimization(NNInput):

    RandomSeed = 42
    set_tt_rng(MRG_RandomStreams(RandomSeed))

    NSigmaSamples = 1000
    SigmaIntCoeff = 2

    ##################################################################################################################################
    ### LOADING DATA
    ##################################################################################################################################
    print('\nLoading Data ... \n')

    if (NNInput.TryNNFlg):
        datasets, datasetsPlot, RDataOrig, yDataOrig, yDataDiatOrig = load_data(
            NNInput)
    else:
        datasets, RDataOrig, yDataOrig, yDataDiatOrig = load_data(NNInput)

    RSetTrain, ySetTrain, ySetTrainDiat, ySetTrainTriat = datasets[0]
    RSetPlot, ySetPlot, ySetPlotDiat, ySetPlotTriat = datasetsPlot[0]
    RSetPlotTemp = RSetPlot
    #NNInput.NIn  = xSetTrain.get_value(borrow=True).shape[1]
    NNInput.NOut = ySetTrain.get_value(borrow=True).shape[1]
    print(('    Nb of Input:  %i') % NNInput.NIn)
    print(('    Nb of Output: %i \n') % NNInput.NOut)
    NNInput.NLayers = NNInput.NHid
    NNInput.NLayers.insert(0, NNInput.NIn)
    NNInput.NLayers.append(NNInput.NOut)

    NNInput.NTrain = RSetTrain.get_value(borrow=True).shape[0]
    print(('    Nb of Training   Examples: %i') % NNInput.NTrain)

    # compute number of minibatches for training, validation and testing
    if (NNInput.NMiniBatch != 0):
        NNInput.NBatchTrain = NNInput.NTrain // NNInput.NMiniBatch
        print(('    Nb of Training   Batches: %i') % NNInput.NBatchTrain)
    else:
        print('    No-BATCH Version')

    ##############################################################################################
    ### TESTING REAL PARAMETERS ##################################################################
    if (NNInput.ReadIniParamsFlg):
        if (NNInput.Model == 'PIP'):
            LambdaVec = NNInput.LambdaVec
            reVec = NNInput.reVec
            WIni = [
                load_parameters(NNInput.PathToWeightFldr +
                                NNInput.LayersName[iLayer] + '/')[0]
                for iLayer in range(1, len(NNInput.LayersName))
            ]
            bIni = [
                load_parameters(NNInput.PathToWeightFldr +
                                NNInput.LayersName[iLayer] + '/')[1]
                for iLayer in range(1, len(NNInput.LayersName))
            ]
        if (NNInput.Model == 'ModPIP'):
            LambdaIni = load_parameters_PIP(NNInput.PathToWeightFldr +
                                            NNInput.LayersName[1] + '/')[0]
            reIni = load_parameters_PIP(NNInput.PathToWeightFldr +
                                        NNInput.LayersName[1] + '/')[1]
            LambdaVec = numpy.array([1.0, 1.0, 1.0]) * LambdaIni
            reVec = numpy.array([1.0, 1.0, 1.0]) * reIni
            #print('Lambda = ', LambdaVec)
            #print('re     = ', reVec)
            WIni = [
                load_parameters(NNInput.PathToWeightFldr +
                                NNInput.LayersName[iLayer] + '/')[0]
                for iLayer in range(3, len(NNInput.LayersName))
            ]
            bIni = [
                load_parameters(NNInput.PathToWeightFldr +
                                NNInput.LayersName[iLayer] + '/')[1]
                for iLayer in range(3, len(NNInput.LayersName))
            ]
        elif (NNInput.Model == 'LEPS'):
            DeVec = NNInput.DeVec
            betaVec = NNInput.betaVec
            reVec = NNInput.reVec
            k = NNInput.k
        i = -1
        for Ang in NNInput.AngVector:
            i = i + 1
            RSetPlot, ySetPlot, ySetPlotDiat, ySetPlotTriat = datasetsPlot[i]
            if (NNInput.Model == 'PIP') or (NNInput.Model == 'ModPIP'):
                yPredInitial = try_model_PIP(NNInput,
                                             RSetPlot.get_value(borrow=True),
                                             LambdaVec, reVec, WIni, bIni)
            elif (NNInput.Model == 'LEPS'):
                yPredInitial = try_model_LEPS(NNInput,
                                              RSetPlot.get_value(borrow=True),
                                              DeVec, betaVec, reVec, k)
            yPredInitial = InverseTransformation(NNInput, yPredInitial,
                                                 ySetPlotDiat.get_value())
            PathToPlotLabels = NNInput.PathToOutputFldr + '/REInitial.csv.' + str(
                int(numpy.floor(Ang)))
            ySetPlot = T.cast(ySetPlot, 'float64')
            ySetPlot = ySetPlot.eval()
            ySetPlot = InverseTransformation(NNInput, ySetPlot,
                                             ySetPlotDiat.get_value())
            save_to_plot(
                PathToPlotLabels, 'Initial',
                numpy.column_stack(
                    [RSetPlot.get_value(), ySetPlot, yPredInitial]))
            print('    Initial Evaluation Saved in File: ', PathToPlotLabels,
                  '\n')
        RSetPlotTemp = RSetPlot
    ##############################################################################################

    ##################################################################################################################################
    # BUILD ACTUAL MODEL #
    ##################################################################################################################################
    ### COMPUTING / UPDATING INFERENCE ######################################################################
    # print(RSetTrain.get_value())
    # print(ySetTrain.get_value())
    # time.sleep(5)
    if (NNInput.TrainFlg):
        if (NNInput.NMiniBatch > 0):
            RSetTrainTemp = pymc3.Minibatch(RSetTrain.get_value(),
                                            batch_size=NNInput.NMiniBatch,
                                            dtype='float64')
            ySetTrainTemp = pymc3.Minibatch(ySetTrain.get_value(),
                                            batch_size=NNInput.NMiniBatch,
                                            dtype='float64')
        else:
            RSetTrainTemp = RSetTrain
            ySetTrainTemp = ySetTrain
            NNInput.NMiniBatch = NNInput.NTrain
        #ADVIApprox, ADVIInference, ADVITracker, SVGDApprox, NUTSTrace, model, yPred, Sigma, Layers = construct_model(NNInput, RSetTrainTemp, ySetTrainTemp, GaussWeightsW, GaussWeightsb)
        ADVIApprox, ADVIInference, SVGDApprox, NUTSTrace, Params, yPred = construct_model(
            NNInput, RSetTrain, ySetTrain, RSetTrainTemp, ySetTrainTemp,
            GaussWeightsW, GaussWeightsb)
        #
        plot_ADVI_ELBO(NNInput, ADVIInference)
        #
        if (NNInput.SaveInference):
            PathToModTrace = NNInput.PathToOutputFldr + '/Approx&Preds.pkl'
            with open(PathToModTrace, 'wb') as buff:
                #pickle.dump({'model': model, 'trace': ADVITrace, 'tracker': ADVITracker, 'inference': ADVIInference, 'approx': ADVIApprox, 'yLike': yLike}, buff)
                pickle.dump(
                    {
                        'ADVIApprox': ADVIApprox,
                        'Params': Params,
                        'yPred': yPred
                    }, buff)
        #
    else:
        PathToWeightFldr = NNInput.PathToOutputFldr + '/Approx&Preds.pkl'
        with open(PathToWeightFldr, 'rb') as buff:
            data = pickle.load(buff)
        #model, ADVITrace, ADVITracker, ADVIInference, ADVIApprox, yPred = data['model'], data['trace'], data['tracker'], data['inference'], data['approx'], data['yPred']
        ADVIApprox, Params, yPred = data['ADVIApprox'], data['Params'], data[
            'yPred']
        RSetPlot, ySetPlot, ySetPlotDiat, ySetPlotTriat = datasetsPlot[0]
        RSetPlotTemp = RSetPlot

    if (NNInput.NTraceADVI > 0):
        ADVITrace = ADVIApprox.sample(draws=NNInput.NTraceADVI)
        plot_ADVI_trace(NNInput, ADVITrace)
    else:
        ADVITrace = 1
    ##############################################################################################

    ### SAMPLING PARAMETERS POSTERIOR #######################################################################
    PathToADVI = NNInput.PathToOutputFldr + '/ParamsPosts/'
    if not os.path.exists(PathToADVI):
        os.makedirs(PathToADVI)

    if (NNInput.Model == 'PIP') or (NNInput.Model == 'ModPIP'):
        save_ADVI_reconstruction_PIP(NNInput, PathToADVI, ADVIApprox, Params)
        save_ADVI_sample_PIP(NNInput, PathToADVI, ADVIApprox, Params)
    elif (NNInput.Model == 'LEPS'):
        save_ADVI_reconstruction_LEPS(PathToADVI, ADVIApprox, Params)
    ##############################################################################################

    ### RECONSTRUCTING MOMENTS ###################################################################
    #means = ADVIApprox.bij.rmap(ADVIApprox.mean.eval())
    #sds   = ADVIApprox.bij.rmap(ADVIApprox.std.eval())
    #plot_ADVI_reconstruction(NNInput, means, sds)

    # PathToADVI = NNInput.PathToOutputFldr + '/ParamsPosts/'
    # if not os.path.exists(PathToADVI):
    #     os.makedirs(PathToADVI)
    # save_ADVI_reconstruction(PathToADVI, ADVITrace, model, 0.0, 0.0)
    ##############################################################################################

    ### RUNNING NUTS #############################################################################
    # xSetTrainTemp = xSetTrain
    # ySetTrainTemp = ySetTrain

    # fig = plt.figure()
    # pymc3.traceplot(NUTSTrace);
    # plt.show()
    # FigPath = NNInput.PathToOutputFldr + '/NUTSTrace.png'
    # fig.savefig(FigPath)
    # #plt.close()

    # varnames = means.keys()
    # fig, axs = plt.subplots(nrows=len(varnames), figsize=(12, 18))
    # for var, ax in zip(varnames, axs):
    #     mu_arr    = means[var]
    #     sigma_arr = sds[var]
    #     ax.set_title(var)
    #     for i, (mu, sigma) in enumerate(zip(mu_arr.flatten(), sigma_arr.flatten())):
    #         sd3 = (-4*sigma + mu, 4*sigma + mu)
    #         x = numpy.linspace(sd3[0], sd3[1], 300)
    #         y = stats.norm(mu, sigma).pdf(x)
    #         ax.plot(x, y)
    #         if hierarchical_trace[var].ndim > 1:
    #             t = NUTSTrace[var][i]
    #         else:
    #             t = NUTSTrace[var]
    #         sns.distplot(t, kde=False, norm_hist=True, ax=ax)
    # fig.tight_layout()
    # plt.show()
    # FigPath = NNInput.PathToOutputFldr + '/ADVIDistributionsReconstruction.png'
    # fig.savefig(FigPath)
    # #plt.close()
    ##############################################################################################

    # ## COMPUTING MAX POSTERIOR ##################################################################
    # map_estimate = pymc3.find_MAP(model=model)
    # if (NNInput.Model == 'ModPIP'):
    #    LambdaVec    = map_estimate.get('Lambda')
    #    reVec        = map_estimate.get('re')
    #    WNames       = ['W1', 'W2', 'W3']
    #    WIni         = [ map_estimate.get(WNames[iLayer]) for iLayer in range(len(NNInput.LayersName))]
    #    bNames       = ['b1', 'b2', 'b3']
    #    bIni         = [ map_estimate.get(bNames[iLayer]) for iLayer in range(len(NNInput.LayersName))]
    # elif (NNInput.Model == 'PIP'):
    #    LambdaVec    = NNInput.reVec
    #    reVec        = NNInput.reVec
    #    WNames       = ['W1', 'W2', 'W3']
    #    WIni         = [ map_estimate.get(WNames[iLayer]) for iLayer in range(len(NNInput.LayersName))]
    #    bNames       = ['b1', 'b2', 'b3']
    #    bIni         = [ map_estimate.get(bNames[iLayer]) for iLayer in range(len(NNInput.LayersName))]
    # elif (NNInput.Model == 'LEPS'):
    #    DeVec   = map_estimate.get('De')
    #    betaVec = map_estimate.get('beta')
    #    reVec   = map_estimate.get('re')
    #    k       = map_estimate.get('k')

    # i=-1
    # for Ang in NNInput.AngVector:
    #    i=i+1

    #    xSetTry,  ySetTry  = datasetsTry[i]

    #    PathToAbscissaToPlot = NNInput.PathToDataFldr + '/R.csv.' + str(Ang)
    #    xPlot = abscissa_to_plot(PathToAbscissaToPlot)
    #    if (NNInput.Model == 'PIP') or (NNInput.Model == 'ModPIP'):
    #        yPredMaxPosterior = try_model_PIP(NNInput, xSetTry.get_value(borrow=True), LambdaVec, reVec, WIni, bIni, IniMean, IniStD)
    #    elif (NNInput.Model == 'LEPS'):
    #        yPredMaxPosterior = try_model_LEPS(NNInput, xSetTry.get_value(borrow=True), DeVec, betaVec, reVec, k)
    #    #print(WIni, bIni)
    #    PathToTryLabels = NNInput.PathToOutputFldr + '/REMaxPosterior.' + str(Ang) + '.csv'
    #    save_to_plot(PathToTryLabels, 'Evaluated', numpy.column_stack([xPlot, yPredMaxPosterior]))
    # #############################################################################################

    ### SAMPLING OUTPUT POSTERIOR #######################################################################
    PathToADVI = NNInput.PathToOutputFldr + '/OutputPosts/'
    if not os.path.exists(PathToADVI):
        os.makedirs(PathToADVI)

    x = T.dmatrix('X')
    n = T.iscalar('n')
    x.tag.test_value = numpy.random.randint(100, size=(100, 3))
    n.tag.test_value = 100
    _sample_proba_yPred = ADVIApprox.sample_node(
        yPred, size=n, more_replacements={RSetTrainTemp: x})
    sample_proba_yPred = theano.function([x, n], _sample_proba_yPred)

    m = T.iscalar('m')
    _sample_proba_SigmaPred = ADVIApprox.sample_node(Params.get('Sigma'),
                                                     size=n * m)
    sample_proba_SigmaPred = theano.function([n, m], _sample_proba_SigmaPred)
    SigmaPred = sample_proba_SigmaPred(NNInput.NOutPostSamples, NSigmaSamples)
    SigmaPred = numpy.reshape(SigmaPred,
                              (NNInput.NOutPostSamples, NSigmaSamples))

    for i, Ang in enumerate(NNInput.AngVector):
        numpy.random.seed(RandomSeed)
        pymc3.set_tt_rng(RandomSeed)
        RSetPlot, ySetPlot, ySetPlotDiat, ySetPlotTriat = datasetsPlot[i]
        ySetPlot = T.cast(ySetPlot, 'float64')
        ySetPlot = ySetPlot.eval()
        #ySetPlot     = InverseTransformation(NNInput, ySetPlot, ySetPlotDiat.get_value())
        yPredPlot = sample_proba_yPred(RSetPlot.get_value(borrow=True),
                                       NNInput.NOutPostSamples)
        yPredSum = ySetPlot * 0.0
        yPredSumSqr = ySetPlot * 0.0
        for j in range(NNInput.NOutPostSamples):
            yPredTemp = numpy.array(yPredPlot[j, :])
            yPredTemp = InverseTransformation(NNInput, yPredTemp,
                                              ySetPlotDiat.get_value())
            yPredSum = yPredSum + yPredTemp
            yPredSumSqr = yPredSumSqr + numpy.square(yPredTemp)
        #
        yMean = yPredSum / NNInput.NOutPostSamples
        yStD = numpy.sqrt(yPredSumSqr / NNInput.NOutPostSamples -
                          numpy.square(yMean))
        yPlus = yMean + SigmaIntCoeff * yStD
        yMinus = yMean - SigmaIntCoeff * yStD
        PathToPlotLabels = NNInput.PathToOutputFldr + '/OutputPosts/yPred' + str(
            int(numpy.floor(Ang))) + '.csv'
        save_moments(
            PathToPlotLabels, 'yPred',
            numpy.column_stack(
                [RSetPlot.get_value(), ySetPlot, yMean, yStD, yMinus, yPlus]))
        print('    Wrote Sampled yPred for Angle ', Ang, '\n')

    if (NNInput.AddNoiseToPredsFlg):
        for i, Ang in enumerate(NNInput.AngVector):
            numpy.random.seed(RandomSeed)
            pymc3.set_tt_rng(RandomSeed)
            RSetPlot, ySetPlot, ySetPlotDiat, ySetPlotTriat = datasetsPlot[i]
            ySetPlot = T.cast(ySetPlot, 'float64')
            ySetPlot = ySetPlot.eval()
            #ySetPlot     = InverseTransformation(NNInput, ySetPlot, ySetPlotDiat.get_value())
            yPredPlot = sample_proba_yPred(RSetPlot.get_value(borrow=True),
                                           NNInput.NOutPostSamples)
            yPostSum = ySetPlot * 0.0
            yPostSumSqr = ySetPlot * 0.0
            for j in range(NNInput.NOutPostSamples):
                yPredTemp = numpy.array(yPredPlot[j, :])
                for k in range(NSigmaSamples):
                    SigmaTemp = SigmaPred[j, k]
                    RandNum   = numpy.random.normal(loc=0.0, scale=SigmaTemp)
                    yPostTemp = yPredTemp + RandNum
                    yPostTemp = InverseTransformation(
                        NNInput, yPostTemp, ySetPlotDiat.get_value())
                    yPostSum    = yPostSum + yPostTemp
                    yPostSumSqr = yPostSumSqr + numpy.square(yPostTemp)
            #
            NPost = NNInput.NOutPostSamples * NSigmaSamples
            yMean = yPostSum / NPost
            yStD = numpy.sqrt(yPostSumSqr / NPost -
                              numpy.square(yMean))
            yPlus = yMean + SigmaIntCoeff * yStD
            yMinus = yMean - SigmaIntCoeff * yStD
            PathToPlotLabels = NNInput.PathToOutputFldr + '/OutputPosts/yPost' + str(
                int(numpy.floor(Ang))) + '.csv'
            save_moments(
                PathToPlotLabels, 'yPost',
                numpy.column_stack([
                    RSetPlot.get_value(), ySetPlot, yMean, yStD, yMinus, yPlus
                ]))
            print('    Wrote Sampled yPost for Angle ', Ang, '\n')
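A minimal self-contained sketch of the pymc3 idiom used above (fit with ADVI, then draw posterior samples of a model output at new inputs via sample_node); the toy linear model is illustrative only:

import numpy
import theano
import theano.tensor as T
import pymc3

X = numpy.linspace(0., 1., 50)[:, None]
Y = 2. * X + numpy.random.normal(0., 0.1, X.shape)

Xsh = theano.shared(X)                           # plays the role of RSetTrainTemp
with pymc3.Model():
    w  = pymc3.Normal('w', 0., 10.)
    mu = w * Xsh                                 # plays the role of yPred
    pymc3.Normal('y', mu=mu, sd=0.1, observed=Y)
    Approx = pymc3.fit(n=5000, method='advi')

xIn     = T.dmatrix('xIn')
Sampled = Approx.sample_node(mu, size=100, more_replacements={Xsh: xIn})
SampleF = theano.function([xIn], Sampled)
print(SampleF(numpy.array([[0.5]])).shape)       # (100, 1, 1): 100 posterior draws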
Example #10
from DataFrame import DataFrame
from PlotGeneration import PlotGeneration
from LoadData import load_data
from StatisticalTest import StatisticalTest

if __name__ == "__main__":

    print("Loading data frames. . .")
    load_data()  # load data

    print("Loading statistics. . .")
    DataFrame.FemaleIncidenceRate()  # generate female incidence rates
    DataFrame.FemaleMortalityRate()  # generate female mortality rates
    DataFrame.MaleIncidenceRate()  # generate male incidence rates
    DataFrame.MaleMortalityRate()  # generate male mortality rates

    print("Generating CSV file. . .")
    PlotGeneration.create_csv()  # generate the CSV file to create the map

    print("Generating map. . .")
    PlotGeneration.generate_choropleth()  # map creation
    PlotGeneration.generate_sankey()  # sankey plot generation
    PlotGeneration.generate_boxplot()

    print("Calculating statistics. . .\n"
          )  # T-Test generation, calculating averages
    StatisticalTest()
    StatisticalTest.box_plot_statistics(
        PlotGeneration.map_data['total_incidence'],
        PlotGeneration.map_data['state_name'], 'incidence_rate')
    print("")
Example #11
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from LoadData import load_data
from sklearn.neural_network import MLPClassifier
from BuildModel import get_Model
from livelossplot.keras import PlotLossesCallback
from sklearn.svm import SVC

x_train, x_test, x_valid, y_train, y_test, y_valid = load_data(test_size = 0.25, valid_size = 0.25)
print('Shape of training data', x_train.shape)
print('Shape of training labels', len(y_train))
print('Shape of test data', x_test.shape)
print('Shape of test labels', len(y_test))
print('Shape of validation data', x_valid.shape)
print('Shape of validation labels', len(y_valid))
print(f'\nFeatures extracted: {x_train.shape[1]}')

#MLP classifier 
model = MLPClassifier(alpha = 0.01, batch_size = 256, epsilon = 1e-08, hidden_layer_sizes=(300,), learning_rate = 'adaptive', max_iter = 500)
model.fit(x_train, y_train)
y_prediction = model.predict(x_test)
accuracy = accuracy_score(y_true = y_test, y_pred = y_prediction)
print("\nAccuracy: {:.2f}%".format(accuracy * 100))
print("\n")
print(classification_report(y_test, y_prediction))
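# The unused SVC import suggests an SVM baseline was also tried; a sketch
# (hyperparameters are illustrative):
#   svc = SVC(kernel='rbf', C=1.0)
#   svc.fit(x_train, y_train)
#   print(accuracy_score(y_test, svc.predict(x_test)))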

"""#Deep Neural Network
model = get_Model(x_train.shape[1])
model.fit(x_train, y_train,
          batch_size = 10000,
          epochs = 100,
Example #12
def sgd_optimization(NNInput):

    ##################################################################################################################################
    ### LOADING DATA
    ##################################################################################################################################
    print('\nLoading Data ... \n')

    if (NNInput.TryNNFlg > 0):
        datasets, datasetsTry = load_data(NNInput)
    else:
        datasets = load_data(NNInput)

    # RSetTrainValid, ySetTrainValid, ySetTrainValidDiat, ySetTrainValidTriat = datasets[0]
    # RSetTest,       ySetTest,       ySetTestDiat,       ySetTestTriat       = datasets[1]
    # RDataOrig,      yDataOrig,      yDataDiatOrig,      yDataTriatOrig      = datasets[2]

    # NNInput.NIn  = RSetTrainValid.shape[1]
    # NNInput.NOut = ySetTrainValid.shape[1]
    # print(('  Nb of Input:  %i')    % NNInput.NIn)
    # print(('  Nb of Output: %i \n') % NNInput.NOut)

    # NNInput.NLayers = NNInput.NHid
    # NNInput.NLayers.insert(0,NNInput.NIn)
    # NNInput.NLayers.append(NNInput.NOut)
    # print('  Network Shape: ', NNInput.NLayers, '\n')

    # NTrainValid = RSetTrainValid.shape[0]
    # NTest       = RSetTest.shape[0]
    # print(('  Nb of Training + Validation Examples: %i')    % NTrainValid)
    # print(('  Nb of Test                  Examples: %i \n') % NTest)

    # NBatchTrainValid = NTrainValid // NNInput.NMiniBatch
    # print(('  Nb of Training + Validation Batches: %i') % NBatchTrainValid)

    RSetTrain, ySetTrain, ySetTrainDiat, ySetTrainTriat = datasets[0]
    RSetValid, ySetValid, ySetValidDiat, ySetValidTriat = datasets[1]
    RDataOrig, yDataOrig, yDataDiatOrig, yDataTriatOrig = datasets[2]

    NNInput.NIn = RSetTrain.shape[1]
    #NNInput.NOut = ySetTrain.shape[1]
    print(('  Nb of Input:  %i') % NNInput.NIn)
    print(('  Nb of Output: %i \n') % 1)

    NNInput.NLayers = NNInput.NHid
    NNInput.NLayers.insert(0, NNInput.NIn)
    NNInput.NLayers.append(NNInput.NOut)
    print('  Network Shape: ', NNInput.NLayers, '\n')

    NTrain = RSetTrain.shape[0]
    NValid = RSetValid.shape[0]
    print('  Nb of Training + Validation Examples: ', NTrain + NValid,
          '; of which: ', NTrain, ' for Training and ', NValid,
          ' for Validation')
    #NTest  = RSetTest.shape[0]
    #print(('  Nb of Test                  Examples: %i \n') % NTest)

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print('\nBuilding the Model ... \n')

    model = build_MLP_model(NNInput)
    #model.summary()

    ###############
    # TRAIN MODEL #
    ###############

    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  min_delta=NNInput.ImpThold,
                                                  patience=NNInput.NPatience,
                                                  restore_best_weights=True,
                                                  verbose=1)

    mc_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=NNInput.CheckpointFilePath,
        monitor='val_loss',
        save_best_only=True,
        save_weights_only=True,
        verbose=1)

    lr_callback = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                       factor=0.7,
                                                       patience=500,
                                                       mode='auto',
                                                       min_delta=1.e-6,
                                                       cooldown=0,
                                                       min_lr=1.e-8,
                                                       verbose=1)

    tb_callback = tf.keras.callbacks.TensorBoard(
        log_dir=NNInput.CheckpointFilePath,
        histogram_freq=100,
        batch_size=NNInput.NMiniBatch,
        write_graph=True,
        write_grads=True,
        write_images=True,
        embeddings_freq=0,
        embeddings_layer_names=None,
        embeddings_metadata=None,
        embeddings_data=None)

    callbacksVec = [mc_callback, early_stop, tb_callback]

    ### Training the NN
    print('\nTraining the Model ... \n')
    # history = model.fit(RSetTrainValid, ySetTrainValid, shuffle=True, batch_size=NNInput.NMiniBatch, epochs=NNInput.NEpoch, validation_split=NNInput.PercValid, verbose=1, callbacks=callbacksVec)
    xTrain = RSetTrain  #tf.convert_to_tensor(RSetTrain, tf.float32)
    yTrain = ySetTrain  #tf.convert_to_tensor(ySetTrain, tf.float32)
    xValid = RSetValid  #tf.convert_to_tensor(RSetValid, tf.float32)
    yValid = ySetValid  #tf.convert_to_tensor(ySetValid, tf.float32)
    history = model.fit(xTrain,
                        yTrain,
                        shuffle=True,
                        batch_size=NNInput.NMiniBatch,
                        epochs=NNInput.NEpoch,
                        validation_data=(xValid, yValid),
                        verbose=1,
                        callbacks=callbacksVec)

    ### Plotting History
    #plot_history(NNInput, history)

    #ErrorTest = model.evaluate(RSetTest, ySetTest, verbose=1)
    #print("TensorBoard LogDir: ", NNInput.CheckpointFldr)

    model.load_weights(NNInput.CheckpointFilePath)
    jLayer = -1
    for iLayer in [1, 3, 4, 5]:
        jLayer = jLayer + 1
        Params = model.get_layer(index=jLayer).get_weights()
        PathToFldr = NNInput.PathToOutputFldr + NNInput.LayersName[iLayer] + '/'
        if not os.path.exists(PathToFldr):
            os.makedirs(PathToFldr)
        PathToFile = PathToFldr + 'Weights.npz'
        numpy.savez(PathToFile, Params[0], Params[1])
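        # numpy.savez stores positional arrays under the keys 'arr_0', 'arr_1', ...;
        # an illustrative reload sketch (not part of the original code):
        #   Loaded = numpy.load(PathToFile)
        #   W, b = Loaded['arr_0'], Loaded['arr_1']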
        if (NNInput.WriteFinalFlg > 0):
            if (jLayer == 0):
                save_parameters_PIP(PathToFldr, Params[0], Params[1])
            else:
                save_parameters(PathToFldr, Params[0], Params[1])

    yPredOrig = model.predict(RDataOrig)
    yPredOrig = InverseTransformation(NNInput, yPredOrig, yDataDiatOrig)
    plot_scatter(NNInput, yPredOrig, yDataOrig)

    ### Evaluating Model for a Particular Data-Set
    if (NNInput.TryNNFlg > 0):

        i = -1
        for Ang in NNInput.AngVector:
            i = i + 1
            xSetTry, ySetTry, ySetTryDiat, ySetTryTriat = datasetsTry[i]
            yPredTry = model.predict(xSetTry)
            yPredTry = InverseTransformation(NNInput, yPredTry, ySetTryDiat)
            ### Saving Predicted Output
            #PathToTryLabels = NNInput.PathToOutputFldr + '/yEvaluated.csv'
            #save_labels(PathToTryLabels, 'Generated', yPredTry)
            PathToAbscissaToPlot = NNInput.PathToDataFldr + '/R.csv.' + str(Ang)
            xPlot = abscissa_to_plot(PathToAbscissaToPlot)
            #PathToTryLabels = NNInput.PathToOutputFldr + '/REBestDet.csv.' + str(Ang)
            PathToTryLabels = NNInput.PathToOutputFldr + '/REBestAll.csv.' + str(Ang)
            # ErrorAbs    =  ySetTry - yPredTry
            # ErrorRel    = (ySetTry - yPredTry) / ySetTry
            # AbsErrorAbs = abs(  ySetTry - yPredTry            )
            # AbsErrorRel = abs( (ySetTry - yPredTry) / ySetTry )
            save_to_plot(PathToTryLabels, 'Evaluated',
                         numpy.concatenate((xPlot, ySetTry, yPredTry), axis=1))
            #save_to_plot_all(PathToTryLabels, 'Evaluated', numpy.concatenate((xPlot, ySetTry, yPredTry, ErrorAbs, ErrorRel, AbsErrorAbs, AbsErrorRel), axis=1))

            # ### Plotting Results
            # plot_try_set(NNInput, ySetTry, yPredTry)
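        # note: the error computed below uses ySetTry / yPredTry from the last
        # angle in AngVector only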

        error = ySetTry - yPredTry
        plot_error(NNInput, error)
Ejemplo n.º 13
0
def evaluate_model(NNInput):

    ##################################################################################################################################
    ### LOADING DATA
    ##################################################################################################################################
    print('\nLoading Data ... \n')

    if (NNInput.TryNNFlg > 0):
        datasets, datasetsTry = load_data(NNInput)
    else:
        datasets = load_data(NNInput)

    RSetTrainValid, ySetTrainValid, ySetTrainValidDiat, ySetTrainValidTriat = datasets[0]
    RSetTest, ySetTest, ySetTestDiat, ySetTestTriat = datasets[1]
    RDataOrig, yDataOrig, yDataDiatOrig, yDataTriatOrig = datasets[2]

    NNInput.NIn = RSetTrainValid.shape[1]
    NNInput.NOut = ySetTrainValid.shape[1]
    print(('  Nb of Input:  %i') % NNInput.NIn)
    print(('  Nb of Output: %i \n') % NNInput.NOut)

    NNInput.NLayers = NNInput.NHid
    NNInput.NLayers.insert(0, NNInput.NIn)
    NNInput.NLayers.append(NNInput.NOut)
    print('  Network Shape: ', NNInput.NLayers, '\n')

    NTrainValid = RSetTrainValid.shape[0]
    NTest = RSetTest.shape[0]
    print(('  Nb of Training + Validation Examples: %i') % NTrainValid)
    print(('  Nb of Test                  Examples: %i \n') % NTest)

    NBatchTrainValid = NTrainValid // NNInput.NMiniBatch
    print(('  Nb of Training + Validation Batches: %i') % NBatchTrainValid)

    ######################
    # BUILD ACTUAL MODEL #
    ######################
    print('\nBuilding the Model ... \n')

    model = build_MLP_model(NNInput)
    #model.summary()

    ######################
    # LOAD TRAINED MODEL #
    ######################

    model.load_weights(NNInput.CheckpointFilePath)
    jLayer = -1
    for iLayer in [1, 3, 4, 5]:
        jLayer = jLayer + 1
        Params = model.get_layer(index=jLayer).get_weights()
        PathToFldr = NNInput.PathToOutputFldr + NNInput.LayersName[iLayer] + '/'
        if not os.path.exists(PathToFldr):
            os.makedirs(PathToFldr)
        PathToFile = PathToFldr + 'Weights.npz'
        numpy.savez(PathToFile, Params[0], Params[1])
        if (NNInput.WriteFinalFlg > 0):
            if (jLayer == 0):
                save_parameters_PIP(PathToFldr, Params[0], Params[1])
            else:
                save_parameters(PathToFldr, Params[0], Params[1])

    yPredOrig = model.predict(RDataOrig)
    yPredOrig = InverseTransformation(NNInput, yPredOrig, yDataDiatOrig)
    plot_scatter(NNInput, yPredOrig, yDataOrig)

    ### Evaluating Model for a Particular Data-Set
    if (NNInput.TryNNFlg > 0):

        i = -1
        for Ang in NNInput.AngVector:
            i = i + 1
            xSetTry, ySetTry, ySetTryDiat, ySetTryTriat = datasetsTry[i]
            yPredTry = model.predict(xSetTry)
            yPredTry = InverseTransformation(NNInput, yPredTry, ySetTryDiat)
            ### Saving Predicted Output
            #PathToTryLabels = NNInput.PathToOutputFldr + '/yEvaluated.csv'
            #save_labels(PathToTryLabels, 'Generated', yPredTry)
            PathToAbscissaToPlot = NNInput.PathToDataFldr + '/R.csv.' + str(Ang)
            xPlot = abscissa_to_plot(PathToAbscissaToPlot)
            #PathToTryLabels = NNInput.PathToOutputFldr + '/REBestDet.csv.' + str(Ang)
            PathToTryLabels = NNInput.PathToOutputFldr + '/REBestAll.csv.' + str(Ang)
            # ErrorAbs    =  ySetTry - yPredTry
            # ErrorRel    = (ySetTry - yPredTry) / ySetTry
            # AbsErrorAbs = abs(  ySetTry - yPredTry            )
            # AbsErrorRel = abs( (ySetTry - yPredTry) / ySetTry )
            save_to_plot(PathToTryLabels, 'Evaluated',
                         numpy.concatenate((xPlot, ySetTry, yPredTry), axis=1))
            #save_to_plot_all(PathToTryLabels, 'Evaluated', numpy.concatenate((xPlot, ySetTry, yPredTry, ErrorAbs, ErrorRel, AbsErrorAbs, AbsErrorRel), axis=1))

            # ### Plotting Results
            # plot_try_set(NNInput, ySetTry, yPredTry)

        error = ySetTry - yPredTry
        plot_error(NNInput, error)
Ejemplo n.º 14
0
        'finish_timing': TIMESTAMP()
    }

    # parser

    qcut_q = int(args.bins)
    qcut_q = 9  # CHANGE FOR REMOTE PC

    y_type = args.y_type  # 'yoyr','qoq','yoy'
    y_type = 'qoq'
    resume = args.resume
    sample_no = args.sample_no
    sample_no = 1  # CHANGE FOR HKPC

    # load data for entire period
    main = load_data(lag_year=args.lag,
                     sql_version=args.sql_version)  # CHANGE FOR DEBUG
    label_df = main.iloc[:, :2]

    space['num_class'] = qcut_q
    space['is_unbalance'] = True
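    # 'num_class' and 'is_unbalance' look like LightGBM hyper-parameters
    # (an inference from the names; not confirmed by this snippet)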

    sql_result = {'qcut': qcut_q}
    # sql_result['name'] = 'try add ibes as X'
    # sql_result['trial'] = db_last['trial'] + 1

    # roll over each round
    period_1 = dt.datetime(2017, 12, 31)  # CHANGE FOR DEBUG

    for i in tqdm(range(sample_no)):  # divide sets and return
        testing_period = period_1 + i * relativedelta(months=3)  # arrange test sets in chronological order
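        # e.g. with sample_no = 3 the testing periods would be 2017-12-31,
        # 2018-03-31, and 2018-06-30 (quarterly steps)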
Ejemplo n.º 15
0
def main(argv=None):

    new_train_set, new_train_mask = load_data()
    print('Loading data finished!')
    train(new_train_set, new_train_mask)
Ejemplo n.º 16
0
    def train(self, model):

        #training parameters
        batch_size = 8
        maxepoches = 25
        learning_rate = 0.1
        lr_decay = 1e-6
        lr_drop = 20
        # The data, shuffled and split between train and test sets:
        # (x_train, y_train), (x_test, y_test) = cifar10.load_data()
        (x_train, y_train), (x_test, y_test) = load_data()
        x_train = x_train.astype('float32')
        x_test = x_test.astype('float32')
        x_train, x_test = self.normalize(x_train, x_test)

        y_train = keras.utils.to_categorical(y_train, self.num_classes)
        y_test = keras.utils.to_categorical(y_test, self.num_classes)

        def lr_scheduler(epoch):
            return learning_rate * (0.5**(epoch // lr_drop))
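        # step decay: with learning_rate = 0.1 and lr_drop = 20, this yields
        # 0.1 for epochs 0-19, 0.05 for epochs 20-39, 0.025 for epochs 40-59, ...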

        reduce_lr = keras.callbacks.LearningRateScheduler(lr_scheduler)

        #data augmentation
        datagen = ImageDataGenerator(
            featurewise_center=False,             # set input mean to 0 over the dataset
            samplewise_center=False,              # set each sample mean to 0
            featurewise_std_normalization=False,  # divide inputs by std of the dataset
            samplewise_std_normalization=False,   # divide each input by its std
            zca_whitening=False,                  # apply ZCA whitening
            rotation_range=15,       # randomly rotate images in the range (degrees, 0 to 180)
            width_shift_range=0.1,   # randomly shift images horizontally (fraction of total width)
            height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
            horizontal_flip=True,    # randomly flip images horizontally
            vertical_flip=False)     # do not flip images vertically
        # compute quantities required for featurewise normalization
        # (std, mean, and principal components if ZCA whitening is applied)
        datagen.fit(x_train)
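        # note: datagen.fit only computes statistics when featurewise_center,
        # featurewise_std_normalization, or zca_whitening is enabled; with the
        # settings above it is effectively a no-op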

        #optimization details
        sgd = optimizers.SGD(lr=learning_rate,
                             decay=lr_decay,
                             momentum=0.9,
                             nesterov=True)
        model.compile(loss='binary_crossentropy',
                      optimizer=sgd,
                      metrics=['accuracy'])

        # training process with a learning-rate drop every 20 epochs (see lr_scheduler above)

        historytemp = model.fit_generator(
            datagen.flow(x_train, y_train, batch_size=batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=maxepoches,
            validation_data=(x_test, y_test),
            callbacks=[reduce_lr],
            verbose=2,
            class_weight='auto')
        model.save_weights('cifar10vgg.h5')
        return model
Ejemplo n.º 17
0
from keras.models import Model, load_model
import numpy as np
from LoadData import load_data
import gc
import random
# from keras.applications.mobilenet import relu6,DepthwiseConv2D
import datetime
from keras.preprocessing import image
gc.collect()
from skimage import io, transform, img_as_ubyte
import os
lst = ['鸟', '荷', '竹', '马', '菊', '兰', '柳', '梅', '山']  # class labels: bird, lotus, bamboo, horse, chrysanthemum, orchid, willow, plum, mountain
starttime = datetime.datetime.now()
X_test, Y_test = load_data('E:\\Imagedecolor\\test')

image_size = (224, 224)

X_test = X_test.astype('float32')
X_test /= 255.
X_test -= 0.5
X_test *= 2.
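# the three lines above map pixel values from [0, 255] to [-1, 1],
# equivalent to X_test = X_test / 127.5 - 1.0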

# model = load_model('finalmobilenet.hdf5',custom_objects={
#                   'relu6': relu6,
#                   'DepthwiseConv2D': DepthwiseConv2D})

model = load_model('data_argumentation_vgg16.hdf5')
Ejemplo n.º 18
0
    Step 4: (this part is updated)
            4.1 load_data
            4.2 for loop -> roll over all time periods
    '''
    # import the 'LoadData.py' module from the local folder 'Preprocessing'
    # import the load_data and clean_set functions from the 'LoadData.py' module
    # this needs an update on GitHub -> Update Project from VCS (Command + T on a MacBook)

    y_type = 'qoq'

    db_string = 'postgres://*****:*****@hkpolyu.cgqhw7rofrpo.ap-northeast-2.rds.amazonaws.com:5432/postgres'
    engine = create_engine(db_string)

    # 4.1. run load data -> return entire dataframe (153667, 3174) for all datacqtr (period)
    main = load_data(lag_year=5, sql_version=False)  # change sql_version -> True to run this code through the Postgres database
    col = main.columns[2:-4].to_list()
    # print(len(col), col)

    explanation_ratio_dict = {}  # dictionary that holds the explained_variance_ratio for all 40 sets

    # 4.2. for loop -> roll over all time period from main dataset
    period_1 = dt.datetime(2017, 12, 31)

    for i in tqdm(range(1)):  # change to 40 for the full 40 sets; change to False to stop saving csv

        testing_period = period_1 + i * relativedelta(
Ejemplo n.º 19
0
# coding: utf-8
# use a custom model

from LoadData import load_data
import numpy as np
import random

np.random.seed(10)
num_class = 2
RGB = 3  # color image (3 channels)
batchSize = 8

# Step 1. Data preparation

(x_train, y_train), (x_test, y_test) = load_data()

# x_train = x_train.transpose(0, 2, 3, 1)
# x_test = x_test.transpose(0, 2, 3, 1)

# shuffle the data
index_1 = [i for i in range(len(x_train))]
random.shuffle(index_1)
x_train = x_train[index_1]
y_train = y_train[index_1]

index_2 = [i for i in range(len(x_test))]
random.shuffle(index_2)
x_test = x_test[index_2]
y_test = y_test[index_2]
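# an equivalent shuffle using NumPy directly (illustrative alternative):
#   perm = np.random.permutation(len(x_train))
#   x_train, y_train = x_train[perm], y_train[perm]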

print("train data:", 'images:', x_train.shape, " labels:", y_train.shape)
Ejemplo n.º 20
0
KERNEL_SIZE = (3, 3)  # convolution kernel size
INPUT_SHAPE = (64, 64, 3)  # input image tensor shape
POOL_SIZE = (2, 2)  # pooling downscaling factor
NB_CLASSES = 0  # number of classes (placeholder; the actual count comes from the folder listing below)
EPOCHS = 100  # number of training epochs

KIND_LISTS = ['anger', 'fear', 'happy', 'normal', 'sad', 'surprised']

# main function
if __name__ == '__main__':
    print('[INFO] start...')
    train_images_path = "/media/wsk/移动磁盘1/face_images/train"
    test_images_path = "/media/wsk/移动磁盘1/face_images/test"
    # load the images
    print("[INFO] loading images...")
    x_train_background, y_train_background = load_data(train_images_path,
                                                       IMAGE_SIZE)
    x_test_background, y_test_background = load_data(test_images_path,
                                                     IMAGE_SIZE)
    # initialize the model
    print("[INFO] initialize background model...")
    background_model = initialize_model(FILTERS, KERNEL_SIZE, INPUT_SHAPE,
                                        POOL_SIZE,
                                        len(os.listdir(train_images_path)))
    # train the model
    print("[INFO] compiling background model...")
    train(background_model, x_train_background, y_train_background,
          x_test_background, y_test_background, 8, EPOCHS,
          'models/Base_model.h5')

    # test the model and output the classification log
    image_lists = load_test_image(test_images_path, IMAGE_SIZE)
Ejemplo n.º 21
0
    clusters = get_clusters(points, centroids)  # initial assignment (recomputed in the loop below)

    for i in range(iterations):

        clusters = get_clusters(points, centroids)
        centroids = center(clusters)

    return clusters, centroids
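
# get_clusters and center are used above but defined elsewhere in this
# project (not shown in this snippet). A minimal sketch of what they might
# compute, assuming Euclidean distance on NumPy arrays (the bodies are an
# illustrative assumption, not the original code):
def get_clusters(points, centroids):
    # assign each point to the cluster of its nearest centroid
    clusters = [[] for _ in centroids]
    for p in points:
        idx = np.argmin([np.linalg.norm(p - c) for c in centroids])
        clusters[idx].append(p)
    return clusters

def center(clusters):
    # recompute each centroid as the mean of its assigned points
    return [np.mean(np.array(cluster), axis=0) for cluster in clusters]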


if __name__ == "__main__":

    filepath = './data/iris.data'

    data = load_data(filepath)

    X = np.array([f[:-1] for f in data])
    Y = np.array([f[-1] for f in data])  # Target

    clusters, centroids = k_means(X, 3)
    print(centroids)

    for i, cluster in enumerate(clusters):
        cluster = np.array(cluster)
        plt.scatter(cluster[:, 0],
                    cluster[:, 1],
                    c=COLORS[i],
                    label="Cluster {}".format(i))

    for i, centroid in enumerate(centroids):
Ejemplo n.º 22
0
import os
import importlib

import seaborn as sns
from keras import objectives, losses

from LoadData import load_data
import PlottingHelpers
import ProcessingHelpers

importlib.reload(ProcessingHelpers)  # reload while still working on this module
importlib.reload(PlottingHelpers)  # reload while still working on this module

sns.set()

dirname = os.getcwd()
pth = os.path.join(dirname, 'CMAPSSData')

print('loading data...')
dc = load_data(pth)
print('done')

# get the first data set training data
df = dc['FD_003']['df_train'].copy()
'''
Make a column for the RUL data.
According to the data description document, the data set contains multiple units; each unit
starts at a certain degradation point, and the measurement data ends shortly before the unit
was decommissioned or broke.

Therefore assume that, for the last measurement time available for a unit, the unit's RUL = 0
(measuring stopped just before the machine broke).
'''
# get the time of the last available measurement for each unit
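# a minimal sketch of the step described above, assuming the usual C-MAPSS
# column names 'unit' and 'time' (the column names are an assumption; the
# snippet is truncated before the original implementation):
#   df['RUL'] = df.groupby('unit')['time'].transform('max') - df['time']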