# For layer i of the captured outputs, step through each column (one per unit) and compute per-column statistics
    r, c = out[i].shape
    stats = np.zeros((noOfStatsTests, c))
    for j in range(c):
        y = out[i][:, j]   # activations of unit j across all examples
        r = y.shape[0]     # number of examples in this column (same as the row count above)
        # stats[0, j] = a.local_charactor(y)
        # stats[1, j] = a.global_charactor(y)
        # ynorm = a.normalise_to_zero_one_interval(y, layerMinMaxVal[i][0], layerMinMaxVal[i][1])
        # print(stats)
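
# A minimal sketch of the zero-one rescaling that the commented-out
# a.normalise_to_zero_one_interval call above refers to. This helper is purely
# illustrative (an assumption about what that routine does), not project code;
# it only assumes numpy is already imported as np, as used above:
def normalise_to_zero_one_sketch(y, min_val, max_val):
    """Linearly map values from [min_val, max_val] onto [0, 1]."""
    return (np.asarray(y) - min_val) / (max_val - min_val)
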

doPredictions = 1
## This section figures out how good the model is by getting it to predict the answers for the training
## and test sets
if doPredictions == 1:
    d.prediction_tester(model, XTrain, TTrain, name='Training data')
    if noOfTestData != 0:
        d.prediction_tester(model,
                            XTest,
                            TTest,
                            name='Test data',
                            example_no=0)
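
# A minimal sketch of the kind of check d.prediction_tester performs
# (illustrative only; the real helper lives in the project's d module and may
# report more detail). Assumes one-hot style targets and numpy imported as np:
def prediction_accuracy_sketch(model, X, T):
    """Fraction of examples whose argmax prediction matches the argmax target."""
    preds = model.predict(X)
    return float(np.mean(np.argmax(preds, axis=1) == np.argmax(T, axis=1)))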

from keras.utils.visualize_util import plot
plot(model, to_file='model.png')
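
# Note: keras.utils.visualize_util only exists in old Keras releases; on
# Keras 2.x the equivalent call (assuming pydot/graphviz are installed) would be:
# from keras.utils import plot_model
# plot_model(model, to_file='model.png', show_shapes=True)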

d.write_out(model, "model.json")

doGap = 0
if doGap == 1:
    verbose = 1


def GeneralisationTest(noOfTestData=500, doPredictions=1, doMatLabResults=False):
    """Build a test set that is disjoint from the training set and evaluate the saved model on it."""
    X = np.load("allInputDataCurrent.npy")
    T = np.load("allOutputDataCurrent.npy")
    from keras.models import load_model
    model = load_model("Random_model.h5")


    # Quantities we can derive from the loaded data:
    noOfTrainData = len(X)
    assert len(X) == len(T)
    lenOfInput = len(X[3])    # example 3 is arbitrary; all rows share the same length
    lenOfOutput = len(T[3])
    lenOfBlock = int(lenOfInput / noOfPrototypes)
    noOfExamples = noOfTrainData // noOfPrototypes
    noOfNewExamples = noOfTestData // noOfPrototypes
    lenOfR = lenOfInput - lenOfBlock
    weightOfX = int(sum(X[0]))
    weightOfR = weightOfX - lenOfBlock
    inverseWeightOfR = lenOfR - weightOfR
    denom = lenOfInput - (lenOfInput / noOfPrototypes)  # denom is the floating point length of R
    assert int(denom) == lenOfR
    fractionalWeightOfR = weightOfR / denom
    fractionalInverseWeightOfR = inverseWeightOfR / denom
    weight = [fractionalWeightOfR, fractionalInverseWeightOfR]
    weightOfT = int(sum(T[3]))
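    # Worked example with purely illustrative numbers: if lenOfInput = 1000 and
    # noOfPrototypes = 10, then lenOfBlock = 100 and lenOfR = 900. A training
    # code with weightOfX = 550 gives weightOfR = 450, inverseWeightOfR = 450,
    # and so fractionalWeightOfR = fractionalInverseWeightOfR = 0.5.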

    if lenOfOutput == noOfPrototypes:
        use1HOT = 1
    else:
        use1HOT = 0
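    # (1-HOT here means class k of the noOfPrototypes classes is encoded as a
    # length-noOfPrototypes vector of zeros with a single 1 at index k,
    # e.g. np.eye(noOfPrototypes)[k].)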

    if categories:
        noOfOutputs = noOfPrototypes
        if use1HOT == 1:
            sizeOfOutput = noOfPrototypes
            print('Overwriting output vector size to length {}'.format(noOfPrototypes))
    else:
        noOfOutputs = noOfTrainData

    print('Random vector, R, has weight {0}'.format(weightOfR))

    # Test_X = code.make_prototyped_random_codes(M=noOfTestData, n=lenOfInput, p=noOfPrototypes, weight=[fractionalWeightOfR],
    #                                            k=2, symbolList=None, verbose=verbose, decay_templates=decay)


    #### Testing code
    # This dumps the codes to MATLAB .mat files so you can play with them if you want
    if doMatLabResults:
        Test_X = code.make_prototyped_random_codes(M=500, n=lenOfInput, p=noOfPrototypes, weight=[fractionalWeightOfR],
                                                   k=2, symbolList=None, verbose=verbose, decay_templates=decay)
        sio.savemat('Test_X5000.mat', {'Test_X': Test_X})
        R = code.make_random_codes(M=500, n=501, weight=weight, k=2, symbolList=[1, 0], verbose=True)
        sio.savemat('R3.mat', {'R': R})
    #######
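    # To read those files back later (illustrative; assumes scipy.io is imported as sio):
    # Test_X = sio.loadmat('Test_X5000.mat')['Test_X']
    # R = sio.loadmat('R3.mat')['R']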

    Test_X, All_X = code.get_test_x(X=X, noOfTestData=noOfTestData, lenOfInput=lenOfInput, noOfPrototypes=noOfPrototypes,
                                    weight=[fractionalWeightOfR, fractionalInverseWeightOfR], k=2, symbolList=None,
                                    verbose=verbose, decay_templates=decay)

    ###### get T
    ######
    ## Now we build the correctly sized Test_T
    Test_T, prototypeOutputCodes = code.get_test_t(T,
                                                   noOfPrototypes=noOfPrototypes,
                                                   noOfTestData=noOfTestData,
                                                   lenOfOutput=len(T[0]),
                                                   verbose=False)



    ## This section figures out how good the model is by getting it to predict the answers for the training
    ## and test sets
    if doPredictions == 1:
        d.prediction_tester(model, X, T, name='Training data')
        if noOfTestData != 0:
            d.prediction_tester(model, Test_X, Test_T, name='Test data', example_no=0)

    np.save("GeneralisantionInputDataTest.npy", Test_X)
    np.save("GeneralisationOutputDataTest.npy", Test_T)

    return Test_X, Test_T
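
# Example usage (hypothetical call; assumes the allInput/allOutput .npy files and
# Random_model.h5 referenced above are on disk):
# Test_X, Test_T = GeneralisationTest(noOfTestData=500, doPredictions=1, doMatLabResults=False)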