Example #1
def getSetForMNIST():
    x_train, x_test, y_train, y_test, x_test_noise = loadMnist(
        rescaleFactor=2,
        fashion=False,
        size=None,
        mean=0,
        var=0.01,
        path="../../../Data/mnist")
    if np.max(x_test) <= 1:
        # bring float images back to integer pixel values for the table lookup
        x_test = np.array(x_test * 255, dtype=int)
        x_train = np.array(x_train * 255, dtype=int)
    else:
        x_test = np.array(x_test, dtype=int)
        x_train = np.array(x_train, dtype=int)
    unique = list(np.sort(np.unique(x_test)))
    myLogSpace = np.logspace(-8, -6, len(unique))
    x_test = myLogSpace[x_test]
    x_test = np.reshape(
        x_test, (x_test.shape[0],
                 (x_test.shape[1] * x_test.shape[2]))).astype(dtype=np.float32)
    x_train = myLogSpace[x_train]
    x_train = np.reshape(
        x_train,
        (x_train.shape[0],
         (x_train.shape[1] * x_train.shape[2]))).astype(dtype=np.float32)
    return x_train, x_test, y_train, y_test
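A note on the encoding above: pixel values are quantized to integers and then used to index myLogSpace directly, which silently assumes the distinct levels are exactly 0..len(unique)-1 (true when all 256 grey levels occur). A minimal, self-contained sketch of the same quantize-then-lookup idea, using searchsorted ranks so it works for any value set; the toy array and shapes are illustrative, not from the dataset:

import numpy as np

# toy "images": 2 images of 2x2 pixels, values already in [0, 1]
toy = np.array([[[0.0, 0.5], [1.0, 0.25]],
                [[0.75, 0.0], [0.5, 1.0]]])

# quantize to integer levels, as the function above does with * 255
levels = np.array(toy * 255, dtype=int)

# one concentration per distinct level, spread logarithmically
unique = np.sort(np.unique(levels))
table = np.logspace(-8, -6, len(unique))

# map each pixel to the rank of its level, then look up the table
ranks = np.searchsorted(unique, levels)
encoded = table[ranks]

# flatten each image to a vector, as the networks in later examples expect
flat = encoded.reshape(encoded.shape[0], -1).astype(np.float32)
print(flat.shape)  # (2, 4)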
Example #2
    def __init__(self,
                 size,
                 epochs=10,
                 nbUnits=[100, 100],
                 sparsities=[0.5, 0.5],
                 frames=10,
                 use_mnist=True):
        self.size = size
        self.nbUnits = nbUnits
        self.sparsities = sparsities
        self.biasList = np.logspace(-2, 2, frames)
        self.use_mnist = use_mnist
        if not self.use_mnist:
            self.barycenters = np.log(
                np.array([[5 * 10**(-6), 10**(-4)], [10**(-5), 5 * 10**(-6)],
                          [10**(-4), 10**(-4)]]) / (8 * 10**-7))
            voronoi_set = VoronoiSet(self.barycenters)  # avoid shadowing the built-in set
            x_train, y_train = voronoi_set.generate(10000)
            x_train = np.asarray(x_train, dtype=np.float32)
            x_test, y_test = voronoi_set.generate(1000)
            x_test = np.asarray(x_test, dtype=np.float32)
        else:
            x_train, x_test, y_train, y_test, x_test_noise = loadMnist(
                rescaleFactor=2,
                fashion=False,
                size=None,
                mean=0,
                var=0.01,
                path="../../../Data/mnist")
            x_test = np.reshape(
                x_test,
                (x_test.shape[0],
                 (x_test.shape[1] * x_test.shape[2]))).astype(dtype=np.float32)
            x_train = np.reshape(
                x_train, (x_train.shape[0],
                          (x_train.shape[1] * x_train.shape[2]))).astype(
                              dtype=np.float32)

        self.epoch = epochs
        self.x_train = x_train
        self.y_train = y_train
        self.x_test = x_test
        self.y_test = y_test
        self.fig, self.ax = plt.subplots(figsize=(19.2, 10.8), dpi=100)

        self.norm = clr.Normalize(vmin=0., vmax=1.)
        cmap = plt.get_cmap("Oranges")
        self.sm = plt.cm.ScalarMappable(cmap=cmap, norm=self.norm)
        self.sm.set_array([])
        # the mappable already carries the norm, so it need not be passed again
        cbar = self.ax.figure.colorbar(self.sm, ax=self.ax)
        cbar.ax.set_ylabel("scores", fontsize="xx-large")
        cbar.ax.tick_params(labelsize="xx-large")
        self.cmap = cmap
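The constructor builds its colorbar from a bare ScalarMappable instead of a plotted artist, which is the usual trick when the colored elements are drawn by hand frame by frame. A stripped-down, runnable sketch of just that pattern (labels and colormap copied from above, the rest illustrative):

import matplotlib.pyplot as plt
import matplotlib.colors as clr

fig, ax = plt.subplots()

# a mappable with no artist behind it; set_array([]) makes it valid for colorbar
norm = clr.Normalize(vmin=0., vmax=1.)
sm = plt.cm.ScalarMappable(cmap=plt.get_cmap("Oranges"), norm=norm)
sm.set_array([])

cbar = ax.figure.colorbar(sm, ax=ax)
cbar.ax.set_ylabel("scores")
plt.show()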
Example #3
def trainWithChemTemplateNN(savePath):

    x_train, x_test, y_train, y_test, x_test_noise = loadMnist(
        rescaleFactor=2,
        fashion=False,
        size=None,
        mean=0,
        var=0.01,
        path="../../../Data/mnist")

    x_test = np.reshape(
        x_test, (x_test.shape[0],
                 (x_test.shape[1] * x_test.shape[2]))).astype(dtype=np.float32)
    x_train = np.reshape(
        x_train,
        (x_train.shape[0],
         (x_train.shape[1] * x_train.shape[2]))).astype(dtype=np.float32)

    nbUnits = [100, 100]
    sparsities = [0.5, 0.5]

    assert len(nbUnits) == len(sparsities)

    epochs = 100
    usingSoftmax = True

    model2 = tf.keras.Sequential()
    for idx, n in enumerate(nbUnits):
        model2.add(
            autoRegulLayer(units=n,
                           biasValue=np.zeros(n) + 1.,
                           fractionZero=sparsities[idx],
                           rate=10.,
                           rateInhib=10.,
                           activation=tf.keras.activations.relu))
    if usingSoftmax:
        model2.add(
            autoRegulLayer(units=10,
                           biasValue=np.zeros(10) + 1.,  # bias sized to the 10 output units, not the previous width n
                           fractionZero=0,
                           rate=10.,
                           rateInhib=10.,
                           activation=tf.keras.activations.softmax))
    model2.compile(optimizer=tf.optimizers.Adam(),
                   loss='sparse_categorical_crossentropy',
                   metrics=['accuracy'])
    model2.build(input_shape=(None, x_train.shape[-1]))
    model2.fit(x_train[:],
               y_train[:],
               epochs=epochs,
               verbose=True,
               validation_data=(x_test, y_test))
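autoRegulLayer is specific to this project; as a point of comparison, the same pipeline with stock Keras Dense layers looks as follows. A hedged sketch, not the author's model: the widths mirror nbUnits above, and input_dim=196 assumes the rescaleFactor=2 images are 14x14.

import tensorflow as tf

def baselineDenseModel(nbUnits=(100, 100), input_dim=196):
    # plain Dense + relu stand-in for the autoRegulLayer stack above
    model = tf.keras.Sequential()
    for n in nbUnits:
        model.add(tf.keras.layers.Dense(n, activation=tf.keras.activations.relu))
    model.add(tf.keras.layers.Dense(10, activation=tf.keras.activations.softmax))
    model.compile(optimizer=tf.optimizers.Adam(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.build(input_shape=(None, input_dim))
    return model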
Example #4
def train(layer,
          listOfRescale,
          Batches,
          Sparsity,
          NbUnits,
          Initial_Result_Path,
          epochs,
          repeat,
          use_bias,
          fashion=False):
    """
        Train a variety of neural network on Batches architecture, a list of string name for architecture.
        Sparsity and NbUnits should be dictionary which keys are the architecture names.
        The training is also made for different rescale size of the inputs.
        They should define 3d_list with respectively the sparsity and number of units desired for each layer.
        The training program save all result in the Initial_Result_Path folder.
        Results for different rescale and different batches are saved in separate sub-directory.
    :param layer: the layer object to use
    :param listOfRescale: list with the rescale size, often [1,2,4], that is the scale to divide each size of the image.
    :param Batches: see above
    :param Sparsity: see above
    :param NbUnits: see above
    :param Initial_Result_Path:
    :param fashion: if Fashion_mnist rather than mnist: use True.
    :param epochs: int, number of epochs per training
    :param repeat: int, number of repeat
    :param use_bias: boolean, True if using bias
    :return:
    """
    gpuIdx = None
    for ridx, r in enumerate(listOfRescale):
        print("________________________SWITCHING RESCALE_____________________")
        p = "../Data/mnist"
        x_train, x_test, y_train, y_test, x_test_noise = loadMnist(
            rescaleFactor=r,
            fashion=fashion,
            size=None,
            mean=0,
            var=0.01,
            path=p)
        flatten = True  # we have to flatten
        for idxb, batch in enumerate(Batches):
            sparsityMat = Sparsity[batch]
            nbUnitMat = NbUnits[batch]
            RESULT_PATH = Initial_Result_Path + str(r) + "/" + str(batch) + "/"
            argsList = []
            for i in range(len(sparsityMat)):
                sparsity = sparsityMat[i]
                units = nbUnitMat[i]
                for idx, u in enumerate(units):
                    biasValues = [
                        np.random.rand(1) for _ in range(len(sparsity[idx]))
                    ]
                    argsList += [[
                        layer, RESULT_PATH, gpuIdx, x_train, y_train, x_test,
                        x_test_noise, y_test, u, use_bias, flatten, epochs,
                        biasValues, sparsity[idx],
                        str(i) + "_" + str(idx), repeat
                    ]]
            with multiprocessing.Pool(processes=len(argsList)) as pool:
                batchOutputs = pool.map(trainMultiProcess, argsList)
            # the with-block already closes and joins the pool on exit
            print("Finished computing, closing pool")

            _saveTrainingResults(batchOutputs, RESULT_PATH)
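The fan-out above is plain multiprocessing.Pool.map over one argument list per configuration. A self-contained toy version of the pattern; trainOneConfig is a placeholder worker, not the trainMultiProcess from the source:

import multiprocessing

def trainOneConfig(args):
    # placeholder worker: unpack one configuration, return a fake result
    name, units = args
    return name, sum(units)

if __name__ == "__main__":
    argsList = [["cfg_0", [100, 100]], ["cfg_1", [50, 50, 10]]]
    with multiprocessing.Pool(processes=len(argsList)) as pool:
        outputs = pool.map(trainOneConfig, argsList)
    # the with-block closes and joins the pool on exit
    print(outputs)  # [('cfg_0', 200), ('cfg_1', 110)]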
Example #5
def train(savePath):
    """
        Train some neural network, and save the weight, aka the architecture, so that in can be used by our parser module.
    :param: savePath: path where the network will be saved
    :return directory for weight
             accuracy
             testing_x_set : a set of inputs for test
             testing_y_set : a set of outputs for test
             nnAnswer : the answer for the raw nn on the test set
    """

    import tensorflow as tf

    x_train, x_test, y_train, y_test, x_test_noise = loadMnist(
        rescaleFactor=2,
        fashion=False,
        size=None,
        mean=0,
        var=0.01,
        path="../../Data/mnist")

    archlist = ["binary", "sig", "sparseNormal"]
    architecture = archlist[2]
    nbUnits = [50, 50, 10, 10]
    nbLayers = len(nbUnits)
    use_bias = True
    epochs = 5
    sess = tf.Session()
    with sess.as_default():
        GPUidx = sess.list_devices()[0].name
        layerList = []
        layerList += [
            tf.keras.layers.Flatten(input_shape=(x_train.shape[1],
                                                 x_train.shape[2]))
        ]
        if architecture == "binary":
            for e in range(nbLayers - 1):
                layerList += [
                    clippedBinaryLayer(GPUidx,
                                       units=nbUnits[e],
                                       activation=None,
                                       use_bias=use_bias)
                ]
            layerList += [
                clippedBinaryLayer(GPUidx,
                                   units=10,
                                   activation=tf.nn.softmax,
                                   use_bias=use_bias)
            ]
        elif architecture == "sig":
            for e in range(nbLayers - 1):
                layerList += [
                    clippedSparseBioSigLayer(GPUidx,
                                             units=nbUnits[e],
                                             activation=None,
                                             use_bias=use_bias)
                ]
            layerList += [
                clippedSparseBioSigLayer(GPUidx,
                                         units=10,
                                         activation=tf.nn.softmax,
                                         use_bias=use_bias)
            ]
        else:
            for e in range(nbLayers - 1):
                layerList += [
                    clippedSparseBioDenseLayer(
                        GPUidx,
                        units=nbUnits[e],
                        activation=tf.keras.activations.relu,
                        use_bias=use_bias)
                ]
            layerList += [
                clippedSparseBioDenseLayer(GPUidx,
                                           units=10,
                                           activation=tf.nn.softmax,
                                           use_bias=use_bias)
            ]
        model = tf.keras.models.Sequential(layerList)
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        #model.build(input_shape=x_train.shape)
        model.fit(x_train, y_train, epochs=epochs, verbose=True)
        model.summary()  # summary() prints itself and returns None
        _, acc = model.evaluate(x_test, y_test)
        _, accNoise = model.evaluate(x_test_noise, y_test)

        nnAnswer = model.predict(x_test)


        # activs=[tf.placeholder(dtype=tf.float32) for _ in layerList]
        # inputs = tf.placeholder(dtype=tf.float32,shape=(None,x_train.shape[1],x_train.shape[2]))
        # activs[0]=layerList[0](inputs)
        # for idx,l in enumerate(layerList[1:]):
        #     activs[idx+1] = l(activs[idx])
        # activation=sess.run(activs,feed_dict={inputs:x_train})
        # names = ["activation of layer"+str(idx) for idx in range(len(layerList))]
        # for idx,a in enumerate(activation):
        #     displayEmbeddingHeat(a,0.1,name=names[idx])

        savePath = os.path.join(savePath, "weightDir")
        plotWeight(model, use_bias)
        saveModelWeight(model, use_bias, savePath)

        print("Ended Training")
    sess.close()
    del model
    del sess
    return savePath, acc, x_test, y_test, nnAnswer
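The evaluation above compares accuracy on x_test and x_test_noise, the noisy copy returned by loadMnist (called with mean=0, var=0.01). Assuming that noise is additive gaussian with those moments, a minimal sketch of building such a copy, with the [0, 1] clipping range as an extra assumption:

import numpy as np

def addGaussianNoise(x, mean=0.0, var=0.01, seed=0):
    # noisy copy of a [0, 1]-valued array; mean/var mirror the loadMnist arguments
    rng = np.random.default_rng(seed)
    noisy = x + rng.normal(mean, np.sqrt(var), size=x.shape)
    return np.clip(noisy, 0.0, 1.0).astype(np.float32)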
Example #6
def trainWithChemTemplateNN(savePath):

    x_train, x_test, y_train, y_test, x_test_noise = loadMnist(
        rescaleFactor=2,
        fashion=False,
        size=None,
        mean=0,
        var=0.01,
        path="../../Data/mnist")
    if np.max(x_test) <= 1:
        # bring float images back to integer pixel values for the table lookup
        x_test = np.array(x_test * 255, dtype=int)
        x_train = np.array(x_train * 255, dtype=int)
    else:
        x_test = np.array(x_test, dtype=int)
        x_train = np.array(x_train, dtype=int)
    unique = list(np.sort(np.unique(x_test)))
    myLogSpace = np.logspace(-8, -4, len(unique))
    x_test = myLogSpace[x_test]
    x_test = np.reshape(
        x_test, (x_test.shape[0],
                 (x_test.shape[1] * x_test.shape[2]))).astype(dtype=np.float32)
    x_train = myLogSpace[x_train]
    x_train = np.reshape(
        x_train,
        (x_train.shape[0],
         (x_train.shape[1] * x_train.shape[2]))).astype(dtype=np.float32)

    constantList, enzymeInit, activInit, inhibInit, C0 = _findConstant(savePath)
    nbUnits = [10, 10, 5, 3]
    sparsities = [0.1, 0.1, 0.1, 0.]
    use_bias = False
    epochs = 10
    my_batchsize = 32
    x_train = x_train / C0
    x_test = x_test / C0


    #Instead of MNIST we try simple VORONOI task:
    # barycenters=np.log(np.array([[5*10**(-6),10**(-4)],[10**(-5),5*10**(-6)],[10**(-4),10**(-4)]])/C0)
    # set=VoronoiSet(barycenters)
    # x_train,y_train=set.generate(100000)
    # x_train = np.asarray(x_train,dtype=np.float32)
    # x_test, y_test=set.generate(1000)
    # x_test = np.asarray(x_test,dtype=np.float32)
    # print(y_test)
    # colors = ["r","g","b"]
    # for idx,x in enumerate(x_test):
    #     plt.scatter(x[0],x[1],c=colors[y_test[idx]])
    # for b in barycenters:
    #     plt.scatter(b[0],b[1],c="m",marker="x")
    # plt.show()

    usingLog = True
    usingSoftmax = True

    # if usingLog:
    #     x_train = np.log(x_train)
    #     x_test = np.log(x_test)


    # model = chemTemplateNNModel(nbUnits=nbUnits,sparsities=sparsities,reactionConstants= constantList, enzymeInitC=enzymeInit, activTempInitC=activInit,
    #                             inhibTempInitC=inhibInit, randomConstantParameter=None,usingLog=usingLog,usingSoftmax=usingSoftmax)
    # print("model is running eagerly: "+str(model.run_eagerly))
    # # model.run_eagerly=True
    # model.compile(optimizer=tf.optimizers.Adam(),
    #               loss='sparse_categorical_crossentropy',
    #               #loss = tf.keras.losses.MeanSquaredError(),
    #               metrics=['accuracy'])
    #               #metrics=[tf.keras.metrics.MeanSquaredError()])
    # model.build(input_shape=(None,x_train.shape[-1]))
    # print("testing against example:")
    # model.greedy_set_cps(x_train[:my_batchsize])
    # #tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="tfOUT", histogram_freq=1 ,profile_batch = 2)
    # model.fit(x_train[:], y_train[:],epochs=10,verbose=True)#,callbacks=[tensorboard_callback])

    #res = model.call(x_train[:10])

    # print("computing cps at equilibrium")
    # import time
    # t = time.time()
    # cps = model.obtainCp(tf.convert_to_tensor(x_train[:100],dtype=tf.float32))
    # print("ended computing of cps in ",time.time()-t)
    # import matplotlib.pyplot as plt
    #
    # plt.figure(figsize=(19.2,10.8), dpi=100)
    # plt.scatter(range(cps.shape[0]),cps[:,0],c="b")
    # plt.yscale("log")
    # plt.ylim(1,10**9)
    # plt.title("competition found for the mnist dataset under the initialization architecture")
    # plt.show()
    # plt.savefig("cp_rescaleof"+str(forcedRescaleFactor))

    # res = model.call(x_test[:10])
    # concentration = model.predConcentration(x_test[:10])
    # print(res)
    # print(concentration)
    a = 1
    b = 1
    @tf.function
    def mylogActivation(x):
        return tf.math.log(a * tf.math.exp(x)/(b + tf.math.exp(x)))

    model2 = tf.keras.Sequential()
    model2.add(tf.keras.layers.Dense(100, activation=mylogActivation))
    model2.add(tf.keras.layers.Dense(100, activation=mylogActivation))
    model2.add(tf.keras.layers.Dense(10, activation=mylogActivation))
    if usingSoftmax:
        model2.add(
            tf.keras.layers.Dense(10, activation=tf.keras.activations.softmax))
    model2.compile(optimizer=tf.optimizers.Adam(),
                   #loss=tf.keras.losses.BinaryCrossentropy(),
                   loss='sparse_categorical_crossentropy',
                   #loss = tf.keras.losses.MeanSquaredError(),
                   metrics=['accuracy']
                   #metrics=[tf.keras.metrics.MeanSquaredError()]
                   )
    model2.build(input_shape=(None, x_train.shape[-1]))
    model2.fit(np.log(x_train[:]), y_train[:], epochs=epochs, verbose=True)
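For a = b = 1 the activation log(a·exp(x) / (b + exp(x))) is exactly log(sigmoid(x)) = -softplus(-x), which avoids overflowing exp(x) for large inputs. A standalone check of the identity (TF 2.x):

import tensorflow as tf

@tf.function
def mylogActivation(x, a=1.0, b=1.0):
    return tf.math.log(a * tf.math.exp(x) / (b + tf.math.exp(x)))

x = tf.constant([-5.0, -1.0, 0.0, 1.0, 5.0])
direct = mylogActivation(x)
stable = -tf.math.softplus(-x)  # log(sigmoid(x)), safe for large x
print(tf.reduce_max(tf.abs(direct - stable)).numpy())  # ~1e-7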
Example #7
def drawInformation(savePath, frames=400):

    x_train, x_test, y_train, y_test, x_test_noise = loadMnist(
        rescaleFactor=2,
        fashion=False,
        size=None,
        mean=0,
        var=0.01,
        path="../../Data/mnist")
    if np.max(x_test) <= 1:
        x_test = np.array(x_test * 255, dtype=int)
        x_train = np.array(x_train * 255, dtype=int)
    else:
        x_test = np.array(x_test, dtype=int)
        x_train = np.array(x_train, dtype=int)
    unique = list(np.sort(np.unique(x_test)))
    myLogSpace = np.logspace(-8, -4, len(unique))
    x_test = myLogSpace[x_test]
    x_test = np.reshape(
        x_test, (x_test.shape[0],
                 (x_test.shape[1] * x_test.shape[2]))).astype(dtype=np.float32)
    x_train = myLogSpace[x_train]
    x_train = np.reshape(
        x_train,
        (x_train.shape[0],
         (x_train.shape[1] * x_train.shape[2]))).astype(dtype=np.float32)

    constantList, enzymeInit, activInit, inhibInit, C0 = _findConstant(
        savePath)
    nbUnits = [100, 30, 10]
    sparsities = [0.5, 0.5, 0.5]
    use_bias = False
    epochs = 10
    my_batchsize = 32

    x_train = x_train / C0
    x_test = x_test / C0

    device_name = tf.test.gpu_device_name()
    if not tf.test.is_gpu_available():
        raise SystemError('GPU device not found')
    print('Found GPU at: {}'.format(device_name))

    forcedRescaleFactor = 1

    model = chemTemplateNNModel(nbUnits=nbUnits,
                                sparsities=sparsities,
                                reactionConstants=constantList,
                                enzymeInitC=enzymeInit,
                                activTempInitC=activInit,
                                inhibTempInitC=inhibInit,
                                randomConstantParameter=None)
    print("model is running eagerly: " + str(model.run_eagerly))
    # model.run_eagerly=True
    model.compile(optimizer=tf.optimizers.Adam(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.build(input_shape=(None, x_train.shape[-1]))

    print("creating animation:")

    my_anim = animator(model, x_train, frames)

    # fig,ax = plt.subplots(figsize=(19.2,10.8), dpi=100)
    # ani = animation.FuncAnimation(fig, my_anim.animate , frames=frames, repeat=True, interval=20, blit=False)
    # ani.save('zoomFULL.gif',writer=LoopingPillowWriter(fps=40))

    fig2, ax = plt.subplots(figsize=(19.2, 10.8), dpi=100)
    mean = []
    cps = []
    modelValidity = []
    from tqdm import tqdm
    for i in tqdm(np.arange(0, frames)):
        a, b, c = my_anim.mean(i)
        mean += [a]
        modelValidity += [b]
        cps += [c]
    ax.plot(my_anim.rangeForRescale[:len(mean)], mean, c="b", label="cps mean")
    ax.set_xlabel("rescale factor", fontsize="xx-large")
    ax.set_ylabel("cp mean over 10 first images of mnist", fontsize="xx-large")
    ax.set_yscale("log")
    ax.tick_params(labelsize="xx-large")
    ax2 = ax.twinx()
    ax2.plot(my_anim.rangeForRescale[:len(mean)],
             modelValidity,
             c="r",
             label="modelValidity")
    ax2.set_yscale("log")
    ax2.set_ylabel('templateInhibConcentration divided by E0',
                   fontsize="xx-large")
    ax2.tick_params(labelsize="xx-large")

    fig2.legend()
    fig2.show()
    fig2.savefig("competitionAndValidtywrtRescaleFactorFULL")

    fig, ax = plt.subplots(figsize=(19.2, 10.8), dpi=100)
    cps = np.array(cps)
    cmap = plt.get_cmap('tab20', cps.shape[1])
    import matplotlib.colors as clr
    norm = clr.Normalize(vmin=0, vmax=cps.shape[1])
    sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
    sm.set_array([])
    cbar = ax.figure.colorbar(sm, ax=ax, norm=norm)
    cbar.ax.set_ylabel("idx of image", fontsize="xx-large")
    cbar.ax.tick_params(labelsize="xx-large")
    for e in range(cps.shape[1]):
        ax.plot(my_anim.rangeForRescale[:cps.shape[0]], cps[:, e], c=cmap(e))
    ax.set_yscale("log")
    ax.set_ylabel("computed cps", fontsize="xx-large")
    ax.set_xlabel("rescale factor", fontsize="xx-large")
    ax.tick_params(labelsize="xx-large")
    fig.show()
    fig.savefig("competitionwrtRescaleFactorFULL")
Example #8
def drawCourbs(savePath, frames=400):

    x_train, x_test, y_train, y_test, x_test_noise = loadMnist(
        rescaleFactor=2,
        fashion=False,
        size=None,
        mean=0,
        var=0.01,
        path="../../Data/mnist")
    if np.max(x_test) <= 1:
        x_test = np.array(x_test * 255, dtype=int)
        x_train = np.array(x_train * 255, dtype=int)
    else:
        x_test = np.array(x_test, dtype=int)
        x_train = np.array(x_train, dtype=int)
    unique = list(np.sort(np.unique(x_test)))
    myLogSpace = np.logspace(-8, -4, len(unique))
    x_test = myLogSpace[x_test]
    x_test = np.reshape(
        x_test, (x_test.shape[0],
                 (x_test.shape[1] * x_test.shape[2]))).astype(dtype=np.float32)
    x_train = myLogSpace[x_train]
    x_train = np.reshape(
        x_train,
        (x_train.shape[0],
         (x_train.shape[1] * x_train.shape[2]))).astype(dtype=np.float32)

    constantList, enzymeInit, activInit, inhibInit, C0 = _findConstant(
        savePath)
    nbUnits = [100, 30, 10]
    sparsities = [0.5, 0.5, 0.5]

    x_train = x_train / C0
    x_test = x_test / C0

    model = chemTemplateNNModel(nbUnits=nbUnits,
                                sparsities=sparsities,
                                reactionConstants=constantList,
                                enzymeInitC=enzymeInit,
                                activTempInitC=activInit,
                                inhibTempInitC=inhibInit,
                                randomConstantParameter=None)
    print("model is running eagerly: " + str(model.run_eagerly))
    # model.run_eagerly=True
    model.compile(optimizer=tf.optimizers.Adam(),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    model.build(input_shape=(None, x_train.shape[-1]))

    print("creating animation:")

    my_anim = animator(model, x_train, frames)

    fig, ax = plt.subplots(figsize=(19.2, 10.8), dpi=100)
    ax.plot(my_anim.rangeForRescale,
            np.power(my_anim.rangeForRescale, 0.5) * model.enzymeInitC,
            c="r",
            label="enzyme rescaled")
    ax.plot(my_anim.rangeForRescale,
            np.array([model.inhibTempInitC] *
                     my_anim.rangeForRescale.shape[0]),
            c="b",
            label="template are not rescaled")
    ax.set_xlabel("rescale factor", fontsize="xx-large")
    ax.set_ylabel("rescaled value", fontsize="xx-large")
    #ax.set_yscale("log")
    ax.tick_params(labelsize="xx-large")
    fig.legend()
    fig.show()
    fig.savefig("EnzymeAndtemplateCourbs")