Code Example #1
import numpy as np
from npeet import entropy_estimators as ee  # KSG estimators (NPEET)
# numpy_transpose_byorder / numpy_merge_dimensions are project-specific array helpers

def cross_mi_3D(data, settings):
    nTrial, nProcess, nSample = data.shape
    if nTrial * nSample < 2 * nProcess:
        # Too few samples to estimate anything meaningful
        return np.full((nProcess, nProcess), np.nan)
    else:
        lag = settings['lag']

        # Check that the number of timesteps is sufficient to estimate the lag
        if nSample <= lag:
            raise ValueError(f'lag {lag} cannot be estimated from {nSample} timesteps')

        # Reorder (trial, process, sample) -> (process, sample, trial), then
        # flatten the sample and trial axes into one sample axis per process
        dataOrd = numpy_transpose_byorder(data, 'rps', 'psr')
        xx = numpy_merge_dimensions(dataOrd[:, :nSample - lag], 1, 3)
        yy = numpy_merge_dimensions(dataOrd[:, lag:], 1, 3)

        rez = np.zeros((nProcess, nProcess))
        if lag > 0:
            for i in range(nProcess):
                for j in range(nProcess):
                    rez[i, j] = ee.mi(xx[i], yy[j])
        else:
            # Optimization: at zero lag xx == yy, so the matrix is symmetric
            for i in range(nProcess):
                for j in range(i, nProcess):
                    rez[i, j] = ee.mi(xx[i], yy[j])
                    rez[j, i] = rez[i, j]

        return rez
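
A quick smoke test of the function above (hypothetical shapes; assumes the project array helpers and the ee import resolve):

data = np.random.randn(10, 3, 100)      # (nTrial, nProcess, nSample)
mi_mat = cross_mi_3D(data, {'lag': 1})  # pairwise lagged MI between processes
print(mi_mat.shape)                     # (3, 3)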
Code Example #2
    def evaluateModel(self):
        with tf.Session() as sess:
            # Restore variables from disk.
            self.saver.restore(sess, self.args.pretrained)

            x, y, _, bins = self.dataset.get_eval_data()

            mu_out, x_rec = sess.run([self.mu, self.x_mu],
                                     feed_dict={
                                         self.tf_X: x,
                                         self.tf_Y: y,
                                         self.lagMul: np.asarray([[self.lm]])
                                     })

            x_mae_out, z0 = sess.run([self.x_mae, self.z_plain],
                                     feed_dict={
                                         self.tf_X: x,
                                         self.tf_Y: y,
                                         self.z_plain: mu_out
                                     })

            mi = ee.mi(z0, y, k=20)
            self.log.info("Mutual Information: %f, X MAE: %f" %
                          (mi, x_mae_out))

            tmp_idx = np.random.choice(10000, 1000)
Code Example #3
    def cmi(self, X, Y, Z):
        # ee.mi adds tiny noise to break ties; fix the seed for reproducibility
        np.random.seed(0)
        r = ee.mi(X.copy(order='C'),
                  Y.copy(order='C'),
                  z=Z.copy(order='C'),
                  k=self.k)
        # CMI is non-negative in theory; clamp away small negative KSG estimates
        return r if r >= 0 else 0
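
As a sanity check (a minimal sketch, not from the source project): for Gaussian X and Y that depend on each other only through Z, the conditional MI should sit near zero while the unconditional MI stays clearly positive.

import numpy as np
from npeet import entropy_estimators as ee

rng = np.random.default_rng(0)
Z = rng.normal(size=(1000, 1))
X = Z + rng.normal(size=(1000, 1))
Y = Z + rng.normal(size=(1000, 1))
np.random.seed(0)
print(ee.mi(X, Y, z=Z, k=3))  # close to 0: X and Y are independent given Z
print(ee.mi(X, Y, k=3))       # clearly positive: X and Y share Z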
Code Example #4
def average_predictive_info(data, settings):
    x, y = drop_nan_rows(split3D(data, settings['max_lag']))

    nSample, nProcess = x.shape
    if nSample < 5 + 5 * nProcess:
        # Too few samples to estimate anything meaningful
        return np.array(np.nan)
    else:
        return ee.mi(x, y) / nProcess
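
For intuition, a self-contained sketch of the same quantity on a single 2-D array, assuming split3D pairs each timestep with the values max_lag steps later (an assumption about the project helper):

import numpy as np
from npeet import entropy_estimators as ee

def predictive_info_2d(data2d, lag):
    # data2d: (nSample, nProcess); MI between the past and the lag-step
    # future, normalised by the number of processes
    x, y = data2d[:-lag], data2d[lag:]
    return ee.mi(x, y) / data2d.shape[1]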
Code Example #5
def responsiveness_1(keyp, keyp_diff, actions):
    # keyp: T x 2 keypoints; keyp_diff and actions share the T axis
    assert len(keyp.shape) == 2 and keyp.shape[1] == 2

    # MI between the joint (action, keypoint) variable and the keypoint change
    keyp_combine = np.hstack((actions, keyp))
    resp = ee.mi(keyp_combine, keyp_diff)

    return resp
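
A hypothetical call with synthetic inputs (shapes follow the assertion above; a near-zero estimate is expected for independent noise):

import numpy as np
T = 500
keyp = np.random.randn(T, 2)
keyp_diff = np.random.randn(T, 2)  # stand-in for the per-step keypoint change
actions = np.random.randn(T, 1)
print(responsiveness_1(keyp, keyp_diff, actions))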
Code Example #6
import numpy as np
from scipy.stats import spearmanr
from npeet import entropy_estimators as ee

def do_it(X, Y, X_corrected, Y_corrected, do_entropy=True):
    # Pearson correlation and the moments needed for concordance
    corr = np.corrcoef(X, Y)[0, 1]
    varx = np.var(X)
    vary = np.var(Y)
    cov = np.cov(X, Y)[0, 1]
    meanx = np.mean(X)
    meany = np.mean(Y)

    # Lin's concordance correlation coefficient
    conc = 2. * cov / (varx + vary + np.square(meanx - meany))

    # Spearman rank correlation
    spearman = spearmanr(X, Y)[0]
    # Dice coefficient of sign agreement between the corrected maps
    # (an alternative thresholds |X|, |Y| at percentiles of their union)
    for threshold in [0]:
        x_pos = X_corrected > threshold
        x_neg = X_corrected < threshold
        y_pos = Y_corrected > threshold
        y_neg = Y_corrected < threshold
        pos = x_pos * y_pos
        neg = x_neg * y_neg
        s = pos + neg

        xplusy = x_pos.sum() + x_neg.sum() + y_pos.sum() + y_neg.sum()
        if xplusy == 0:
            dice = 0
        else:
            dice = 2. * s.sum() / xplusy

    if do_entropy:
        # Entropy correlation coefficient: ecc = sqrt(MI / (0.5 * (hx + hy)))
        x = ee.vectorize(X)
        y = ee.vectorize(Y)
        mi = ee.mi(x, y)
        hx = -ee.entropy(x)
        hy = -ee.entropy(y)
        ecc = np.sqrt(mi / (0.5 * (hx + hy)))
    else:
        ecc = np.nan

    return corr, conc, spearman, dice, ecc
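
A hypothetical smoke test with correlated Gaussian stand-ins. Note that hx = -ee.entropy(x) only yields a positive denominator when the differential entropy is negative, so the stand-in values are scaled to a narrow range:

rng = np.random.default_rng(0)
X = 0.01 * rng.normal(size=500)
Y = 0.8 * X + 0.006 * rng.normal(size=500)
print(do_it(X, Y, X, Y))  # (corr, conc, spearman, dice, ecc)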
Code Example #7
def responsiveness(keyp, actions):
    """
    Mutual information between keypoint coordinates and actions.

    :param keyp: T x 2
    :param actions: T x 1
    :return: MI estimate
    """
    assert len(keyp.shape) == 2 and keyp.shape[1] == 2
    resp = ee.mi(keyp, actions)

    return resp
Code Example #8
    def mi(self, X, Y):
        # Heuristic neighbour count: scale with joint dimensionality, clamped to [7, 20]
        dx, dy, dz = X.shape[-1], Y.shape[-1], 0
        d = dx + dy + dz
        k = np.clip(2 * d, 7, 20)

        # Reuse a cached bias-correction alpha for this (k, dims) combination, if any
        alpha = self.alphas.get((k, dx, dy, dz))

        # ee.mi adds tiny noise to break ties; fix the seed for reproducibility
        np.random.seed(0)
        r = ee.mi(X.copy(order='C'), Y.copy(order='C'), k=k, alpha=alpha)
        # MI is non-negative in theory; clamp away small negative estimates
        return r if r >= 0 else 0
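
The clamp keeps k at 7 for low-dimensional pairs and saturates it at 20 once the joint dimensionality reaches 10; a quick check:

import numpy as np
for d in [2, 4, 6, 10, 16]:
    print(d, int(np.clip(2 * d, 7, 20)))  # -> 7, 8, 12, 20, 20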
Code Example #9
File: mi.py Project: mrhossain/corrsim
    def ksg(x, y):
        """
        Kraskov–Stogbauer–Grassberger (KSG) estimator of mutual information
        between two sentences represented as word embedding matrices x and y
        :param x: list of word embeddings for the first sentence
        :param y: list of word embeddings for the second sentence
        :return: KSG similarity measure between the two sentences
        """

        if pool is None:
            # No pooling: treat each embedding dimension as one sample
            xT = x.T
            yT = y.T
        else:
            # Pool over words (e.g. a mean along axis 0), giving one column vector
            xT = pool(x, axis=0).reshape(-1, 1)
            yT = pool(y, axis=0).reshape(-1, 1)

        return mi(xT, yT, base=np.e, k=k)
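
pool and k are closure variables from the enclosing scope; a hypothetical standalone setup (np.mean as the pooling function, NPEET's mi):

import numpy as np
from npeet.entropy_estimators import mi

pool, k = np.mean, 3
x = np.random.randn(12, 50)  # 12 words, 50-dim embeddings
y = np.random.randn(15, 50)
xT = pool(x, axis=0).reshape(-1, 1)  # (50, 1) pooled sentence vector
yT = pool(y, axis=0).reshape(-1, 1)
print(mi(xT, yT, base=np.e, k=k))    # MI in nats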
Code Example #10
    def evaluateModel(self):
        with tf.Session() as sess:
            # Restore variables from disk.
            self.saver.restore(sess, self.args.pretrained)
            x, y, _, bins = self.dataset.get_eval_data()

            mu_out, x_rec, y_rec = sess.run(
                [self.mu, self.x_mu, self.y_mu],
                feed_dict={self.tf_X: x, self.tf_Y: y, self.lagMul: np.asarray([[self.lm]])})

            x_mae_out, y_o, z1, z0 = sess.run([self.x_mae, self.y_mae, self.z_1, self.z_0],
                                          feed_dict={self.tf_X: x, self.tf_Y: y, self.lagMul: np.asarray([[self.lm]]), self.z: mu_out})

            mi = ee.mi(z0, y, k=20)
            self.log.info("Mutual Information: %f, X MAE: %f, Y MAE: %f" % (mi, x_mae_out, y_o))


            Plotter.plot2DData(y_rec[:, :], bins[:], self.args.plot_path, Strings.STIB_WO_REG_Y)
            Plotter.plot2DLatentSpace(z0[:, 0], z1[:], bins[:], self.args.plot_path, Strings.STIB_WO_REG_LATENT)
            Plotter.plot2DData(x_rec[:, :], bins[:], self.args.plot_path, Strings.STIB_WO_REG_X)
Code Example #11
def average_predictive_info_non_uniform(dataLst, settings):
    # Check that every trial has enough timesteps to estimate the requested lag
    nSampleMin = np.min(set_list_shapes(dataLst, axis=1))
    if nSampleMin <= settings['max_lag']:
        raise ValueError(f"lag {settings['max_lag']} cannot be estimated from {nSampleMin} timesteps")

    # Split each trial into past/future pairs, then pool samples across trials
    xLst = []
    yLst = []
    for dataTrial in dataLst:
        x, y = drop_nan_rows(split3D(dataTrial, settings['max_lag']))
        xLst.append(x)
        yLst.append(y)
    xArr = np.vstack(xLst)
    yArr = np.vstack(yLst)

    nSample, nProcess = xArr.shape
    if nSample < 4 * nProcess:
        # Too few samples to estimate anything meaningful
        return np.array(np.nan)
    else:
        return ee.mi(xArr, yArr) / nProcess
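
A hypothetical call with trials of unequal length. The axis=1 check suggests each trial is shaped (nProcess, nSample), and the project helpers set_list_shapes, split3D and drop_nan_rows are assumed importable:

import numpy as np
# two trials, 3 processes, unequal numbers of timesteps
dataLst = [np.random.randn(3, 120), np.random.randn(3, 90)]
print(average_predictive_info_non_uniform(dataLst, {'max_lag': 2}))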
Code Example #12
File: test.py Project: gregversteeg/NPEET
print('Mutual Information')
trueent = 0.5 * (1 + log(2. * pi * cov[0][0]))  # H(x)
trueent += 0.5 * (1 + log(2. * pi * cov[1][1]))  # H(y)
trueent += -0.5 * (2 + log(4. * pi * pi * det([[cov[0][0], cov[0][1]], [cov[1][0], cov[1][1]]])))  # -H(x, y)
print('true MI(x:y)', trueent / log(2))  # nats -> bits

ent = []
err = []
for NN in Ntry:
    tempent = []
    for j in range(nsamples):
        points = nr.multivariate_normal(mean, cov, NN)
        x = [point[:1] for point in points]
        y = [point[1:2] for point in points]
        tempent.append(ee.mi(x, y))
    tempent.sort()
    tempmean = np.mean(tempent)
    ent.append(tempmean)
    # asymmetric error bars from the sorted sample quantiles
    err.append((tempmean - tempent[samplo], tempent[samphi] - tempmean))

print('samples used', Ntry)
print('estimated MI', ent)
print('95% conf int.\n', err)


print('\nIf you permute the indices of x, then MI(X:Y) = 0')
# You can use the shuffle_test method to just get the mean and standard deviation
ent = []
err = []
for NN in Ntry:
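
The comment above refers to NPEET's shuffle_test helper, which permutes one argument to build a null distribution; a minimal sketch of a call:

import numpy as np
from npeet import entropy_estimators as ee

x = np.random.randn(500, 1)
y = np.random.randn(500, 1)
null_mean, (lo, hi) = ee.shuffle_test(ee.mi, x, y, ns=200, ci=0.95)
print(null_mean, (lo, hi))  # null MI centred near 0 with a 95% interval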
Code Example #13
    def optimizeModel(self):

        lm = 0.7  # initial multiplier fed through the lagMul placeholder

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True

        saver = tf.train.Saver(max_to_keep=1000000)

        x_l_t = np.inf
        y_l_t = np.inf

        x, y, _, bins = self.dataset.get_eval_data()

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())

            for step in range(150000):

                x_batch, y_batch, t_batch = self.dataset.next_batch(
                    self.batch_size)

                _, mu_out, ll_out, rl_out, log_sigma_out, yl_out, irl_out, z0_out_tr = sess.run(
                    [
                        self.optimizer, self.mu, self.latent_loss,
                        self.x_reconstr_loss, self.log_sigma,
                        self.y_reconstr_loss, self.ir_loss, self.z_0
                    ],
                    feed_dict={
                        self.tf_X: x_batch,
                        self.tf_Y: y_batch,
                        self.lagMul: np.asarray([[lm]])
                    })

                _, irl_out_adv, z_out, y_mur_sg_out, bi_loss_out = sess.run(
                    [
                        self.optimizer_ir, self.ir_loss, self.z, self.y_mur_sg,
                        self.bi_loss
                    ],
                    feed_dict={
                        self.tf_X: x_batch,
                        self.tf_Y: y_batch,
                        self.lagMul: np.asarray([[lm]])
                    })

                if step % 300 == 0 and step > 1:
                    self.log.info(
                        "Iteration: %d, Lambda: %.2f, I(x,t): %.2f, I(Z,X): %.2f, I(Z1,Y): %.2f"
                        % (step, lm, ll_out, rl_out, yl_out))

                if step % 300 == 0 and step > 0:

                    mu_out_tmp = sess.run(
                        [self.mu],
                        feed_dict={
                            self.tf_X: x,
                            self.tf_Y: y,
                            self.lagMul: np.asarray([[lm]])
                        })

                    x_rec, y_rec, y_irr_rec, ll_test, y_rec_test_loss, y_irr_test_loss, mi_loss, y_o, y_ir_o, y_hat_test, z0_out, x_test_loss, x_mae_out, bi_out = sess.run(
                        [
                            self.x_mu, self.y_mu, self.y_mur_sg,
                            self.latent_loss, self.y_reconstr_loss,
                            self.y_irr_rec_loss, self.ir_loss, self.y_mae,
                            self.y_irr_mae, self.y_hat, self.z_0,
                            self.x_reconstr_loss, self.x_mae, self.bi_loss
                        ],
                        feed_dict={
                            self.tf_X: x,
                            self.tf_Y: y,
                            self.lagMul: np.asarray([[lm]]),
                            self.z: np.reshape(mu_out_tmp, (10000, 3))
                        })
                    self.log.info(
                        "I(Z0,Y): %.2f, BI Loss: %.2f, X MAE: %.2f, Y MAE: %.2f"
                        % (ee.mi(z0_out, y, k=20), bi_out, x_mae_out, y_o))

                    if y_o <= y_l_t and x_mae_out <= x_l_t:
                        y_l_t = y_o
                        x_l_t = x_mae_out
                        self.log.info("saved model ...")
                        save_path = saver.save(sess, self.args.save_path)

                    lm *= 1.03  # grow the multiplier after each evaluation checkpoint
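
So the multiplier grows geometrically, one factor of 1.03 per evaluation checkpoint (every 300 iterations). A quick check of where that schedule ends up:

lm = 0.7
for _ in range(150000 // 300 - 1):  # ~499 checkpoints hit the update above
    lm *= 1.03
print(lm)  # roughly 1.8e6 by the end of training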