Example #1
def binomial(k1, k2):
    ## probability that the honest parties find at most k1 + k2 blocks
    honest_pr = utils.binomial_cdf(k1 + k2, honest * s, e / s)
    ## probability that the attacker finds k1 blocks in the first round
    malicious_1 = utils.binomial(k1, att * s, e / s)
    ## probability that the attacker finds k2 blocks in the second round
    malicious_2 = utils.binomial(k2, att * s, e / s)
    return honest_pr, malicious_1, malicious_2
Example #2
def PhiCapHist(alpha, hAlpha, samples, freqDist, count):
    phiCap = 0
    for i in range(len(alpha)):
        h = hAlpha[i]
        b = 1
        for j in range(config.num_samples):
            if count in freqDist[j]:
                b *= utils.binomial(alpha[j], len(samples[j]),
                                    len(freqDist[j][count]))
            else:
                b *= utils.binomial(alpha[j], len(samples[j]), 0)
        phiCap += h * b
    return phiCap
Example #3
    def odf_marginal(self):
        """Computes the marginal ODF from the q-space signal attenuation 
        expressed in the SPF basis, following [cheng-ghosh-etal:10].

        Returns
        -------
        spherical_harmonics : sh.SphericalHarmonics instance.
        """
        dim_sh = sh.dimension(self.angular_rank)
    
        sh_coefs = np.zeros(dim_sh)
        sh_coefs[0] = 1 / np.sqrt(4 * np.pi)
    
        for l in range(2, self.angular_rank + 1, 2):
            for m in range(-l, l + 1):
                j = sh.index_j(l, m)
                for n in range(1, self.radial_order):
                    partial_sum = 0.0
                    for i in range(1, n + 1):
                        partial_sum += (-1)**i * \
                          utils.binomial(n + 0.5, n - i) * 2**i / i
                    sh_coefs[j] += partial_sum * kappa(self.zeta, n) * \
                      self.coefficients[n, j] * \
                      legendre(l)(0) * l * (l + 1) / (8 * np.pi)
        return sh.SphericalHarmonics(sh_coefs)
Example #4
def test_hamming_sphere(sk):
    s, k = sk
    result = list(hamming_sphere(s, k))
    result_set = set(result)
    assert len(result) == len(result_set)
    assert len(result) == 3**k * binomial(len(s), k)
    for t in result:
        assert hamming_distance(s, t) == k
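The assertion len(result) == 3**k * binomial(len(s), k) implies an alphabet of four symbols, since each of the k changed positions can take three alternative letters. A minimal sketch of a hamming_sphere generator consistent with that count, assuming a DNA-style alphabet (the project's actual alphabet and signature may differ):

from itertools import combinations, product

ALPHABET = "ACGT"  # assumed four-letter alphabet: three substitutes per position

def hamming_sphere(s, k):
    # yield every string at Hamming distance exactly k from s
    for positions in combinations(range(len(s)), k):
        substitutes = [[c for c in ALPHABET if c != s[p]] for p in positions]
        for choice in product(*substitutes):
            t = list(s)
            for p, c in zip(positions, choice):
                t[p] = c
            yield "".join(t)

For s = "ACGTA" and k = 2 this yields 3**2 * binomial(5, 2) = 90 distinct strings, matching the test.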
Example #5
def test_binomial():
    assert binomial(0, 0) == 1
    assert binomial(0, 1) == 0
    assert binomial(0, -1) == 0
    assert binomial(1, 0) == 1
    assert binomial(1, 1) == 1
    assert binomial(1, 2) == 0
    assert binomial(10, 5) == 10 * 9 * 8 * 7 * 6 // (2 * 3 * 4 * 5)
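The assertions pin down the convention completely: out-of-range k (negative or larger than n) yields 0, and binomial(10, 5) equals 252. A minimal implementation that would satisfy this test (hypothetical; the project's own helper may differ, and several other snippets on this page call utils.binomial with a non-integer first argument such as n + 0.5, which suggests a gamma-function-based generalization):

from math import comb

def binomial(n, k):
    # binomial coefficient C(n, k); 0 when k is negative or exceeds n
    if k < 0 or k > n:
        return 0
    return comb(n, k)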
Example #6
def radial_function(r, n, l, zeta):
    result = 0.0
    for i in range(n + 1):
        result += \
          (- 1)**i * utils.binomial(n + 0.5, n - i) / \
          factorial(i) * 2**(0.5 * l + i - 0.5) * \
          special.gamma(0.5 * l + i + 1.5) * \
          hyp1f1((2 * i + l + 3) * 0.5, l + 1.5, - 2 * np.pi**2 * r**2 * zeta)
    result = result * 4 * (-1)**(0.5 * l) * zeta**(0.5 * l + 1.5) * \
      np.pi**(l + 1.5) * r**l / special.gamma(l + 1.5) * spf.kappa(zeta, n)
    return result
Example #7
    def R(self, j, mp, m):
        """compute transformation matrix element 
         j         j    0   1   2   3
        R   (Q) = R   (Q , Q , Q , Q )
         m'm       m'm
        
                  -j
        in        __
             j   \     j   j
        [R.u]  = /__  u   R   (Q) ,
             m   m'=j  m'  m'm
        
        according to the formula:
                __    ___________________________
         j     \     /(j-m')(j+m'  )(j-m   )(j+m)    j+m-k    j-m'-k   m'-m+k     k
        R    = /__ \/ ( k  )(m'-m+k)(m'-m+k)( k ) (a)     (a*)      (b)      (-b*)
         m'm    k
                    0     3          2     1
        where a := Q - i.Q  ; b := -Q  -i.Q  .
        
        first three arguments to be provided as multiplicities:
        [J] = 2j+1, [M] = 2m+1, [MP] = 2m'+1, these are always integer
        [-3/2] --> -2; [-1] --> -1; [-1/2] --> 0; [0] --> 1; [1/2] --> 2, etc.
        """
        a   = complex( self.q[0], -self.q[3] ) 
        ac  = complex( self.q[0],  self.q[3] ) #   complex conjugate of a
        b   = complex(-self.q[2], -self.q[1] )
        mbc = complex( self.q[2], -self.q[1] ) # - complex conjugate of b
        res = complex( 0.0 )
        j_p_mp = ( j + mp - 2 ) // 2 # j+m'
        j_m_mp = ( j - mp ) // 2     # j-m'
        j_p_m  = ( j + m  - 2 ) // 2 # j+m
        j_m_m  = ( j - m ) // 2      # j-m
        if j_p_mp < 0 or j_m_mp < 0 or j_p_m < 0 or j_m_m < 0:
            return res

        # prepare constant arrays
        mp_m_m = j_p_mp - j_p_m
        n = np.asarray([j_m_mp, j_p_mp, j_m_m, j_p_m])
        kp = np.asarray([0, mp_m_m, mp_m_m, 0])
        _a = np.asarray([a, ac, b, mbc])
        aexp = np.asarray([j_p_m, j_m_mp, mp_m_m, 0])
        # get range for loop
        k_mx = int(j_p_m if (j_p_m < j_m_mp) else j_m_mp)
        k_mn = int(-j_p_mp+j_p_m if (-j_p_mp+j_p_m > 0) else 0)
        for k in range(k_mn, k_mx+1):
            _k = kp + k
            factor = np.sqrt(np.prod(utils.binomial(n, _k))*complex(1.))
            _aexp = aexp + np.asarray([-k, -k, k, k])
            prod = np.prod(np.power(_a, _aexp))
            res += factor * prod
        return res
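As a quick check of the multiplicity convention (not from the source): j = m' = m = 1/2 corresponds to the call R(2, 2, 2), and with the identity quaternion self.q = (1, 0, 0, 0) the loop reduces to the single term k = 0 with a**1 = 1, so the element evaluates to 1 + 0j, as expected for a trivial rotation.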
Example #8
    def odf_tuch(self):
        """Computes the Tuch ODF from the q-space signal attenuation expressed
        in the SPF basis, following [cheng-ghosh-etal:10].

        Returns
        -------
        spherical_harmonics : sh.SphericalHarmonics instance.
        """
        dim_sh = sh.dimension(self.angular_rank)
        sh_coefs = np.zeros(dim_sh)
        for j in range(dim_sh):
            l = sh.index_l(j)
            for n in range(self.radial_order):
                partial_sum = 0.0
                for i in range(n):
                    partial_sum += utils.binomial(i - 0.5, i) * (-1)**(n - i)
                sh_coefs[j] += partial_sum * self.coefficients[n, j] \
                  * kappa(self.zeta, n)
            sh_coefs[j] = sh_coefs[j] * legendre(l)(0)
        return sh.SphericalHarmonics(sh_coefs)
Example #9
def basic(s, coeffs):
    n = len(coeffs) - 1
    r = 1.0 - s
    if s >= 0.5:
        sigma = r / s
        multiplier = s
    else:
        sigma = s / r
        multiplier = r
        coeffs = coeffs[::-1]

    result = coeffs[0]
    for j in range(1, n + 1):
        binom_val = utils.binomial(n, j)
        modified_coeff = binom_val * coeffs[j]
        result = result * sigma + modified_coeff

    for _ in range(n):
        result = multiplier * result

    return result
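Unwinding the loop, basic appears to evaluate a polynomial given in Bernstein form, i.e. the sum over j of binomial(n, j) * coeffs[j] * s**j * (1 - s)**(n - j), with the branch on s >= 0.5 only choosing which endpoint to factor out so that sigma stays at most 1. A quick sanity check under that reading (hypothetical, and it assumes utils.binomial(n, j) is the plain binomial coefficient):

# with every coefficient equal to 1, the Bernstein weights sum to 1 for any s
for s in (0.1, 0.5, 0.9):
    assert abs(basic(s, [1.0, 1.0, 1.0, 1.0]) - 1.0) < 1e-12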
Example #10
def pr_a(info, target):
    # average the probability estimate over 10 evaluations
    mean_prob = np.mean([info['prob'](info) for _ in range(10)])
    return u.binomial(target, info['honests'], mean_prob)
Example #11
#!/usr/bin/env python3
import math
import numpy as np
import utils as u

# Probability that, if the attacker chooses a time t in T, it falls on 50% of
# the population nodes, or right before:
# Pr[node_i chooses t] = d / T for a delay d
# Pr[attacker targets 50%] = binomial(h/2, h, d/T)
# Pr[50% < target < 60%] = binomialCDF(60%) - binomialCDF(50%)

n = 100
att = n * (1.0 / 3.0)
hon = n - att
T = 80
delay = 10
pr_node = delay / T
target = int(0.5 * hon)
pr50 = u.binomial(target, hon, pr_node)
upto = int(0.60 * hon)
# Pr[ 50% < target < 60% ]
pr_to60 = u.binomial_cdf(upto, hon, pr_node) - u.binomial_cdf(
    target, hon, pr_node)
print("pr50: {}".format(pr50))
print("pr[50->60]: {}".format(pr_to60))
Example #12
    def visible_to_hidden(self, v_sample):
        wx_b = T.dot(v_sample, self.W) + self.hbias
        return binomial(sigm(wx_b))
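Neither sigm nor binomial is defined in this snippet; in an RBM-style sampler like this, binomial(p) is evidently a per-element Bernoulli draw with success probability p, and sigm the logistic sigmoid. The original appears to use Theano (T.dot), so the following NumPy helpers are only an illustrative sketch:

import numpy as np

_rng = np.random.default_rng()

def sigm(x):
    # element-wise logistic sigmoid
    return 1.0 / (1.0 + np.exp(-x))

def binomial(p):
    # one Bernoulli sample per entry of p (a single n=1 binomial draw)
    return _rng.binomial(n=1, p=p)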
Example #13
    def save_metrics(self, figure_dir='', draw_mask=True):
        if self.config.save_metrics:
            print('-------- Save figures begin --------')
            # print(history.history.keys())

            if figure_dir == '':
                figure_dir = os.path.join(
                    self.config.root_dir, 'figures', self.config.model_name,
                    self.config.run, '{}{:.4f}_{}{:.4f}'.format(
                        self.config.record_metrics[0],
                        self.results[self.config.record_metrics[0]],
                        self.config.record_metrics[1],
                        self.results[self.config.record_metrics[1]]))
            else:
                figure_dir = os.path.join(
                    figure_dir, '{}{:.4f}_{}{:.4f}'.format(
                        self.config.record_metrics[0],
                        self.results[self.config.record_metrics[0]],
                        self.config.record_metrics[1],
                        self.results[self.config.record_metrics[1]]))

            if not os.path.exists(figure_dir):
                os.makedirs(figure_dir)

            epochs = self.history.epoch
            metrics = self.history.history
            with open(os.path.join(figure_dir, 'logs.txt'), 'w') as f:
                f.write('epoch : ' + str(epochs) + '\n')
                for key, value in metrics.items():
                    f.write(key + ' : ' + str(value) + '\n')
                for key, value in self.results.items():
                    f.write(key + ' : ' + str(value) + '\n')
                if self.config.run in ['prune', 'auto']:
                    for key, value in self.prune_info.items():
                        f.write(key + ' : ' + str(value) + '\n')

            pair_keys = [
                'loss', 'ift_loss', 'rec_loss', 'PSNR', 'SSIM', 'ift_PSNR',
                'ift_SSIM', 'rec_PSNR', 'rec_SSIM'
            ]
            for key in pair_keys:
                if key in metrics.keys() and 'val_' + key in metrics.keys():
                    plt.figure()
                    plt.plot(epochs, metrics[key])
                    plt.plot(epochs, metrics['val_' + key])
                    plt.grid(True)
                    plt.title(key + ' vs epoch')
                    plt.ylabel(key)
                    plt.xlabel('epoch')
                    plt.legend(['Train', 'Val'], loc='upper left')
                    plt.tight_layout()
                    plt.savefig(os.path.join(figure_dir,
                                             key + '_vs_epoch.png'))
                    print('Saving figure at ' +
                          os.path.join(figure_dir, key + '_vs_epoch.png'))
                    plt.show(block=False)
                    plt.pause(0.01)
                    del metrics[key]
                    del metrics['val_' + key]

            for key, value in metrics.items():
                plt.figure()
                plt.plot(epochs, value)
                plt.grid(True)
                plt.title(key + ' vs epoch')
                plt.ylabel(key)
                plt.xlabel('epoch')
                plt.tight_layout()
                plt.savefig(os.path.join(figure_dir, key + '_vs_epoch.png'))
                print('Saving figure at ' +
                      os.path.join(figure_dir, key + '_vs_epoch.png'))
                plt.show(block=False)
                plt.pause(0.01)

            if draw_mask:
                pmask_layer_list = get_list(self.model, ['PMask2D'])
                for i in range(len(pmask_layer_list)):
                    # prob, mask = pmask_layer_list[i].get_weights()[0], pmask_layer_list[i].mask
                    prob = np.squeeze(pmask_layer_list[i].get_weights()[0],
                                      axis=(0, 3))
                    # mask = np.squeeze(pmask_layer_list[i].get_weights()[1], axis=(0, 3))
                    mask = binomial(prob)
                    plt.figure()
                    plt.title('Probability')
                    plt.subplot(2, 2, 1)
                    fig_obj = plt.imshow(prob, cmap=plt.get_cmap('jet'))
                    plt.colorbar(fig_obj)
                    plt.title('Probability (avg=%.4f)' % np.mean(prob))
                    plt.subplot(2, 2, 2)
                    fig_obj = plt.imshow(mask, cmap=plt.get_cmap('gray'))
                    plt.colorbar(fig_obj)
                    plt.title('Mask (%.2f%%)' %
                              (100.0 * np.sum(mask) / mask.size))
                    plt.subplot(2, 2, 3)
                    plt.plot(np.mean(prob, axis=0))
                    plt.plot(np.mean(prob, axis=1))
                    plt.legend(['Row', 'Col'])
                    plt.title('PDF')
                    plt.subplot(2, 2, 4)
                    plt.plot(np.mean(mask, axis=0))
                    plt.plot(np.mean(mask, axis=1))
                    plt.legend(['Row', 'Col'])
                    plt.title('PDF')
                    plt.tight_layout()
                    plt.savefig(os.path.join(figure_dir,
                                             'Parametric_mask.png'))
                    print('Saving figure at ' +
                          os.path.join(figure_dir, 'Parametric_mask.png'))
                    plt.show(block=False)
                    plt.pause(0.01)

                pmask_layer_list = get_list(self.model,
                                            ['PMask1DH', 'PMask1DV'])
                for i in range(len(pmask_layer_list)):
                    # prob, mask = pmask_layer_list[i].get_weights()[0], pmask_layer_list[i].mask
                    prob = np.squeeze(pmask_layer_list[i].get_weights()[0],
                                      axis=(0, 3))
                    mask = binomial(prob)
                    plt.figure()
                    plt.title('Probability')
                    plt.subplot(2, 2, 1)
                    fig_obj = plt.imshow(np.broadcast_to(prob, [256, 256]),
                                         cmap=plt.get_cmap('jet'))
                    plt.colorbar(fig_obj)
                    plt.title('Probability (avg=%.4f)' % np.mean(prob))
                    plt.subplot(2, 2, 2)
                    fig_obj = plt.imshow(np.broadcast_to(mask, [256, 256]),
                                         cmap=plt.get_cmap('gray'))
                    plt.colorbar(fig_obj)
                    plt.title('Mask (%.2f%%)' %
                              (100.0 * np.sum(mask) / mask.size))
                    plt.subplot(2, 2, 3)
                    plt.plot(prob)
                    plt.grid(True)
                    plt.title('PDF')
                    plt.tight_layout()
                    plt.savefig(os.path.join(figure_dir,
                                             'Parametric_mask.png'))
                    print('Saving figure at ' +
                          os.path.join(figure_dir, 'Parametric_mask.png'))
                    plt.show(block=False)
                    plt.pause(0.01)

            print('-------- Save figures end --------')

        # def prune(self, x_train, y_train, x_test, y_test):
        #
        #     print('-------- Prune begin --------')
        #
        #     batchnorm_layer_list = [layer for layer in self.model.layers if 'batch_normalization' in layer.name]
        #     mask_layer_list = [layer for layer in self.model.layers if 'mask' in layer.name]
        #     num_layers = len(mask_layer_list)
        #
        #     for t in range(self.config.prune_steps):
        #         print('Before prune:')
        #         self.validate(x_test=x_test, y_test=y_test)
        #
        #         print('-------- Prune step {}: {}% begin --------'.format(t, self.config.sparsity[t] * 100))
        #
        #         prune_mask_vectors = prune_batchnorm(model=self.model, layer_list=batchnorm_layer_list,
        #                                              sparsity=self.config.sparsity[t])
        #
        #         for i in range(num_layers):
        #             mask_layer_list[i].set_weights(
        #                 [np.reshape(prune_mask_vectors[i], [1, 1, 1, len(prune_mask_vectors[i])])])
        #
        #         print('After prune:')
        #         self.validate(x_test=x_test, y_test=y_test)
        #
        #         print('Retrain:')
        #         ckpt_save_path = os.path.join(self.config.root_dir, 'checkpoints', self.config.model_name,
        #                                       'prune_%.2f' % self.config.sparsity[t])
        #         log_dir = os.path.join(self.config.root_dir, 'logs', self.config.model_name,
        #                                'prune_%.2f' % self.config.sparsity[t])
        #         self.callbacks = self.instance.initialize_callbacks(ckpt_save_path=ckpt_save_path, log_dir=log_dir)
        #         self.train(x_train, y_train, x_test, y_test)
        #
        #         print('After retrain:')
        #         self.validate(x_test=x_test, y_test=y_test)
        #
        #         save_dir = os.path.join(self.config.root_dir, 'results', self.config.model_name,
        #                                 'prune_%.2f' % self.config.sparsity[t])
        #         self.save_model(result_dir=save_dir)
        #
        #         figure_dir = os.path.join(self.config.root_dir, 'figures', self.config.model_name,
        #                                   'prune_%.2f' % self.config.sparsity[t])
        #         self.save_metrics(figure_dir=figure_dir)
        #
        #         print('-------- Prune step {}: {}% end --------'.format(t, self.config.sparsity[t] * 100))
        #
        #     print('-------- Prune end --------')

        # def compress(self):
        #     print('-------- Compress begin --------')
        #
        #     conv_layer_list = [layer for layer in self.model.layers if 'conv' in layer.name]
        #     batchnorm_layer_list = [layer for layer in self.model.layers if 'batch_normalization' in layer.name]
        #     mask_layer_list = [layer for layer in self.model.layers if 'mask' in layer.name]
        #     dense_layer_list = [layer for layer in self.model.layers if 'dense' in layer.name]
        #     num_layers = len(mask_layer_list)
        #
        #     mask_vector = [None] * num_layers
        #     final_channel = [None] * num_layers
        #
        #     # obtain mask and number of channels
        #     for i in range(num_layers):
        #         mask = mask_layer_list[i].get_weights()[0]
        #         mask_vector[i] = mask
        #         final_channel[i] = int(np.sum(mask))
        #     print('Final channel: ' + str([(i, mask_layer_list[i].name, final_channel[i]) for i in range(num_layers)]))
        #
        #     self.config.final_channel = final_channel
        #     self.config.is_final = True
        #     self.config.restore_model = False
        #     self.config.bn_mask = False
        #     # create final model
        #     final_instance = interface.ModelAdapter(self.config)
        #     final_model = final_instance.create_model()
        #     final_instance.serialize_model()
        #
        #     final_conv_layer_list = [layer for layer in final_model.layers if 'conv' in layer.name]
        #     final_batchnorm_layer_list = [layer for layer in final_model.layers if 'batch_normalization' in layer.name]
        #     final_dense_layer_list = [layer for layer in final_model.layers if 'dense' in layer.name]
        #
        #     # first conv-bn-mask-relu block
        #     print('Compress the first conv-bn-mask-relu block ' + conv_layer_list[0].name)
        #     conv_weights = conv_layer_list[0].get_weights()
        #     bn_weights = batchnorm_layer_list[0].get_weights()
        #     cur_mask = mask_vector[0]
        #     # kernel
        #     conv_weights[0] *= cur_mask
        #     conv_weights[0] = conv_weights[0][..., np.where(np.squeeze(cur_mask) == 1)[0]]
        #     # bias
        #     conv_weights[1] *= np.squeeze(cur_mask)
        #     conv_weights[1] = conv_weights[1][..., np.where(np.squeeze(cur_mask) == 1)[0]]
        #     # batchnorm: gamma, beta, mean, variance
        #     for j in range(len(bn_weights)):
        #         bn_weights[j] *= np.squeeze(cur_mask)
        #         bn_weights[j] = bn_weights[j][..., np.where(np.squeeze(cur_mask) == 1)[0]]
        #     # weights transfer
        #     final_conv_layer_list[0].set_weights(conv_weights)
        #     final_batchnorm_layer_list[0].set_weights(bn_weights)
        #
        #     # reduce the in-channel of the convolution in the first Resblock
        #     conv_weights = conv_layer_list[1].get_weights()
        #     cur_mask = cur_mask.transpose(0, 1, 3, 2)
        #     # kernel
        #     conv_weights[0] *= cur_mask
        #     conv_weights[0] = conv_weights[0][..., np.where(np.squeeze(cur_mask) == 1)[0], :]
        #     # weight transfer
        #     final_conv_layer_list[1].set_weights(conv_weights)
        #
        #     # depth-wise convlutional layers in skip connections
        #     dwc = [19, 36]
        #     for idx in dwc:
        #         print('Compress depth-wise ' + conv_layer_list[idx].name)
        #         final_conv_layer_list[idx].set_weights(conv_layer_list[idx].get_weights())
        #     # remove layer names in list
        #     conv_layer_list = np.delete(conv_layer_list, dwc)
        #     final_conv_layer_list = np.delete(final_conv_layer_list, dwc)
        #
        #     # regular conv-bn-mask-relu block weights transfer
        #     for i in range(1, num_layers):
        #         print('Compress ' + str((i, conv_layer_list[i].name, batchnorm_layer_list[i].name)))
        #         conv_weights = conv_layer_list[i].get_weights()
        #         bn_weights = batchnorm_layer_list[i].get_weights()
        #         pre_mask = mask_vector[i - 1].transpose(0, 1, 3, 2)
        #         cur_mask = mask_vector[i]
        #         # kernel
        #         conv_weights[0] *= cur_mask
        #         if i % 2 == 1:
        #             conv_weights[0] = conv_weights[0][..., np.where(np.squeeze(cur_mask) == 1)[0]]
        #         if i % 2 == 0:
        #             conv_weights[0] *= pre_mask
        #             conv_weights[0] = conv_weights[0][..., np.where(np.squeeze(pre_mask) == 1)[0], :]
        #         # bias
        #         conv_weights[1] *= np.squeeze(cur_mask)
        #         if i % 2 == 1:
        #             conv_weights[1] = conv_weights[1][..., np.where(np.squeeze(cur_mask) == 1)[0]]
        #         # batchnorm: gamma, beta, mean, variance
        #         for j in range(len(bn_weights)):
        #             bn_weights[j] *= np.squeeze(cur_mask)
        #             if i % 2 == 1:
        #                 bn_weights[j] = bn_weights[j][..., np.where(np.squeeze(cur_mask) == 1)[0]]
        #         # weight transfer
        #         final_conv_layer_list[i].set_weights(conv_weights)
        #         final_batchnorm_layer_list[i].set_weights(bn_weights)
        #
        #     # dense layer weights transfer
        #     for i in range(len(dense_layer_list)):
        #         print('Compress ' + final_dense_layer_list[i].name)
        #         final_dense_layer_list[i].set_weights(dense_layer_list[i].get_weights())
        #
        #     print('-------- Compress end --------')

        #     return final_model