Code example #1
0
File: test_lmc.py  Project: jshepherd01/LMC2PY
 def in_potential_values(self):
     """Feed three queued inputs via INP and check the accumulator after each.

     NOTE(review): name lacks the ``test_`` prefix, so unittest discovery
     will not run this automatically — confirm whether that is intended.
     """
     lmc = LMC([1, 5, 3], [])
     for expected in (1, 5, 3):
         lmc.in_out(1)
         self.assertEqual(lmc.accumulator, expected)
Code example #2
0
    def setup(self, filepath):
        """Compile *filepath* into mailbox machine code and build the LMC.

        Also wires up an output-checking callable (from a batch-test file or
        an explicit checker script, whichever is configured) and changes the
        working directory to the program's directory so feedback files are
        written next to it.
        """
        # A batch-test file takes precedence as the source of the checker;
        # otherwise fall back to a standalone checker script if given.
        if self.batch_fp:
            self.batch_tests, self.potential_values = file_parser.parse_batch_test_file(self.batch_fp)
            self.checker = self.get_batch_outputs
        elif self.checker_fp:
            self.checker = self.get_checker(self.checker_fp)

        # Compile the program file into its mailbox representation.
        self.mailboxes, mailbox_count = file_parser.get_mailboxes_from_file(filepath)
        print("Compiled program uses %d/100 mailboxes." % mailbox_count)
        self.lmc = LMC(self.potential_values, self.mailboxes, self.max_cycles)

        # Work from the program's directory so feedback outputs land there;
        # fall back to '.' when the path has no directory component.
        os.chdir(ntpath.dirname(filepath) or '.')
Code example #3
0
    def _definePlaceholdersEncoderLikelihood(self, s, Ns, Nc, Nw):
        '''Build the TF1 (graph-mode) nodes for the data placeholders, the
        per-factor LMC kernels, the encoder scores, and the complex-Gaussian
        negative log-likelihood of the Fourier-transformed data.

        Parameters
        ----------
        s : tensor-like (Ns,)
            The frequencies we wish to evaluate the likelihood (not used yet;
            only Ns is used, to size the placeholder).
        Ns : int
            Number of frequencies
        Nc : int
            Number of channels
        Nw : int
            Number of windows (not referenced in this body; kept for
            interface symmetry)

        Returns
        -------
        None:
            Creates a bunch of object attributes related to computing the
            NLL; ``self.NLL`` is the final objective term, with
            ``self.reg_scores`` / ``self.reg_features`` as regularizers to
            be combined into a loss elsewhere.
        '''
        ########
        # Data #
        ########

        # Frequency grid kept as a placeholder in case we later evaluate the
        # likelihood stochastically on frequency subsets.
        self.s = tf.placeholder(tf.float32, shape=[Ns], name='s_')
        # The Fourier-transformed data, shaped (frequency, channel, window)
        # with the window count left dynamic (None).
        self.y_fft = tf.placeholder(tf.complex64,
                                    shape=[Ns, Nc, None],
                                    name='y_fft')

        ###########
        # Kernels #
        ###########
        # One LMC kernel per latent factor (self.L of them), created under a
        # shared 'global' variable scope.
        with tf.variable_scope('global'):
            self.LMCkernels = [
                LMC(Nc,
                    self.Q,
                    self.R,
                    learnVar=self.learnVar,
                    init_style=self.init_style,
                    unif_bounds=self.unif_bounds) for i in range(self.L)
            ]

        ###########
        # Encoder #
        ###########
        # Free (batch_size x L) score variables; softplus keeps the effective
        # scores strictly positive.
        with tf.variable_scope('encoder'):
            self.S_ = tf.Variable(rand.randn(self.batch_size,
                                             self.L).astype(np.float32),
                                  name='S_')
            self.scores = tf.nn.softplus(self.S_, name='scores')

        ###########################
        # Evaluate log-likelihood #
        ###########################
        # Evaluate each factor's UKU spectral matrix on the frequency grid,
        # stack over factors, then move the factor axis (L) to the end.
        # NOTE(review): the perm assumes a specific rank/layout of UKU's
        # output — confirm against LMC.UKU.
        self.UKUL = [self.LMCkernels[l].UKU(self.s) for l in range(self.L)]
        self.UKUstore = tf.stack(self.UKUL)
        self.UKUstorep = tf.transpose(self.UKUstore, perm=[2, 3, 1, 0])

        # Cast the scores to complex and pad with singleton axes so they
        # broadcast against the stacked UKU matrices.
        self.scores_c = tf.cast(tf.transpose(self.scores), dtype=tf.complex64)
        self.scores_c1 = tf.expand_dims(self.scores_c, axis=0)
        self.scores_c2 = tf.expand_dims(self.scores_c1, axis=0)
        self.scores_c3 = tf.expand_dims(self.scores_c2, axis=0)
        self.UKUe = tf.expand_dims(self.UKUstorep, axis=-1)

        # Weight each factor's UKU matrix by its score and sum over the
        # factor axis (axis=1 after the transpose).
        self.prod_uku = tf.multiply(self.scores_c3, self.UKUe)
        self.prod_ukuT = tf.transpose(self.prod_uku, perm=[4, 3, 2, 0, 1])
        self.UKUscores = tf.reduce_sum(self.prod_ukuT, axis=1)

        # Add isotropic observation noise. NOTE(review): eta enters as
        # 1/eta (precision-like) — confirm this inversion is intentional.
        self.noise = tf.cast(1 / self.eta * tf.eye(self.C), tf.complex64)
        self.UKUnoise_half = tf.add(self.UKUscores, self.noise)
        # Overall factor of 2 on the spectral matrix — presumably a
        # complex-normal density convention; TODO confirm.
        self.UKUnoise = 2 * self.UKUnoise_half

        # Reorder the data to (window, frequency, channel); Yp1 gains a
        # trailing axis for the solve, Yc is the (squeezed) conjugate used
        # in the quadratic form.
        self.Yp = tf.transpose(self.y_fft, perm=[2, 0, 1])
        self.Yp1 = tf.expand_dims(self.Yp, axis=-1)
        self.Yc = tf.squeeze(tf.conj(self.Yp))

        # Quadratic form conj(y) . (UKUnoise^{-1} y) via a linear solve
        # (avoids forming an explicit inverse), summed over channels.
        self.SLV = tf.linalg.solve(self.UKUnoise, self.Yp1)
        self.SLVs = tf.squeeze(self.SLV)
        self.Quad = tf.multiply(self.SLVs, self.Yc)
        self.QL = tf.reduce_sum(self.Quad, axis=-1)  #Nw x Ns

        # This is where we do the proper weighting: mean of the quadratic
        # terms over windows and frequencies.
        self.llk = tf.reduce_mean(self.QL)

        # Log-determinant of each spectral matrix (logdet requires the
        # matrices to be positive definite).
        self.LD = tf.linalg.logdet(self.UKUnoise)

        # Gaussian normalization constant: -Nc * log(pi), cast to complex.
        self.const = tf.cast(Nc * np.log(np.pi) * -1, tf.complex64)

        # Assemble the (complex-typed) log-likelihood; only its real part
        # is used as the objective value.
        self.logDet = tf.cast(tf.reduce_mean(self.LD), tf.complex64)
        self.LogLikelihood = self.const - self.logDet - self.llk
        self.eval = tf.real(self.LogLikelihood)

        #################################
        # Final negative log-likelihood #
        #################################
        self.NLL = -1.0 * self.eval

        # L2 regularization of the encoder scores.
        self.reg_scores = 0.01 * tf.nn.l2_loss(self.scores)

        # L2 regularization of the kernel factors (via self.bNorm()).
        self.reg_features = tf.nn.l2_loss(tf.real(self.bNorm()))

        #################################################
        # Define the final loss with classification etc #
        # somewhere else                                #
        #################################################

        #####################
        # Stuff for UKUnorm #
        #####################
        #self.sf = tf.placeholder(tf.float32,shape=[None],name='sf_')
        # Fixed fine grid of 1000 frequencies on [0, 55); the 55 is a
        # hard-coded upper frequency — NOTE(review): confirm its origin.
        s_fine = np.arange(1000) / 1000 * 55
        self.sf = tf.constant(s_fine.astype(np.float32))
        # Re-evaluate each kernel's UKU on the fine grid, then normalize
        # across the factor axis by the summed absolute values.
        self.UKUL2_norm = [
            self.LMCkernels[l].UKU(self.sf) for l in range(self.L)
        ]
        self.UKUstore_norm = tf.stack(self.UKUL2_norm)
        self.UKUstorep_norm = tf.transpose(self.UKUstore_norm,
                                           perm=[2, 3, 1, 0])
        self.UKUndiv = tf.reduce_sum(tf.abs(self.UKUstorep_norm),
                                     axis=3,
                                     keepdims=True)
        self.UKUnorm = tf.divide(self.UKUstorep_norm,
                                 tf.cast(self.UKUndiv, tf.complex64))
Code example #4
0
File: test_lmc.py  Project: jshepherd01/LMC2PY
 def setUp(self):
     """Create a fresh LMC with no queued inputs and an empty program."""
     inputs, mailboxes = [], []
     self.lmc = LMC(inputs, mailboxes)
Code example #5
0
File: test_lmc.py  Project: jshepherd01/LMC2PY
 def setUp(self):
     """Create an LMC with a single queued input (55) and a six-word program.

     The program words look like LMC opcodes (9xx I/O, 1xx ADD, 000 —
     presumably HLT; confirm against the emulator's instruction table).
     """
     program = [901, 902, 105, 902, 000, 100]
     self.lmc = LMC([55], program)