Example #1
File: mds.py  Project: rahatzamancse/MPSE
    def optimize(self, agd=True, batch_size=None, batch_number=None, lr=0.01,
                 **kwargs):
        """\
        Optimize stress function using gradient-based methods. If batch size or
        number are given, optimization begins with stochastic gradient descent.
        If agd is set to True, optimization ends with adaptive gradient descent.
        """
        if self.verbose > 0:
            print('- MDS.optimize():')

        if batch_number is not None or batch_size is not None:
            # Stochastic phase: evaluate the stress on mini-batches.
            F = lambda X: self.F(X, batch_number=batch_number,
                                 batch_size=batch_size)
            if self.verbose > 0:
                print('  method : stochastic gradient descent')
                if batch_number is None:
                    print(f'  batch size : {batch_size}')
                else:
                    print(f'  batch number : {batch_number}')
            self.X, H = gd.mgd(self.X, F, lr=lr, **kwargs)
            self.update(H=H)
        if agd is True:
            # Adaptive phase: refine with the exact (full) gradient.
            F = lambda X: self.F(X)
            if self.verbose > 0:
                print('  method : exact gradient & adaptive gradient descent')
            self.X, H = gd.agd(self.X, F, **kwargs, **self.H)
            self.update(H=H)

        if self.verbose > 0:
            print(f'  final stress : {self.cost:0.2e}[{self.ncost:0.2e}]')
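A hedged usage sketch (the MDS constructor and its arguments are assumed here; only optimize() appears in the excerpt): a single call can warm up with mini-batch SGD and then refine with adaptive gradient descent, since agd defaults to True.

    # Hypothetical usage; the MDS constructor is assumed, not shown above.
    mds = MDS(distances, dim=2, verbose=1)
    mds.optimize(batch_size=10, lr=0.01)  # SGD warm-up, then AGD refinement
    mds.optimize()                        # exact-gradient AGD only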
Example #2
File: mpse.py  Project: rahatzamancse/MPSE
    def optimize_all(self,
                     agd=True,
                     batch_size=None,
                     batch_number=None,
                     lr=0.01,
                     **kwargs):
        if self.verbose:
            print('- Multiview.optimize_all(): ')

        p = [None] + [self.persp.c] * self.K  # note: p is unused below
        if batch_number is not None or batch_size is not None:
            # Stochastic phase: pack X and the projections Q into one list
            # so a single gradient step updates them jointly.
            XQ = [self.X] + self.Q
            F = lambda XQ: self.F(XQ[0],
                                  XQ[1:],
                                  batch_number=batch_number,
                                  batch_size=batch_size)
            XQ, H = gd.mgd(XQ, F, lr=lr, **kwargs)
            self.X = XQ[0]
            self.Q = XQ[1:]
            self.update(H=H)
        if agd is True:
            # Adaptive phase: refine X and Q jointly with the exact gradient.
            XQ = [self.X] + self.Q
            F = lambda XQ: self.F(XQ[0], XQ[1:])
            XQ, H = gd.cagd(XQ, F, **kwargs, **self.H)
            self.X = XQ[0]
            self.Q = XQ[1:]
            self.update(H=H)

        if self.verbose > 0:
            print(f'  Final stress : {self.cost:0.2e}')
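Design note: optimize_all packs the embedding X and the list of projections Q into one list XQ, so a single gd.mgd or gd.cagd call updates them jointly; the slices XQ[0] and XQ[1:] unpack the result. A hedged usage sketch (the Multiview constructor and its arguments are assumed, not shown above):

    mv = Multiview(distance_matrices, verbose=1)  # hypothetical constructor
    mv.optimize_all(batch_size=10, lr=0.01)       # joint SGD pass, then AGD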
Example #3
    def optimize_X(self, agd=True, batch_size=None, batch_number=None, lr=0.01,
                   **kwargs):
        if self.verbose > 0:
            print('- Multiview.optimize_X():')

        if batch_number is not None or batch_size is not None:
            if self.verbose > 0:
                print('  method : stochastic gradient descent')
                if batch_number is None:
                    print(f'  batch size : {batch_size}')
                else:
                    print(f'  batch number : {batch_number}')
            # Stochastic phase: optimize X with Q held fixed.
            F = lambda X: self.FX(X, self.Q, batch_number=batch_number,
                                  batch_size=batch_size)
            self.X, H = gd.mgd(self.X, F, lr=lr, **kwargs)
            self.update(H=H)
        if agd is True:
            # Adaptive phase: refine X with the exact gradient, Q still fixed.
            F = lambda X: self.FX(X, self.Q)
            if self.verbose > 0:
                print('  method : exact gradient & adaptive gradient descent')
            self.X, H = gd.agd(self.X, F, **kwargs, **self.H)
            self.update(H=H)

        if self.verbose > 0:
            print(f'  Final stress : {self.cost:0.2e}[{self.ncost:0.2e}]')
Example #4
    def optimize_Q(self, batch_size=None, batch_number=None, lr=0.01,
                   **kwargs):
        if self.verbose > 0:
            print('- Multiview.optimize_Q():')

        # Optimize the projections Q with the embedding X held fixed.
        F = lambda Q: self.FQ(self.X, Q, batch_number=batch_number,
                              batch_size=batch_size)
        if batch_number is None and batch_size is None:
            self.Q, H = gd.cagd(self.Q, F, **kwargs)
        else:
            self.Q, H = gd.mgd(self.Q, F, lr=lr, **kwargs)
        self.update(H=H)

        if self.verbose > 0:
            print(f'  Final stress : {self.cost:0.2e}[{self.ncost:0.2e}]')
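Since optimize_X holds Q fixed and optimize_Q holds X fixed, the two methods can be alternated block-coordinate style. A hedged sketch of that pattern (the loop and round count are illustrative; the excerpts do not show how the project alternates them):

    for _ in range(3):                 # round count is arbitrary here
        mv.optimize_Q(batch_size=10)   # update projections, embedding fixed
        mv.optimize_X(batch_size=10)   # update embedding, projections fixed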
Example #5
File: mpse.py  Project: rahatzamancse/MPSE
    def optimize_X(self, agd=True, approx=0.5, lr=5., **kwargs):
        if self.verbose > 0:
            print('- Multiview.optimize_X():')

        if approx is not None:
            # Stochastic phase: stress evaluated with the approx option.
            if self.verbose > 0:
                print('  method : stochastic gradient descent')
                print('  approx =', approx)
            F = lambda X: self.FX(X, self.Q, approx=approx)
            self.X, H = gd.mgd(self.X, F, lr=lr, **kwargs)
            self.update(H=H)
        if agd is True:
            # Adaptive phase: refine with the exact gradient.
            F = lambda X: self.FX(X, self.Q)
            if self.verbose > 0:
                print('  method : exact gradient & adaptive gradient descent')
            self.X, H = gd.agd(self.X, F, **kwargs, **self.H)
            self.update(H=H)

        if self.verbose > 0:
            print(f'  Final stress : {self.cost:0.2e}')
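This optimize_X variant replaces the batch_size/batch_number pair with a single approx parameter and a larger default learning rate; the excerpt does not define approx, so the comments below are an assumption:

    # Hypothetical calls; approx is assumed to control how coarsely the
    # stress is approximated during the stochastic phase.
    mv.optimize_X(approx=0.5, lr=5.)   # stochastic pass, then exact AGD
    mv.optimize_X(approx=None)         # skip straight to exact-gradient AGD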