Example #1
    def SetOperator(self, A, dist, name=None):
        # Fall back to a serial dummy MPI when mpi4py is unavailable.
        try:
            from mpi4py import MPI
        except ImportError:
            from petram.helper.dummy_mpi import MPI
        myid = MPI.COMM_WORLD.rank
        nproc = MPI.COMM_WORLD.size

        self.row_offsets = A.RowOffsets()

        # Build the local CSR block and optionally dump it for inspection.
        AA = build_csr_local(A, self.dtype, self.is_complex)

        if self.gui.write_mat:
            write_coo_matrix('matrix', AA.tocoo())

        if dist:
            self.spss.set_distributed_csr_matrix(AA)
        else:
            self.spss.set_csr_matrix(AA)
        self._matrix = AA
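Every example on this page calls a write_coo_matrix helper whose definition is not shown. As a rough, hypothetical stand-in for experimenting with these snippets, a minimal version that dumps the COO triplets to a text file could look like this (the real petram helper may use a different file layout):

import numpy as np
from scipy.sparse import coo_matrix

def write_coo_matrix(name, m):
    # Hypothetical stand-in: one "row col value" triplet per line.
    with open(name + '.dat', 'w') as f:
        for r, c, v in zip(m.row, m.col, m.data):
            f.write("{} {} {}\n".format(r, c, v))

# Usage: dump a small test matrix.
write_coo_matrix('matrix', coo_matrix(np.array([[1.0, 0.0], [0.0, 2.0]])))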
Example #2
    def save_to_file(self, file):
        # Write each block (i, j) of the operator to its own file named
        # <file>_<i>_<j>; vectors are converted to single-column COO matrices.
        for i in range(self.shape[0]):
            for j in range(self.shape[1]):
                name = file + '_' + str(i) + '_' + str(j)
                v = self[i, j]
                if isinstance(v, chypre.CHypreMat):
                    m = v.get_local_coo()
                    write_coo_matrix(name, m)
                elif isinstance(v, chypre.CHypreVec):
                    m = coo_matrix(v.toarray()).transpose()
                    write_coo_matrix(name, m)
                elif isinstance(v, ScipyCoo):
                    write_coo_matrix(name, v)
                elif v is None:
                    continue
                else:
                    assert False, "Don't know how to write file for " + \
                        str(type(v))
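The CHypreVec branch above relies on scipy treating a 1-D array as a single-row matrix, so transposing the resulting COO matrix yields a column vector. A self-contained illustration, with a plain numpy array standing in for the distributed vector:

import numpy as np
from scipy.sparse import coo_matrix

v = np.array([1.0, 0.0, 3.0])
m = coo_matrix(v).transpose()  # 1 x n row matrix -> n x 1 column matrix
print(m.shape)                 # (3, 1)
print(m.row, m.col, m.data)    # [0 2] [0 0] [1. 3.] -- zeros are dropped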
Example #3
    def SetOperator(self, A, dist, name=None, ifactor=0):
        # Fall back to a serial dummy MPI when mpi4py is unavailable.
        try:
            from mpi4py import MPI
        except ImportError:
            from petram.helper.dummy_mpi import MPI

        myid = MPI.COMM_WORLD.rank
        nproc = MPI.COMM_WORLD.size

        from petram.ext.mumps.mumps_solve import i_array
        gui = self.gui
        s = self.s

        if dist:
            dprint1("SetOperator distributed matrix")
            A.eliminate_zeros()
            if gui.write_mat:
                write_coo_matrix('matrix', A)
            if gui.write_inv:
                smyid = '{:0>6d}'.format(myid)
                np.savez("matrix." + smyid, A=A)
                self._merge_coo_matrix(myid, nproc)

            import petram.ext.mumps.mumps_solve as mumps_solve
            # MUMPS_INT size must match the integer dtype of the index
            # arrays built below.
            dprint1('!!!these two must be consistent')
            dprint1('sizeof(MUMPS_INT) ', mumps_solve.SIZEOF_MUMPS_INT())

            # matrix format: ICNTL(5)=0 assembled, ICNTL(18)=3 distributed
            # (local entries supplied on each rank)
            s.set_icntl(5, 0)
            s.set_icntl(18, 3)

            dprint1("NNZ local: ", A.nnz)
            nnz_array = np.array(MPI.COMM_WORLD.allgather(A.nnz))
            if myid == 0:
                dprint1("NNZ all: ", nnz_array, np.sum(nnz_array))
                s.set_n(A.shape[1])
            dtype_int = 'int' + str(mumps_solve.SIZEOF_MUMPS_INT() * 8)
            row = A.row
            col = A.col
            # MUMPS expects 1-based (Fortran) indices of MUMPS_INT width.
            row = row.astype(dtype_int) + 1
            col = col.astype(dtype_int) + 1
            AA = self.make_matrix_entries(A)

            if len(col) > 0:
                dprint1('index data size ', type(col[0]))
                dprint1('matrix data type ', type(AA[0]))

            s.set_nz_loc(len(A.data))
            s.set_irn_loc(i_array(row))
            s.set_jcn_loc(i_array(col))
            s.set_a_loc(self.data_array(AA))

            self.dataset = (A.data, row, col)

            # unique local row indices and their total count across ranks
            self.irhs_loc = np.unique(row)
            self.N_global = np.sum(MPI.COMM_WORLD.allgather(len(
                self.irhs_loc)))

        else:
            A = A.tocoo(False)  # .astype('complex')
            import petram.ext.mumps.mumps_solve as mumps_solve
            # MUMPS_INT size must match the integer dtype of the index
            # arrays built below.
            dprint1('!!!these two must be consistent')
            dprint1('sizeof(MUMPS_INT) ', mumps_solve.SIZEOF_MUMPS_INT())

            dtype_int = 'int' + str(mumps_solve.SIZEOF_MUMPS_INT() * 8)

            if gui.write_mat:
                # tocsr().tocoo() forces the output to be row-sorted.
                write_coo_matrix('matrix', A.tocsr().tocoo())
            # No outputs
            if myid == 0:
                row = A.row
                col = A.col
                # MUMPS expects 1-based (Fortran) indices of MUMPS_INT width.
                row = row.astype(dtype_int) + 1
                col = col.astype(dtype_int) + 1
                AA = self.make_matrix_entries(A)

                if len(col) > 0:
                    dprint1('index data size ', type(col[0]))
                    dprint1('matrix data type ', type(AA[0]))

                s.set_n(A.shape[0])
                s.set_nz(len(A.data))
                s.set_irn(i_array(row))
                s.set_jcn(i_array(col))
                s.set_a(self.data_array(AA))
                self.dataset = (A.data, row, col)
                self.irhs_loc = np.unique(row)
                self.N_global = len(self.irhs_loc)
            else:
                self.irhs_loc = None
                self.N_global = None

        if self.skip_solve:
            print("skip solve is on.... returning")
            return

        # block low-rank (BLR) compression
        if gui.use_blr:
            s.set_icntl(35, 1)
            s.set_cntl(7, float(gui.blr_drop))

        # out-of-core
        if gui.out_of_core:
            s.set_icntl(22, 1)

        if gui.icntl14.lower() != 'default':
            # percentage increase in the estimated working space
            s.set_icntl(14, convert2int(gui.icntl14))

        if gui.icntl23.lower() != 'default':
            # maximum size of the working memory
            s.set_icntl(23, convert2int(gui.icntl23))

        if gui.icntl8.lower() != 'default':
            # the scaling strategy
            s.set_icntl(8, convert2int(gui.icntl8))

        if gui.icntl6.lower() != 'default':
            # permute the matrix to a zero-free diagonal and/or
            # scale the matrix
            s.set_icntl(6, convert2int(gui.icntl6))

        if gui.cntl1.lower() != 'default':
            # relative threshold for numerical pivoting
            s.set_cntl(1, convert2float(gui.cntl1))

        if gui.cntl4.lower() != 'default':
            # threshold for static pivoting
            s.set_cntl(4, convert2float(gui.cntl4))

        s.set_icntl(24, 0)  # no null-pivot detection
        self.set_ordering_flag(s)

        MPI.COMM_WORLD.Barrier()
        if not gui.restore_fac:
            if not self.silent:
                dprint1("job1")
            s.set_job(1)  # JOB=1: analysis phase
            s.run()
            info1 = s.get_info(1)

            if info1 != 0:
                assert False, "MUMPS call (job1) failed. Check error log"

            if not self.silent:
                dprint1("job2")
            s.set_icntl(13, 0)
            s.set_icntl(5, 0)

            s.set_job(2)  # JOB=2: numerical factorization
            s.run()
            info1 = s.get_info(1)
            if info1 != 0:
                assert False, "MUMPS call (job2) failed. Check error log"
            if gui.write_fac:
                if not self.silent:
                    dprint1("job7 (save)")
                prefix, path = self.split_dir_prefix(gui.factor_path)
                s.set_saveparam(prefix, path)
                s.set_oocparam("ooc_", path)

                # wait here to make sure path is created.
                MPI.COMM_WORLD.Barrier()

                s.set_job(7)  # JOB=7: save factor data to disk
                s.run()
                info1 = s.get_info(1)
                if info1 != 0:
                    assert False, "MUMPS call (job7) failed. Check error log"

        else:
            if not self.silent:
                dprint1("job8 (restore)")
            paths = gui.factor_path.split(',')
            prefix, path = self.split_dir_prefix(paths[ifactor].strip())

            s.set_saveparam(prefix, path)
            s.set_oocparam("ooc_", path)

            MPI.COMM_WORLD.Barrier()  # wait here to make sure path is created.
            # s.set_job(-1)
            # s.run()
            s.set_job(8)  # JOB=8: restore factor data from disk
            s.run()
            info1 = s.get_info(1)
            if info1 != 0:
                assert False, "MUMPS call (job8) failed. Check error log"
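Both branches above convert scipy's 0-based COO indices to the 1-based Fortran convention MUMPS expects, widened to the MUMPS_INT size reported by the binding. A self-contained sketch of that conversion, with the integer width hard-coded to 32 bits for illustration:

import numpy as np
from scipy.sparse import coo_matrix

A = coo_matrix(np.array([[4.0, 1.0],
                         [0.0, 3.0]]))
dtype_int = 'int32'                # stand-in for 'int' + str(SIZEOF_MUMPS_INT() * 8)
irn = A.row.astype(dtype_int) + 1  # 1-based row indices
jcn = A.col.astype(dtype_int) + 1  # 1-based column indices
print(irn, jcn, A.data)            # [1 1 2] [1 2 2] [4. 1. 3.]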
Example #4
    def SetOperator(self, A, dist, name=None):
        # Fall back to a serial dummy MPI when mpi4py is unavailable.
        try:
            from mpi4py import MPI
        except ImportError:
            from petram.helper.dummy_mpi import MPI
        myid = MPI.COMM_WORLD.rank
        nproc = MPI.COMM_WORLD.size

        from petram.ext.mumps.mumps_solve import i_array
        gui = self.gui
        s = self.s
        if dist:
            dprint1("SetOperator distributed matrix")
            A.eliminate_zeros()
            if gui.write_mat:
                write_coo_matrix('matrix', A)

            import petram.ext.mumps.mumps_solve as mumps_solve
            # MUMPS_INT size must match the integer dtype of the index
            # arrays built below.
            dprint1('!!!these two must be consistent')
            dprint1('sizeof(MUMPS_INT) ', mumps_solve.SIZEOF_MUMPS_INT())
            # dprint1('index data size ', type(A.col[0]))
            # dprint1('matrix data type ', type(A.data[0]))

            # matrix format: ICNTL(5)=0 assembled, ICNTL(18)=3 distributed
            # (local entries supplied on each rank)
            s.set_icntl(5, 0)
            s.set_icntl(18, 3)

            dprint1("NNZ local: ", A.nnz)
            nnz_array = np.array(MPI.COMM_WORLD.allgather(A.nnz))
            if myid == 0:
                dprint1("NNZ all: ", nnz_array, np.sum(nnz_array))
                s.set_n(A.shape[1])
            dtype_int = 'int' + str(mumps_solve.SIZEOF_MUMPS_INT() * 8)
            row = A.row
            col = A.col
            # MUMPS expects 1-based (Fortran) indices of MUMPS_INT width.
            row = row.astype(dtype_int) + 1
            col = col.astype(dtype_int) + 1
            AA = self.make_matrix_entries(A)

            if len(col) > 0:
                dprint1('index data size ', type(col[0]))
                dprint1('matrix data type ', type(AA[0]))

            s.set_nz_loc(len(A.data))
            s.set_irn_loc(i_array(row))
            s.set_jcn_loc(i_array(col))
            s.set_a_loc(self.data_array(AA))


            s.set_icntl(2, 1)  # ICNTL(2): output unit for diagnostic messages

            self.dataset = (A.data, row, col)
        else:
            A = A.tocoo(False)  # .astype('complex')
            import petram.ext.mumps.mumps_solve as mumps_solve
            # MUMPS_INT size must match the integer dtype of the index
            # arrays built below.
            dprint1('!!!these two must be consistent')
            dprint1('sizeof(MUMPS_INT) ', mumps_solve.SIZEOF_MUMPS_INT())

            dtype_int = 'int' + str(mumps_solve.SIZEOF_MUMPS_INT() * 8)

            if gui.write_mat:
                # tocsr().tocoo() forces the output to be row-sorted.
                write_coo_matrix('matrix', A.tocsr().tocoo())
            # No outputs
            if myid == 0:
                row = A.row
                col = A.col
                # MUMPS expects 1-based (Fortran) indices of MUMPS_INT width.
                row = row.astype(dtype_int) + 1
                col = col.astype(dtype_int) + 1
                AA = self.make_matrix_entries(A)

                if len(col) > 0:
                    dprint1('index data size ', type(col[0]))
                    dprint1('matrix data type ', type(AA[0]))

                s.set_n(A.shape[0])
                s.set_nz(len(A.data))
                s.set_irn(i_array(row))
                s.set_jcn(i_array(col))
                s.set_a(self.data_array(AA))
                self.dataset = (A.data, row, col)

        # block low-rank (BLR) compression
        if gui.use_blr:
            s.set_icntl(35, 1)
            s.set_cntl(7, float(gui.blr_drop))

        # out-of-core factorization
        if gui.out_of_core:
            s.set_icntl(22, 1)

        def convert2float(txt):
            try:
                return float(txt)
            except ValueError:
                assert False, "cannot convert to float. Input text is " + txt

        def convert2int(txt):
            try:
                return int(txt)
            except ValueError:
                assert False, "cannot convert to int. Input text is " + txt
                
        if gui.icntl14.lower() != 'default':
            # percentage increase in the estimated working space
            s.set_icntl(14, convert2int(gui.icntl14))

        if gui.icntl23.lower() != 'default':
            # maximum size of the working memory
            s.set_icntl(23, convert2int(gui.icntl23))

        if gui.icntl8.lower() != 'default':
            # the scaling strategy
            s.set_icntl(8, convert2int(gui.icntl8))

        if gui.icntl6.lower() != 'default':
            # permute the matrix to a zero-free diagonal and/or
            # scale the matrix
            s.set_icntl(6, convert2int(gui.icntl6))

        if gui.icntl10.lower() != 'default':
            # iterative refinement
            s.set_icntl(10, convert2int(gui.icntl10))

        if gui.cntl1.lower() != 'default':
            # relative threshold for numerical pivoting
            s.set_cntl(1, convert2float(gui.cntl1))

        if gui.cntl4.lower() != 'default':
            # threshold for static pivoting
            s.set_cntl(4, convert2float(gui.cntl4))

        if gui.cntl2.lower() != 'default':
            # stopping criterion for iterative refinement
            s.set_cntl(2, convert2float(gui.cntl2))
            
        self.set_ordering_flag(s)

        MPI.COMM_WORLD.Barrier()
        dprint1("job1")
        s.set_job(1)  # JOB=1: analysis phase
        s.run()
        info1 = s.get_info(1)

        if info1 != 0:
            assert False, "MUMPS call (job1) failed. Check error log"

        MPI.COMM_WORLD.Barrier()
        dprint1("job2")
        s.set_icntl(24, 1)  # enable null-pivot detection
        s.set_job(2)  # JOB=2: numerical factorization
        s.run()
        info1 = s.get_info(1)
        if info1 != 0:
            assert False, "MUMPS call (job2) failed. Check error log"
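Both SetOperator variants finish with the same driver pattern: run the MUMPS analysis phase (JOB=1) and then the factorization phase (JOB=2), checking INFO(1) after each call. The repeated check could be factored into a small helper; a hypothetical sketch (run_job is not part of petram; it only uses the set_job/run/get_info calls already seen above):

def run_job(s, job, label):
    # Run one MUMPS phase and fail loudly if INFO(1) reports an error.
    s.set_job(job)
    s.run()
    if s.get_info(1) != 0:
        assert False, "MUMPS call (%s) failed. Check error log" % label

# Usage, mirroring the tail of SetOperator:
#   run_job(s, 1, "job1")   # analysis
#   run_job(s, 2, "job2")   # factorization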