Example #1
File: common.py  Project: ryanabbit/horton
def reduce_ugrid(ugrid, stride, chop):
    '''Reduce the uniform grid

       **Arguments:**

       ugrid
            The uniform integration grid.

       stride
            The reduction factor.

       chop
            The number of slices to chop off the grid in each direction.

       Returns: a reduced ugrid object
    '''
    if chop < 0:
        raise ValueError('Chop must be positive or zero.')
    if ((ugrid.shape - chop) % stride != 0).any():
        raise ValueError('The stride is not commensurate with all three grid dimensions.')

    # Integer division: the commensurability check above guarantees an exact result.
    new_shape = (ugrid.shape - chop) // stride
    grid_rvecs = ugrid.grid_rvecs * stride
    new_ugrid = UniformGrid(ugrid.origin, grid_rvecs, new_shape, ugrid.pbc)
    return new_ugrid
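
A minimal usage sketch for Example #1, assuming `from horton import UniformGrid` works as in Example #4 and that reduce_ugrid is importable from the same common.py module; the grid parameters below are illustrative only.

import numpy as np

# Build a small uniform grid: 0.2 bohr spacing, 41 points per axis, fully periodic.
origin = np.zeros(3, float)
grid_rvecs = np.identity(3, float) * 0.2
ugrid = UniformGrid(origin, grid_rvecs, np.array([41, 41, 41]), np.array([1, 1, 1]))

# Coarsen the grid: drop one trailing slice per axis, then keep every 4th point.
# (41 - 1) % 4 == 0, so the stride passes the commensurability check.
coarse = reduce_ugrid(ugrid, stride=4, chop=1)
# coarse.shape is [10, 10, 10] and coarse.grid_rvecs equals grid_rvecs * 4.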
Example #2
File: common.py  Project: ryanabbit/horton
def write_random_lta_cube(dn, fn_cube):
    sys = System.from_file(context.get_fn('test/lta_gulp.cif'))
    ugrid = UniformGrid(np.zeros(3, float), sys.cell.rvecs * 0.1,
                        np.array([10, 10, 10]), np.array([1, 1, 1]))
    cube_data = np.random.uniform(0, 1, ugrid.shape)
    sys.update_grid(ugrid)
    sys.extra['cube_data'] = cube_data
    sys.to_file(os.path.join(dn, fn_cube))
    return sys
Example #3
def write_random_lta_cube(dn, fn_cube):
    '''Write a randomized cube file'''
    # Start from an existing structure file.
    mol = IOData.from_file(context.get_fn('test/lta_gulp.cif'))
    # Define a uniform grid with only 1000 points, to make the tests fast.
    ugrid = UniformGrid(np.zeros(3, float), mol.cell.rvecs * 0.1,
                        np.array([10, 10, 10]), np.array([1, 1, 1]))
    # Write to the file dn/fn_cube
    mol.cube_data = np.random.uniform(0, 1, ugrid.shape)
    mol.grid = ugrid
    mol.to_file(os.path.join(dn, fn_cube))
    return mol
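
A hedged usage sketch for Example #3: write the randomized cube into a scratch directory created with the standard-library tempfile module; 'random_lta.cube' is an illustrative file name, not one taken from the snippets.

import shutil
import tempfile

dn = tempfile.mkdtemp()
try:
    # Writes dn/random_lta.cube and returns the IOData instance that was written.
    mol = write_random_lta_cube(dn, 'random_lta.cube')
    print(mol.cube_data.shape)   # (10, 10, 10): the 1000-point grid defined above
finally:
    shutil.rmtree(dn)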
Example #4
def test_reduce_data1():
    from horton import UniformGrid
    data = np.random.normal(0, 1, (11, 21, 31))
    grid_rvecs = np.identity(3, float) * 0.1
    ugrid = UniformGrid(np.array([0.3, 0.2, -0.1]), grid_rvecs,
                        np.array(data.shape), np.array([1, 1, 0]))

    data1, ugrid1 = reduce_data(data, ugrid, 10, 1)
    assert data1.shape == (1, 2, 3)
    assert (data1 == data[:-1:10, :-1:10, :-1:10]).all()
    assert (ugrid1.origin == ugrid.origin).all()
    assert abs(ugrid1.grid_rvecs - ugrid.grid_rvecs * 10).max() < 1e-10
    assert (ugrid1.shape == [1, 2, 3]).all()
    assert (ugrid1.pbc == ugrid.pbc).all()

    data2, ugrid2 = reduce_data(data, ugrid, 5, 1)
    assert data2.shape == (2, 4, 6)
    assert (data2 == data[:-1:5, :-1:5, :-1:5]).all()
    assert (ugrid2.origin == ugrid.origin).all()
    assert abs(ugrid2.grid_rvecs - ugrid.grid_rvecs * 5).max() < 1e-10
    assert (ugrid2.shape == [2, 4, 6]).all()
    assert (ugrid2.pbc == ugrid.pbc).all()
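
The slicing that these assertions encode can be reproduced with plain NumPy. The sketch below uses a hypothetical helper, reduce_block, as a stand-in for the array part of reduce_data: chop trailing slices, then keep every stride-th point along each axis.

import numpy as np

def reduce_block(data, stride, chop):
    # Hypothetical stand-in for the data part of reduce_data: drop `chop`
    # trailing slices along each axis, then subsample with step `stride`.
    if chop == 0:
        return data[::stride, ::stride, ::stride]
    return data[:-chop:stride, :-chop:stride, :-chop:stride]

data = np.random.normal(0, 1, (11, 21, 31))
small = reduce_block(data, 10, 1)
assert small.shape == (1, 2, 3)
assert (small == data[:-1:10, :-1:10, :-1:10]).all()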