Example #1
def generate_max_task_list(array_size, blk_mem_size=16., memory=MEM_SIZE, priority_list=None):
    # Split an array of shape `array_size` into blocks whose size stays within
    # `memory`, assuming `blk_mem_size` bytes per element (16 = complex double).
    shape = list(array_size)
    length = len(shape)
    if priority_list is None:
        # Default priority: split dimensions in reverse order.
        priority_list = numpy.arange(length)[::-1]
    chunk_size = get_max_blocksize_from_mem(shape, blk_mem_size, memory, priority_list)
    return generate_task_list(chunk_size, shape)
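The task list returned above is consumed by building one slice per dimension, as the broadcast and reduction helpers below do. A minimal consumption sketch, assuming each block is a sequence of (start, stop) pairs (as implied by the slice(*x) calls in the later examples) and that MEM_SIZE and the chunking helpers are available in the same module:

import numpy

in_array = numpy.zeros((400, 400, 30), dtype=numpy.complex128)
for block in generate_max_task_list(in_array.shape):
    which_slice = tuple(slice(*x) for x in block)   # one slice per dimension
    sub_block = in_array[which_slice]               # view no larger than the memory budget
    sub_block += 1.0                                # stand-in for per-block work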
Example #2
def safeBcastInPlace(comm, in_array, root=0):
    # Broadcast `in_array` from `root` to every rank in memory-bounded blocks.
    shape = in_array.shape
    length = len(shape)
    # Use 16 bytes per element for the block-size estimate (complex double).
    chunk_size = get_max_blocksize_from_mem(list(shape), 16., MEM_SIZE,
                                            priority_list=numpy.arange(length)[::-1])
    task_list = generate_task_list(chunk_size, shape)
    for block in task_list:
        which_slice = [slice(*x) for x in block]
        tmp = in_array[tuple(which_slice)].copy()
        tmp = comm.bcast(tmp, root=root)  # honor the caller-supplied root
        in_array[tuple(which_slice)] = tmp
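A minimal usage sketch for the helper above, assuming an mpi4py communicator and that MEM_SIZE and the chunking helpers are defined alongside it:

from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
data = numpy.empty((200, 200, 50), dtype=numpy.complex128)
if comm.rank == 0:
    data[:] = 1.0 + 2.0j          # only the root holds meaningful data initially
safeBcastInPlace(comm, data, root=0)
# Every rank now holds the root's data.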
Example #3
def safeAllreduceInPlace(comm, in_array):
    # Sum-reduce `in_array` over all ranks in memory-bounded blocks; the result
    # overwrites `in_array` on every rank.
    shape = in_array.shape
    length = len(shape)
    # Use 16 bytes per element for the block-size estimate (complex double).
    chunk_size = get_max_blocksize_from_mem(list(shape), 16., MEM_SIZE,
                                            priority_list=numpy.arange(length)[::-1])
    task_list = generate_task_list(chunk_size, shape)
    for block in task_list:
        which_slice = [slice(*x) for x in block]
        tmp = in_array[tuple(which_slice)].copy()
        comm.Allreduce(MPI.IN_PLACE, tmp, op=MPI.SUM)
        in_array[tuple(which_slice)] = tmp
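A minimal usage sketch, again assuming an mpi4py communicator; after the call, every element holds the sum of the per-rank contributions on every rank:

from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
partial = numpy.full((300, 300), float(comm.rank))  # each rank contributes its rank number
safeAllreduceInPlace(comm, partial)
# Every element on every rank now equals 0 + 1 + ... + (comm.size - 1).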
Example #4
def safeNormDiff(in_array1, in_array2):
    # Accumulate the 2-norm of the difference of two equally shaped arrays,
    # block by block, to bound peak memory use.  Note that this sums per-block
    # norms rather than computing the norm of the full difference array.
    shape = in_array1.shape
    assert shape == in_array2.shape
    length = len(shape)
    # Use 16 bytes per element for the block-size estimate (complex double).
    chunk_size = get_max_blocksize_from_mem(list(shape), 16., MEM_SIZE,
                                            priority_list=numpy.arange(length)[::-1])
    task_list = generate_task_list(chunk_size, shape)
    norm = 0.0
    for block in task_list:
        which_slice = [slice(*x) for x in block]
        norm += numpy.linalg.norm(in_array1[tuple(which_slice)] - in_array2[tuple(which_slice)])
    return norm
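A minimal usage sketch; if the arrays fit in a single block the result equals numpy.linalg.norm(a - b), while with several blocks it is the somewhat larger sum of per-block norms:

import numpy

a = numpy.random.rand(500, 500)
b = a + 1e-6
print(safeNormDiff(a, b))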