Code example #1
def test_KiB_shared_zeros():
    """test sharedmem.zeros for arrays on the order of 2**16, single axis types"""
    for typestr in numtypes:
        shape = (2**16, )
        a = sharedmem.zeros(shape, dtype=typestr)
        t = (a == np.zeros(shape))
        assert t.all()
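These tests iterate over a module-level numtypes list that the snippets do not show; presumably it enumerates the numpy dtype strings to exercise. A minimal sketch of what those shared fixtures might look like (the exact contents of test_shmarray.py may differ):

import numpy as np
import sharedmem  # the numpy-sharedmem module exercised by these tests

# assumed fixture: dtype strings covered by the zeros() tests
numtypes = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
            'int64', 'uint64', 'float32', 'float64']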
Code example #2
def test_KiB_shared_zeros():
    """test sharedmem.zeros for arrays on the order of 2**16, single axis types"""
    for typestr in numtypes:
        shape = (2**16,)
        a = sharedmem.zeros(shape,dtype=typestr)
        t = (a == np.zeros(shape))
        assert t.all()
Code example #3
def flow_compute(G, B, pq_i, pv_i, n, f, parallel=False):
    """
    :param G:
    :param B:
    :param pq_i: Index of PQ nodes
    :param pv_i: Index of PV nodes
    :param n: Total number of nodes
    :param f:
    :return:
    """
    x = np.zeros((2 * n + 2, 1))
    df = np.ones((2 * n, 1))
    for i in range(n + 1):
        # real part of the bus voltage, usually initialized to 1: x[2 * i] = e[i]
        x[2 * i] = 1

        # imaginary part of the bus voltage, usually initialized to 0: x[2 * i + 1] = f[i]
        x[2 * i + 1] = 0
    # initialize the Jacobian matrix in shared memory
    jacob = shm.zeros((2 * n, 2 * n))
    step = 0
    while not converge(df):
        if not parallel:
            update_jacob(G, B, x, pq_i, pv_i, jacob, n)
        else:
            update_jacob_parallel(G, B, x, pq_i, pv_i, jacob, n)
        update_f(G, B, f, df, x, pq_i, pv_i, n)
        update_x(x, jacob, df, n)
        step += 1
        if step > 100:
            raise RuntimeError("Computation fails to converge.")
    return x, step
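flow_compute relies on helpers that are not part of the snippet (converge, update_jacob, update_jacob_parallel, update_f, update_x). A minimal sketch of what the convergence test might look like, assuming df holds the current power mismatches (the project's real converge may use a different norm or tolerance):

import numpy as np

def converge(df, tol=1e-6):
    # converged once the largest absolute mismatch drops below the tolerance
    return np.max(np.abs(df)) < tol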
Code example #4
def test_MiB_shared_zeros():
    """test sharedmem.zeros for arrays on the order 2**21 bytyes, single axis uint8"""

    shape = (2**21, )
    a = sharedmem.zeros(shape, dtype='uint8')
    t = (a == np.zeros(shape))
    assert t.all()
Code example #5
def test_MiB_shared_zeros():
    """test sharedmem.zeros for arrays on the order 2**21 bytyes, single axis uint8"""
    
    shape = (2**21,)
    a = sharedmem.zeros(shape,dtype='uint8')
    t = (a == np.zeros(shape))
    assert t.all()
Code example #6
def test_shared_zeros():
    """test sharedmem.zeros for small single axis types"""
    for typestr in numtypes:
        shape = (10, )
        a = sharedmem.zeros(shape, dtype=typestr)
        t = (a == np.zeros(shape))
        assert t.all()
Code example #7
def test_shared_zeros():
    """test sharedmem.zeros for small single axis types"""
    for typestr in numtypes:
        shape = (10,)
        a = sharedmem.zeros(shape,dtype=typestr)
        t = (a == np.zeros(shape))
        assert t.all()
Code example #8
File: test_shmarray.py Project: se4u/numpy-sharedmem
def test_two_subprocesses_no_pickle():
    #setup
    shape = (4,)
    a = sharedmem.zeros(shape, dtype='float64')
    a = sharedmem.zeros(shape)
    print os.getpid(),":", a


    lck = multiprocessing.Lock()

    def modify_array(a,lck):
        # a = pickle.loads(a)
        with lck: #lck.acquire()
        
            a[0] = 1
            a[1] = 2
            a[2] = 3
            # lck.release()
        print os.getpid(), "modified array"
        
    p = multiprocessing.Process(target=modify_array, args=(a,lck))
    p.start()

    # poll for the result (super inefficient!)
    t0 = time.time()
    t1 = t0+10
    nn = 0
    while True:
        if a[0]:
            with lck: #             lck.acquire()
                t = (a == np.array([1,2,3,0], dtype='float64'))
                # lck.release()
            break
        
        if time.time() > t1 : # use timeout instead
            break
        nn += 1
    # if the wait timed out above, t is undefined and the next line raises a NameError
    print os.getpid(), t
    assert t.all()
    print "finished (from %s)" % os.getpid()
    
    p.join()
    print a
Code example #9
def test_two_subprocesses_no_pickle():
    #setup
    shape = (4,)
    a = sharedmem.zeros(shape, dtype='float64')
    a = sharedmem.zeros(shape)
    print(os.getpid(),":", a)


    lck = multiprocessing.Lock()

    def modify_array(a,lck):
        # a = pickle.loads(a)
        with lck: #lck.acquire()
        
            a[0] = 1
            a[1] = 2
            a[2] = 3
            # lck.release()
        print(os.getpid(), "modified array")
        
    p = multiprocessing.Process(target=modify_array, args=(a,lck))
    p.start()

    # poll for the result (super inefficient!)
    t0 = time.time()
    t1 = t0+10
    nn = 0
    while True:
        if a[0]:
            with lck: #             lck.acquire()
                t = (a == np.array([1,2,3,0], dtype='float64'))
                # lck.release()
            break
        
        if time.time() > t1 : # use timeout instead
            break
        nn += 1
    # if the wait timed out above, t is undefined and the next line raises a NameError
    print(os.getpid(), t)
    assert t.all()
    print("finished (from %s)" % os.getpid())
    
    p.join()
    print(a)
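The inline comments above already flag the busy-poll loop as inefficient and suggest a timeout. A minimal alternative sketch, assuming the same sharedmem module and a fork-based start method, that waits on a multiprocessing.Event instead of polling:

import multiprocessing
import numpy as np
import sharedmem  # assumed: same module as in the examples above

def modify_array(a, done):
    a[0], a[1], a[2] = 1, 2, 3
    done.set()  # signal the parent instead of making it busy-poll

if __name__ == '__main__':
    a = sharedmem.zeros((4,), dtype='float64')
    done = multiprocessing.Event()
    p = multiprocessing.Process(target=modify_array, args=(a, done))
    p.start()
    assert done.wait(timeout=10), "child did not finish in time"
    p.join()
    assert (a == np.array([1, 2, 3, 0], dtype='float64')).all()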
Code example #10
def test_two_subprocesses_with_pickle():
    from nose import SkipTest
    raise SkipTest("this test is known to fail")

    shape = (4, )
    a = sharedmem.zeros(shape, dtype='float64')
    a = sharedmem.zeros(shape)
    print(os.getpid(), ":", a)
    pa = pickle.dumps(a)

    lck = multiprocessing.Lock()

    def modify_array(pa, lck):
        a = pickle.loads(pa)
        with lck:
            a[0] = 1
            a[1] = 2
            a[2] = 3

        print(os.getpid(), "modified array")

    p = multiprocessing.Process(target=modify_array, args=(pa, lck))
    p.start()

    t0 = time.time()
    t1 = t0 + 10
    nn = 0
    while True:
        if a[0]:
            with lck:
                t = (a == np.array([1, 2, 3, 0], dtype='float64'))
            break
        if time.time() > t1:  # use timeout instead
            break
        nn += 1

    print(os.getpid(), t, "nn:", nn)
    assert t.all()
    print("finished (from %s)" % os.getpid())

    p.join()

    print(a)
Code example #11
def test_two_subprocesses_with_pickle():
    from nose import SkipTest
    raise SkipTest("this test is known to fail")

    shape = (4,)
    a = sharedmem.zeros(shape, dtype='float64')
    a = sharedmem.zeros(shape)
    print(os.getpid(),":", a)
    pa = pickle.dumps(a)

    lck = multiprocessing.Lock()

    def modify_array(pa,lck):
        a = pickle.loads(pa)
        with lck:
            a[0] = 1
            a[1] = 2
            a[2] = 3

        print(os.getpid(), "modified array")
        
    p = multiprocessing.Process(target=modify_array, args=(pa,lck))
    p.start()

    t0 = time.time()
    t1 = t0+10
    nn = 0
    while True:
        if a[0]:
            with lck:
                t = (a == np.array([1,2,3,0], dtype='float64'))
            break
        if time.time() > t1 : # use timeout instead
            break
        nn += 1
        
    print(os.getpid(), t, "nn:", nn)
    assert t.all()
    print("finished (from %s)" % os.getpid())
    
    p.join()
    
    print(a)
Code example #12
File: RD_physics.py Project: stunax/cac
def gen_system(size):
    global V
    global U
    global Vres
    global Ures
    U = sh.zeros((size, size), dtype = np.float)
    V = sh.zeros((size, size), dtype = np.float)
    Ures = sh.zeros((size-2, size-2), dtype = np.float)
    Vres = sh.zeros((size-2, size-2), dtype = np.float)

    center = size/2
    blob = np.ones((10,10), dtype=np.float)
    blob[1:-1,1:] = 0.0
    bsize, _ = blob.shape

    for i in xrange(1, size, 2*bsize):
        U[ i:i+bsize, i:i+bsize ] = blob

    return (U, V)
Code example #13
def fdkcore(nr_projections, projections, combined_matrix, z_voxel_coords,
            transform_matrix, z_voxels, detector_rows, detector_columns,
            recon_volume, volume_weight, count_out):

    workers = nr_projections
    recon_volume = shmarray.zeros((z_voxels, z_voxels, z_voxels), dtype=float32)
    lock = Lock()
    processes = [Process(target=worker, args=(i, projections, combined_matrix, z_voxel_coords,
            transform_matrix, z_voxels, detector_rows, detector_columns,
            recon_volume, volume_weight, lock)) \
            for i in xrange(workers)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    return recon_volume
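The single shared recon_volume above is updated concurrently by every worker, which is why a Lock is handed to each process. A hypothetical sketch of the accumulation step inside worker() (worker itself is not shown in the snippet): each process back-projects its projection into a private partial volume and adds it into the shared volume under the lock, since an unguarded in-place addition on the shared array could lose updates.

def accumulate(recon_volume, partial, lock):
    # serialize the in-place addition so concurrent workers do not race
    with lock:
        recon_volume += partial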
Code example #14
def fdkcore(nr_projections, projections, combined_matrix, z_voxel_coords,
            transform_matrix, z_voxels, detector_rows, detector_columns,
            recon_volume, volume_weight, count_out):

    NoWorkers = 16
    loadPerWorker = nr_projections/NoWorkers
    recon_volume = [shmarray.zeros((z_voxels, z_voxels, z_voxels), dtype=float32)
            for i in xrange(NoWorkers)]
    processes = [Process(target=worker, args=((i*loadPerWorker), loadPerWorker, projections, combined_matrix, z_voxel_coords,
            transform_matrix, z_voxels, detector_rows, detector_columns,
            recon_volume[i], volume_weight)) \
            for i in xrange(NoWorkers)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    return numpy.add.reduce(recon_volume, axis=0)
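This variant trades memory for lock-free accumulation: each of the 16 workers gets its own full-size shared volume and the partial volumes are summed at the end. Note that loadPerWorker = nr_projections/NoWorkers is an integer (floor) division under Python 2, so any remainder projections are silently dropped when nr_projections is not a multiple of 16. One hedged way to cover the remainder (split_projection_ranges is an illustrative helper, not part of the project):

import numpy

def split_projection_ranges(nr_projections, workers=16):
    # assign every projection index to exactly one worker, remainder included
    chunks = numpy.array_split(numpy.arange(nr_projections), workers)
    return [(int(c[0]), len(c)) for c in chunks if len(c) > 0]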
Code example #15
def test():
    import numpy.random
    import multiprocessing
    import multiprocessing.sharedctypes
    import shmarray
    import numpy

    data = shmarray.zeros(100)
    #data = numpy.zeros(100)
    #data = multiprocessing.sharedctypes.RawArray('d', 100)

    d = None

    def doFuzz(inds):
        #data, inds = args

        numpy.random.seed()
        r = numpy.random.random()
        #print multiprocessing.current_process()

        d[inds] = multiprocessing.current_process().pid

        return inds

    def initFuzz(data):
        global d
        d = data

    #data = shmarray.zeros(100)
    #data = numpy.zeros(100)
    #data = multiprocessing.sharedctypes.RawArray('d', 100)

    pool = multiprocessing.Pool(4, initFuzz, (data, ))

    i = pool.map(doFuzz, range(0, 100), chunksize=10)

    pool.close()

    def foo(data):
        data[50:] = -1

    p = multiprocessing.Process(target=foo, args=(data, ))
    p.start()
    p.join()

    print(data)
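pool.map can only ship functions that are picklable by reference, so defining doFuzz and initFuzz inside test() as above generally fails with a pickling error; the later shmTest.py examples keep them at module level. A minimal standalone sketch of the same pattern, assuming the fork start method so the shared buffer is inherited rather than copied:

import multiprocessing
import shmarray  # assumed: same module as in the example above

d = None

def init_worker(shared):
    # runs once in each worker; stash the shared array in a module-level global
    global d
    d = shared

def mark(i):
    d[i] = multiprocessing.current_process().pid

if __name__ == '__main__':
    data = shmarray.zeros(100)
    with multiprocessing.Pool(4, init_worker, (data,)) as pool:
        pool.map(mark, range(100), chunksize=10)
    # every slot was written by a worker, so the whole shared buffer is now non-zero
    assert (data > 0).all()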
Code example #16
def fdkcore(nr_projections, projections, combined_matrix, z_voxel_coords,
            transform_matrix, z_voxels, detector_rows, detector_columns,
            recon_volume, volume_weight, count_out):

    workers = nr_projections
    recon_volume = shmarray.zeros((z_voxels, z_voxels, z_voxels),
                                  dtype=float32)
    lock = Lock()
    processes = [Process(target=worker, args=(i, projections, combined_matrix, z_voxel_coords,
            transform_matrix, z_voxels, detector_rows, detector_columns,
            recon_volume, volume_weight, lock)) \
            for i in xrange(workers)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    return recon_volume
Code example #17
def fdkcore(nr_projections, projections, combined_matrix, z_voxel_coords,
            transform_matrix, z_voxels, detector_rows, detector_columns,
            recon_volume, volume_weight, count_out):

    loadPerWorker = nr_projections / 16
    recon_volume = [
        shmarray.zeros((z_voxels, z_voxels, z_voxels), dtype=float32)
        for i in xrange(16)
    ]
    processes = [Process(target=worker, args=((i*loadPerWorker), loadPerWorker, projections, combined_matrix, z_voxel_coords,
            transform_matrix, z_voxels, detector_rows, detector_columns,
            recon_volume[i], volume_weight)) \
            for i in xrange(16)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    return numpy.add.reduce(recon_volume, axis=0)
Code example #18
File: shmTest.py Project: RuralCat/CLipPYME
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
################
import numpy.random
import multiprocessing
import multiprocessing.sharedctypes
import shmarray
import numpy


data = shmarray.zeros(100)
# data = numpy.zeros(100)
# data = multiprocessing.sharedctypes.RawArray('d', 100)

d = None


def doFuzz(inds):
    # data, inds = args

    numpy.random.seed()
    r = numpy.random.random()
    # print multiprocessing.current_process()

    d[inds] = multiprocessing.current_process().pid
Code example #19
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
################
import numpy.random
import multiprocessing
import multiprocessing.sharedctypes
import shmarray
import numpy

data = shmarray.zeros(100)
#data = numpy.zeros(100)
#data = multiprocessing.sharedctypes.RawArray('d', 100)

d = None


def doFuzz(inds):
    #data, inds = args

    numpy.random.seed()
    r = numpy.random.random()
    #print multiprocessing.current_process()

    d[inds] = multiprocessing.current_process().pid
Code example #20
File: test_shmarray.py Project: se4u/numpy-sharedmem
def test_KiB_shared_zeros():
    for typestr in numtypes:
        shape = (2**16,)
        a = sharedmem.zeros(shape,dtype=typestr)
        t = (a == np.zeros(shape))
        assert t.all()
Code example #21
File: test_shmarray.py Project: se4u/numpy-sharedmem
def test_MiB_shared_zeros():
    shape = (2**21,)
    a = sharedmem.zeros(shape,dtype='uint8')
    t = (a == np.zeros(shape))
    assert t.all()
Code example #22
def alloc_n(n):
    shape = (2**n, )
    a = sharedmem.zeros(shape, dtype='uint8')
    t = (a == np.zeros(shape))
    assert t.all()
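alloc_n allocates a 2**n-byte shared uint8 array and checks that it comes back zero-filled. A hedged sketch of how it might be driven over a range of sizes (the loop bounds are illustrative; alloc_n and the imports are taken from the example above):

def test_shared_zeros_many_sizes():
    # exercise shared allocations from 1 KiB up to 16 MiB
    for n in range(10, 25):
        alloc_n(n)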
Code example #23
File: example.py Project: gitgallagher/parallel
	def __init__(self):
		self.values = zeros(10)
Code example #24
def alloc_n(n):
    shape = (2**n,)
    a = sharedmem.zeros(shape,dtype='uint8')
    t = (a == np.zeros(shape))
    assert t.all()