def test_neural_net():
    # A simple neural network implementation based on the Bead classes.
    # This example implements a three-layer (5, 4, 1) network with random
    # parameters.
    ltc1 = BeadLinTransConst(np.random.normal(0, 1, (4, 5)), np.random.normal(0, 1, 4))
    swi1 = BeadSwitch(4)
    ltc2 = BeadLinTransConst(np.random.normal(0, 1, (1, 4)), np.random.normal(0, 1, 1))
    swi2 = BeadSwitch(1)

    # This neural net routine does not need to know the sizes of each layer.
    # Exercise: generalize it such that it works for an arbitrary number of
    # layers.
    def neural_net(x, do_gradient=False):
        # forward path
        ltc1.forward([x])
        swi1.forward([ltc1.ar_out])
        ltc2.forward([swi1.ar_out])
        swi2.forward([ltc2.ar_out])
        # back path
        if do_gradient:
            # clean gradient arrays
            ltc1.resetg()
            swi1.resetg()
            ltc2.resetg()
            swi2.resetg()
            # compute derivatives
            gx = np.zeros(x.shape)
            swi2.ar_gout[0] = 1.0  # we know that the final output is a scalar
            swi2.back([ltc2.ar_gout])
            ltc2.back([swi1.ar_gout])
            swi1.back([ltc1.ar_gout])
            ltc1.back([gx])
            return swi2.ar_out[0], gx  # we know that the final output is a scalar
        else:
            return swi2.ar_out[0]  # we know that the final output is a scalar

    x = np.random.normal(0, 1, 5)
    print('The inputs for the neural network')
    print(x)
    print()
    print('Calling neural network without gradient')
    print('F(x)', neural_net(x))
    print()
    print('Calling neural network with gradient')
    f, gx = neural_net(x, True)
    print('F(x)', f)
    print('Gradient')
    print(gx)
    print()
    print('Running check_delta on the neural network function.')
    dxs = np.random.normal(0, 1e-4, (100, 5))
    check_delta(neural_net, x, dxs)
    print('Test passed.')
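# The Bead classes are assumed to be defined elsewhere in this module. For
# reference, the following is a minimal sketch of the interface that
# test_neural_net and check_bead_delta rely on: the conventions nins, nout,
# ar_out, ar_gout, forward, back and resetg. It is an illustration inferred
# from the usage above, not the actual BeadSwitch implementation.
class ToyBeadSigmoid(object):
    def __init__(self, n):
        self.nins = [n]             # sizes of the input arrays
        self.nout = n               # size of the output array
        self.ar_out = np.zeros(n)   # output values, filled by forward()
        self.ar_gout = np.zeros(n)  # gradient of the final scalar w.r.t. ar_out

    def forward(self, ars_in):
        # element-wise sigmoid "switch"
        self.ar_out[:] = 1/(1 + np.exp(-ars_in[0]))

    def resetg(self):
        # clear the output gradient before a new back propagation
        self.ar_gout[:] = 0.0

    def back(self, ars_gin):
        # chain rule: accumulate the gradient w.r.t. the inputs
        ars_gin[0] += self.ar_gout*self.ar_out*(1 - self.ar_out)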
def check_bead_delta(bead, amp, eps):
    '''General derivative testing routine for the Bead classes

       **Arguments:**

       bead
            An instance of a subclass of the Bead class.

       amp
            Amplitude for the randomly generated reference input data for the
            bead.

       eps
            Magnitude of the small displacements around the reference input
            data.
    '''
    # A wrapper around the bead that matches the API of the molmod derivative
    # tester.
    def fun(x, do_gradient=False):
        # chop the contiguous array x into ars_in
        ars_in = []
        offset = 0
        for nin in bead.nins:
            ars_in.append(x[offset:offset + nin])
            offset += nin
        # call forward path
        bead.forward(ars_in)
        # to gradient or not to gradient ...
        if do_gradient:
            # call back path for every output component
            gxs = []
            for i in range(bead.nout):
                bead.resetg()
                bead.ar_gout[i] = 1
                ars_gin = [np.zeros(nin) for nin in bead.nins]
                bead.back(ars_gin)
                gx = np.concatenate(ars_gin)
                gxs.append(gx)
            gxs = np.array(gxs)
            return bead.ar_out, gxs
        else:
            return bead.ar_out

    nx = sum(bead.nins)
    x = np.random.uniform(-amp, amp, nx)
    dxs = np.random.uniform(-eps, eps, (100, nx))
    check_delta(fun, x, dxs)
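# All checkers in this module delegate to molmod's check_delta, which takes a
# function following the do_gradient convention, a reference input x and an
# array of small displacements dxs. As a reference point, the following is a
# simplified sketch of what such a first-order finite-difference test does.
# It is an illustration only, not molmod's actual implementation.
def sketch_check_delta(fun, x, dxs, period=None):
    for dx in dxs:
        f0, g0 = fun(x, do_gradient=True)
        # copy, in case fun returns references to live internal arrays
        f0, g0 = np.copy(f0), np.copy(g0)
        f1, g1 = fun(x + dx, do_gradient=True)
        # actual change of the function value(s)
        d1 = np.atleast_1d(f1 - f0)
        if period is not None:
            # map changes of periodic quantities (e.g. angles) into one period
            d1 -= np.floor(d1/period + 0.5)*period
        # first-order estimate of the change, using the midpoint gradient
        d2 = np.atleast_1d(np.dot(0.5*(g0 + g1), dx))
        # the discrepancy must be much smaller than the change itself
        assert np.linalg.norm(d1 - d2) <= 0.1*np.linalg.norm(d1) + 1e-10

# Illustrative use of the general bead checker with the toy bead above:
# check_bead_delta(ToyBeadSigmoid(4), 1.0, 1e-5)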
def check_gpos_ff(ff):
    def fn(x, do_gradient=False):
        ff.update_pos(x.reshape(ff.system.natom, 3))
        if do_gradient:
            gpos = np.zeros(ff.system.pos.shape, float)
            e = ff.compute(gpos)
            assert np.isfinite(e)
            assert np.isfinite(gpos).all()
            return e, gpos.ravel()
        else:
            e = ff.compute()
            assert np.isfinite(e)
            return e

    x = ff.system.pos.ravel()
    dxs = np.random.normal(0, 1e-4, (100, len(x)))
    check_delta(fn, x, dxs)
def check_vtens_part(system, part, nlists=None, symm_vtens=True):
    '''Derivative test for the virial tensor of a force part.

       * symm_vtens: when True, also check that the virial tensor is a
         symmetric matrix. For some interactions, e.g. dipoles, this does
         not hold.
    '''
    # define some rvecs and gvecs
    if system.cell.nvec == 3:
        gvecs = system.cell.gvecs
        rvecs = system.cell.rvecs
    else:
        gvecs = np.identity(3, float)
        rvecs = np.identity(3, float)
    # get the reduced coordinates
    reduced = np.dot(system.pos, gvecs.transpose())
    if symm_vtens:
        assert abs(np.dot(reduced, rvecs) - system.pos).max() < 1e-10

    def fn(x, do_gradient=False):
        rvecs = x.reshape(3, 3)
        if system.cell.nvec == 3:
            system.cell.update_rvecs(rvecs)
        system.pos[:] = np.dot(reduced, rvecs)
        if nlists is not None:
            nlists.update()
        if do_gradient:
            vtens = np.zeros((3, 3), float)
            e = part.compute(vtens=vtens)
            gvecs = np.linalg.inv(rvecs).transpose()
            grvecs = np.dot(gvecs, vtens)
            assert np.isfinite(e)
            assert np.isfinite(vtens).all()
            assert np.isfinite(grvecs).all()
            if symm_vtens:
                assert abs(vtens - vtens.transpose()).max() < 1e-10
            return e, grvecs.ravel()
        else:
            e = part.compute()
            assert np.isfinite(e)
            return e

    x = rvecs.ravel()
    dxs = np.random.normal(0, 1e-4, (100, len(x)))
    check_delta(fn, x, dxs)
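# Note on the cell-gradient algebra used in fn() above: the convention here
# relates the virial tensor to the gradient of the energy with respect to
# the cell vectors through vtens = rvecs^T . grvecs, so the gradient is
# recovered as grvecs = inv(rvecs)^T . vtens = gvecs . vtens. The following
# is a small self-contained sanity check of that identity with a toy energy
# (illustration only; the toy energy is not a physical model).
def _demo_virial_algebra():
    rng = np.random.RandomState(42)
    rvecs = rng.normal(0, 1, (3, 3))
    # toy energy E(rvecs) = 0.5*||rvecs||_F^2, whose cell gradient is rvecs
    grvecs = rvecs.copy()
    # the virial tensor consistent with this gradient
    vtens = np.dot(rvecs.T, grvecs)
    # recover the cell gradient from the virial, as done in fn() above
    gvecs = np.linalg.inv(rvecs).T
    assert abs(np.dot(gvecs, vtens) - grvecs).max() < 1e-10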
def check_gpos_part(system, part, nlists=None):
    def fn(x, do_gradient=False):
        system.pos[:] = x.reshape(system.natom, 3)
        if nlists is not None:
            nlists.update()
        if do_gradient:
            gpos = np.zeros(system.pos.shape, float)
            e = part.compute(gpos)
            assert np.isfinite(e)
            assert np.isfinite(gpos).all()
            return e, gpos.ravel()
        else:
            e = part.compute()
            assert np.isfinite(e)
            return e

    x = system.pos.ravel()
    dxs = np.random.normal(0, 1e-4, (100, len(x)))
    check_delta(fn, x, dxs)
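# A self-contained illustration of check_gpos_part. The toy system and toy
# part below are hypothetical stand-ins that only provide the attributes the
# checker touches (system.pos, system.natom and part.compute); they are not
# real force-field objects.
class _ToySystem(object):
    def __init__(self, natom):
        self.natom = natom
        self.pos = np.random.normal(0, 1, (natom, 3))


class _ToyHarmonicPart(object):
    '''A harmonic well around the origin: E = 0.5*fc*|pos|^2.'''
    def __init__(self, system, fc=0.3):
        self.system = system
        self.fc = fc

    def compute(self, gpos=None):
        if gpos is not None:
            gpos += self.fc*self.system.pos
        return 0.5*self.fc*np.sum(self.system.pos**2)


def test_toy_harmonic_part():
    system = _ToySystem(4)
    check_gpos_part(system, _ToyHarmonicPart(system))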
def check_diff_ic(icfn, iterp, shape=(-1, 3), period=None, eps=1e-4):
    # eps is the magnitude of the random displacements
    def fnv(x0, do_gradient=False):
        q, g = icfn(x0.reshape(shape), 1)
        if do_gradient:
            return q, g.ravel()
        else:
            return q

    def fng(x0, do_gradient=False):
        q, g, h = icfn(x0.reshape(shape), 2)
        if do_gradient:
            return g.ravel(), h.reshape(g.size, g.size)
        else:
            return g.ravel()

    for ps in iterp():
        x0 = np.array(ps).ravel()
        dxs = np.random.normal(0, eps, (100, len(x0)))
        check_delta(fnv, x0, dxs, period)
        check_delta(fng, x0, dxs, period)
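# A self-contained example of an internal-coordinate function with the
# (value, gradient, hessian) calling convention expected by check_diff_ic.
# This toy bond length is written in the style of the molmod ic functions,
# but it is an illustration, not one of them.
def _toy_bond_length(rs, deriv=0):
    d = rs[0] - rs[1]
    q = np.linalg.norm(d)
    if deriv == 0:
        return (q,)
    u = d/q
    g = np.zeros((2, 3))
    g[0] = u
    g[1] = -u
    if deriv == 1:
        return q, g
    # second derivatives: d^2 q / d d^2 = (I - u u^T)/q, with alternating
    # signs over the two atoms
    p = (np.identity(3) - np.outer(u, u))/q
    h = np.zeros((2, 3, 2, 3))
    h[0, :, 0, :] = p
    h[1, :, 1, :] = p
    h[0, :, 1, :] = -p
    h[1, :, 0, :] = -p
    return q, g, h


def test_toy_bond_length():
    def iterp():
        # a few random pairs of points with a comfortable separation
        for i in range(5):
            ps = np.random.normal(0, 1, (2, 3))
            ps[0, 0] += 2.0
            ps[1, 0] -= 2.0
            yield ps
    check_diff_ic(_toy_bond_length, iterp)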
def check_vtens_ff(ff):
    # define some rvecs and gvecs
    if ff.system.cell.nvec == 3:
        gvecs = ff.system.cell.gvecs
        rvecs = ff.system.cell.rvecs
    else:
        gvecs = np.identity(3, float)
        rvecs = np.identity(3, float)
    # get the reduced coordinates
    reduced = np.dot(ff.system.pos, gvecs.transpose())
    assert abs(np.dot(reduced, rvecs) - ff.system.pos).max() < 1e-10

    def fn(x, do_gradient=False):
        rvecs = x.reshape(3, 3)
        if ff.system.cell.nvec == 3:
            ff.update_rvecs(rvecs)
        ff.update_pos(np.dot(reduced, rvecs))
        if do_gradient:
            vtens = np.zeros((3, 3), float)
            e = ff.compute(vtens=vtens)
            gvecs = np.linalg.inv(rvecs).transpose()
            grvecs = np.dot(gvecs, vtens)
            assert np.isfinite(e)
            assert np.isfinite(vtens).all()
            assert np.isfinite(grvecs).all()
            assert abs(vtens - vtens.transpose()).max() < 1e-10
            return e, grvecs.ravel()
        else:
            e = ff.compute()
            assert np.isfinite(e)
            return e

    x = rvecs.ravel()
    dxs = np.random.normal(0, 1e-4, (100, len(x)))
    check_delta(fn, x, dxs)