def __init__(self):
    self.fc1 = nn.Linear(2, 3)
    self.act1 = nn.Sigmoid()
    self.fc2 = nn.Linear(3, 10)
    self.act2 = nn.Sigmoid()
    self.fc3 = nn.Linear(10, 1)
    self.act3 = nn.Sigmoid()
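# Only the constructor is shown above. Assuming a torch-style nn.Module (an assumption,
# since the parent class is not visible here), a matching forward pass that chains each
# fully connected layer with its sigmoid activation might look like this sketch:
def forward(self, x):
    x = self.act1(self.fc1(x))  # 2 -> 3, sigmoid
    x = self.act2(self.fc2(x))  # 3 -> 10, sigmoid
    x = self.act3(self.fc3(x))  # 10 -> 1, sigmoid
    return x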
def test_0(self):
    s = nn.Sigmoid()
    inputs = np.asarray([0], dtype=np.float16)
    s.call(inputs)
    self.assertEqual(inputs[0], 0.5)

    inputs[:] = 0
    t = nn.Tanh()
    t.call(inputs)
    self.assertEqual(inputs[0], 0)
def grad_check_all(inputs, eps, tolerance):
    print('gradient check', end='...')
    assert nn.grad_check(nn.Sigmoid(inputs.shape[1]), inputs, eps, tolerance)
    assert nn.grad_check(nn.LogSoftMax(inputs.shape[1]), inputs, eps, tolerance)
    assert nn.grad_check(nn.SoftMax(inputs.shape[1]), inputs, eps, tolerance)
    assert nn.grad_check(nn.ReLU(inputs.shape[1]), inputs, eps, tolerance)
    assert nn.grad_check(nn.Tanh(inputs.shape[1]), inputs, eps, tolerance)
    assert nn.grad_check(nn.Linear(inputs.shape[1], max(1, inputs.shape[1] - 3)), inputs, eps, tolerance)
    assert nn.grad_check(nn.CrossEntropy(inputs.shape[1]), inputs, eps, tolerance, np.random.rand(*inputs.shape))
    print('[OK]')
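# nn.grad_check itself is not shown above. As a self-contained illustration (not the nn
# module's API), a standard central-difference gradient check for a scalar-valued
# function can be sketched as follows:
import numpy as np

def numeric_grad_check(f, grad_f, x, eps=1e-5, tolerance=1e-6):
    """Compare an analytic gradient against central finite differences."""
    analytic = grad_f(x)
    numeric = np.zeros_like(x)
    for i in range(x.size):
        old = x.flat[i]
        x.flat[i] = old + eps
        f_plus = f(x)
        x.flat[i] = old - eps
        f_minus = f(x)
        x.flat[i] = old  # restore the perturbed entry
        numeric.flat[i] = (f_plus - f_minus) / (2 * eps)
    return np.max(np.abs(analytic - numeric)) < tolerance

# Example: gradient of f(x) = sum(sigmoid(x)) is sigmoid(x) * (1 - sigmoid(x)).
sigmoid = lambda z: 1.0 / (1.0 + np.exp(-z))
assert numeric_grad_check(lambda z: np.sum(sigmoid(z)),
                          lambda z: sigmoid(z) * (1 - sigmoid(z)),
                          np.random.randn(4, 3))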
def test_pos(self):
    s = nn.Sigmoid()
    inputs = np.arange(11, dtype=np.float64)  # np.float alias was removed in NumPy 1.24
    s.call(inputs)
    n = np.sum(inputs < 0.9)
    self.assertEqual(n, 3)

    inputs[:] = np.arange(11) - 5
    t = nn.Tanh()
    t.call(inputs)
    for i in range(6):
        self.assertAlmostEqual(inputs[5 + i] + inputs[5 - i], 0)  # tanh is odd-symmetric
def main():
    np.random.seed(1)

    model = nn.Sequential()
    model.add(nn.Linear(2, 5))
    model.add(nn.Sigmoid())
    model.add(nn.Linear(5, 1))
    #model.add(nn.Sigmoid())
    model.set_metric(nn.MSE())

    # XOR training set
    x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y = np.array([[0], [1], [1], [0]])
    model.fit(x, y, 5000, 1)
def build_regressions(self):
    num_box = len(self.params['box_sizes']) * len(self.params['ratios'])
    num_class = self.num_class

    # Per-anchor class scores, squashed to [0, 1] with a sigmoid.
    out_channels = num_box * num_class
    classifiers = nn.Sequential(
        nn.Conv2dReLU(256, 256, 3, 1, 1),
        nn.Conv2dReLU(256, 256, 3, 1, 1),
        nn.Conv2dReLU(256, 256, 3, 1, 1),
        nn.Conv2dReLU(256, 256, 3, 1, 1),
        nn.Conv2d(256, out_channels, 3, 1, 1),
        nn.Sigmoid())

    # Per-anchor box offsets (4 coordinates per box), left unsquashed.
    out_channels = num_box * 4
    box_regressions = nn.Sequential(
        nn.Conv2dReLU(256, 256, 3, 1, 1),
        nn.Conv2dReLU(256, 256, 3, 1, 1),
        nn.Conv2dReLU(256, 256, 3, 1, 1),
        nn.Conv2dReLU(256, 256, 3, 1, 1),
        nn.Conv2d(256, out_channels, 3, 1, 1))

    self.classifiers = classifiers
    self.box_regressions = box_regressions
import nn

network = nn.Container()
network.add(nn.Reshape((1, 784)))
network.add(nn.Linear(784, 100))
network.add(nn.Sigmoid())
network.add(nn.Linear(100, 10))
network.add(nn.Sigmoid())
network.add(nn.MSE(), cost=True)
network.make()
# Excerpt: `seed`, `split`, and `mb` (minibatch size) are defined earlier in the script.
import copy
import os

import numpy
import scipy.io
import nn

numpy.random.seed(seed)

if not os.path.exists('qm7.mat'):
    os.system('wget http://www.quantum-machine.org/data/qm7.mat')
dataset = scipy.io.loadmat('qm7.mat')

# --------------------------------------------
# Extract training data
# --------------------------------------------
# Python 3: range objects cannot be concatenated with +, so wrap them in list().
P = dataset['P'][list(range(0, split)) + list(range(split + 1, 5))].flatten()
X = dataset['X'][P]
T = dataset['T'][0, P]

# --------------------------------------------
# Create a neural network
# --------------------------------------------
I, O = nn.Input(X), nn.Output(T)
nnsgd = nn.Sequential([I, nn.Linear(I.nbout, 400), nn.Sigmoid(),
                       nn.Linear(400, 100), nn.Sigmoid(),
                       nn.Linear(100, O.nbinp), O])
nnsgd.modules[-2].W *= 0
nnavg = copy.deepcopy(nnsgd)

# --------------------------------------------
# Train the neural network
# --------------------------------------------
for i in range(1, 1000001):
    # learning rate schedule
    if i > 0:
        lr = 0.001
    if i > 500:
        lr = 0.0025
    if i > 2500:
        lr = 0.005
    if i > 12500:
        lr = 0.01

    r = numpy.random.randint(0, len(X), [mb])
    Y = nnsgd.forward(X[r])
# Excerpt: `get_random_batches` is a helper defined elsewhere; `nn` is presumably torch.nn,
# given the torch loss and optimizer below.
import os

import scipy.io
import torch
import torch.nn as nn

os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'

train_data = scipy.io.loadmat('../data/nist36_train_set1.mat')
train_x, train_y = train_data['train_data'], train_data['train_labels']
input_length = len(train_x[0])

batch_size = 30
num_epochs = 100
batches = get_random_batches(train_x, train_y, batch_size)

model = nn.Sequential(
    nn.Linear(1024, 64),
    nn.Sigmoid(),
    nn.Linear(64, 36),
    nn.Softmax(dim=1))  # dim=1: softmax over the class dimension (implicit dim is deprecated)
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

loss_overall = []
accuracy_overall = []
for epoch in range(num_epochs):
    total_loss = 0
    total_acc = 0
    for xb, yb in batches:
        ## Converting np array to torch tensor