def main(args):
    """Train a ConvolutionNet on CIFAR-10 read from ``args.data_dir``."""
    # Build the network to be trained.
    cnn = ConvolutionNet()

    # Load the CIFAR-10 split and wrap each half in an iterator.
    # NOTE(review): `batch_size` is a module-level name — confirm it is defined.
    dataset = get_CIFAR10_data(args.data_dir)
    train_iter = minpy.nn.io.NDArrayIter(
        data=dataset['X_train'],
        label=dataset['y_train'],
        batch_size=batch_size,
        shuffle=True)
    val_iter = minpy.nn.io.NDArrayIter(
        data=dataset['X_test'],
        label=dataset['y_test'],
        batch_size=batch_size,
        shuffle=False)

    # Configure the solver: Gaussian init, SGD with momentum,
    # progress printed every 20 iterations.
    trainer = minpy.nn.solver.Solver(
        cnn,
        train_iter,
        val_iter,
        num_epochs=10,
        init_rule='gaussian',
        init_config={'stdvar': 0.001},
        update_rule='sgd_momentum',
        optim_config={'learning_rate': 1e-3, 'momentum': 0.9},
        verbose=True,
        print_every=20)

    # Initialize parameters, then run the training loop.
    trainer.init()
    trainer.train()
def main(args):
    """Run CIFAR-10 training for a ConvolutionNet model.

    Data is loaded from ``args.data_dir``; optimization uses SGD with
    momentum for 10 epochs.
    """
    model = ConvolutionNet()

    # Fetch the CIFAR-10 arrays once, then build shuffled train and
    # ordered test iterators over them.
    # NOTE(review): relies on a module-level `batch_size` — confirm it exists.
    data = get_CIFAR10_data(args.data_dir)
    iterators = {
        True: ('X_train', 'y_train'),
        False: ('X_test', 'y_test'),
    }
    train_dataiter, test_dataiter = (
        minpy.nn.io.NDArrayIter(
            data=data[x_key],
            label=data[y_key],
            batch_size=batch_size,
            shuffle=do_shuffle)
        for do_shuffle, (x_key, y_key) in iterators.items())

    # Solver: Gaussian weight init (std 0.001), SGD+momentum,
    # verbose logging every 20 iterations.
    solver = minpy.nn.solver.Solver(
        model,
        train_dataiter,
        test_dataiter,
        num_epochs=10,
        init_rule='gaussian',
        init_config={'stdvar': 0.001},
        update_rule='sgd_momentum',
        optim_config={'learning_rate': 1e-3, 'momentum': 0.9},
        verbose=True,
        print_every=20)

    solver.init()   # allocate and initialize parameters
    solver.train()  # run the optimization loop
def main(args):
    """Train a TwoLayerNet on the raw MNIST idx files in ``args.data_dir``."""
    net = TwoLayerNet()

    # Paths to the idx-format MNIST training images and labels.
    images_path = os.path.join(args.data_dir, 'train-images-idx3-ubyte')
    labels_path = os.path.join(args.data_dir, 'train-labels-idx1-ubyte')

    # Labels file: big-endian header (magic 2049, count), then one byte
    # per label.
    with open(labels_path, 'rb') as fin:
        magic, count = struct.unpack('>II', fin.read(8))
        assert magic == 2049
        assert count == 60000
        labels = real_numpy.fromfile(fin, dtype=real_numpy.int8)

    # Images file: big-endian header (magic 2051, count, rows, cols),
    # then raw pixel bytes reshaped to one flat row per image.
    with open(images_path, 'rb') as fin:
        magic, count, n_rows, n_cols = struct.unpack('>IIII', fin.read(16))
        assert magic == 2051
        assert count == 60000
        assert n_rows == n_cols == 28
        pixels = real_numpy.fromfile(
            fin, dtype=real_numpy.uint8).reshape(count, n_rows * n_cols)

    # NOTE(review): `batch_size` comes from module scope — confirm it is set.
    train_iter = io.NDArrayIter(
        data=pixels, label=labels, batch_size=batch_size, shuffle=True)

    # NOTE(review): the training iterator is also passed as the eval
    # iterator, so "test" accuracy is measured on the training set —
    # confirm this is intentional for the example.
    trainer = minpy.nn.solver.Solver(
        net,
        train_iter,
        train_iter,
        num_epochs=10,
        init_rule='gaussian',
        init_config={'stdvar': 0.001},
        update_rule='sgd_momentum',
        optim_config={'learning_rate': 1e-4, 'momentum': 0.9},
        verbose=True,
        print_every=20)

    # Initialize parameters, then train.
    trainer.init()
    trainer.train()
def main(args):
    """MNIST training entry point: load idx files, then fit a TwoLayerNet.

    Reads ``train-images-idx3-ubyte`` / ``train-labels-idx1-ubyte`` from
    ``args.data_dir`` and trains for 10 epochs with SGD + momentum.
    """
    model = TwoLayerNet()

    data_dir = args.data_dir
    img_fname = os.path.join(data_dir, 'train-images-idx3-ubyte')
    label_fname = os.path.join(data_dir, 'train-labels-idx1-ubyte')

    # --- labels: 8-byte big-endian header, then one byte per label ---
    with open(label_fname, 'rb') as label_file:
        header = struct.unpack('>II', label_file.read(8))
        assert header[0] == 2051 - 2  # magic number 2049
        assert header[1] == 60000
        label = real_numpy.fromfile(label_file, dtype=real_numpy.int8)

    # --- images: 16-byte big-endian header, then 28x28 pixel bytes ---
    with open(img_fname, 'rb') as img_file:
        magic_nr, size, rows, cols = struct.unpack('>IIII', img_file.read(16))
        assert magic_nr == 2051
        assert size == 60000
        assert rows == cols == 28
        flat = rows * cols  # flatten each image to a single row
        img = real_numpy.fromfile(
            img_file, dtype=real_numpy.uint8).reshape(size, flat)

    # NOTE(review): `batch_size` is a module-level name — confirm it exists.
    train_dataiter = io.NDArrayIter(
        data=img, label=label, batch_size=batch_size, shuffle=True)

    # NOTE(review): train iterator doubles as the eval iterator here, so
    # reported accuracy is on the training set — confirm intentional.
    solver = minpy.nn.solver.Solver(
        model,
        train_dataiter,
        train_dataiter,
        num_epochs=10,
        init_rule='gaussian',
        init_config={'stdvar': 0.001},
        update_rule='sgd_momentum',
        optim_config={'learning_rate': 1e-4, 'momentum': 0.9},
        verbose=True,
        print_every=20)

    solver.init()
    solver.train()