def question_1i_sanity_check():
    """Sanity check for cnn.py designed by Kortez.

    Checks, in order:
      1. the output shape of ``CNN.forward`` on a zero batch,
      2. forward outputs against hand-computed values for fixed all-ones
         weights / constant bias (part 1),
      3. forward outputs against a pure-Python re-computation of the
         convolution on random input (part 2).
    Raises AssertionError on the first mismatch.
    """
    # Shape check on a (BATCH_SIZE, 50, 21) zero input.
    # NOTE(review): the expected torch.Size([5, 256]) hard-codes a batch of 5,
    # so this assumes BATCH_SIZE == 5 — confirm against the module constant.
    test_model = cnn.CNN()
    x_test0 = torch.zeros(BATCH_SIZE, 50, 21)
    assert(test_model.forward(x_test0).shape == torch.Size([5, 256])), \
        "Shape of forward() should be {} but is: {}".format(
            torch.Size([5, 256]), test_model.forward(x_test0).shape)
    print("-" * 80)
    print("Shape of forward() passed Sanity Check!")
    print("-" * 80)

    # Part 1: pin conv weights to all ones and bias to -10 so the expected
    # channel outputs (6115.0 and 115.0) can be computed by hand.
    test_model.load_state_dict({'conv.weight': torch.full((256, 50, 5), 1),
                                'conv.bias': torch.full((256,), -10)})
    # Batch of 2: rows 0..49 repeated 21 times, and the same rows mod 2.
    x_test1 = torch.tensor([[21 * [x] for x in range(50)],
                            [21 * [x % 2] for x in range(50)]]).float()
    out_p1 = [[x.item() for x in y] for y in test_model.forward(x_test1)]
    gold_p1 = [256 * [6115.0], 256 * [115.0]]
    assert (out_p1 == gold_p1), \
        "Output of part1 should be {} but is: {}".format(gold_p1, out_p1)
    print("Part 1 passed Sanity Check!")
    print("-" * 80)

    # Part 2: fresh random model; recompute the convolution with plain Python
    # loops and compare against forward(), rounded to 2 decimals.
    test_model = cnn.CNN()
    b_size = 2
    # rand() is uniform on [0, 1); subtracting -1 shifts the input to [1, 2).
    x_test2 = (torch.rand((b_size, 50, 21)) - torch.full(
        (b_size, 50, 21), -1)).float()
    out_p2 = [[round(x.item(), 2) for x in y]
              for y in test_model.forward(x_test2)]
    w_c, b_c = test_model.state_dict()['conv.weight'], test_model.state_dict(
    )['conv.bias']
    # Permute so the loops below index as [batch][position][feature] for the
    # input and [channel][position][feature] for the weights.
    x_2 = [[[x.item() for x in y] for y in z]
           for z in x_test2.permute(0, 2, 1)]
    w = [[[x.item() for x in y] for y in z] for z in w_c.permute(0, 2, 1)]
    b = [x.item() for x in b_c]
    gold_p2 = [256 * [1] for i in range(b_size)]
    for batch in range(b_size):
        for window in range(256):
            conv_sum = []
            # 17 = 21 - 5 + 1 valid positions for a width-5 kernel.
            for k in range(17):
                conv_item = 0
                for i in range(5):
                    for j in range(50):
                        conv_item += x_2[batch][k + i][j] * w[window][i][j]
                conv_item += b[window]
                conv_sum.append(conv_item)
            # Max over positions, then clamp at 0 (max-pool followed by ReLU).
            gold_p2[batch][window] = round(max(max(conv_sum), 0.0), 2)
    assert (out_p2 == gold_p2), \
        "Output of part2 should be {} but is: {}".format(gold_p2, out_p2)
    print("Part 2 passed Sanity Check!")
    print("-" * 80)
    print("Sanity Check Passed for Question 1i CNN!")
    print("-" * 80)
def run(self):
    """Poll the sensor on BCM pin 4 forever; when it reads low, light pin 12,
    run the CNN prediction, and open the servo on a recognized result."""
    GPIO.setmode(GPIO.BCM)
    servo = suvomoter.suvomoter()
    sensor_pin = 4
    window = QtGui.QWindow()
    window.setGeometry(0, 0, 100, 100)
    GPIO.setup(sensor_pin, GPIO.IN)
    GPIO.setup(12, GPIO.OUT)
    net = cnn.CNN()
    accounts = net.nb_classes
    time.sleep(2)
    while True:
        #if accounts != record.VOICE_RECORD().account_number:
        #    net.build()
        if not GPIO.input(sensor_pin):
            # Low sensor reading means something was detected.
            print("detected")
            GPIO.output(12, True)
            rst = net.predict()
            GPIO.output(12, False)
            if rst != -1:
                servo.open()
            #time.sleep(1)
        else:
            #print GPIO.input(sensor_pin)
            print("no detected")
            #time.sleep(1)
        time.sleep(0.2)
def create_network(self):
    """Construct the CNN from the stored training parameters and return its
    output passed through a sigmoid and rounded to 0/1."""
    params = self.train_params
    network = cn.CNN(self.x,
                     self.keep_prob,
                     params.convolutional_layer_count,
                     params.image_size,
                     params.number_of_classes,
                     params.neuron_multiplier,
                     params.convolutional_filter)
    raw_output = network.return_network()
    return tf.round(tf.nn.sigmoid(raw_output))
def chooseModel(opts):
    """Select and build the model named by ``opts.model``.

    Supported models are 'cnn', 'cudnnLstm' and 'sae'; for each one, the
    parameter set depends on whether ``opts.mode`` is 'onlyIncoming'.

    Returns:
        (params, modelObj): the generated default parameters and the
        constructed model instance.

    Raises:
        ValueError: if ``opts.model`` is not one of the supported names.
        (Previously an unknown model fell through to the return statement
        and raised an opaque UnboundLocalError.)
    """
    print('selecting model to test...')
    if 'cnn' == opts.model:
        if 'onlyIncoming' == opts.mode:
            params = cnn.generate_default_onlyIncoming_params(opts.dataType)
        else:
            params = cnn.generate_default_whole_params(opts.dataType)
        modelObj = cnn.CNN(opts, params)
    elif 'cudnnLstm' == opts.model:
        if 'onlyIncoming' == opts.mode:
            params = cudnnLstm.generate_default_onlyIncoming_params(opts.dataType)
        else:
            params = cudnnLstm.generate_default_whole_params(opts.dataType)
        modelObj = cudnnLstm.LSTM(opts, params)
    elif 'sae' == opts.model:
        if 'onlyIncoming' == opts.mode:
            params = sae.generate_default_onlyIncoming_params(opts.dataType)
        else:
            params = sae.generate_default_whole_params(opts.dataType)
        modelObj = sae.SAE(opts, params)
    else:
        # Fail fast with a clear message instead of UnboundLocalError below.
        raise ValueError(
            "Unknown model {!r}; expected 'cnn', 'cudnnLstm' or 'sae'".format(
                opts.model))
    return params, modelObj
def mnist_experiment(args):
    """Run one MNIST experiment: split the data, train a CNN or CCNN on 5000
    samples, then save logs (test mode), report per-layer accuracy
    (eval_all), or report final accuracy on the held-out split.

    Returns the final accuracy (float/tensor item) except in test mode,
    where logs are saved and nothing is returned.
    """
    # NOTE(review): the roles look deliberately swapped — the official MNIST
    # *train* split (60k) is held out for testing, while training/validation
    # come from the official *test* split (10k). Confirm this is intended.
    #60k samples
    dataset_for_test = torchvision.datasets.MNIST(
        'dataset',
        train=True,
        transform=torchvision.transforms.ToTensor(),
        download=True)
    #10k samples
    dataset_for_trainval = torchvision.datasets.MNIST(
        'dataset',
        train=False,
        transform=torchvision.transforms.ToTensor(),
        download=True)
    if args.test:
        # Test mode: train on 5000 samples of the 10k split, evaluate on
        # 5000 samples drawn from the 60k split.
        n_train = 5000
        n_out = len(dataset_for_trainval) - n_train
        dataset_train, _ = data.random_split(dataset_for_trainval,
                                             [n_train, n_out])
        n_out = len(dataset_for_test) - n_train
        dataset_test, _ = data.random_split(dataset_for_test,
                                            [n_train, n_out])
    else:
        # Validation mode: 5000/rest train/val split of the 10k set.
        n_train = 5000
        n_val = len(dataset_for_trainval) - n_train
        dataset_train, dataset_test = data.random_split(
            dataset_for_trainval, [n_train, n_val])
    dataloader_test = data.DataLoader(dataset_test,
                                      batch_size=64,
                                      num_workers=layers.NUM_WORKERS)
    if args.test:
        # Stashed on the Logger class — presumably so logging during training
        # can evaluate on the test loader; verify against logger.Logger.
        logger.Logger.dataloader_test = dataloader_test
    print("Split for {}: {}/{} samples".format(
        "test" if args.test else "validation", len(dataset_train),
        len(dataset_test)))
    print("lr = {:.5f} gamma = {:.5f}".format(args.lr, args.gamma))
    # Two layer configurations; the second doubles the approximation
    # dimension m and the channel count r (16 -> 32).
    layer1 = {
        'm': args.approx_m,
        'd2': 10,
        'R': args.R,
        'patch_dim': 5,
        'patch_stride': 1,
        'kernel': 'rbf',
        'avg_pooling_kernel_size': 2,
        'r': 16,
        'gamma': args.gamma,
    }
    layer2 = {
        'm': 2 * args.approx_m,
        'd2': 10,
        'R': args.R,
        'patch_dim': 5,
        'patch_stride': 1,
        'kernel': 'rbf',
        'avg_pooling_kernel_size': 2,
        'r': 32,
        'gamma': args.gamma,
    }
    if args.cnn:
        model = cnn.CNN(img_shape=(1, 28, 28),
                        layer_confs=[layer1, layer2],
                        activation_func=args.activation)
    else:
        model = layers.CCNN(img_shape=(1, 28, 28),
                            layer_confs=[layer1, layer2])
    loggers = model.train(dataset_train,
                          nn.CrossEntropyLoss(),
                          'fro',
                          n_epochs=args.epochs,
                          batch_size=64,
                          lr=args.lr,
                          verbose=args.verbose)
    if args.test:
        # Persist one log file per layer; no accuracy is returned here.
        for i, log in enumerate(loggers):
            log.save('layer_{}'.format(i))
    elif args.eval_all:
        # Evaluate the classifier of every layer, not just the final one.
        acc = test_all(model, dataloader_test)
        for l, layer_acc in enumerate(acc):
            print("Accuracy: {:.2f}% on {} samples for layer {}".format(
                layer_acc * 100, len(dataset_test), l))
        if layers.SAFETY_CHECK:
            # The last layer of test_all must agree with the plain test() path.
            assert torch.norm(acc[-1] - test(model, dataloader_test)) <= 1e-4
        return acc[-1].item()
    else:
        acc = test(model, dataloader_test)
        print("Accuracy: {:.2f}% on {} samples".format(acc * 100,
                                                       len(dataset_test)))
        return acc
def create_model(self):
    """Assemble, build, and compile the 1-D CNN layer stack and return it."""

    def spec(kind, **args):
        # Each entry is a {'type': ..., 'args': ...} record consumed by
        # cnn.CNN; layers that take no arguments get args=None.
        return {'type': kind, 'args': args if args else None}

    layer_specs = [
        spec('sepconv1D', filters=32, kernel_size=5, activation='relu',
             input_shape=self.state_shape),
        spec('maxpool1D', pool_size=2),
        spec('conv1D', filters=128, kernel_size=3, activation='relu'),
        spec('maxpool1D', pool_size=2),
        spec('dropout', ratio=0.15),
        spec('flatten'),
        spec('dense', output=250),
        spec('dropout', ratio=0.2),
        spec('activation', function='relu'),
        spec('dense', output=250),
        spec('dense', output=125),
        spec('dense', output=32),
        spec('dense', output=self.action_size),
        spec('activation', function='linear'),
    ]
    model = cnn.CNN(layers=layer_specs)
    model.build_model()
    model.compile_model()
    return model