def CIFAR10Test(loopCnt=600, isFull=False, barLen=105):
    pyb.LED(1).off()
    sensor.reset()                          # Reset and initialize the sensor.
    sensor.set_contrast(3)
    sensor.set_pixformat(sensor.RGB565)     # Set pixel format to RGB565 (or GRAYSCALE)
    sensor.set_framesize(sensor.VGA)        # Set frame size to VGA (640x480)
    sensor.set_windowing((192, 192))        # Set 192x192 window.
    sensor.skip_frames(time=300)            # Wait for settings to take effect.
    sensor.set_auto_gain(False)
    #sensor.set_framerate(0<<9|1<<12)
    if isFull:
        net = nn.load('/cifar10.network')
    else:
        net = nn.load('/cifar10_fast.network')
    labels = ['plane', 'auto', 'bird', 'cat', 'deer',
              'dog', 'frog', 'horse', 'ship', 'truck']
    clock = time.clock()
    tAvg = 0.0
    startTick = time.ticks()
    while True:
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        t0 = time.ticks()
        lst = net.search(img, threshold=0.640, min_scale=1, scale_mul=0.8,
                         x_overlap=-1, y_overlap=-1, contrast_threshold=0.5)
        t1 = time.ticks() - t0
        tAvg = tAvg * 0.9 + t1 * 0.1        # exponential moving average of the search time
        img.draw_string(4, 8,
                        'CIFAR-10: classify:\nplane,auto,bird,cat,\ndeer,dog,frog,horse,\nship,truck',
                        color=(0, 0, 0))
        DrawPgsBar(img, barLen, loopCnt, startTick)
        for obj in lst:
            print(' %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
            rc = obj.rect()
            #img.draw_rectangle(rc, color=(255,255,255))
            img.draw_rectangle(barLen + 10, 1, 50, 8, fill=True, color=(0, 0, 0))
            img.draw_string(barLen + 10, 0, labels[obj.index()])
        print('algo time cost : %.2f ms' % (tAvg))
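# Note: DrawPgsBar() is called above (and in LENetTest() below) but is not
# defined in this excerpt. A minimal sketch of what it appears to do, inferred
# from the inline progress-bar code in LENETTest() -- the geometry here is an
# assumption, not the original implementation:
def DrawPgsBar(img, barLen, loopCnt, startTick):
    # Bar shrinks from barLen to 0 as the elapsed time approaches loopCnt.
    lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
    img.draw_rectangle(0, 2, barLen + 1, 3)                # outline
    img.draw_rectangle(0, 3, max(lnLen, 0), 1, fill=True)  # remaining-time fill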
def trainAll(params, trainOpt,
             trainInput, trainTarget, trainInputWeights,
             validInput, validTarget, validInputWeights,
             initWeights=None):
    model = nn.load(params['modelFilename'])
    if initWeights is not None:
        model.loadWeights(initWeights)
    trainer = nn.Trainer(name=params['name'],
                         model=model,
                         trainOpt=trainOpt,
                         outputFolder=params['outputFolder'])
    # Combine train & valid set
    allInput, allTarget, allInputWeights = combineInputs(
        trainInput, trainTarget, trainInputWeights,
        validInput, validTarget, validInputWeights)
    trainer.train(trainInput=allInput,
                  trainTarget=allTarget,
                  trainInputWeights=allInputWeights)
    return model, trainer
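# combineInputs() is referenced above but not defined in this excerpt. A
# minimal sketch under the assumption that inputs, targets, and weights are
# NumPy arrays indexed by example along axis 0:
import numpy as np

def combineInputs(trainInput, trainTarget, trainInputWeights,
                  validInput, validTarget, validInputWeights):
    allInput = np.concatenate([trainInput, validInput], axis=0)
    allTarget = np.concatenate([trainTarget, validTarget], axis=0)
    if trainInputWeights is None or validInputWeights is None:
        allInputWeights = None
    else:
        allInputWeights = np.concatenate(
            [trainInputWeights, validInputWeights], axis=0)
    return allInput, allTarget, allInputWeights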
def trainValid(params, trainOpt,
               trainInput, trainTarget, trainInputWeights,
               validInput, validTarget, validInputWeights,
               initWeights=None):
    model = nn.load(params['modelFilename'])
    if initWeights is not None:
        model.loadWeights(initWeights)
    trainer = nn.Trainer(
        name=params['name'] +
        ('-v' if params['validDataFilename'] is not None else ''),
        model=model,
        trainOpt=trainOpt,
        outputFolder=params['outputFolder'])
    # Validation training
    trainer.train(trainInput=trainInput,
                  trainTarget=trainTarget,
                  trainInputWeights=trainInputWeights,
                  validInput=validInput,
                  validTarget=validTarget,
                  validInputWeights=validInputWeights)
    return model, trainer
def loadModel(taskId, resultsFolder):
    print('Loading model...')
    modelSpecFile = '%s/%s/%s.model.yml' % (resultsFolder, taskId, taskId)
    modelWeightsFile = '%s/%s/%s.w.npy' % (resultsFolder, taskId, taskId)
    model = nn.load(modelSpecFile)
    model.loadWeights(np.load(modelWeightsFile))
    return model
def loadEnsemble(taskIds, resultsFolder):
    """Load class-specific models."""
    models = []
    for taskId in taskIds:
        taskFolder = os.path.join(resultsFolder, taskId)
        modelSpec = os.path.join(taskFolder, '%s.model.yml' % taskId)
        modelWeights = os.path.join(taskFolder, '%s.w.npy' % taskId)
        model = nn.load(modelSpec)
        model.loadWeights(np.load(modelWeights))
        models.append(model)
    return models
def main(argv):
    model = nn.load(argv[1])
    gui = paint.gui()
    print("past init")
    while True:
        time.sleep(2 - time.monotonic() % 2)  # wake on even 2-second boundaries
        image = gui.get_image()
        data = np.array(image).astype(np.float32)
        # Downsample by averaging non-overlapping 10x10 blocks.
        data = skimage.measure.block_reduce(data, block_size=(10, 10), func=np.mean)
        data = torch.from_numpy(data)
        data = data[None, None, :, :]  # add batch and channel dimensions
        print(data.size())
        nn.predict(model, data)
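# A quick standalone check of the block_reduce() downsampling used in main():
# averaging 10x10 blocks turns a 280x280 canvas into the 28x28 input an
# MNIST-style digit classifier expects (the 280x280 canvas size is an
# assumption about the paint GUI):
import numpy as np
import skimage.measure

canvas = np.zeros((280, 280), dtype=np.float32)
small = skimage.measure.block_reduce(canvas, block_size=(10, 10), func=np.mean)
print(small.shape)  # -> (28, 28)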
def unittest(data_path, temp_path):
    import os, nn, image
    match = 0
    test_dir = data_path + "/cifar10"
    files = os.listdir(test_dir)
    total = len(files)
    labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',
              'dog', 'frog', 'horse', 'ship', 'truck']
    net = nn.load(data_path + '/cifar10_fast.network')
    for f in sorted(files):
        img = image.Image(test_dir + "/" + f, copy_to_fb=True)
        out = net.forward(img)
        label = labels[out.index(max(out))]
        if f.split('-')[0] == label:
            match += 1
    return (match / total * 100) > 90
def unittest(data_path, temp_path):
    import os, nn, image
    match = 0
    test_dir = data_path + "/cifar10"
    files = os.listdir(test_dir)
    total = len(files)
    labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',
              'dog', 'frog', 'horse', 'ship', 'truck']
    net = nn.load('/cifar10_fast.network')
    for f in sorted(files):
        img = image.Image(test_dir + "/" + f, copy_to_fb=True)
        out = net.forward(img)
        label = labels[out.index(max(out))]
        if f.split('-')[0] == label:
            match += 1
    return total == 21 and match == 19
def LENETTest(loopCnt=1200, barLen=60):
    sensor.reset()                          # Reset and initialize the sensor.
    sensor.set_contrast(3)
    sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
    sensor.set_framesize(sensor.VGA)        # Set frame size to VGA (640x480)
    sensor.set_windowing((84, 84))          # Set 84x84 window.
    sensor.skip_frames(time=1400)           # Wait for settings to take effect.
    sensor.set_auto_gain(False)
    sensor.set_framerate(2 << 2)
    #sensor.set_auto_whitebal(False)
    #sensor.set_auto_exposure(False)
    net = nn.load('/lenet.network')
    labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    clock = time.clock()
    avg = 0.0
    pyb.LED(1).on()
    startTick = time.ticks()
    while True:
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        img.draw_string(3, 8, 'recg 0-9', color=(0, 0, 0))
        t1 = time.ticks()
        # Binarize (inverted) so dark digits become white blobs, as LeNet expects.
        tmp_img = img.copy().binary([(120, 255)], invert=True)
        lst = net.search(tmp_img, threshold=0.8, min_scale=1, scale_mul=0.8,
                         x_overlap=-1, y_overlap=-1, contrast_threshold=0.5,
                         softmax=False)
        t2 = time.ticks() - t1
        avg = avg * 0.95 + t2 * 0.05        # exponential moving average of the search time
        # Draw a countdown progress bar.
        lnLen = (barLen * (loopCnt - (time.ticks() - startTick))) // loopCnt
        img.draw_rectangle(0, 2, barLen + 1, 3)
        img.draw_rectangle(0, 3, lnLen, 1, fill=True)
        for obj in lst:
            print('Detected %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
            img.draw_rectangle(obj.rect())
            img.draw_string(barLen + 8, 2, labels[obj.index()], color=(0, 0, 0))
        # print(clock.fps())
        print('algo time cost : %.2f ms' % (avg))
def LENetTest(loopCnt=600, isFull=False, barLen=80):
    sensor.reset()                          # Reset and initialize the sensor.
    sensor.set_contrast(3)
    sensor.set_pixformat(sensor.GRAYSCALE)  # Set pixel format to GRAYSCALE
    sensor.set_framesize(sensor.CIF)        # Set frame size to CIF (352x288)
    sensor.set_windowing((96, 96))          # Set 96x96 window.
    sensor.set_auto_gain(True)
    sensor.set_auto_whitebal(False)
    sensor.set_auto_exposure(False)
    sensor.skip_frames(time=400)            # Wait for settings to take effect.
    net = nn.load('/lenet.network')
    labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    clock = time.clock()
    tAvg = 0.0
    startTick = time.ticks()
    while True:
        if time.ticks() - startTick > loopCnt:
            break
        clock.tick()
        img = sensor.snapshot()
        t0 = time.ticks()
        tmp_img = img.copy().binary([(120, 255)], invert=True)
        lst = net.search(tmp_img, threshold=0.8, min_scale=1.0, scale_mul=0.8,
                         x_overlap=-1, y_overlap=-1, contrast_threshold=0.5,
                         softmax=False)
        t1 = time.ticks() - t0
        tAvg = tAvg * 0.9 + t1 * 0.1
        img.draw_string(4, 8, 'LENET', color=(0, 0, 0))
        DrawPgsBar(img, barLen, loopCnt, startTick)
        for obj in lst:
            print('Detected %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
            img.draw_rectangle(obj.rect(), color=(255, 255, 255))
            img.draw_string(4, 4, labels[obj.index()])
        print('algo time cost : %.2f ms' % (tAvg))
for pseudolabel_images, pseudolabel_labels in (
        (args.data_pseudolabel_images, args.data_pseudolabel_labels),
        (args.data_pseudolabel_images2, args.data_pseudolabel_labels2)):
    plabel = PseudoLabel(pseudolabel_images, pseudolabel_labels)
    splits = int(len(plabel) / len(datasets['train']) / args.data_pseudolabel_rate)  # 30% normal images
    if splits > 1:
        plabel = RollSplitSet(plabel, splits)
    datasets['train'] += plabel

#datasets = {mode: d if mode == 'train' else expand_valid_dataset(d) for mode, d in datasets.items()}
datasets_t = {mode: TransformedDataset(d, data_augs[mode]) for mode, d in datasets.items()}
classnames = datasets['train'].classnames[1:]

if args.model:
    print('loading model {}...'.format(args.model))
    model = nn.load(args.model)
else:
    model = nn.create(args.version, args.basenet, classnames)

if args.weights:
    print(f'loading weights {args.weights}')
    weights = torch.load(args.weights, map_location='cpu')
    weights = weights.get('state_dict', weights)
    model.load_state_dict(weights, strict=True)

loss_functions = dict(bce=binary_cross_entropy_with_logits,
                      focal=binary_focal_loss_with_logits,
                      lovasz=lovasz_hinge,
                      lovasz_bce=lovasz_bce_with_logits,
                      lovasz_focal_symmetric=lovasz_focal_symmetric,
                      dice=dice_loss_with_logits,
                      dice_and_bce=dice_and_bce)
sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQSIF)
sensor.set_auto_gain(True)
sensor.set_auto_whitebal(True)
sensor.set_auto_exposure(True)
sensor.skip_frames(time=500)
sensor.set_contrast(3)

thresholds = (0, 50)
uart = UART(3, 9600, timeout_char=10)
uart.init(9600)
clock = time.clock()
ImageX = 88
ImageY = 60

net = nn.load('/fnt-chars74k.network')
labels = ['n/a', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
for i in range(ord('A'), ord('Z') + 1):
    labels.append(chr(i))
for i in range(ord('a'), ord('z') + 1):
    labels.append(chr(i))

while True:
    clock.tick()
    img = sensor.snapshot()
    img.cartoon(size=1, seed_threshold=1)
    img.binary([thresholds], zero=True)
    img.rotation_corr(0, 0, 180, 0, 0, 1)
    blobfound = False
    for blob in img.find_blobs([thresholds], area_threshold=100, merge=False):
        if img.get_statistics(roi=blob.rect()).stdev() < 50:
if "-v-" in params["savedModelId"]: # Train model model, trainer = trainValid( params, trainOpt, trainInput, trainTarget, trainInputWeights, validInput, validTarget, validInputWeights, initWeights=initWeights, ) # Reload model model = nn.load(params["modelFilename"]) model.loadWeights(np.load(trainer.modelFilename)) # Run tests runTests(params, model, trainer) # Re-train if params["testDataFilename"] is not None and params["validDataFilename"] is not None: # Setup options trainOpt["needValid"] = False print "Stopped score:", trainer.stoppedTrainScore trainOpt["stopScore"] = trainer.stoppedTrainScore # Train train+valid model, trainer = trainAll(
# Untitled - By: Gehaha - Thu Mar 7 2019
# Smile detection example
import sensor, time, image, os, nn

sensor.reset()
sensor.set_contrast(2)
sensor.set_pixformat(sensor.RGB565)
sensor.set_framesize(sensor.QVGA)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)

# Load the smile detection network
net = nn.load('/smile.network')

# Load face haar cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# FPS clock
clock = time.clock()

while True:
    clock.tick()
    # Capture snapshot
    img = sensor.snapshot()
    # Find faces
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
    # Detect smiles
# Translation and comments: 01Studio
import sensor, image, time, os, nn

# Camera initialization
sensor.reset()                           # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.GRAYSCALE)   # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QVGA)        # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))         # Set 128x128 window.
sensor.skip_frames(time=100)
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

# Load the LeNet neural network model
net = nn.load('/lenet.network')
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

clock = time.clock()  # Create a clock object to track the FPS.
while True:
    clock.tick()             # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    # copy() stores a copy of the image on the MicroPython heap rather than
    # in the frame buffer. Binarization simplifies processing; the threshold
    # can be tuned.
    out = net.forward(img.copy().binary([(150, 255)], invert=True))
    max_idx = out.index(max(out))
    score = int(out[max_idx] * 100)
    # A score above 70 is considered a trustworthy classification.
    if (score < 70):
        score_str = "??:??%"
    else:
        score_str = "%s:%d%%" % (labels[max_idx], score)
    img.draw_string(0, 0, score_str)
trainData = np.load(trainDataFilename)
trainInput = trainData[0]
trainTarget = trainData[1]
trainInput, trainTarget = vt.shuffleData(trainInput, trainTarget,
                                         np.random.RandomState(2))
with open(configFilename) as f:
    trainOpt = yaml.load(f)

for i in range(0, 10):
    trainInput_, trainTarget_, testInput_, testTarget_ = \
        vt.splitData(trainInput, trainTarget, 0.1, i)
    trainOpt['heldOutRatio'] = 0.1
    trainOpt['xvalidNo'] = 0
    trainOpt['needValid'] = True
    model = nn.load(modelFilename)
    trainer = nn.Trainer(name=name + ('-%d-v' % i),
                         model=model,
                         trainOpt=trainOpt,
                         outputFolder=outputFolder)
    trainer.train(trainInput_, trainTarget_)

    # Train again with all data, without validation
    trainOpt['needValid'] = False
    trainOpt['numEpoch'] = trainer.stoppedEpoch + 1
    trainer = nn.Trainer(name=name + ('-%d' % i),
                         model=model,
                         trainOpt=trainOpt,
                         outputFolder=outputFolder)
    trainer.train(trainInput_, trainTarget_)
    testOutput = nn.test(model, testInput_)
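# vt.splitData() above comes from a helper module not shown in this excerpt.
# A sketch of the behavior its call site implies -- hold out fold i (a 0.1
# fraction of the examples) as the test split -- with the caveat that the
# real implementation may differ:
import numpy as np

def splitData(inputs, targets, heldOutRatio, i):
    n = inputs.shape[0]
    foldSize = int(n * heldOutRatio)
    testIdx = np.arange(i * foldSize, (i + 1) * foldSize)
    trainIdx = np.setdiff1d(np.arange(n), testIdx)
    return inputs[trainIdx], targets[trainIdx], inputs[testIdx], targets[testIdx]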
# CIFAR10 Example
import sensor, image, time, os, nn

sensor.reset()                        # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.RGB565)   # Set pixel format to RGB565
sensor.set_framesize(sensor.QVGA)     # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))      # Set 128x128 window.
sensor.skip_frames(time=1000)
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

# Load cifar10 network
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
#net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',
          'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()  # Create a clock object to track the FPS.
while True:
    clock.tick()             # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    out = net.forward(img)
    max_idx = out.index(max(out))
    score = int(out[max_idx] * 100)
    if (score < 70):
        score_str = "??:??%"
    else:
        score_str = "%s:%d%%" % (labels[max_idx], score)
    img.draw_string(0, 0, score_str)
    print(clock.fps())
# LeNet Example
import sensor, image, time, os, nn

sensor.reset()                           # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.GRAYSCALE)   # Set pixel format to GRAYSCALE
sensor.set_framesize(sensor.QVGA)        # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))         # Set 128x128 window.
sensor.skip_frames(time=100)
sensor.set_auto_gain(False)
sensor.set_auto_exposure(False)

# Load lenet network
net = nn.load('/lenet.network')
labels = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

clock = time.clock()  # Create a clock object to track the FPS.
while True:
    clock.tick()             # Update the FPS clock.
    img = sensor.snapshot()  # Take a picture and return the image.
    out = net.forward(img.copy().binary([(150, 255)], invert=True))
    max_idx = out.index(max(out))
    score = int(out[max_idx] * 100)
    if (score < 70):
        score_str = "??:??%"
    else:
        score_str = "%s:%d%% " % (labels[max_idx], score)
    img.draw_string(0, 0, score_str)
    print(clock.fps())
    # Note: OpenMV Cam runs about half as fast when connected
    # to the IDE. The FPS should increase once disconnected.
with open(params['configFilename']) as f:
    trainOpt = yaml.load(f)
trainData = np.load(params['trainDataFilename'])
trainInput = trainData[0]
trainTarget = trainData[1]
if params['validDataFilename'] is not None:
    validData = np.load(params['validDataFilename'])
    validInput = validData[0]
    validTarget = validData[1]
else:
    validInput = None
    validTarget = None

model = nn.load(params['modelFilename'])
trainer = nn.Trainer(
    name=params['name'] +
    ('-v' if params['validDataFilename'] is not None else ''),
    model=model,
    trainOpt=trainOpt,
    outputFolder=params['outputFolder'])
trainer.train(trainInput, trainTarget, validInput, validTarget)

if params['testDataFilename'] is not None:
    if params['imageqa']:
        imageqa_test.testAll(
            trainer.name, model, params['dataFolder'], params['outputFolder'])
    else:
# Smile detection using Haar Cascade + CNN.
import sensor, time, image, os, nn

sensor.reset()                        # Reset and initialize the sensor.
sensor.set_contrast(2)
sensor.set_pixformat(sensor.RGB565)   # Set pixel format to RGB565
sensor.set_framesize(sensor.QVGA)     # Set frame size to QVGA (320x240)
sensor.skip_frames(time=2000)
sensor.set_auto_gain(False)

# Load smile detection network
net = nn.load('/smile.network')

# Load Face Haar Cascade
face_cascade = image.HaarCascade("frontalface", stages=25)
print(face_cascade)

# FPS clock
clock = time.clock()

while True:
    clock.tick()
    # Capture snapshot
    img = sensor.snapshot()
    # Find faces.
    objects = img.find_features(face_cascade, threshold=0.75, scale_factor=1.25)
    # Detect smiles
    for r in objects:
        # Resize and center detection area
        self.clear_button = QPushButton(self, text="Clear field")
        self.clear_button.setGeometry(20, 10, 256, 50)
        self.clear_button.clicked.connect(self.clear_input)
        self.translate_button = QPushButton(self, text="Convert to text")
        self.translate_button.setGeometry(360, 10, 256, 50)
        self.translate_button.clicked.connect(self.translate_number)
        self.show()

    def clear_input(self):
        self.input.image.fill(Qt.white)
        self.repaint()

    def translate_number(self):
        img = np.array(recarray_view(self.input.image.scaled(28, 28)))
        prediction, percent = nn.get_value(img)
        self.info.setText(f'Probability: {round(percent * 1000) / 10}%')
        self.output.setText(str(prediction))


if __name__ == "__main__":
    app = QApplication([])
    if not nn.load():
        QMessageBox.warning(None, "Error",
                            "An error occurred while loading the neural network. "
                            "Please check that it is present.")
    else:
        window = Window()
        sys.exit(app.exec_())
        # Batch info
        batchEnd = min(N, batchStart + numExPerBat)
        Ytmp = model.forward(X[batchStart:batchEnd], dropout=False)
        if Y is None:
            Yshape = np.copy(Ytmp.shape)
            Yshape[0] = N
            Y = np.zeros(Yshape)
        Y[batchStart:batchEnd] = Ytmp
        batchStart += numExPerBat
    return Y


if __name__ == '__main__':
    """
    Usage: test.py id -test test.npy
    """
    taskId = sys.argv[1]
    for i in range(2, len(sys.argv)):
        if sys.argv[i] == '-test':
            testDataFile = sys.argv[i + 1]
    testAnswerFile = os.path.join('results/%s' % taskId,
                                  '%s.test.o.txt' % taskId)
    testTruthFile = os.path.join('results/%s' % taskId,
                                 '%s.test.t.txt' % taskId)
    modelFile = 'results/%s/%s.model.yml' % (taskId, taskId)
    model = nn.load(modelFile)
    model.loadWeights(np.load('results/%s/%s.w.npy' % (taskId, taskId)))
    testData = np.load(testDataFile)
    X = testData
    Y = test(model, X)
    np.savetxt(testAnswerFile, Y, delimiter=',')
import sensor, time, nn, image, os

sensor.reset()                        # Reset and initialize the sensor.
sensor.set_contrast(3)
sensor.set_pixformat(sensor.RGB565)   # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)     # Set frame size to QVGA (320x240)
sensor.set_windowing((192, 192))      # Set 192x192 window
sensor.set_framerate(2 << 9 | 4 << 11)
sensor.skip_frames(time=100)          # Wait for settings to take effect.

# net = nn.load('/cifar10.network')
net = nn.load('/cifar10_fast.network')
labels = ['plane', 'auto', 'bird', 'cat', 'deer',
          'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
tAvg = 40
while True:
    clock.tick()
    img = sensor.snapshot()
    t0 = time.ticks()
    lst = net.search(img, threshold=0.595, min_scale=1, scale_mul=0.8,
                     x_overlap=-1, y_overlap=-1, contrast_threshold=0.5,
                     softmax=False)
    t1 = time.ticks() - t0
    # Fixed-point exponential moving average (weights 200/256 and 56/256).
    tAvg = (tAvg * 200 + t1 * 56) >> 8
    for obj in lst:
        print('Detected %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
        rc = obj.rect()
        print(type(rc))
# In this example we slide the CNN detector window over the image and get a list of activations
# where there might be an object. Note that using a CNN with a sliding window is extremely
# computationally expensive, so for an exhaustive search do not expect the CNN to run in real time.

import sensor, image, time, os, nn

sensor.reset()                         # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565)    # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)      # Set frame size to QVGA (320x240)
sensor.set_windowing((128, 128))       # Set 128x128 window.
sensor.skip_frames(time=750)           # Don't let autogain run very long.
sensor.set_auto_gain(False)            # Turn off autogain.
sensor.set_auto_exposure(False)        # Turn off auto exposure.

# Load cifar10 network (You can get the network from OpenMV IDE).
net = nn.load('/cifar10.network')
# Faster, smaller and less accurate.
# net = nn.load('/cifar10_fast.network')
labels = ['airplane', 'automobile', 'bird', 'cat', 'deer',
          'dog', 'frog', 'horse', 'ship', 'truck']

clock = time.clock()
while True:
    clock.tick()
    img = sensor.snapshot()

    # net.search() will search an roi in the image for the network (or the whole image if the roi is
    # not specified). At each location, if one of the classifier outputs is larger than threshold,
    # the location and label will be stored in an object list and returned. At each scale the
    # detection window is moved around in the ROI using x_overlap (0-1) and y_overlap (0-1) as a
    # guide. If you set the overlap to 0.5 then each detection window will overlap the previous one
    # by 50%.
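    # The excerpt ends before the actual search() call. Based on the other
    # search() calls in this file, a typical invocation looks like the
    # following (threshold and overlap values are illustrative, not the
    # original example's):
    for obj in net.search(img, threshold=0.6, min_scale=1.0, scale_mul=0.8,
                          x_overlap=0.5, y_overlap=0.5,
                          contrast_threshold=0.5):
        print('Detected %s - Confidence %f%%' % (labels[obj.index()], obj.value()))
        img.draw_rectangle(obj.rect(), color=(255, 255, 255))
    print(clock.fps())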
import nn

model = nn.load("model.NND")
print(model.layers[0].weights)