def run(rundir, chanIdx, q, args):
    xspub = xstream.Publisher()
    xssub = xstream.Subscribe(chanIdx2Str(chanIdx))
    runner = Runner(rundir)
    inTensors = runner.get_input_tensors()
    outTensors = runner.get_output_tensors()

    q.put(1)  # ready for work

    fpgaBlobs = None
    fcOutput = None
    labels = xdnn_io.get_labels(args['labels'])
    xdnnCPUOp = xdnn.XDNNCPUOp("%s/weights.h5" % rundir)

    while True:
        try:
            payload = xssub.get()
            if not payload:
                break
            (meta, buf) = payload

            if fpgaBlobs is None:
                # allocate buffers
                fpgaBlobs = []
                batchsz = meta['shape'][0]  # inTensors[0].dims[0]

                for io in [inTensors, outTensors]:
                    blobs = []
                    for t in io:
                        shape = (batchsz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
                        blobs.append(np.empty(shape, dtype=np.float32, order='C'))
                    fpgaBlobs.append(blobs)

                fcOutput = np.empty((batchsz, args['outsz']), dtype=np.float32, order='C')

            fpgaInput = fpgaBlobs[0][0]
            assert tuple(meta['shape']) == fpgaInput.shape
            data = np.frombuffer(buf, dtype=np.float32).reshape(fpgaInput.shape)
            np.copyto(fpgaInput, data)

            jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
            runner.wait(jid)

            xdnnCPUOp.computeFC(fpgaBlobs[1][0], fcOutput)
            softmaxOut = xdnnCPUOp.computeSoftmax(fcOutput)
            xdnn_io.printClassification(softmaxOut, meta['images'], labels)
            sys.stdout.flush()

            if meta['id'] % 1000 == 0:
                print("Recvd query %d" % meta['id'])
                sys.stdout.flush()

            del data
            del buf
            del payload

            xspub.send(meta['from'], "success")
        except Exception as e:
            logging.error("Worker exception " + str(e))
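
# Minimal launch sketch (an assumption, not part of the original script): run() above
# is written to be spawned as a separate process that signals readiness through the
# Queue before the dispatcher starts publishing work on its xstream channel. The
# helper name _spawn_worker and its arguments are placeholders for illustration.
import multiprocessing as mp

def _spawn_worker(rundir, chanIdx, args):
    q = mp.Queue()
    p = mp.Process(target=run, args=(rundir, chanIdx, q, args))
    p.start()
    q.get()  # blocks until the worker calls q.put(1), i.e. it is ready for work
    return p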
def main():
    args = xdnn_io.processCommandLine()

    runner = Runner(args['vitis_rundir'])
    inTensors = runner.get_input_tensors()
    outTensors = runner.get_output_tensors()
    batch_sz = args['batch_sz']
    if batch_sz == -1:
        # use Runner's suggested batch size
        batch_sz = inTensors[0].dims[0]

    if args['golden']:
        goldenMap = xdnn_io.getGoldenMap(args['golden'])
        top5Count = 0
        top1Count = 0

    fpgaBlobs = []
    for io in [inTensors, outTensors]:
        blobs = []
        for t in io:
            shape = (batch_sz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
            blobs.append(np.empty(shape, dtype=np.float32, order='C'))
        fpgaBlobs.append(blobs)

    img_paths = xdnn_io.getFilePaths(args['images'])
    labels = xdnn_io.get_labels(args['labels'])
    xdnnCPUOp = xdnn.XDNNCPUOp("%s/weights.h5" % args['vitis_rundir'])
    fcOutput = np.empty((batch_sz, args['outsz']), dtype=np.float32, order='C')

    fpgaInput = fpgaBlobs[0][0]
    for i in range(0, len(img_paths), batch_sz):
        pl = []

        # fill tensor input data from image file
        for j, p in enumerate(img_paths[i:i + batch_sz]):
            img, _ = xdnn_io.loadImageBlobFromFile(p,
                                                   args['img_raw_scale'],
                                                   args['img_mean'],
                                                   args['img_input_scale'],
                                                   fpgaInput.shape[2],
                                                   fpgaInput.shape[3])
            pl.append(p)
            np.copyto(fpgaInput[j], img)

        jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
        runner.wait(jid)

        xdnnCPUOp.computeFC(fpgaBlobs[1][0], fcOutput)
        softmaxOut = xdnnCPUOp.computeSoftmax(fcOutput)

        if args['golden']:
            for j, p in enumerate(img_paths[i:i + batch_sz]):
                top1Count += xdnn_io.isTopK(softmaxOut[j], goldenMap, p, labels, 1)
                top5Count += xdnn_io.isTopK(softmaxOut[j], goldenMap, p, labels, 5)
        else:
            xdnn_io.printClassification(softmaxOut, pl, labels)

    if args['golden']:
        print("\nAverage accuracy (n=%d) Top-1: %.1f%%, Top-5: %.1f%%\n"
              % (len(img_paths),
                 float(top1Count) / float(len(img_paths)) * 100.,
                 float(top5Count) / float(len(img_paths)) * 100.))
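
# The blob-allocation pattern above (prepend the batch size to each tensor's
# remaining dims and allocate C-ordered float32 buffers) is repeated in every
# entry point in this listing. A small helper such as the one below could factor
# it out; this is only a refactoring sketch, not part of the original code.
def allocate_fpga_blobs(inTensors, outTensors, batch_sz):
    """Return [input_blobs, output_blobs], each a list of np.float32 arrays."""
    fpgaBlobs = []
    for io in [inTensors, outTensors]:
        blobs = []
        for t in io:
            shape = (batch_sz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
            blobs.append(np.empty(shape, dtype=np.float32, order='C'))
        fpgaBlobs.append(blobs)
    return fpgaBlobs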
def run(rundir, chanIdx, q, args):
    xspub = xstream.Publisher()
    xssub = xstream.Subscribe(chanIdx2Str(chanIdx))
    runner = Runner(rundir)
    inTensors = runner.get_input_tensors()
    outTensors = runner.get_output_tensors()

    q.put(1)  # ready for work

    fpgaBlobs = None
    labels = xdnn_io.get_labels(args['labels'])

    if args['yolo_version'] == 'v2':
        yolo_postproc = yolo.yolov2_postproc
    elif args['yolo_version'] == 'v3':
        yolo_postproc = yolo.yolov3_postproc
    else:
        assert args['yolo_version'] in ('v2', 'v3'), "--yolo_version should be <v2|v3>"

    biases = bias_selector(args)
    if args['visualize']:
        colors = generate_colors(len(labels))

    while True:
        try:
            payload = xssub.get()
            if not payload:
                break
            (meta, buf) = payload

            if fpgaBlobs is None:
                # allocate buffers
                fpgaBlobs = []
                batchsz = meta['shape'][0]  # inTensors[0].dims[0]

                for io in [inTensors, outTensors]:
                    blobs = []
                    for t in io:
                        shape = (batchsz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
                        blobs.append(np.empty(shape, dtype=np.float32, order='C'))
                    fpgaBlobs.append(blobs)

                fcOutput = np.empty((batchsz, args['outsz']), dtype=np.float32, order='C')

            fpgaInput = fpgaBlobs[0][0]
            assert tuple(meta['shape']) == fpgaInput.shape
            data = np.frombuffer(buf, dtype=np.float32).reshape(fpgaInput.shape)
            np.copyto(fpgaInput, data)

            jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
            runner.wait(jid)

            boxes = yolo_postproc(fpgaBlobs[1], args, meta['image_shapes'], biases=biases)

            if not args['profile']:
                for i in range(min(batchsz, len(meta['image_shapes']))):
                    print("Detected {} boxes in {}".format(len(boxes[i]), meta['images'][i]), flush=True)

            # Save the result
            if args['results_dir']:
                for i in range(min(batchsz, len(meta['image_shapes']))):
                    fname = meta['images'][i]
                    filename = os.path.splitext(os.path.basename(fname))[0]
                    out_file_txt = os.path.join(args['results_dir'], filename + '.txt')
                    print("Saving {} boxes to {}".format(len(boxes[i]), out_file_txt))
                    sys.stdout.flush()
                    saveDetectionDarknetStyle(out_file_txt, boxes[i], meta['image_shapes'][i])

                    if args['visualize']:
                        out_file_png = os.path.join(args['results_dir'], filename + '.png')
                        print("Saving result to {}".format(out_file_png))
                        sys.stdout.flush()
                        draw_boxes(fname, boxes[i], labels, colors, out_file_png)

            if meta['id'] % 1000 == 0:
                print("Recvd query %d" % meta['id'])
                sys.stdout.flush()

            del data
            del buf
            del payload

            xspub.send(meta['from'], "success")
        except Exception as e:
            logging.error("Worker exception " + str(e))
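
# The YOLO worker above and the main() that follows both select the post-processing
# routine with an if/elif chain on args['yolo_version']. A dispatch table is an
# equivalent alternative; this is only a sketch of that option, reusing the
# yolo.yolov2_postproc / yolo.yolov3_postproc callables referenced above.
_YOLO_POSTPROC = {
    'v2': yolo.yolov2_postproc,
    'v3': yolo.yolov3_postproc,
}

def select_yolo_postproc(args):
    if args['yolo_version'] not in _YOLO_POSTPROC:
        raise ValueError("--yolo_version should be <v2|v3>")
    return _YOLO_POSTPROC[args['yolo_version']]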
def main():
    parser = xdnn_io.default_parser_args()
    parser = yolo_parser_args(parser)
    args = parser.parse_args()
    args = xdnn_io.make_dict_args(args)

    # Setup the environment
    img_paths = xdnn_io.getFilePaths(args['images'])
    if args['golden'] or args['visualize']:
        assert args['labels'], "Provide --labels to compute mAP."
        assert args['results_dir'], "For accuracy measurements, provide --results_dir to save the detections."
        labels = xdnn_io.get_labels(args['labels'])
        colors = generate_colors(len(labels))

    if args['yolo_version'] == 'v2':
        yolo_postproc = yolo.yolov2_postproc
    elif args['yolo_version'] == 'v3':
        yolo_postproc = yolo.yolov3_postproc

    runner = Runner(args['vitis_rundir'])

    # Setup the blobs
    inTensors = runner.get_input_tensors()
    outTensors = runner.get_output_tensors()
    batch_sz = args['batch_sz']
    if batch_sz == -1:
        batch_sz = inTensors[0].dims[0]

    fpgaBlobs = []
    for io in [inTensors, outTensors]:
        blobs = []
        for t in io:
            shape = (batch_sz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])
            blobs.append(np.empty(shape, dtype=np.float32, order='C'))
        fpgaBlobs.append(blobs)
    fpgaInput = fpgaBlobs[0][0]

    # Setup the YOLO config
    net_h, net_w = fpgaInput.shape[-2:]
    args['net_h'] = net_h
    args['net_w'] = net_w
    biases = bias_selector(args)

    # Setup profiling env
    prep_time = 0
    exec_time = 0
    post_time = 0

    # Start the execution
    for i in range(0, len(img_paths), batch_sz):
        pl = []
        img_shapes = []

        # Prep images
        t1 = timeit.default_timer()
        for j, p in enumerate(img_paths[i:i + batch_sz]):
            fpgaInput[j, ...], img_shape = xdnn_io.loadYoloImageBlobFromFile(p, net_h, net_w)
            pl.append(p)
            img_shapes.append(img_shape)
        t2 = timeit.default_timer()

        # Execute
        jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])
        runner.wait(jid)

        # Post Proc
        t3 = timeit.default_timer()
        boxes = yolo_postproc(fpgaBlobs[1], args, img_shapes, biases=biases)
        t4 = timeit.default_timer()

        prep_time += (t2 - t1)
        exec_time += (t3 - t2)
        post_time += (t4 - t3)

        for i in range(min(batch_sz, len(img_shapes))):
            print("Detected {} boxes in {}".format(len(boxes[i]), pl[i]))

        # Save the result
        if args['results_dir']:
            for i in range(min(batch_sz, len(img_shapes))):
                filename = os.path.splitext(os.path.basename(pl[i]))[0]
                out_file_txt = os.path.join(args['results_dir'], filename + '.txt')
                print("Saving {} boxes to {}".format(len(boxes[i]), out_file_txt))
                sys.stdout.flush()
                saveDetectionDarknetStyle(out_file_txt, boxes[i], img_shapes[i])

                if args['visualize']:
                    out_file_png = os.path.join(args['results_dir'], filename + '.png')
                    print("Saving result to {}".format(out_file_png))
                    sys.stdout.flush()
                    draw_boxes(pl[i], boxes[i], labels, colors, out_file_png)

    # Profiling results
    if args['profile']:
        print("\nAverage Latency in ms:")
        print("  Image Prep: {0:3f}".format(prep_time * 1000.0 / len(img_paths)))
        print("  Exec: {0:3f}".format(exec_time * 1000.0 / len(img_paths)))
        print("  Post Proc: {0:3f}".format(post_time * 1000.0 / len(img_paths)))
        sys.stdout.flush()

    # mAP calculation
    if args['golden']:
        print()
        print("Computing mAP score : ")
        print("Class names are : {} ".format(labels))
        mAP = calc_detector_mAP(args['results_dir'], args['golden'], len(labels), labels,
                                args['prob_threshold'], args['mapiouthresh'], args['points'])
        sys.stdout.flush()
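
# Conventional entry point, assuming this listing is saved and run as a standalone
# script; main() here refers to the YOLO main() defined directly above.
if __name__ == '__main__':
    main()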