def predict_mask(img, net, FLAGS, DATA):
  """Run the network on a single image file and save a colorized mask.

  Reads the image at path `img`, predicts a label mask with `net`,
  remaps it to colors, optionally scores it against `FLAGS.label`,
  writes the color mask to `FLAGS.log`, and (if verbose) displays
  the image / blended mask / mask side by side.
  """
  # load the image; bail out early if the path is unreadable
  cvim = cv2.imread(img, cv2.IMREAD_UNCHANGED)
  if cvim is None:
    print("No image to open for ", img)
    return

  # run inference and report wall-clock time
  start = time.time()
  mask = net.predict(cvim, path=FLAGS.path + '/' + FLAGS.model,
                     verbose=FLAGS.verbose)
  elapsed = time.time() - start
  print("Prediction for img ", img, ". Elapsed: ", elapsed, "s")

  # convert the raw label mask into a BGR color image
  color_mask = util.prediction_to_color(
      mask, DATA["label_remap"], DATA["color_map"])

  # optionally score the prediction against a ground-truth label image
  if FLAGS.label is not None:
    label = cv2.imread(FLAGS.label, 0)
    if label is None:
      print("No label to open")
      quit()
    net.individual_accuracy(mask, label)

  # persist the colorized mask next to the log directory
  cv2.imwrite(FLAGS.log + "/" + os.path.basename(img), color_mask)

  if FLAGS.verbose:
    # blend mask over the input and show input | blend | mask in one strip
    im, transparent_mask = util.transparency(cvim, color_mask)
    all_img = np.concatenate((im, transparent_mask, color_mask), axis=1)
    util.im_tight_plt(all_img)
    util.im_block()

  return
def predict_mask(img, sess, input, output, FLAGS, DATA):
  """Run a TensorFlow session on a single image file and save a color mask.

  Feeds the image at path `img` through `sess` (placeholder `input`,
  fetch `output`), colorizes the first batch element of the result,
  writes it to `FLAGS.log`, and (if verbose) displays the image,
  blended mask, and mask side by side.
  """
  # load the image; bail out early if the path is unreadable
  cvim = cv2.imread(img, cv2.IMREAD_UNCHANGED)
  if cvim is None:
    print("No image to open for ", img)
    return

  # run the graph on a batch of one and report wall-clock time
  start = time.time()
  mask = sess.run(output, feed_dict={input: [cvim]})
  elapsed = time.time() - start
  print("Prediction for img ", img, ". Elapsed: ", elapsed, "s")

  # strip the batch dimension and convert labels to a BGR color image
  color_mask = util.prediction_to_color(
      mask[0, :, :], DATA["label_remap"], DATA["color_map"])

  # persist the colorized mask in the log directory
  cv2.imwrite(FLAGS.log + "/" + os.path.basename(img), color_mask)

  if FLAGS.verbose:
    # blend mask over the input and show input | blend | mask in one strip
    im, transparent_mask = util.transparency(cvim, color_mask)
    all_img = np.concatenate((im, transparent_mask, color_mask), axis=1)
    util.im_tight_plt(all_img)
    util.im_block()

  return
def predict_mask(img, stream, d_input, d_output, context, FLAGS, DATA):
  """Run a TensorRT engine on a single image file and save a color mask.

  Loads the image at `img`, resizes and normalizes it, copies it to the
  GPU buffer `d_input`, enqueues inference on `context`/`stream`, copies
  the logits back from `d_output`, argmaxes them into a label mask,
  colorizes it and writes it to `FLAGS.log`. If verbose, shows the
  image, blended mask, and mask side by side.
  """
  # load the image first and validate it BEFORE converting dtype.
  # BUG FIX: the original called .astype(np.float32) directly on the
  # result of cv2.imread, which raises AttributeError on a missing or
  # unreadable file before the None guard could ever run.
  cvim = cv2.imread(img, cv2.IMREAD_UNCHANGED)
  if cvim is None:
    print("No image to open for ", img)
    return
  cvim = cvim.astype(np.float32)

  # resize to the network's expected spatial dimensions
  cvim = cv2.resize(cvim,
                    (DATA['img_prop']['width'], DATA['img_prop']['height']),
                    interpolation=cv2.INTER_LINEAR)

  # HWC -> CHW, C-contiguous, normalized to roughly [-1, 1]
  tcvim = np.transpose(cvim, axes=(2, 0, 1))
  tcvim = tcvim.copy(order='C')
  tcvim = (tcvim - 128.0) / 128.0

  # Bindings provided as pointers to the GPU memory.
  # PyCUDA lets us do this for memory allocations by
  # casting those allocations to ints
  bindings = [int(d_input), int(d_output)]

  # allocate memory on the CPU to hold results after inference
  output = np.empty((len(DATA['label_map']),
                     DATA['img_prop']['height'],
                     DATA['img_prop']['width']),
                    dtype=np.float32, order='C')

  # predict mask from image
  start = time.time()
  cuda.memcpy_htod_async(d_input, tcvim, stream)
  # execute model (batch size 1)
  context.enqueue(1, bindings, stream.handle, None)
  # transfer predictions back
  cuda.memcpy_dtoh_async(output, d_output, stream)
  # synchronize threads so `output` is valid before reading it
  stream.synchronize()
  print("Prediction for img ", img, ". Elapsed: ", time.time() - start, "s")

  # collapse per-class logits (C, H, W) into a label mask (H, W)
  mask = np.argmax(output, axis=0)

  # change to color
  color_mask = util.prediction_to_color(
      mask, DATA["label_remap"], DATA["color_map"])

  # save to log folder
  cv2.imwrite(FLAGS.log + "/" + os.path.basename(img), color_mask)

  if FLAGS.verbose:
    # blend mask over the input and show input | blend | mask in one strip
    im, transparent_mask = util.transparency(cvim, color_mask)
    all_img = np.concatenate((im, transparent_mask, color_mask), axis=1)
    util.im_tight_plt(all_img)
    util.im_block()

  return