def main(): input_file = "image.jpg" output_file = "labels.png" # Download the model from https://goo.gl/ciEYZi saved_model_path = "/data/download/crfrnn_keras_model.h5" model = get_crfrnn_model_def() model.load_weights(saved_model_path) img_data, img_h, img_w = util.get_preprocessed_image(input_file) probs = model.predict(img_data, verbose=False)[0, :, :, :] segmentation = util.get_label_image(probs, img_h, img_w) segmentation.save(output_file)
from crfrnn_model import get_crfrnn_model_def
import util


def main():
    input_file = 'image.jpg'
    output_file = 'labels.png'

    # Download the model from https://goo.gl/ciEYZi
    saved_model_path = 'crfrnn_keras_model.h5'

    model = get_crfrnn_model_def()
    model.load_weights(saved_model_path)

    img_data, img_h, img_w = util.get_preprocessed_image(input_file)
    probs = model.predict(img_data, verbose=False)[0, :, :, :]
    segmentation = util.get_label_image(probs, img_h, img_w)
    segmentation.save(output_file)
import time

from crfrnn_model import get_crfrnn_model_def
import util


def main():
    input_file = 'image.jpg'
    output_file = 'labels.png'

    # Download the model from https://goo.gl/ciEYZi
    saved_model_path = 'crfrnn_keras_model.h5'

    model = get_crfrnn_model_def()
    # model.load_weights(saved_model_path)

    img_data, img_h, img_w = util.get_preprocessed_image(input_file)

    # time.clock() was removed in Python 3.8; perf_counter() measures elapsed time.
    tic = time.perf_counter()
    probs = model.predict(img_data, verbose=False)[0, :, :, :]
    toc = time.perf_counter()

    segmentation = util.get_label_image(probs, img_h, img_w)
    segmentation.save(output_file)
    print("Time taken: " + str(toc - tic))
from crfrnn_model import get_crfrnn_model_def
import util


def main():
    input_file = 'image.jpg'
    output_file = 'labels.png'
    segment_file = 'segment.jpg'

    # Download the model from https://goo.gl/ciEYZi
    saved_model_path = 'crfrnn_keras_model.h5'

    # This variant of the model takes an additional segmentation input.
    model = get_crfrnn_model_def(num_segs=1)
    model.load_weights(saved_model_path)

    img_data, img_h, img_w = util.get_preprocessed_image(input_file)
    seg_data, seg_h, seg_w = util.get_preprocessed_image(segment_file)
    # img_data = img_data.reshape([1, 500, 500, 3])
    # seg_data = seg_data.reshape([1, 500, 500, 3])

    probs = model.predict([img_data, seg_data], verbose=False)[0, :, :, :]
    segmentation = util.get_label_image(probs, img_h, img_w)
    segmentation.save(output_file)
import cv2

from crfrnn_model import get_crfrnn_model_def
import util


def main():
    input_file = 'hiking.jpg'
    mask_file = 'labels.png'

    # Download the model from https://goo.gl/ciEYZi
    saved_model_path = 'crfrnn_keras_model.h5'

    model = get_crfrnn_model_def()
    model.load_weights(saved_model_path)

    # This variant of util returns the original image size as a fourth value.
    img_data, img_h, img_w, size = util.get_preprocessed_image(input_file)
    probs = model.predict(img_data, verbose=False)[0]
    segmentation = util.get_label_image(probs, img_h, img_w, size)
    segmentation.save(mask_file)

    # White out every pixel that the predicted mask labels as background.
    input_content = cv2.imread(input_file)
    mask_content = cv2.imread(mask_file)
    input_content[mask_content == 0] = 255
    print(input_content.shape)
    cv2.imwrite('output.jpg', input_content)
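The masking step above compares all three colour channels against zero; the same whitening can be written with a single-channel mask. A minimal sketch (the function name and file paths are illustrative, not from the original code):

import cv2


def white_out_background(image_path, mask_path, out_path):
    # Load the photo and the predicted label image (read as grayscale).
    img = cv2.imread(image_path)
    mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)
    # The 2-D boolean mask broadcasts over the colour channels, so every
    # background pixel (label 0) is painted white in one assignment.
    img[mask == 0] = 255
    cv2.imwrite(out_path, img)


white_out_background('hiking.jpg', 'labels.png', 'output.jpg')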
import os

import numpy as np
import PIL.Image

from crfrnn_model import get_crfrnn_model_def
import util


def path_leaf(path):
    # Minimal helper assumed here: return just the file name component of a path.
    return os.path.basename(path)


def main(inp_f):
    input_file = inp_f
    tmp = path_leaf(inp_f)
    print(tmp)
    output_file = '../saliency_maps/' + tmp

    # Download the model from https://goo.gl/ciEYZi
    saved_model_path = 'crfrnn_keras_model.h5'

    model = get_crfrnn_model_def()
    model.load_weights(saved_model_path)

    img_data, img_h, img_w = util.get_preprocessed_image(input_file)
    probs = model.predict(img_data, verbose=False)[0, :, :, :]
    segmentation = util.get_label_image(probs, img_h, img_w)

    # Binarize the label image: any non-background label becomes foreground (255).
    a = np.array(segmentation)
    print(np.shape(a))
    a[a > 0] = 255
    segmentation = PIL.Image.fromarray(a)
    segmentation.save(output_file)
saved_model_path = "SegmentationModel_Weights.h5" nTest = 128 font = ImageFont.truetype("/usr/share/fonts/TTF/Anonymous Pro.ttf", 8, encoding="unic") label_seg_gt = u"Segmentation (Ground Truth)" label_im = u"Image" label_seg = u"Computed Segmentation" text_width_gt, text_height_gt = font.getsize(label_seg_gt) text_width_im, text_height_im = font.getsize(label_im) text_width_seg, text_height_seg = font.getsize(label_seg) model = get_crfrnn_model_def() model.load_weights(saved_model_path, by_name=True) for i in range(nTest): seg_gt, im, im_input = MakeInputData(n=np.random.randint(low=1, high=5)) probs = model.predict(im_input, verbose=False)[0, :, :, :] labels = probs.argmax(axis=2) seg_gt = Image.fromarray((seg_gt * 255).astype("uint8")) im = Image.fromarray((im * 255).astype("uint8")) labels = Image.fromarray((labels * 255).astype("uint8")) labels = labels.resize((GenImSize, GenImSize), Image.NEAREST) seg_gt = image_tint(seg_gt, tint='#8AFAAB') im = image_tint(im, tint='#8AA0FA') labels = image_tint(labels, tint='#F98A8A')
import keras
from keras.optimizers import Adam

from crfrnn_model import get_crfrnn_model_def
import util


def main():
    input_file = 'image.jpg'
    output_file = 'labels.png'

    # Download the model from https://goo.gl/ciEYZi
    saved_model_path = 'crfrnn_keras_model.h5'

    model = get_crfrnn_model_def()
    model.compile(optimizer=Adam(),
                  loss='categorical_crossentropy',
                  metrics=['categorical_accuracy'])

    # Create two generators with the same arguments so that the images and
    # their masks receive identical augmentations.
    data_gen_args = dict(featurewise_center=True,
                         featurewise_std_normalization=True,
                         rotation_range=90,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         zoom_range=0.2)
    image_datagen = keras.preprocessing.image.ImageDataGenerator(**data_gen_args)
    mask_datagen = keras.preprocessing.image.ImageDataGenerator(**data_gen_args)

    # Provide the same seed to both flow calls so image/mask batches stay aligned.
    # Note: featurewise_center/std_normalization would normally also require
    # calling fit() on a sample of the training images.
    seed = 1

    # xtrain/ holds the training images, ytrain/ the corresponding masks.
    image_generator = image_datagen.flow_from_directory('xtrain',
                                                        target_size=(500, 500),
                                                        class_mode=None,
                                                        seed=seed)
    mask_generator = mask_datagen.flow_from_directory('ytrain',
                                                      target_size=(500, 500),
                                                      class_mode=None,
                                                      seed=seed)

    # Combine the two generators into one that yields (image, mask) pairs.
    train_generator = zip(image_generator, mask_generator)
    model.fit_generator(train_generator, steps_per_epoch=2000, epochs=50)

    img_data, img_h, img_w = util.get_preprocessed_image(input_file)
    probs = model.predict(img_data, verbose=False)[0, :, :, :]
    segmentation = util.get_label_image(probs, img_h, img_w)
    segmentation.save(output_file)
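The zip(image_generator, mask_generator) call mirrors the pairing pattern from the Keras ImageDataGenerator documentation; if a particular Keras version refuses a plain zip object in fit_generator, a small wrapper generator (a sketch, not part of the original code) gives the same behaviour:

def pair_batches(image_gen, mask_gen):
    # Yield (image_batch, mask_batch) tuples indefinitely, as fit_generator expects.
    while True:
        yield next(image_gen), next(mask_gen)

Inside main(), train_generator = pair_batches(image_generator, mask_generator) would then replace the zip call, with the rest of the training loop unchanged.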