Example #1
def produce_view(input_image, class_image, id2label, viewstyle):
    """Produce an image ready for plotting or saving."""
    view = None
    if viewstyle == 'original':
        view = input_image
    elif (viewstyle == 'predictions') or (viewstyle == 'overlay'):
        view = color_class_image(class_image, id2label)
        if viewstyle == 'overlay':
            view = (0.5 * view.astype(np.float32) + 0.5 * input_image.astype(np.float32)).astype(np.uint8)
    else:
        print("Unknown view style")
    return view
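A brief usage sketch for produce_view, assuming an RGB uint8 input image, a per-pixel class map, and an id2label mapping such as the one imported from ade20k_labels in Example #7; the toy arrays below are illustrative only.

import numpy as np

# toy inputs: a 2x2 RGB image and a matching per-pixel class map (illustrative values)
toy_image = np.zeros((2, 2, 3), dtype=np.uint8)
toy_classes = np.array([[1, 2], [2, 1]])

# 'overlay' alpha-blends the colored prediction with the input image
view = produce_view(toy_image, toy_classes, id2label, viewstyle='overlay')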
    def run(self, args_s=""):
        args_d = json.loads(args_s)
        iname = args_d['panid']
        ext = args_d['ext']
        filename = args_d['filename']
        self.socketIO.emit('update', {
            'id': iname,
            "phase": 3,
            'val': -1,
            'max': -1
        })
        self.socketIO.wait(seconds=1)
        class_scores = img_combine2.img_combine2(
            namedtuple('Struct', args_d.keys())(*args_d.values()))
        print("blended...")
        img = misc.imread("./{0}{1}".format(iname, ext))
        img = misc.imresize(img, 10)

        class_image = np.argmax(class_scores, axis=2)
        pm = np.max(class_scores, axis=2)
        colored_class_image = utils.color_class_image(class_image,
                                                      args_d['model'])
        # colored_class_image is [0.0-1.0], img is [0-255]
        alpha_blended = 0.5 * colored_class_image + 0.5 * img
        misc.imsave(iname + "_seg_blended" + ext, alpha_blended)
        # for filename in tqdm.tqdm(os.listdir('/tmp')):
        #     if filename.endswith(".npy"):
        #         try:
        #             os.remove(filename)
        #         except Exception:
        #             pass
        self.socketIO.emit('update', {
            'id': iname,
            "phase": 3,
            'val': 1,
            'max': 1
        })
        self.socketIO.wait(seconds=1)
        self.responseQueue.put(args_d['local_id'])
    def on_request(self, *args):
        global mutex1, mutex2, mutex, mutex_data
        # tf.reset_default_graph()
        print("got request")
        data = args[0]
        filename, ext = splitext(data['input_path'])
        panid = basename(filename)
        # download file from upper server
        print("download...")
        sshdownload(data)
        args_d = {}
        remote_uuid = "{0}{1}".format(uuid.uuid4(), "_deeplearning")
        socketIO = SocketIO('localhost', ssht2.local_bind_port,
                            LoggingNamespace)
        args_d['remote_uuid'] = remote_uuid
        args_d['socketIO'] = socketIO
        args_d['model'] = "pspnet50_ade20k"

        args_d['sliding'] = True
        args_d['flip'] = True
        args_d['multi_scale'] = True
        print("phase 1...")
        args_d['input_path'] = "./{0}{1}".format(panid, ext)
        args_d['output_path'] = "{2}/{0}{1}".format(panid, ext,
                                                    config_p1_folder)
        pre_process.pre_process(
            namedtuple('Struct', args_d.keys())(*args_d.values()))
        print("phase 2...")
        # args_d['sess']=sess
        # args_d['model_ok']=pspnet
        args_d['input_path'] = config_p1_folder + '/'
        args_d['input_path_filter'] = panid

        args_d['output_path'] = config_p2_folder + '/'
        del args_d['socketIO']
        mutex.acquire()
        with open("temp_arg.json", 'w+') as fout:
            fout.write(json.dumps(args_d))
        mutex.release()
        # mutex1.put(args_d,block=True)
        print("sent task,wait response")
        while (1):
            # print("waiting...")
            mutex.acquire()
            if not os.path.exists("temp_arg.json"):
                break
            mutex.release()
            time.sleep(1)
        mutex.release()
        # mutex2.get(block=True)
        args_d['socketIO'] = socketIO
        print("phase 3...")
        args_d['input_path'] = "./{0}{1}".format(panid, ext)
        args_d['input_path2'] = "{2}/{0}{1}".format(panid, ext,
                                                    config_p2_folder)
        args_d['output_path'] = "{2}/{0}{1}".format(panid, ext,
                                                    config_p3_folder)
        class_scores = img_combine2.img_combine2(
            namedtuple('Struct', args_d.keys())(*args_d.values()))
        print("blended...")
        img = misc.imread("./{0}{1}".format(panid, ext))
        img = misc.imresize(img, 10)

        class_image = np.argmax(class_scores, axis=2)
        pm = np.max(class_scores, axis=2)
        colored_class_image = utils.color_class_image(class_image,
                                                      args_d['model'])
        # colored_class_image is [0.0-1.0], img is [0-255]
        alpha_blended = 0.5 * colored_class_image + 0.5 * img
        misc.imsave(filename + "_seg_blended" + ext, alpha_blended)
        print("upload...")
        sshupload(data, filename + "_seg_blended" + ext)
        print("garbage cleaning")
        print("success")
        self.emit("next")
Example #4
                              input_shape=(768, 480),
                              weights=args.weights)

        for i, img_path in enumerate(images):
            print("Processing image {} / {}".format(i + 1, len(images)))
            img = misc.imread(img_path, mode='RGB')
            cimg = misc.imresize(img, 20)

            # class_scores = predict_multi_scale(img, pspnet, EVALUATION_SCALES, sliding, flip)

            probs = pspnet.predict(cimg, args.flip)

            cm = np.argmax(probs, axis=2)
            pm = np.max(probs, axis=2)

            colored_class_image, color_names = utils.color_class_image(
                cm, args.model)

            count = collections.Counter(color_names)
            count = dict(count)

            total = sum(count.values())
            for key, value in count.items():
                # print(value, sum(count.values()))
                count[key] = value / total

            # sort in descending order according to the value
            count = sorted([(value, key) for (key, value) in count.items()],
                           reverse=True)
        if "pspnet50" in args.model:
            pspnet = PSPNet50(nb_classes=150, input_shape=(473, 473),
                              weights=args.model)
        elif "pspnet101" in args.model:
            if "cityscapes" in args.model:
                pspnet = PSPNet101(nb_classes=19, input_shape=(713, 713),
                                   weights=args.model)
            if "voc2012" in args.model:
                pspnet = PSPNet101(nb_classes=21, input_shape=(473, 473),
                                   weights=args.model)

        else:
            print("Network architecture not implemented.")

        if args.multi_scale:
            # EVALUATION_SCALES = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]  # original implementation, must be all floats!
            EVALUATION_SCALES = [0.15, 0.25, 0.5]  # must be all floats!

        class_scores = predict_multi_scale(img, pspnet, EVALUATION_SCALES, args.sliding, args.flip)

        print("Writing results...")

        class_image = np.argmax(class_scores, axis=2)
        pm = np.max(class_scores, axis=2)
        colored_class_image = utils.color_class_image(class_image, args.model)
        # colored_class_image is [0.0-1.0] img is [0-255]
        alpha_blended = 0.5 * colored_class_image + 0.5 * img
        filename, ext = splitext(args.output_path)
        misc.imsave(filename + "_seg" + ext, colored_class_image)
        misc.imsave(filename + "_probs" + ext, pm)
        misc.imsave(filename + "_seg_blended" + ext, alpha_blended)
Example #6
ceiling_ids = {label.id for label in labels if label.name == 'ceiling'}  # avoid shadowing the built-in id
# TODO: get label and id

img = misc.imread('PSPNet-Keras-tensorflow-master/example_images/ade20k2.jpg')

#create the neural network
pspnet = PSPNet50(nb_classes=150,
                  input_shape=(473, 473),
                  weights='pspnet50_ade20k')

#Run over input data
EVALUATION_SCALES = [1.0]  # must be all floats! (single scale, mirroring Example #7's default)
sliding = True  # defaults mirror Example #7's argument defaults
flip = True
class_scores = predict_multi_scale(img, pspnet, EVALUATION_SCALES, sliding,
                                   flip)

print("Writing results...")

class_image = np.argmax(class_scores,
                        axis=2)  #Get best classes (highest score!!)
pm = np.max(class_scores, axis=2)
colored_class_image = utils.color_class_image(class_image, 'ade20k')
# colored_class_image is [0.0-1.0] img is [0-255]
alpha_blended = 0.5 * colored_class_image + 0.5 * img

#filename, ext = splitext('PSPNet-Keras-tensorflow-master/example_results/')

#misc.imsave(filename + "_seg" + ext, colored_class_image)
#misc.imsave(filename + "_probs" + ext, pm)
#misc.imsave(filename + "_seg_blended" + ext, alpha_blended)

misc.imshow(colored_class_image)
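A hedged follow-up to the TODO in this example: given the set of ceiling label ids and the predicted class_image, a per-class mask can be derived with numpy. ceiling_ids and class_image are taken from the snippet above; the fraction computation is illustrative.

import numpy as np

# assuming ceiling_ids and class_image from the snippet above
ceiling_mask = np.isin(class_image, list(ceiling_ids))  # True where the ceiling class was predicted
ceiling_fraction = float(ceiling_mask.mean())           # share of pixels labelled as ceiling
print("ceiling covers {:.1%} of the image".format(ceiling_fraction))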
Example #7
def main():
    """Run when running this module as the primary one."""
    EVALUATION_SCALES = [1.0]  # must be all floats!

    parser = argparse.ArgumentParser()
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        default="pspnet50_ade20k",
                        help="Model/Weights to use",
                        choices=[
                            "pspnet50_ade20k", "pspnet101_cityscapes",
                            "pspnet101_voc2012"
                        ])
    parser.add_argument("-i",
                        "--input_path",
                        type=str,
                        default="../example_images",
                        help="Path to the input images")
    parser.add_argument("-o",
                        "--output_path",
                        type=str,
                        default="../example_results",
                        help="Path to output")
    parser.add_argument("-g",
                        "--groundtruth_path",
                        type=str,
                        default="../example_groundtruth",
                        help="Path to groundtruth")
    parser.add_argument("--id", default="0")
    parser.add_argument(
        "-s",
        "--sliding",
        action="store_true",
        default=True,
        help="Whether the network should be slid over the original image for prediction."
    )
    parser.add_argument(
        "-f",
        "--flip",
        action="store_true",
        default=True,
        help="Whether the network should predict on both the image and its flipped version.")
    parser.add_argument(
        "-ms",
        "--multi_scale",
        action="store_true",
        help="Whether the network should predict on multiple scales.")
    parser.add_argument("-hm",
                        "--heat_maps",
                        action="store_true",
                        help="Whether the network should diplay heatmaps.")
    parser.add_argument("-v",
                        "--vis",
                        action="store_true",
                        help="Whether an interactive plot should be diplayed.")
    parser.add_argument(
        "-cci",
        "--complete_coarse_image",
        action="store_true",
        help="Whether a coarse imae should be completed with predictions.")
    parser.add_argument(
        "-e",
        "--evaluate",
        action="store_true",
        help="Whether an evaluation against groundtruth should be attempted.")
    args = parser.parse_args()

    environ["CUDA_VISIBLE_DEVICES"] = args.id

    sess = tf.Session()
    K.set_session(sess)

    with sess.as_default():
        print(args)
        import os
        cwd = os.getcwd()
        print("Running in %s" % cwd)

        image_paths = []
        if isfile(args.input_path):
            image_paths.append(args.input_path)
        elif isdir(args.input_path):
            file_types = ("png", "jpg")
            for file_type in file_types:
                image_paths.extend(
                    glob.glob(join(args.input_path + "/**/*." + file_type),
                              recursive=True))
            image_paths = sorted(image_paths)
        # print(image_paths)

        if "pspnet50" in args.model:
            pspnet = PSPNet50(nb_classes=150,
                              input_shape=(473, 473),
                              weights=args.model)
            if "ade20k" in args.model:
                from ade20k_labels import id2label, name2label

        elif "pspnet101" in args.model:
            if "cityscapes" in args.model:
                pspnet = PSPNet101(nb_classes=19,
                                   input_shape=(713, 713),
                                   weights=args.model)
                from cityscapes_labels import id2label, name2label
            if "voc2012" in args.model:
                pspnet = PSPNet101(nb_classes=21,
                                   input_shape=(473, 473),
                                   weights=args.model)
                from pascal_voc_labels import id2label, name2label

        else:
            print("Network architecture not implemented.")

        if args.multi_scale:
            EVALUATION_SCALES = [
                0.5, 0.75, 1.0, 1.25, 1.5, 1.75
            ]  # original implementation, must be all floats!

        for image_path in image_paths:
            image_name, ext = splitext(os.path.basename(image_path))
            image_name = image_name.replace(
                "_leftImg8bit", ""
            )  # strip leftImg8bit tag for gt matching and producing groundtruth

            print("Predicting image name: %s" % (image_name + ext))
            img = misc.imread(image_path)
            class_scores = predict_multi_scale(img, pspnet, EVALUATION_SCALES,
                                               args.sliding, args.flip)
            if args.heat_maps:
                # show_class_heatmap(class_scores, "person")
                show_class_heatmaps(class_scores)

            # visualize_prediction(img, class_scores, id2label)
            class_image = np.argmax(class_scores, axis=2)

            output_path, _ = splitext(args.output_path)

            if not os.path.exists(output_path):
                os.makedirs(output_path)
            output_path = join(output_path, image_name)

            print("Writing results to %s" % (output_path + ext))

            confidence_map = np.max(
                class_scores, axis=2
            )  # probability of the most likely class, a vague measure of the network's confidence
            colored_class_image = color_class_image(class_image, id2label)

            # colored_class_image is [0.0-1.0] img is [0-255]
            alpha_blended = 0.5 * colored_class_image + 0.5 * img
            if "cityscapes" in args.model:
                class_image = trainid_to_class_image(class_image)
            misc.imsave(output_path + "_gtFine_labelIds" + ext, class_image)
            misc.imsave(output_path + "_seg" + ext, colored_class_image)
            misc.imsave(output_path + "_probs" + ext, confidence_map)
            misc.imsave(output_path + "_seg_blended" + ext, alpha_blended)

            gt_path = find_matching_gt(args.groundtruth_path,
                                       image_name,
                                       args.model,
                                       verbose=True)
            if gt_path is not None:
                if args.complete_coarse_image:  # only for cityscapes
                    try:
                        coarse_image = misc.imread(gt_path)
                        class_image = complete_coarse_image(
                            coarse_image, class_image)
                        misc.imsave(output_path + "_gtFine_labelIds" + ext,
                                    class_image)
                    except AttributeError as err:
                        print("Warning: Could not read groundtruth: %s" % err)

                if args.evaluate:
                    if "cityscapes" in args.model:
                        evaluate_iou([class_image], [misc.imread(gt_path)],
                                     classes=35)
                    else:
                        # gt_image to class image
                        gt_image = misc.imread(gt_path)
                        gt_class_image = gt_image_to_class_image(
                            gt_image, id2label)
                        evaluate_iou([class_image], [gt_class_image],
                                     classes=pspnet.nb_classes)
            else:
                print("Could not find groundtruth for %s" % image_name)