Example #2
    def __init__(self):
        # Creating the graphs
        evalGraph = tf.Graph()
        polyGraph = tf.Graph()

        # Evaluator Network
        tf.logging.info("Building EvalNet...")
        with evalGraph.as_default():
            with tf.variable_scope("discriminator_network"):
                evaluator = EvalNet(_BATCH_SIZE)
                evaluator.build_graph()
            saver = tf.train.Saver()

            # Start session
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.allow_growth = True
            evalSess = tf.Session(config=config, graph=evalGraph)
            saver.restore(evalSess, self.EvalNet_checkpoint)

        # PolygonRNN++
        tf.logging.info("Building PolygonRNN++ ...")
        model = PolygonModel(self.PolyRNN_metagraph, polyGraph)

        model.register_eval_fn(
            lambda input_: evaluator.do_test(evalSess, input_))

        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        polySess = tf.Session(config=config, graph=polyGraph)

        model.saver.restore(polySess, self.PolyRNN_checkpoint)
        self.model = model
        self.polySess = polySess

        if self.Use_ggnn:
            ggnnGraph = tf.Graph()
            tf.logging.info("Building GGNN ...")
            ggnnModel = GGNNPolygonModel(self.GGNN_metagraph, ggnnGraph)
            ggnnSess = tf.Session(
                config=tf.ConfigProto(allow_soft_placement=True),
                graph=ggnnGraph)

            ggnnModel.saver.restore(ggnnSess, self.GGNN_checkpoint)
            self.ggnnModel = ggnnModel
            self.ggnnSess = ggnnSess
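
Note that this constructor reads the checkpoint and metagraph paths from instance attributes rather than from FLAGS, so they must be set before __init__ runs. A minimal sketch of such an attribute holder, with hypothetical placeholder paths:

# Hypothetical attribute holder; the names match what the constructor
# above reads, and every path is a placeholder, not a real checkpoint.
class InferenceConfig:
    EvalNet_checkpoint = 'models/evalnet/evalnet.ckpt'
    PolyRNN_metagraph = 'models/poly/polygonplusplus.ckpt.meta'
    PolyRNN_checkpoint = 'models/poly/polygonplusplus.ckpt'
    GGNN_metagraph = 'models/ggnn/ggnn.ckpt.meta'
    GGNN_checkpoint = 'models/ggnn/ggnn.ckpt'
    Use_ggnn = True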
def inference(_):
    # Creating the graphs
    evalGraph = tf.Graph()
    polyGraph = tf.Graph()

    # Evaluator Network
    tf.logging.info("Building EvalNet...")
    with evalGraph.as_default():
        with tf.variable_scope("discriminator_network"):
            evaluator = EvalNet(_BATCH_SIZE)
            evaluator.build_graph()
        saver = tf.train.Saver()

        # Start session
        evalSess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                              graph=evalGraph)
        saver.restore(evalSess, FLAGS.EvalNet_checkpoint)

    # PolygonRNN++
    tf.logging.info("Building PolygonRNN++ ...")
    model = PolygonModel(FLAGS.PolyRNN_metagraph, polyGraph)

    model.register_eval_fn(lambda input_: evaluator.do_test(evalSess, input_))

    polySess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                          graph=polyGraph)

    model.saver.restore(polySess, FLAGS.PolyRNN_checkpoint)

    if FLAGS.Use_ggnn:
        ggnnGraph = tf.Graph()
        tf.logging.info("Building GGNN ...")
        ggnnModel = GGNNPolygonModel(FLAGS.GGNN_metagraph, ggnnGraph)
        ggnnSess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                              graph=ggnnGraph)

        ggnnModel.saver.restore(ggnnSess, FLAGS.GGNN_checkpoint)

    tf.logging.info("Testing...")
    if not os.path.isdir(FLAGS.OutputFolder):
        tf.gfile.MakeDirs(FLAGS.OutputFolder)
    crops_path = glob.glob(os.path.join(FLAGS.InputFolder, '*.png'))

    for crop_path in tqdm.tqdm(crops_path):
        image_np = io.imread(crop_path)
        image_np = np.expand_dims(image_np, axis=0)
        preds = [
            model.do_test(polySess, image_np, top_k)
            for top_k in range(_FIRST_TOP_K)
        ]

        # sort predictions based on the eval score and pick the best
        preds = sorted(preds, key=lambda x: x['scores'][0], reverse=True)[0]

        if FLAGS.Use_ggnn:
            polys = np.copy(preds['polys'][0])
            feature_indexs, poly, mask = utils.preprocess_ggnn_input(polys)
            preds_gnn = ggnnModel.do_test(ggnnSess, image_np, feature_indexs,
                                          poly, mask)
            output = {
                'polys': preds['polys'],
                'polys_ggnn': preds_gnn['polys_ggnn']
            }
        else:
            output = {'polys': preds['polys']}

        # dumping to json files
        save_to_json(crop_path, output)
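
The save_to_json helper called above is not defined in this example. A minimal sketch, consistent with the nested helper shown in Example #6 below (the body is an assumption, not the original implementation):

import json
import os

def save_to_json(crop_name, predictions_dict):
    # Write the predicted polygon vertices to a JSON file named after the crop.
    output_dict = {
        'img_source': crop_name,
        'polys': predictions_dict['polys'][0].tolist()
    }
    if 'polys_ggnn' in predictions_dict:
        output_dict['polys_ggnn'] = predictions_dict['polys_ggnn'][0].tolist()
    fname = os.path.basename(crop_name).split('.')[0] + '.json'
    fname = os.path.join(FLAGS.OutputFolder, fname)
    with open(fname, 'w') as f:
        json.dump(output_dict, f, indent=4)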
class PolygonRefiner:
    def __init__(self, infer=False, show_ggnn=False):
        self.run_inference = infer
        self.show_ggnn = show_ggnn

        self.fig, axes = plt.subplots(1,
                                      2 if show_ggnn else 1,
                                      num=0,
                                      figsize=(12, 6))
        self.axes = np.array(axes).flatten()
        self.last_image = None
        self.last_image_name = None
        self.index = 0
        if not self.run_inference:
            return

        # Model setup
        self.evalGraph = tf.Graph()
        self.polyGraph = tf.Graph()
        # Evaluator Network
        tf.logging.info("Building EvalNet...")
        with self.evalGraph.as_default():
            with tf.variable_scope("discriminator_network"):
                self.evaluator = EvalNet(_BATCH_SIZE)
                self.evaluator.build_graph()
            self.saver = tf.train.Saver()

            # Start session
            self.evalSess = tf.Session(
                config=tf.ConfigProto(allow_soft_placement=True),
                graph=self.evalGraph)
            self.saver.restore(self.evalSess, FLAGS.EvalNet_checkpoint)

        # PolygonRNN++
        tf.logging.info("Building PolygonRNN++ ...")
        self.model = PolygonModel(FLAGS.PolyRNN_metagraph, self.polyGraph)

        self.model.register_eval_fn(
            lambda input_: self.evaluator.do_test(self.evalSess, input_))

        self.polySess = tf.Session(
            config=tf.ConfigProto(allow_soft_placement=True),
            graph=self.polyGraph)

        self.model.saver.restore(self.polySess, FLAGS.PolyRNN_checkpoint)

        if FLAGS.Use_ggnn:
            self.ggnnGraph = tf.Graph()
            tf.logging.info("Building GGNN ...")
            self.ggnnModel = GGNNPolygonModel(FLAGS.GGNN_metagraph,
                                              self.ggnnGraph)
            self.ggnnSess = tf.Session(
                config=tf.ConfigProto(allow_soft_placement=True),
                graph=self.ggnnGraph)

            self.ggnnModel.saver.restore(self.ggnnSess, FLAGS.GGNN_checkpoint)

    def vis(self, pred_path):
        vis_single(pred_path, self.fig, self.axes, self.show_ggnn)

    def infer(self, image_np, crop_path, output_folder):
        # TODO: see if we can get some batch parallelism
        image_np = np.expand_dims(image_np, axis=0)
        preds = [
            self.model.do_test(self.polySess, image_np, top_k)
            for top_k in range(_FIRST_TOP_K)
        ]

        # sort predictions based on the eval score and pick the best
        preds = sorted(preds, key=lambda x: x["scores"][0], reverse=True)[0]

        if FLAGS.Use_ggnn:
            polys = np.copy(preds["polys"][0])
            feature_indexs, poly, mask = utils.preprocess_ggnn_input(polys)
            preds_gnn = self.ggnnModel.do_test(self.ggnnSess, image_np,
                                               feature_indexs, poly, mask)
            output = {
                "polys": preds["polys"],
                "polys_ggnn": preds_gnn["polys_ggnn"]
            }
        else:
            output = {"polys": preds["polys"]}

        # dumping to json files
        json_name = save_to_json(output_folder, crop_path, output)
        self.vis(json_name)

    @ex.capture
    def refine(self, image_file, corners, chip_dir, output_dir, _log):
        if self.last_image_name == image_file:
            image_np = self.last_image
        else:
            image_np = io.imread(image_file)
            self.last_image_name = image_file
            self.last_image = image_np.copy()
        # Crop the image to the annotated box
        lx, ty, rx, by = rect_to_box(corners)
        image_np = image_np[ty:by, lx:rx]
        if image_np.size == 0:
            return
        image_np = transform.resize(image_np, (224, 224))
        output_file = os.path.join(
            chip_dir,
            "{}_{:06d}.png".format(
                os.path.basename(image_file.replace(".", "_")), self.index),
        )
        # TODO Consider saving to a consistent temp file
        _log.info(f"output_file {output_file}")
        io.imsave(output_file, image_np)
        if self.run_inference:
            self.infer(image_np, output_file, output_dir)
        self.index += 1

    def process_file(self, annotation_file, image_dir, chip_dir, output_dir):
        ub.ensuredir(chip_dir, mode=0o0777, recreate=True)
        ub.ensuredir(output_dir, mode=0o0777, recreate=True)

        df = pd.read_csv(annotation_file, names=range(16), skiprows=2)
        corners = df.iloc[:, 3:7]
        filenames = df.iloc[:, 1]
        for (corner, filename) in tqdm.tqdm(zip(corners.iterrows(),
                                                filenames)):
            self.refine(os.path.join(image_dir, filename), corner[1], chip_dir,
                        output_dir)
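
A minimal usage sketch for PolygonRefiner, assuming the TensorFlow FLAGS above already point at real checkpoints; every path here is a hypothetical placeholder:

# Hypothetical driver: refine every annotated box listed in a detection CSV.
refiner = PolygonRefiner(infer=True, show_ggnn=False)
refiner.process_file(annotation_file='annotations/detections.csv',
                     image_dir='images/',
                     chip_dir='chips/',
                     output_dir='refined/')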
Example #6
def segment_image(path):
    with evalGraph.as_default():
        with tf.variable_scope("discriminator_network"):
            evaluator = EvalNet(_BATCH_SIZE)
            evaluator.build_graph()
        saver = tf.train.Saver()

        # Start session
        evalSess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True
        ), graph=evalGraph)
        saver.restore(evalSess, EvalNet_checkpoint)

    # Initialize and restore PolygonRNN++
    model = PolygonModel(PolyRNN_metagraph, polyGraph)
    model.register_eval_fn(lambda input_: evaluator.do_test(evalSess, input_))
    polySess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True
    ), graph=polyGraph)
    model.saver.restore(polySess, PolyRNN_checkpoint)

    # Initialize and restore GGNN
    ggnnGraph = tf.Graph()
    ggnnModel = GGNNPolygonModel(GGNN_metagraph, ggnnGraph)
    ggnnSess = tf.Session(config=tf.ConfigProto(
        allow_soft_placement=True
    ), graph=ggnnGraph)

    ggnnModel.saver.restore(ggnnSess, GGNN_checkpoint)

    # Input image crop (224x224x3); the object should be centered.
    crop_path = path

    sys.path.append("/usr/src/lego_classification/assembly_segmentation/python_files")
    # Testing
    image_np = io.imread(crop_path)
    image_np = np.expand_dims(image_np, axis=0)
    preds = [model.do_test(polySess, image_np, top_k) for top_k in range(_FIRST_TOP_K)]

    # sort predictions based on the eval score to pick the best.
    preds = sorted(preds, key=lambda x: x['scores'][0], reverse=True)

    print(preds[0]['polys'][0])
    # #Visualizing TOP_K and scores
    # %matplotlib inline
    # import matplotlib.pyplot as plt
    # fig, axes = plt.subplots(2,3)
    # axes=np.array(axes).flatten()
    # [vis_polys(axes[i], image_np[0], np.array(pred['polys'][0]), title='score=%.2f' % pred['scores'][0]) for i,pred in enumerate(preds)]

    # Now run GGNN on the best polygon
    bestPoly = preds[0]['polys'][0]
    feature_indexs, poly, mask = utils.preprocess_ggnn_input(bestPoly)
    preds_gnn = ggnnModel.do_test(ggnnSess, image_np, feature_indexs, poly, mask)
    refinedPoly = preds_gnn['polys_ggnn']
    print("---------")
    print(refinedPoly[0])
    return refinedPoly[0]

# Visualize the final prediction
# fig, ax = plt.subplots(1,1)
# vis_polys(ax,image_np[0],refinedPoly[0], title='PolygonRNN++')
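
A usage sketch for segment_image, assuming the globals it reads (graphs, metagraphs, checkpoints) have been initialized beforehand; the crop path below is a placeholder:

# Hypothetical call: segment a centered 224x224 object crop and plot the
# refined polygon returned by the GGNN stage.
import matplotlib.pyplot as plt
from skimage import io

crop = 'imgs/example_crop.png'  # placeholder path
polygon = segment_image(crop)

fig, ax = plt.subplots(1, 1)
vis_polys(ax, io.imread(crop), polygon, title='PolygonRNN++ + GGNN')
plt.show()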
class InteractiveGUITest(QMainWindow):
    def __init__(self):
        super(InteractiveGUITest, self).__init__()
        loadUi('src/InteractiveGUI3.ui', self)

        self.image = None
        self.cropped = None
        self.pushButton.clicked.connect(self.loadClicked)
        self.ProcessButton.clicked.connect(
            self.processImageClicked
        )  ## Loading of the model has to be done here.
        self.PolyButton.clicked.connect(self.getpolyClicked)

    tf.logging.set_verbosity(tf.logging.INFO)
    # --
    global FLAGS
    flags = tf.flags
    FLAGS = flags.FLAGS
    # --- Placeholders for the input data, such as the checkpoints
    ## These FLAGS centralize the paths, so the folder structure only needs to change here.

    flags.DEFINE_string('PolyRNN_metagraph', '', 'PolygonRNN++ MetaGraph ')
    flags.DEFINE_string('PolyRNN_checkpoint', '', 'PolygonRNN++ checkpoint ')
    flags.DEFINE_string('EvalNet_checkpoint', '', 'Evaluator checkpoint ')
    flags.DEFINE_string('GGNN_metagraph', '', 'GGNN poly MetaGraph ')
    flags.DEFINE_string('GGNN_checkpoint', '', 'GGNN poly checkpoint ')
    flags.DEFINE_string('InputFolder', '../imgs/',
                        'Folder with input image crops')
    flags.DEFINE_string('OutputFolder', '../output/', 'OutputFolder')
    flags.DEFINE_boolean('Use_ggnn', False, 'Use GGNN to postprocess output')

    #
    global _BATCH_SIZE, _FIRST_TOP_K, model, evalSess, polySess, ggnnSess, ggnnGraph, evalGraph, polyGraph, evaluator, ggnnModel
    _BATCH_SIZE = 1
    _FIRST_TOP_K = 5  ## The Evaluator picks the best of the top-5 first-vertex predictions

    evalGraph = tf.Graph()
    polyGraph = tf.Graph()

    # Evaluator Network
    tf.logging.info("Building EvalNet...")  ## Displaying INFO
    with evalGraph.as_default(
    ):  ## Making evalGraph as the default one in this scope
        with tf.variable_scope("discriminator_network"):
            evaluator = EvalNet(_BATCH_SIZE)
            evaluator.build_graph()
        saver = tf.train.Saver()

        # Start session
        evalSess = tf.Session(
            config=tf.ConfigProto(allow_soft_placement=True),
            graph=evalGraph)
        saver.restore(evalSess, FLAGS.EvalNet_checkpoint)  ## the ckpt file is loaded here

    # PolygonRNN++
    tf.logging.info("Building PolygonRNN++ ...")
    model = PolygonModel(FLAGS.PolyRNN_metagraph, polyGraph)
    ## The metagraph reconstructs the network structure so the checkpoint can be restored.
    model.register_eval_fn(lambda input_: evaluator.do_test(evalSess, input_))

    polySess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                          graph=polyGraph)

    model.saver.restore(polySess,
                        FLAGS.PolyRNN_checkpoint)  ## network weights loaded into polySess
    tf.logging.info("Poly is restored...")

    if FLAGS.Use_ggnn:
        ggnnGraph = tf.Graph()
        tf.logging.info("Building GGNN ...")
        ggnnModel = GGNNPolygonModel(FLAGS.GGNN_metagraph, ggnnGraph)
        ggnnSess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True),
                              graph=ggnnGraph)
        ggnnModel.saver.restore(ggnnSess, FLAGS.GGNN_checkpoint)
        tf.logging.info("GGNN is restored...")

    ### Testing the images on the model.
    def TestOnCrop(self):
        def save_to_json(crop_name, predictions_dict):
            ## Save the predicted polygon vertices to a JSON annotation file
            output_dict = {
                'img_source': crop_name,
                'polys': predictions_dict['polys'][0].tolist()
            }
            if 'polys_ggnn' in predictions_dict:
                output_dict['polys_ggnn'] = predictions_dict['polys_ggnn'][0].tolist()

            fname = os.path.basename(crop_name).split('.')[0] + '.json'

            fname = os.path.join(FLAGS.OutputFolder, fname)

            with open(fname, 'w') as f:
                json.dump(output_dict, f, indent=4)

        tf.logging.info("Testing...")
        if not os.path.isdir(FLAGS.OutputFolder):
            tf.gfile.MakeDirs(FLAGS.OutputFolder)
        crops_path = glob.glob(os.path.join(
            FLAGS.InputFolder, '*.png'))  ## list all the input image crops
        for crop_path in tqdm.tqdm(crops_path):
            image_np = io.imread(crop_path)  ## read the input crop
            image_np = np.expand_dims(image_np,
                                      axis=0)  ## add a batch dimension
            preds = [
                model.do_test(polySess, image_np, top_k)
                for top_k in range(_FIRST_TOP_K)
            ]  ## model is PolygonModel

            ## sort predictions based on the eval score and pick the best
            preds = sorted(preds, key=lambda x: x['scores'][0],
                           reverse=True)[0]

            if FLAGS.Use_ggnn:
                polys = np.copy(preds['polys'][0])
                feature_indexs, poly, mask = utils.preprocess_ggnn_input(polys)
                preds_gnn = ggnnModel.do_test(ggnnSess, image_np,
                                              feature_indexs, poly, mask)
                output = {
                    'polys': preds['polys'],
                    'polys_ggnn': preds_gnn['polys_ggnn']
                }
            else:
                output = {'polys': preds['polys']}

            # dumping to json files
            save_to_json(crop_path, output)

        show_ggnn = True
        preds_path = glob.glob(os.path.join(FLAGS.OutputFolder, '*.json'))
        fig, axes = plt.subplots(1, 1, num=0,
                                 figsize=(6, 6))  # 2 if show_ggnn else 1 would add a GGNN panel
        axes = np.array(axes).flatten()
        print('axes:', axes)
        for pred_path in tqdm.tqdm(preds_path):
            with open(pred_path, 'r') as f:
                pred = json.load(f)
            file_name = pred_path.split('/')[-1].split('.')[0]  # output file name derived from pred_path
            print('File name ::', file_name)

            im_crop = io.imread(pred['img_source'])
            polys = np.array(pred['polys'])  ## pred['polys'] contains the polygons
            print('axes0:', axes[0])
            #print('axes1:', axes[1])
            vis_polys(axes[0],
                      im_crop,
                      polys,
                      title='PolygonRNN++ : %s ' % file_name)
            fig_name = os.path.join(FLAGS.OutputFolder, file_name) + '.png'
            fig.savefig(fig_name)

            for ax in axes:
                ax.cla()
        fig, axes = plt.subplots(1, 1, num=1, figsize=(6, 6))
        axes = np.array(axes).flatten()
        print('axes:', axes)
        for pred_path in tqdm.tqdm(preds_path):
            with open(pred_path, 'r') as f:
                pred = json.load(f)
            file_name = pred_path.split('/')[-1].split('.')[0]  # output file name derived from pred_path
            print('File name ::', file_name)

            im_crop = io.imread(pred['img_source'])
            polys = np.array(pred['polys'])  ## pred['polys'] contains the polygons
            print('axes0:', axes[0])
            #print('axes1:', axes[1])
            vis_polys(axes[0],
                      im_crop,
                      np.array(pred['polys_ggnn']),
                      title=' PolygonRNN++ + GGNN : %s' % file_name)
            fig_name = os.path.join(FLAGS.OutputFolder, file_name) + 'GGNN.png'
            fig.savefig(fig_name)

            for ax in axes:
                ax.cla()

    def loadClicked(self):
        fname, filter = QFileDialog.getOpenFileName(
            self, 'Open File', '/home/uib06040/polyrnn',
            "Image Files (*.png)")  ## Image browser
        if fname:
            self.readImage(fname)
        else:
            print('Invalid Image')
        #self.loadImage('dusseldorf_000002_000019_leftImg8bit.png')

    #@pyqtSlot()
    def getpolyClicked(self):
        self.cropped = cv2.imread("output/input.png")
        self.croppedGGNN = cv2.imread("output/inputGGNN.png")
        dim = (371, 371)
        resized = cv2.resize(self.cropped, dim, interpolation=cv2.INTER_AREA)
        self.cropped = resized.copy()
        resized = cv2.resize(self.croppedGGNN,
                             dim,
                             interpolation=cv2.INTER_AREA)
        self.croppedGGNN = resized.copy()
        self.displayCroppedImage(window=2)

    #@pyqtSlot()
    def processImageClicked(self):  ## Save self.cropped and run inference on it.
        cv2.imwrite("imgs/input.png", self.cropped)
        self.TestOnCrop()

    def readImage(self, fname):
        self.image = cv2.imread(fname)

        def shape_selection(event, x, y, flags, param):
            # grab references to the global variables
            global ref_point, cropping

            # if the left mouse button was clicked, record the starting
            # (x, y) coordinates and indicate that cropping is being
            # performed
            if event == cv2.EVENT_LBUTTONDOWN:
                ref_point = [(x, y)]
                cropping = True

                # check to see if the left mouse button was released
            elif event == cv2.EVENT_LBUTTONUP:
                # record the ending (x, y) coordinates and indicate that
                # the cropping operation is finished
                ref_point.append((x, y))
                cropping = False

                # draw a rectangle around the region of interest
                cv2.rectangle(images, ref_point[0], ref_point[1], (0, 255, 0),
                              1)  ## Last argument is the line width
                cv2.imshow("images", images)

        images = self.image  # self.image = cv2.imread(fname)
        clone = images.copy()
        cv2.namedWindow("images")
        cv2.setMouseCallback("images", shape_selection)
        # keep looping until the 'q' key is pressed
        while True:
            # display the image and wait for a keypress
            cv2.imshow("images", images)
            key = cv2.waitKey(1) & 0xFF

            # if the 'r' key is pressed, reset the cropping region
            if key == ord("r"):
                images = clone.copy()

            # if the 'c' key is pressed, break from the loop
            elif key == ord("c"):
                break

        # if there are two reference points, then crop the region of interest
        # from the image and display it
        if len(ref_point) == 2:
            print("ref_point:", ref_point)

            crop_img = clone[ref_point[0][1]:ref_point[1][1],
                             ref_point[0][0]:ref_point[1][0]]

            cv2.imshow("crop_img", crop_img)
            cv2.waitKey(0)
            self.cropped = crop_img.copy()
            self.dimcropped = self.cropped.shape
            print("Shape of the cropped Image:", self.dimcropped[0:2])
            dim = (224, 224)
            resized = cv2.resize(self.cropped,
                                 dim,
                                 interpolation=cv2.INTER_AREA)
            self.cropped = resized.copy()
            print("Shape of the resized Image:", self.cropped.shape)

        # close all open windows
        cv2.destroyAllWindows()
        self.displayCroppedImage()

    def displayCroppedImage(self, window=1):
        qformat = QImage.Format_Indexed8
        if len(self.cropped.shape) == 3:  # rows[0],cols[1],channels[2]
            if (self.cropped.shape[2]) == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        img = QImage(self.cropped, self.cropped.shape[1],
                     self.cropped.shape[0], self.cropped.strides[0], qformat)

        #BGR > RGB
        img = img.rgbSwapped()
        if window == 1:
            self.imglabel.setPixmap(QPixmap.fromImage(img))
            self.imglabel.setAlignment(QtCore.Qt.AlignHCenter
                                       | QtCore.Qt.AlignVCenter)
        if window == 2:
            qformat2 = QImage.Format_Indexed8
            if len(self.croppedGGNN.shape) == 3:  # rows[0],cols[1],channels[2]
                if (self.croppedGGNN.shape[2]) == 4:
                    qformat2 = QImage.Format_RGBA8888
                else:
                    qformat2 = QImage.Format_RGB888
            self.imglabel_2.setPixmap(QPixmap.fromImage(img))
            self.imglabel_2.setAlignment(QtCore.Qt.AlignHCenter
                                         | QtCore.Qt.AlignVCenter)
            img2 = QImage(self.croppedGGNN, self.croppedGGNN.shape[1],
                          self.croppedGGNN.shape[0],
                          self.croppedGGNN.strides[0], qformat2)
            img2 = img2.rgbSwapped()
            x, y = self.croppedGGNN.shape[0], self.croppedGGNN.shape[1]
            print("X and Y :", self.croppedGGNN.shape)
            imcrop = self.croppedGGNN[45:329, 48 + 2:332]
            #cv2.imshow("chck1", imcrop)
            #cv2.waitKey(0)
            #imcrop = imcrop[48:332,45:329] # 48,332 45,329
            resized = cv2.resize(
                imcrop, (self.dimcropped[1] - 2, self.dimcropped[0] - 2),
                interpolation=cv2.INTER_AREA)
            y_offset, x_offset = ref_point[0][0] + 2, ref_point[0][1] + 2
            y_offset1, x_offset1 = ref_point[1][0], ref_point[1][1]
            print("X and Y offset:", x_offset, y_offset, x_offset1, y_offset1)
            self.image[x_offset:x_offset1, y_offset:y_offset1] = resized
            cv2.imwrite("overlay/input.png", self.image)
            self.imglabel_3.setPixmap(QPixmap.fromImage(img2))
            self.imglabel_3.setAlignment(QtCore.Qt.AlignHCenter
                                         | QtCore.Qt.AlignVCenter)
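
A minimal entry point for the GUI above, assuming a PyQt5 environment (this launcher is an addition, not part of the original listing):

import sys
from PyQt5.QtWidgets import QApplication

if __name__ == '__main__':
    app = QApplication(sys.argv)
    window = InteractiveGUITest()
    window.show()
    sys.exit(app.exec_())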