def __init__(self, dir):
     self.images = self.load_images(dir)
     self.pickColor = PickColor()
     self.imgPro = ImageProcessing()
     self.faces = [[[None for _ in range(3)] for _ in range(3)]
                   for _ in range(6)]
     self.cube_str_list = ['' for _ in range(54)]
Example #2
    def run(self):
        clock = pygame.time.Clock()
        self.generatePlatforms()

        # Initialize the image processing module
        imagePro = ImageProcessing()

        while True:
            # Get the action performed by the user via the camera.
            action = imagePro.procesarImagen()
            self.screen.fill((255,255,255))
            clock.tick(60)
            
            for event in pygame.event.get():
                if event.type == QUIT:
                    sys.exit()
                    
            if self.playery - self.cameray > 700:
                self.cameray = 0
                self.score = 0
                self.springs = []
                self.platforms = [[400, 500, 0, 0]]
                self.generatePlatforms()
                self.playerx = 400
                self.playery = 400
            self.drawGrid()
            self.drawPlatforms()
            if imagePro.beginGame:
                
                # Apply the detected action to the player character
                self.updatePlayer(action)

            self.updatePlatforms()
            self.screen.blit(self.font.render(str(self.score), -1, (0, 0, 0)), (25, 25))
            pygame.display.flip() 
Example #3
    def process_files_button_clicked(self):
        params = Parameters()
        colour_option = False
        self.process_files_button.setDisabled(True)
        if self.gray_colour_scale_radio.isChecked():
            colour_option = True

        params.setColourOption(colour_option)
        params.setHeightInput(self.height_input.text())
        params.setSkew(self.skew_parameter.text())
        params.setWidthInput(self.width_input.text())
        params.setDestination(self.destination_folder.text())

        if len(self.payload.text()) > 0:
            params.setFile(self.payload.text())
        else:
            params.setFile(self.file_payload.text())

        processor = ImageProcessing()
        processor.process(params)

        self.process_files_button.setEnabled(True)
        finishedProcessing = QMessageBox()
        finishedProcessing.setText("Finished Processing!")
        self.resetAll()
        finishedProcessing.exec_()
    def __init__(self, image, h=32, step_size=32):
        self.imp = ImageProcessing()  # maybe this shouldn't be a class but a 'static' module of helpers
        self.N = image.shape[0]
        self.step_size = step_size

        # Used for trying something out
        self.im = image.copy()
        # Used to compute confidence
        self.im_width = image.shape[0]
        self.im_height = image.shape[1]
        self.h = h
        image_vector = self.imp.patch_to_vector(image)

        # Matrix of confidence init with 0 for missing pixels and 1s otherwise
        self.m_conf = np.zeros(image_vector.shape)
        self.m_conf[image_vector != -100] = 1
Example #5
def TestImagePipeline():
    # Saving, loading data
    data = Data()
    testImages = data.LoadTestImages()
    imageTransform = ImageTransform()
    imageTransform.Calibrate()
    processing = ImageProcessing(magnitudeKernelSize=11, angleKernelSize=5)
    laneId = LaneLineIdentification()

    if (imageTransform.isCalibrated):
        for image in testImages:
            laneId.areLanesDetected = False
            warpedImage = imageTransform.WarpLaneImage(image)
            processedImage = processing.Process(warpedImage)
            #            processing.ShowProcessedImage(warpedImage)
            laneId.DetectAndShow(image, warpedImage, processedImage,
                                 imageTransform)
Example #6
 def main():
     image_processing = ImageProcessing()
     image_processing.start()
     my_webserver = Webserver()
     my_webserver.app.run(port=8002,
                          host='0.0.0.0',
                          debug=False,
                          threaded=True)
     image_processing.stop()
     image_processing.join()
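The start()/stop()/join() calls above suggest that this ImageProcessing class is a threading.Thread subclass with a cooperative stop flag, run alongside the Flask webserver. The original class is not shown; a minimal sketch of that pattern (class and attribute names here are assumptions, not the original API):

import threading
import time

class ImageProcessingWorker(threading.Thread):
    """Hypothetical stand-in for the ImageProcessing thread used above."""

    def __init__(self):
        super().__init__(daemon=True)
        self._stop_event = threading.Event()

    def run(self):
        # grab and process frames until stop() is requested
        while not self._stop_event.is_set():
            time.sleep(0.03)  # placeholder for the per-frame work

    def stop(self):
        self._stop_event.set()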
Example #7
    def __init__(self):
        super(MainWindow, self).__init__()
        uic.loadUi("pipeline.ui", self)

        self.show()

        self.process = ImageProcessing()
        self.point_cloud_processing = PointCloudProcessing()
        self.img1 = cv2.imread(
            "./images/subject1/subject1Left/subject1_Left_1.jpg")
        self.img2 = cv2.imread(
            "./images/subject1/subject1Middle/subject1_Middle_1.jpg")
        self.img3 = cv2.imread(
            "./images/subject1/subject1Right/subject1_Right_1.jpg")

        self.compute_disparity_left_button.clicked.connect(
            partial(self.recompute_disparity, True))
        self.compute_disparity_right_button.clicked.connect(
            partial(self.recompute_disparity, False))
        self.calibrate_button.clicked.connect(self.calibrate)
        self.process_button.clicked.connect(self.process_pcl)

        self.image_placeholder = self.findChild(QLabel, "disparity_image")

        self.block_size_slider = self.findChild(QSlider, "block_size")
        self.block_size_slider.valueChanged.connect(self._validate_values)
        self.min_disparity_slider = self.findChild(QSlider, "min_disparity")
        self.min_disparity_slider.valueChanged.connect(self._show_values)
        self.num_disparity_slider = self.findChild(QSlider, "num_disparity")
        self.num_disparity_slider.valueChanged.connect(self._show_values)
        self.p1_slider = self.findChild(QSlider, "p1")
        self.p1_slider.valueChanged.connect(self._validate_values)
        self.p2_slider = self.findChild(QSlider, "p2")
        self.p2_slider.valueChanged.connect(self._validate_values)
        self.max_dif_slider = self.findChild(QSlider, "disp_max_dif")
        self.max_dif_slider.valueChanged.connect(self._show_values)
        self.uniqueness_slider = self.findChild(QSlider, "uniqueness")
        self.uniqueness_slider.valueChanged.connect(self._show_values)
        self.speckle_slider = self.findChild(QSlider, "speckle_size")
        self.speckle_slider.valueChanged.connect(self._show_values)

        self._validate_values()
Example #8
 def __init__(self, SEGTAST='VAT_SSAT_DSAT', modelPath=''):
     self.SEGTAST = SEGTAST
     self.metrice_names = ["dice", "fp", "fn", "tp", "tn"]
     self.imageProcessor = ImageProcessing()
     self.models_mean_performance_neonates = pd.DataFrame()
     self.models_mean_performance_children = pd.DataFrame()
     self.models_median_performance_neonates = pd.DataFrame()
     self.models_median_performance_children = pd.DataFrame()
     # define labelnames and set number of classes
     if self.SEGTAST == "VAT_SSAT_DSAT":
         self.labelnames = ("bg", "ssat", "dsat", "vat")
     self.nClasses = len(self.labelnames)
     
     self.metric_func_dic = {'fp': self.calc_FalsePositive, 
                             'fn': self.calc_FalseNegative, 
                             'tp': self.calc_TruePositiv, 
                             'tn': self.calc_TrueNegative, 
                             'dice': self.calc_dice}
     if modelPath != "":
         self.load_Model(modelPath)
class PathFinder():
    def bestSearch(self):
        self.loadImage()
        
        bestSearch = BestSearch(self.imageProcessing.getOccupiedPoints(),
                                self.imageProcessing.getWeightsOfPoints(),
                                self.imageProcessing.getDimension())
        
        listOfPaths = bestSearch.search()
        self.display(listOfPaths)
        
    def loadImage(self):
        self.imageProcessing = ImageProcessing()
        self.imageProcessing.processImage()

    def display(self, listOfPaths):
        self.imageProcessing.convertAndDisplay(listOfPaths)
class MyTestCase(unittest.TestCase):
    def setUp(self):
        self.imageIO = ImageIO()
        self.imgPro = ImageProcessing()

    def test_saveImageCells(self):
        self.imgPro.saveCells(self.imageIO.loadImage("4", ".jpg"))
        self.assertEqual(len(self.imgPro.imageCells), 9)

    def test_splitsimageinto9regions(self):
        self.assertEqual(
            len(self.imgPro.splitImage(self.imageIO.loadImage("3", ".jpg"))),
            9)

    def test_noduplicateregions(self):
        imgregions = self.imgPro.splitImage(self.imageIO.loadImage(
            "1", ".jpg")).values()
        seen = list()
        for x in imgregions:
            for i in seen:
                self.assertFalse(np.array_equal(x, i))
            seen.append(x)
Example #11
def detect_motion(frameCount):
    # grab global references to the video stream, output frame, and
    # lock variables
    global vs, outputFrame, lock

    # initialize the motion detector and the total number of frames
    # read thus far
    md = ImageProcessing.imageProcessing(accumWeight=0.5)
    total = 0

    # loop over frames from the video stream
    while True:
        # read the next frame from the video stream, resize it,
        # convert it to grayscale, and boost its contrast
        frame = vs.read()
        frame = imutils.resize(frame, width=400)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.convertScaleAbs(gray, alpha=5, beta=10)

        # grab the current timestamp and draw it on the frame
        timestamp = datetime.datetime.now()
        cv2.putText(frame, timestamp.strftime(
            "%A %d %B %Y %I:%M:%S%p"), (10, frame.shape[0] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

        # if the total number of frames has reached a sufficient
        # number to construct a reasonable background model, then
        # continue to process the frame
        if total > frameCount:
            # detect motion in the grayscale image
            motion = md.detect(gray)

            # check to see if motion was found in the frame
            if motion is not None:
                # unpack the tuple and draw the box surrounding the
                # "motion area" on the output frame
                (thresh, (minX, minY, maxX, maxY)) = motion
                cv2.rectangle(frame, (minX, minY), (maxX, maxY),
                              (0, 0, 255), 2)



        # update the background model and increment the total number
        # of frames read thus far
        md.update(gray)
        total += 1

        # acquire the lock, set the output frame, and release the
        # lock
        with lock:
            outputFrame = frame.copy()
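The ImageProcessing.imageProcessing detector itself is not shown above; the accumWeight=0.5 argument suggests a weighted-average background model in the style of cv2.accumulateWeighted. A minimal sketch of such a detector, under that assumption (this is not the original class):

import cv2
import imutils
import numpy as np

class WeightedAverageMotionDetector:
    """Hypothetical stand-in for ImageProcessing.imageProcessing."""

    def __init__(self, accumWeight=0.5):
        self.accumWeight = accumWeight
        self.bg = None  # running background model

    def update(self, gray):
        # seed the model with the first frame, then blend new frames in
        if self.bg is None:
            self.bg = gray.copy().astype("float")
            return
        cv2.accumulateWeighted(gray, self.bg, self.accumWeight)

    def detect(self, gray, tVal=25):
        if self.bg is None:
            return None  # background model not initialized yet
        # difference against the background, threshold, and clean up
        delta = cv2.absdiff(self.bg.astype("uint8"), gray)
        thresh = cv2.threshold(delta, tVal, 255, cv2.THRESH_BINARY)[1]
        thresh = cv2.erode(thresh, None, iterations=2)
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = imutils.grab_contours(cnts)
        if len(cnts) == 0:
            return None
        # bounding box around all moving regions
        (minX, minY) = (np.inf, np.inf)
        (maxX, maxY) = (-np.inf, -np.inf)
        for c in cnts:
            (x, y, w, h) = cv2.boundingRect(c)
            (minX, minY) = (min(minX, x), min(minY, y))
            (maxX, maxY) = (max(maxX, x + w), max(maxY, y + h))
        return (thresh, (minX, minY, maxX, maxY))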
Example #12
def ExportHandriteLinesFromScannedDoc(image, pageNum):
    handwrittenDic = HandwrittenDic()
    newImagesForTrain = []
    boundries = FindSquaresHandwriteDoc(
        image.imageArray
    )  # The boundaries are ordered from the bottom of the page up.
    boundries = reorderBoundries(boundries)

    for i in range(len(boundries)):
        x, y, w, h = boundries[i]
        cutImage = image.cutImage(image.imageArray, x, y, x + w, y + h)
        Label = handwrittenDic.FindLabelForLine(page=pageNum, lineNum=i + 1)
        newImagesForTrain.append(
            ImageProcessing(cutImage,
                            imagePath=None,
                            Label=Label,
                            handwrite_ID=image.handwrite_ID))

    return newImagesForTrain
 def loadImage(self):
     self.imageProcessing = ImageProcessing()
     self.imageProcessing.processImage()
class IterativeInpainting:

    def __init__(self, image, h=32, step_size=32):
        self.imp = ImageProcessing()  # maybe this shouldn't be a class but a 'static' module of helpers
        self.N = image.shape[0]
        self.step_size = step_size

        # Used for trying something out
        self.im = image.copy()
        # Used to compute confidence
        self.im_width = image.shape[0]
        self.im_height = image.shape[1]
        self.h = h
        image_vector = self.imp.patch_to_vector(image)

        # Matrix of confidence init with 0 for missing pixels and 1s otherwise
        self.m_conf = np.zeros(image_vector.shape)
        self.m_conf[image_vector != -100] = 1

    def contains_missing_values(self, patch):
        return (patch == -100).any()  # -100 marks missing pixels, as in m_conf above

    def inpaint(self, alpha=1):
        # clean_dico_indexes = self.imp.complet_dictionary(self.dictionary)
        print("start inpainting")
        # simplest policy for filling in the image: in order
        dictionary = self.imp.get_dictionary_patches(self.im, self.h, self.step_size)

        while self.some_pixels_are_missing():
            patch_original, i1, j1 = self.get_next_patch()
            patch = self.imp.patch_to_vector(patch_original)
            dictionary = self.imp.get_dictionary_patches(self.im, self.h, self.step_size)

            # for learning, use only the observed (non-missing) pixels
            non_null_indexes = np.where(patch != -100)[0]
            Y = patch[non_null_indexes].reshape(-1,1)
            X = dictionary[non_null_indexes, :]

            model = Lasso(alpha=alpha, max_iter=1000000)
            model = model.fit(X, Y)  # sparse coefficients
            coef = np.array(model.coef_)

            # prediction
            # import pdb; pdb.set_trace()
            sum_coef = np.sum(coef[coef != 0])
            new_patch = np.divide(np.sum([coef[i] * dictionary[:,i] for i in range(len(coef))], axis=0), sum_coef)

            pixels_to_update = patch == -100
            patch[pixels_to_update] = new_patch[pixels_to_update]
            new_patch = self.imp.vector_to_patch(new_patch)

            # write the filled-in pixels back and mark them with confidence 0.5
            for i2 in range(self.h*2):
                for j2 in range(self.h*2):
                    if (patch_original[i2,j2] == -100).all():
                        self.im[i1+i2,j1+j2] = new_patch[i2,j2]
                        self.m_conf[(i1+i2) * self.im_width + (j1+j2)] = 0.5

        return self.im

    def some_pixels_are_missing(self):
        """
        Return True if any pixel is still missing from the image.
        """
        contains_missing = lambda patch: (patch == -100).any()
        for patch in self.im:
            if contains_missing(patch):
                return True
        return False

    def get_next_patch(self):
        edges = self.imp.get_edges(self.im, self.im.shape)

        max_point = edges[0]
        max_conf = 0
        for current_point in edges:
            conf = self.computeConfidence(current_point[0], current_point[1])
            if conf > max_conf:
                max_conf = conf
                max_point = current_point
        i1 = max(max_point[0] - self.h, 0)
        j1 = max(max_point[1] - self.h, 0)
        return self.imp.get_patch(max_point[0], max_point[1], self.h, self.im), i1, j1

    def computeConfidence(self, i, j):
        """
        Calculate the confidence of a given pixel(i, j), usually on the edge
        """
        conf = 0

        # y_max represents the bottom side of the patch
        y_max = j + self.h if j + self.h < self.im_height - 1 else self.im_height - 1
        # init y to the top of the patch
        y = j - self.h if j - self.h > 0 else 0
        while y <= y_max:
            # x_max represents the right side of the patch
            x_max = i + self.h if i + self.h < self.im_width - 1 else self.im_width - 1
            # init x to the left of the patch
            x = i - self.h if i - self.h > 0 else 0
            while x <= x_max:
                conf = conf + self.m_conf[y * self.im_width + x]  # m_conf is stored as a flat vector
                x += 1
            y += 1

        return conf / ((self.h * 2 + 1) * (self.h * 2 + 1))
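The inpainting loop above is sparse coding with a Lasso: each incomplete patch is regressed on a dictionary of patches using only its observed pixels, and the fitted combination predicts the missing ones (the class additionally normalizes by the sum of the nonzero coefficients). A self-contained toy version of that single step, on synthetic data:

import numpy as np
from sklearn.linear_model import Lasso

MISSING = -100  # missing-pixel marker, as in the class above

rng = np.random.default_rng(0)
dictionary = rng.normal(size=(64, 20))   # 20 atoms of 64 "pixels" each
true_coef = np.zeros(20)
true_coef[[3, 7]] = [1.5, -0.8]          # the signal really uses only 2 atoms
patch = dictionary @ true_coef
missing = rng.choice(64, size=16, replace=False)
patch[missing] = MISSING                 # knock out 16 pixels

# fit the sparse code on the observed pixels only
observed = np.where(patch != MISSING)[0]
model = Lasso(alpha=0.01, max_iter=100000)
model.fit(dictionary[observed, :], patch[observed])

# reconstruct the full patch and fill in only the holes
reconstruction = dictionary @ model.coef_
patch[missing] = reconstruction[missing]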
Example #15
 def __init__(self, path, index, frame=0, verbose=False):
     ImageProcessing.__init__(self, path, index, frame, verbose)
     self.description = "Gamma Detection class"
     self.author = "Ariel Hernandez Estevenz"
Example #16
class Evaluater():

   def __init__(self, SEGTAST='VAT_SSAT_DSAT', modelPath=''):
       self.SEGTAST = SEGTAST
       self.metrice_names = ["dice", "fp", "fn", "tp", "tn"]
       self.imageProcessor = ImageProcessing()
       self.models_mean_performance_neonates = pd.DataFrame()
       self.models_mean_performance_children = pd.DataFrame()
       self.models_median_performance_neonates = pd.DataFrame()
       self.models_median_performance_children = pd.DataFrame()
       # define labelnames and set number of classes
       if self.SEGTAST == "VAT_SSAT_DSAT":
           self.labelnames = ("bg", "ssat", "dsat", "vat")
       self.nClasses = len(self.labelnames)
       
       self.metric_func_dic = {'fp': self.calc_FalsePositive, 
                               'fn': self.calc_FalseNegative, 
                               'tp': self.calc_TruePositiv, 
                               'tn': self.calc_TrueNegative, 
                               'dice': self.calc_dice}
       if modelPath != "":
           self.load_Model(modelPath)


# ================== define evaluation metrics ==================================

   def calc_FNFPTPTN(self, groundTruth, prediction):
       '''
       :param groundTruth: ground-truth data,
                           shape = (batch, img_width, img_height, num_classes)
       :param prediction: prediction array to evaluate against the ground truth,
                          shape = (batch, img_width, img_height, num_classes)
       '''
       FN = self.calc_FalseNegative(groundTruth, prediction)
       FP = self.calc_FalsePositive(groundTruth, prediction)
       TP = self.calc_TruePositiv(groundTruth, prediction)
       TN = self.calc_TrueNegative(groundTruth, prediction)
       print('TP: %i, FP: %i, TN: %i, FN: %i' % (TP, FP, TN, FN))

   def calc_FalseNegative(self, groundTruth, prediction):
       'False Negative (FN): we predict a label of 0 (negative), but the true label is 1.'
       FN = np.sum(np.logical_and(prediction == 0, groundTruth == 1))
       return FN
   
   def calc_FalsePositive_rate(self, FP, TN):
       return FP/(FP+TN)    
   
   def calc_FalsePositive(self, groundTruth, prediction):
       'False Positive (FP): we predict a label of 1 (positive), but the true label is 0.'
       FP = np.sum(np.logical_and(prediction == 1, groundTruth == 0))
       return FP
   
   def calc_FalseNegative_rate(self, FN, TP):
       return FN/(FN+TP)
   
   def calc_TruePositiv(self, groundTruth, prediction):
       'True Positive (TP): we predict a label of 1 (positive), and the true label is 1'
       TP = np.sum(np.logical_and(prediction == 1, groundTruth == 1))       
       return TP
   
   def calc_TrueNegative(self, groundTruth, prediction):
       'True Negative (TN): we predict a label of 0 (negative), and the true label is 0.'
       TN = np.sum(np.logical_and(prediction == 0, groundTruth == 0))
       return TN
   
   def calc_pix_accuracy(self, TP, TN, FP, FN):
       return (TP + TN) / (TP + TN + FP + FN)
   
   def calc_precision(self, TP, FP):
       # compute Precision (Positive predictive value)
       return TP/(TP+FP)
   
   def calc_metrices(self, groundTruth, prediction):
       '''
       This function computes FN, FP, TN, TP, precision (positive predictive
       value), FP rate, FN rate, Dice similarity coefficient and accuracy.
       '''
       gtruth = np.moveaxis(groundTruth, -1, 0)
       pred = np.moveaxis(prediction, -1, 0)
       # compute all metrics for all labels
       computed_metrice_dic = {}
       for i, label_name in enumerate(self.labelnames):
           for metric_key in self.metric_func_dic:
               computed_metrice_dic[label_name + '_' + metric_key] = self.metric_func_dic[metric_key](gtruth[i], pred[i])

       # compute accuracy, positive predictive value, false positive/negative rate
       for i, label_name in enumerate(self.labelnames):
           computed_metrice_dic[label_name + '_fn_rate'] = self.calc_FalseNegative_rate(computed_metrice_dic[label_name + '_fn'],
                                                                                        computed_metrice_dic[label_name + '_tp'])
           computed_metrice_dic[label_name + '_fp_rate'] = self.calc_FalsePositive_rate(computed_metrice_dic[label_name + '_fp'],
                                                                                        computed_metrice_dic[label_name + '_tn'])
           computed_metrice_dic[label_name + '_precision'] = self.calc_precision(computed_metrice_dic[label_name + '_tp'],
                                                                                 computed_metrice_dic[label_name + '_fp'])
           computed_metrice_dic[label_name + '_accuracy'] = self.calc_pix_accuracy(computed_metrice_dic[label_name + '_tp'],
                                                                                   computed_metrice_dic[label_name + '_tn'],
                                                                                   computed_metrice_dic[label_name + '_fp'],
                                                                                   computed_metrice_dic[label_name + '_fn'])
       return computed_metrice_dic

   def calc_dice(self, groundTruth, prediction, non_seg_score=1.0):
       """
       Computes the Dice coefficient.
       Args:
           groundTruth: array of arbitrary shape.
           prediction: array with the same shape as groundTruth.
       Returns:
           A scalar representing the Dice coefficient between the two
           segmentations.
       """
       assert groundTruth.shape == prediction.shape
       groundTruth = np.asarray(groundTruth).astype(bool)
       prediction = np.asarray(prediction).astype(bool)
       # If both segmentations are all zero, the dice will be 1.
       im_sum = groundTruth.sum() + prediction.sum()
       if im_sum == 0:
           return non_seg_score
       # Compute Dice coefficient
       intersection = np.logical_and(groundTruth, prediction)
       return 2. * intersection.sum() / im_sum

     
   def calc_BatchDice(self, gtruthBatch, predBatch):
       """This method calculates the per-class Dice for a batch.
       Returns:
           one Dice value per class, in our case (bg, ssat, dsat, vat)
       """
       gtruth = np.moveaxis(gtruthBatch, 3, 0)
       pred = np.moveaxis(predBatch, 3, 0)
       dices = np.zeros((self.nClasses))
       for n in range(self.nClasses):
           dices[n] = self.calc_dice(gtruth[n], pred[n])
       return dices

       
   def load_Model(self, pathToModel, loss='dice_coef_multilabel'):
       '''
       Load a model into the Evaluation Manager.
       Args:
           pathToModel: absolute path to the model file
           loss: one of 'dice_coef_multilabel',
                 'class_weighted_pixelwise_crossentropy' or 'tversky'
       '''
       if loss == 'dice_coef_multilabel':
           self.model = load_model(pathToModel, custom_objects={'dice_coef_multilabel': dice_coef_multilabel})
       elif loss == 'class_weighted_pixelwise_crossentropy':
           self.model = load_model(pathToModel,
                                   custom_objects={'class_weighted_pixelwise_crossentropy': class_weighted_pixelwise_crossentropy})
       elif loss == 'tversky':
           self.model = load_model(pathToModel,
                                   custom_objects={'tversky_loss': tversky_loss})
       else:
           print('please specify a loss function when calling load_Model from the Evaluation Manager')

       self.input_shape_model = np.delete(self.model.layers[0].input_shape, 0)
           
   def get_BestModel_Name(self, main_dir):
       model_list = list()
       for file in os.listdir(main_dir):
           if file.endswith(".h5"):
               model_list.append(file)
       return sort_dir_aphanumerical(model_list)[-1]
    
   def predict(self, img):
       img = self.imageProcessor.normalize(img)
       if self.input_shape_model[-1] > 1:
           img = np.array([img, img, img])
           img = np.moveaxis(img, 0, -1)
           img = img[np.newaxis, ...]
       else:
           img = img[..., np.newaxis]
           img = img[np.newaxis, ...]
       return self.imageProcessor.aRGMAX(self.model.predict(img))
       
   def EvaluateModels(self, config):
        '''
         This function evaluates multiple models which were previously trained
         by the "k_fold_training" function in training.py
        Args:
             :pathToModels: path to the dir where the models are stored;
             the directory must be laid out as follows:
    
                ./dst/models
                    |
                    |---model1
                    |     |
                    |     |---model.h5
                    |     |---validation_IDs.csv
                    |     |---train_IDs.csv
                    |
                    |---model2
                    |     |
                    |     |---model.h5
                    |     |---validation_IDs.csv
                    |     |---train_IDs.csv
                    .
                    .
                    .
        '''
        pathToModels = os.path.join(config['dst'], 'models')                
        modelFolders = os.listdir(pathToModels)
        # loop through all folders and evaluate model
        for modelFolder in tqdm(modelFolders):        
            # define paths 
            abspath = os.path.join(pathToModels, modelFolder)
            csv_path_children = os.path.join(abspath, 
                                             'children_validationIDs.csv')
            csv_path_neonates = os.path.join(abspath, 
                                             'neonatal_validationIDs.csv')
            path_toModelFile = os.path.join(abspath, self.get_BestModel_Name(abspath))
            # evaluate model on neonatal data         
            self.evaluateModel(path_toModelFile, csv_path_neonates, config['infant_data_path'], loss=config['loss_function'], dst=abspath)
             # evaluate model on children data
            self.evaluateModel(path_toModelFile, csv_path_children, config['children_data_path'], loss=config['loss_function'], dst=abspath, childrenData = True)    
        self.models_mean_performance_neonates.to_excel(os.path.join(pathToModels, 'All_Model_mean_neonates.xlsx'))
        self.models_mean_performance_children.to_excel(os.path.join(pathToModels, 'All_Model_mean_children.xlsx'))
        self.models_median_performance_neonates.to_excel(os.path.join(pathToModels, 'All_Model_median_neonates.xlsx'))
        self.models_median_performance_children.to_excel(os.path.join(pathToModels, 'All_Model_median_children.xlsx'))


   def evaluateModel(self, modelPath, csv_path_evaluationIDs, pathToData, loss='dice_coef_multilabel', dst='', childrenData=False):
        """
        This function evaluates a model on neonatal as well as children data.
        Args:
           :csv_path_evaluationIDs: csv listing the subject IDs to evaluate on
           :pathToData: path to the evaluation data
           :childrenData: if True, images will be reshaped (zero padded) to
            shape 512x512
           :dst: path to which an Excel file with mean and SD info for all
            classes is saved
        """
        init_GPU_Session()
        # load model
        self.load_Model(modelPath, loss)
        # define paths to raw and ground-truth data
        pathToRawData = os.path.join(pathToData, 'Train_RawData')
        pathToLabelData = os.path.join(pathToData, 'Train_LabelData')
        # get ids for evaluation
        all_subjects = readCSVToList(csv_path_evaluationIDs)
        # dataframe to store computed metric values for all subjects
        model_performance = pd.DataFrame()
        
        for subject in all_subjects:
            # get raw and label images for ROI
            raw_data_path = os.path.join(pathToRawData, subject + "_raw.npy")
            label_data_path = os.path.join(pathToLabelData, subject + "_label.npy")
            raw_ROI = np.load(raw_data_path)
            labels_ROI_groundTruth = np.load(label_data_path)
            # reshape if image size is smaller than the model input requirement
            if self.input_shape_model[0] > raw_ROI.shape[1] or self.input_shape_model[1] > raw_ROI.shape[2]:
                raw_ROI = self.imageProcessor.paddBatch(raw_ROI, (self.input_shape_model[0], self.input_shape_model[1]))
                labels_ROI_groundTruth = self.imageProcessor.paddLabelBatch(labels_ROI_groundTruth,
                                                                            (self.input_shape_model[0],
                                                                             self.input_shape_model[1]))
                pred = np.empty((len(raw_ROI), self.input_shape_model[0],
                                 self.input_shape_model[1], self.nClasses))
            else:
                pred = np.empty((len(raw_ROI), len(raw_ROI[1]), len(raw_ROI[2]),
                                 self.nClasses))
            # make a prediction for every image
            for n in range(0, len(raw_ROI)):
                pred[n] = self.predict(raw_ROI[n])
            # compute evaluation metrics
            metrice_row_dic = self.calc_metrices(labels_ROI_groundTruth, pred)
            metrice_row_series = pd.Series(metrice_row_dic)
            metrice_row_series.name = subject
            # DataFrame.append was removed in pandas 2.0; concat is the equivalent
            model_performance = pd.concat([model_performance, metrice_row_series.to_frame().T])
        self.SaveResults(model_performance, dst, childrenData=childrenData)
        # self.printAndSaveFinalResults(dice_list, fp_list, fn_list, saveSegResults=saveOverallResults, childrenData=childrenData)   

   def SaveResults(self, model_performance, dst, childrenData=False):
       means = model_performance.mean()
       means.name = 'mean'
       stds = model_performance.std()
       stds.name = 'std'
       medians = model_performance.median()
       medians.name = 'median'
       quantile = model_performance.quantile([.1, .25, .5, .75], axis=0).transpose()
       comb = pd.concat([means, stds, medians, quantile], axis=1)

       if childrenData == True:
           Excel_name_ending = '_Children.xlsx'
           self.models_mean_performance_children = pd.concat([self.models_mean_performance_children, means.to_frame().T], ignore_index=True)
           self.models_median_performance_children = pd.concat([self.models_median_performance_children, medians.to_frame().T], ignore_index=True)
       else:
           Excel_name_ending = '_Neonates.xlsx'
           self.models_mean_performance_neonates = pd.concat([self.models_mean_performance_neonates, means.to_frame().T], ignore_index=True)
           self.models_median_performance_neonates = pd.concat([self.models_median_performance_neonates, medians.to_frame().T], ignore_index=True)
       model_performance.to_excel(os.path.join(dst, 'All_subj_volume_performance' + Excel_name_ending))
       comb.to_excel(os.path.join(dst, 'Mean_performance' + Excel_name_ending))
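As a quick sanity check of the metric definitions above, here is a worked toy example on binary 3x3 masks:

import numpy as np

groundTruth = np.array([[1, 1, 0],
                        [0, 1, 0],
                        [0, 0, 0]])
prediction = np.array([[1, 0, 0],
                       [0, 1, 1],
                       [0, 0, 0]])

TP = np.sum(np.logical_and(prediction == 1, groundTruth == 1))  # 2
FP = np.sum(np.logical_and(prediction == 1, groundTruth == 0))  # 1
FN = np.sum(np.logical_and(prediction == 0, groundTruth == 1))  # 1
TN = np.sum(np.logical_and(prediction == 0, groundTruth == 0))  # 5

dice = 2 * TP / (2 * TP + FP + FN)          # 4/6, about 0.667
accuracy = (TP + TN) / (TP + TN + FP + FN)  # 7/9, about 0.778
precision = TP / (TP + FP)                  # 2/3, about 0.667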
Example #17
class Transformation():
    extractObj = Extract()
    config = Configuration()
    img_process = ImageProcessing()
    lexicon = extractObj.getCsvData('harassment_lexicon')  # data frame
    tweets_json = extractObj.getJsonData('tweets_json')
    path_to_tweets = extractObj.getPath('tweets_json')
    twitterAuth = extractObj.getAuth()
    img_path = extractObj.get_tweet_imgs_path()

    #def __init__(self):
    #    pass

    def processHarassmentLexicon(self):
        # combine columns in a list
        lex = [
            self.lexicon[i].dropna().tolist()
            for i in self.lexicon.columns.values.tolist()
        ]

        # flatten the list of lists
        lex = [val for sub_lexicon in lex for val in sub_lexicon]

        # convert upper cases into lower case
        lex = [x.lower() for x in lex]

        # remove duplicates from lexicon
        lex = self.config.removeListDuplicates(lex)  #list(dict.fromkeys(lex))

        return lex

    # Returns a dictionary where each key is the source of the tweet and
    # each value is the collection of tweet objects belonging to that user.
    # 469786 total tweets
    # 15293 tweets contain toxic words
    # 384 students' tweets will be considered; the others had no tweets, or no toxic tweets
    def processTweets(self, lex):
        source_and_tweets = {}

        for i in self.tweets_json:
            ## Json files in tweets folder are named as follows: sourceName_tweets.json --> i
            ## So we use the following re.sub to get the source of the tweet.
            source = self.config.stringReplace(
                i, '_tweets.json', '')  #re.sub('\_tweets.json$', '', i)

            line_generator = open(self.path_to_tweets + '/' + i)

            # ind_objects will store all tweet objects for each user that have harassing keyword in it.
            ind_objects = []

            for line in line_generator:  ## line_generator has all tweet objects for a specific person..
                line_object = json.loads(line)

                if 'retweeted_status' in line_object.keys():

                    # full_text attribute gives the tweet of the user.
                    # if tweet is retweeted and tweet exceeds its length limit, then it is truncated,
                    # So we check retweeted_status['full_text'] field to get the full tweet.
                    tweet = 'RT' + ' @' + line_object['retweeted_status'][
                        'user']['screen_name'] + ': ' + line_object[
                            'retweeted_status']['full_text']

                else:
                    tweet = line_object['full_text']

                words = self.config.splitLowerText(
                    tweet
                )  #tweet.lower().split() # we get all the words in every sentence
                words = self.config.removeListDuplicates(
                    words
                )  #words = list(dict.fromkeys(words)) # we removed all the duplicates from the word list

                if any(item in lex for item in words):
                    ind_objects.append(line_object)

            source_and_tweets[source] = ind_objects
            source_and_tweets = self.config.removeEmptyItemsDict(
                source_and_tweets
            )  ## after removing empty users, we are left with 382 users in total
            #source_and_tweets = {k: v for k, v in source_and_tweets.items() if v != []}

        return source_and_tweets

    # This method checks if there is any url in a tweet object excluding media
    def check_url(self, tweet_object):

        url = []
        # if tweet is retweeted
        if self.is_retweeted(tweet_object):

            # if there is a url in retweeted tweet.
            if tweet_object['retweeted_status']['entities']['urls'] != []:
                for i in tweet_object['retweeted_status']['entities']['urls']:
                    url.append(i['url'])
            else:
                url.append(None)
        else:  # if tweet is not retweeted

            if tweet_object['entities']['urls'] != []:
                for i in tweet_object['entities']['urls']:
                    url.append(i['url'])
            else:
                url.append(None)

        return url

    # Check if tweet object is quoted
    def is_quoted(self, tweet_object):
        return tweet_object['is_quote_status'] == True

    # Check if tweet object is retweeted
    def is_retweeted(self, tweet_object):
        return 'retweeted_status' in tweet_object.keys()

    # returns id of the user tweet object belongs to
    def get_user_id(self, tweet_object):
        return tweet_object['user']['id_str']

    # returns the url to pp of the sender of a tweet
    def get_user_pp(self, tweet_object):
        return tweet_object['user']['profile_image_url_https']

    # returns the quoted user's screen name
    def get_quoted_user(self, tweet_object):
        if self.is_quoted(tweet_object):
            if self.is_retweeted(tweet_object):
                try:
                    return tweet_object['retweeted_status']['quoted_status'][
                        'user']['screen_name']
                except KeyError:
                    return None
            else:
                try:
                    return tweet_object['quoted_status']['user']['screen_name']
                except KeyError:
                    return None
        else:
            return None

    # Checks if there is any photo, video, etc. attached to tweet..
    def check_media(self, tweet_object):
        def get_media_info(k):

            media_url.append(k['url'])
            media_type.append(k['type'])
            if k['type'] == 'photo':
                media_url_https.append(k['media_url_https'])
            elif k['type'] == 'video':
                media_url_https.append(k['video_info']['variants'][1]['url'])
            elif k['type'] == 'animated_gif':
                media_url_https.append('animated_gif')
            else:
                media_url_https.append('None')

        media_url = []
        media_url_https = []
        media_type = []

        # Firstly check if there is a media
        if 'media' in tweet_object['entities'].keys(
        ) and not self.is_retweeted(tweet_object) and not self.is_quoted(
                tweet_object):

            for k in tweet_object['extended_entities']['media']:
                get_media_info(k)

        # retweeted but not quoted
        if self.is_retweeted(
                tweet_object) and not self.is_quoted(tweet_object):

            if 'media' in tweet_object['retweeted_status']['entities'].keys():

                for k in tweet_object['retweeted_status']['extended_entities'][
                        'media']:
                    get_media_info(k)

        # not retweeted but quoted
        if not self.is_retweeted(
                tweet_object) and 'quoted_status' in tweet_object.keys():

            if 'media' in tweet_object['quoted_status']['entities'].keys():

                for k in tweet_object['quoted_status']['extended_entities'][
                        'media']:
                    get_media_info(k)

        # no media, no retweet, no quoted
        if 'media' not in tweet_object['entities'].keys(
        ) and not self.is_retweeted(tweet_object) and not self.is_quoted(
                tweet_object):

            media_url.append(None)
            media_type.append(None)
            media_url_https.append(None)

        return [media_url, media_url_https, media_type]

    def extract_emojis(self, df):

        return list(df.apply(lambda row: demoji.findall(row['Tweet']), axis=1))

    def create_dataset(self):

        dct = self.processTweets(self.processHarassmentLexicon())

        def fill_df_fields(target_all, k, tweet_object):
            sources.append(tweet_object['user']['screen_name'])

            if target_all != []:
                targets.append(k)
            else:
                targets.append('None')

            if self.is_retweeted(tweet_object):
                tweet = 'RT' + ' @' + tweet_object['retweeted_status']['user'][
                    'screen_name'] + ': ' + tweet_object['retweeted_status'][
                        'full_text']
                tweets.append(tweet)
            else:
                tweets.append(tweet_object['full_text'])

            created_at.append(tweet_object['created_at'])
            favorite_count.append(tweet_object['favorite_count'])
            media_url, media_url_https, media_type = self.check_media(tweet_object)
            media_url_https_all.append(media_url_https)
            media_type_all.append(media_type)
            media_url_all.append(media_url)
            is_retweeted_all.append(self.is_retweeted(tweet_object))
            normal_url_all.append(self.check_url(tweet_object))
            is_quoted_all.append(self.is_quoted(tweet_object))
            source_user_id.append(self.get_user_id(tweet_object))

        df = pd.DataFrame()
        sources = []
        targets = []
        tweets = []
        created_at = []
        favorite_count = []
        media_url_https_all = []
        media_type_all = []
        media_url_all = []
        normal_url_all = []
        is_retweeted_all = []
        is_quoted_all = []
        source_user_id = []
        for i in dct.items():
            #print(i[0])
            ## type of i is a tuple where i[0] is the source of the tweet and i[1] is the all tweets of corresponding source

            for j in i[1]:  ## iterate over all tweet objects for each user..
                # grabs all mentioned users
                mentioned = j['entities']['user_mentions']

                # includes mentioned users along with quoted users..
                # we define a target if a tweet is retweeted, or if there is any user mention, or if tweet is quoted from someone else.
                target_all = []
                if mentioned != []:
                    for t in mentioned:
                        if t != None:
                            target_all.append(t['screen_name'])

                    if self.get_quoted_user(
                            j
                    ) != None:  #if there is quoted user we also add that to target
                        target_all.append(self.get_quoted_user(j))

                else:
                    if self.get_quoted_user(j) != None:
                        target_all.append(self.get_quoted_user(j))

                # retweeted and non-retweeted tweets are recorded the same way
                if target_all != []:
                    for k in target_all:
                        fill_df_fields(target_all, k, j)
                else:  # if there is no mention in the tweet
                    fill_df_fields(target_all, 'None', j)

        df['Source'] = sources
        df['Target'] = targets
        df['Tweet'] = tweets
        df['Is_quoted'] = is_quoted_all
        df['Is_retweeted'] = is_retweeted_all
        df['Created_at'] = created_at
        df['Favorite_count'] = favorite_count
        df['Media_url'] = media_url_all
        df['Media_url_https'] = media_url_https_all
        df['Media_type'] = media_type_all
        df['Normal_urls'] = normal_url_all
        df['Source_user_id'] = source_user_id
        df['Emojis'] = self.extract_emojis(df)
        df['Image_predictions'] = None
        df = df.reset_index().rename(columns={'index': 'row_id'})
        df = self.img_process.get_image_predictions(df)
        df = self.img_process.getHypernyms(df)
        return df

    def aggregated_dataset(self):
        df = self.create_dataset()
        df_aggregated = df.groupby(['Source', 'Target'],
                                   sort=False).size().reset_index(name='Count')
        df_merged = pd.merge(df,
                             df_aggregated,
                             on=['Source', 'Target'],
                             how='inner')
        df_merged_final = df_merged.groupby(
            ['Source', 'Target',
             'Count']).agg(lambda x: list(x)).reset_index()
        df_merged_final = df_merged_final.sort_values(
            by=['Count'], ascending=False).reset_index(drop=True)
        df_merged_final = df_merged_final[df_merged_final['Count'] >= 3]
        df_merged_final = df_merged_final.reset_index().rename(
            columns={'index': 'Interaction_Id'})
        return df_merged_final
 def setUp(self):
     self.imageIO = ImageIO()
     self.imgPro = ImageProcessing()
class CubeFaces:

    max_num_images = 6
    valid_types = [".jpg", ".png", ".tga"]
    up_pos_matrix = [i for i in range(9)]
    front_pos_matrix = [12, 13, 14, 24, 25, 26, 36, 37, 38]
    right_pos_matrix = [15, 16, 17, 27, 28, 29, 39, 40, 41]
    back_pos_matrix = [18, 19, 20, 30, 31, 32, 42, 43, 44]
    left_pos_matrix = [9, 10, 11, 21, 22, 23, 33, 34, 35]
    down_pos_matrix = [45, 46, 47, 48, 49, 50, 51, 52, 53]
    pos_matrix = [
        up_pos_matrix, front_pos_matrix, right_pos_matrix, back_pos_matrix,
        left_pos_matrix, down_pos_matrix
    ]

    def __init__(self, dir):
        self.images = self.load_images(dir)
        self.pickColor = PickColor()
        self.imgPro = ImageProcessing()
        self.faces = [[[None for _ in range(3)] for _ in range(3)]
                      for _ in range(6)]
        self.cube_str_list = ['' for _ in range(54)]

    def create_string_from_face(self, face, posmat, container):
        counter = 0
        for i in range(3):
            for j in range(3):
                container[posmat[counter]] = face[i][j]
                counter += 1
        return container

    def create_cube_string(self):
        self.init_all_faces()
        for i in range(len(self.faces)):
            self.create_string_from_face(self.faces[i], self.pos_matrix[i],
                                         self.cube_str_list)
        return ''.join(self.cube_str_list)

    def load_images(self, dir):
        images = list()
        path = os.path.dirname(inspect.getfile(dir))
        for filename in os.listdir(path):
            if self.checkValidImageFormat(filename):
                images.append(cv2.imread(os.path.join(path, filename), 1))
        return images

    def checkValidImageFormat(self, file):
        for i in self.valid_types:
            if file.endswith(i):
                return True
        return False

    def init_face_color(self, image, face):
        cells = self.imgPro.splitImage(image)
        for i in range(0, 3):
            for j in range(0, 3):
                face[i][j] = self.pickColor.calculateColorinCell(
                    cells.get((i, j)))
        return face

    def init_all_faces(self):
        for i in range(0, self.max_num_images):
            self.init_face_color(self.images[i], self.faces[i])
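The position matrices above define how the six 3x3 faces are flattened into one 54-character string: the up face fills indices 0-8, the down face fills 45-53, and the four side faces are interleaved one row at a time across indices 9-44. A standalone check of that layout, with single-letter stand-ins for the detected colors:

up = list(range(9))
front = [12, 13, 14, 24, 25, 26, 36, 37, 38]
right = [15, 16, 17, 27, 28, 29, 39, 40, 41]
back = [18, 19, 20, 30, 31, 32, 42, 43, 44]
left = [9, 10, 11, 21, 22, 23, 33, 34, 35]
down = list(range(45, 54))

cube = [''] * 54
for letter, posmat in zip("UFRBLD", (up, front, right, back, left, down)):
    for slot in posmat:
        cube[slot] = letter

print(''.join(cube))
# UUUUUUUUULLLFFFRRRBBBLLLFFFRRRBBBLLLFFFRRRBBBDDDDDDDDD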
Example #20
    # print("result2 image_compare: ", result2)
    #
    #
    # save_pic_to_path(take_pic_from_cam(), "resuorses/test1.jpg")
    # time.sleep(5)
    # save_pic_to_path(take_pic_from_cam(), "resuorses/test2.jpg")
    # result3 = is_images_equal("resuorses/test1.jpg", "resuorses/test2.jpg")
    # print("result3 image_compare: ", result3)
    # print("result3 image_compare: ", result3)

    # save_pic_to_path(take_pic_from_cam(), "resuorses/test7.jpg")

    image_list = ["test7.jpg","test10.jpg", "test11.jpg", "test12.jpg", "test13.jpg", "test14.jpg", "test15.jpg", "test16.jpg",
                  "test17.jpg"]

    obj = ImageProcessing()
    obj.load_image("resuorses/" + image_list[0])
    obj.gray_image()

    circles = cv2.HoughCircles(obj._gray_image, cv2.HOUGH_GRADIENT, dp=1.5, minDist=50, param1=20, param2=0.9,
                               minRadius=0,
                               maxRadius=30)
    print("num of circle: ", len(circles[0]))

    output = obj._regular_image.copy()
    diameter = []
    materials = []
    coordinates = []

    count = 0
    if circles is not None:
Example #21
class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        uic.loadUi("pipeline.ui", self)

        self.show()

        self.process = ImageProcessing()
        self.point_cloud_processing = PointCloudProcessing()
        self.img1 = cv2.imread(
            "./images/subject1/subject1Left/subject1_Left_1.jpg")
        self.img2 = cv2.imread(
            "./images/subject1/subject1Middle/subject1_Middle_1.jpg")
        self.img3 = cv2.imread(
            "./images/subject1/subject1Right/subject1_Right_1.jpg")

        self.compute_disparity_left_button.clicked.connect(
            partial(self.recompute_disparity, True))
        self.compute_disparity_right_button.clicked.connect(
            partial(self.recompute_disparity, False))
        self.calibrate_button.clicked.connect(self.calibrate)
        self.process_button.clicked.connect(self.process_pcl)

        self.image_placeholder = self.findChild(QLabel, "disparity_image")

        self.block_size_slider = self.findChild(QSlider, "block_size")
        self.block_size_slider.valueChanged.connect(self._validate_values)
        self.min_disparity_slider = self.findChild(QSlider, "min_disparity")
        self.min_disparity_slider.valueChanged.connect(self._show_values)
        self.num_disparity_slider = self.findChild(QSlider, "num_disparity")
        self.num_disparity_slider.valueChanged.connect(self._show_values)
        self.p1_slider = self.findChild(QSlider, "p1")
        self.p1_slider.valueChanged.connect(self._validate_values)
        self.p2_slider = self.findChild(QSlider, "p2")
        self.p2_slider.valueChanged.connect(self._validate_values)
        self.max_dif_slider = self.findChild(QSlider, "disp_max_dif")
        self.max_dif_slider.valueChanged.connect(self._show_values)
        self.uniqueness_slider = self.findChild(QSlider, "uniqueness")
        self.uniqueness_slider.valueChanged.connect(self._show_values)
        self.speckle_slider = self.findChild(QSlider, "speckle_size")
        self.speckle_slider.valueChanged.connect(self._show_values)

        self._validate_values()

    def _validate_values(self):
        if self.block_size_slider.value() % 2 == 0:
            self.block_size_slider.setValue(self.block_size_slider.value() - 1)

        if self.p1_slider.value() >= self.p2_slider.value():
            self.p2_slider.setValue(self.p1_slider.value() + 1)

        self.p1_slider.setValue(8 * 3 * self.block_size_slider.value()**2)
        self.p2_slider.setValue(32 * 3 * self.block_size_slider.value()**2)

        self._show_values()

    def _show_values(self):
        self.block_size_value.setText(str(self.block_size_slider.value()))
        self.min_disparity_value.setText(str(
            self.min_disparity_slider.value()))
        self.num_disparity_value.setText(
            str(self.num_disparity_slider.value() * 16))
        self.p1_value.setText(str(self.p1_slider.value()))
        self.p2_value.setText(str(self.p2_slider.value()))
        self.max_diff_value.setText(str(self.max_dif_slider.value()))
        self.uniqueness_value.setText(str(self.uniqueness_slider.value()))
        self.speckle_value.setText(str(self.speckle_slider.value()))

    def calibrate(self):
        self.calibrate_button.setEnabled(False)
        self.process.calibrate()

    def recompute_disparity(self, is_left):
        self.process.set_sgbm_parameters(
            self.num_disparity_slider.value() * 16,
            self.min_disparity_slider.value(), self.block_size_slider.value(),
            self.p1_slider.value(), self.p2_slider.value(),
            self.max_dif_slider.value(), self.uniqueness_slider.value(),
            self.speckle_slider.value())
        if is_left:
            image_left = self.img1
            image_right = self.img2
        else:
            image_left = self.img2
            image_right = self.img3

        disparity, pcl = self.process.process_pair(image_left,
                                                   image_right,
                                                   is_left=is_left)
        self.show_image(disparity)
        cv2.imwrite(
            "disparity_{}.jpg".format("left" if is_left else "right"),
            cv2.normalize(disparity,
                          None,
                          alpha=0,
                          beta=255,
                          norm_type=cv2.NORM_MINMAX,
                          dtype=cv2.CV_8U))
        open3d.visualization.draw_geometries([pcl])
        open3d.io.write_point_cloud(
            "pcl_{}.pcd".format("left" if is_left else "right"), pcl)

    def process_pcl(self):
        self.process.set_sgbm_parameters(
            self.num_disparity_slider.value() * 16,
            self.min_disparity_slider.value(), self.block_size_slider.value(),
            self.p1_slider.value(), self.p2_slider.value(),
            self.max_dif_slider.value(), self.uniqueness_slider.value(),
            self.speckle_slider.value())
        mesh = self.process.process_image_batch(self.img1, self.img2,
                                                self.img3)
        open3d.visualization.draw_geometries([mesh])

    def show_image(self, image: np.ndarray):
        cv2.imshow("", image)
        cv2.waitKey()
        cv2.destroyAllWindows()
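_validate_values above enforces the usual StereoSGBM constraints: an odd block size, P1 < P2, and the commonly recommended P1 = 8*3*blockSize**2 and P2 = 32*3*blockSize**2 for 3-channel input. A minimal sketch of feeding such values straight to OpenCV (the image paths are placeholders):

import cv2

block_size = 5                   # must be odd
P1 = 8 * 3 * block_size ** 2     # penalty for small disparity changes
P2 = 32 * 3 * block_size ** 2    # penalty for larger jumps (must exceed P1)

sgbm = cv2.StereoSGBM_create(
    minDisparity=0,
    numDisparities=16 * 4,       # must be a multiple of 16
    blockSize=block_size,
    P1=P1,
    P2=P2,
    disp12MaxDiff=1,
    uniquenessRatio=10,
    speckleWindowSize=100,
    speckleRange=2,
)

left = cv2.imread("left.jpg", cv2.IMREAD_GRAYSCALE)
right = cv2.imread("right.jpg", cv2.IMREAD_GRAYSCALE)
disparity = sgbm.compute(left, right).astype("float32") / 16.0  # fixed-point to float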
Example #22


def VideoPipeline(video):
    data = Data()
    if (video == 'project'):
        videoClip = data.LoadProjectVideo()
        outputFile = 'D:/Andreas/Programming/Python/UdacitySelfDrivingCar/Term1Projects/Project4/CarND-Advanced-Lane-Lines/DataStore/ProjectVideo.mp4'
    elif (video == 'challenge'):
        videoClip = data.LoadChallengeVideo()
        outputFile = 'D:/Andreas/Programming/Python/UdacitySelfDrivingCar/Term1Projects/Project4/CarND-Advanced-Lane-Lines/DataStore/ChallengeVideo.mp4'
    elif (video == 'hardchallenge'):
        videoClip = data.LoadHardChallengeVideo()
        outputFile = 'D:/Andreas/Programming/Python/UdacitySelfDrivingCar/Term1Projects/Project4/CarND-Advanced-Lane-Lines/DataStore/HardChallengeVideo.mp4'

    output = videoClip.fl_image(
        lambda x: VideoImageProcessing(cv2.cvtColor(x, cv2.COLOR_RGB2BGR)))
    output.write_videofile(outputFile, audio=False)


#TestImagePipeline()
video = 'project'

#video = 'challenge'

processing = ImageProcessing()
imageTransform = ImageTransform()
laneId = LaneLineIdentification(video=video)

VideoPipeline(video)