Example #1
 def __init__(self):
     self.exp = Explanation()
     self.hf = HelperFunctions()
     self.port = 21
     self.usedKeysDB = "../../Database/usedKeysDB.txt"
     self.keyDatabase = "../../Database/unUsedKeyDB.txt"
     self.cache = []
Example #2
 def test_get_zoom_scale(self):
     """
         test zoom scaling
     """
     helper_functions = HelperFunctions()
     zoom_scale = helper_functions.getZoomScale(80, 35, 40, 40)
     self.assertEqual(zoom_scale, 2)
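The assertion above pins down the contract: getZoomScale(80, 35, 40, 40) returns 2, i.e. the integer factor needed to subsample the longer image side down to the canvas. A minimal sketch of a compatible implementation (hypothetical; the real HelperFunctions.getZoomScale is not shown here):

import math

def getZoomScale(image_width, image_height, canvas_width, canvas_height):
    # smallest integer subsample factor that fits the image inside the canvas
    scale_w = math.ceil(float(image_width) / canvas_width)
    scale_h = math.ceil(float(image_height) / canvas_height)
    return int(max(scale_w, scale_h, 1))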
Example #4
    def __init__(self, url):

        self.site_url = url
        self.html = ""
        self.helper_f = HelperFunctions()
        self.table_output_rows = []
        self.LOGGER = logging.getLogger(__name__)
Example #5
    def importAnnotations(self):
        """
        Imports annotations onto the loaded DICOM image. The annotations are scaled according to the zoom factor.

        Note: annotations exported for a different image can currently be loaded; a mechanism should be added to tie annotations to a specific image, perhaps via a hash of the image in the JSON export.

        Args: none

        Returns: nothing

        """
        # Check that image is loaded
        if not self.isDicomLoaded:
            tkMessageBox.showwarning("Open file",
                                     "Must load DICOM image first")
            return

        # Open file with dialog
        jsonFile = tkFileDialog.askopenfile(initialdir="C:\\",
                                            title="choose JSON file",
                                            filetypes=(("JSON files", "*.json"),
                                                       ("all files", "*.*")))
        if jsonFile is None:
            # user cancelled the dialog
            return
        jsonString = jsonFile.read()

        helperFunctions = HelperFunctions()

        # convert the json dictionary object to an array of annotation objects
        # and load into the saved annotation list
        self.annotationList = json.loads(
            jsonString, object_hook=helperFunctions.JsonDictToAnnotationObject)

        # scale the annotations to match the current object
        scaledAnnotationList = [
            helperFunctions.scaleAnnotationObject(item, self.zoomScale)
            for item in self.annotationList
        ]

        # Add annotations to canvas
        for annotationItem in scaledAnnotationList:
            # make a rectangle and add to canvas
            self.canvas.create_rectangle(annotationItem.startX,
                                         annotationItem.startY,
                                         annotationItem.endX,
                                         annotationItem.endY)
            # add the label
            xDiff = int(abs(annotationItem.endX - annotationItem.startX) / 2)
            labelX = annotationItem.startX + xDiff if annotationItem.startX < annotationItem.endX else annotationItem.endX + xDiff
            labelY = annotationItem.startY if annotationItem.startY < annotationItem.endY else annotationItem.endY
            annotationLabel = Label(self.canvas,
                                    text=annotationItem.title,
                                    fg='white',
                                    bg='black')
            self.canvas.create_window(labelX - 40,
                                      labelY - 20,
                                      window=annotationLabel,
                                      anchor=NW)
Example #6
class Pipeline:
    # Note: this body runs once, at class-definition time, not per instance
    calibrationImages = glob.glob("camera_cal/calibration*.jpg")

    HelperFunctions().calibrateCamera(calibrationImages)

    #HelperFunctions().runTestImages()

    project_video = 'output_video.mp4'
    clip1 = VideoFileClip("project_video.mp4")
    test_clip = clip1.fl_image(HelperFunctions().process_image)
    test_clip.write_videofile(project_video, audio=False)
Example #7
 def __init__(self, BasePath, NumFeatures):
     self.BasePath = BasePath
     InputImageList = []
     for filename in sorted(glob.glob(self.BasePath + '/*.jpg')):
         ImageTemp = cv2.imread(filename)
         InputImageList.append(ImageTemp)
     self.NumFeatures = NumFeatures
     self.Images = np.array(InputImageList)
     self.NumImages = len(InputImageList)
     self.FirstHalf = []
     self.LastHalf = []
     self.MiddleImage = None
     self.HelperFunctions = HelperFunctions()
Example #8
 def __init__(self, top_n, bln_travel_times, api_key):
     self.name = "Get Nearest"
     self.top_n = top_n
     self.LOGGER = logging.getLogger(__name__)
     self.helper_f = HelperFunctions()
     self.dist_calc = DistanceCalc(api_key=api_key)
     self.bln_travel_times = bln_travel_times
     self.api_key = api_key
Example #9
    def loadFile(self):
        """
        Loads a single DICOM image into the Tkinter GUI and zooms the image to fit the space in the canvas.

        Note: cannot load a stack of DICOM images

        Args: none

        Returns: nothing

        """

        self.removeAllWidgets()

        # Open file with dialog
        dicomImage = tkFileDialog.askopenfilename(
            initialdir="C:\"",
            title="choose DICOM file",
            filetypes=(("DICOM files", "*.dcm"), ("all files", "*.*")))

        # Read file into pydicom
        dFile = dicom.read_file(dicomImage)

        # Get the Tkinter photo image with pydicom_Tkinter
        dicomImage = pydicom_Tkinter.get_tkinter_photoimage_from_pydicom_image(
            dFile)

        # Zoom the photo image to match the window by rescaling for the longest side
        helperFunctions = HelperFunctions()
        self.zoomScale = helperFunctions.getZoomScale(
            dicomImage.width(), dicomImage.height(), self.canvas.winfo_width(),
            self.canvas.winfo_height())
        # keep a reference on self so Tkinter does not garbage-collect the image
        self.displayImage = dicomImage.subsample(self.zoomScale, self.zoomScale)
        self.scaledImageWidth = dicomImage.width() / self.zoomScale
        self.scaledImageHeight = dicomImage.height() / self.zoomScale

        # Update DICOM loaded property
        self.isDicomLoaded = True

        # Display image in canvas
        image1 = self.canvas.create_image(0, 0, image=self.displayImage, anchor=NW)
        self.mainloop()
Example #10
    def importAnnotations(self):
        """
        Imports annotations onto the loaded DICOM image. The annotations are scaled according to the zoom factor.

        Note: annotations exported for a different image can currently be loaded; a mechanism should be added to tie annotations to a specific image, perhaps via a hash of the image in the JSON export.

        Args: none

        Returns: nothing

        """
        # Check that image is loaded
        if not self.isDicomLoaded:
            tkMessageBox.showwarning("Open file","Must load DICOM image first")
            return

        # Open file with dialog
        jsonFile = tkFileDialog.askopenfile(initialdir = "C:\\", title = "choose JSON file", filetypes = (("JSON files","*.json"),("all files","*.*")))
        if jsonFile is None:
            # user cancelled the dialog
            return
        jsonString = jsonFile.read()

        helperFunctions = HelperFunctions()

        # convert the json dictionary object to an array of annotation objects
        # and load into the saved annotation list
        self.annotationList = json.loads(jsonString, object_hook=helperFunctions.JsonDictToAnnotationObject)

        # scale the annotations to match the current object
        #scaledAnnotationList = map(helperFunctions.scaleAnnotationObject, self.annotationList)
        scaledAnnotationList = [helperFunctions.scaleAnnotationObject(item, self.zoomScale) for item in self.annotationList]

        # Add annotations to canvas
        for annotationItem in scaledAnnotationList:
            # make a rectangle and add to canvas
            newRectangle = self.canvas.create_rectangle(annotationItem.startX, annotationItem.startY, annotationItem.endX, annotationItem.endY)
            # add the label
            xDiff = int(abs(annotationItem.endX - annotationItem.startX) / 2)
            labelX = annotationItem.startX + xDiff if annotationItem.startX < annotationItem.endX else annotationItem.endX + xDiff
            labelY = annotationItem.startY if annotationItem.startY < annotationItem.endY else annotationItem.endY
            annotationLabel = Label(self.canvas, text=annotationItem.title, fg='white', bg='black')
            self.canvas.create_window(labelX - 40, labelY - 20, window=annotationLabel, anchor=NW)
Example #11
    def loadFile(self):
        """
        Loads a single DICOM image into the Tkinter GUI and zooms the image to fit the space in the canvas.

        Note: cannot load a stack of DICOM images

        Args: none

        Returns: nothing

        """

        self.removeAllWidgets()

        # Open file with dialog
        dicomImage = tkFileDialog.askopenfilename(initialdir = "C:\"", title = "choose DICOM file", filetypes = (("DICOM files","*.dcm"),("all files","*.*")))
        
        # Read file into pydicom
        dFile = dicom.read_file(dicomImage)

        # Get the Tkinter photo image with pydicom_Tkinter
        dicomImage = pydicom_Tkinter.get_tkinter_photoimage_from_pydicom_image(dFile)
        
        # Zoom the photo image to match the window by rescaling for the longest side
        helperFunctions = HelperFunctions()
        self.zoomScale = helperFunctions.getZoomScale(dicomImage.width(), dicomImage.height(), self.canvas.winfo_width(), self.canvas.winfo_height())
        # keep a reference on self so Tkinter does not garbage-collect the image
        self.displayImage = dicomImage.subsample(self.zoomScale, self.zoomScale)
        self.scaledImageWidth = dicomImage.width() / self.zoomScale
        self.scaledImageHeight = dicomImage.height() / self.zoomScale

        # Update DICOM loaded property
        self.isDicomLoaded = True

        # Display image in canvas
        image1 = self.canvas.create_image(0, 0, image = self.displayImage, anchor=NW)
        self.mainloop()
Example #12
class Database(object):
    def __init__(self):
        self.port = 1009
        self.hashFile = "index.txt"
        self.hs = HelperFunctions()

    def server(self):
        s = socket.socket()
        s.bind(('', self.port))
        s.listen(5)
        print "Started server"
        return s

    def logic(self, pasteObj, key):
        #The given key already exists
        if (self.hs.isKeyInHash(self.hashFile, key)):
            return False
        #Write the text to file and put in folder
        self.hs.createFolder(pasteObj.folderName + "_" + pasteObj.userId)
        self.hs.createFile(
            os.path.join(pasteObj.folderName + "_" + pasteObj.userId,
                         pasteObj.fileName), pasteObj.text)
        #insert in hash key = key, value = path of text file
        self.hs.writeToHash(
            self.hashFile, key,
            os.path.join(pasteObj.folderName + "_" + pasteObj.userId,
                         pasteObj.fileName))
        print "File created"
        return True

    def run(self):
        #Start the server
        s = self.server()
        while True:
            connection, addr = s.accept()
            pasteObj_string = connection.recv(4096)
            pasteObj = pickle.loads(pasteObj_string)
            connection.send(
                'Database server has received the paste object successfully')
            key = connection.recv(1024)
            connection.send('Database server has received the key')
            if (self.logic(pasteObj, key)):
                connection.send("True")
            else:
                connection.send("False")
            connection.close()
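A client for this server has to mirror the recv/send sequence in run(): send the pickled paste object, read the acknowledgement, send the key, read the second acknowledgement, then read the "True"/"False" result. A minimal Python 2 sketch under those assumptions (store_paste and its arguments are hypothetical names):

import pickle
import socket

def store_paste(paste_obj, key, host='localhost', port=1009):
    s = socket.socket()
    s.connect((host, port))
    s.send(pickle.dumps(paste_obj))  # server: pickle.loads(connection.recv(4096))
    s.recv(1024)                     # ack for the paste object
    s.send(key)                      # server: key = connection.recv(1024)
    s.recv(1024)                     # ack for the key
    result = s.recv(1024)            # "True" if stored, "False" if the key exists
    s.close()
    return result == "True"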
Example #13
    def test_Json_dict_to_annotation_object(self):
        """
            test JSON to annotation list method
        """
        helper_functions = HelperFunctions()
        json_string = """[
                {
                    "endX": 1300, 
                    "endY": 592, 
                    "startX": 1222, 
                    "startY": 416, 
                    "title": "bone"
                }
        ]"""

        annotation_test_list = json.loads(
            json_string,
            object_hook=helper_functions.JsonDictToAnnotationObject)
        self.assertEqual(annotation_test_list[0].title, 'bone')
        self.assertEqual(annotation_test_list[0].endX, 1300)
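For reference, an object_hook compatible with this JSON might look like the sketch below; the attribute names come from the assertions above, but the actual annotation class used by HelperFunctions is not shown (both names here are assumptions):

class Annotation(object):
    # hypothetical container matching the JSON fields above
    def __init__(self, startX, startY, endX, endY, title):
        self.startX = startX
        self.startY = startY
        self.endX = endX
        self.endY = endY
        self.title = title

def JsonDictToAnnotationObject(json_dict):
    # json.loads calls this hook once per decoded JSON object
    return Annotation(json_dict["startX"], json_dict["startY"],
                      json_dict["endX"], json_dict["endY"], json_dict["title"])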
Example #14
def main():
    """Main process

    """
    try:

        import argparse

        parser = argparse.ArgumentParser()
        parser.add_argument('--profile',
                            help='The aws profile to use.',
                            required=True)
        #parser.add_argument('--keep_bucket', help='Keeps the created bucket. When not '
        #                    action='store_true')

        args = parser.parse_args()
        aws_profile = args.profile
        #aws_profile = 'default'

        LOGGER.info('Started run. main:')

        uo_helper_funct = HelperFunctions()
        security_groups = []
        security_groups.append(SECURITY_GROUP_NAME)
        filename_full_path_pq = os.path.join(DIRNAME, ADDRESS_FILE_PARQUET)

        #write parquet file
        write_parquet_file()

        LOGGER.debug("Bucket name prefix: %s ", BUCKET_NAME_PREFIX)

        uo_storage_bucket = StorageBucket(REGION)
        uo_storage_bucket.create_bucket(BUCKET_NAME_PREFIX)
        uo_storage_bucket.add_file_to_bucket(filename_full_path_pq,
                                             ADDRESS_FILE_PARQUET,
                                             '')  #FILE_NAME_PREFIX

        LOGGER.debug("Bucket name : %s ", uo_storage_bucket.bucketname)
        LOGGER.debug("IAM_PATH : %s ", IAM_PATH)

        uo_iam_admin = IAMAdmin(REGION, IAM_PATH, aws_profile)

        exists_role_arn = uo_iam_admin.get_role(ROLE_NAME)
        if not exists_role_arn:
            role_arn = uo_iam_admin.create_role(ROLE_NAME, ROLE_DESCRIPTION,
                                                EC2POLICY_FILE)
        else:
            role_arn = exists_role_arn

        s3policy_dict = {}
        s3policy_dict['<bucket_name>'] = uo_storage_bucket.bucketname

        s3_policy_contents = uo_helper_funct.read_and_replace(
            s3policy_dict,
            POLICY_TEMPLATE_FOLDER + S3POLICY_FILE).replace('\n', '')
        s3_policy_contents = json.dumps(s3_policy_contents.replace(' ', ''))

        managed_policy = {
            "Version":
            "2012-10-17",
            "Statement": [{
                "Effect":
                "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": ["arn:aws:s3:::" + uo_storage_bucket.bucketname]
            }, {
                "Effect":
                "Allow",
                "Action": [
                    "s3:PutObject", "s3:GetObject", "s3:DeleteObject",
                    "s3:PutObjectAcl"
                ],
                "Resource":
                ["arn:aws:s3:::" + uo_storage_bucket.bucketname + "/*"]
            }]
        }

        s3_policy_arn_exists = uo_iam_admin.get_policy_arn(
            S3POLICY_NAME)['Arn']
        if not s3_policy_arn_exists:
            s3_policy_arn = uo_iam_admin.create_policy(managed_policy,
                                                       S3POLICY_NAME,
                                                       ROLE_NAME)
        else:
            s3_policy_arn = s3_policy_arn_exists
        uo_iam_admin.attach_policy_role(s3_policy_arn, ROLE_NAME)

        exists_instance_profile = uo_iam_admin.get_instance_profile(
            INSTANCE_PROFILE_NAME)
        if not exists_instance_profile:
            instance_profile_arn = uo_iam_admin.create_instance_profile(
                INSTANCE_PROFILE_NAME)
        else:
            instance_profile_arn = exists_instance_profile
        '''
        Substitution is used to insert values into a template
        file - in this case the aws config
        '''

        config_dict = {}
        config_dict['<instanceprofile>'] = INSTANCE_PROFILE_NAME
        config_dict['<role_arn>'] = role_arn

        config_contents = uo_helper_funct.read_and_replace(
            config_dict, FILE_TEMPLATE_FOLDER + CONFIG_TEMPLATE)

        uo_helper_funct.write_file(config_contents,
                                   DIRNAME + '/' + CONFIG_FILENAME)
        '''
        Add the created role to the instance profile
        '''
        #uo_iam_admin.add_role_instance_profile(INSTANCE_PROFILE_NAME,
        #                                     instance_profile_arn,
        #                                     ROLE_NAME
        #                                    )
        '''
        Instance management - create instance and get its
        instance id
        '''
        uo_instance_mgmt = InstanceManagement(REGION, aws_profile)

        startup_script = UO_HELPER.read_file_contents(FILE_TEMPLATE_FOLDER +
                                                      STARTUP_SCRIPT)

        instance_id = uo_instance_mgmt.create_instance(
            INSTANCE_KEY_FILE_NAME, INSTANCE_KEYPAIR, role_arn, ROLE_NAME,
            startup_script, security_groups, IMAGE_ID)
        '''
        Associate the instance profile with the instance
        '''
        uo_instance_mgmt.associate_profile_to_instance(INSTANCE_PROFILE_NAME,
                                                       instance_profile_arn,
                                                       instance_id)

        assumerole_policy_dict = {}
        assumerole_policy_dict['<role_arn>'] = role_arn
        assumerole_contents = uo_helper_funct.read_and_replace(
            assumerole_policy_dict,
            POLICY_TEMPLATE_FOLDER + ASSUMEROLE_POLICY_FILE)

        assumerole_contents = json.dumps(
            assumerole_contents.replace('\n', '').replace(' ', ''))

        assum_role_policy = {
            "Version": "2012-10-17",
            "Statement": {
                "Effect": "Allow",
                "Action": "sts:AssumeRole",
                "Resource": role_arn
            }
        }

        uo_iam_admin.create_policy(assum_role_policy, EC2POLICY_NAME,
                                   ROLE_NAME)

        uo_instance_mgmt.get_instance_metadata(instance_id)
        ip_address = uo_instance_mgmt.ec2info[instance_id]['public_ip']
        '''
        SSH onto the instance and execute set of commands from a file
        - update and install software e.g. R
        '''
        uo_ssh = SSHInstanceAdmin(INSTANCE_KEY_FILE_NAME, ip_address)
        uo_ssh.ssh_connect(INSTANCE_USER)

        install_script_list = uo_helper_funct.read_to_list(
            FILE_TEMPLATE_FOLDER + '/' + INSTALL_SCRIPT)
        uo_ssh.execute_commands(install_script_list)

        chmod_command_list = []
        cmd_list_env = []
        #Set env variable

        cmd_list_env.append('mkdir ' + AWS_CREDENTIALS_FOLDER)
        cmd_list_env.append('export AWS_ROLE_ARN=' + role_arn)

        uo_ssh.execute_commands(cmd_list_env)

        chmod_command_list.append('chmod 400 ' + AWS_CREDENTIALS_FOLDER +
                                  CONFIG_FILENAME)

        #Upload file to correct credentials location
        uo_ssh.upload_single_file(DIRNAME + '/' + CONFIG_FILENAME,
                                  AWS_CREDENTIALS_FOLDER + CONFIG_FILENAME)
        #Set permissions on AWS credentials
        uo_ssh.execute_commands(chmod_command_list)
        '''
        Get copy bucket
        '''

        copy_bucket_dict = {}
        copy_bucket_list = []  #copy_bucket.sh

        copy_bucket_dict['<bucket_name>'] = uo_storage_bucket.bucketname
        copy_bucket_dict['<file_name>'] = ADDRESS_FILE_PARQUET
        copy_bucket_dict['<location>'] = R_SCRIPT_REMOTE_LOC

        copy_bucket_contents = uo_helper_funct.read_and_replace(
            copy_bucket_dict, FILE_TEMPLATE_FOLDER + COPY_BUCKET_TEMPLATE)

        copy_bucket_list = copy_bucket_contents.splitlines()
        ##Get bucket from S3 to EC2 instance
        uo_ssh.execute_commands(copy_bucket_list)
        '''
        Get R script template
        
        '''
        '''
        Substitution is used to insert values into a template
        file - in this case r script
        '''
        rscript_dict = {}

        rscript_dict['<bucket_name>'] = uo_storage_bucket.bucketname
        rscript_dict['<file_name>'] = ADDRESS_FILE_PARQUET
        rscript_dict['<location>'] = R_SCRIPT_REMOTE_LOC

        rscript_contents = uo_helper_funct.read_and_replace(
            rscript_dict, FILE_TEMPLATE_FOLDER + RSCRIPT_TEMPLATE)

        uo_helper_funct.write_file(rscript_contents, R_SCRIPT)
        '''
        Copy r script to instance
        '''
        uo_ssh.upload_single_file(R_SCRIPT, R_SCRIPT_REMOTE_LOC)

        chmod_command_list_r = []
        chmod_command_list_r.append('sudo chmod +x ' + R_SCRIPT_REMOTE_LOC +
                                    R_SCRIPT)

        #Set permissions on r script
        uo_ssh.execute_commands(chmod_command_list_r)

        run_r_script = []
        run_r_script.append('sudo Rscript ' + R_SCRIPT_REMOTE_LOC + R_SCRIPT)

        uo_ssh.execute_commands(run_r_script)

    except Exception as error:

        LOGGER.error("An Exception occurred convex tech test ")
        LOGGER.error(repr(error))
        raise Exception("Convex Tech test failed!")

    finally:
        '''
        Clean up after run
        '''
        #uo_storage_bucket.delete_bucket(uo_storage_bucket.bucketname)
        #uo_iam_admin.delete_role(ROLE_NAME)

        LOGGER.info('Completed run.')
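main() repeatedly calls read_and_replace with a placeholder dictionary and a template path, so it presumably reads the file and substitutes each '<placeholder>' key with its value. A sketch under that assumption (the real HelperFunctions.read_and_replace may differ):

def read_and_replace(replace_dict, template_path):
    # read a template file and substitute every <placeholder> key with its value
    with open(template_path, 'r') as template_file:
        contents = template_file.read()
    for placeholder, value in replace_dict.items():
        contents = contents.replace(placeholder, value)
    return contents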
Example #15
 def __init__(self):
     self.port = 1009
     self.hashFile = "index.txt"
     self.hs = HelperFunctions()
Example #16
class Stitcher:
    """
    Read a set of images for Panorama stitching
    """
    def __init__(self, BasePath, NumFeatures):
        self.BasePath = BasePath
        InputImageList = []
        for filename in sorted(glob.glob(self.BasePath + '/*.jpg')):
            ImageTemp = cv2.imread(filename)
            InputImageList.append(ImageTemp)
        self.NumFeatures = NumFeatures
        self.Images = np.array(InputImageList)
        self.NumImages = len(InputImageList)
        self.FirstHalf = []
        self.LastHalf = []
        self.MiddleImage = None
        self.HelperFunctions = HelperFunctions()

    def ImageContainers(self):
        self.CenterImgId = self.NumImages // 2  # integer index of the middle image
        self.MiddleImage = self.Images[self.CenterImgId]
        for IdX in range(self.NumImages):
            if (IdX <= self.CenterImgId):
                self.FirstHalf.append(self.Images[IdX])
            else:
                self.LastHalf.append(self.Images[IdX])

    """
	Corner Detection
	Save Corner detection output as corners.png
	"""

    def DetectCornersShiTomasi(self, Image):
        grayImage = np.float32(cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY))
        ShiTomasiCorners = cv2.goodFeaturesToTrack(grayImage, self.NumFeatures,
                                                   0.01, 10)
        ShiTomasiCorners = np.int0(ShiTomasiCorners)
        ShiTomasiCorners = np.reshape(ShiTomasiCorners,
                                      (ShiTomasiCorners.shape[0], 2))
        return ShiTomasiCorners

    """
	Perform ANMS: Adaptive Non-Maximal Suppression
	Save ANMS output as anms.png
	"""

    def DetectCornersHarris(self, InputImage, Threshold=0.005):
        grayImage = np.float32(cv2.cvtColor(InputImage, cv2.COLOR_BGR2GRAY))
        CornerImage = cv2.cornerHarris(grayImage, 2, 3, 0.04)
        tempImage = grayImage.copy()
        tempImage[CornerImage < Threshold * CornerImage.max()] = 0
        return tempImage

    def ANMS(self, CornerScoreImage, Nbest=100):

        LocalMaxima = peak_local_max(CornerScoreImage, min_distance=15)
        r = np.zeros((LocalMaxima.shape[0], 3), dtype=np.float32)
        r[:, 2] = float("inf")
        IdX = 0

        for i in LocalMaxima:
            r[IdX][0] = i[1]
            r[IdX][1] = i[0]
            ED = float("inf")
            for j in LocalMaxima:
                if (CornerScoreImage[j[0], j[1]] > CornerScoreImage[i[0],
                                                                    i[1]]):
                    ED = (j[1] - i[1])**2 + (j[0] - i[0])**2
                if (ED < r[IdX][2]):
                    r[IdX][2] = ED
            IdX += 1
        ind = np.argsort(r[:, 2])
        r = r[ind]

        if debug:
            print("Features after ANMS: " + str(len(r[0:Nbest, 0:2])))
        return r[0:Nbest, 0:2]

    """
	Feature Descriptors
	Save Feature Descriptor output as FD.png
	"""

    def FeatureDescriptor(self, Features, Image):
        FeatureDescriptors = []
        PatchSize = 40
        Image = cv2.cvtColor(Image, cv2.COLOR_BGR2GRAY)

        for feature in Features:
            PatchY = int(feature[0] - PatchSize / 2)
            PatchX = int(feature[1] - PatchSize / 2)
            Patch = Image[PatchX:PatchX + PatchSize, PatchY:PatchY + PatchSize]

            if [Patch.shape[0], Patch.shape[1]] == [40, 40]:
                FeatureSet = []
                FeatureSet.append(feature)
                Patch = cv2.GaussianBlur(Patch, (5, 5), 1.2, 1.4)
                Patch = Patch[0:40:5, 0:40:5]
                # Patch = cv2.resize(Patch, dsize=(8, 8))
                Descriptor = Patch.ravel()
                mu = np.mean(Descriptor)
                sigma = np.std(Descriptor)
                Descriptor = (Descriptor - mu) / sigma
                FeatureSet.append(Descriptor)
                FeatureDescriptors.append(FeatureSet)

        return FeatureDescriptors

    """
	Feature Matching
	Save Feature Matching output as matching.png
	"""

    def FeatureMatching(self, FeatureDescriptor1, FeatureDescriptor2):

        MatchingFeatures = []
        des1 = []
        des2 = []
        for features1 in FeatureDescriptor1:
            best1 = float("inf")
            best2 = float("inf")
            Match = []
            for features2 in FeatureDescriptor2:
                SSD = self.HelperFunctions.ComputeSSD(features1[1],
                                                      features2[1])
                if (SSD < best1 and SSD < best2):
                    best2 = best1
                    best1 = SSD
                    tempBest1 = features1[0]
                    tempBest2 = features2[0]
                    tempdes1 = features1[1]
                    tempdes2 = features2[1]
                if (SSD > best1 and SSD < best2):
                    best2 = SSD
            ratio = float(best1) / float(best2)
            if (ratio < 0.5):
                Match.append(tempBest1)
                Match.append(tempBest2)
                MatchNp = np.array(Match)
                MatchingFeatures.append(MatchNp)
                des1.append(tempdes1)
                des2.append(tempdes2)

        size = 1
        angle = 1
        response = 1
        octave = 1
        class_id = 1
        kp1 = []
        kp2 = []

        for points in MatchingFeatures:
            kp1.append(
                cv2.KeyPoint(x=points[0][0],
                             y=points[0][1],
                             _size=size,
                             _angle=angle,
                             _response=response,
                             _octave=octave,
                             _class_id=class_id))
            kp2.append(
                cv2.KeyPoint(x=points[1][0],
                             y=points[1][1],
                             _size=size,
                             _angle=angle,
                             _response=response,
                             _octave=octave,
                             _class_id=class_id))
        return np.array(MatchingFeatures), np.array(kp1), np.array(
            kp2), np.array(des1), np.array(des2)

    """
	Refine: RANSAC, Estimate Homography
	"""

    def RANSACHomography(self, MatchingFeatures, Nmax, tolerance=0.5):

        InlierPercent = 0.0
        RANSACiter = 0
        maxHits = 0

        while InlierPercent < 0.95 and RANSACiter < Nmax:
            p = random.sample(MatchingFeatures, 4)
            p1 = p[0][0]
            p2 = p[1][0]
            p3 = p[2][0]
            p4 = p[3][0]

            p1d = p[0][1]
            p2d = p[1][1]
            p3d = p[2][1]
            p4d = p[3][1]

            pts1 = np.float32([[p1[1], p1[0]], [p2[1], p2[0]], [p3[1], p3[0]],
                               [p4[1], p4[0]]])

            pts2 = np.float32([[p1d[1], p1d[0]], [p2d[1], p2d[0]],
                               [p3d[1], p3d[0]], [p4d[1], p4d[0]]])

            H = cv2.getPerspectiveTransform(pts1, pts2)

            SSDVal = self.HelperFunctions.SSDRansac(MatchingFeatures, H)
            hits = (SSDVal < tolerance).sum()
            if (hits > maxHits):
                maxHits = hits
                IndicesBest = np.argwhere(SSDVal < tolerance)
            RANSACiter += 1
            InlierPercent = float(hits) / float(len(MatchingFeatures))

        Inliers = [MatchingFeatures[i] for i in IndicesBest]
        Inliers = np.array(Inliers)
        Inliers = np.reshape(Inliers, (Inliers.shape[0], 2, 2))
        # print("Inlier percent")
        # print(InlierPercent)
        # print(Inliers.shape)

        A = np.zeros([2 * len(Inliers), 9])
        idX = 0
        for inliers in Inliers:
            y = inliers[0][1]
            x = inliers[0][0]
            ydash = inliers[1][1]
            xdash = inliers[1][0]
            A[idX][0] = x
            A[idX][1] = y
            A[idX][2] = 1
            A[idX][6] = -xdash * x
            A[idX][7] = -xdash * y
            A[idX][8] = -xdash
            idX += 1
            A[idX][3] = x
            A[idX][4] = y
            A[idX][5] = 1
            A[idX][6] = -ydash * x
            A[idX][7] = -ydash * y
            A[idX][8] = -ydash
            idX += 1

        U, S, V = np.linalg.svd(A, full_matrices=True)
        Hpred = V[8, :].reshape((3, 3))

        return Hpred

    def EstimateHomography(self, Image1, Image2, RansacIter=1000):
        CornersImage1 = self.DetectCornersShiTomasi(Image1)
        CornersImage2 = self.DetectCornersShiTomasi(Image2)
        FeatureDescriptorImage1 = self.FeatureDescriptor(CornersImage1, Image1)
        FeatureDescriptorImage2 = self.FeatureDescriptor(CornersImage2, Image2)
        Matches, Kp1, Kp2, Des1, Des2 = self.FeatureMatching(
            FeatureDescriptorImage1, FeatureDescriptorImage2)
        self.HelperFunctions.DrawMatches(Image1, Image2, Kp1, Kp2)
        H = self.RANSACHomography(Matches, RansacIter)
        Hinv = np.linalg.inv(H)

        return H, Hinv

    """
	Image Warping + Blending
	Save Panorama output as mypano.png
	"""

    def RemoveBlackBoundary(self, ImageIn):
        gray = cv2.cvtColor(ImageIn, cv2.COLOR_BGR2GRAY)
        _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
        _, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
        cnt = contours[0]
        x, y, w, h = cv2.boundingRect(cnt)
        ImageOut = ImageIn[y:y + h, x:x + w]
        return ImageOut

    def Warping(self, Img, Homography, NextShape):
        nH, nW, _ = Img.shape
        Borders = np.array([[0, nW, nW, 0], [0, 0, nH, nH], [1, 1, 1, 1]])
        BordersNew = np.dot(Homography, Borders)
        Ymin = min(BordersNew[1] / BordersNew[2])
        Xmin = min(BordersNew[0] / BordersNew[2])
        Ymax = max(BordersNew[1] / BordersNew[2])
        Xmax = max(BordersNew[0] / BordersNew[2])
        if Ymin < 0:
            MatChange = np.array([[1, 0, -1 * Xmin], [0, 1, -1 * Ymin],
                                  [0, 0, 1]])
            Hnew = np.dot(MatChange, Homography)
            h = int(round(Ymax - Ymin)) + NextShape[0]
        else:
            MatChange = np.array([[1, 0, -1 * Xmin], [0, 1, Ymin], [0, 0, 1]])
            Hnew = np.dot(MatChange, Homography)
            h = int(round(Ymax + Ymin)) + NextShape[0]
        w = int(round(Xmax - Xmin)) + NextShape[1]
        sz = (w, h)
        PanoHolder = cv2.warpPerspective(Img, Hnew, dsize=sz)
        return PanoHolder, int(Xmin), int(Ymin)

    def Blender(self):
        Pano = self.Images[0]
        for NextImage in self.Images[1:]:
            H, Hinv = self.EstimateHomography(Pano, NextImage)
            PanoHolder, oX, oY = self.Warping(Pano, H, NextImage.shape)
            oX = abs(oX)
            oY = abs(oY)
            for IdY in range(oY, NextImage.shape[0] + oY):
                for IdX in range(oX, NextImage.shape[1] + oX):
                    y = IdY - oY
                    x = IdX - oX
                    PanoHolder[IdY, IdX, :] = NextImage[y, x, :]
            # Pano = self.RemoveBlackBoundary(PanoHolder)
            Pano = PanoHolder
        PanoResize = cv2.resize(Pano, (1280, 1024))
        self.HelperFunctions.ShowImage(PanoResize, 'PanoResize')
        PanoResize = cv2.GaussianBlur(PanoResize, (5, 5), 1.2)
        return PanoResize
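FeatureMatching above scores candidate pairs with HelperFunctions.ComputeSSD, which by name is the sum of squared differences between two descriptors. A sketch under that assumption (the real helper is not shown):

import numpy as np

def ComputeSSD(descriptor1, descriptor2):
    # sum of squared differences between two equal-length descriptor vectors
    diff = np.asarray(descriptor1) - np.asarray(descriptor2)
    return float(np.sum(diff * diff))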
Example #17
class DistanceCalc:

    ############################################################
    # constructor
    ############################################################
    def __init__(self, api_key):

        self.api_key = api_key
        self.helper_f = HelperFunctions()
        self.LOGGER = logging.getLogger(__name__)

    ############################################################
    # str
    ############################################################
    def __str__(self):

        return repr("Distance Calc")

    ############################################################
    # Lookup travel time based on google for specified travel
    # time
    ############################################################
    def get_travel_time(self, start_point, end_point, dept_time):
        """Do API postcode lookup for travel time

        Keyword arguments:
        api_key -- URL for the API call
        start_point -- start point (lat,lon)
        end_point -- end point (lat,lon)
        dept_time -- departure time
        """
        ssl._create_default_https_context = ssl._create_unverified_context

        try:

            gmaps = googlemaps.Client(key=self.api_key)
            directions_result = gmaps.directions(
                start_point,  # ("52.141366,-0.479573",
                end_point,  # "52.141366,-0.489573",
                mode="driving",
                avoid="ferries",
                departure_time=dept_time)
            directions_dic = defaultdict(list)

            self.LOGGER.debug(directions_result)

            directions_dic["distance"].append(
                directions_result[0]['legs'][0]['distance']['text'])
            directions_dic["duration"].append(
                directions_result[0]['legs'][0]['duration']['text'])
            directions_dic["time"].append(dept_time)

            return directions_dic
        except Exception as exrec:
            self.LOGGER.error("Error in get_travel_time - please check: %s",
                              str(exrec),
                              exc_info=True)
            raise Exception("Error in get_travel_time")

    ############################################################
    # Get haversine distance
    ############################################################

    def get_haversine_dist(self, start_point, end_point):
        """get haversine distance betweent 2 points

        Keyword arguments:
        start_point -- start point
        end_point -- end point
        """

        dist = haversine(start_point, end_point, unit='mi')

        if math.isnan(dist):
            rv_value = 100000000
        else:
            rv_value = dist

        return rv_value

    ############################################################
    # Get a list of travel time for the nearest stations
    # at 2 different times
    ############################################################
    def get_travel_times(self, start_point, end_point):
        travel_times = []

        # Mon 8am
        current_time = datetime.datetime.now()
        new_period1 = current_time.replace(hour=8,
                                           minute=00,
                                           second=00,
                                           microsecond=0)

        # epoch next monday
        next_monday = self.helper_f.next_weekday(
            new_period1,
            0).timestamp()  # 0 = Monday, 1=Tuesday, 2=Wednesday...

        current_time2 = datetime.datetime.now()
        new_period2 = current_time2.replace(hour=23,
                                            minute=00,
                                            second=00,
                                            microsecond=0)

        next_thursday = self.helper_f.next_weekday(
            new_period2,
            3).timestamp()  # 0 = Monday, 1=Tuesday, 2=Wednesday...
        if str(start_point).find("nan") == False or str(end_point).find(
                "nan") == False:
            self.LOGGER.debug("start or end point is null %s %s",
                              str(start_point), str(end_point))
        else:

            lst_res_mon = self.get_travel_time(start_point, end_point,
                                               next_monday)
            # Thurs 11pm
            lst_res_thur = self.get_travel_time(start_point, end_point,
                                                next_thursday)
            travel_times.append(lst_res_mon)
            travel_times.append(lst_res_thur)

        return travel_times
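get_travel_times relies on helper_f.next_weekday(dt, weekday) to find the next occurrence of a given weekday (0 = Monday). One plausible implementation, assuming exactly that contract (the real HelperFunctions.next_weekday is not shown):

import datetime

def next_weekday(dt, weekday):
    # next datetime strictly after dt that falls on the given weekday (0 = Monday)
    days_ahead = weekday - dt.weekday()
    if days_ahead <= 0:
        days_ahead += 7
    return dt + datetime.timedelta(days=days_ahead)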
Example #18
    def __init__(self, api_key):

        self.api_key = api_key
        self.helper_f = HelperFunctions()
        self.LOGGER = logging.getLogger(__name__)
Example #19
        r_list["lkpaddress"].append(lkp_list)
    """save as json"""
    json.dump(r_list, open(json_file, "w"))


############################################################
# Run
############################################################

if __name__ == "__main__":

    try:

        dirname = os.path.dirname(__file__)
        filename_ini = os.path.join(dirname, 'firestation_ingest.ini')
        UO_HELPER = HelperFunctions()

        configImport = UO_HELPER.load_config(filename_ini)

        LOG_PATH = configImport["logging.log_path"]
        LOG_FILE = configImport["logging.log_file"]
        THE_LOG = LOG_PATH + "\\" + LOG_FILE
        LOGGING_LEVEL = configImport["logging.logginglevel"]
        DISTANCE_MATRIX_API_KEY = configImport["firestation.api_key"]
        POSTCODE_API_URL = configImport["firestation.postcode_api_url"]
        B_TRAVEL = bool(configImport["firestation.b_travel"])
        NUMBER_CLOSEST = configImport["firestation.number_closest"]
        FIRESTATION_URL = configImport["firestation.url"]
        TEMP_FILE = configImport["firestation.temp_file"]
        CSV_FILE = configImport["firestation.csv_file"]
        TABLE_CLASS = configImport["firestation.table_class"]
Example #20
class KGS(object):
    def __init__(self):
        self.exp = Explanation()
        self.hf = HelperFunctions()
        self.port = 21
        self.usedKeysDB = "../../Database/usedKeysDB.txt"
        self.keyDatabase = "../../Database/unUsedKeyDB.txt"
        self.cache = []

    def getKey(self):
        key = ''.join(
            random.choice(string.ascii_uppercase + string.ascii_lowercase +
                          string.digits) for _ in range(7))
        return key

    def generateNKeys(self, n):
        keys = []
        for i in range(n):
            key = self.getKey()
            #while the key is in the used-keys database, generate a new key
            while (self.hf.isKeyInHash(self.usedKeysDB, key)):
                key = self.getKey()
            keys.append(key)
        return keys

    def writeKeysToFile(self, keys, fileName):
        keyString = ""
        for key in keys:
            keyString = keyString + key + "\n"
        self.hf.write(fileName, keyString)

    def fillKeyDatabase(self):
        self.exp.explanation1()
        #if the file doesn't exist then create it and fill it with generated keys,
        #or if the file is empty then fill it with keys
        if ((not self.hf.ifFile(self.keyDatabase))
                or (self.hf.isEmpty(self.keyDatabase))):
            keys = self.generateNKeys(10)
            self.writeKeysToFile(keys, self.keyDatabase)
            return

    def loadKeysInCache(self):
        self.exp.explanation2()
        #if the keys database is empty or missing first create that
        self.fillKeyDatabase()
        #Get 5 keys from database
        for i in range(5):
            key = self.hf.getKey(self.keyDatabase)[:-1]
            #load the key in cache
            self.cache.append(key)
            #put the key in used database
            self.hf.writeToHash(self.usedKeysDB, key, "True")
        print self.cache

    #test function.
    def checkIfKeysInCacheAreInUsedKeysDB(self):
        for key in self.cache:
            if (self.hf.readFromHash(self.usedKeysDB, key) != "True"):
                return False
        return True

    def refreshCache(self):
        if not self.cache:
            self.loadKeysInCache()

    def server(self):
        s = socket.socket()
        s.bind(('', self.port))
        s.listen(5)
        print "Started server"
        return s

    def run(self):
        #Start the server
        s = self.server()
        while True:
            connection, addr = s.accept()
            self.refreshCache()
            connection.send(self.cache.pop())
            connection.close()
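The run() loop above sends exactly one cached key per connection, so a consumer only needs to connect and read. A Python 2 sketch matching those socket conventions (fetch_short_key is a hypothetical name):

import socket

def fetch_short_key(host='localhost', port=21):
    s = socket.socket()
    s.connect((host, port))
    key = s.recv(1024)  # server pops one 7-character key from its cache
    s.close()
    return key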
Example #21
 def __init__(self):
     self.__helper = HelperFunctions()
Example #22
class PageScraper:
    ############################################################
    # constructor
    ############################################################
    def __init__(self, url):

        self.site_url = url
        self.html = ""
        self.helper_f = HelperFunctions()
        self.table_output_rows = []
        self.LOGGER = logging.getLogger(__name__)

    ############################################################
    # str
    ############################################################
    def __str__(self):

        return repr(self.site_url)

    ############################################################
    # Get page html
    ############################################################
    def get_pagedata(self, form_value):
        """get html from page

        Keyword arguments:
        site_url -- Site URL to post to
        form_values -- form values to post
        """
        try:
            output_html = ""
            form_val = {form_value: '%', 'Submit': 'Select'}
            form_data = urllib.parse.urlencode(form_val)
            header = {
                "Content-type": "application/x-www-form-urlencoded",
                "Accept": "text/plain",
                "Referer": self.site_url
            }

            header2 = {}

            header2[
                'User-Agent'] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)" \
                                " Chrome/41.0.2272.101 Safari/537.36"
            header2[
                'Accept'] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
            header2['Content-type'] = "application/x-www-form-urlencoded"

            # body = form_data.encode(encoding='utf-8')
            s = requests.Session()
            # providers = s.post(url, params=form_data, data=form_data, timeout=15, verify=True,headers=header)

            # Open a session first
            s.get(self.site_url)
            # Post form data to get
            r = s.post(self.site_url,
                       data=form_data,
                       headers=header2,
                       verify=True)
            self.html = str(r.text)

            self.LOGGER.debug("Status: %s", str(r.status_code))
            self.LOGGER.debug("reason: %s", str(r.reason))

        except Exception:
            raise Exception(
                "Error in Page_scraper - URL : %s  form value : %s " %
                (self.site_url, form_value))

        finally:
            return self.html

    ############################################################
    # Get table of data
    ############################################################
    def get_table(self, table_class):
        """get table from html page

        Keyword arguments:
        html -- html
        table_class -- class of table to get
        """
        soup = BeautifulSoup(self.html, features="html.parser")
        table = soup.find('table', {'class': table_class})
        self.html_table = table

    ############################################################
    # Write Temp File of html
    ############################################################
    def write_table_to_temp(self, temp_file):
        """get table from html page

        Keyword arguments:
        self -- self
        table_class -- Table Class to get
        """
        with io.open(temp_file, "w", encoding="utf-8") as f:
            f.write(self.html)

    ############################################################
    # take table of data and convert to list
    ############################################################
    def save_tab_as_list(self):
        """get table from html page
        Keyword arguments:
        tab -- table
        csv_file - csv file to write to
        """
        try:
            row_marker = 0
            td_count = 0
            column_marker = 0

            for table_row in self.html_table.findAll('tr'):
                #row 1, store column count and append extra lat & lon cols"""
                if row_marker == 1:
                    td_count = column_marker
                    output_row.append("lat")
                    output_row.append("lon")

                column_marker = 0

                row_marker += 1

                columns = table_row.findAll('td')
                output_row = []

                for column in columns:

                    column_marker += 1

                    #Get link in detail for first column in row"""
                    if column_marker == 1:
                        fs_link = ""

                        for link in column.findAll('a', href=True):
                            """print(link['href'])"""
                            fs_link = link['href']
                            """Get latitude qs value"""
                            lat = self.helper_f.get_qs_value(fs_link, 'lat')
                            """Get longitude qs value"""
                            lon = self.helper_f.get_qs_value(fs_link, 'lon')
                            """TODO get_accuracy(row)"""

                        #Append first column header as not in data
                        if row_marker == 1:
                            output_row.append("Detail")
                        else:
                            output_row.append(column.text + " " + fs_link)

                    else:
                        output_row.append(column.text)

                    #append extra derived cols
                    if column_marker == td_count:
                        output_row.append(str(lat))
                        output_row.append(str(lon))

                self.table_output_rows.append(output_row)

            self.LOGGER.debug("cols: %s", str(td_count))

        except Exception:
            raise Exception("Error in save_tab_as_csv  : ")

    ############################################################
    # Write CSV file
    ############################################################
    def write_csv(self, csv_file):
        """get table from html page
        Keyword arguments:
        csv_file - csv file to write to
        """
        try:
            """Save to CSV"""
            with open(csv_file, 'w', encoding="utf-8") as csvfile:
                writer = csv.writer(csvfile)
                writer.writerows(self.table_output_rows)

        except Exception:
            raise Exception("Error in write_csv - csv_file : %s " % csv_file)
Example #23
    pq.write_table(address_table, ADDRESS_FILE_PARQUET)


############################################################
# Run
############################################################

if __name__ == "__main__":

    try:

        # create LOGGER
        LOGGER = logging.getLogger('convex')
        DIRNAME = os.path.dirname(__file__)
        FILENAME_INI = os.path.join(DIRNAME, 'convex.ini')
        UO_HELPER = HelperFunctions()

        CONFIGIMPORT = UO_HELPER.load_config(FILENAME_INI)

        LOG_PATH = CONFIGIMPORT["logging.log_path"]
        LOG_FILE = CONFIGIMPORT["logging.log_file"]
        THE_LOG = LOG_PATH + "\\" + LOG_FILE
        LOGGING_LEVEL = CONFIGIMPORT["logging.logginglevel"]

        LEVELS = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL
        }