def test_empty_images_histogram_compare_returns_error(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = None

        img_comp = ImageTools()
        img_diff = img_comp._compare_images_histogram(im1, im2)
        self.assertEqual(img_diff, 'One or more images are empty')
    def test_equal_images_histogram_compare_returns_zero(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = im1.copy()

        img_comp = ImageTools()
        img_diff = img_comp._compare_images_histogram(im1, im2)
        self.assertEqual(img_diff, 0)
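
A minimal sketch of one way a histogram-based difference can be computed with PIL, for readers unfamiliar with the idea. It is an illustrative stand-in that mirrors the behaviour the tests above expect, not ImageTools' actual _compare_images_histogram.

# Illustrative only: an RMS difference between the PIL histograms of two images.
# The metric and the empty-image message are assumptions, not the ImageTools code.
import math
from PIL import Image

def histogram_rms_difference(im1, im2):
    if im1 is None or im2 is None:
        return 'One or more images are empty'
    h1, h2 = im1.histogram(), im2.histogram()
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(h1, h2)) / len(h1))

white = Image.new('RGB', (256, 256), 'white')
print(histogram_rms_difference(white, white.copy()))  # 0.0 for identical images
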
Example #3
def parse_disk_image(io_manager: IOManager):
    """
    Parses the disk image accessed through io_manager.
    :param io_manager: the manager used to access the disk image
    :return: FileSystem
    """
    info = InfoAboutImage(io_manager)

    f_processor = ImageTools.FatProcessor(info, io_manager)
    error_detector = ErrorDetector(f_processor)
    d_parser = ImageTools.DirectoryParser(f_processor)
    ft_printer = ImageTools.FileTreePrinter(d_parser)

    if error_detector.check_differences_fats():
        return FileSystem(info, f_processor, {}, error_detector)

    ft_indexer = ImageTools.FatTableIndexer(d_parser)
    full_indexed_fat_table = ft_indexer.get_full_indexed_fat_table()

    if error_detector.analysis_fat_indexed_table(full_indexed_fat_table):
        return FileSystem(info, f_processor, full_indexed_fat_table,
                          error_detector)

    correct_indexed_fat_table = ft_indexer.get_correct_indexed_fat_table()

    file_system = FileSystem(info, f_processor, correct_indexed_fat_table,
                             error_detector)
    file_system.set_file_tree_printer(ft_printer)

    return file_system
    def test_equal_images_histogram_compare_returns_zero(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = im1.copy()

        img_comp = ImageTools()
        img_diff = img_comp._compare_images_histogram(im1, im2)
        self.assertEqual(img_diff, 0)
Example #5
    def __init__(self, img_rgb, pattern_name, threshold, probe_func_list):
        img_tools = ImageTools()

        self.img_rgb = img_rgb
        self.threshold = threshold
        self.img_gray = img_tools.rgb2gray(self.img_rgb, self.threshold)
        self.pattern_name = pattern_name
        self.arr_image = img_tools.img2arr(self.img_gray)

        self.probe_func_list = probe_func_list

        self.lst_near_patterns = []  #...patterns of this class
        self.lst_L2 = []  #...distance L2 by each pattern of this class
        self.ave_contrast = 0
        self.ave_correlation = 0
        self.ave_energy = 0
        self.ave_entropy = 0
        self.ave_homogeneity = 0
        self.l2 = 0
        self.class_name = ''
        self.class_index = 0

        if probe_func_list[0]:
            self.contrast = self.calculateContrast(self.arr_image)
        if probe_func_list[1]:
            self.correlation = self.calculateCorrelation(self.arr_image)
        if probe_func_list[2]:
            self.energy = self.calculateEnergy(self.arr_image)
        if probe_func_list[3]:
            self.entropy = self.calculateEntropy(self.arr_image)
        if probe_func_list[4]:
            self.homogeneity = self.calculateHomogeneity(self.arr_image)
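
The contrast, correlation, energy, entropy and homogeneity attributes match the names of the classic GLCM (Haralick) texture features. Below is a standalone sketch of those measures with scikit-image, under the assumption that the calculate* methods follow the standard GLCM definitions; it is not the project's own implementation.

# Standalone sketch of the classic GLCM texture features (scikit-image >= 0.19).
# Illustrative only; the project's calculate* methods may be defined differently.
import numpy as np
from skimage.feature import graycomatrix, graycoprops

def glcm_features(arr_image, levels=256):
    glcm = graycomatrix(arr_image, distances=[1], angles=[0],
                        levels=levels, symmetric=True, normed=True)
    p = glcm[:, :, 0, 0]
    entropy = -np.sum(p[p > 0] * np.log2(p[p > 0]))  # entropy is not in graycoprops
    return {
        'contrast': graycoprops(glcm, 'contrast')[0, 0],
        'correlation': graycoprops(glcm, 'correlation')[0, 0],
        'energy': graycoprops(glcm, 'energy')[0, 0],
        'homogeneity': graycoprops(glcm, 'homogeneity')[0, 0],
        'entropy': entropy,
    }

print(glcm_features(np.random.randint(0, 256, (64, 64), dtype=np.uint8)))
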
Example #6
def main():
    img_size = 128
    classSize = 2000

    # Loading model from .pkl
    model_file = open('model.pkl', 'rb')
    model = pickle.load(model_file)

    # Loading data
    print("\nImporting data..")
    hotdog_files = ImageTools.parseImagePaths('./img/hotdog/')
    print("\t..done.\n")

    # Preprocess the hotdog files, just like what was done in trainModel.py
    # note that the class label isn't necessary, as that is what we're trying to determine.
    print("\nGreyscaling and Normalizing Images..")
    x, _ = ImageTools.expandClass(hotdog_files, 0, classSize, img_size)
    x = np.array(x)
    x = ImageTools.greyscaleImgs(x)
    x = ImageTools.normalizeImgs(x)
    print("\t..done.\n")

    # Generating results from the model:
    results = model.predict(x)
    mean = np.mean(results)
    stddev = np.std(results)

    print("--")
    print("'Is a hotdog a sandwich?'")
    print("RESULTS:")
    print("\tMean: {}".format(mean))
    print("\tStandard Deviation: {}".format(stddev))
    print("--")
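
ImageTools.greyscaleImgs and ImageTools.normalizeImgs are not shown on this page. The sketch below illustrates the kind of preprocessing those names suggest (luminance conversion and scaling to [0, 1]); the weights and the /255 scaling are assumptions, not the library's code.

# Assumed behaviour of the greyscale/normalize preprocessing, for illustration only.
import numpy as np

def greyscale_imgs(imgs):
    # (n, h, w, 3) RGB batch -> (n, h, w) luminance using the usual Rec.601 weights
    return np.dot(imgs[..., :3].astype(np.float32), [0.299, 0.587, 0.114])

def normalize_imgs(imgs):
    # scale 8-bit pixel values into [0, 1]
    return imgs.astype(np.float32) / 255.0

batch = np.random.randint(0, 256, (4, 128, 128, 3), dtype=np.uint8)
x = normalize_imgs(greyscale_imgs(batch))
print(x.shape, float(x.min()), float(x.max()))
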
    def test_empty_images_histogram_compare_returns_error(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = None

        img_comp = ImageTools()
        img_diff = img_comp._compare_images_histogram(im1, im2)
        self.assertEqual(img_diff, 'One or more images are empty')
    def test_opposite_images_pixel_compare_returns_one_hundred(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        img_comp = ImageTools()
        img_diff = img_comp._compare_images_pixel(im1, im2)
        print(img_diff)
        self.assertEqual(img_diff, 100.0)
    def test_nonexist_imagefiles_return_error(self):
        im1 = Image.new('RGB', (256, 256), 'white')

        im1.save(self.imfile1)

        img_comp = ImageTools()
        img_diff = img_comp.compare_image_files(self.imfile1, 'notreal.jpg')
        self.assertEqual(img_diff, 'Error reading one or more files')
    def test_opposite_images_histogram_compare_returns_nonzero(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        img_comp = ImageTools()
        img_diff = img_comp._compare_images_histogram(im1, im2)
        print(img_diff)
        self.assertGreater(img_diff, 0)
Example #12
 def write_average_frame(self, filename):
     if self.currentAverageFrame is None:
         logger.error('Average frame not created')
         raise ValueError
     #check to see if an error has occurred before this
     if not self.b_continue:
         return
     ImageTools.write_image(filename, self.currentAverageFrame)
    def test_opposite_images_pixel_compare_returns_one_hundred(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        img_comp = ImageTools()
        img_diff = img_comp._compare_images_pixel(im1, im2)
        print(img_diff)
        self.assertEqual(img_diff, 100.0)
Example #14
 def write_average_frame(self,filename):
     if self.currentAverageFrame is None:
         logger.error('Average frame not created')
         raise ValueError
     #check to see if an error has occurred before this
     if not self.b_continue:
         return        
     ImageTools.write_image(filename, self.currentAverageFrame)
    def test_nonexist_imagefiles_return_error(self):
        im1 = Image.new('RGB', (256, 256), 'white')

        im1.save(self.imfile1)

        img_comp = ImageTools()
        img_diff = img_comp.compare_image_files(self.imfile1, 'notreal.jpg')
        self.assertEqual(img_diff, 'Error reading one or more files')
    def test_imagefiles_calls_pixels_algo_by_default(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        im1.save(self.imfile1)
        im2.save(self.imfile2)

        img_comp = ImageTools()
        img_diff = img_comp.compare_image_files(self.imfile1, self.imfile2)
        self.assertEqual(img_diff, 100.0)
    def test_imagefiles_calling_histogram_algo(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        im1.save(self.imfile1)
        im2.save(self.imfile2)

        img_comp = ImageTools()
        img_diff = img_comp.compare_image_files(self.imfile1, self.imfile2, algorithm='histogram')
        self.assertGreater(img_diff, 100.0)
    def test_imagefiles_calls_pixels_algo_by_default(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        im1.save(self.imfile1)
        im2.save(self.imfile2)

        img_comp = ImageTools()
        img_diff = img_comp.compare_image_files(self.imfile1, self.imfile2)
        self.assertEqual(img_diff, 100.0)
    def test_imagefiles_calling_unknown_algo_returns_error(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        im1.save(self.imfile1)
        im2.save(self.imfile2)

        img_comp = ImageTools()
        img_diff = img_comp.compare_image_files(self.imfile1, self.imfile2, algorithm='unknown')
        self.assertEqual(img_diff, 'Unsupported algorithm')
Example #20
 def __init__(self, img_rgb, solar_disk_name, threshold, size,
              probe_func_list):
     img_tools = ImageTools()
     self.size = size
     self.img_rgb = img_rgb
     self.threshold = threshold
     self.img_gray = img_tools.rgb2gray(self.img_rgb, self.threshold)
     self.pattern_name = solar_disk_name
     self.arr_image = img_tools.img2arr(self.img_gray)
     self.lst_windows = self.generateFixedWindows(self.arr_image, self.size,
                                                  probe_func_list)
    def test_imagefiles_calling_unknown_algo_returns_error(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        im1.save(self.imfile1)
        im2.save(self.imfile2)

        img_comp = ImageTools()
        img_diff = img_comp.compare_image_files(self.imfile1,
                                                self.imfile2,
                                                algorithm='unknown')
        self.assertEqual(img_diff, 'Unsupported algorithm')
    def test_imagefiles_calling_histogram_algo(self):
        im1 = Image.new('RGB', (256, 256), 'white')
        im2 = Image.new('RGB', (256, 256), 'black')

        im1.save(self.imfile1)
        im2.save(self.imfile2)

        img_comp = ImageTools()
        img_diff = img_comp.compare_image_files(self.imfile1,
                                                self.imfile2,
                                                algorithm='histogram')
        self.assertGreater(img_diff, 100.0)
Example #23
    def computeNextPath(self):
        # print('Recomputing path...')
        magnitude = 10

        # continue straight until we've found a line
        if not self.foundLine:
            targetPoint = itools.vectorToPoint(self.x, self.y, magnitude, self.degreeRot)
        else:
            targetPoint = (self.x, self.y)

        path = itools.getPixelsBetween(self.x, self.y, targetPoint[0], targetPoint[1])
        # print('New path (rot {}) goes from {} to {}'.format(self.degreeRot, (self.x, self.y), (targetPoint[0], targetPoint[1])))
        self.stepIter = iter(path) # don't initialize iter to first value
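
itools.vectorToPoint and itools.getPixelsBetween are not defined on this page. The hypothetical stand-ins below show the geometry being assumed (degreeRot in degrees, pixel coordinates as (x, y)); they are illustrations, not the project's functions.

import math

def vector_to_point(x, y, magnitude, degree_rot):
    # hypothetical stand-in for itools.vectorToPoint
    rad = math.radians(degree_rot)
    return (int(round(x + magnitude * math.cos(rad))),
            int(round(y + magnitude * math.sin(rad))))

def pixels_between(x0, y0, x1, y1):
    # hypothetical stand-in for itools.getPixelsBetween: naive line sampling
    steps = max(abs(x1 - x0), abs(y1 - y0), 1)
    return [(round(x0 + (x1 - x0) * t / steps), round(y0 + (y1 - y0) * t / steps))
            for t in range(steps + 1)]

print(pixels_between(0, 0, *vector_to_point(0, 0, 10, 45)))
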
Example #24
 def write_frame(self, filename, frameTypes):
     #check to see if an error has occurred before this
     if self.data is None:
         return
     if isinstance(frameTypes, str):
         frameTypes = [frameTypes]
     for frameType in frameTypes:
         assert frameType in ['average', 'stdev']
         if frameType == 'average':
             self.write_average_frame(filename)
         if frameType == 'stdev':
             if self.currentStdevFrame is not None:
                 ImageTools.write_image(filename, self.currentStdevFrame)
Example #25
 def write_frame(self,filename,frameTypes):
     #check to see if an error has occurred before this
     if self.data is None:
         return
     if isinstance(frameTypes,str):
         frameTypes = [frameTypes]
     for frameType in frameTypes:
         assert frameType in ['average','stdev']
         if frameType == 'average':
             self.write_average_frame(filename)
         if frameType == 'stdev':
             if self.currentStdevFrame is not None:
                 ImageTools.write_image(filename,self.currentStdevFrame)
Example #26
 def __init__(self,
              path=TIF_IMAGE,
              n_samples=N_SAMPLES,
              low_res=LOW_RES,
              high_res=HIGH_RES):
     """
     :param path: the path of the tif file (TODO make it more general)
     :param n_samples: the number of wanted samples in the batch.
     :param low_res: the low resolution of the 2d image.
     :param high_res: the high resolution of the 2d image.
     """
     self.path = path
     self.im_3d = imread(path)
     self.min_d = min(self.im_3d.shape)  # the minimal dimension of the 3d
     # image
     self.n_samples = n_samples
     self.low_res = low_res
     self.high_res = high_res
     self.d_train = self.generate_a_random_batch(1)  # test is y slices
     self.g_train_hr = self.generate_a_random_batch(0)  # train
     # is x slices
     ImageTools.show_gray_image(self.g_train_hr[0, 0, :, :])
     self.g_train_no_cbd = ImageTools.cbd_to_grey(self.g_train_hr)
     self.g_train = ImageTools.down_sample(self.g_train_no_cbd)
     ImageTools.show_gray_image(self.g_train[0, 0, :, :])
     # change both test and train to one hot encoding:
     self.ohe_d_train = ImageTools.one_hot_encoding(self.d_train)
     self.ohe_g_train = ImageTools.one_hot_encoding(self.g_train)
     self.save_batches()
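
ImageTools.one_hot_encoding is used above to turn segmented (phase-labelled) slices into channel-per-phase batches. Here is a standalone numpy sketch of that idea, assuming an (n, 1, h, w) batch of integer labels; the library's exact layout may differ.

# Illustrative one-hot encoding of a batch of label images (assumed layout).
import numpy as np

def one_hot_encode(batch):
    # batch: (n, 1, h, w) integer phase labels -> (n, n_phases, h, w) one-hot channels
    labels = np.unique(batch)
    encoded = np.zeros((batch.shape[0], len(labels)) + batch.shape[2:], dtype=np.float32)
    for channel, value in enumerate(labels):
        encoded[:, channel] = (batch[:, 0] == value)
    return encoded

batch = np.random.randint(0, 3, (2, 1, 8, 8))
print(one_hot_encode(batch).shape)  # (2, 3, 8, 8) when three phase values are present
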
Example #27
def main():
    # Read the information from the configuration file

    citycode = Setting.city

    if Setting.downwall:
        wallpath = '/tmp/auw/bing_wall.jpg'
    else:
        if len(sys.argv) == 1 or len(sys.argv[1]) == 0:
            wallpath = Setting.wallpath
        else:
            wallpath = sys.argv[1]
            config = "# -*-coding:utf-8-*-\n" \
                     "# City code\n" \
                     "city = '%s'\n" \
                     "# Whether to download an online wallpaper\n" \
                     "downwall=%s\n" \
                     "# Local wallpaper path\n" \
                     "wallpath = '%s'" \
                     % (citycode, 'False', wallpath)

            config_file = open('Setting.py', 'w')
            config_file.write(config)
            config_file.close()

    # Set the request URL and parameters
    request_url = 'http://d1.weather.com.cn/sk_2d/%s.html?_=%s' % (
        citycode, int(round(time.time() * 1000)))

    request = urllib2.Request(request_url)
    request.add_header('referer',
                       'http://www.weather.com.cn/weather1d/101120101.shtml')
    response = urllib2.urlopen(request)

    # Read the response and convert it to JSON
    result = response.read()
    result = result.replace('var dataSK = ', '')
    result = json.loads(result)

    watermark = ImageTools.toWatermarkImage(result)

    # Download the wallpaper from Bing
    DownBingWallpaper.downNow(wallpath)

    new_wallpath = ImageTools.brand(wallpath, watermark)

    commands.getstatusoutput(
        "gsettings set org.gnome.desktop.background picture-uri %s" %
        new_wallpath)
Example #28
    def make_looped_file(self, name_dir: str):
        """
        Creates a looped file in the directory named name_dir (which must not be the root directory).
        :param name_dir: name of the directory in which the looped file will be created
        :return: None
        """
        empty_entry_point = self._get_free_entry_point_in_dir(name_dir)

        free_clusters = ImageTools.find_empty_clusters(
            3, self._info, self._file_system.get_indexed_fat_table())

        if free_clusters is None:
            raise ValueError(
                "Not enough free image clusters. Clusters required: " + str(3))

        self._dir_parser.create_entry_in_directory(empty_entry_point,
                                                   'ERRORLOOP  ', 0x00,
                                                   free_clusters[0])

        for i in range(len(free_clusters)):
            if i == len(free_clusters) - 1:
                self._ft_proc.write_val_in_all_fat(free_clusters[0],
                                                   free_clusters[i])
            else:
                self._ft_proc.write_val_in_all_fat(free_clusters[i + 1],
                                                   free_clusters[i])
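
make_looped_file deliberately writes a cluster chain whose last cluster points back to its first, which is what the error detector is meant to catch. Below is a standalone illustration of detecting such a loop in a FAT-style table, using a plain dict rather than the project's FatProcessor/ErrorDetector.

# Standalone illustration: detecting a loop in a FAT-style cluster chain.
# The table maps cluster -> next cluster; END stands for an end-of-chain marker.
END = 0x0FFFFFFF

def chain_is_looped(fat_table, start_cluster):
    seen = set()
    cluster = start_cluster
    while cluster != END:
        if cluster in seen:
            return True
        seen.add(cluster)
        cluster = fat_table[cluster]
    return False

looped = {3: 4, 4: 5, 5: 3}      # ERRORLOOP-style chain: the tail points back to the head
healthy = {3: 4, 4: 5, 5: END}
print(chain_is_looped(looped, 3), chain_is_looped(healthy, 3))  # True False
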
Example #29
def compute_lucky_image(stack):
    '''Use the 'lucky' algorithm to compute an average frame from an aligned stack.

    Original code: Gang Huang, Indiana University, 2010.
    Revised to use convolution by Steve Burns, Indiana University, 2012.
    This code may be freely distributed and used in publications with attribution to the original paper.
    Reference: Huang, G., Zhong, Z. Y., Zou, W. Y., and Burns, S. A., "Lucky averaging: quality
    improvement of adaptive optics scanning laser ophthalmoscope images", Optics Letters 36, 3786-3788 (2011).
    '''
    nFrames, height, width = stack.shape

    numFinal = 15
    covStack = np.empty_like(stack)  #stack to hold covariance matrices
    imageStackSorted = np.empty((numFinal, height, width))

    for iFrame in range(nFrames):
        covStack[iFrame, :, :] = ImageTools.comatrix(stack[iFrame, :, :])

    covStackSortIndex = covStack.argsort(0)
    covStackSortIndex[:] = covStackSortIndex[::-1]  #reverse the sort

    #now resort the image stack so that each pixel in the stack is sorted with the highest contrast pixels in the first frame
    for iRow in range(height):
        for iCol in range(width):
            imageStackSorted[:, iRow,
                             iCol] = stack[covStackSortIndex[0:numFinal, iRow,
                                                             iCol], iRow, iCol]

    finalImage = imageStackSorted.mean(axis=0)
    return finalImage
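
ImageTools.comatrix is not shown on this page. The standalone sketch below reproduces the lucky-averaging idea with a local-variance proxy for per-pixel contrast so the ranking-and-averaging step can be run without the library; it is not the published comatrix measure.

# Standalone sketch of lucky averaging: rank each pixel across the stack by a
# local-contrast proxy (local variance here) and average the top frames per pixel.
import numpy as np
from scipy.ndimage import uniform_filter

def lucky_average(stack, num_final=15):
    num_final = min(num_final, stack.shape[0])
    local_mean = uniform_filter(stack, size=(1, 5, 5))
    local_var = uniform_filter(stack ** 2, size=(1, 5, 5)) - local_mean ** 2
    order = np.argsort(-local_var, axis=0)[:num_final]
    best = np.take_along_axis(stack, order, axis=0)
    return best.mean(axis=0)

stack = np.random.rand(30, 64, 64)
print(lucky_average(stack).shape)  # (64, 64)
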
Example #30
def interlaceStack(stack):
    """Attempt to fix poor interlacing in a frame stack
    """
    logger.info('Fixing interlace')
    nFrames, nRows, nCols = stack.shape
    shifts = [ImageTools.getInterlaceShift(stack[iFrame,:,:]) for iFrame in range(nFrames)]
    
    # find the modal shift value
    shift = int(max(set(shifts), key=shifts.count))
    logger.debug('Shifting interlace by %s pixels',shift)
    #allocate a new imageStack
    newStack = np.zeros((nFrames,nRows,nCols+abs(shift)))
    even_rows = np.arange(0,nRows,2)
    odd_rows = np.arange(1,nRows,2)
    
    if shift < 0:
        newStack[:,even_rows,abs(shift):newStack.shape[2]] = stack.data[:,even_rows,:]
        newStack[:,odd_rows,0:nCols] = stack.data[:,odd_rows,:]
    elif shift > 0:
        newStack[:,even_rows,0:nCols] = stack.data[:,even_rows,:]
        newStack[:,odd_rows,abs(shift):newStack.shape[2]] = stack.data[:,odd_rows,:]
    else:
        newStack=stack.data
        
    stack.data=newStack
    
Example #31
def interlaceStack(stack):
    """Attempt to fix poor interlacing in a frame stack
    """
    logger.info('Fixing interlace')
    nFrames, nRows, nCols = stack.shape
    shifts = [
        ImageTools.getInterlaceShift(stack[iFrame, :, :])
        for iFrame in range(nFrames)
    ]

    # find the modal shift value
    shift = int(max(set(shifts), key=shifts.count))
    logger.debug('Shifting interlace by %s pixels', shift)
    #allocate a new imageStack
    newStack = np.zeros((nFrames, nRows, nCols + abs(shift)))
    even_rows = np.arange(0, nRows, 2)
    odd_rows = np.arange(1, nRows, 2)

    if shift < 0:
        newStack[:, even_rows,
                 abs(shift):newStack.shape[2]] = stack.data[:, even_rows, :]
        newStack[:, odd_rows, 0:nCols] = stack.data[:, odd_rows, :]
    elif shift > 0:
        newStack[:, even_rows, 0:nCols] = stack.data[:, even_rows, :]
        newStack[:, odd_rows,
                 abs(shift):newStack.shape[2]] = stack.data[:, odd_rows, :]
    else:
        newStack = stack.data

    stack.data = newStack
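
ImageTools.getInterlaceShift is not included here. One plausible way to estimate the even/odd field offset is to test candidate horizontal shifts and keep the best-correlating one; the sketch below is illustrative only and its sign convention may differ from the library's.

# Illustrative estimate of the horizontal shift between even and odd fields of a frame.
import numpy as np

def estimate_interlace_shift(frame, max_shift=10):
    # compare the even-row field against horizontally shifted copies of the odd-row field
    even, odd = frame[0::2, :], frame[1::2, :]
    rows = min(even.shape[0], odd.shape[0])
    even, odd = even[:rows], odd[:rows]
    best_shift, best_corr = 0, -np.inf
    for shift in range(-max_shift, max_shift + 1):
        corr = np.corrcoef(even.ravel(), np.roll(odd, shift, axis=1).ravel())[0, 1]
        if corr > best_corr:
            best_shift, best_corr = shift, corr
    return best_shift

frame = np.random.rand(64, 64)
frame[1::2, :] = np.roll(frame[0::2, :], 3, axis=1)  # fake a 3-pixel interlace offset
print(estimate_interlace_shift(frame))  # -3 with this convention
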
Example #32
def compute_lucky_image(stack):
    '''Use the 'lucky' algorithm to compute an average frame from an aligned stack.

    Original code: Gang Huang, Indiana University, 2010.
    Revised to use convolution by Steve Burns, Indiana University, 2012.
    This code may be freely distributed and used in publications with attribution to the original paper.
    Reference: Huang, G., Zhong, Z. Y., Zou, W. Y., and Burns, S. A., "Lucky averaging: quality
    improvement of adaptive optics scanning laser ophthalmoscope images", Optics Letters 36, 3786-3788 (2011).
    '''
    nFrames, height, width = stack.shape
    
    numFinal = 15 
    covStack = np.empty_like(stack) #stack to hold covariance matrices
    imageStackSorted = np.empty((numFinal,height,width))
    
    for iFrame in range(nFrames):
        covStack[iFrame,:,:] = ImageTools.comatrix(stack[iFrame,:,:])
        
    covStackSortIndex = covStack.argsort(0)
    covStackSortIndex[:] = covStackSortIndex[::-1] #reverse the sort
    
    #now resort the image stack so that each pixel in the stack is sorted with the highest contrast pixels in the first frame
    for iRow in range(height):
        for iCol in range(width):
            imageStackSorted[:,iRow,iCol] = stack[covStackSortIndex[0:numFinal,iRow,iCol],iRow,iCol]
            
    finalImage = imageStackSorted.mean(axis=0)
    return finalImage
Example #33
 def fixed_align_frames(self,maxDisplacement=50):
     '''perform fixed alignment on the framestack
     maxDisplacement=50 - maximum allowed displacement; frames displaced by more than this are removed from the stack'''
     if self.data is None:
         logger.warning('No frames found')
         return
         
     if self.data.templateFrame is None:
         logger.warning('template frame not set')
         return
     
     framesToProcess = [i for i in self.data.frameIds if i != self.data.templateFrameId]
     
     midRow = int(self.data.frameWidth / 2)
     midCol = int(self.data.frameHeight / 2)        
     
     targetFrame = self.data.templateFrame
     targetFrame = targetFrame[midRow - self.largeFrameSize : midRow + self.largeFrameSize,
                               midCol - self.largeFrameSize : midCol + self.largeFrameSize]
     
     results = []
     #ensure the target frame is included in the output
     results.append({'frameid':self.data.templateFrameId,
                     'correlation':1,
                     'shift':(0,0)})
     
     for iFrame in framesToProcess:
         templateFrame = self.data.get_frame_by_id(iFrame)
         templateFrame = templateFrame[midRow - self.smallFrameSize : midRow + self.smallFrameSize,
                                       midCol - self.smallFrameSize : midCol + self.smallFrameSize]
     
         displacement = ImageTools.find_frame_shift(targetFrame,
                                                    templateFrame,
                                                    topLeft=[(midRow - self.largeFrameSize,midCol - self.largeFrameSize),
                                                             (midRow - self.smallFrameSize,midCol - self.smallFrameSize)],
                                                    applyBlur=True,
                                                    method='xcorr',
                                                    attemptSubPixelAlignment=False)
         
         results.append({'frameid':iFrame,
                         'correlation':displacement['maxcorr'],
                         'shift':displacement['coords']})
     #Check displacement is less than maxDisplacement pixels
     good_results = [result for result in results 
                     if abs(result['shift'][1])<=maxDisplacement 
                     and abs(result['shift'][0]) <= maxDisplacement]
     bad_results = [result['frameid'] for result in results 
                     if abs(result['shift'][1]) > maxDisplacement 
                     or abs(result['shift'][0]) > maxDisplacement]
     logger.info('Removing frames {} for too large displacements'.format(bad_results))
     if not good_results:
         #no good frames found
         logger.warning('frame displacements are too large')
         raise RuntimeError('frame displacements are too large')
     
     alignedData = StackTools.apply_displacements(self.data,good_results)
     self.data = alignedData
     
     self.currentStack = alignedData
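
ImageTools.find_frame_shift itself is not shown on this page. As a self-contained reference for what an 'xcorr'-style shift estimate does, the sketch below recovers a (row, col) offset between two frames by FFT-based phase correlation; it is not the library's implementation and ignores the topLeft/blur/sub-pixel options used above.

import numpy as np

def estimate_shift(reference, moved):
    # peak of the normalised cross-power spectrum gives the cyclic shift of
    # `moved` relative to `reference`
    cross_power = np.fft.fft2(moved) * np.conj(np.fft.fft2(reference))
    cross_power /= np.abs(cross_power) + 1e-12
    corr = np.fft.ifft2(cross_power).real
    peak = np.array(np.unravel_index(np.argmax(corr), corr.shape))
    dims = np.array(corr.shape)
    peak[peak > dims // 2] -= dims[peak > dims // 2]  # wrap to signed offsets
    return tuple(int(p) for p in peak)

reference = np.random.rand(128, 128)
moved = np.roll(np.roll(reference, 5, axis=0), -7, axis=1)
print(estimate_shift(reference, moved))  # (5, -7)
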
Example #34
 def save_image_url_to_ftp(self, img_url, file_name, session):
     ftp_path = 'ImageLib/' + self.manufacturer_long_name + '/'
     try:
         file = ImageTools.resize_url_for_ebay(img_url)
         temp_picture = BytesIO()
         file.save(temp_picture, 'jpeg')
         temp_picture.seek(0)
         session.storbinary("STOR " + ftp_path + file_name, temp_picture)
         file.close()
         return 'content.powerequipdeals.com/ImageLib/' + self.manufacturer_long_name + '/' + file_name
     except Exception:
         return ''
Example #35
 def fix_looped_files(self):
     """
     Removes the looped files that were found.
     :return: None
     """
     dir_parser = ImageTools.DirectoryParser(self._fat_proc)
     for entry in self.looped_files:
         dir_parser.delete_entry_in_directory(
             entry.dir_entry_info.entry_point)
         self._name_of_indexed_files_to_remove.add(
             entry.dir_entry_info.name)
     self.looped_files = []
Example #36
 def fix_intersecting_files(self):
     """
     Removes the intersecting files that were found.
     :return: None
     """
     dir_parser = ImageTools.DirectoryParser(self._fat_proc)
     for list_entries in self.intersecting_files:
         for entry in list_entries:
             dir_parser.delete_entry_in_directory(
                 entry.dir_entry_info.entry_point)
             self._name_of_indexed_files_to_remove.add(
                 entry.dir_entry_info.name)
     self.intersecting_files = []
Example #37
    def make_intersecting_files(self, name_dir: str):
        """
        Creates two intersecting files in the directory named name_dir (which must not be the root directory).
        :param name_dir: name of the directory in which the files will be created
        :return: None
        """
        empty_entry_point = self._get_free_entry_point_in_dir(name_dir)
        free_clusters = ImageTools.find_empty_clusters(
            3, self._info, self._file_system.get_indexed_fat_table())

        if free_clusters is None:
            raise ValueError(
                "Not enough free image clusters. Clusters required: " + str(3))

        self._dir_parser.create_entry_in_directory(empty_entry_point,
                                                   'ERRINTERSEC', 0x00,
                                                   free_clusters[0])

        for i in range(len(free_clusters)):
            if i == len(free_clusters) - 1:
                self._ft_proc.write_val_in_all_fat(self.end_clus_val,
                                                   free_clusters[i])
            else:
                self._ft_proc.write_val_in_all_fat(free_clusters[i + 1],
                                                   free_clusters[i])

        empty_entry_point = self._get_free_entry_point_in_dir(name_dir)
        new_free_clusters = ImageTools.find_empty_clusters(
            1, self._info, self._file_system.get_indexed_fat_table())

        if new_free_clusters is None:
            raise ValueError(
                "Not enough free image clusters. Clusters required: " + str(1))

        self._dir_parser.create_entry_in_directory(empty_entry_point,
                                                   'ERRINTERS 2', 0x00,
                                                   new_free_clusters[0])
        self._ft_proc.write_val_in_all_fat(free_clusters[1],
                                           new_free_clusters[0])
def _complete_align_frame(image,frameid):
    """Perform strip alignment on a frame
    Expects to be called from complete_align_parallel
    Uses some objects placed in shared memory
    """
    #start building the output list
    result = {'frameid':frameid,'stripResults':[]}
    #rebuild the target image from the shared data
    targetFrameData = np.asarray(sharedTargetFrameData).reshape(image.shape) #assume that target frame and procFrame are the same shape
    smallRowStart = np.asarray(sharedSmallRowStart)
    largeRowStart = np.asarray(sharedLargeRowStart)
    smallColStart = np.asarray(sharedSmallColStart)[0]
    largeColStart = np.asarray(sharedLargeColStart)[0]
    largeSzRow = np.asarray(sharedLargeSzRow)[0]
    largeSzCol = np.asarray(sharedLargeSzCol)[0]
    smallSzRow = np.asarray(sharedSmallSzRow)[0]
    smallSzCol = np.asarray(sharedSmallSzCol)[0]
    
    
    #apply a random mask to the processed frame
    mask = np.zeros(image.shape,dtype=np.bool)
    mask[image > 0] = 1
    image = np.ma.array(image,
                        mask=~mask)
    randomData = image.std() * np.random.standard_normal(image.shape) + image.mean()
    image = (image.data * ~image.mask) + (randomData * image.mask) #no longer a masked array    
    
    for idxStrip in range(len(sharedSmallRowStart)):
        #loop through the strips here
        #print('from:{}, to:{}'.format(smallColStart[0],smallColStart[0] + smallSzCol))
        smallStrip = image[smallRowStart[idxStrip]:smallRowStart[idxStrip]+smallSzRow,
                           smallColStart:smallColStart + smallSzCol]
       
        largeStrip = targetFrameData[largeRowStart[idxStrip]:largeRowStart[idxStrip]+largeSzRow,
                                     largeColStart:largeColStart + largeSzCol]
        
        displacement = ImageTools.find_frame_shift(largeStrip, 
                                            smallStrip,
                                            topLeft=[(largeRowStart[idxStrip],largeColStart),
                                                     (smallRowStart[idxStrip],smallColStart)],
                                            method='xcorr',
                                            applyBlur=True,
                                            attemptSubPixelAlignment=True)
        #the offsets returned here are for the small strip within the large strip
        #coords = displacement['coords']
        #displacement['coords'] = (coords[0] + largeRowStart[idxStrip],
                                  #coords[1] + largeColStart)
        result['stripResults'].append(displacement)
    logger.debug('done parallel frame{}'.format(frameid))
    return result
Example #39
 def __init__(self, nro_line, nro_column, arr_window, probe_func_list):
     img_tools = ImageTools()
     self.nro_line = nro_line
     self.nro_column = nro_column
     self.arr_image = arr_window
     if probe_func_list[0]:
         self.contrast = self.calculateContrast(self.arr_image)
     if probe_func_list[1]:
         self.correlation = self.calculateCorrelation(self.arr_image)
     if probe_func_list[2]:
         self.energy = self.calculateEnergy(self.arr_image)
     if probe_func_list[3]:
         self.entropy = self.calculateEntropy(self.arr_image)
     if probe_func_list[4]:
         self.homogeneity = self.calculateHomogeneity(self.arr_image)
Example #40
    def getPathsForSpineExtension(self, arcNum, n):
        if arcNum >= len(
                self.arcPixels):  # not initialized with forceInitialized
            print("Error: Arc {} hasn't been initialized yet".format(arcNum))
            return

        # get end points of the spine
        spineEndPoints = self.getSpineEndPoints(arcNum)

        # keep track of the points used for linear regression (for drawing)
        allLinRegPoints = set()

        # keep track of both paths to return
        paths = []

        for ep in spineEndPoints:  # for each end point
            # get which endpoint this is
            forwardEnd = len(self.getNextSpinePixels(ep)) == 0
            dirKey = "prev" if forwardEnd else "next"

            # this endpoints points for linear regression
            epLinRegPoints = []

            # iterate into the arc and "chop off" the last couple points
            currPoint = ep
            for _ in range(0, POINTS_TO_CUT):
                nextPoints = self._getSpineNeighbors(currPoint, dirKey)
                if len(nextPoints) > 1:
                    print("Error: There was more than one neighbor.")
                currPoint = nextPoints[0]

            # currPoint is the new endpoint to be used for linreg
            for _ in range(0, POINTS_FOR_LINREG):
                epLinRegPoints.append(currPoint)
                allLinRegPoints.add(currPoint)
                nextPoints = self._getSpineNeighbors(currPoint, dirKey)
                if len(nextPoints) > 1:
                    print("Error: There was more than one neighbor.")
                currPoint = nextPoints[0]

            # get n-length path from the endpoint using the linregpoints
            epLinRegPoints.reverse()  # points added in the wrong direction
            path, r2 = itools.interpolateToPath(epLinRegPoints, n, ep)

            paths.append(path)
        # now we have two n-length paths extending out of both endpoints
        return paths
Example #41
def main():
    img_size = 128
    classSize = 5000
    num_epochs = 15

    # Loading Data
    print("\nImporting data..")
    food_files = ImageTools.parseImagePaths('./img/food/')
    sandwich_files = ImageTools.parseImagePaths('./img/sandwich/')
    print("\t..done.\n")

    print("\nAssigning Labels, Generating more images via transformation..")
    print("\tParsing/Labeling foods (sandwiches exclusive)..")
    food_x, food_y = ImageTools.expandClass(food_files, 0, classSize, img_size)
    print("\t\t..done.")
    print("\tParsing/Labeling sandwiches..")
    sandwich_x, sandwich_y = ImageTools.expandClass(sandwich_files, 1,
                                                    classSize, img_size)
    print("\t\t..done.\n")

    # Arranging
    X = np.array(food_x + sandwich_x)
    y = np.array(food_y + sandwich_y)

    # Greyscaling and normalizing inputs to reduce features and improve comparability
    print("\nGreyscaling and Normalizing Images..")
    X = ImageTools.greyscaleImgs(X)
    X = ImageTools.normalizeImgs(X)
    print("\t..done.\n")
    y = to_categorical(y)

    # Train n' test:
    print("\nSplitting data into training and testing..")
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=np.random.randint(0, 100))
    print("\t..done.\n")

    print("\tCalling model..")
    model = network(img_size)  # Calling of CNN
    model.compile('adam', 'categorical_crossentropy', ['accuracy'])
    print("\t..done.\n")

    print("\nTraining Model..")
    model.fit(X_train, y_train, nb_epoch=num_epochs, validation_split=0.1)
    print("\t..done.\n")

    # Saving model
    print("\nPickling and saving model as 'model.pkl'...")
    modelsave = open('model.pkl', 'wb')
    pickle.dump(model, modelsave)
    modelsave.close()
    print("\t..done.\n")
Example #42
def compute_variance_image(image,medImage, idx = None):
    """Compute the variance image of a frame
    medImage - an ndarray of same size as image with stack median values for each pixel, contains np.nan values outside of the mask region
    """
    assert len(image.shape) == 2, 'Expected a height x width image'
    assert np.all(np.equal(image.shape, medImage.shape))
    logger.debug('calculating variance for frame: {}'.format(idx))
    image = image.astype(np.float32) #medianBlur function only works on 32bit floats
    image = cv2.medianBlur(image,3)
    
    #mask the image
    medMask = image > 0
    medMask = cv2.erode(medMask.astype(np.float32), np.ones((3, 3)), iterations=1)
    image = image * medMask
    image = image + (np.logical_not(medMask) * medImage)
    
    image = ImageTools.unsharp(image,21,3)
    if idx is not None:
        return [idx, image]
    else:
        return image
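
ImageTools.unsharp is not shown; the sketch below is a common unsharp-mask formulation with OpenCV, assuming the (21, 3) arguments above are a blur kernel size and a sharpening amount. That interpretation is an assumption, not the library's definition.

import cv2
import numpy as np

def unsharp(image, kernel_size=21, amount=3.0):
    # sharpen by adding back the difference between the image and a blurred copy
    blurred = cv2.GaussianBlur(image, (kernel_size, kernel_size), 0)
    return cv2.addWeighted(image, 1.0 + amount, blurred, -amount, 0)

img = np.random.rand(64, 64).astype(np.float32)
print(unsharp(img).shape)  # (64, 64)
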
Example #43
    def set_mask(self,roi=None):
        """Create a mask for the image
        Params:
        roi - [(x1,y1),(x2,y2)]
        
        If roi is None user is prompted to draw a mask, otherwise mask is created from roi"""
        if roi is None:
            mask = ImageTools.click_and_crop(self.data[0:,:,],types=['mask'])
            self.mask = mask['mask']
        else:
            x1,y1 = roi[0]
            x2,y2 = roi[1]

            assert x1 >= 0 
            assert x2 >= x1 and x2 <= self.data.frameWidth
            assert y1 >= 0
            assert y2 >= y1 and y2 <= self.data.frameHeight
            
            mask = np.zeros((self.data.frameHeight,self.data.frameWidth),dtype=np.bool)
            mask[y1:y2, x1:x2] = 1
            self.mask = mask
Example #44
    def set_mask(self, roi=None):
        """Create a mask for the image
        Params:
        roi - [(x1,y1),(x2,y2)]

        If roi is None user is prompted to draw a mask, otherwise mask is created from roi"""
        if roi is None:
            mask = ImageTools.click_and_crop(self.data[0:, :, ],
                                             types=['mask'])
            self.mask = mask['mask']
        else:
            x1, y1 = roi[0]
            x2, y2 = roi[1]

            assert x1 >= 0
            assert x2 >= x1 and x2 <= self.data.frameWidth
            assert y1 >= 0
            assert y2 >= y1 and y2 <= self.data.frameHeight

            mask = np.zeros((self.data.frameHeight, self.data.frameWidth),
                            dtype=np.bool)
            mask[y1:y2, x1:x2] = 1
            self.mask = mask
Example #45
def handleConnection(camera, connection):
	# Read the input and send the data
	detectColor = (0, 0, 255)
	pixel = ImageTools.detectColor(camera, detectColor)
	direction = float(pixel[0]) / camera.resolution[0] * 2 - 1

	# Create left speed factor and increase when robot is turning left
	leftSpeedFactor = 1
	if direction > 0:
		leftSpeedFactor = 1 + direction

	# Create right speed factor and increase when robot is turning right
	rightSpeedFactor = 1
	if direction < 0:
		rightSpeedFactor = 1 - direction

	# Create left and right speeds
	leftSpeed = int(leftSpeedFactor * motorSpeed)
	rightSpeed = int(rightSpeedFactor * motorSpeed)
	print(str(leftSpeed) + ', ' + str(rightSpeed))

	# Send the values to the ev3 connection
	connection.send('left run ' + str(leftSpeed))
	connection.send('right run ' + str(rightSpeed))
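
To make the steering arithmetic concrete: with a 320-pixel-wide frame and the colour detected at x = 240, direction = 240 / 320 * 2 - 1 = 0.5, so only the left wheel speeds up. A tiny standalone check follows (motorSpeed and the frame width are assumed sample values):

motorSpeed = 200                        # assumed value; defined elsewhere in the example
pixel_x, frame_width = 240, 320         # assumed sample detection

direction = float(pixel_x) / frame_width * 2 - 1          # 0.5, i.e. right of centre
leftSpeedFactor = 1 + direction if direction > 0 else 1   # 1.5
rightSpeedFactor = 1 - direction if direction < 0 else 1  # 1.0
print(int(leftSpeedFactor * motorSpeed), int(rightSpeedFactor * motorSpeed))  # 300 200
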
Example #46
    def filter_frames(self, minCorr=0.38):
        '''Perform an initial filtering on a frame set
        filterFrames()
        minCorr default 0.38 is ok for cones, other structures require lower values
        '''
        #check to see if an error has occurred before this
        if not self.b_continue:
            return
        
        framestack = self.data
        
        # calculate mean brightness for each frame, 
        #  if framestack is a masked array masked values are ignored
        frame_brightnesses = np.apply_over_axes(np.mean, framestack, [1,2]).flatten()
        max_bright = frame_brightnesses.max()
        
        #find frame index for frames >50% max_bright
        #  frames not in this list will be excluded
        good_brights = np.array(frame_brightnesses > max_bright * 0.5, dtype=np.bool)
        #good_brights = [i for i, val in enumerate(frame_brightnesses) if val > 0.5* max_bright]
        brightestFrames = np.array(frame_brightnesses > max_bright * 0.85, dtype=np.bool)
        
        framelist = np.where(good_brights)[0]
        framestack.filter_frames_by_idx(good_brights) # only going to use good frames from here on.

        if len(framelist) < 1:
            logger.error("No good frames found")
            self.data = None
            raise RuntimeError("No good frames found:Brightness too low")
    
        results = []

        midRow = int(framestack.frameWidth / 2)
        midCol = int(framestack.frameHeight / 2)        

        for iFrame in np.arange(1,len(framelist)):
            currImage = framestack[iFrame - 1,:,:] #target frame
            tempImage = framestack[iFrame,:,:] #template frame
            
            shear = ImageTools.find_frame_shear(currImage, tempImage)
            tempImage = tempImage[midRow - self.templateSize // 2 : midRow + self.templateSize // 2,
                                  midCol - self.templateSize // 2 : midCol + self.templateSize // 2]
                
            displacement = ImageTools.find_frame_shift(currImage,
                                                       tempImage,
                                                       topLeft=[(0,0),
                                                                (midRow - self.templateSize // 2, midCol - self.templateSize // 2)],
                                                       method='xcorr',
                                                       applyBlur=True,
                                                       attemptSubPixelAlignment=False)
            motion = (displacement['coords'][0]**2 + displacement['coords'][1]**2)**0.5
            results.append({'frameid':framestack.frameIds[iFrame],
                            'shear':shear['shearval'],
                            'correlation':displacement['maxcorr'],
                            'shift':displacement['coords'],
                            'motion':motion})
        #filter frames where shear > 20
        #results = [r for r in results if r['motion'] <= 20]
        #if len(results) < 1:
            #logger.error("No good frames found")
            #self.data = None
            #raise RuntimeError("No good frames found:Shear too high")

        #data for frame 0 is missing, use the data from the first remaining frame
        #r=[r for r in results if r['frameid'] == 1]
        if not results:
            raise RuntimeError('Could not get displacements')
        r = dict(results[0])  # make a copy of this item
        r['frameid'] = framelist[0]
        r['shift'] = (0, 0)
        r['motion'] = 0
        results.append(r)
            
        maxCorr = max([result['correlation'] for result in results])
        
        if maxCorr < minCorr:
            #all correlations are crummy, just bail
            #TODO
            logger.warning('No good frames found')
            raise RuntimeError("No good frames found:Correlation too low")
        else:
            goodFrames = [result['frameid'] for result in results if result['shear'] < 20 and result['correlation'] > 0.5 * maxCorr and result['motion'] < 50 ]
            badFrames = [frameid for frameid in self.data.frameIds if frameid not in goodFrames]
            if not goodFrames:
                logger.warning('No good frames found')
                raise RuntimeError('No good frames found:Group criteria (Shear, Correlation, Motion) not met.')
                
            logger.info('Removing frames {} due to brightness or shear'.format(badFrames))
            self.data.filter_frames_by_id(goodFrames)
            self.data.templateFrameId = goodFrames[frame_brightnesses[goodFrames].argmax()] #return the brightest of the remaining frames as a potential template
            self.filterResults = results    #store this for debugging
Example #47
 def complete_align(self,minCorr = 0.38):
     """Takes a roughly aligned stack and performs a complete alignment
     minCorr (default 0.38, minimum correlation for inclusion in the output stack)
     """        
     if self.data is None:
         logger.warning('Aborting:No good frames found')
         return
     nrows,ncols = self.data.frameHeight, self.data.frameWidth
     
     targetFrameData = self.data.templateFrame
     framesToProcess = [frameid for frameid in self.data.frameIds if not frameid == self.data.templateFrameId]
     #apply a mask to the target frame
     mask = np.zeros(targetFrameData.shape,dtype=np.bool)
     mask[targetFrameData > 0] = 1
     
     #convert the targetFrameData to a masked array for simple calculation of means
     targetFrameData = np.ma.array(targetFrameData,
                                   mask=~mask)
     randomData = targetFrameData.std() * np.random.standard_normal(targetFrameData.shape) + targetFrameData.mean()
     
     targetFrameData = (targetFrameData.data * ~targetFrameData.mask) + (randomData * targetFrameData.mask) #no longer a masked array
     
     #setup the row indices
     defaultStep = int((nrows - self.smallSzRow + 1) / (self.numberPointsToAlign))
     smallRowStart = np.array(range(self.numberPointsToAlign)) * defaultStep
     
     #the large rows should be centered on the small rows
     halfDifference = int((self.largeSzRow - self.smallSzRow) / 2)
     largeRowStart = smallRowStart - halfDifference # this gives some values out of bounds
     largeRowStart[largeRowStart < 0] = 0
     maxRowStart = nrows - self.largeSzRow
     largeRowStart[largeRowStart > maxRowStart] = maxRowStart
     
     smallColStart = (ncols // 2) - (self.smallSzCol // 2)
     largeColStart = (ncols // 2) - (self.largeSzCol // 2)
     
     results = []
     for frameId in framesToProcess:
         #loop through all the frames here
         #need to generate a new mask for each frame
         image = self.data.get_frame_by_id(frameId)
         mask = np.zeros(image.shape,dtype=np.bool)
         mask[image > 0] = 1
         image = np.ma.array(image,
                             mask=~mask)
         randomData = image.std() * np.random.standard_normal(image.shape) + image.mean()
         image = (image.data * ~image.mask) + (randomData * image.mask) #no longer a masked array
         results.append({'frameid':frameId,'stripResults':[]})
         for idxStrip in range(len(smallRowStart)):
             #loop through the strips here
             stripResults = [result['stripResults'] for result in results if result['frameid'] == frameId][0]
             smallStrip = image[smallRowStart[idxStrip]:smallRowStart[idxStrip]+self.smallSzRow,
                                smallColStart:smallColStart + self.smallSzCol]
         
             largeStrip = targetFrameData[largeRowStart[idxStrip]:largeRowStart[idxStrip]+self.largeSzRow,
                                          largeColStart:largeColStart + self.largeSzCol]
             
             displacement = ImageTools.find_frame_shift(largeStrip, 
                                                 smallStrip,
                                                 topLeft=[(largeRowStart[idxStrip],largeColStart),
                                                          (smallRowStart[idxStrip],smallColStart)],
                                                 method='xcorr',
                                                 applyBlur=True,
                                                 attemptSubPixelAlignment=True)
             #the offsets returned here are for the small strip within the large strip
             #coords = displacement['coords']
             #displacement['coords'] = (coords[0] + largeRowStart[idxStrip],
                                       #coords[1] + largeColStart)
             stripResults.append(displacement)
             
     newCoords = self._get_coords(nrows, ncols)
     timetics=[]
     for jndx in range(self.numberPointsToAlign):
         timetics.append(newCoords['times'][(smallRowStart[jndx]+int(self.smallSzRow/2)),
                                            (smallColStart+int(self.smallSzCol/2)-1)])
         
     self.timeTics = np.array(timetics)
     self.times = newCoords['times']
     alignmentSplines = self._make_valid_points(results,minCorr)
     self.data = self.fast_align(alignmentSplines)