Example 1
    def reshape_images(cls, source_folder, target_folder, height=128, width=128,
                       extensions=('.jpg', '.jpeg', '.png')):
        """ copy images and reshape them"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Resizing '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.reshape_images(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   height, width, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                    image_resized = misc.imresize(image, (height, width))
                    misc.imsave(target_folder + "/" + filename, image_resized)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, mode="RGB")
                            image_resized = misc.imresize(image, (height, width))
                            misc.imsave(target_folder + "/" + filename, image_resized)
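Note: scipy.ndimage.imread, scipy.misc.imresize, and scipy.misc.imsave, used throughout these examples, were deprecated in SciPy 1.0 and removed in SciPy 1.2. A minimal sketch of the same read/resize/write step on a current stack, assuming the imageio and Pillow packages are installed:

# Hedged sketch: modern replacement for the deprecated SciPy image helpers above,
# assuming imageio and Pillow are installed.
import imageio
import numpy as np
from PIL import Image

def resize_copy(src_path, dst_path, height=128, width=128):
    image = imageio.imread(src_path)                           # replaces ndimage.imread
    resized = Image.fromarray(image).resize((width, height))   # replaces misc.imresize
    imageio.imwrite(dst_path, np.asarray(resized))             # replaces misc.imsave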
Example 2
def main():
    if PRESERVE_COLOR:
        image = imread(IMAGE_FILE)
    else:
        image = imread(IMAGE_FILE, flatten=True)
    image_out = reduce_image_artifacts(image)
    image_out = reduce_image_artifacts(image_out)

    plt.figure(figsize=(20, 10))
    # display original
    ax = plt.subplot(1, 2, 1)
    plt.imshow(image)
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax.set_title("Compressed")

    # display cleaned
    ax = plt.subplot(1, 2, 2)
    plt.imshow(image_out)
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax.set_title("Cleaned")
    plt.show()

    if SAVE_IMAGE:
        imsave(IMAGE_FILE.split("/")[-1]+'.png', image_out)

    return
Example 3
    def convert_to_array(cls, source_folder, target_folder, create_labels_file=False,
                       extensions=('.jpg', '.jpeg', '.png')):
        """ Read all images in subfolders and convert them to a single array """

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and concatenate:
        print("Converting '", source_folder, "' images...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_to_array(source_folder + '/' + filename, target_folder, 
                    create_labels_file=create_labels_file, extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    image = ndimage.imread(source_folder + "/" + filename, mode="RGB")
                    cls.flatten_images.append(image[:,:,0] + image[:,:,1] + image[:,:,2])
                    if create_labels_file:
                        cls.labels.append(source_folder.replace('/', '_'))
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            image = ndimage.imread(source_folder + "/" + filename, mode="RGB")
                            cls.flatten_images.append(image[:,:,0] + image[:,:,1] + image[:,:,2])
                            if create_labels_file:
                                cls.labels.append(source_folder.replace('/', '_'))
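One caution about the channel sum above: the arrays returned by imread are typically uint8, and uint8 addition wraps modulo 256, so image[:,:,0] + image[:,:,1] + image[:,:,2] can silently overflow. A hedged sketch of an overflow-safe variant (helper name hypothetical):

# Hedged sketch: widen the dtype before summing channels to avoid uint8 wraparound.
import numpy as np

def safe_channel_sum(image):
    img = image.astype(np.int32)
    return img[:, :, 0] + img[:, :, 1] + img[:, :, 2]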
Example 4
    def nextImage(self):
        print "Updating Image to ", self.imgFiles[self.imgIdx]
        imgFile = self.imgFiles[self.imgIdx]
        depthFile = self.depthFiles[self.imgIdx]
        #Read data, with normalization to be -1 to 1
        self.currImage = imread(imgFile).astype(np.float32)/256
        #Here, each raw pixel is uint16; dividing by 256 rescales it to the
        #uint8 range (0 to 255), though most depths do not exceed 128.
        self.currDepth = imread(depthFile).astype(np.float32)/256

        #Update imgIdx
        self.imgIdx = (self.imgIdx + 1) % len(self.imgFiles)

        #Segment image
        self.currSegments = calcSegments(self.currImage)
        (segMean, segStd, segCoords, segLabels) = segmentDepth(self.currDepth, self.currSegments)

        #Normalize ground truth here
        self.segVals = segMean
        self.segCoords = segCoords
        self.segLabels = segLabels

        assert(len(self.segVals) == len(self.segCoords))

        #Generate shuffled index based on how many segments
        self.shuffleIdx = range(len(self.segVals))
        shuffle(self.shuffleIdx)
Example 5
    def convert_to_grayscale(cls, source_folder, target_folder,
                             extensions=('.jpg', '.jpeg', '.png')):
        """ convert images from RGB to Grayscale"""

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Convert '", source_folder, "' images to grayscale...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_to_grayscale(source_folder + '/' + filename,
                                         target_folder + '/' + filename,
                                         extensions=extensions)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename)
                    image = ndimage.imread(target_folder + "/" + filename, flatten=True)
                    misc.imsave(target_folder + "/" + filename, image)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + filename)
                            image = ndimage.imread(target_folder + "/" + filename, flatten=True)
                            misc.imsave(target_folder + "/" + filename, image)
Example 6
def test_imread():
    lp = os.path.join(os.path.dirname(__file__), 'dots.png')
    img = ndi.imread(lp, mode="RGB")
    assert_array_equal(img.shape, (300, 420, 3))

    img = ndi.imread(lp, flatten=True)
    assert_array_equal(img.shape, (300, 420))
Example 7
def Main():

    #RG_idx = pickle.load(open("RG_idx.pkl","rb"))
    #path ='/home/andyc/image/Feb11/'
    #RG_idx = [86,179,196,221]
    path ='/home/andyc/image/Mar10/'
    imlist = sorted(glob.glob( os.path.join(path, '*.jpg')))
    H,W,O = nd.imread(imlist[0]).shape
    diff = np.zeros([H,W])
    im1  = np.zeros([H,W,O])
    im2  = np.zeros([H,W,O])
    Cimg = np.zeros([H,W*2,O])

    #for i in range(len(RG_idx)):
    #for i in range(1):
    for j in range(len(imlist)-1):
        #lb = RG_idx[i]-1
        #ub = RG_idx[i]+2
        #lb = 120
        #ub = 150  
        #for j in range(lb,ub):
            #print('in {0}/{1} idx set, image {2}/{3}'.format(i+1,len(RG_idx),j-lb+1,(ub-lb)))  
            print('image {0}'.format(j))
            im1 = nd.imread(imlist[j]).astype(np.float)
            im2 = nd.imread(imlist[j+1]).astype(np.float)  
            diff= ((im2[:,:,0]-im1[:,:,0])+300)/2.0 
            Cimg[:,0:W,:]   = im2
            Cimg[:,W:2*W,0] = diff
            Cimg[:,W:2*W,1] = diff
            Cimg[:,W:2*W,2] = diff
            result = Image.fromarray(Cimg.astype(np.uint8))
            result.save('/home/andyc/image/diff/{0}.jpg'.format(str(j).zfill(5)))
Example 8
def compression():
    orig_im = ndimage.imread(get_image_path())
    if os.path.exists(get_image_path(type='compressed')):
        comp_im = ndimage.imread(get_image_path(type='compressed'))
        diff = image_diff(orig_im, comp_im)
    else:
        diff = 0

    if request.method == 'POST':
        compression_method = request.form['compression_method']
        compression_level = float(request.form['compression_level'])

        if compression_method == 'dft':
            compression_function = compress_dft
        elif compression_method == 'dct':
            compression_function = compress_dct
        else:
            return about(400)

        image = ndimage.imread(get_image_path())
        image_c = compression_function(image, compression_level)
        Image.fromarray(image_c.astype(np.uint8)).save(get_image_path(type='compressed'))

        return redirect(url_for('.compression', **request.form))
    return render_template('main/compression.jinja', diff=diff)
Example 9
 def features(image_path):
     #red mean
     img2d = ndimg.imread(image_path)
     img2d_gray = ndimg.imread(image_path, flatten= True)
     
     row_n = row(img2d_gray)
     col_n = col(img2d_gray)
     
     red_mean = layer_mean(img2d[...,0])
     green_mean = layer_mean(img2d[...,1])
     blue_mean = layer_mean(img2d[...,2])
     gray_mean = layer_mean(img2d_gray)
     
     red_most = hist_max(img2d[...,0])
     green_most = hist_max(img2d[...,1])
     blue_most = hist_max(img2d[...,2])
     gray_most = hist_max(img2d_gray)
     
     length = edge_length(img2d_gray)
     sobel_h = edge_sobel_h(img2d_gray)
     sobel_v = edge_sobel_v(img2d_gray)
     sobel = edge_sobel(img2d_gray)
     hough = houghLine(img2d_gray)
     
     cate = cate_extract(image_path)
     
     return list([row_n, col_n, red_mean, green_mean, blue_mean, gray_mean,\
                      red_most,green_most,blue_most, gray_most,\
                      length, sobel_h, sobel_v, sobel, hough, cate])
Example 10
def get_online_png(url, outname, myopener=MyOpener()):
    fname = url.split('/')[-1]

    # download file if we don't already have it
    if not path.exists(outname):
        F = myopener.retrieve(url, outname)
    else:
        # TODO: this is glitched?
        F = [outname]
    try:
        I = imread(F[0]) * 1. / 255
    except Exception as er:
        # this field probably didn't download for *reasons*
        print(F, url, outname)
        try:
            import time
            time.sleep(10)
            I = imread(F[0]) * 1. / 255
        except TypeError as er:

            # okay let's give this another shot
            from os import remove
            remove(outname)
            F = myopener.retrieve(url, outname)
            I = imread(F[0]) * 1. / 255

    return I
Example 11
def dump_images(mpv_args=[]):
    """
    invoke mpv with the scan.lua script and image vo and return a numpy array of screenshots
    """
    with tmp_dir() as tmp_dir_path:
        cmd=['mpv']+mpv_args
        cmd+=['--no-config',
              '--no-resume-playback',
              '--vo=image:outdir=%s'%tmp_dir_path,
              '--ao=null',
              '--no-audio']
        print ' '.join(cmd)
        p=Popen(cmd,stdout=PIPE,stderr=STDOUT)
        stdout,stderr=p.communicate()
        rc=p.wait()
        if rc!=0:
            print 'mpv screenshot command exited with non-zero status'
            print 'COMMAND WAS'
            print cmd
            print 'STDOUT/STDERR was'
            print stdout
            sys.exit(1)
        fpaths=[os.path.join(tmp_dir_path,fname) for fname in os.listdir(tmp_dir_path)]

        ims=imread(fpaths[0])
        shape=[len(fpaths)]+list(ims.shape)
        ims.resize(shape,refcheck=False)#add spaces for the other images
        for i in xrange(1,len(fpaths)):
            ims[i]=imread(fpaths[i])
    return ims
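The ims.resize(shape, refcheck=False) trick above grows the first screenshot's array in place to hold the whole stack. A hedged, more conventional alternative preallocates the stack with np.empty (same Python 2 style as the example):

# Hedged sketch: preallocate the stack instead of resizing the first image in place.
import numpy as np
first = imread(fpaths[0])
ims = np.empty([len(fpaths)] + list(first.shape), dtype=first.dtype)
ims[0] = first
for i in xrange(1, len(fpaths)):
    ims[i] = imread(fpaths[i])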
Example 12
 def loadFile(self, imageSize=(None,None), numberOfSlices=None, dataType='uint8'):
     """Loads the file or images containing the volume data into a numpy array"""
     if not self.loaded:
         if self.fileList:
             if self.fileList[0].endswith('.dcm'):
                 #Dicom files
                 self.data = self._readDicom(self.fileList[0])
                 for i in range(1, len(self.fileList)):
                     self.data = np.dstack((self.data, self._readDicom(self.fileList[i])))
             else:
                 #Standard image extensions
                 self.data = ndimage.imread(self.fileList[0], flatten=True)
                 #Uses PIL to load the images
                 for i in range(1, len(self.fileList)):
                     self.data = np.dstack((self.data, ndimage.imread(self.fileList[i], flatten=True)))
             self.loaded = True
         else:
             #Check by file extension
             if self.fileExtension == '.nrrd':
                 self._readNrrd()
                 self.loaded = True
             elif self.fileExtension == '.raw':
                 if numberOfSlices is not None and imageSize is not None:
                     self._readRaw(imageSize, numberOfSlices, dataType)
                     self.loaded = True
                 else:
                     raise VolumeFileReaderException('Image size and number of slices not specified!')
             else:
                 raise FormatException('Unsupported file extension!')
Example 13
def get_image_lena(query):
    """
    get the image
    :param query:
    :type query:
    :return:
    :rtype:
    """

    args = query.split(sep='_')

    if args[2] == 'grey':
        lena = ndimage.imread('lena.jpg', mode='L')
    elif args[2] == 'rgb':
        lena = ndimage.imread('lena.jpg', mode='RGB')
    else:
        raise ValueError('Invalid color type. Allowed rgb or grey')

    if args[3] == 'small':
        lena = misc.imresize(lena, (2048, 2048), interp='bilinear')
    elif args[3] == 'large':
        lena = misc.imresize(lena, (4096, 4096), interp='bilinear')
    else:
        raise ValueError('Invalid size. Allowed small or large')

    if args[4] == 'uint8':
        lena = lena.astype(np.uint8)
    elif args[4] == 'float':
        lena = lena.astype(np.float)
    else:
        raise ValueError('Invalid type. Allowed uint8 or float')

    return lena
Example 14
 def load_datum(self, datum, train):
     assert isinstance(datum, tuple)
     assert len(datum) == 3
     datum_id = '_'.join(datum)
     inputs = self.db.get(datum_id)
     if inputs is None:
         img_file, mask_file, label_name = datum
         img = ndi.imread(img_file, mode='RGB')
         mask = ndi.imread(mask_file, mode='L')
         # resize mask image
         if img.shape[:2] != mask.shape[:2]:
             print('WARNING: img and mask must have same shape. '
                   'Resizing mask {} to img {}.'
                   .format(img.shape[:2], mask.shape))
             mask = skimage.transform.resize(
                 mask, img.shape[:2], preserve_range=True).astype(np.uint8)
         inputs = (img, mask, label_name)
         self.db.put(datum_id, pickle.dumps(inputs))
     else:
         inputs = pickle.loads(inputs)
     img, mask, label_name = inputs
     img_trans = self.transform_img(img, mask, train=train)
     blob = self.rgb_to_blob(img_trans)
     label_id = np.where(self.target_names == label_name)[0][0]
     return blob, label_id
Example 15
def process_filter(filter_type, options):
    filtered_image_path = get_image_path(type='filtered_{}'.format(filter_type))
    orig_im = ndimage.imread(get_image_path())
    if os.path.exists(filtered_image_path):
        comp_im = ndimage.imread(filtered_image_path)
        diff = image_diff(orig_im, comp_im)
    else:
        diff = 0

    if request.method == 'POST':
        filter_name = request.form['filter_name']
        option_values = {
            option: float(request.form[option])
            for option in options
        }
        if filter_name == 'butterworth':
            option_values['order'] = int(request.form['order'])

        image = ndimage.imread(get_image_path())

        filter_func = FILTERS[filter_type][filter_name]

        image_filtered = filter_image(image, filter_func, option_values)
        im = Image.fromarray(image_filtered.astype(np.uint8))
        im.save(filtered_image_path)

        return redirect(url_for('.{}'.format(filter_type), **request.form))

    return render_template('main/filter.jinja', filter_type=filter_type, options=options, diff=diff)
Example 16
    def convert_format(cls, source_folder, target_folder,
                       extensions=('.jpg', '.jpeg', '.png'), new_extension='.jpg'):
        """ change images from one format to another (eg. change png files to jpeg) """

        # check source_folder and target_folder:
        cls.check_folder_existance(source_folder, throw_error_if_no_folder=True)
        cls.check_folder_existance(target_folder, display_msg=False)
        if source_folder[-1] == "/":
            source_folder = source_folder[:-1]
        if target_folder[-1] == "/":
            target_folder = target_folder[:-1]

        # read images and reshape:
        print("Change format of '", source_folder, "' files...")
        for filename in os.listdir(source_folder):
            if os.path.isdir(source_folder + '/' + filename):
                cls.convert_format(source_folder + '/' + filename,
                                   target_folder + '/' + filename,
                                   extensions=extensions, new_extension=new_extension)
            else:
                if extensions == '' and os.path.splitext(filename)[1] == '':
                    copy2(source_folder + "/" + filename,
                          target_folder + "/" + filename + new_extension)
                    image = ndimage.imread(target_folder + "/" + filename + new_extension)
                    misc.imsave(target_folder + "/" + filename + new_extension, image)
                else:
                    for extension in extensions:
                        if filename.endswith(extension):
                            new_filename = os.path.splitext(filename)[0] + new_extension
                            copy2(source_folder + "/" + filename,
                                  target_folder + "/" + new_filename)
                            image = ndimage.imread(target_folder + "/" + new_filename)
                            misc.imsave(target_folder + "/" + new_filename, image)
Example 17
def calculate_psnr(refImage, noisyImage):
    imageRefContainer = imread(refImage)    # .astype(float) if float math is needed
    noisyImageContainer = imread(noisyImage)
    return mse(imageRefContainer, noisyImageContainer)
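Despite its name, the function above returns a plain MSE. PSNR is conventionally derived from MSE; a hedged sketch of the standard conversion, assuming 8-bit images and a working mse():

# Hedged sketch: standard PSNR-from-MSE conversion for 8-bit images, in decibels.
import numpy as np

def psnr_from_mse(mse_value, max_value=255.0):
    # PSNR = 20*log10(MAX) - 10*log10(MSE)
    return 20 * np.log10(max_value) - 10 * np.log10(mse_value)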
Example 18
def load_dataset():
   # Load all the images
   inputDir = 'imgs'
   labelsFile = 'whales.csv'
   output = np.genfromtxt(labelsFile, skip_header=1, dtype=[('image', 'S10'), ('label', 'S11')], delimiter=',')
   labels = [x[1] for x in output]
   numberOfClasses = len(set(labels))
   
   # Read the images into 4D array (numImages, numChannels, width, height)
   # Assume all files in dir are images and all are the same size
   fileNames = os.listdir(inputDir)
   im = imread(os.path.join(inputDir, fileNames[0]))
   w, h = im.shape
   
   # Initialize the ndarray
   images = np.ndarray((len(fileNames), 1, w, h))
   # Load the first image into its slot
   images[0,0,:,:] = im
   
   for i in xrange(1, len(fileNames)):
      im = imread(os.path.join(inputDir, fileNames[i]))
      images[i,0,:,:] = im
   
   labelsDict = {int(re.search("w_(\\d+)\.jpg", x[0]).group(1)):int(re.search("whale_(\\d+)", x[1]).group(1)) for x in output}
   
   # to provide default values
   labelsDict = defaultdict(lambda:0, labelsDict)
   examples = [int(re.search("w_(\\d+).jpg",x).group(1)) for x in fileNames]
   labels = [labelsDict[x] for x in examples]
   labels = np.array(labels)
   
   origTrainLabels = labels[labels > 0]

   # Renumber the labels to have consecutive numbers
   l = np.zeros(max(origTrainLabels))
   i = 0
   for k in sorted(set(origTrainLabels)):
      l[k-1] = i
      i += 1
   trainLabels = np.array([l[x-1] for x in origTrainLabels])
   trainLabels = trainLabels.astype(np.uint8)
   
   # We can now download and read the training and test set images and labels.
   X_train = images
   y_train = trainLabels
   X_test = images
   y_test = trainLabels
   
   X_val = images
   y_val = trainLabels
   # We reserve the last 10000 training examples for validation.
#    X_train, X_val = X_train[:-10000], X_train[-10000:]
#    y_train, y_val = y_train[:-10000], y_train[-10000:]
   
   # We just return all the arrays in order, as expected in main().
   # (It doesn't matter how we do this as long as we can read them again.)
   return X_train, y_train, X_val, y_val, X_test, y_test
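The renumbering loop above maps the sorted unique labels to consecutive integers starting at zero. A hedged, more direct sketch of the same remapping, reusing the example's origTrainLabels:

# Hedged sketch: equivalent relabeling via a dict from original label to its rank.
label_map = {k: i for i, k in enumerate(sorted(set(origTrainLabels)))}
trainLabels = np.array([label_map[x] for x in origTrainLabels], dtype=np.uint8)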
Example 19
    def examine_entry(i):
        entry = catalog.iloc[i]
        ZooID = entry['ZooID']
        display(entry[columns])


        cluster_path = cluster_directory + entry['object_name']

        cluster = imread(cluster_path) * 1. / 255.
        if invert_color:
            cluster = np.abs(cluster - cluster.max())

        fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
        axs[0].imshow(cluster)
        cat = annotated_catalog.set_index(['ZooID'])

        entry = cat.loc[ZooID]
        # it is possible that we have ones from multiple stages
        if len(entry) == 2:
            # two stages, choose the one with the correct stage
            truth = entry['stage'].values == catalog.iloc[i]['stage']
            ith = int(np.argwhere(truth))
            entry = entry.iloc[ith]  # needs to be an int to return series. if array, returns dataframe


        if plot_points:
            x_unflat = literal_eval(entry['At_X'])
            y_unflat = literal_eval(entry['At_Y'])
            # flatten out x and y. also cut out empty entries
            x = np.array([xi for xj in x_unflat for xi in xj])
            y = np.array([yi for yj in y_unflat for yi in yj])

            users = np.array([xj for xj in xrange(len(x_unflat)) for xi in x_unflat[xj]])
            binusers = np.bincount(users)
            w = 1. / binusers[users]

            cluster_centers, cluster_labels, labels = outlier_clusters_dbscan(x, y)
            if color_by_user:
                c = users
            else:
                c = labels
            points = axs[1].scatter(x, y, c=c, s=50, alpha=0.8, cmap=plt.cm.Accent)
            tooltiplabels = ['({0}, {1}, {2})'.format(labels[i], users[i], w[i]) for i in xrange(len(labels))]
            tooltip = mpld3.plugins.PointLabelTooltip(points, tooltiplabels)
            mpld3.plugins.connect(fig, tooltip)

        field = imread(field_directory + ZooID + '.png') * 1. / 255. #entry['ZooID'] + '.png')
        if plot_alpha:
            if np.shape(field)[-1] == 4:
                field = field[:, :, 3]
        else:
            field = field[:, :, :3]
        if invert_color:
            field = np.abs(field - field.max())
        IM = axs[1].imshow(field)
        mpld3.plugins.connect(fig, mpld3.plugins.MousePosition(fontsize=14))
Example 20
def test_imread():
    lp = os.path.join(os.path.dirname(__file__), "dots.png")
    img = ndi.imread(lp, mode="RGB")
    assert_array_equal(img.shape, (300, 420, 3))

    img = ndi.imread(lp, flatten=True)
    assert_array_equal(img.shape, (300, 420))

    with open(lp, "rb") as fobj:
        img = ndi.imread(fobj, mode="RGB")
        assert_array_equal(img.shape, (300, 420, 3))
Example 21
 def test_save_render(self):
     # test object
     part1 = CuboidPrimitive([0.0, 0.0, 0.0], [0.8, 1.0, 0.4])
     part2 = CuboidPrimitive([0.6, 0.0, 0.0], [0.4, 0.4, 0.3])
     part3 = CuboidPrimitive([0.0, 0.0, 0.50], [0.2, 0.6, 0.6])
     s = Shape(forward_model=None, parts=[part1, part2, part3])
     self.fwm_custom_lighting.save_render('test_images/r.png', s)
     for i in range(len(self.fwm_custom_lighting.camera_pos)):
         r = spi.imread('test_images/r_{0:d}.png'.format(i))
         correct = spi.imread('test_images/cube_object_custom_lighting_{0:d}.png'.format(i))
         self.assertAlmostEqual(np.sum(np.abs(r - correct)), 0.0)
         os.remove('test_images/r_{0:d}.png'.format(i))
Example 22
def test_imread():
    lp = os.path.join(os.path.dirname(__file__), 'dots.png')
    with warnings.catch_warnings(record=True):  # Py3k ResourceWarning
        img = ndi.imread(lp, mode="RGB")
    assert_array_equal(img.shape, (300, 420, 3))

    with warnings.catch_warnings(record=True):  # PIL ResourceWarning
        img = ndi.imread(lp, flatten=True)
    assert_array_equal(img.shape, (300, 420))

    with open(lp, 'rb') as fobj:
        img = ndi.imread(fobj, mode="RGB")
        assert_array_equal(img.shape, (300, 420, 3))
Example 23
    def examine_entry(i):
        entry = catalog.iloc[i]
        ZooID = entry['ZooID']
        display(entry[columns])


        cluster_path = cluster_directory + entry['object_name']

        cluster = imread(cluster_path) * 1. / 255.
        if invert_color:
            cluster = np.abs(cluster - cluster.max())

        fig, axs = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))
        cluster = imread(cluster_path)
        axs.imshow(cluster)
Example 24
def get_stack(path):

  from glob import glob
  from scipy.ndimage import imread

  files = sorted(glob(path+'*.png'))

  stack = imread(files[0])[:,:,0]/256.

  for f in sorted(files[1:]):
    print(f)
    img = imread(f)
    stack += img[:,:,0]/256.

  return stack
Example 25
def load_letter(folder, min_num_images):
    """Load the data for a single letter label."""
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size), dtype=np.float32)
    image_index = 0
    print(folder)
    for image in os.listdir(folder):
        image_file = os.path.join(folder, image)
        try:
            image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth
            if image_data.shape != (image_size, image_size):
                raise Exception("Unexpected image shape: %s" % str(image_data.shape))
            dataset[image_index, :, :] = image_data
            image_index += 1
        except IOError as e:
            print("Could not read:", image_file, ":", e, "- it's ok, skipping.")

    num_images = image_index
    dataset = dataset[0:num_images, :, :]
    if num_images < min_num_images:
        raise Exception("Many fewer images than expected: %d < %d" % (num_images, min_num_images))

    print("Full dataset tensor:", dataset.shape)
    print("Mean:", np.mean(dataset))
    print("Standard deviation:", np.std(dataset))
    return dataset
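The (pixel - pixel_depth/2) / pixel_depth normalization above maps raw 8-bit values from [0, 255] into roughly [-0.5, 0.5]. A quick check, assuming pixel_depth = 255.0:

# Quick check of the normalization used above (assuming pixel_depth = 255.0):
pixel_depth = 255.0
assert (0 - pixel_depth / 2) / pixel_depth == -0.5
assert (255 - pixel_depth / 2) / pixel_depth == 0.5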
Example 26
def get_positive_features(train_path_pos, cell_size, window_size, block_size, nbins):
    '''
    'train_path_pos' is a string. This directory contains 36x36 images of
      faces
    'feature_params' is a struct, with fields
      feature_params.template_size (probably 36), the number of pixels
         spanned by each train / test template and
      feature_params.hog_cell_size (default 6), the number of pixels in each
         HoG cell. template size should be evenly divisible by hog_cell_size.
         Smaller HoG cell sizes tend to work better, but they make things
         slower because the feature dimensionality increases and more
         importantly the step size of the classifier decreases at test time.

    'features_pos' is N by D matrix where N is the number of faces and D
    is the template dimensionality, which would be
      (feature_params.template_size / feature_params.hog_cell_size)^2 * 31
    if you're using the default vl_hog parameters
    '''

    image_files = [os.path.join(train_path_pos, f) for f in os.listdir(train_path_pos) if f.endswith('.jpg')]
    num_images = len(image_files)
    total_block_size = block_size * cell_size
    template_size = int((np.floor((window_size[0] - 2) / (total_block_size / 2)) - 1) * (np.floor((window_size[1] - 2) / (total_block_size / 2)) - 1))
    D = template_size * block_size * block_size * nbins
    features_pos = np.zeros((num_images, D))
    for i in range(num_images):
        img = imread(image_files[i])
        features_pos[i] = compute_hog_features(img, cell_size, block_size, nbins).reshape(-1)
    return features_pos
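As a quick sanity check on the dimensionality described in the docstring: with the default template_size of 36 and hog_cell_size of 6, the template dimensionality is (36 / 6)^2 * 31 = 1116.

# Worked check of the docstring's dimensionality formula (default vl_hog parameters):
template_size, hog_cell_size = 36, 6
D = (template_size // hog_cell_size) ** 2 * 31
assert D == 1116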
Example 27
def load(data_folders, min_num_images, max_num_images):
  dataset = np.ndarray(
    shape=(max_num_images, image_size, image_size), dtype=np.float32)
  labels = np.ndarray(shape=(max_num_images), dtype=np.int32)
  label_index = 0
  image_index = 0
  for folder in data_folders:
    print(folder)
    for image in os.listdir(folder):
      if image_index >= max_num_images:
        raise Exception('More images than expected: %d >= %d' % (
          num_images, max_num_images))
      image_file = os.path.join(folder, image)
      try:
        image_data = (ndimage.imread(image_file).astype(float) -
                      pixel_depth / 2) / pixel_depth
        if image_data.shape != (image_size, image_size):
          raise Exception('Unexpected image shape: %s' % str(image_data.shape))
        dataset[image_index, :, :] = image_data
        labels[image_index] = label_index
        image_index += 1
      except IOError as e:
        print('Could not read:', image_file, ':', e, '- it\'s ok, skipping.')
    label_index += 1
  num_images = image_index
  dataset = dataset[0:num_images, :, :]
  labels = labels[0:num_images]
  if num_images < min_num_images:
    raise Exception('Many fewer images than expected: %d < %d' % (
        num_images, min_num_images))
  print('Full dataset tensor:', dataset.shape)
  print('Mean:', np.mean(dataset))
  print('Standard deviation:', np.std(dataset))
  print('Labels:', labels.shape)
  return dataset, labels
Example 28
def load(data_folders, min_num_images, max_num_images):
    dataset = np.ndarray(
        shape=(max_num_images, image_size, image_size), dtype=np.float32)
    labels = np.ndarray(shape=(max_num_images), dtype=np.int32)
    label_index = 0
    image_index = 0
    for folder in data_folders:
        print folder
        for image in os.listdir(folder):
            if image_index >= max_num_images:
                raise Exception('More images than expected: %d >= %d' %
                        (image_index, max_num_images))
            image_file = os.path.join(folder, image)
            try:
                # Loads the file. There seems to be some sort of translation on it.
                image_data = (ndimage.imread(image_file).astype(float) - pixel_depth / 2) / pixel_depth
                if image_data.shape != (image_size, image_size):
                    raise Exception('Unexpected image shape: %s' % str(image_data.shape))
                dataset[image_index, :, :] = image_data
                labels[image_index] = label_index
                image_index += 1
            except IOError as e:
                print 'Could not read: ', image_file, ': ', e, '- skipped.'
        label_index += 1
    num_images = image_index
    dataset = dataset[0:num_images, :, :]
    labels = labels[0:num_images]
    if num_images < min_num_images:
        raise Exception('Many fewer images than expected: %d < %d' %
            (num_images, min_num_images))
    print 'Full dataset tensor: ', dataset.shape
    print 'Mean: ', np.mean(dataset)
    print 'Stdev: ', np.std(dataset)
    print 'Labels: ', labels.shape
    return dataset, labels
Example 29
def shader_room(length=10, width=10, height=10, url_prefix=""):
    L, W, H = 1.0*length, 1.0*width, 1.0*height
    yMin, yMax = -1.2, -1.2 + H
    xMin, xMax = -L/2, L/2
    zMin, zMax = -W/2, W/2
    yAvg = (yMin + yMax) / 2
    scene = Scene()
    scene.add(AmbientLight(color=0x151515))
    scene.add(PointLight(color=0x880000, intensity=0.7, distance=50,
                         position=[0.45 * L, 0, -0.4 * W]))
    scene.add(PointLight(color=0x008800, intensity=0.7, distance=50,
                         position=[-0.45 * L, 0, -0.4 * W]))
    cannonData = {'mass': 0, 'shapes': ['Plane']}
    scene.add(Mesh(name="floor", geometry=square,
                   material=MeshBasicMaterial(side=FrontSide, color=0xffffff,
                                              map=Texture(image=Image("deck", url="images/deck.png"),
                                                          repeat=[L, W], wrap=[RepeatWrapping, RepeatWrapping])),
                   receiveShadow=True,
                   position=[0, yMin, 0],
                   scale=[L, 1, W],
                   userData={'cannonData': cannonData}))
    heightmap = url_prefix + 'images/terrain128.png'
    image = ndimage.imread(heightmap)
    scene.add(Mesh(name="heightfield",
                   geometry=PlaneBufferGeometry(width=L, height=W, widthSegments=image.shape[0]-1, heightSegments=image.shape[1]-1),
                   material=MeshLambertMaterial(color=0xffffff, shading=SmoothShading),
                   position=[L, -6, 0],
                   rotation=[-np.pi/2, 0, 0],
                   userData={'cannonData': {'mass': 0.0, 'shapes': ['Heightfield']},
                             'heightmap': heightmap}))
    return scene
Example 30
def readImages(path, sz=None):
   
    c = 0
    X,y,z = [], [], []
    for dirname, dirnames, filenames in os.walk(path):
        for subdirname in dirnames:
            subject_path = os.path.join(dirname, subdirname)
            
            for filename in os.listdir(subject_path):
                try:
                    #im = cv2.imread(os.path.join(subject_path, filename), cv2.IMREAD_GRAYSCALE)  # might work with other backends; to be tested
                    im = ndimage.imread(os.path.join(subject_path, filename), flatten=True)  # works for now; to be tested

                    if (len(im)==0):    # error if imread fails (does not return a list)
                        continue                         
                    # resize to given size
                    if (sz is not None):
                        im = cv2.resize(im, sz)
                    X.append(np.asarray(im, dtype=np.uint8))
                    y.append(c)
                except IOError:
                    print ""   #exception lancee meme si le script marche. a verifier
                except:
                    print "Erreur inatendue:", sys.exc_info()[0]
                    raise
            c = c+1
            z.append(subdirname)
    return [X,y,z]
Example 31
def main():
  # Initialize some parameters
  print("Start Pokemon classifier")
  if os.path.exists(FLAGS.checkpoint_path) == False:
    os.makedirs(FLAGS.checkpoint_path)
  CHECKPOINT_FILE = FLAGS.checkpoint_path + "/checkpoint.ckpt"
  LATEST_CHECKPOINT = tf.train.latest_checkpoint(FLAGS.checkpoint_path)

  # Initialize train and test data
  TRAIN_IMAGE_NUMBER = 646
  TEST_IMAGE_NUMBER = 68
  IMAGE_SIZE = 32
  RGB_CHANNEL_SIZE = 3
  LABEL_SIZE = 17

  train_dataset = np.ndarray(
      shape=(TRAIN_IMAGE_NUMBER, IMAGE_SIZE, IMAGE_SIZE, RGB_CHANNEL_SIZE), # channel last
      dtype=np.float32)
  test_dataset = np.ndarray(
      shape=(TEST_IMAGE_NUMBER, IMAGE_SIZE, IMAGE_SIZE, RGB_CHANNEL_SIZE),
      dtype=np.float32)

  train_labels = np.ndarray(shape=(TRAIN_IMAGE_NUMBER, ), dtype=np.int32)
  test_labels = np.ndarray(shape=(TEST_IMAGE_NUMBER, ), dtype=np.int32)

  TRAIN_DATA_DIR = "./data/train/"
  TEST_DATA_DIR = "./data/test/"
  VALIDATE_DATA_DIR = "./data/validate/"
  IMAGE_FORMAT = ".png"
  index = 0  # image counter
  pokemon_type_id_map = {
      "Bug": 0,
      "Dark": 1,
      "Dragon": 2,
      "Electric": 3,
      "Fairy": 4,
      "Fighting": 5,
      "Fire": 6,
      "Ghost": 7,
      "Grass": 8,
      "Ground": 9,
      "Ice": 10,
      "Normal": 11,
      "Poison": 12,
      "Psychic": 13,
      "Rock": 14,
      "Steel": 15,
      "Water": 16
  }
  pokemon_types = [
      "Bug", "Dark", "Dragon", "Electric", "Fairy", "Fighting", "Fire",
      "Ghost", "Grass", "Ground", "Ice", "Normal", "Poison", "Psychic", "Rock",
      "Steel", "Water"
  ]

  # Step 1: load the training data
  for pokemon_type in os.listdir(TRAIN_DATA_DIR):
    for image_filename in os.listdir(
        os.path.join(TRAIN_DATA_DIR, pokemon_type)):
      if image_filename.endswith(IMAGE_FORMAT):

        image_filepath = os.path.join(TRAIN_DATA_DIR, pokemon_type,
                                      image_filename)
        image_ndarray = ndimage.imread(image_filepath, mode="RGB") #RGB
        train_dataset[index] = image_ndarray

        train_labels[index] = pokemon_type_id_map.get(pokemon_type)  # map the label to a numeric id
        index += 1

  
  index = 0
  # Step 2: load the test data
  for pokemon_type in os.listdir(TEST_DATA_DIR):
    for image_filename in os.listdir(
        os.path.join(TEST_DATA_DIR, pokemon_type)):
      if image_filename.endswith(IMAGE_FORMAT):

        image_filepath = os.path.join(TEST_DATA_DIR, pokemon_type,
                                      image_filename)
        image_ndarray = ndimage.imread(image_filepath, mode="RGB")
        test_dataset[index] = image_ndarray

        test_labels[index] = pokemon_type_id_map.get(pokemon_type)
        index += 1

  # Step 3: define the model
  # placeholder
  keys_placeholder = tf.placeholder(tf.int32, shape=[None, 1])
  keys = tf.identity(keys_placeholder)

  # base64-encoded images
  model_base64_placeholder = tf.placeholder(
      shape=[None], dtype=tf.string, name="model_input_b64_images")
  model_base64_string = tf.decode_base64(model_base64_placeholder)
  # equivalent to Python's map()
  model_base64_input = tf.map_fn(lambda x: tf.image.resize_images(tf.image.decode_jpeg(x, channels=RGB_CHANNEL_SIZE), [IMAGE_SIZE, IMAGE_SIZE]), model_base64_string, dtype=tf.float32)

  x = tf.placeholder(
      tf.float32, shape=(None, IMAGE_SIZE, IMAGE_SIZE, RGB_CHANNEL_SIZE))
  y = tf.placeholder(tf.int32, shape=(None, ))

  batch_size = FLAGS.batch_size
  epoch_number = FLAGS.epoch_number
  checkpoint_dir = FLAGS.checkpoint_dir
  if not os.path.exists(checkpoint_dir):
    os.makedirs(checkpoint_dir)
  tensorboard_dir = FLAGS.tensorboard_dir
  mode = FLAGS.mode
  checkpoint_file = checkpoint_dir + "/checkpoint.ckpt"
  steps_to_validate = FLAGS.steps_to_validate

  def cnn_inference(x):
    # Convolution layer result: [BATCH_SIZE, 16, 16, 64]
    # (n+2p-f)/s+1
    with tf.variable_scope("conv1"):
      weights = tf.get_variable(
          "weights", [3, 3, 3, 32], initializer=tf.random_normal_initializer())
      bias = tf.get_variable(
          "bias", [32], initializer=tf.random_normal_initializer())

      layer = tf.nn.conv2d(x, weights, strides=[1, 1, 1, 1], padding="SAME") # 32*32*32
      layer = tf.nn.bias_add(layer, bias)
      layer = tf.nn.relu(layer)
      # (n-f)/s+1
      layer = tf.nn.max_pool(
          layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME") #16*16*32

    # Convolution layer result: [BATCH_SIZE, 8, 8, 64]
    with tf.variable_scope("conv2"):
      weights = tf.get_variable(
          "weights", [3, 3, 32, 64],
          initializer=tf.random_normal_initializer())
      bias = tf.get_variable(
          "bias", [64], initializer=tf.random_normal_initializer())

      layer = tf.nn.conv2d(
          layer, weights, strides=[1, 1, 1, 1], padding="SAME") #16*16*64
      layer = tf.nn.bias_add(layer, bias)
      layer = tf.nn.relu(layer)
      layer = tf.nn.max_pool(
          layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME") #8*8*64

    # flatten for the fully connected layer
    layer = tf.reshape(layer, [-1, 8 * 8 * 64])

    # Full connected layer result: [BATCH_SIZE, 17]
    with tf.variable_scope("fc1"):
      # weights.get_shape().as_list()[0]] = 8 * 8 * 64
      weights = tf.get_variable(
          "weights", [8 * 8 * 64, LABEL_SIZE],
          initializer=tf.random_normal_initializer())
      bias = tf.get_variable(
          "bias", [LABEL_SIZE], initializer=tf.random_normal_initializer())
      layer = tf.add(tf.matmul(layer, weights), bias)

    return layer  # 17 output nodes

  def lstm_inference(x):
    RNN_HIDDEN_UNITS = 128

    # x was [BATCH_SIZE, 32, 32, 3]
    # x changes to [32, BATCH_SIZE, 32, 3]
    x = tf.transpose(x, [1, 0, 2, 3]) 
    # x changes to [32 * BATCH_SIZE, 32 * 3]
    x = tf.reshape(x, [-1, IMAGE_SIZE * RGB_CHANNEL_SIZE])
    # x changes to array of 32 * [BATCH_SIZE, 32 * 3]
    x = tf.split(axis=0, num_or_size_splits=IMAGE_SIZE, value=x)

    weights = tf.Variable(tf.random_normal([RNN_HIDDEN_UNITS, LABEL_SIZE]))
    biases = tf.Variable(tf.random_normal([LABEL_SIZE]))

    # output size is 128, state size is (c=128, h=128)
    lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)
    # outputs is array of 32 * [BATCH_SIZE, 128]
    outputs, states = rnn.rnn(lstm_cell, x, dtype=tf.float32)

    # outputs[-1] is [BATCH_SIZE, 128]
    return tf.matmul(outputs[-1], weights) + biases

  def bidirectional_lstm_inference(x):
    RNN_HIDDEN_UNITS = 128

    # x was [BATCH_SIZE, 32, 32, 3]
    # x changes to [32, BATCH_SIZE, 32, 3]
    x = tf.transpose(x, [1, 0, 2, 3])
    # x changes to [32 * BATCH_SIZE, 32 * 3]
    x = tf.reshape(x, [-1, IMAGE_SIZE * RGB_CHANNEL_SIZE])
    # x changes to array of 32 * [BATCH_SIZE, 32 * 3]
    x = tf.split(axis=0, num_or_size_splits=IMAGE_SIZE, value=x)

    weights = tf.Variable(tf.random_normal([2 * RNN_HIDDEN_UNITS, LABEL_SIZE]))
    biases = tf.Variable(tf.random_normal([LABEL_SIZE]))

    # output size is 128, state size is (c=128, h=128)
    fw_lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)
    bw_lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)

    # outputs is array of 32 * [BATCH_SIZE, 128]
    outputs, _, _ = rnn.bidirectional_rnn(
        fw_lstm_cell, bw_lstm_cell, x, dtype=tf.float32)

    # outputs[-1] is [BATCH_SIZE, 128]
    return tf.matmul(outputs[-1], weights) + biases

  def stacked_lstm_inference(x):
    '''
    Stacked (two-layer) variant of lstm_inference(x).
    '''
    RNN_HIDDEN_UNITS = 128

    # x was [BATCH_SIZE, 32, 32, 3]
    # x changes to [32, BATCH_SIZE, 32, 3]
    x = tf.transpose(x, [1, 0, 2, 3])
    # x changes to [32 * BATCH_SIZE, 32 * 3]
    x = tf.reshape(x, [-1, IMAGE_SIZE * RGB_CHANNEL_SIZE])
    # x changes to array of 32 * [BATCH_SIZE, 32 * 3]
    x = tf.split(axis=0, num_or_size_splits=IMAGE_SIZE, value=x)

    weights = tf.Variable(tf.random_normal([RNN_HIDDEN_UNITS, LABEL_SIZE]))
    biases = tf.Variable(tf.random_normal([LABEL_SIZE]))

    # output size is 128, state size is (c=128, h=128)
    lstm_cell = rnn.BasicLSTMCell(RNN_HIDDEN_UNITS, forget_bias=1.0)
    lstm_cells = rnn.MultiRNNCell([lstm_cell] * 2)  # 2 layers

    # outputs is array of 32 * [BATCH_SIZE, 128]
    outputs, states = rnn.rnn(lstm_cells, x, dtype=tf.float32)

    # outputs[-1] is [BATCH_SIZE, 128]
    return tf.matmul(outputs[-1], weights) + biases
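The comments inside lstm_inference walk through a transpose/reshape/split pipeline that turns a [BATCH_SIZE, 32, 32, 3] batch into 32 per-row time steps. A hedged NumPy check of those shapes, assuming a hypothetical batch size of 4:

# Hedged shape check of the reshaping described in the lstm_inference comments.
import numpy as np
x = np.zeros((4, 32, 32, 3))          # [BATCH_SIZE, 32, 32, 3]
x = x.transpose(1, 0, 2, 3)           # [32, BATCH_SIZE, 32, 3]
x = x.reshape(-1, 32 * 3)             # [32 * BATCH_SIZE, 96]
steps = np.split(x, 32, axis=0)       # 32 arrays of [BATCH_SIZE, 96]
assert len(steps) == 32 and steps[0].shape == (4, 96)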
Example 32
import numpy as np
import pandas as pd
import pprint

bees = pd.read_csv('bees/train_labels.csv')
bees = bees.reindex(np.random.permutation(bees.index))

print len(bees), len(bees[bees.genus == 1]), len(bees[bees.genus == 0])

from scipy.ndimage import imread
from scipy.misc import imresize
#print bees.id

waka = imread('bees.orig/images/train/3246.jpg')
print waka.shape
print waka
print "eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee"
waka = imread('bees/images/train/1000.jpg')
print waka.shape
print waka

bees['images'] = [
    imresize(imread('bees/images/train/' + str(bee) + '.jpg'),
             (100, 100))[:, :, :3] for bee in bees.id
]
#bees['images'] = [imresize(imread('bees/images/train/'+str(bee)+'.jpg'),
#                           (100, 100))[:, :, ] for bee in bees.id]
#bees['images'] = [imresize(imread('bees/images/train/'+str(bee)+'.jpg'),
#                           (100, 100))[:1] for bee in bees.id]

#bees['images'] = [print('waka'+str(bee)+'.jpg') for bee in bees.id]
Example 33
    torch.set_default_dtype(torch.float64)
    dataloader = KittiLoader(cfg.KITTIPATH, cfg.MAX_DISPARITY, cfg.KERNEL,
                             cfg.CONV_LAYERS, cfg.BATCH_SIZE)
    model = SiameseNet(cfg.CHANNELS,
                       cfg.FILTERS,
                       cfg.MAX_DISPARITY,
                       cfg.KERNEL,
                       cfg.CONV_LAYERS,
                       train_mode=False)
    model.load_state_dict(torch.load(cfg.SAVE_PATH.format(cfg.TEST_EPOCH)))
    model.eval()

    image_2_path = '/home/data_scene_flow/testing/image_2/000005_10.png'
    image_3_path = '/home/data_scene_flow/testing/image_3/000005_10.png'

    raw_image_2 = ndimage.imread(image_2_path, mode='RGB')
    raw_image_3 = ndimage.imread(image_3_path, mode='RGB')

    image_2 = torch.from_numpy(
        np.expand_dims(np.moveaxis((np.uint8(raw_image_2) - 128) / 256, -1, 0),
                       0))
    image_3 = torch.from_numpy(
        np.expand_dims(np.moveaxis((np.uint8(raw_image_3) - 128) / 256, -1, 0),
                       0))

    vec_2 = model(patch=Variable(image_2, requires_grad=False))
    vec_3 = model(patch=Variable(image_3, requires_grad=False))

    #pad = 2 * int(cfg.KERNEL / 2) * cfg.CONV_LAYERS
    output = torch.Tensor(
        cfg.MAX_DISPARITY,
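One caution about the (np.uint8(raw_image) - 128) / 256 normalization above: subtracting 128 from a uint8 array wraps modulo 256 in NumPy, so pixels below 128 land at the top of the range instead of below zero. A small demonstration and the cast-first variant:

# Hedged note: uint8 arithmetic wraps; cast to float before centering.
import numpy as np
raw = np.array([0, 127, 128, 255], dtype=np.uint8)
print((raw - 128) / 256)                     # wraps: [0.5, 0.996, 0.0, 0.496]
print((raw.astype(np.float32) - 128) / 256)  # intended: [-0.5, -0.004, 0.0, 0.496]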
Example 34
    return result


if __name__ == '__main__':
    np.set_printoptions(
            threshold=np.inf,
            precision=4,
            suppress=True)

    print("Reading image")
    image_dir_path = "./new/"
    image_file_names = [f for f in listdir("./new") if isfile(join(image_dir_path, f)) and f != ".DS_Store"]
    for img in image_file_names:
        destinationImage = image_dir_path + img
        image = ndimage.imread(image_dir_path + img, mode="L").astype("float64")
        if options.images > 0:
            utils2.showImage(image, "original", vmax=255.0)

        print("Normalizing")
        image = utils2.normalize(image)
        if options.images > 1:
            utils2.showImage(image, "normalized")

        print("Finding mask")
        mask = utils2.findMask(image)
        if options.images > 1:
            utils2.showImage(mask, "mask")

        print("Applying local normalization")
        image = np.where(mask == 1.0, utils2.localNormalize(image), image)
Example 35
# for i in learning_rates:
#     print ("learning rate is: " + str(i))
#     models[str(i)] = training.model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
#     print ('\n' + "-------------------------------------------------------" + '\n')
#
# for i in learning_rates:
#     plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
#
# plt.ylabel('cost')
# plt.xlabel('iterations')
#
# legend = plt.legend(loc='upper center', shadow=True)
# frame = legend.get_frame()
# frame.set_facecolor('0.90')
# plt.show()

# Test with your own image
my_image = "my_image.jpg"
image = np.array(ndimage.imread(my_image, flatten=False))
my_image = scipy.misc.imresize(image,
                               size=(num_px,
                                     num_px)).reshape(num_px * num_px * 3, 1)
my_predicted_image = training.predict(d["w"], d["b"], my_image)

plt.imshow(image)
plt.imshow(my_image.reshape(num_px, num_px, 3))
plt.show()
print("y = " + str(np.squeeze(my_predicted_image)) +
      ", your algorithm predicts a \"" +
      classes[int(np.squeeze(my_predicted_image)), ].decode("utf-8") +
      "\" picture.")
Example 36
def detect_objects(image_np, sess, detection_graph):
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Each box represents a part of the image where a particular object was detected.
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represent how level of confidence for each of the objects.
    # Score is shown on the result image, together with the class label.
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Actual detection.
    (boxes, scores, classes, num_detections) = sess.run(
        [boxes, scores, classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})

    # 0.8 is the confidence threshold for the top detection. Detections arrive
    # sorted by descending confidence; the check below passes if any of the top
    # five detections matches the target class (threshold 0.8 for the first,
    # 0.4 for the rest).
    if ((np.squeeze(scores)[0] > 0.8) and (np.squeeze(classes)[0] == 1)) \
            or ((np.squeeze(scores)[1] > 0.4) and (np.squeeze(classes)[1] == 1)) \
            or ((np.squeeze(scores)[2] > 0.4) and (np.squeeze(classes)[2] == 1)) \
            or ((np.squeeze(scores)[3] > 0.4) and (np.squeeze(classes)[3] == 1)) \
            or ((np.squeeze(scores)[4] > 0.4) and (np.squeeze(classes)[4] == 1)):


        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8)

        img = Image.fromarray(image_np, 'RGB')
        # print(os.path.splitext(segmentFileName)[0])
        img.save("<CONFIGURE_PATH_TO_SAVING_THE_IMAGE_SEGMENTS_WITH_BOUNDINGBOXES>" + "_detected.jpg")
        # img.show()

    pageHeight = 3300
    pageWidth = 5100

    cropHeight = int(pageHeight / 30)
    cropWidth = int(pageWidth / 50)

    pageIndex = 1

    with tf.Session(graph=detection_graph) as sess:
        startTime = datetime.now()

    imageNumpyArray = ndimage.imread("<CONFIGURE_PATH_TO_JPG_FILE_TO_CONDUCT_OBJECT_DETECTION_ON>")

    overlapWidth = 10

    #code below loads the jpg image into a single numpy array and extracts shorter segments to feed them individualy as input tensors into tensor_flow model

    segmentIndex = 0
    while segmentIndex <= 1499:
        if (segmentIndex == 0):
            cropArray = imageNumpyArray[0:cropHeight + overlapWidth, 0:cropWidth + overlapWidth, :]

        # catch top right corner tile
        elif (segmentIndex == 49):
            cropArray = imageNumpyArray[0:cropHeight + overlapWidth,
                    segmentIndex * cropWidth - overlapWidth:segmentIndex * cropWidth + cropWidth, :]

            # catch bottom left corner tile
        elif (segmentIndex == 1450):
            cropArray = imageNumpyArray[
                        cropHeight * segmentIndex // 50 - overlapWidth:cropHeight * segmentIndex // 50 + cropHeight,
                        0:cropWidth + overlapWidth, :]
            # catch bottom right corner tile
        elif (segmentIndex == 1499):
            cropArray = imageNumpyArray[
                        cropHeight * segmentIndex // 50 - cropHeight - overlapWidth:cropHeight * segmentIndex // 50,
                        segmentIndex % 50 * cropWidth - overlapWidth:segmentIndex % 50 * cropWidth + cropWidth, :]
            # catch right edge tiles so no overlap on left
        elif (segmentIndex % 50 == 0):
            # print(cropHeight*segmentIndex//50)
            cropArray = imageNumpyArray[cropHeight * (segmentIndex // 50):cropHeight * (segmentIndex // 50) + cropHeight,
                        0:cropWidth + overlapWidth, :]
             # catch top edge tiles so no overlap on top
        elif (segmentIndex <= 48):
            # print(segmentIndex*cropWidth)
            cropArray = imageNumpyArray[0:cropHeight + overlapWidth,
                        segmentIndex * cropWidth:segmentIndex * cropWidth + cropWidth, :]
            # catch left edge tiles so no overlap on left
        elif (segmentIndex + 1) % 50 == 0:
            # print(segmentIndex * cropWidth)
            cropArray = imageNumpyArray[((segmentIndex + 1) // 50) * cropHeight - overlapWidth:((
                                                                                                segmentIndex + 1) // 50) * cropHeight + cropHeight + overlapWidth,
                        (segmentIndex) % 50 * cropWidth - overlapWidth:(segmentIndex) % 50 * cropWidth + cropWidth, :]
            # catch bottom edge tiles so no overlap on top
        elif (segmentIndex > 1450):
            # print(segmentIndex * cropWidth)
            cropArray = imageNumpyArray[((segmentIndex + 1) // 50) * cropHeight:((
                                                                                 segmentIndex + 1) // 50) * cropHeight + cropHeight + overlapWidth,
                        (segmentIndex) % 50 * cropWidth - overlapWidth:(segmentIndex) % 50 * cropWidth + cropWidth, :]
        else:
            cropArray = imageNumpyArray[(segmentIndex // 50) * cropHeight - overlapWidth: (
                                                                                          segmentIndex // 50) * cropHeight + cropHeight + overlapWidth,
                        (segmentIndex) % 50 * cropWidth - overlapWidth:(
                                                                       segmentIndex) % 50 * cropWidth + cropWidth + overlapWidth,
                        :]
        detect_objects(cropArray, sess, detection_graph)
        if segmentIndex % 150 == 0:
            print(str(segmentIndex // 150 * 10) + " percent complete")
        segmentIndex += 1
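The corner- and edge-tile special cases above can also be expressed as one clamped slice computation. A hedged sketch under the same assumptions (a 50-column grid over a 5100x3300 page; the function name is hypothetical):

# Hedged sketch: generic tile-with-overlap bounds equivalent to the branches above.
def tile_bounds(segment_index, crop_height, crop_width, overlap,
                cols=50, page_height=3300, page_width=5100):
    row, col = segment_index // cols, segment_index % cols
    top = max(row * crop_height - overlap, 0)
    bottom = min(row * crop_height + crop_height + overlap, page_height)
    left = max(col * crop_width - overlap, 0)
    right = min(col * crop_width + crop_width + overlap, page_width)
    return top, bottom, left, right
# usage: t, b, l, r = tile_bounds(i, cropHeight, cropWidth, overlapWidth)
#        cropArray = imageNumpyArray[t:b, l:r, :]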
Example 37
def main():
    # batch size for extracting feature vectors from vggnet.
    batch_size = 100
    # maximum length of caption(number of word). if caption is longer than max_length, deleted.
    max_length = 15
    # if word occurs less than word_count_threshold in training dataset, the word index is special unknown token.
    word_count_threshold = 1
    # vgg model path
    vgg_model_path = '/home/most12lee/data/imagenet-vgg-verydeep-19.mat'
    ##### vgg model-> wget http://www.vlfeat.org/matconvnet/models/imagenet-vgg-verydeep-19.mat -P data/

    caption_file = '/home/most12lee/downloads/data/token_3000imgs.json'
    image_dir = '/home/most12lee/downloads/data/%s_resized/'

    # about 80000 images and 400000 captions for train dataset
    # -> ME: about 2100 images and 10500 captions for train datasets
    train_dataset = _process_caption_data(
        caption_file=
        '/home/most12lee/downloads/data/token_3000imgs_train_revised_f2.json',  #### DONT FORGET TO CHANGE CSV INTO JSON FILE!!!!!!
        image_dir='/home/most12lee/downloads/data/train_resized/',
        max_length=max_length)

    # about 40000 images and 200000 captions
    # -> ME: about 900 images and 4500 captions for val datasets
    val_dataset = _process_caption_data(
        caption_file=
        '/home/most12lee/downloads/data/token_3000imgs_val_revised_f2.json',  #### DONT FORGET TO CHANGE CSV INTO JSON FILE!!!!!!
        image_dir='/home/most12lee/downloads/data/val_resized/',
        max_length=max_length)

    # about 4000 images and 20000 captions for val / test dataset
    # -> ME: about 90 images and 450 captions for val / test datasets
    val_cutoff = int(0.1 * len(val_dataset))
    test_cutoff = int(0.2 * len(val_dataset))
    print 'Finished processing caption data'

    ###### USING load_pickle() & save_pickle() FROM core_utils.py
    save_pickle(train_dataset,
                '/home/most12lee/downloads/data/train/train.annotations.pkl')
    save_pickle(val_dataset[:val_cutoff],
                '/home/most12lee/downloads/data/val/val.annotations.pkl')
    save_pickle(val_dataset[val_cutoff:test_cutoff].reset_index(drop=True),
                '/home/most12lee/downloads/data/test/test.annotations.pkl')

    for split in ['train', 'val', 'test']:
        annotations = load_pickle(
            '/home/most12lee/downloads/data/%s/%s.annotations.pkl' %
            (split, split))

        if split == 'train':
            word_to_idx = _build_vocab(annotations=annotations,
                                       threshold=word_count_threshold)
            save_pickle(
                word_to_idx,
                '/home/most12lee/downloads/data/%s/word_to_idx.pkl' % split)

        captions = _build_caption_vector(annotations=annotations,
                                         word_to_idx=word_to_idx,
                                         max_length=max_length)
        save_pickle(
            captions, '/home/most12lee/downloads/data/%s/%s.captions.pkl' %
            (split, split))

        file_names, id_to_idx = _build_file_names(annotations)
        save_pickle(
            file_names, '/home/most12lee/downloads/data/%s/%s.file.names.pkl' %
            (split, split))

        image_idxs = _build_image_idxs(annotations, id_to_idx)
        save_pickle(
            image_idxs, '/home/most12lee/downloads/data/%s/%s.image.idxs.pkl' %
            (split, split))

        # prepare reference captions to compute bleu scores later
        image_ids = {}
        feature_to_captions = {}
        i = -1
        for caption, image_id in zip(annotations['caption'],
                                     annotations['image_id']):
            if image_id not in image_ids:
                image_ids[image_id] = 0
                i += 1
                feature_to_captions[i] = []
            feature_to_captions[i].append(caption.lower() + ' .')
        save_pickle(
            feature_to_captions,
            '/home/most12lee/downloads/data/%s/%s.references.pkl' %
            (split, split))
        print "Finished building %s caption dataset" % split

    # extract conv5_3 feature vectors
    vggnet = Vgg19(vgg_model_path)
    vggnet.build()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for split in ['train', 'val', 'test']:
            anno_path = '/home/most12lee/downloads/data/%s/%s.annotations.pkl' % (
                split, split)
            save_path = '/home/most12lee/downloads/data/%s/%s.features.hkl' % (
                split, split)
            annotations = load_pickle(anno_path)
            image_path = list(annotations['file_name'].unique())
            n_examples = len(image_path)

            all_feats = np.ndarray([n_examples, 196, 512], dtype=np.float32)

            for start, end in zip(
                    range(0, n_examples, batch_size),
                    range(batch_size, n_examples + batch_size, batch_size)):
                image_batch_file = image_path[start:end]
                image_batch = np.array(
                    [ndimage.imread(x, mode='RGB') for x in image_batch_file]
                ).astype(np.float32)
                feats = sess.run(vggnet.features,
                                 feed_dict={vggnet.images: image_batch})
                all_feats[start:end, :] = feats
                print("Processed %d %s features.." % (end, split))

            # use hickle to save huge feature vectors
            hickle.dump(all_feats, save_path)
            print("Saved %s.." % (save_path))
Example no. 38
def process():

    with tf.name_scope('input') as scope:
        x = tf.placeholder(
            tf.float32,
            shape=[BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNEL])
        y = tf.placeholder(tf.float32, shape=[BATCH_SIZE, NUM_LABELS])

    y_, params = model(x)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

    with tf.name_scope('acc'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('acc', accuracy)
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_))
        tf.summary.scalar('loss', loss)

    opt = tf.train.GradientDescentOptimizer(0.001).minimize(loss,
                                                            var_list=params)

    data_x = np.zeros([BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNEL])
    data_y = np.zeros([BATCH_SIZE, NUM_LABELS])
    #merged = tf.merge_all_summaries()
    saver = tf.train.Saver()
    Data = DataFrame(np.zeros([40000, 4]),
                     index=[np.arange(0, 40000, 1)],
                     columns=['predict1', 'predict2', 'label1', 'label2'])
    with tf.Session() as sess:
        saver.restore(
            sess,
            "/home/liugq/Workspace/ResNet_BN/data/models/2018.01/DRN_resnet_3_180000.ckpt"
        )
        test_writer = tf.summary.FileWriter(
            "/home/liugq/Workspace/ResNet_BN/data/logs/2018.01/drn_3/test/batch_1/bn_constant"
        )
        cover_count = 0
        stego_count = 0

        result2 = np.array([])  #accuracy for testing set
        test_count = 0
        i = 180000
        print('DRN result test:')
        print('epoch:', i)

        while test_count < 40000:
            if test_count % 2 == 0:
                imc = ndimage.imread(path2 + '/' +
                                     fileList2[test_count % 20000])
                data_y[0, 0] = 0
                data_y[0, 1] = 1
            else:
                imc = ndimage.imread(path4 + '/' +
                                     fileList2[test_count % 20000])
                data_y[0, 0] = 1
                data_y[0, 1] = 0
            test_count = test_count + 1
            data_x[0, :, :, 0] = imc.astype(np.float32)

            c2, temp2 = sess.run([loss, accuracy],
                                 feed_dict={
                                     x: data_x,
                                     y: data_y,
                                     is_train: False
                                 })
            result2 = np.insert(result2, 0, temp2)
            if test_count % 2000 == 0:
                print(temp2)
        print('test:', np.mean(result2))
        summary = tf.Summary(value=[
            tf.Summary.Value(tag="test_acc", simple_value=np.mean(result2)),
        ])
        test_writer.add_summary(summary, i)
Example no. 39
    def test(self,
             data,
             split='train',
             attention_visualization=False,
             save_sampled_captions=True):
        '''
        Args:
            - data: dictionary with the following keys:
                - features: Feature vectors of shape (5000, 196, 512)
                - file_names: Image file names of shape (5000, )
                - captions: Captions of shape (24210, 17)
                - image_idxs: Indices mapping each caption to its image, of shape (24210, )
                - features_to_captions: Mapping from feature to captions (5000, 4~5)
            - split: 'train', 'val' or 'test'
            - attention_visualization: If True, visualize attention weights with images for each sampled word (IPython notebook).
            - save_sampled_captions: If True, save sampled captions to a pkl file for computing BLEU scores.
        '''
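        # Example call (a sketch; `solver` and `test_data` are placeholders for
        # an instantiated solver and a data dictionary shaped as documented above):
        #     solver.test(test_data, split='test', attention_visualization=False)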

        features = data['features']
        n_examples = data['captions'].shape[0]
        n_iters_per_epoch = int(np.ceil(float(n_examples) / self.batch_size))
        # build a graph to sample captions
        alphas, betas, sampled_captions = self.model.build_sampler(
            max_len=20)  # (N, max_len, L), (N, max_len)

        config = tf.ConfigProto(allow_soft_placement=True)
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            saver = tf.train.Saver()
            saver.restore(sess, self.test_model)
            features_batch, image_files = sample_coco_minibatch_inference(
                data, self.batch_size)
            feed_dict = {self.model.features: features_batch}
            alps, bts, sam_cap = sess.run(
                [alphas, betas, sampled_captions],
                feed_dict)  # (N, max_len, L), (N, max_len)
            decoded = decode_captions(sam_cap, self.model.idx_to_word)

            if self.print_bleu:
                all_gen_cap = np.ndarray((features.shape[0], 20))
                for i in range(n_iters_per_epoch):
                    features_batch = features[i * self.batch_size:(i + 1) *
                                              self.batch_size]
                    feed_dict = {self.model.features: features_batch}
                    gen_cap = sess.run(sampled_captions, feed_dict=feed_dict)
                    all_gen_cap[i * self.batch_size:(i + 1) *
                                self.batch_size] = gen_cap

                all_decoded = decode_captions(all_gen_cap,
                                              self.model.idx_to_word)
                save_pickle(all_decoded,
                            "./data/val/val.candidate.captions.pkl")
                scores = evaluate(data_path='./data',
                                  split='val',
                                  get_scores=True)

            if attention_visualization:
                for n in range(10):
                    print "Sampled Caption: %s" % decoded[n]

                    # Plot original image
                    img = ndimage.imread(image_files[n])
                    plt.clf()
                    plt.subplot(4, 5, 1)
                    plt.imshow(img)
                    plt.axis('off')

                    # Plot images with attention weights
                    words = decoded[n].split(" ")
                    for t in range(len(words)):
                        if t > 18:
                            break
                        plt.subplot(4, 5, t + 2)
                        plt.text(0,
                                 1,
                                 '%s(%.2f)' % (words[t], bts[n, t]),
                                 color='black',
                                 backgroundcolor='white',
                                 fontsize=8)
                        plt.imshow(img)
                        alp_curr = alps[n, t, :].reshape(14, 14)
                        alp_img = skimage.transform.pyramid_expand(alp_curr,
                                                                   upscale=16,
                                                                   sigma=20)
                        plt.imshow(alp_img, alpha=0.85)
                        plt.axis('off')
                    plt.savefig(str(n) + 'test.pdf')

            if save_sampled_captions:
                all_sam_cap = np.ndarray((features.shape[0], 20))
                num_iter = int(
                    np.ceil(float(features.shape[0]) / self.batch_size))
                for i in range(num_iter):
                    features_batch = features[i * self.batch_size:(i + 1) *
                                              self.batch_size]
                    feed_dict = {self.model.features: features_batch}
                    all_sam_cap[i * self.batch_size:(i + 1) *
                                self.batch_size] = sess.run(
                                    sampled_captions, feed_dict)
                all_decoded = decode_captions(all_sam_cap,
                                              self.model.idx_to_word)
                save_pickle(
                    all_decoded,
                    "./data/%s/%s.candidate.captions.pkl" % (split, split))
Example no. 40
# In[7]:
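
# natural_keys is used below but not defined in this snippet; a common
# implementation (an assumption about the original helper) splits digit runs
# so that 'img10' sorts after 'img2':
import re

def natural_keys(text):
    return [int(c) if c.isdigit() else c for c in re.split(r'(\d+)', text)]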

# Sort the files in the specified directory by filename
root_path = ""
filenames = []
for root, dirnames, filenames in os.walk("DenoisedTrain"):
    filenames.sort(key=natural_keys)
    root_path = root

# In[8]:

# Read the images in sorted-filename order and store them in a list
images = []
for filename in filenames:
    filepath = os.path.join(root_path, filename)
    image = ndimage.imread(filepath, mode="L")
    images.append(image)
    print(filename)

# In[9]:

len(images)

# In[10]:

# Load the labels for the resized, cropped images
labels = np.load('resized_cropped_labeledimages.npy')
labels_list = list(labels)  # one entry per labeled image
print(labels.shape)
Example no. 41
gridx = np.arange(X0, X1, (X1-X0)/NUM)
gridy = np.arange(Y0, Y1, (Y1-Y0)/NUM)

OK = OrdinaryKriging(data[:, 0], data[:, 1], data[:, 2], variogram_model='exponential', verbose=False, enable_plotting=False)
z, ss = OK.execute('grid', gridx, gridy)

print("AVG sigma:", np.average(ss), "\nMAX sigma:", np.max(ss), "\nMIN sigma:", np.min(ss), "\nMEDIAN sigma:", np.median(ss), "\n75° percentile sigma:", 
		np.percentile(ss, 75), "\n90° percentile sigma:", np.percentile(ss, 90), "\n95° percentile sigma:", np.percentile(ss, 95), "\n99° percentile sigma:", np.percentile(ss, 99))
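
# OrdinaryKriging above consumes the x, y, value columns of an (N, 3) array.
# A self-contained sketch of the same call on synthetic points (an
# illustration, not part of the original script; assumes pykrige is installed):
from pykrige.ok import OrdinaryKriging
_pts = np.random.rand(30, 3)
_ok = OrdinaryKriging(_pts[:, 0], _pts[:, 1], _pts[:, 2],
                      variogram_model='exponential')
_z, _var = _ok.execute('grid', np.linspace(0.0, 1.0, 20),
                       np.linspace(0.0, 1.0, 20))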

extent = (X0,X1,Y0,Y1)
im = plt.imshow(z, cmap='Wistia', aspect='auto', extent=extent, origin="lower")
plt.colorbar(im)

from scipy.ndimage import imread
im = imread('Maschera_Milano.png', mode='RGBA')
plt.imshow(im, extent=extent)

for shape in sf.shapeRecords():
	x = [i[0] for i in shape.shape.points[:]]
	y = [i[1] for i in shape.shape.points[:]]
	
	plt.plot(x,y)

data = rr
delta = []
dev = []
import math

print("\n\n--- Delta temperature rispetto alle misurazioni delle centraline ---\n\n")
for staz in ["Milano Lambrate", "Milano v.Brera", "Milano v.Juvara", "Milano v.Marche", "Milano P.zza Zavattari", "Milano v.Feltre"]:
Example no. 42
def detect_keypoints(imagename, threshold):
    # SIFT Detector
    # --------------

    original = ndimage.imread(imagename, flatten=True)

    # SIFT Parameters

    s = 3
    k = 2**(1.0 / s)

    kvec1 = np.array(
        [1.3, 1.6, 1.6 * k, 1.6 * (k**2), 1.6 * (k**3), 1.6 * (k**4)])
    kvec2 = np.array([
        1.6 * (k**2), 1.6 * (k**3), 1.6 * (k**4), 1.6 * (k**5), 1.6 * (k**6),
        1.6 * (k**7)
    ])
    kvec3 = np.array([
        1.6 * (k**5), 1.6 * (k**6), 1.6 * (k**7), 1.6 * (k**8), 1.6 * (k**9),
        1.6 * (k**10)
    ])
    kvec4 = np.array([
        1.6 * (k**8), 1.6 * (k**9), 1.6 * (k**10), 1.6 * (k**11),
        1.6 * (k**12), 1.6 * (k**13)
    ])
    kvectotal = np.array([
        1.6, 1.6 * k, 1.6 * (k**2), 1.6 * (k**3), 1.6 * (k**4), 1.6 * (k**5),
        1.6 * (k**6), 1.6 * (k**7), 1.6 * (k**8), 1.6 * (k**9), 1.6 * (k**10),
        1.6 * (k**11)
    ])

    # Downsampling images
    doubled = misc.imresize(original, 200, 'bilinear').astype(int)
    normal = misc.imresize(doubled, 50, 'bilinear').astype(int)
    halved = misc.imresize(normal, 50, 'bilinear').astype(int)
    quartered = misc.imresize(halved, 50, 'bilinear').astype(int)

    # Initialize Gaussian pyramids
    pyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 6))
    pyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 6))
    pyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 6))
    pyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 6))

    print("Constructing pyramids...")

    # Construct Gaussian pyramids

    list_blur = []
    for i in range(0, 6):
        pyrlvl1[:, :, i] = ndimage.filters.gaussian_filter(doubled, kvec1[i])
        pyrlvl2[:, :, i] = misc.imresize(
            ndimage.filters.gaussian_filter(doubled, kvec2[i]), 50, 'bilinear')
        pyrlvl3[:, :, i] = misc.imresize(
            ndimage.filters.gaussian_filter(doubled, kvec3[i]), 25, 'bilinear')
        pyrlvl4[:, :, i] = misc.imresize(
            ndimage.filters.gaussian_filter(doubled, kvec4[i]), 1.0 / 8.0,
            'bilinear')

    list_blur.append(pyrlvl1)
    list_blur.append(pyrlvl2)
    list_blur.append(pyrlvl3)
    list_blur.append(pyrlvl4)
    fig = plt.figure(figsize=(4, 6))
    for i in range(4):
        for j in range(6):
            fig.add_subplot(4, 6, i * 6 + j + 1)
            plt.imshow(list_blur[i][:, :, j], cmap='gray')
    plt.show()

    # Initialize Difference-of-Gaussians (DoG) pyramids
    diffpyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 5))
    diffpyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 5))
    diffpyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 5))
    diffpyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 5))

    # Construct DoG pyramids
    for i in range(0, 5):
        diffpyrlvl1[:, :, i] = pyrlvl1[:, :, i + 1] - pyrlvl1[:, :, i]
        diffpyrlvl2[:, :, i] = pyrlvl2[:, :, i + 1] - pyrlvl2[:, :, i]
        diffpyrlvl3[:, :, i] = pyrlvl3[:, :, i + 1] - pyrlvl3[:, :, i]
        diffpyrlvl4[:, :, i] = pyrlvl4[:, :, i + 1] - pyrlvl4[:, :, i]

    list_dog = []
    list_dog.append(diffpyrlvl1)
    list_dog.append(diffpyrlvl2)
    list_dog.append(diffpyrlvl3)
    list_dog.append(diffpyrlvl4)

    fig_dog = plt.figure(figsize=(4, 5))
    for i in range(4):
        for j in range(5):
            fig_dog.add_subplot(4, 5, i * 5 + j + 1)
            plt.imshow(list_dog[i][:, :, j], cmap="gray")
    plt.show()

    # Initialize pyramids to store extrema locations
    extrpyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))
    extrpyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 3))
    extrpyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 3))
    extrpyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 3))

    print("Starting extrema detection...")
    print("First octave")

    # In each of the following loops, elements of each pyramid that are larger
    # or smaller than all 26 of their immediate neighbors in space and scale
    # are labeled as extrema. As explained in section 4 of Lowe's paper, these
    # initial extrema are pruned by checking that their contrast and curvature
    # are above certain thresholds. The thresholds used here are those
    # suggested by Lowe.
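
    # As an aside, the brute-force 26-neighbor test below can be sketched far
    # more compactly with scipy's rank filters. The helper here is only an
    # illustration (it is never called in this function, and it skips the
    # contrast/curvature pruning that the loops perform):
    def _raw_extrema_sketch(dog_stack, contrast_threshold):
        from scipy.ndimage import maximum_filter, minimum_filter
        # a pixel is a candidate extremum if it equals the max (or min) of its
        # 3x3x3 neighborhood and clears the contrast threshold
        is_max = dog_stack == maximum_filter(dog_stack, size=3)
        is_min = dog_stack == minimum_filter(dog_stack, size=3)
        return (is_max | is_min) & (np.absolute(dog_stack) > contrast_threshold)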

    for i in range(1, 4):
        for j in range(0, doubled.shape[0] - 1):
            for k in range(0, doubled.shape[1] - 1):
                if np.absolute(diffpyrlvl1[j, k, i]) < threshold:
                    continue

                maxbool = (diffpyrlvl1[j, k, i] > 0)  # is the response positive?
                minbool = (diffpyrlvl1[j, k, i] < 0)  # is the response negative?

                for di in range(-1, 2):
                    for dj in range(-1, 2):
                        for dk in range(-1, 2):
                            if di == 0 and dj == 0 and dk == 0:
                                continue
                            maxbool = maxbool and (diffpyrlvl1[j, k, i] >
                                                   diffpyrlvl1[j + dj, k + dk,
                                                               i + di])
                            minbool = minbool and (diffpyrlvl1[j, k, i] <
                                                   diffpyrlvl1[j + dj, k + dk,
                                                               i + di])
                            if not maxbool and not minbool:
                                break

                        if not maxbool and not minbool:
                            break

                    if not maxbool and not minbool:
                        break

                if maxbool or minbool:
                    dx = (diffpyrlvl1[j, k + 1, i] -
                          diffpyrlvl1[j, k - 1, i]) * 0.5 / 255  # x-derivative, scaled to [0, 1]
                    dy = (diffpyrlvl1[j + 1, k, i] -
                          diffpyrlvl1[j - 1, k, i]) * 0.5 / 255
                    ds = (diffpyrlvl1[j, k, i + 1] -
                          diffpyrlvl1[j, k, i - 1]) * 0.5 / 255
                    dxx = (diffpyrlvl1[j, k + 1, i] + diffpyrlvl1[j, k - 1, i]
                           - 2 * diffpyrlvl1[j, k, i]) * 1.0 / 255
                    dyy = (diffpyrlvl1[j + 1, k, i] + diffpyrlvl1[j - 1, k, i]
                           - 2 * diffpyrlvl1[j, k, i]) * 1.0 / 255
                    dss = (diffpyrlvl1[j, k, i + 1] + diffpyrlvl1[j, k, i - 1]
                           - 2 * diffpyrlvl1[j, k, i]) * 1.0 / 255
                    dxy = (diffpyrlvl1[j + 1, k + 1, i] -
                           diffpyrlvl1[j + 1, k - 1, i] -
                           diffpyrlvl1[j - 1, k + 1, i] +
                           diffpyrlvl1[j - 1, k - 1, i]) * 0.25 / 255
                    dxs = (diffpyrlvl1[j, k + 1, i + 1] -
                           diffpyrlvl1[j, k - 1, i + 1] -
                           diffpyrlvl1[j, k + 1, i - 1] +
                           diffpyrlvl1[j, k - 1, i - 1]) * 0.25 / 255
                    dys = (diffpyrlvl1[j + 1, k, i + 1] -
                           diffpyrlvl1[j - 1, k, i + 1] -
                           diffpyrlvl1[j + 1, k, i - 1] +
                           diffpyrlvl1[j - 1, k, i - 1]) * 0.25 / 255

                    dD = np.matrix([[dx], [dy], [ds]])  # 3x1 gradient vector
                    H = np.matrix([[dxx, dxy, dxs], [dxy, dyy, dys],
                                   [dxs, dys, dss]])  # 3x3 Hessian
                    x_hat = np.linalg.lstsq(H, dD)[0]
                    D_x_hat = diffpyrlvl1[j, k, i] + 0.5 * np.dot(
                        dD.transpose(), x_hat)

                    r = 10.0
                    if (((dxx + dyy)**2) * r < (dxx * dyy - dxy**2) * ((r + 1)**2)
                            and np.absolute(x_hat[0]) < 0.5
                            and np.absolute(x_hat[1]) < 0.5
                            and np.absolute(x_hat[2]) < 0.5
                            and np.absolute(D_x_hat) > 0.03):
                        extrpyrlvl1[j, k, i - 1] = 1

    print("Second octave")

    for i in range(1, 4):
        for j in range(0, normal.shape[0] - 1):
            for k in range(0, normal.shape[1] - 1):
                if np.absolute(diffpyrlvl2[j, k, i]) < threshold:
                    continue

                maxbool = (diffpyrlvl2[j, k, i] > 0)
                minbool = (diffpyrlvl2[j, k, i] < 0)

                for di in range(-1, 2):
                    for dj in range(-1, 2):
                        for dk in range(-1, 2):
                            if di == 0 and dj == 0 and dk == 0:
                                continue
                            maxbool = maxbool and (diffpyrlvl2[j, k, i] >
                                                   diffpyrlvl2[j + dj, k + dk,
                                                               i + di])
                            minbool = minbool and (diffpyrlvl2[j, k, i] <
                                                   diffpyrlvl2[j + dj, k + dk,
                                                               i + di])
                            if not maxbool and not minbool:
                                break

                        if not maxbool and not minbool:
                            break

                    if not maxbool and not minbool:
                        break

                if maxbool or minbool:
                    dx = (diffpyrlvl2[j, k + 1, i] -
                          diffpyrlvl2[j, k - 1, i]) * 0.5 / 255
                    dy = (diffpyrlvl2[j + 1, k, i] -
                          diffpyrlvl2[j - 1, k, i]) * 0.5 / 255
                    ds = (diffpyrlvl2[j, k, i + 1] -
                          diffpyrlvl2[j, k, i - 1]) * 0.5 / 255
                    dxx = (diffpyrlvl2[j, k + 1, i] + diffpyrlvl2[j, k - 1, i]
                           - 2 * diffpyrlvl2[j, k, i]) * 1.0 / 255
                    dyy = (diffpyrlvl2[j + 1, k, i] + diffpyrlvl2[j - 1, k, i]
                           - 2 * diffpyrlvl2[j, k, i]) * 1.0 / 255
                    dss = (diffpyrlvl2[j, k, i + 1] + diffpyrlvl2[j, k, i - 1]
                           - 2 * diffpyrlvl2[j, k, i]) * 1.0 / 255
                    dxy = (diffpyrlvl2[j + 1, k + 1, i] -
                           diffpyrlvl2[j + 1, k - 1, i] -
                           diffpyrlvl2[j - 1, k + 1, i] +
                           diffpyrlvl2[j - 1, k - 1, i]) * 0.25 / 255
                    dxs = (diffpyrlvl2[j, k + 1, i + 1] -
                           diffpyrlvl2[j, k - 1, i + 1] -
                           diffpyrlvl2[j, k + 1, i - 1] +
                           diffpyrlvl2[j, k - 1, i - 1]) * 0.25 / 255
                    dys = (diffpyrlvl2[j + 1, k, i + 1] -
                           diffpyrlvl2[j - 1, k, i + 1] -
                           diffpyrlvl2[j + 1, k, i - 1] +
                           diffpyrlvl2[j - 1, k, i - 1]) * 0.25 / 255

                    dD = np.matrix([[dx], [dy], [ds]])
                    H = np.matrix([[dxx, dxy, dxs], [dxy, dyy, dys],
                                   [dxs, dys, dss]])
                    x_hat = np.linalg.lstsq(H, dD)[0]
                    D_x_hat = diffpyrlvl2[j, k, i] + 0.5 * np.dot(
                        dD.transpose(), x_hat)

                    r = 10.0
                    if (((dxx + dyy)**2) * r < (dxx * dyy - dxy**2) * ((r + 1)**2)
                            and np.absolute(x_hat[0]) < 0.5
                            and np.absolute(x_hat[1]) < 0.5
                            and np.absolute(x_hat[2]) < 0.5
                            and np.absolute(D_x_hat) > 0.03):
                        extrpyrlvl2[j, k, i - 1] = 1

    print("Third octave")

    for i in range(1, 4):
        for j in range(0, halved.shape[0] - 1):
            for k in range(0, halved.shape[1] - 1):
                if np.absolute(diffpyrlvl3[j, k, i]) < threshold:
                    continue

                maxbool = (diffpyrlvl3[j, k, i] > 0)
                minbool = (diffpyrlvl3[j, k, i] < 0)

                for di in range(-1, 2):
                    for dj in range(-1, 2):
                        for dk in range(-1, 2):
                            if di == 0 and dj == 0 and dk == 0:
                                continue
                            maxbool = maxbool and (diffpyrlvl3[j, k, i] >
                                                   diffpyrlvl3[j + dj, k + dk,
                                                               i + di])
                            minbool = minbool and (diffpyrlvl3[j, k, i] <
                                                   diffpyrlvl3[j + dj, k + dk,
                                                               i + di])
                            if not maxbool and not minbool:
                                break

                        if not maxbool and not minbool:
                            break

                    if not maxbool and not minbool:
                        break

                if maxbool or minbool:
                    dx = (diffpyrlvl3[j, k + 1, i] -
                          diffpyrlvl3[j, k - 1, i]) * 0.5 / 255
                    dy = (diffpyrlvl3[j + 1, k, i] -
                          diffpyrlvl3[j - 1, k, i]) * 0.5 / 255
                    ds = (diffpyrlvl3[j, k, i + 1] -
                          diffpyrlvl3[j, k, i - 1]) * 0.5 / 255
                    dxx = (diffpyrlvl3[j, k + 1, i] + diffpyrlvl3[j, k - 1, i]
                           - 2 * diffpyrlvl3[j, k, i]) * 1.0 / 255
                    dyy = (diffpyrlvl3[j + 1, k, i] + diffpyrlvl3[j - 1, k, i]
                           - 2 * diffpyrlvl3[j, k, i]) * 1.0 / 255
                    dss = (diffpyrlvl3[j, k, i + 1] + diffpyrlvl3[j, k, i - 1]
                           - 2 * diffpyrlvl3[j, k, i]) * 1.0 / 255
                    dxy = (diffpyrlvl3[j + 1, k + 1, i] -
                           diffpyrlvl3[j + 1, k - 1, i] -
                           diffpyrlvl3[j - 1, k + 1, i] +
                           diffpyrlvl3[j - 1, k - 1, i]) * 0.25 / 255
                    dxs = (diffpyrlvl3[j, k + 1, i + 1] -
                           diffpyrlvl3[j, k - 1, i + 1] -
                           diffpyrlvl3[j, k + 1, i - 1] +
                           diffpyrlvl3[j, k - 1, i - 1]) * 0.25 / 255
                    dys = (diffpyrlvl3[j + 1, k, i + 1] -
                           diffpyrlvl3[j - 1, k, i + 1] -
                           diffpyrlvl3[j + 1, k, i - 1] +
                           diffpyrlvl3[j - 1, k, i - 1]) * 0.25 / 255

                    dD = np.matrix([[dx], [dy], [ds]])
                    H = np.matrix([[dxx, dxy, dxs], [dxy, dyy, dys],
                                   [dxs, dys, dss]])
                    x_hat = np.linalg.lstsq(H, dD)[0]
                    D_x_hat = diffpyrlvl3[j, k, i] + 0.5 * np.dot(
                        dD.transpose(), x_hat)

                    r = 10.0
                    if (((dxx + dyy)**2) * r < (dxx * dyy - dxy**2) * ((r + 1)**2)
                            and np.absolute(x_hat[0]) < 0.5
                            and np.absolute(x_hat[1]) < 0.5
                            and np.absolute(x_hat[2]) < 0.5
                            and np.absolute(D_x_hat) > 0.03):
                        extrpyrlvl3[j, k, i - 1] = 1

    print("Fourth octave")

    for i in range(1, 4):
        for j in range(0, quartered.shape[0] - 1):
            for k in range(0, quartered.shape[1] - 1):
                if np.absolute(diffpyrlvl4[j, k, i]) < threshold:
                    continue

                maxbool = (diffpyrlvl4[j, k, i] > 0)
                minbool = (diffpyrlvl4[j, k, i] < 0)

                for di in range(-1, 2):
                    for dj in range(-1, 2):
                        for dk in range(-1, 2):
                            if di == 0 and dj == 0 and dk == 0:
                                continue
                            maxbool = maxbool and (diffpyrlvl4[j, k, i] >
                                                   diffpyrlvl4[j + dj, k + dk,
                                                               i + di])
                            minbool = minbool and (diffpyrlvl4[j, k, i] <
                                                   diffpyrlvl4[j + dj, k + dk,
                                                               i + di])
                            if not maxbool and not minbool:
                                break

                        if not maxbool and not minbool:
                            break

                    if not maxbool and not minbool:
                        break

                if maxbool or minbool:
                    dx = (diffpyrlvl4[j, k + 1, i] -
                          diffpyrlvl4[j, k - 1, i]) * 0.5 / 255
                    dy = (diffpyrlvl4[j + 1, k, i] -
                          diffpyrlvl4[j - 1, k, i]) * 0.5 / 255
                    ds = (diffpyrlvl4[j, k, i + 1] -
                          diffpyrlvl4[j, k, i - 1]) * 0.5 / 255
                    dxx = (diffpyrlvl4[j, k + 1, i] + diffpyrlvl4[j, k - 1, i]
                           - 2 * diffpyrlvl4[j, k, i]) * 1.0 / 255
                    dyy = (diffpyrlvl4[j + 1, k, i] + diffpyrlvl4[j - 1, k, i]
                           - 2 * diffpyrlvl4[j, k, i]) * 1.0 / 255
                    dss = (diffpyrlvl4[j, k, i + 1] + diffpyrlvl4[j, k, i - 1]
                           - 2 * diffpyrlvl4[j, k, i]) * 1.0 / 255
                    dxy = (diffpyrlvl4[j + 1, k + 1, i] -
                           diffpyrlvl4[j + 1, k - 1, i] -
                           diffpyrlvl4[j - 1, k + 1, i] +
                           diffpyrlvl4[j - 1, k - 1, i]) * 0.25 / 255
                    dxs = (diffpyrlvl4[j, k + 1, i + 1] -
                           diffpyrlvl4[j, k - 1, i + 1] -
                           diffpyrlvl4[j, k + 1, i - 1] +
                           diffpyrlvl4[j, k - 1, i - 1]) * 0.25 / 255
                    dys = (diffpyrlvl4[j + 1, k, i + 1] -
                           diffpyrlvl4[j - 1, k, i + 1] -
                           diffpyrlvl4[j + 1, k, i - 1] +
                           diffpyrlvl4[j - 1, k, i - 1]) * 0.25 / 255

                    dD = np.matrix([[dx], [dy], [ds]])
                    H = np.matrix([[dxx, dxy, dxs], [dxy, dyy, dys],
                                   [dxs, dys, dss]])
                    x_hat = np.linalg.lstsq(H, dD)[0]
                    D_x_hat = diffpyrlvl4[j, k, i] + 0.5 * np.dot(
                        dD.transpose(), x_hat)

                    r = 10.0
                    if (((dxx + dyy)**2) * r < (dxx * dyy - dxy**2) * ((r + 1)**2)
                            and np.absolute(x_hat[0]) < 0.5
                            and np.absolute(x_hat[1]) < 0.5
                            and np.absolute(x_hat[2]) < 0.5
                            and np.absolute(D_x_hat) > 0.03):
                        extrpyrlvl4[j, k, i - 1] = 1

    print("Number of extrema in first octave: %d" % np.sum(extrpyrlvl1))
    print("Number of extrema in second octave: %d" % np.sum(extrpyrlvl2))
    print("Number of extrema in third octave: %d" % np.sum(extrpyrlvl3))
    print("Number of extrema in fourth octave: %d" % np.sum(extrpyrlvl4))

    # Gradient magnitude and orientation for each image sample point at each
    # scale (3 planes per octave: one per scale at which extrema are recorded)

    # magnitude pyramids
    magpyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))
    magpyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 3))
    magpyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 3))
    magpyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 3))

    # orientation pyramids
    oripyrlvl1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))
    oripyrlvl2 = np.zeros((normal.shape[0], normal.shape[1], 3))
    oripyrlvl3 = np.zeros((halved.shape[0], halved.shape[1], 3))
    oripyrlvl4 = np.zeros((quartered.shape[0], quartered.shape[1], 3))

    for i in range(0, 3):
        for j in range(1, doubled.shape[0] - 1):
            for k in range(1, doubled.shape[1] - 1):
                magpyrlvl1[j, k, i] = (
                    ((doubled[j + 1, k] - doubled[j - 1, k])**2) +
                    ((doubled[j, k + 1] - doubled[j, k - 1])**2))**0.5
                oripyrlvl1[j, k, i] = (36 / (2 * np.pi)) * (np.pi + np.arctan2(
                    (doubled[j, k + 1] - doubled[j, k - 1]),
                    (doubled[j + 1, k] - doubled[j - 1, k])))

    for i in range(0, 3):
        for j in range(1, normal.shape[0] - 1):
            for k in range(1, normal.shape[1] - 1):
                magpyrlvl2[j, k,
                           i] = (((normal[j + 1, k] - normal[j - 1, k])**2) + (
                               (normal[j, k + 1] - normal[j, k - 1])**2))**0.5
                oripyrlvl2[j, k, i] = (36 / (2 * np.pi)) * (np.pi + np.arctan2(
                    (normal[j, k + 1] - normal[j, k - 1]),
                    (normal[j + 1, k] - normal[j - 1, k])))

    for i in range(0, 3):
        for j in range(1, halved.shape[0] - 1):
            for k in range(1, halved.shape[1] - 1):
                magpyrlvl3[j, k,
                           i] = (((halved[j + 1, k] - halved[j - 1, k])**2) + (
                               (halved[j, k + 1] - halved[j, k - 1])**2))**0.5
                oripyrlvl3[j, k, i] = (36 / (2 * np.pi)) * (np.pi + np.arctan2(
                    (halved[j, k + 1] - halved[j, k - 1]),
                    (halved[j + 1, k] - halved[j - 1, k])))

    for i in range(0, 3):
        for j in range(1, quartered.shape[0] - 1):
            for k in range(1, quartered.shape[1] - 1):
                magpyrlvl4[j, k, i] = (
                    ((quartered[j + 1, k] - quartered[j - 1, k])**2) +
                    ((quartered[j, k + 1] - quartered[j, k - 1])**2))**0.5
                oripyrlvl4[j, k, i] = (36 / (2 * np.pi)) * (np.pi + np.arctan2(
                    (quartered[j, k + 1] - quartered[j, k - 1]),
                    (quartered[j + 1, k] - quartered[j - 1, k])))

    extr_sum = int(
        np.sum(extrpyrlvl1) + np.sum(extrpyrlvl2) + np.sum(extrpyrlvl3) +
        np.sum(extrpyrlvl4))
    keypoints = np.zeros((extr_sum, 4))

    print("Calculating keypoint orientations...")

    count = 0

    for i in range(0, 3):
        for j in range(0, doubled.shape[0] - 1):
            for k in range(0, doubled.shape[1] - 1):
                if extrpyrlvl1[j, k, i] == 1:
                    gaussian_window = multivariate_normal(
                        mean=[j, k], cov=((1.5 * kvectotal[i])**2))
                    two_sd = np.floor(2 * 1.5 * kvectotal[i])
                    orient_hist = np.zeros([36, 1])
                    for x in range(int(-1 * two_sd * 2), int(two_sd * 2) + 1):
                        ylim = int(
                            (((two_sd * 2)**2) - (np.absolute(x)**2))**0.5)
                        for y in range(-1 * ylim, ylim + 1):
                            if j + x < 0 or j + x > doubled.shape[
                                    0] - 1 or k + y < 0 or k + y > doubled.shape[
                                        1] - 1:
                                continue
                            weight = magpyrlvl1[j + x, k + y,
                                                i] * gaussian_window.pdf(
                                                    [j + x, k + y])
                            bin_idx = np.clip(
                                np.floor(oripyrlvl1[j + x, k + y, i]), 0, 35)
                            # print(bin_idx)
                            orient_hist[int(np.floor(bin_idx))] += weight

                    maxval = np.amax(orient_hist)
                    maxidx = np.argmax(orient_hist)
                    keypoints[count, :] = np.array(
                        [int(j * 0.5),
                         int(k * 0.5), kvectotal[i], maxidx])
                    count += 1
                    orient_hist[maxidx] = 0
                    newmaxval = np.amax(orient_hist)
                    while newmaxval > 0.8 * maxval:
                        newmaxidx = np.argmax(orient_hist)
                        # assign back: np.append returns a new array and does
                        # not modify keypoints in place
                        keypoints = np.append(keypoints,
                                              np.array([[
                                                  int(j * 0.5),
                                                  int(k * 0.5), kvectotal[i],
                                                  newmaxidx
                                              ]]),
                                              axis=0)
                        orient_hist[newmaxidx] = 0
                        newmaxval = np.amax(orient_hist)

    for i in range(0, 3):
        for j in range(0, normal.shape[0] - 1):
            for k in range(0, normal.shape[1] - 1):
                if extrpyrlvl2[j, k, i] == 1:
                    gaussian_window = multivariate_normal(
                        mean=[j, k], cov=((1.5 * kvectotal[i + 3])**2))
                    two_sd = np.floor(2 * 1.5 * kvectotal[i + 3])
                    orient_hist = np.zeros([36, 1])
                    for x in range(int(-1 * two_sd), int(two_sd + 1)):
                        ylim = int(((two_sd**2) - (np.absolute(x)**2))**0.5)
                        for y in range(-1 * ylim, ylim + 1):
                            if j + x < 0 or j + x > normal.shape[
                                    0] - 1 or k + y < 0 or k + y > normal.shape[
                                        1] - 1:
                                continue
                            weight = magpyrlvl2[j + x, k + y,
                                                i] * gaussian_window.pdf(
                                                    [j + x, k + y])
                            bin_idx = np.clip(
                                np.floor(oripyrlvl2[j + x, k + y, i]), 0, 35)
                            orient_hist[int(np.floor(bin_idx))] += weight

                    maxval = np.amax(orient_hist)
                    maxidx = np.argmax(orient_hist)
                    keypoints[count, :] = np.array(
                        [j, k, kvectotal[i + 3], maxidx])
                    count += 1
                    orient_hist[maxidx] = 0
                    newmaxval = np.amax(orient_hist)
                    while newmaxval > 0.8 * maxval:
                        newmaxidx = np.argmax(orient_hist)
                        keypoints = np.append(
                            keypoints,
                            np.array([[j, k, kvectotal[i + 3], newmaxidx]]),
                            axis=0)  # assign back: np.append is not in-place
                        orient_hist[newmaxidx] = 0
                        newmaxval = np.amax(orient_hist)

    for i in range(0, 3):
        for j in range(0, halved.shape[0] - 1):
            for k in range(0, halved.shape[1] - 1):
                if extrpyrlvl3[j, k, i] == 1:
                    gaussian_window = multivariate_normal(
                        mean=[j, k], cov=((1.5 * kvectotal[i + 6])**2))
                    two_sd = np.floor(2 * 1.5 * kvectotal[i + 6])
                    orient_hist = np.zeros([36, 1])
                    for x in range(int(-1 * two_sd * 0.5),
                                   int(two_sd * 0.5) + 1):
                        ylim = int(
                            (((two_sd * 0.5)**2) - (np.absolute(x)**2))**0.5)
                        for y in range(-1 * ylim, ylim + 1):
                            if j + x < 0 or j + x > halved.shape[
                                    0] - 1 or k + y < 0 or k + y > halved.shape[
                                        1] - 1:
                                continue
                            weight = magpyrlvl3[j + x, k + y,
                                                i] * gaussian_window.pdf(
                                                    [j + x, k + y])
                            bin_idx = np.clip(
                                np.floor(oripyrlvl3[j + x, k + y, i]), 0, 35)
                            orient_hist[int(np.floor(bin_idx))] += weight

                    maxval = np.amax(orient_hist)
                    maxidx = np.argmax(orient_hist)
                    keypoints[count, :] = np.array(
                        [j * 2, k * 2, kvectotal[i + 6], maxidx])
                    count += 1
                    orient_hist[maxidx] = 0
                    newmaxval = np.amax(orient_hist)
                    while newmaxval > 0.8 * maxval:
                        newmaxidx = np.argmax(orient_hist)
                        keypoints = np.append(keypoints,
                                              np.array([[
                                                  j * 2, k * 2,
                                                  kvectotal[i + 6], newmaxidx
                                              ]]),
                                              axis=0)  # assign back
                        orient_hist[newmaxidx] = 0
                        newmaxval = np.amax(orient_hist)

    for i in range(0, 3):
        for j in range(0, quartered.shape[0] - 1):
            for k in range(0, quartered.shape[1] - 1):
                if extrpyrlvl4[j, k, i] == 1:
                    gaussian_window = multivariate_normal(
                        mean=[j, k], cov=((1.5 * kvectotal[i + 9])**2))
                    two_sd = np.floor(2 * 1.5 * kvectotal[i + 9])
                    orient_hist = np.zeros([36, 1])
                    for x in range(int(-1 * two_sd * 0.25),
                                   int(two_sd * 0.25) + 1):
                        ylim = int(
                            (((two_sd * 0.25)**2) - (np.absolute(x)**2))**0.5)
                        for y in range(-1 * ylim, ylim + 1):
                            if j + x < 0 or j + x > quartered.shape[
                                    0] - 1 or k + y < 0 or k + y > quartered.shape[
                                        1] - 1:
                                continue
                            weight = magpyrlvl4[j + x, k + y,
                                                i] * gaussian_window.pdf(
                                                    [j + x, k + y])
                            bin_idx = np.clip(
                                np.floor(oripyrlvl4[j + x, k + y, i]), 0, 35)
                            orient_hist[int(np.floor(bin_idx))] += weight

                    maxval = np.amax(orient_hist)
                    maxidx = np.argmax(orient_hist)
                    keypoints[count, :] = np.array(
                        [j * 4, k * 4, kvectotal[i + 9], maxidx])
                    count += 1
                    orient_hist[maxidx] = 0
                    newmaxval = np.amax(orient_hist)
                    while newmaxval > 0.8 * maxval:
                        newmaxidx = np.argmax(orient_hist)
                        keypoints = np.append(keypoints,
                                              np.array([[
                                                  j * 4, k * 4,
                                                  kvectotal[i + 9], newmaxidx
                                              ]]),
                                              axis=0)  # assign back
                        orient_hist[newmaxidx] = 0
                        newmaxval = np.amax(orient_hist)

    img_draw = original.copy()
    for i in range(keypoints.shape[0]):
        kp = keypoints[i]
        x = int(kp[0])
        y = int(kp[1])
        r = kp[2]  # keypoint scale (unused by the fixed-radius drawing below)
        cv2.circle(img_draw, (x, y), 1, (0, 255, 0), 1)

    plt.imshow(img_draw)
    plt.show()

    print("Calculating descriptor...")

    magpyr = np.zeros((normal.shape[0], normal.shape[1], 12))
    oripyr = np.zeros((normal.shape[0], normal.shape[1], 12))

    for i in range(0, 3):
        magmax = np.amax(magpyrlvl1[:, :, i])
        magpyr[:, :, i] = misc.imresize(magpyrlvl1[:, :, i],
                                        (normal.shape[0], normal.shape[1]),
                                        "bilinear").astype(float)
        magpyr[:, :, i] = (magmax / np.amax(magpyr[:, :, i])) * magpyr[:, :, i]
        oripyr[:, :, i] = misc.imresize(oripyrlvl1[:, :, i],
                                        (normal.shape[0], normal.shape[1]),
                                        "bilinear").astype(int)
        oripyr[:, :, i] = ((36.0 / np.amax(oripyr[:, :, i])) *
                           oripyr[:, :, i]).astype(int)

    for i in range(0, 3):
        magpyr[:, :, i + 3] = (magpyrlvl2[:, :, i]).astype(float)
        oripyr[:, :, i + 3] = (oripyrlvl2[:, :, i]).astype(int)

    for i in range(0, 3):
        magpyr[:, :, i + 6] = misc.imresize(magpyrlvl3[:, :, i],
                                            (normal.shape[0], normal.shape[1]),
                                            "bilinear").astype(int)
        oripyr[:, :, i + 6] = misc.imresize(oripyrlvl3[:, :, i],
                                            (normal.shape[0], normal.shape[1]),
                                            "bilinear").astype(int)

    for i in range(0, 3):
        magpyr[:, :, i + 9] = misc.imresize(magpyrlvl4[:, :, i],
                                            (normal.shape[0], normal.shape[1]),
                                            "bilinear").astype(int)
        oripyr[:, :, i + 9] = misc.imresize(oripyrlvl4[:, :, i],
                                            (normal.shape[0], normal.shape[1]),
                                            "bilinear").astype(int)

    descriptors = np.zeros([keypoints.shape[0], 128])

    for i in range(0, keypoints.shape[0]):
        for x in range(-8, 8):
            for y in range(-8, 8):
                theta = 10 * keypoints[i, 3] * np.pi / 180.0
                xrot = np.round((np.cos(theta) * x) - (np.sin(theta) * y))
                yrot = np.round((np.sin(theta) * x) + (np.cos(theta) * y))
                scale_idx = np.argwhere(kvectotal == keypoints[i, 2])[0][0]
                x0 = keypoints[i, 0]
                y0 = keypoints[i, 1]
                gaussian_window = multivariate_normal(mean=[x0, y0], cov=8)
                weight = magpyr[int(x0 + xrot),
                                int(y0 + yrot),
                                scale_idx] * gaussian_window.pdf(
                                    [int(x0 + xrot),
                                     int(y0 + yrot)])
                angle = oripyr[int(x0 + xrot),
                               int(y0 + yrot), scale_idx] - keypoints[i, 3]
                if angle < 0:
                    angle = 36 + angle

                bin_idx = np.clip(np.floor((8.0 / 36) * angle), 0,
                                  7).astype(int)
                descriptors[i, 32 * int((x + 8) / 4) + 8 * int(
                    (y + 8) / 4) + bin_idx] += weight

        descriptors[i, :] = descriptors[i, :] / norm(descriptors[i, :])
        descriptors[i, :] = np.clip(descriptors[i, :], 0, 0.2)
        descriptors[i, :] = descriptors[i, :] / norm(descriptors[i, :])

    return [keypoints, descriptors]
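
# A minimal usage sketch for detect_keypoints (the image path and the
# threshold value here are placeholders):
kps, descs = detect_keypoints('example.jpg', 3)
print(kps.shape)    # (N, 4): row, col, scale, orientation-histogram bin
print(descs.shape)  # (N, 128) normalized SIFT descriptors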
#
# Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
#     1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
#     2. Add your image to this Jupyter Notebook's directory, in the "images" folder
#     3. Change your image's name in the following code
#     4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!

# In[85]:

## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "tiger2.JPG"  # change this to the name of your image file
## END CODE HERE ##

# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px, num_px)).reshape(
    (1, num_px * num_px * 3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)

plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) +
      ", your algorithm predicts a \"" +
      classes[int(np.squeeze(my_predicted_image)), ].decode("utf-8") +
      "\" picture.")

# <font color='blue'>
# **What to remember from this assignment:**
# 1. Preprocessing the dataset is important.
# 2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().
# 3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course!
              })
          })
      model_exporter.export(FLAGS.model_path,
                            tf.constant(FLAGS.export_version), sess)
      logging.info("Done export model: {}".format(FLAGS.model_path))
      """

    elif mode == "inference":
      ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
      if ckpt and ckpt.model_checkpoint_path:
        logging.info("Load the model {}".format(ckpt.model_checkpoint_path))
        saver.restore(sess, ckpt.model_checkpoint_path)

      start_time = datetime.datetime.now()

      image_ndarray = ndimage.imread(FLAGS.image, mode="RGB")
      # TODO: Update for server without gui
      #print_image(image_ndarray)

      image_ndarray = image_ndarray.reshape(1, IMAGE_SIZE, IMAGE_SIZE,
                                            RGB_CHANNEL_SIZE)
      prediction = sess.run(predict_op, feed_dict={x: image_ndarray})

      end_time = datetime.datetime.now()
      pokemon_type = pokemon_types[prediction[0]]
      logging.info(
          "[{}] Predict type: {}".format(end_time - start_time, pokemon_type))

    elif FLAGS.mode == "savedmodel":
      if not restore_from_checkpoint(sess, saver, LATEST_CHECKPOINT):
        logging.error("No checkpoint for exporting model, exit now")
Example no. 45
# grid of points, on a masked rectangular grid of points, or with arbitrary points.
# (See OrdinaryKriging.__doc__ for more information.)
z, ss = OK.execute('grid', gridx, gridy)

print("AVG sigma:", np.average(ss), "\nMAX sigma:", np.max(ss), "\nMIN sigma:", np.min(ss), "\nMEDIAN sigma:", np.median(ss), "\n75° percentile sigma:", 
		np.percentile(ss, 75), "\n90° percentile sigma:", np.percentile(ss, 90), "\n95° percentile sigma:", np.percentile(ss, 95), "\n99° percentile sigma:", np.percentile(ss, 99))

#colors = ["sandybrown" for x in range(int(NUM))]
#plt.hist(ss, bins=np.arange(ss.min(), ss.max()), color=colors)

extent = (X0,X1,Y0,Y1)
im = plt.imshow(z, cmap='inferno', aspect='auto', extent=extent, origin="lower")
plt.colorbar(im)

from scipy.ndimage import imread
im = imread('Maschera.png', mode='RGBA')
plt.imshow(im, extent=extent)

for shape in sf.shapeRecords():
	x = [i[0] for i in shape.shape.points[:]]
	y = [i[1] for i in shape.shape.points[:]]
	
	plt.plot(x,y)

#plt.axis('off')
plt.title('Nitrogen dioxide [µg/m^3]')
plt.xlabel('Longitude [UTM32N East]')
plt.ylabel('Latitude [UTM32N North]')
plt.savefig("Mappa_biossido_azoto.png", dpi=300)
plt.show()
Example no. 46
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage
from scipy.signal import savgol_filter
import matplotlib as mpl

fig,axarr = plt.subplots(2,2, figsize=(5,5))

center_pos = (np.round(np.array([8639, 5848])/1000*300)).astype(int)
box_size = 400
image = ndimage.imread('data/optical/reference.tif')
image1 = image[center_pos[1]-box_size:center_pos[1]+box_size,
               center_pos[0]-box_size:center_pos[0]+box_size, :]
image1 = np.sum(image1, axis=2)
image1 = image1 - np.min(image1)
refmax = np.max(image1)
ref_graph = np.copy(image1[box_size,:])
ref_graph = savgol_filter(ref_graph, 31, 2)
# image1 = image[500:, :, :]
axarr[0,0].imshow(image1, cmap='gray', vmax = refmax)
axarr[0,0].axis('off')
axarr[0,0].set_aspect('equal')
axarr[1,0].imshow(image1, cmap='plasma', vmax = refmax)
axarr[1,0].axis('off')
axarr[1,0].set_aspect('equal')

aspects = np.loadtxt('data/optical/aspects.txt', skiprows=1)
# get angles
def get_angles(aspects):
    angles = []
    width_to_height_normal = 150/60
Example no. 47
def process():

    with tf.name_scope('input') as scope:
        x = tf.placeholder(
            tf.float32,
            shape=[BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNEL])
        y = tf.placeholder(tf.float32, shape=[BATCH_SIZE, NUM_LABELS])

    y_, params = model(x)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

    with tf.name_scope('acc'):
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('acc', accuracy)
    with tf.name_scope('loss'):
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_))
        tf.summary.scalar('loss', loss)

    opt = tf.train.GradientDescentOptimizer(0.001).minimize(loss,
                                                            var_list=params)

    data_x = np.zeros([BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNEL])
    data_y = np.zeros([BATCH_SIZE, NUM_LABELS])
    merged = tf.summary.merge_all()
    saver = tf.train.Saver()
    Data = DataFrame(np.zeros([40000, 4]),
                     index=[np.arange(0, 40000, 1)],
                     columns=['predict1', 'predict2', 'label1', 'label2'])
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        train_writer = tf.summary.FileWriter(
            "/home/liugq/Workspace/ResNet_BN/data/logs/2018.01/drn_1/train",
            sess.graph)
        test_writer = tf.summary.FileWriter(
            "/home/liugq/Workspace/ResNet_BN/data/logs/2018.01/drn_1/test")
        summary1 = tf.Summary()
        cover_count = 0
        stego_count = 0
        for i in range(1, NUM_ITER + 1):
            ################################## TRAIN ###################################################################
            for j in range(BATCH_SIZE):
                cover_count = cover_count % 20000
                stego_count = stego_count % 20000
                if j % 2 == 0:
                    imc = ndimage.imread(path1 + '/' + fileList1[cover_count])
                    data_y[j, 0] = 0
                    data_y[j, 1] = 1
                    cover_count = cover_count + 1
                else:
                    imc = ndimage.imread(path3 + '/' + fileList1[stego_count])
                    data_y[j, 0] = 1
                    data_y[j, 1] = 0
                    stego_count = stego_count + 1
                data_x[j, :, :, 0] = imc.astype(np.float32)
            summary1, pre, _, c, temp = sess.run(
                [merged, y_, opt, loss, accuracy],
                feed_dict={
                    x: data_x,
                    y: data_y,
                    is_train: True
                })

            if i % NUM_SHOWTRAIN == 0:
                # summary.ParseFromString(sess.run(merged, feed_dict={x: data_x, y: data_y, is_train: True}))
                train_writer.add_summary(summary1, i)
                print('DRN result train:')
                print('epoch:', i)
                print('loss:', c)
                print('accuracy:', temp)
                print(' ')
            if i % (NUM_SHOWTEST) == 0:
                saver.save(
                    sess, '../../data/models/2018.01/DRN_resnet_1_' + str(i) +
                    '.ckpt')
            ################################ TEST ########################################################################
            if i % NUM_SHOWTEST == 0:
                result2 = np.array([])  #accuracy for testing set
                test_count = 0
                print('DRN result test:')
                print('epoch:', i)
                while test_count < 20000:
                    for j in range(BATCH_SIZE):
                        if j % 2 == 0:
                            imc = ndimage.imread(path2 + '/' +
                                                 fileList2[test_count])
                            data_y[j, 0] = 0
                            data_y[j, 1] = 1
                        else:
                            imc = ndimage.imread(path4 + '/' +
                                                 fileList2[test_count])
                            data_y[j, 0] = 1
                            data_y[j, 1] = 0
                            test_count = test_count + 1
                        data_x[j, :, :, 0] = imc.astype(np.float32)

                    c2, temp2 = sess.run([loss, accuracy],
                                         feed_dict={
                                             x: data_x,
                                             y: data_y,
                                             is_train: True
                                         })
                    result2 = np.insert(result2, 0, temp2)
                    if test_count % 1000 == 0:  # print 40 times
                        print temp2
                print 'accuracy:', np.mean(result2)
                summary2 = tf.Summary(value=[
                    tf.Summary.Value(tag="test_acc",
                                     simple_value=np.mean(result2)),
                ])
                test_writer.add_summary(summary2, i)
            #################################################################################################################
        train_writer.close()
        test_writer.close()
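
# The loop above saves a checkpoint every NUM_SHOWTEST iterations; restoring
# one later for evaluation is symmetric. A minimal sketch, assuming the same
# graph-building code has run first (so x, y, is_train and accuracy exist)
# and that the checkpoint from, say, iteration 10000 is the one wanted:
saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, '../../data/models/2018.01/DRN_resnet_1_10000.ckpt')
    acc = sess.run(accuracy, feed_dict={x: data_x, y: data_y, is_train: False})
    print('restored accuracy:', acc)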
Example n. 48
import os
import socket
import time

import numpy as np
from scipy import ndimage

host = '127.0.0.1'
port = 2201
server = socket.socket()
server.bind((host, port))
server.listen(0)
print("Starting server at {} port {}. Listening...".format(host, port))
connection, adress = server.accept()
binary_stream = connection.makefile('wb')  # 'wb': this server writes frames to the client

folder = r'/mnt/c/Users/hawker/Dropbox/Public/selfie_car/data_intake3/v1.20'
images = os.listdir(folder)
images = filter(lambda x: ".jpg" in x, images)
images = sorted(images)
#images = map(lambda x: (x.split("_")[1][:-4], x), images)
images = map(lambda x: os.path.join(folder, x), images)

time.sleep(2)

for imgname in images:
    time.sleep(2)
    print(imgname)
    img = ndimage.imread(imgname)
    binary_stream.write(img.tobytes())
    binary_stream.flush()  # push the frame out before sleeping

binary_stream.close()
server.close()
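
# The matching receiver is not shown; a minimal counterpart might look like
# this sketch, assuming both sides agree on the frame shape (a hypothetical
# 480x640x3 uint8 image here) so the raw byte stream can be re-chunked:
import socket

import numpy as np

FRAME_SHAPE = (480, 640, 3)          # hypothetical; must match the sender
FRAME_BYTES = int(np.prod(FRAME_SHAPE))

client = socket.socket()
client.connect(('127.0.0.1', 2201))
stream = client.makefile('rb')

while True:
    buf = stream.read(FRAME_BYTES)   # blocks until one full frame arrives
    if len(buf) < FRAME_BYTES:       # sender closed the connection
        break
    frame = np.frombuffer(buf, dtype=np.uint8).reshape(FRAME_SHAPE)
    print(frame.shape, frame.mean())

stream.close()
client.close()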
Example n. 49
from scipy import ndimage
import matplotlib.pyplot as plt


def test_denoise_bilateral():
    # denoise_bilateral here is the module's own implementation under test
    image_str = "data/raghav_low_res.jpg"
    image = ndimage.imread(image_str, flatten=True)
    res = denoise_bilateral(image, sigma_s=1, sigma_r=25.5, reg=3 * 10**-8)
    plt.imshow(res)
    plt.show()
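
# For comparison, scikit-image ships a bilateral filter with the same role;
# its parameters differ from the custom sigma_s/sigma_r/reg signature above,
# and the values below are illustrative rather than tuned:
from scipy import ndimage
from skimage.restoration import denoise_bilateral as sk_denoise_bilateral
import matplotlib.pyplot as plt

image = ndimage.imread("data/raghav_low_res.jpg", flatten=True) / 255.0
res = sk_denoise_bilateral(image, sigma_color=0.1, sigma_spatial=2)
plt.imshow(res, cmap='gray')
plt.show()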
Example n. 50
def confusion_matrix(sess, data_type=2):
    images = []
    targets = []

    cn_matrix = np.zeros((CLASS_SIZE, CLASS_SIZE))
    append_class_num = []

    if data_type == 1:
        data_type_post = data_valid
    elif data_type == 2:
        data_type_post = data_test

    for i in range(CLASS_SIZE):

        files = glob.glob(data_dir + '\\' + str(i) + data_type_post + '\\' + '*.png')

        num_test_class = np.shape(files)[0]

        append_class_num.append(num_test_class)

        target = []
        for j in range(CLASS_SIZE):
            # one-hot row for class j, repeated IMAGE_LENGTH times
            t_building = [1 if k == j else 0 for k in range(CLASS_SIZE)]
            for l in range(IMAGE_LENGTH):
                target.append(t_building)

        Y_target = np.asarray(target)

        y_pred = []
        y_true = []
        for j in range(num_test_class):
            pngfile = imread(files[j])
            images.append(pngfile)
            targets.append(i)

            if (np.shape(images)[0] == BATCH_SIZE) or ((i == (CLASS_SIZE - 1)) and (j == (num_test_class - 1))):
                images = np.asarray(images, dtype=np.float32)
                images = images.reshape(-1, INPUT_SIZE)

                pred_out_ = sess.run(pred_out, feed_dict={x: images, y_: Y_target,keep_prob: 1.0,phase_train: False})

                for k in range(np.shape(pred_out_)[0]):
                    index_pred = np.argmax(pred_out_[k])
                    y_pred.append(index_pred)
                    target = targets[k]
                    y_true.append(target)
                    cn_matrix[target][index_pred] = cn_matrix[target][index_pred] + 1

                targets = []
                images = []

    num_class = np.shape(append_class_num)[0]
    for i in range(num_class):
        cn_matrix[i, :] = cn_matrix[i, :] / append_class_num[i]
        print(i)
        print(cn_matrix[i, :])


    plt.matshow(cn_matrix)
    plt.title('Problem: Hologram Confusion Matrix')
    plt.colorbar()
    plt.ylabel('True Label')
    plt.xlabel('Predicted Label')
    plt.savefig('confusion_matrix.jpg')
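
# Since the loop above already collects y_true / y_pred, the same matrix can
# be produced with scikit-learn; a sketch with placeholder labels, assuming
# the per-class lists are accumulated over the whole test set:
import numpy as np
from sklearn.metrics import confusion_matrix as sk_confusion_matrix

y_true_all = [0, 0, 1, 2, 2, 2]   # placeholder labels
y_pred_all = [0, 1, 1, 2, 2, 0]   # placeholder predictions

cm = sk_confusion_matrix(y_true_all, y_pred_all).astype(np.float64)
cm /= cm.sum(axis=1, keepdims=True)   # normalise each row by class count
print(np.round(cm, 2))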
Example n. 51
import glob
import os
import re
from os import path

import numpy as np
import matplotlib.pyplot as plt
from keras.models import model_from_json
from scipy import ndimage, misc

IMAGE_PATH = r'D:\Jhonatan\Documentos\DL\FINAL\DataManos'
#file_paths = glob.glob(path.join(IMAGE_PATH, '*.png'))
file_paths = []
for ext in ('*.jpg', '*.png'):
    file_paths.extend(glob.glob(path.join(IMAGE_PATH, ext)))

images = []

for root, dirnames, filenames in os.walk(IMAGE_PATH):
    for filename in filenames:
        if re.search(r"\.(jpg|jpeg|png|bmp|tiff)$", filename):
            filepath = os.path.join(root, filename)
            image = ndimage.imread(filepath, mode="RGB")
            image_resized = misc.imresize(image, (120, 160))
            images.append(image_resized)
            #plt.imshow(image_resized)
            #plt.show()
images = np.asarray(images)

n_images = images.shape[0]
labels = np.zeros(n_images)
# NOTE: this assumes glob (file_paths) and os.walk (images) enumerate the
# files in the same order; otherwise labels will not line up with images.
for i in range(n_images):
    filename = path.basename(file_paths[i])
    labels[i] = int(filename[0])

TRAIN_TEST_SPLIT = 0

shuffled_indices = np.random.permutation(n_images)
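
# The fragment ends before the permutation is used; a common continuation
# carves it into train/test partitions. Sketch, assuming TRAIN_TEST_SPLIT is
# the training fraction (e.g. 0.8) rather than the 0 above:
split = int(n_images * TRAIN_TEST_SPLIT)
train_idx, test_idx = shuffled_indices[:split], shuffled_indices[split:]

x_train, y_train = images[train_idx], labels[train_idx]
x_test, y_test = images[test_idx], labels[test_idx]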
Example n. 52
def obj_det_img():

    print("file upload")
    voc_classes = [
        'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
        'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',
        'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
        'tvmonitor'
    ]
    result1 = 'static/uploads/test'
    result2 = 'static/uploads/test_det'
    #if not gfile.Exists(result):
    #os.mkdir(result)
    if gfile.Exists(result1):
        shutil.rmtree(result1)
    if gfile.Exists(result2):
        shutil.rmtree(result2)
    if request.method == 'POST' or request.method == 'GET':
        # check if the post request has the file part
        if 'file' not in request.files:
            print('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser
        # submits an empty part without a filename
        if file.filename == '':
            print('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            inputloc = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            outputimage = 'static/det_results/testplot.jpg'
            #label1, score, accuracy = predict.inference(inputloc,outputloc)
            objects = detectimg(inputloc)
            final_objs = []

            for i in objects:
                obj = voc_classes[i]
                final_objs.append(obj)
            print("final objects", final_objs)
            image1 = ndimage.imread(inputloc)
            image2 = ndimage.imread(outputimage)
            os.mkdir(result1)
            os.mkdir(result2)
            timestr = datetime.now().strftime("%Y%m%d%H%M%S")
            name1 = timestr + "." + "test"
            name2 = timestr + "." + "test_det"
            #print(name)
            name3 = 'static/uploads/test/' + name1 + '.jpg'
            name4 = 'static/uploads/test_det/' + name2 + '.jpg'
            imsave(name3, image1)
            imsave(name4, image2)
            os.remove(inputloc)
            #label = label1.replace("\n", "")
            #if(label == 'oots'):
            #label = 'boots'
            #print("label:",label)

            image_path1 = "/uploads/test"
            image_list1 = [
                os.path.join(image_path1, file) for file in os.listdir(result1)
                if not file.startswith('.')
            ]
            image_path2 = "/uploads/test_det"
            image_list2 = [
                os.path.join(image_path2, file) for file in os.listdir(result2)
                if not file.startswith('.')
            ]
            print("image name", image_list1)
            print("image name", image_list2)
            #if(accuracy < 0.3):
            #label = 'Unknown Class'
            #score = '-'
            data = {
                'image0': image_list1[0],
                'image1': image_list2[0],
                'objects': final_objs
            }
            return jsonify(data)
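
# The route decorator sits outside this fragment; assuming the view above is
# registered at a hypothetical '/obj_det' URL, it can be exercised with a
# multipart upload:
import requests

with open('dog.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/obj_det',
                         files={'file': ('dog.jpg', f, 'image/jpeg')})
print(resp.json())   # expected keys: 'image0', 'image1', 'objects'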
Example n. 53
from keras import backend as K
from keras.models import load_model
from scipy import ndimage

model = load_model('my_model3conlayer.h5')
#testData = np.memmap('testdataNew.mymemmap',dtype='float16',mode='r+',shape=(84, 2988, 5312, 3))
#Output = model.predict(testData, batch_size=1, verbose=2)
#print(Output)
#all_weights = []
#for layer in model.layers:
#   w = layer.get_weights()
#   all_weights.append(w)

#all_weights = np.array(all_weights)
#conv1out = all_weights[0]
#plt.imshow(conv1out[0])
I = ndimage.imread(
    'C:/Users/baseb/Desktop/BuildingClassifier/BuildingImages/NewTest/20171121_151746.jpg'
)

# normalize the training data


def get_activations(model,
                    model_inputs,
                    print_shape_only=False,
                    layer_name=None):
    print('----- activations -----')
    activations = []
    inp = model.input
Example n. 54
def segment_image():

    print("file upload for image segmentation")

    result1 = 'static/uploads/test'
    result2 = 'static/uploads/test_det'
    #if not gfile.Exists(result):
    #os.mkdir(result)
    if gfile.Exists(result1):
        shutil.rmtree(result1)
    if gfile.Exists(result2):
        shutil.rmtree(result2)
    if request.method == 'POST' or request.method == 'GET':
        # check if the post request has the file part
        if 'file' not in request.files:
            print('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser
        # submits an empty part without a filename
        if file.filename == '':
            print('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            inputloc = os.path.join(app.config['UPLOAD_FOLDER'], filename)

            #label1, score, accuracy = predict.inference(inputloc,outputloc)
            segment_img(inputloc)
            outputimage = os.path.join(app.config['UPLOAD_FOLDER'],
                                       "segmented.jpg")

            image1 = ndimage.imread(inputloc)
            image2 = ndimage.imread(outputimage)
            os.mkdir(result1)
            os.mkdir(result2)
            timestr = datetime.now().strftime("%Y%m%d%H%M%S")
            name1 = timestr + "." + "test"
            name2 = timestr + "." + "test_det"
            #print(name)
            name3 = 'static/uploads/test/' + name1 + '.jpg'
            name4 = 'static/uploads/test_det/' + name2 + '.jpg'
            imsave(name3, image1)
            imsave(name4, image2)
            #os.remove(inputloc)

            image_path1 = "/uploads/test"
            image_list1 = [
                os.path.join(image_path1, file) for file in os.listdir(result1)
                if not file.startswith('.')
            ]
            image_path2 = "/uploads/test_det"
            image_list2 = [
                os.path.join(image_path2, file) for file in os.listdir(result2)
                if not file.startswith('.')
            ]
            print("image name", image_list1)
            print("image name", image_list2)
            #if(accuracy < 0.3):
            #label = 'Unknown Class'
            #score = '-'
            data = {'image0': image_list1[0], 'image1': image_list2[0]}
            return jsonify(data)
Example n. 55
X_test = X_test.astype('float32')
# normalize inputs from 0-255 to 0.0-1.0
X_train = X_train / 255.0
X_test = X_test / 255.0

# one-hot encode the outputs
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
num_classes = y_test.shape[1]

for i in range(0, 9):
    pyplot.subplot(330 + 1 + i)
    pyplot.imshow(toimage(X_train[i]))
# show the plot
pyplot.show()

kedi = ndimage.imread("kedi.jpg")
kedi = scipy.misc.imresize(kedi, (32, 32))
kedi = numpy.array(kedi)
# imresize returns (32, 32, 3); the model below expects channels-first input,
# so move the channel axis before adding the batch dimension
kedi = kedi.transpose(2, 0, 1).reshape(1, 3, 32, 32)

model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           input_shape=(3, 32, 32),
           padding='same',
           activation='relu',
           kernel_constraint=maxnorm(3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(
    Conv2D(32, (3, 3),
Example n. 56
# 0. Read the images
PathDicom = "./images/"
lstFilesDCM = []  # create an empty list
for dirName, subdirList, fileList in os.walk(PathDicom):
    for filename in fileList:
        lstFilesDCM.append(os.path.join(dirName, filename))

# 0. Read the data into Numpy
one = np.zeros((len(lstFilesDCM), 512, 512))  # one 512x512 slice per file found above

# 0.5 Transfer All of the Data into array
print('===== READING DATA ========')
for file_index in range(len(lstFilesDCM)):
    one[file_index, :, :] = imresize(
        imread(lstFilesDCM[file_index], mode='F', flatten=True), (512, 512))
print('===== Done READING DATA ========')

# 1. DoG
for x in range(len(one)):
    s = 2
    w = 5
    t = (((w - 1) / 2) - 0.5) / s
    temp = scipy.ndimage.filters.gaussian_filter(one[x, :, :],
                                                 sigma=s,
                                                 truncate=t)

    s = 2
    w = 3
    t = (((w - 1) / 2) - 0.5) / s
    temp2 = scipy.ndimage.filters.gaussian_filter(one[x, :, :],
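
# The call above is cut off mid-argument; the visible intent appears to be
# subtracting two differently-truncated blurs (a DoG-style band-pass). A
# minimal sketch of that step, under the same sigma/width choices as above:
import scipy.ndimage


def dog(img, s=2, w_wide=5, w_narrow=3):
    # convert a kernel width w into scipy's 'truncate' parameter
    t_wide = (((w_wide - 1) / 2) - 0.5) / s
    t_narrow = (((w_narrow - 1) / 2) - 0.5) / s
    wide = scipy.ndimage.gaussian_filter(img, sigma=s, truncate=t_wide)
    narrow = scipy.ndimage.gaussian_filter(img, sigma=s, truncate=t_narrow)
    return wide - narrow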
Example n. 57
import time

import numpy as np
import matplotlib.pyplot as plt
from scipy.misc import imread

maxprice = 88.90
minprice = 37.99

start_time = '05.05.2011 12:00:00'
stop_time = '12.02.2017 12:00:00'
pattern = '%d.%m.%Y %H:%M:%S'
epoch = int(time.mktime(time.strptime(start_time, pattern)))
print(epoch)
epoch = int(time.mktime(time.strptime(stop_time, pattern)))
print(epoch)

chart = imread(
    "C:/Users/franc/Downloads/camelchart-locale-frasin-b00ql1u4topricetypes-amazonforce-1zero-0w-2725h-1440desired-falselegend-0ilt-0fo-0lang-en2017-02-1200-43-36.png",
    flatten=False,
    mode=None)
# Match pixels whose (R, G, B) equals the chart line's colour (99, 168, 94):
# dotting each pixel with (2, 3, 5) gives a cheap scalar hash to compare.
raster = (chart.dot([2, 3, 5]) == (99 * 2 + 168 * 3 + 94 * 5))[-1::-2, ::2]
# NaN where the line is absent, 1.0 where it is present
raster = np.array([np.nan, 1])[raster.astype(int)]
raster *= np.arange(raster.shape[0])[:, np.newaxis]
mymin = np.nanmin(raster, axis=0)
mymax = np.nanmax(raster, axis=0)
toto = np.vstack((mymax, mymin)).ravel("F")

toto -= np.nanmin(toto)
toto /= np.nanmax(toto)
toto *= maxprice - minprice
toto += minprice

plt.plot(toto)
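
# The colour match above generalises; a tiny self-contained demo of the same
# dot-product hash, with one pixel of the target colour (99, 168, 94):
import numpy as np

demo = np.array([[[99, 168, 94], [0, 0, 0]],
                 [[255, 255, 255], [99, 168, 94]]], dtype=np.uint8)
mask = demo.dot([2, 3, 5]) == (99 * 2 + 168 * 3 + 94 * 5)
print(mask)   # True where the hash matches (collisions possible but unlikely)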
Example n. 58
%matplotlib inline
import scipy as sp
import scipy.ndimage as nd
import matplotlib.image as mpimg
import numpy as np
import matplotlib.pyplot as plt

img = nd.imread('results_L3.jpg', mode='L')
imgplot = plt.imshow(img, cmap='Greys_r')
Example n. 59
from scipy.misc import imread, imresize


def crop_and_resize(image, img_size):
    """ Crop an image to its bounding box and resize the crop to img_size """
    image_data = imread(image['filename'])
    crop = image_data[image['y0']:image['y1'], image['x0']:image['x1'], :]
    return imresize(crop, img_size)
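
# Usage sketch (the filename and crop coordinates are illustrative):
image = {'filename': 'cat.jpg', 'x0': 10, 'y0': 20, 'x1': 210, 'y1': 220}
thumb = crop_and_resize(image, (64, 64))
print(thumb.shape)   # (64, 64, 3)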
Example n. 60
    # Note that there is exactly one more zero added to the start of spectrum than the end.
    # *** this is the DC frequency, which is the only exception to the symmetry of spectrum

    # 2.7) IFFT: performing an Inverse Fast Fourier Transformation on the frequency
    # spectrum created above results in a time-domain data; which is then transmitted
    symbol = np.real(np.fft.ifft(paddedPilotTonedData))
    return symbol
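
# The symmetry comment above can be checked directly: an IFFT is real (up to
# rounding) exactly when the spectrum is conjugate-symmetric, with the DC bin
# as the lone unpaired entry. A minimal demonstration:
import numpy as np

data = np.array([1 + 2j, -0.5 + 1j, 0.3 - 0.7j])
# layout: [DC, data, reversed conjugate of data]
spectrum = np.concatenate(([0], data, np.conj(data)[::-1]))
symbol = np.real_if_close(np.fft.ifft(spectrum))
print(symbol.dtype)   # float64: the imaginary parts vanish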


### DATA TRANSMISSION

plt.close("all")

# 1) loading the data: in this case the data is an image.
img = ndimage.imread("greytee.png")

# At this point the data is a 100 by 256 array.
# you can check this by typing in the command line "img.shape"
# Entries of this array are the grey-scale values of their corresponding pixels
# Figure(00) shows this image and its grey-scale histogram
# grey-scale values are from 0 to 255, corresponding to black and white respectively.

plt.subplot(2, 1, 1)
plt.imshow(img, cmap='gray')
plt.title("Image: Before transmission (before IFFT)")
plt.subplot(2, 1, 2)
plt.hist(img.ravel(), bins=256)
plt.title("Histogram of pixels' grey-scale values")

transmitData = []