Example #1
def show_signature(img_path, fignum=1):
    print('Showing signature: ' + img_path)
    img = plt.imread(join(imgdir, img_path))
    sig_img = sliding_window_signature(img)
    fig = plt.figure(fignum)
    ax1 = fig.add_subplot(1, 2, 1)
    ax2 = fig.add_subplot(1, 2, 2)
    ax1.imshow(img)
    ax2.imshow(sig_img)
    fig.show()
    fig.canvas.draw()
Example #2
File: shadows.py  Project: Erotemic/local
def show_signature(img_path, fignum=1):
    print('Showing signature: ' + img_path)
    img = plt.imread(join(imgdir, img_path))
    sig_img = sliding_window_signature(img)
    fig = plt.figure(fignum)
    ax1 = fig.add_subplot(1, 2, 1)
    ax2 = fig.add_subplot(1, 2, 2)
    ax1.imshow(img)
    ax2.imshow(sig_img)
    fig.show()
    fig.canvas.draw()
Example #3
def prepare_ShanghaiTech_dataset(root, part, dm_generator, resetFlag=False):
    root = os.path.join(root, "ShanghaiTech")
    paths_dict = dict()
    print('\t  #Preparing Dataset : ShanghaiTech part ', part, ' :')
    # validate the requested ShanghaiTech part before generating ground truth
    if part not in ("A", "B"):
        raise Exception("Invalid part passed for ShanghaiTech: " + str(part))

    train_path = os.path.join(root, 'part_' + part, 'train_data')
    test_path = os.path.join(root, 'part_' + part, 'test_data')

    # store the training image and ground-truth paths (test_path is built above but not used below)
    paths_dict["images"] = os.path.join(train_path, 'images')
    paths_dict["ground-truth"] = os.path.join(train_path, 'ground-truth')

    path_sets = [paths_dict["images"], paths_dict["ground-truth"]]

    img_paths = []
    # Grab all .jpg image paths
    for path in path_sets:
        for img_path in glob.glob(os.path.join(path, '*.jpg')):
            img_paths.append(img_path)

    # Generate a density map for each image
    for img_path in img_paths:
        if os.path.exists(
                img_path.replace('.jpg', '.npy').replace(
                    'images', 'ground-truth')) and not resetFlag:
            #print("\t Already exists.")
            continue
        print('\t\t Generating Density map for : ',
              os.path.basename(img_path),
              " :",
              end=' ')

        # load the .mat file containing the ground-truth annotations
        mat = io.loadmat(
            img_path.replace('.jpg', '.mat').replace('images',
                                                     'ground-truth').replace(
                                                         'IMG_', 'GT_IMG_'))
        img = plt.imread(img_path)  # e.g. 768 rows x 1024 columns
        density_map = np.zeros((img.shape[0], img.shape[1]))
        points = mat["image_info"][0, 0][0, 0][0]  # e.g. 1546 persons x 2 (col, row)

        # Generate the density map
        density_map = dm_generator.generate_densitymap(img, points)

        # save density_map on disk
        np.save(
            img_path.replace('.jpg', '.npy').replace('images', 'ground-truth'),
            density_map)
    print('\t Done.')
    return paths_dict
Example #4
def crop_images(input_folder, crop_percentage):
    im_files = glob.glob(os.path.join(input_folder, '*.png'))
    print(im_files)
    for imf in im_files:
        im = plt.imread(imf)
        orig_shape = np.array(im.shape[0:2])
        new_shape = np.round(orig_shape * crop_percentage).astype(
            'int')  # e.g. crop to 60% of the original size when crop_percentage=0.6
        offset = np.round(0.5 * (orig_shape - new_shape)).astype('int')
        im_out = im[offset[0]:offset[0] + new_shape[0],
                    offset[1]:offset[1] + new_shape[1]]
        plt.imsave(imf, im_out)
Example #5
def readTrafficSigns(rootpath, data):
    '''Reads traffic sign data for the German Traffic Sign Recognition Benchmark.
    Arguments: rootpath -- path to the traffic sign data, for example './GTSRB/Training'
               data     -- 'Train' or 'Test'
    Returns:   list of images, list of corresponding labels'''

    images = []  # images
    labels = []  # corresponding labels
    # loop over all 43 classes
    if data == 'Train':
        for c in range(0, 43):
            prefix = rootpath + '/' + format(
                c, '05d') + '/'  # subdirectory for class
            gtFile = open(prefix + 'GT-' + format(c, '05d') +
                          '.csv')  # annotations file
            gtReader = csv.reader(
                gtFile, delimiter=';')  # csv parser for annotations file
            next(gtReader)  # skip header
            # loop over all images in current annotations file
            for row in gtReader:
                images.append(
                    plt.imread(prefix +
                               row[0]))  # the first column is the filename
                labels.append(row[7])  # the 8th column is the label
            gtFile.close()
    elif data == 'Test':
        prefix = rootpath + '/'  # subdirectory for class
        gtFile = open(prefix + 'GT-final_test' + '.csv')  # annotations file
        gtReader = csv.reader(gtFile,
                              delimiter=';')  # csv parser for annotations file
        next(gtReader)  # skip header
        # loop over all images in current annotations file
        for row in gtReader:
            images.append(plt.imread(prefix +
                                     row[0]))  # the first column is the filename
            labels.append(row[7])  # the 8th column is the label
        gtFile.close()
    return images, labels
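
A hypothetical usage sketch for the function above (the './GTSRB/Training' path is the docstring's own example; the two-list unpacking mirrors the return value):

images, labels = readTrafficSigns('./GTSRB/Training', 'Train')
print(len(images), 'images loaded,', len(set(labels)), 'distinct classes')
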
Example #6
def prepare_dataset(root, dirname, dm_generator, resetFlag=False):
    root = os.path.join(root, dirname)
    paths_dict = dict()

    print('\t #Preparing Dataset : ', dirname)
    # save both train and test paths
    paths_dict["images"] = os.path.join(root, 'images')
    paths_dict["ground-truth"] = os.path.join(root, 'ground-truth')

    path_sets = [paths_dict["images"], paths_dict["ground-truth"]]

    img_paths = []
    # Grab all .jpg image paths

    for img_path in glob.glob(os.path.join(paths_dict["images"], '*.jpg')):
        img_paths.append(img_path)

    # Generate a density map for each image
    for img_path in img_paths:
        if os.path.exists(
                img_path.replace('.jpg', '.npy').replace(
                    'images', 'ground-truth')) and not resetFlag:
            #print("\t Already exists.")
            continue
        print('\t\t Generating Density map for : ', os.path.basename(img_path),
              " :")

        # load the .mat file containing the ground-truth annotations
        mat = io.loadmat(
            img_path.replace('.jpg', '.mat').replace('images', 'ground-truth'))
        img = plt.imread(img_path)  # e.g. 768 rows x 1024 columns
        density_map = np.zeros((img.shape[0], img.shape[1]))
        # points = mat["image_info"][0,0][0,0][0]  # ShanghaiTech layout: e.g. 1546 persons x 2 (col, row)
        key = [el for el in list(mat) if el.lower().endswith('points')][0]
        points = [tuple(el) for el in mat[key]]  # e.g. 1546 persons x 2 (col, row)

        # Generate the density map
        density_map = dm_generator.generate_densitymap(img, points)

        # save density_map on disk
        np.save(
            img_path.replace('.jpg', '.npy').replace('images', 'ground-truth'),
            density_map)
    print('\t Done.')
    return paths_dict
Example #7
    elif m == '06':
        return int(d)+14
    elif m == '07':
        return int(d)+34

df["date"] = [t[0:4] + t[5:7] + t[8:10] for t in df["lastupdated"]]
df["time"] = [t[11:13] + t[14:16] for t in df["lastupdated"]]
df["day"] = [date2int(t) for t in df["lastupdated"]] # day 0 is May 17, 2014
df["dayofwk"] = [(t+6)%7 for t in df["day"]] # 0 indexed Sunday
df.head()

# <codecell>

plt.figure(figsize=(10,15))

im = plt.imread('chicago.png')
implot = plt.imshow(im)

x = (df['west'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
y = 798-(df['north'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
s = df['currentspeed'] / df['currentspeed'].max()
plt.scatter(x, y, c=s, linewidth=0, s=1000, alpha=0.1)

#x0 = (df.ix[0]['west'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
#y0 = 798-(df.ix[0]['north'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
#plt.scatter(x0,y0,c='r',s=2000)
#x0 = (df.ix[0]['east'] - df['west'].min())*477/(df['east'].max() - df['west'].min())
#y0 = 798-(df.ix[0]['south'] - df['south'].min())*798/(df['north'].max() - df['south'].min())
#plt.scatter(x0,y0,c='r',s=2000)
plt.xlim(0,477)
plt.ylim(798,0)
Example #8
'''
import numpy as np
import matplotlib.pyplot as plt  # pyplot (not the bare matplotlib package) provides imread
from submission import *
from helper import *

data = np.load('../data/some_corresp.npz')
intrinsics = np.load('../data/intrinsics.npz')

pts1 = data['pts1']
pts2 = data['pts2']
K1 = intrinsics['K1']
K2 = intrinsics['K2']


im1 = plt.imread('../data/im1.png')
im2 = plt.imread('../data/im2.png')

M = max(im1.shape[0], im1.shape[1])

F = eightpoint(pts1, pts2, M)
E = essentialMatrix(F, K1, K2)

print('F: ')
for rows in F:
    print(rows)

print('E: ')
for rows in E:
    print(rows)
Example #9
            .format(epoch+1, num_epochs, loss/n_batches))
    losses.append(loss/n_batches)
    #if epoch == 1:
    pic_org = to_img(img.cpu().data)
    pic_cropped = to_img_cropped(cropped_img.cpu().data)
    #pic_noised = to_img(noised_img.cpu().data)
    pic_pred = to_img(output.cpu().data)
    res = torch.cat((pic_org, pic_cropped, pic_pred), dim=3)
    save_image(res[:8], f'{output_dir}/res_{epoch}.png')  # save 8 images

# save the model
torch.save(net.state_dict(), f'{output_dir}/conv_autoencoder_4.pth')


# show performance of autoencoder after some epochs
imgs = [plt.imread(f'{output_dir}/res_4_{i}.png') for i in range(3)]

NUM_ROWS = 1
IMGS_IN_ROW = len(imgs)  # one subplot per loaded result image
f, ax = plt.subplots(NUM_ROWS, IMGS_IN_ROW, figsize=(5, 10))

for i in range(len(imgs)):
    ax[i].imshow(imgs[i])
    ax[i].set_title(f'Results after {i} epochs')  # change if indexing by epoch or mod epoch

plt.tight_layout()
plt.show()

#Change for the 3rd version!!

Example #10
from os import listdir

import matplotlib.pyplot as plt
from PIL import Image

for file in listdir("C:\\Users\\bdgecyt\\Desktop\\dataset\\Household Shelter"):
    #    print(files)
    if file.endswith(".jpg"):
        print(file)

        #        str.endswith(suffix)
        im = Image.open(
            "C:\\Users\\bdgecyt\\Desktop\\dataset\\Household Shelter\\" + file)
        print(file[:-3] + "png")
        im.save("C:\\Users\\bdgecyt\\Desktop\\dataset\\Household Shelter\\" +
                file[:-3] + "png")

im = Image.open(
    "C:\\Users\\bdgecyt\\Desktop\\dataset\\Household Shelter\\test2_84.png")

im.size

img = plt.imread(
    "C:\\Users\\bdgecyt\\Desktop\\dataset\\Household Shelter\\test2_84.png")
img = plt.imread(
    "C:\\Users\\bdgecyt\\Desktop\\dataset\\Household Shelter\\271_25-1_00_23_41_0_2.png"
)

plt.imread('Household Shelter/271_25-1_00_23_41_0_2.png')

image = plt.imread('Household Shelter/271_25-1_00_23_41_0_2.png')

# matplotlib.pyplot.imread is the function exercised in this scratch script
Example #11
	'''
	if cv2.waitKey() & 0xff == ord('q'):		# the & 0xff here extracts the ASCII code, because the raw return value can differ between systems  # ord('q') returns 113
		break		# if the ASCII code of the pressed key equals the ASCII code of 'q', the break statement is executed

	'''ord() takes a single character as its argument and returns the corresponding ASCII code; its counterparts chr() and unichr()
	[unichr() differs from chr() in that it returns a unicode character] take an integer from 0 to 255 and return the corresponding character, e.g. chr(113) returns 'q'.'''
	& is the bitwise AND operator, while and is the logical operator.
	cv2.waitKey(5) waits for 5 milliseconds, but returns as soon as a key is pressed rather than waiting the full 5 milliseconds; do not confuse it with time.sleep(5).
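
	A minimal sketch of the kind of loop those notes describe (the file name 'video.mp4', the window title, and the 25 ms delay are arbitrary illustrative choices):

	import cv2

	cap = cv2.VideoCapture('video.mp4')
	while cap.isOpened():
	    ret, frame = cap.read()
	    if not ret:
	        break
	    cv2.imshow('frame', frame)
	    # waitKey returns the pressed key code (or -1 on timeout); mask to 8 bits and compare with ord('q')
	    if cv2.waitKey(25) & 0xff == ord('q'):
	        break
	cap.release()
	cv2.destroyAllWindows()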
	
==============================================================================================
21. The relationship and differences between matplotlib, pylab, imread, and cv2
	
	If you are not reading and displaying the image with the cv2 module from OpenCV, do it like this:
	import matplotlib.pyplot as plt	# it must be matplotlib.pyplot; the bare matplotlib package has no imread/imshow
	import pylab
	img = plt.imread('a.jpg')
	plt.imshow(img)		# ---- this line alone does not open an image window
	pylab.show()		# ---- only with this line added does the image window appear
	
	-------------------------------------------------------
	Compare this with using the cv2 module:
	import cv2
	img = cv2.imread('a.jpg')
	cv2.imshow('my_window', img)
	cv2.waitKey()
	cv2.destroyAllWindows()
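
	As a side note, a minimal sketch assuming an interactive matplotlib backend and the same example file 'a.jpg': with current matplotlib, pylab is not needed at all, since plt.show() by itself opens the window:

	import matplotlib.pyplot as plt

	img = plt.imread('a.jpg')
	plt.imshow(img)
	plt.show()		# blocks until the figure window is closed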

================================================================================================
22. Usage of the various attributes of arrays in Python:
		a = np.random.random(4)
		type(a)					# --- outputs <class 'numpy.ndarray'>
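
		A short sketch of the sort of attributes such a note typically goes on to list (the 4-element array mirrors the example above; the printed values assume the default float64 dtype):

		import numpy as np

		a = np.random.random(4)
		print(type(a))    # <class 'numpy.ndarray'>
		print(a.shape)    # (4,)
		print(a.ndim)     # 1
		print(a.size)     # 4
		print(a.dtype)    # float64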