import os

import cv2
from PIL import Image


def extractframesResize(v_path, f_path):
    #creating a tmp directory for the extracted frames
    path = str(f_path)
    try:
        os.mkdir(path)
    except OSError as e:
        print("Creation of the directory %s failed" % path)
        print("Error: %s : %s" % (path, e.strerror))
    else:
        print("Successfully created the directory %s " % path)
    #open video
    cap = cv2.VideoCapture(str(v_path))
    i = 0
    #save frames to tmp directory
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame_path = f_path + "/frm" + str(i) + ".jpg"
        cv2.imwrite(frame_path, frame)

        #reopen the saved frame and shrink it to 32x32
        image = Image.open(frame_path)
        new_image = image.resize((32, 32))
        new_image.save(frame_path)

        i += 1

    cap.release()
    return i
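
A minimal usage sketch (both paths are hypothetical):

n_frames = extractframesResize("clips/sample.mp4", "tmp_frames")  # hypothetical paths
print("Extracted %d frames" % n_frames)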
def gen(batch_size=1, flag='train'):
    # Integer division: start and end index batches, not individual samples.
    if flag == 'train':
        start = train_start // batch_size
        end = train_end // batch_size
    else:
        start = train_end // batch_size
        end = val_end // batch_size
    x_train = np.zeros((batch_size, IMAGE_WIDTH, IMAGE_HEIGHT, 3),
                       dtype='float32')
    y_train = np.zeros((batch_size, 1), dtype='float32')

    while True:
        for i in range(start, end):
            for j in range(batch_size):
                line = content[i * batch_size + j]
                # Long lines come from the "pre/" set; short ones use the
                # fixed-width lamem file naming.
                if len(line) > 23:
                    y_train[j, :] = float(line[line.find(" ") + 1:])
                    path = "pre/" + line[:line.find(" ")]
                else:
                    y_train[j, :] = float(line[13:])
                    path = "lamem/images/" + line[:12]

                # preprocess the image
                img = image.load_img(path)
                img = img.resize((IMAGE_WIDTH, IMAGE_HEIGHT))
                img = image.img_to_array(img)
                img = preprocess_input(img, mode='tf')
                x_train[j, :img.shape[0], :img.shape[1], :] = img

            # return train data
            yield x_train, y_train
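
A hedged training sketch, assuming a compiled Keras model and the module-level globals (content, train_start, train_end, val_end) the generator reads; fit_generator is the classic Keras entry point for generator-based training:

batch_size = 32
model.fit_generator(gen(batch_size, 'train'),
                    steps_per_epoch=(train_end - train_start) // batch_size,
                    validation_data=gen(batch_size, 'val'),
                    validation_steps=(val_end - train_end) // batch_size,
                    epochs=10)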
Example #3
def process_images(fp):
    # Assumes keras-style load_img/img_to_array helpers and module-level w, h.
    imgs = []
    for f in fp:
        img = load_img(f)
        img = img.resize((w, h), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10
        img = img_to_array(img) / 255
        # Move channels first; a raw reshape(3, w, h) would scramble the pixels.
        img = img.transpose(2, 0, 1)
        imgs.append(img)
    return np.array(imgs)
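
A minimal usage sketch (file names hypothetical; w and h are the module-level size globals the function expects):

w = h = 64
batch = process_images(['a.jpg', 'b.jpg'])  # hypothetical image paths
print(batch.shape)                          # (2, 3, 64, 64)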
Example #4
def get_concatenated_images(indexes, thumb_height):
    # Scale each image to a common height, then concatenate them horizontally.
    thumbs = []
    for idx in indexes:
        img = image.load_img(images_all[idx])
        img = img.resize(
            (int(img.width * thumb_height / img.height), thumb_height))
        thumbs.append(img)
    concat_image = np.concatenate([np.asarray(t) for t in thumbs], axis=1)
    return concat_image
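
For example, the resulting strip can be displayed with matplotlib (images_all is the module-level list of image paths the function indexes into):

import matplotlib.pyplot as plt

strip = get_concatenated_images([0, 1, 2], thumb_height=100)
plt.imshow(strip)
plt.axis('off')
plt.show()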
Example #5
File: tpi.py  Project: CBienvenue/TPI
def functionODF(img, step=1):
    """
        
        Function that compute the orientation density function (ODF) of an image.
        
        In:
            img: Image to process (.png)
            step: Angle step for Radon transform [degrees]
        Out: 
            theta: vector of angles [degrees]
            odf: vector ODF values
        
    """

    #Libraries

    from PIL import Image
    import numpy as np
    from skimage.transform import radon

    #Verify if image is square and resize if not

    (Lx, Ly) = img.size
    L = max(Lx, Ly)

    if Lx != Ly:
        img = img.resize((L, L), Image.BICUBIC)

    #Transform image into matrix and apply circular mask

    x0 = y0 = (L + 1) / 2
    rmax = L / 2

    img = np.asarray(img)
    img2d = np.zeros((L, L))

    for i in np.arange(L):
        for j in np.arange(L):
            r = np.sqrt((i - x0)**2 + (j - y0)**2)
            if r <= rmax:
                img2d[i, j] = img[j][i][0]

    #Apply Radon and Fourier transforms

    theta = np.arange(0, 180, step)
    imgRad = radon(img2d, theta, circle=False)
    imgFT = abs(np.fft.fftshift(np.fft.fft(imgRad, axis=0)))

    #Compute odf

    odf = np.sum(imgFT, 0)
    odf = odf / sum(odf)

    #Output parameters
    return odf, theta
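
A usage sketch, assuming an RGB .png on disk (the file name is hypothetical):

from PIL import Image
import matplotlib.pyplot as plt

img = Image.open('fibers.png')  # hypothetical input image
odf, theta = functionODF(img, step=1)
plt.plot(theta, odf)
plt.xlabel('angle [degrees]')
plt.ylabel('ODF')
plt.show()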
def load_img(img, WIDTH, HEIGHT):
    ## load the image and resize it to the model's input size
    img = Image.open(img)
    img = img.resize((WIDTH, HEIGHT))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    # remove alpha channel if present
    x = x[:, :, :, :3]
    # rescale data
    #x = x * 1. / 255
    return x
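
A usage sketch (file name and input size are hypothetical; model is assumed to be loaded elsewhere):

x = load_img('cat.jpg', 224, 224)
preds = model.predict(x)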
def search_windows(img,
                   windows,
                   clf,
                   scaler,
                   color_space='BGR',
                   spatial_size=(32, 32),
                   hist_bins=32,
                   hist_range=(0, 256),
                   orient=9,
                   pix_per_cell=8,
                   cell_per_block=2,
                   hog_channel=0,
                   spatial_feat=True,
                   hist_feat=True,
                   hog_feat=True):

    #1) Create an empty list to receive positive detection windows
    on_windows = []
    #2) Iterate over all windows in the list
    for window in windows:
        #3) Extract the test window from original image
        test_img = cv2.resize(
            img[window[0][1]:window[1][1], window[0][0]:window[1][0]],
            (64, 64))
        #test_img = img[350: img.shape[0]-100, 600:]
        #4) Extract features for that window using single_img_features()

        features = single_img_features(test_img,
                                       color_space=color_space,
                                       spatial_size=spatial_size,
                                       hist_bins=hist_bins,
                                       orient=orient,
                                       pix_per_cell=pix_per_cell,
                                       cell_per_block=cell_per_block,
                                       hog_channel=hog_channel,
                                       spatial_feat=spatial_feat,
                                       hist_feat=hist_feat,
                                       hog_feat=hog_feat)
        #5) Scale extracted features to be fed to classifier
        test_features = scaler.transform(np.array(features).reshape(1, -1))
        #test_features = scaler.transform(features)
        #6) Predict using your classifier
        prediction = clf.predict(test_features)
        confidence = clf.decision_function(test_features)
        #7) If positive (prediction == 1) and confident enough, save the window
        if prediction == 1 and confidence > 0.1:
            on_windows.append(window)
    #8) Return windows for positive detections
    return on_windows
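
A hedged usage sketch; slide_window() is assumed to come from the same vehicle-detection pipeline as single_img_features(), and clf/X_scaler are a fitted classifier and feature scaler:

windows = slide_window(img, y_start_stop=[400, 656],
                       xy_window=(96, 96), xy_overlap=(0.5, 0.5))
hot_windows = search_windows(img, windows, clf, X_scaler,
                             color_space='BGR', hog_channel=0)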
Example #8
def load_image(img_path):
    image = Image.open(img_path)
    image = image.resize((640, 480))
    image = np.array(image)

    imagenet_stats = {
        'mean': [0.485, 0.456, 0.406],
        'std': [0.229, 0.224, 0.225]
    }

    # Scale to [0, 1] first, then standardize with the ImageNet statistics;
    # standardized values legitimately fall outside [0, 1], so no clipping.
    image = image / 255.0
    image = (image - imagenet_stats['mean']) / imagenet_stats['std']

    return image
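
For example, assuming an RGB image (the path is hypothetical):

img = load_image('street.png')
print(img.shape)  # (480, 640, 3)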
test_input_folder = "/content/sample_data/Rain-Haze/Haze"
test_output_folder = "/content/sample_data/dehazed_test_images2/"
if not os.path.exists(test_output_folder):
  os.mkdir(test_output_folder)
  
file_types = ['jpeg','jpg','png']

with tf.Session() as sess:
  #saver.restore(sess,'models/model_checkpoint_9.ckpt')
  saver.restore(sess,'/content/sample_data/TESTING_TAR/MY_WEIGHTS_USING_EEC206_DATASET/model_checkpoint_9.ckpt')
  test_image_paths = []
  for file_type in file_types:
    test_image_paths.extend(glob.glob(test_input_folder+"/*."+file_type))
  
  
  for path in test_image_paths:
    image_label = path.split(test_input_folder)[-1][1:]
    image = Image.open(path)
    image = image.resize((640, 480))
    image = np.asarray(image) / 255.0
    image = image.reshape((1,) + image.shape)
    dehazed_image = sess.run(dehazed_X,feed_dict={X:image,Y:image})
    
    
    fig, axes = plt.subplots(nrows=1, ncols=2,figsize=(10,10))
    axes[0].imshow(image[0])
    axes[1].imshow(dehazed_image[0])
    fig.tight_layout()
    
    dehazed_image = np.asarray(dehazed_image[0] * 255,dtype=np.uint8)
    mpl.image.imsave(test_output_folder + 'dehazed_' + image_label, dehazed_image)
Example #10
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import time

########################################
#Basic
#open the image for reading
img = Image.open('my_fitz_roy.jpg', 'r')

#resizes image for testing
newImage = img.resize((200, 200))
newImage.save('new_fitz.jpg')

#prints out all the pixel values
pixels = list(newImage.getdata())
width, height = newImage.size
pixels = [pixels[i * width:(i + 1) * width] for i in range(height)]
print(newImage.size)
newImage.show()
print(pixels)
#End here for basic version
################################################

pixels2 = [[
    [0, 0, 255],
    [0, 0, 255],
    [0, 0, 255],
    [0, 0, 255],
    [0, 0, 255],
Example #11
from os import listdir

import numpy as np
from PIL import Image

Total = 40
size = 50  #size of image

images = []

#Read data from Category1 and Category2
for category in ('Dataset/Category1/', 'Dataset/Category2/'):
    for filename in listdir(category):
        image = Image.open(category + filename)
        im = np.array(image.resize((size, size)).convert('L'))  #grayscale
        row, col = im.shape
        images.append(im.reshape(row * col))

X = np.array(images).T  #columns are flatten images i.e features

meanX = np.mean(X, axis=1, keepdims=True)
inputMatrix = X - meanX
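
From here, a minimal sketch of the usual next step, extracting principal components from the centered data with an SVD (an illustration, not part of the original script):

# Columns of U are the principal directions ("eigenfaces") of the dataset.
U, S, Vt = np.linalg.svd(inputMatrix, full_matrices=False)
explained = (S ** 2) / np.sum(S ** 2)  # fraction of variance per component
print(explained[:5])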
def bin_spatial(img, size=(32, 32)):
    # Use cv2.resize().ravel() to create the feature vector
    features = cv2.resize(img, size).ravel()
    # Return the feature vector
    return features
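
For example (assuming cv2 is imported and a 3-channel image is loaded as a NumPy array):

import cv2

img = cv2.imread('frame.jpg')       # hypothetical input image
spatial_features = bin_spatial(img, size=(32, 32))
print(spatial_features.shape)       # (3072,) for a 3-channel image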
Example #13
def main():

        #have land cover and slope, reclassify land cover for impedance
        #have elevation map, use arcpy to calculate slope for each cell might not need this
        #use raster to numpy array to get numpy arrays of each thing
        #use formula from tobler to calculate walking speed across each cell
        #arc.env.workspace = "C:\Users\Eric Cawi\Documents\SAR\Motion Model Test"
        
        sl = misc.imread(base_dir + "slope.tif")
        sl = np.add(sl,.1)#get rid of divide by 0 errors
        sl = sl* np.pi/180.0 #convert slope to radians
        #have to resize image to make the arrays multiply correctly
        img = Image.open(base_dir + 'imp2.png')
        shp = np.shape(sl)
        shp = tuple(reversed(shp))#need to get in width x height instead of height x width for the resize
        img = img.resize(shp,Image.BILINEAR)
        img.save(base_dir + 'imp2.png')
        imp = misc.imread(base_dir + 'imp2.png')
        impedance_weight = .5
        slope_weight = .5
        #before getting inverse of slope use array to make walking speed array
        # for simplicity right now using tobler's hiking function and multiplying with the impedance weight
        walking_speeds = 6*np.exp(-3.5*np.abs(np.add(np.tan(sl),.05)))*1000.0/60.0 # speed in kmph*1000 m/km *1hr/60min
        sl = np.divide(1,sl)

        imp = imp.astype('float')  #astype returns a copy, so the result must be reassigned

        #walking speed weighted with land cover, from the Doherty paper, which classifies
        #25 = 25% slower than normal and 100 = 100% slower than normal walking speed
        vel_weight = np.divide(np.subtract(100.0, imp), 100.0)
        walking_speeds = np.multiply(vel_weight, walking_speeds)  #since 1 arcsecond is roughly 30 meters the dimensions should work out

        #lower impedance is higher here, which works for probabilities; the float
        #conversion above avoids divide-by-zero errors
        imp = np.divide(1.0, imp)

        print('imp:', imp)
        print('sl:', sl)
        print('vw:', vel_weight)
        res = 25000/shp[0]  #30 meter impedance resolution from the land cover dataset
        end_time = 240  #arbitrary 4 hours
        #establish initial conditions in polar coordinates, using polar coordinates because i think it's easier to deal with the angles
        #r is the radius from the last known point, theta is the angle from "west"/the positive x axis through the lkp
        r = np.zeros(NUM_SIMS)#simulates 1000 hikers starting at ipp
        theta = np.random.uniform(0,2*np.pi,NUM_SIMS) #another 1000x1 array of angles, uniformly distributing heading for simulation
        stay = .05
        rev = .05#arbitrary values right now, need to discuss
        sweep = [-45.0 , -35.0 , -25.0 , -15.0 , -5.0 , 5.0 , 15.0 , 25.0 , 35.0, 45.0 , 0.0 , 180.0] #0 represents staying put 180 is a change in heading
        for i in range(len(sweep)):
                sweep[i] = sweep[i]*np.pi/180.0 #convert sweep angles to radians
        dr = 120.0 #120 meters is four cells ahead in the land cover raster which gives 9 specific cells for the sweeping angles
        
        #for each lookahead, have ten angles between -45 and plus 45 degrees of the current heading
        #get average impedance around 100 meters ahead, average flatness, weight each one by half
        #then scale to 1 - (prob go back + prob stay put)  these guys are all free parameters I think

        for i in range(NUM_SIMS):
                #print('hiker:', i)
                current_cell = [shp[0]//2 - 1, shp[1]//2 - 1]  #-1 compensates for 0-based indices; the middle of the array represents the IPP
                t = 0.0
                current_r = r[i]
                current_theta = theta[i]
                while t < end_time:
                        #look in current direction, need to figure out how to do the sweep of slope
                        slope_sweep = attract(current_r, current_theta,current_cell,sweep,sl, res,dr)
                        impedance_sweep = attract(current_r,current_theta,current_cell,sweep, imp, res,dr)
                        #goal: have relative attractiveness of both slope and land cover by taking values
                        #for slope might have a function that takes the least average change in each direction
                        #then take reciprocal to make smaller numbers bigger and take each change/sum of total to get relative goodness
                        #land cover take average 1/impedance in each direction and get relative attractiveness
                        sl_w = np.multiply(slope_sweep,slope_weight)
                        imp_w = np.multiply(impedance_sweep, impedance_weight)
                        probabilities= np.add(sl_w, imp_w)

                        #create a random variable with each of the 12 choices assigned the appropriate probability
                        dist = rv_discrete(values=(range(len(sweep)), probabilities))
                        ind = int(dist.rvs(size=1))  #rvs returns an array; cast to an int to index the list
                        dtheta = sweep[ind]
                        
                        #Note: cannot test floating point for equality!  Modified.  -crt
                        eps = 1e-4
                        if (-eps < dtheta < eps):
                                v = 0.0 #staying put, no change
                                dt = 10.0 #stay arbitrarily put for 10 minutes before making next decision
                                r_new = current_r
                                theta_new = current_theta + dtheta
                        elif (np.pi-eps < dtheta < np.pi + eps):#reversal case
                                v = avg_speed(current_cell, dtheta,dr,walking_speeds, res)
                                dt = dr/v
                                r_new = current_r-dr
                                theta_new = -1*current_theta
                        else:
                                #update the current hiker's new radius
                                if -eps < current_r < eps:
                                        r_new = dr
                                        theta_new = current_theta + dtheta
                                else:
                                        r_new = np.sqrt(current_r**2 + dr**2 - 2.0*current_r*dr*np.cos(np.pi-dtheta))  #law of cosines to find new r
                                        #law of sines for the new theta relative to the origin; walking_speeds treats each original cell as the origin when computing velocity
                                        asin = np.arcsin(dr*np.sin(np.pi-dtheta)/r_new)
                                        theta_new = current_theta + asin
                                v = avg_speed(current_cell, dtheta, dr, walking_speeds, res)  #average speed along the chosen line
                                dt = dr/v
                        #update for current time step
                        
                        current_r = r_new
                        current_theta= theta_new
                        t=t+dt
                        #print t
                        #update current_cell for slope and impedance
                        x = int(np.floor(current_r*np.cos(current_theta)/res))
                        y = int(np.floor(current_r*np.sin(current_theta)/res))

                        current_cell = [y, x]  #since the array was changed only one current cell is needed
                        
                #print current_cell
                        #print t
                        
                #update r and theta
                r[i] = current_r
                theta[i] = current_theta
                #print current_r, current_theta*180./np.pi
        #now that we have final positions for NUM_SIMS hikers at endtime the goal is to display/plot
        #Note: streamlined to eliminated some loops etc. -crt
        Xs = r * np.cos(theta)/50.
        Ys = r * np.sin(theta)/50.

        #Count the outsiders using vector logic: "OR" these arrays together
        outsiders = (Xs < -250) | (Xs > 250) | (Ys < -250) | (Ys > 250)
        num_outside = np.sum(outsiders)
        insiders = (-250<=Xs) & (Xs<=250) & (-250<=Ys) & (Ys<=250)
        num_inside = np.sum(insiders)
        prob_outside = 1. * num_outside / NUM_SIMS
        prob_inside = 1. * num_inside / NUM_SIMS

        # Create nominal grid of N 50m cells. Will get resized later.
        # Avoid zeros by putting 10/NUM_SIMS observations in each cell.
        # TODO: let prior be the distance model, with weight of ~1/100 motion model.
        bias = 10.
        counts = bias * np.ones((501,501)) / NUM_SIMS
        coords = zip(Ys, Xs)
        for y, x in coords:
                if -250 <= x <= 250 and -250 <= y <= 250:
                        counts[250 + int(y)][250 + int(x)] += 1  #integer indices; Xs/Ys are floats

        probs = counts/(np.sum(counts)+bias)
        print('SumCounts  :', np.sum(counts))
        print('\nCounts   :', counts)
        print('Probs\n', probs)
        print('\nP_inside  :', prob_inside)
        print('P_outside :', prob_outside)
        print('Sum(probs):', np.sum(probs))
        case_name = 'test'
        #example plotting for testing:
        plt.title("Motion Model Test")
        plt.imshow(probs,cmap = 'gist_gray')
        plt.colorbar()
        name = '%s/%s.png' % (base_dir, case_name)
        plt.imsave(name, probs, cmap = 'gist_gray')
        tag_image(name, prob_outside)
Example #14
    stream.stop_stream()
    stream.close()
    audio.terminate()

    wave_file = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
    wave_file.setnchannels(CHANNELS)
    wave_file.setsampwidth(audio.get_sample_size(FORMAT))
    wave_file.setframerate(RATE)
    wave_file.writeframes(b''.join(frames))
    wave_file.close()

    return wave_file


audio = record()
audio_fpath = "resources/"
audio_clips = os.listdir(audio_fpath)
print("No. of .wav files in audio folder = ", len(audio_clips))
graph_spectrogram()

# Load the spectrogram and shape it into a (1, 50, 245, 1) batch for the model.
spec = image.imread('sp_xyz.png')
audio_clip = np.resize(np.array(spec, dtype=np.float32), (50, 245, 1))
clip = np.array([audio_clip])

prediction = model.predict(clip)
classes = np.argmax(prediction, axis=1)
print(classes)
Example #15
from PIL import Image
import numpy as np
import random
###################################################
## Import the image here
image = Image.open('src/pics/lit.jpg')
## Set the Size here
size = 128 #px
###################################################
load_img_rz = np.array(image.resize((size,size)))
Image.fromarray(np.uint8(load_img_rz)).save('r_pic.jpg')
grey_img = np.zeros(shape = (size,size))
threshold = 300
bl_threshold = 150
wh_threshold = 500
for i in range(0,size):
    for j in range(0,size):
        if(sum(load_img_rz[i,j]) < bl_threshold):
            grey_img[i,j] = 0
            if(j<size-1 and random.randint(1,16) == 2):
                grey_img[i,j+1] = 0
            if(i<size-1 and random.randint(1,16) == 2):
                grey_img[i+1,j] = 0
            if(j > 0 and random.randint(1,16) == 2):
                grey_img[i,j-1] = 0
        elif(sum(load_img_rz[i,j]) > wh_threshold):
            grey_img[i,j] = 255
            if(j<size-1 and random.randint(1,2) == 2):
                grey_img[i,j+1] = 255