def preprocess_image(image_path, height=None, width=None):
    # Default the output height; if no width is given, derive it from the
    # source image's aspect ratio.
    height = height or 400
    if not width:
        orig_width, orig_height = load_img(image_path).size
        width = int(orig_width * height / orig_height)
    img = load_img(image_path, target_size=(height, width))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img
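A minimal usage sketch for the helper above (the file name is a placeholder; assumes `load_img`/`img_to_array` from `keras.preprocessing.image`, `numpy as np`, and `keras.applications.vgg19` are imported):

# Hypothetical usage; 'style.jpg' stands in for any local image file.
batch = preprocess_image('style.jpg', height=400)
print(batch.shape)  # (1, 400, derived_width, 3), ready for a VGG19 input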
Example No. 2
def extract_feats(img_path):
    try:
        img = image.load_img(img_path, target_size=(224, 224))
    except IOError:
        print("couldn't load file")
        return None

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    # Feature maps from the model's output layer (e.g. block4_pool).
    block4_pool_features = model.predict(x)
    return block4_pool_features
def featureextraction(imageFilepath, maskFilepath):
    # Read the image/mask from disk before converting to numpy arrays.
    image_array = sitk.GetArrayFromImage(sitk.ReadImage(imageFilepath))
    mask_array = sitk.GetArrayFromImage(sitk.ReadImage(maskFilepath))
    (zstart, ystart, xstart), (zstop, ystop, xstop) = maskcroppingbox(mask_array, use2D=False)
    roi_images = image_array[zstart-1:zstop+1, ystart:ystop, xstart:xstop].transpose((2, 1, 0))
    # Rescale the in-plane dimensions to 224x224 for the CNN input.
    roi_images1 = zoom(roi_images, zoom=[224/roi_images.shape[0], 224/roi_images.shape[1], 1], order=3)
    roi_images2 = np.array(roi_images1, dtype=np.float64)

    x = image.img_to_array(roi_images2)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    base_model_pool_features = model.predict(x)

    # Global max-pool each channel of the feature map into a single value.
    feature_map = base_model_pool_features[0]
    feature_map = feature_map.transpose((2, 1, 0))
    features = np.max(feature_map, -1)
    features = np.max(features, -1)
    deeplearningfeatures = collections.OrderedDict()
    for ind_, f_ in enumerate(features):
        deeplearningfeatures[str(ind_)] = f_
    return deeplearningfeatures
Example No. 4
    def get_features(self):
        """
        Extract CNN features from the current image crop.
        :return: list of cosine-windowed feature maps
        """
        if self.feature_type == "HDT":
            from keras.applications.vgg19 import preprocess_input
            x = imresize(self.im_crop.copy(), self.resize_size)
            x = x.transpose((2, 0, 1)).astype(np.float64)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            features_list = self.extract_model_function(x)
            for i, features in enumerate(features_list):
                features = np.squeeze(features)
                # Min-max normalize each map, then taper its borders with
                # the precomputed cosine window.
                features = (features.transpose(1, 2, 0) -
                            features.min()) / (features.max() - features.min())
                features_list[i] = np.multiply(features,
                                               self.cos_window[i][:, :, None])
            return features_list
        else:
            raise NotImplementedError(
                "feature type '{}' not implemented".format(self.feature_type))
def extra_feat(img_path):
    # Use VGG19 as the feature extractor.
    base_model = VGG19(weights='imagenet', include_top=False)
    img = image.load_img(img_path)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    block1_pool_features = get_activations(base_model, 3, x)
    block2_pool_features = get_activations(base_model, 6, x)
    block3_pool_features = get_activations(base_model, 10, x)
    block4_pool_features = get_activations(base_model, 14, x)
    block5_pool_features = get_activations(base_model, 18, x)

    x1 = tf.image.resize_images(block1_pool_features[0], [112, 112])
    x2 = tf.image.resize_images(block2_pool_features[0], [112, 112])
    x3 = tf.image.resize_images(block3_pool_features[0], [112, 112])
    x4 = tf.image.resize_images(block4_pool_features[0], [112, 112])
    x5 = tf.image.resize_images(block5_pool_features[0], [112, 112])

    # Concatenate along channels. Use only x1, then x1+x2, then x1+x2+x3,
    # and so on, to visualize features from different blocks.
    F = tf.concat([x1, x2, x3, x4, x5], 3)
    return F
Example No. 6
def run(image_bytes):

    # The request payload carries the raw image bytes in a JSON wrapper.
    image_bytes = json.loads(image_bytes)['data'][0]
    image_bytes = image_bytes.encode('utf-8')
    encode_len = len(image_bytes)
    print(encode_len)

    image = Image.frombytes('RGBA', (1315, 640), image_bytes, 'raw')

    # VGG19 expects 224x224 RGB input; drop the alpha channel.
    image = image.resize((224, 224), Image.ANTIALIAS)
    image = img_to_array(image)
    image = image[:, :, 0:3]
    print(image.shape)
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    image = preprocess_input(image)

    model = VGG19()
    yhat = model.predict(image)
    label = decode_predictions(yhat)
    label = label[0][0]
    return label
    def predictBreed(self, imageTensor):
        bottleneck_feature = VGG19(weights='imagenet',
                                   include_top=False).predict(
                                       preprocess_input(imageTensor))

        VGG19_model = Sequential()
        VGG19_model.add(GlobalAveragePooling2D(input_shape=(7, 7, 512)))
        VGG19_model.add(Dropout(0.25))
        VGG19_model.add(BatchNormalization())
        VGG19_model.add(Activation('elu'))
        VGG19_model.add(Dense(133, activation='softmax'))
        VGG19_model.load_weights('saved_models/weights.best.VGG19.hdf5')

        predicted_vector = VGG19_model.predict(bottleneck_feature)
        # Rank class indices from most to least likely.
        predictionLikelihoodOrder = np.argsort(-predicted_vector)[0]

        prediction = []
        for predictionIndex in predictionLikelihoodOrder:
            prediction.append((self.dogBreedNames[predictionIndex],
                               predicted_vector.item(predictionIndex)))
        return prediction
    def cluster_images(self):
        # Allow Pillow to load partially corrupted files.
        ImageFile.LOAD_TRUNCATED_IMAGES = True
        model = NNModel(weights='imagenet', include_top=False)

        filelist = glob.glob(os.path.join(self.org_images, '*.jpg'))
        filelist.sort()
        featurelist = []
        kept_files = []  # only files that are successfully featurized
        for i, imagepath in enumerate(filelist):
            info = "Progress {curr}/{total}".format(curr=i,
                                                    total=len(filelist))
            print("\r" + info, end="")

            # noinspection PyBroadException
            try:
                img = image.load_img(imagepath, target_size=(224, 224))
                img_data = image.img_to_array(img)
                img_data = np.expand_dims(img_data, axis=0)
                img_data = preprocess_input(img_data)
                features = np.array(model.predict(img_data, batch_size=1000))
                featurelist.append(features.flatten())
                kept_files.append(imagepath)
            except Exception:
                # Skip unreadable images.
                continue

        # Clustering
        to_fit = np.array(featurelist)
        kmeans = KMeans(n_clusters=self.k, random_state=0,
                        verbose=1).fit(to_fit)

        # Clustering is computation-heavy, so copy the images out renamed
        # by cluster for the next run.
        try:
            os.makedirs(self.cluster_home)
        except OSError:
            pass

        for i, m in enumerate(kmeans.labels_):
            shutil.copy(
                kept_files[i], self.cluster_home + str(m) + "_" +
                os.path.basename(kept_files[i]))

        return kmeans
Example No. 9
def read_and_extract_features_cnn_SVM(images_filenames, cnn):
    # Extract features using a CNN.

    descriptors = []
    nimages = len(images_filenames)
    nfeatures_img = np.zeros(nimages, dtype=np.uint)
    print('Extracting features with CNN...')
    sys.stdout.flush()
    progress = 0
    for i in range(nimages):
        if np.float32(i) / nimages * 100 > progress + 10:
            progress = 10 * int(round(np.float32(i) / nimages * 10))
            print(str(progress) + '% completed')
            sys.stdout.flush()

        # Read and process image:
        img = image.load_img(images_filenames[i], target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        # Extract the features using the CNN:
        des = cnn.predict(x)
        # Append to list with features of all images:
        descriptors.append(des)
        # Number of features per image (height times width of the convolutional layer):
        nfeatures_img[i] = len(des)
    print('100% completed')
    sys.stdout.flush()

    # Transform everything to numpy arrays
    size_features = descriptors[0].shape[1]  # Length of each feature (depth of the convolutional layer).
    index_D = int(np.sum(nfeatures_img))  # total number of feature rows
    D = np.zeros((index_D, size_features), dtype=np.float32)
    idx_fin = 0
    for i in range(nimages):
        idx_ini = idx_fin
        idx_fin = idx_ini + int(nfeatures_img[i])
        D[idx_ini:idx_fin, :] = descriptors[i]
    return D, nfeatures_img
Example No. 10
def predict(image_path):
    """Use VGG19 to label image"""
    # Load the VGG19 model
    # https://keras.io/applications/#VGG19
    model = VGG19(include_top=True, weights='imagenet')

    # Define default image size for VGG19
    image_size = (224, 224)
    img = image.load_img(image_path, target_size=image_size)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    prediction = model.predict(x)
    predictions = decode_predictions(prediction, top=1)[0]
    predicted = predictions[0][1]
    print('Predicted:', predicted)
    predicted_clean = predicted.replace('_', ' ')
    #https://github.com/RasaHQ/rasa_nlu/issues/3102
    K.clear_session()
    if (predicted_clean == 'Siamese cat'):
        predicted_clean = 'Siamese'
    return predicted_clean
Example No. 11
    def get_cnn_feature(self, image, roi):
        cx, cy, w, h = roi
        w = int(w * self.padding) // 2 * 2
        h = int(h * self.padding) // 2 * 2
        x = int(cx - w // 2)
        y = int(cy - h // 2)

        sub_image = image[y:y + h, x:x + w, :]
        cv2.imshow('sub', sub_image)  # show the cropped patch, not the full frame
        c = cv2.waitKey(1) & 0xFF
        if c == 27 or c == ord('q'):
            return

        print(sub_image.shape)
        resized_image = cv2.resize(sub_image, (224, 224),
                                   interpolation=cv2.INTER_LINEAR)
        cv2.imshow('resized', resized_image)
        c = cv2.waitKey(1) & 0xFF
        if c == 27 or c == ord('q'):
            return
        #resized_image = cv2.resize(sub_image, (self.pw, self.ph))

        #resized_image = img_to_array(resized_image)
        img = expand_dims(resized_image, axis=0)

        img = preprocess_input(img)
        feature_maps = model.predict(img)
        ffs = []
        for fmap in feature_maps:
            ffs.append(
                cv2.resize(fmap[0, :, :, :], (224, 224),
                           interpolation=cv2.INTER_LINEAR))
        f = feature_maps[0][0, :, :, :]

        # Assumes channels-first feature maps: (channels, height, width).
        fc, fh, fw = f.shape
        self.scale_h = float(fh) / h
        self.scale_w = float(fw) / w

        return f
Example No. 12
def evaluation_batch_generator(paths, labels, batch_size, preprocess_input):
    num_images = len(paths)
    while True:
        current_position = 0
        while current_position < num_images:
            # Index the next batch; the final batch may be smaller.
            if current_position + batch_size < num_images:
                idx_batch = np.arange(current_position, current_position + batch_size)
            else:
                idx_batch = np.arange(current_position, num_images)

            # Read the batch in parallel; each item is (image, metadata).
            O = pool.imap(read_image_valid, paths[idx_batch])

            X_init = [x for x in O]

            X = np.array([preprocess_input(x[0].astype('float32')) for x in X_init]).astype('float32')
            M = np.array([x[1] for x in X_init])
            y = to_categorical(labels[idx_batch], num_classes=10)

            current_position += batch_size

            #yield [X,M], y
            yield X, y
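A hedged driver sketch for the generator above; `model`, `paths`, `labels`, and the worker `pool` are assumed to exist as in the snippet, and since the generator loops forever, a step count bounds the pass:

# Hypothetical usage; assumes a compiled 10-class Keras `model` with an
# accuracy metric is in scope.
batch_size = 32
gen = evaluation_batch_generator(paths, labels, batch_size, preprocess_input)
steps = int(np.ceil(len(paths) / float(batch_size)))
loss, acc = model.evaluate_generator(gen, steps=steps)
print(loss, acc)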
Example No. 13
    def runs(self, path):
        model = VGG19(weights='imagenet')
        img_path = path
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        predict = model.predict(x)

        dec = decode_predictions(predict, top=3)[0]
        aiArr = []
        # print(dec)
        for item in dec:
            aiObj = {
                "id": item[0],
                "name": str(item[1]).replace("_", " "),
                "zh_name": googletranslate(item[1]),
                "accurate": str(item[2])
            }
            aiArr.append(aiObj)

        return json.dumps(aiArr)
Example No. 14
def extract_VGG_features(rel_path_videos, video_names, n):
    VGG_features_dictionary = dict()
    model = VGG19(weights='imagenet')
    #model.summary()
    vgg19_without_last = Model(inputs=model.inputs, outputs=model.get_layer('fc2').output)
    
    for i in range(0,n):
        path_video = get_path_video(rel_path_videos, video_names[i])
       
        video = cv2.VideoCapture(path_video)
        video_length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        VGG19_features = []
        for j in range(0,video_length):
            frame_rgb = get_frame_resized(video,j)
            #formatted_img = convert_image_to_format(frame_rgb)
            preprocessed_img = image.img_to_array(frame_rgb)
            preprocessed_img = np.expand_dims(preprocessed_img, axis=0)
            preprocessed_img = preprocess_input(preprocessed_img)
           
            img_VGG19 = np.array(vgg19_without_last.predict(preprocessed_img)).flatten()#neural_representations(preprocessed_img)
            VGG19_features.append(img_VGG19)
        VGG_features_dictionary[video_names[i]] = np.array(VGG19_features)
    return VGG_features_dictionary
Example No. 15
def preprocess_image(img, model_name=model_name, matrix=False):
    if model_name == 'faceNet':
        if matrix:
            mat = []
            for im in img:
                image = im.copy()
                image = image.resize((160, 160))
                image = img_to_array(image)
                mean, std = image.mean(), image.std()
                image = (image - mean) / std
                mat.append(image)
            return np.asarray(mat)
        else:
            img = img.resize((160, 160))
            img = img_to_array(img)
            mean, std = img.mean(), img.std()
            img = (img - mean) / std
    else:
        img = img.resize((224, 224))
        img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    return img
def CNN_all_features(names, cnn):
    from keras.applications.vgg19 import VGG19
    from keras.applications.inception_v3 import InceptionV3
    from keras.applications.vgg19 import preprocess_input
    f = []
    if cnn == 'VGG':
        model = VGG19(weights='imagenet')
        dsize = (224, 224)
    else:
        model = InceptionV3(weights='imagenet')
        dsize = (299, 299)
    for i in range(len(names)):
        # Note: cv2.imread returns BGR, while preprocess_input expects RGB.
        img = cv2.imread(names[i])
        img = cv2.resize(img, dsize=dsize)
        img = img.astype('float32')
        x = np.expand_dims(img, axis=0)
        x = preprocess_input(x)
        features = model.predict(x)
        # Stack per-image predictions row-wise.
        if i == 0:
            f = features
        else:
            f = np.vstack((f, features))
    return f
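A hedged usage sketch (the file names are placeholders; with the full VGG19 classifier each row of the result is a 1000-way ImageNet softmax vector):

# Hypothetical call; replace the names with real image paths.
feats = CNN_all_features(['img1.jpg', 'img2.jpg'], cnn='VGG')
print(feats.shape)  # (2, 1000)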
def preprocess_image(x):
    array = np.asarray(x)
    width, height = array.shape
    phases = np.empty((width, height))
    tmp = np.empty((width, height, 3))
    for i in range(width):
        for j in range(height):
            pol = cmath.polar(array[i][j])
            tmp[i][j][0] = pol[0]
            phases[i][j] = pol[1]
    maximum = np.max(np.abs(tmp))
    print("maximum1 = " + str(maximum))
    for i in range(width):
        for j in range(height):
            val = tmp[i][j][0]
            r, g, b = norm_to_rgb(val / maximum)
            tmp[i][j][0] = r
            tmp[i][j][1] = g
            tmp[i][j][2] = b
    #scipy.misc.imsave('images/' + cle + '.png', tmp)
    ret = np.expand_dims(tmp, axis=0)
    ret = vgg19.preprocess_input(ret)
    return ret, phases, maximum
Example No. 18
    def call_predict(self, images, folder):

        predictions = []
        #predict
        for image_name in images:
            image_path = folder + "/" + image_name
            print(f"imagepath: {image_path}")
            test_image = keras.preprocessing.image.load_img(image_path,
                                                            target_size=(224,
                                                                         224),
                                                            grayscale=False)
            test_image = image.img_to_array(test_image)
            test_image = np.expand_dims(test_image, axis=0)
            test_image = preprocess_input(test_image)
            # print(test_image)
            predict = self.model_final.predict(test_image)
            # print(predict)
            zip_pred = zip(predict[0], self.labels)
            for pred_value, pred in zip_pred:
                if (pred_value > 0.7):
                    predictions.append((image_name, pred))

        return predictions
Example No. 19
    def build_base_model(self,
                         inputs,
                         nb_blocks=5,
                         nb_layers_per_block=2,
                         init_nb_filters=64,
                         growth_rate=2,
                         max_nb_filters=512,
                         activation='relu',
                         batch_normalization=False):

        # create the vgg backbone
        if self.backbone_name == 'vgg16':
            inputs = Lambda(lambda x: keras_vgg16.preprocess_input(x))(inputs)
            base_model = keras_vgg16.VGG16(input_tensor=inputs,
                                           include_top=False,
                                           weights='imagenet')
        elif self.backbone_name == 'vgg19':
            inputs = Lambda(lambda x: keras_vgg19.preprocess_input(x))(inputs)
            base_model = keras_vgg19.VGG19(input_tensor=inputs,
                                           include_top=False,
                                           weights='imagenet')
        elif self.backbone_name == 'unet':
            x = Lambda(lambda x: x / 255.)(inputs)
            base_model = encoder(input_tensor=x,
                                 init_nb_filters=init_nb_filters,
                                 growth_rate=growth_rate,
                                 nb_blocks=nb_blocks,
                                 nb_layers_per_block=nb_layers_per_block,
                                 max_nb_filters=max_nb_filters,
                                 activation=activation,
                                 batch_normalization=batch_normalization,
                                 name='unet')
        else:
            raise NotImplementedError("Backbone '{}' not recognized.".format(
                self.backbone_name))

        return base_model
Example No. 20
def prepare_vgg19_features(limit=1000):
    print("starting vgg19 image preprocessing")
    from keras.applications.vgg19 import VGG19
    from keras.preprocessing import image
    from keras.applications.vgg19 import preprocess_input
    from keras.models import Model

    model = VGG19(weights='imagenet', include_top=False)

    data_ques_json = 'data/vqa_data_prepro.json'
    with open(data_ques_json, 'r') as an_file:
        ques_json_data = json.loads(an_file.read())

    hf = h5py.File('img_prepro.h5', 'w')

    total_count = 0
    count = 0
    imgs = []
    final_features = np.empty((0, 512, 14, 14))
    while total_count < len(ques_json_data['unique_img_train']):
        # Index with total_count so every training image is visited.
        img_path = ques_json_data['unique_img_train'][total_count]
        img = image.load_img(img_path, target_size=(448, 448))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        imgs.append(x)
        count = count + 1
        total_count = total_count + 1
        if count == limit:
            imgs = np.vstack(tuple(imgs))
            features = model.predict(imgs)
            final_features = np.vstack((final_features, features))
            count = 0
            imgs = []
    # Flush the final partial batch, if any.
    if imgs:
        imgs = np.vstack(tuple(imgs))
        features = model.predict(imgs)
        final_features = np.vstack((final_features, features))
    print(final_features.shape)
    hf.create_dataset('train_images', data=final_features)
    print("Finished preprocessing")
Example No. 21
    def read_im(self, f):
        im = cv2.imread(f)  # e.g. (1920, 2560, 3)
        h, w, _ = im.shape
        rw, rh = self.im_size  # e.g. (512, 384)

        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        if u'正常' in f:  # "normal" (defect-free) images: take a random crop
            x = np.random.randint(0, w - rw)
            y = np.random.randint(0, h - rh)
            im = im[y:y + rh, x:x + rw, :]

        else:

            bboxes = self.read_xml(f)
            # np.random.choice only handles 1-D input, so index the list.
            bbox = bboxes[np.random.randint(len(bboxes))]
            xmin, ymin, xmax, ymax = bbox
            if self.view:
                cv2.rectangle(im, (xmin, ymin), (xmax, ymax), (255, 0, 0), 5)
            # Center the crop on a random point inside the box when possible;
            # integer division keeps the slice indices ints under Python 3.
            if xmin + rw // 2 < xmax - rw // 2:
                x = np.random.randint(xmin + rw // 2, xmax - rw // 2)
            else:
                x = (xmin + xmax) // 2
            if ymin + rh // 2 < ymax - rh // 2:
                y = np.random.randint(ymin + rh // 2, ymax - rh // 2)
            else:
                y = (ymin + ymax) // 2
            x = max(0, x - rw // 2)
            y = max(0, y - rh // 2)
            im = im[y:y + rh, x:x + rw, :]

        im = self.normal_seq.augment_image(im)
        im = cv2.resize(im, self.im_size, interpolation=cv2.INTER_AREA)
        if not self.view:
            im = preprocess_input(im)

        return im
Example No. 22
def get_saliency(file_name):
    model = VGG19(weights='imagenet')
    model.summary()
    CLASS_INDEX = json.load(open("imagenet_class_index.json"))
    classlabel = []
    for i_dict in range(len(CLASS_INDEX)):
        classlabel.append(CLASS_INDEX[str(i_dict)][1])
    print("N of class={}".format(len(classlabel)))
    _img = load_img(file_name, target_size=(224, 224))
    plt.imshow(_img)
    plt.show()

    img = img_to_array(_img)
    img = preprocess_input(img)
    y_pred = model.predict(img[np.newaxis, ...])
    class_idxs_sorted = np.argsort(y_pred.flatten())[::-1]
    topNclass = 5
    for i, idx in enumerate(class_idxs_sorted[:topNclass]):
        print("Top {} predicted class:     Pr(Class={:18} [index={}])={:5.3f}".
              format(i + 1, classlabel[idx], idx, y_pred[0, idx]))
    # Utility to search for layer index by name.
    # Alternatively we can specify this as -1 since it corresponds to the last layer.
    layer_idx = utils.find_layer_idx(model, 'predictions')

    model.layers[layer_idx].activation = keras.activations.linear
    model = utils.apply_modifications(model)

    class_idx = class_idxs_sorted[0]
    grad_top1 = visualize_saliency(model,
                                   layer_idx,
                                   filter_indices=class_idx,
                                   seed_input=img[np.newaxis, ...])
    currentDT = datetime.datetime.now()
    out_name = "saliency_map" + str(currentDT) + ".jpg"
    save_name = "./static/images/" + out_name
    plt.imsave(fname=save_name, arr=grad_top1, cmap="hot")
    return out_name
    def Clicked1():

        in_path = filedialog.askopenfilename()
        print(in_path)

        root = Toplevel()
        # The input microstructure image is converted to 224x224, as
        # required by the ResNet50 architecture.
        img = image.load_img(in_path, target_size=(224, 224))
        if img is not None:
            # Print rounded values of the predicted probability scores.
            np.set_printoptions(suppress=True, formatter={'float_kind': '{:f}'.format})
            # Convert the input microstructure into a feature array.
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)
            x = preprocess_input(x)
            features = model.predict(x)[0]
            features = features.reshape(1, 2048)
            # p is the predicted class; k is the prediction score.
            p = m.predict_classes(features)
            k = m.predict(features)
            p_f = m_f.predict_classes(features)
            k_f = m_f.predict(features)
        im = PIL.Image.open(in_path).resize((300, 300))
        photo = PIL.ImageTk.PhotoImage(im)
        s, s_f = getText(p[0], p_f[0])

        pr = round(k[0][p[0]] * 100, 3)
        prob = str(pr)

        pr_f = round(k_f[0][p_f[0]] * 100, 3)
        prob_f = str(pr_f)

        # pack() returns None, so keep the Label reference before packing.
        label = Label(root, image=photo, font=("Arial Bold", 10),
                      text=s + " with probability of " + prob + " %" + "\n" +
                           " and " + s_f + " with probability of " + prob_f + " % ",
                      compound=tkinter.BOTTOM)
        label.pack()

        label.image = photo
Example No. 24
def get_frame_feature(frame, target_pos, shape):
    downer_target = get_downer_coordinate(target_pos, image_shape, shape)
    #print("downer target coordinate: ", downer_target)
    x = mu.crop_image(frame, target_pos, shape=shape,
                      mode='gray')  # 1.png (253, 437)
    x = mu.resize_image(x)
    x = np.expand_dims(x, axis=0)
    x = np.float64(x)
    x = preprocess_input(x)

    models = [low_model, mid_model, high_model]
    features = []
    for model in models:
        #lock.acquire()
        feature = model.predict(x)
        #lock.release()
        #x = featured
        # note that for PIL, x is horizontal while y is vertical, which is contrast with numpy.
        feature = mu.resize_image(feature, shape)
        feature = mu.hann2D(feature)

        features.append(feature)

    return features, downer_target
Example No. 25
    def generate_training_data(self, X, Y, num_images=250, batch_size=10):

        image_generator = image_preprocessing.ImageDataGenerator(
            zoom_range=0.3, width_shift_range=0.3, height_shift_range=0.3)

        image_generator.fit(X)

        features_list = []
        labels_list = []
        total_batches = int(num_images / batch_size)
        batch_i = 0

        for X_batch, Y_batch in image_generator.flow(X,
                                                     Y,
                                                     batch_size=batch_size):
            batch_i += 1
            percent = batch_i / total_batches * 100

            print('Process image batch {} of {} ({}% complete) . . .'.format(
                batch_i, total_batches, percent))

            generated_images = preprocess_input(X_batch)
            features_list.append(
                self.vgg19_model.predict_on_batch(generated_images))
            labels_list.append(Y_batch)

            if len(features_list) * batch_size >= num_images:
                break

        X_features = np.concatenate(features_list, axis=0)
        labels = np.concatenate(labels_list, axis=0)

        np.save(self.preprocessed_X_train_file_path, X_features)
        np.save(self.preprocessed_Y_train_file_path, labels)

        return X_features, labels
Example No. 26
def extract_features(file_dir):

    base_model = VGG19(weights='imagenet')
    model = Model(inputs=base_model.input,
                  outputs=base_model.get_layer('flatten').output)

    vgg19_feature_list = []
    classe = []
    list_files = [file_dir + f for f in os.listdir(file_dir)]
    for idx, dirname in enumerate(list_files):
        list_images = [
            dirname + '/' + f for f in os.listdir(dirname)
            if re.search('jpg|JPG', f)
        ]
        class_name = list_files[idx]
        class_name = class_name[42:]  # strip the fixed-length directory prefix
        for i, fname in enumerate(list_images):

            print('Processing image: ' + list_images[i] + ' ' + str(i + 1) +
                  '/' + str(len(list_images)))

            img = image.load_img(fname, target_size=(224, 224))
            img_data = image.img_to_array(img)
            img_data = np.expand_dims(img_data, axis=0)
            img_data = preprocess_input(img_data)

            vgg19_feature = model.predict(img_data)
            vgg19_feature_np = np.array(vgg19_feature)
            vgg19_feature_list.append(vgg19_feature_np.flatten())
            classe.append(class_name)

    se = pd.Series(classe)
    vgg19_feature_list_np = np.array(vgg19_feature_list)
    np_data = pd.DataFrame(vgg19_feature_list_np)
    np_data['Class'] = se.values
    return np_data
Example No. 27
    def predict(self):
        ret, frame = self.cap.read()
        largeur_split = int(frame.shape[1] / 3)
        hauteur_split = int(frame.shape[0] / 3)

        # Split the frame into a 3x3 grid of tiles; the last row/column
        # absorb any remainder pixels.
        mat_im = []
        for j in range(3):
            x0 = largeur_split * j
            x1 = largeur_split * (j + 1) if j < 2 else frame.shape[1]
            column = []
            for i in range(3):
                y0 = hauteur_split * i
                y1 = hauteur_split * (i + 1) if i < 2 else frame.shape[0]
                column.append(frame[y0:y1, x0:x1])
            mat_im.append(column)

        # Classify each tile; report debris as soon as one tile matches.
        for i in range(3):
            for j in range(3):
                im = cv2.resize(np.array(mat_im[i][j]), (224, 224))
                x = image.img_to_array(im)
                x = preprocess_input(x)
                tab = [x]
                out = self.model.model.predict(np.array(tab))
                if self.debris_or_not_debris(out):
                    return True
        return False
Example No. 28
def compute_feature(image_filename, detector):

    print('Reading image ' + image_filename)
    img = image.load_img(image_filename, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    features = detector.predict(x)

    features = features.reshape(features.shape[1:])
    to_grid = features[:, :, 3]

    # Place a dense grid of keypoints over the feature map.
    step_size = 2
    kpt = [
        cv2.KeyPoint(x, y, step_size)
        for y in range(0, to_grid.shape[0], step_size)
        for x in range(0, to_grid.shape[1], step_size)
    ]

    des = []
    for kp in kpt:
        pt = kp.pt  # (x, y) as floats; cast to ints for array indexing
        des.append(features[int(pt[1]), int(pt[0]), :])
    return (kpt, des)
Example No. 29
def extract_feature_one_image(img_path, intermediate_layer_model,
                              feature_extraction_method, input_img):
    img = image.load_img(img_path, target_size=(input_img, input_img))
    img_data = image.img_to_array(img)
    img_data = np.expand_dims(img_data, axis=0)

    if (feature_extraction_method == 'pretrained_lenet'):
        img_data = img_data / 255
    elif (feature_extraction_method == 'pretrained_vgg16'):
        img_data = vgg16.preprocess_input(img_data)
    elif (feature_extraction_method == 'pretrained_vgg19'):
        img_data = vgg19.preprocess_input(img_data)
    elif (feature_extraction_method == 'pretrained_xception'):
        img_data = xception.preprocess_input(img_data)
    elif (feature_extraction_method == 'pretrained_resnet'):
        img_data = resnet.preprocess_input(img_data)
    elif (feature_extraction_method == 'pretrained_inception_resnet'):
        img_data = inception_resnet.preprocess_input(img_data)
    elif (feature_extraction_method == 'pretrained_nasnet'):
        img_data = nasnet.preprocess_input(img_data)

    features = intermediate_layer_model.predict(img_data)
    features = features.reshape((-1))
    return features
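A hedged usage sketch for the dispatcher above (the file name is a placeholder; in Keras, VGG19's `fc2` layer yields a 4096-dimensional vector):

# Hypothetical usage; 'dog.jpg' stands in for any local image.
from keras.applications.vgg19 import VGG19
from keras.models import Model

base = VGG19(weights='imagenet')
fc2_model = Model(inputs=base.input, outputs=base.get_layer('fc2').output)
feats = extract_feature_one_image('dog.jpg', fc2_model,
                                  'pretrained_vgg19', input_img=224)
print(feats.shape)  # (4096,)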
Example No. 30
def preprocess_image(image_path):
	img = load_img(image_path, target_size=(img_height, img_width))
	img = img_to_array(img)
	img = np.expand_dims(img, axis=0)
	img = vgg19.preprocess_input(img)
	return img
Example No. 31
#import matplotlib.pyplot as plt

# load VGG model
base_model = VGG16(weights='imagenet')

# visualize topology in an image
plot(base_model,
     to_file='modelVGG16.png',
     show_shapes=True,
     show_layer_names=True)

# read and process image
img_path = '/data/MIT/test/coast/art1130.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

# crop the model up to a certain layer
model = Model(inputs=base_model.input,
              outputs=base_model.get_layer('block5_conv2').output)

# get the features from images
features = model.predict(x)

if K.image_data_format() == 'channels_first':
    # Theano and TensorFlow order tensor dimensions differently.
    pass

weights = base_model.get_layer('block1_conv1').get_weights()
Example No. 32
def path_to_tensor(img_path):
    # Load one image (target size assumed to match the generator's `dim`
    # below) and add a batch axis.
    img = image.load_img(img_path, target_size=(img_width, img_height))
    x = image.img_to_array(img)
    return np.expand_dims(x, axis=0)


def paths_to_tensor(img_paths):
    list_of_tensors = [
        path_to_tensor(img_path) for img_path in tqdm(img_paths, ncols=80)
    ]
    return np.vstack(list_of_tensors)


ImageFile.LOAD_TRUNCATED_IMAGES = True

# pre-process the data for Keras
# train_tensors = preprocess_input( paths_to_tensor(train_files) )
# valid_tensors = preprocess_input( paths_to_tensor(valid_files) )
test_tensors = preprocess_input(paths_to_tensor(test_files))

# =============================================== #

params = {
    'dim': (img_width, img_height),
    'batch_size': 32,
    'n_classes': num_classes,
    'n_channels': 3,
    'shuffle': True
}

train_labels = dict(zip(train_files, train_targets[:, 1]))
training_generator = DataGenerator(train_files, train_labels, **params)

valid_labels = dict(zip(valid_files, valid_targets[:, 1]))
Example No. 33
def preprocess_image(image_path):
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg19.preprocess_input(img)
    return img
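This helper matches the Keras neural style transfer example; its usual counterpart, sketched below under the same `img_nrows`/`img_ncols` globals, inverts the VGG19 preprocessing:

def deprocess_image(x):
    # Undo vgg19.preprocess_input: re-add the ImageNet per-channel means,
    # convert BGR back to RGB, and clip to displayable pixel values.
    x = x.reshape((img_nrows, img_ncols, 3))
    x[:, :, 0] += 103.939
    x[:, :, 1] += 116.779
    x[:, :, 2] += 123.68
    x = x[:, :, ::-1]
    return np.clip(x, 0, 255).astype('uint8')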
Example No. 34
from keras.applications.vgg19 import VGG19
from keras.preprocessing import image
from keras.applications.vgg19 import preprocess_input
from keras.models import Model
import numpy as np
import pylab

base_model = VGG19(weights='imagenet',include_top=False, pooling='max')
#model = Model(inputs=base_model.input, outputs=base_model.get_layer('').output)

img_path = 'cat.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

features = base_model.predict(x)
print(features.shape)

print(features.tolist())