def get_overfeat_features(imgs, weights_path, typ, layer=None, cache=None):
    """Returns features at layer for given image(s) from OverFeat model.

    Small (fast) network: 22 layers
    Large (accurate) network: 25 layers

    Args:
        imgs: Iterable of images each of shape (h,w,c)
        weights_path: Path to the OverFeat weights
        typ: 0 for small, 1 for large version of OverFeat
        layer: The layer to extract features from
        cache: Dict containing descs/other cached values
    """
    if cache is None:
        cache = {}
    if 'overfeat_descs' not in cache:
        # Initialize network
        print('Loading OverFeat ({}) model...'.format(typ))
        overfeat.init(weights_path, typ)
        # Determine feature layer if none specified
        if layer is None:
            if overfeat.get_n_layers() == 22:  # small
                layer = 19  # 16 also recommended
            else:  # large
                # Layer used by Zhang et al.
                layer = 22
        # Determine resize dim
        if typ == 0:
            resize = 231  # small network
        else:
            resize = 221  # large network
        # Allocate for feature descriptors
        descs = []
        # Run images through network
        print('Running images through OverFeat, extracting features '
              'at layer {}.'.format(layer))
        for idx, img in enumerate(imgs):
            if (idx + 1) % 100 == 0:
                print('Processing image {}...'.format(idx + 1))
            # Preprocess image
            img = overfeat_preprocess(img, resize)
            # Run through model
            _ = overfeat.fprop(img)
            # Retrieve feature output
            desc = overfeat.get_output(layer)
            descs.append(desc)
        # Free network
        overfeat.free()
        # NumPy-ify
        descs = np.asarray(descs)
        cache.update(overfeat_descs=descs)
    else:
        descs = cache['overfeat_descs']
    return descs, cache
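# A minimal sketch of the overfeat_preprocess helper referenced above -- an
# assumption for illustration, not the original implementation. It mirrors the
# center-crop, resize, and channels-first transpose used by the standalone
# examples further down (assumes RGB or grayscale input).
def overfeat_preprocess(img, resize):
    import numpy as np
    from scipy.misc import imresize  # same resize helper the other snippets use
    # Replicate grayscale images into three channels
    if img.ndim == 2:
        img = np.repeat(img[:, :, np.newaxis], 3, axis=2)
    # Center-crop to a square
    h0, w0 = img.shape[0], img.shape[1]
    d0 = min(h0, w0)
    y0 = int(round((h0 - d0) / 2.))
    x0 = int(round((w0 - d0) / 2.))
    img = img[y0:y0 + d0, x0:x0 + d0, :]
    # Resize to the network input size and move channels first
    img = imresize(img, (resize, resize)).astype(np.float32)
    return np.ascontiguousarray(img.transpose((2, 0, 1)))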
def extract_feat(self, imgs, ftype):
    '''extract features'''
    if ftype == 'hog':
        from skimage.feature import hog
        L = np.sqrt(len(imgs[0])).astype(int)
        X_im = [imresize(arr.reshape((L, L)), (64, 64)) for arr in imgs]
        pool = Pool(processes=8)
        X = pool.map(hog, X_im)
        pool.close()
        return np.asarray(X)
    elif ftype == 'overfeat':
        overfeat.init('OverFeat/data/default/net_weight_0', 1)
        L = np.sqrt(len(imgs[0])).astype(int)
        imgs_color = [imresize(arr.reshape((L, L)), (231, 231)) for arr in imgs]
        if len(imgs_color[0].shape) != 3:
            # map grayscale images to 3 channels via a colormap, dropping alpha
            cmap = pl.get_cmap('jet')
            imgs_color = [np.delete(cmap(im / 255.), 3, 2) for im in imgs_color]
        imgs_roll = [im.transpose((2, 0, 1)).astype(np.float32) for im in imgs_color]
        feats = np.zeros((len(imgs_roll), 4096), dtype=float)
        for i in range(len(imgs_roll)):
            b = overfeat.fprop(imgs_roll[i])
            f22 = overfeat.get_output(22)
            f22 = np.asarray(f22).squeeze().astype(np.float)
            feats[i, :] = f22
        return feats
    elif ftype == 'pix':
        return imgs
    else:
        raise NameError('{0} is not implemented!'.format(ftype))
import csv

import numpy as np
import overfeat
from sklearn.externals import joblib
from scipy.ndimage import imread
from scipy.misc import imresize

LAYER = 6
X = None
L = []
with open('data/train_outputs.csv', 'rb') as f:
    reader = csv.reader(f, delimiter=',')
    next(reader, None)
    for data in reader:
        L.append(data)

# quick network: layers 17, 19
# slow network: layers 20, 22
overfeat.init('/Users/npow/code/OverFeat/data/default/net_weight_0', 0)

def extract_features(layer, file_name):
    global X
    image = imread(file_name)
    image.resize((image.shape[0], image.shape[1], 1))
    # overfeat expects rgb, so replicate the grayscale values twice
    image = np.repeat(image, 3, 2)
    image = imresize(image, (231, 231)).astype(np.float32)
    # numpy loads image with colors as last dimension, so transpose tensor
    h = image.shape[0]
    w = image.shape[1]
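# A hypothetical completion of the truncated extract_features above (an
# assumption based on the surrounding snippets, not the original code). It
# reuses the imports above: move channels first, forward-propagate, and return
# the flattened output of the requested layer.
def extract_features_sketch(layer, file_name):
    image = imread(file_name)
    if image.ndim == 2:
        # overfeat expects rgb, so replicate the grayscale values
        image = np.repeat(image[:, :, np.newaxis], 3, 2)
    image = imresize(image, (231, 231)).astype(np.float32)
    image = np.ascontiguousarray(image.transpose((2, 0, 1)))  # channels first
    overfeat.fprop(image)
    return np.asarray(overfeat.get_output(layer)).flatten()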
if filename == ".directory": continue image = imread(os.path.join(directory, filename)).astype(numpy.float32) # numpy loads image with colors as last dimension, transpose tensor h = image.shape[0] w = image.shape[1] c = image.shape[2] image = image.reshape(w * h, c) image = image.transpose() image = image.reshape(c, h, w) # run overfeat on the image overfeat.fprop(image) features = overfeat.get_output(19) d[filename] = features.copy() pickle.dump(d, open(dirname + "_full.txt", "w")) return d overfeat.init('/home/juanjo/U/overfeat/data/default/net_weight_0', 0) rootDir = "/home/juanjo/U/Búsqueda por Contenido en Imágenes y Videos/Proyecto/ResizedImages_6/" # dTotal = {} for subdir in os.listdir(rootDir): print subdir if os.path.exists(subdir + "_full.txt"): continue d = getDictOfFeatures(os.path.join(rootDir, subdir), subdir) # dTotal[subdir] = d # pickle.dump(dTotal, open("features_full.txt", "w"))
# resize and crop into a 231x231 image
h0 = image.shape[0]
w0 = image.shape[1]
d0 = float(min(h0, w0))
image = image[int(round((h0-d0)/2.)):int(round((h0-d0)/2.)+d0),
              int(round((w0-d0)/2.)):int(round((w0-d0)/2.)+d0), :]
image = imresize(image, (231, 231)).astype(numpy.float32)

# numpy loads image with colors as last dimension, transpose tensor
h = image.shape[0]
w = image.shape[1]
c = image.shape[2]
image = image.reshape(w*h, c)
image = image.transpose()
image = image.reshape(c, h, w)
print "Image size :", image.shape

# initialize overfeat. Note that this takes time, so do it only once if possible
overfeat.init('../../data/default/net_weight_0', 0)

# run overfeat on the image
b = overfeat.fprop(image)

# display top 5 classes
b = b.flatten()
top = [(b[i], i) for i in xrange(len(b))]
top.sort()
print "\nTop classes :"
for i in xrange(5):
    print(overfeat.get_class_name(top[-(i+1)][1]))
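# Note (not part of the original snippet): the reshape/transpose/reshape
# sequence above is equivalent to a single channels-first transpose; the
# ascontiguousarray call keeps the result contiguous in memory, matching what
# the reshape-based version produces.
#
#     image = numpy.ascontiguousarray(image.transpose((2, 0, 1)))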
# OverFeat has 22 layers, including original image
num_layers = 22

# For filename purposes
layer = 'all'
layer = 10

if layer == 'all':
    # Put all layers into one stacked confusion matrix
    confusion_matrix = np.zeros((num_layers, len(training_images), len(testing_images)))
else:
    # Make the confusion matrix for a single layer
    confusion_matrix = np.zeros((len(training_images), len(testing_images)))

overfeat.init(overfeat_root + 'data/default/net_weight_0', 0)

for i in range(len(training_images)):
    print("Training Image %s of %s" % (i, len(training_images)))
    if smush:
        image = smush_overfeat_images(training_images[i])
    else:
        image = load_overfeat_image(training_images[i])
    b = overfeat.fprop(image)
    if layer == 'all':
        # Calculate features for all layers at once
        features = []
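# A plausible way to realize the 'all' branch above (an assumption, not the
# original code): after a single overfeat.fprop() call, each layer's
# activations can be read back with get_output(l).
def all_layer_features(num_layers):
    # Assumes overfeat.fprop() has just been run on the current image.
    return [np.asarray(overfeat.get_output(l)).flatten() for l in range(num_layers)]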
def fit(self, X=None, y=None):
    import overfeat  # soft dep
    overfeat.init(self.pretrained_params, self.network_size)
    return self
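# A minimal sklearn-style wrapper sketch around the fit() above -- the class
# name, attribute names, and feature layer are assumptions for illustration,
# not the original API. `pretrained_params` points at the OverFeat weight file
# and `network_size` selects 0 (fast) or 1 (accurate).
class OverFeatFeaturesSketch(object):
    def __init__(self, pretrained_params, network_size=0, feature_layer=19):
        self.pretrained_params = pretrained_params
        self.network_size = network_size
        self.feature_layer = feature_layer

    def fit(self, X=None, y=None):
        import overfeat  # soft dep
        overfeat.init(self.pretrained_params, self.network_size)
        return self

    def transform(self, X):
        import overfeat
        import numpy as np
        feats = []
        for img in X:  # each img: float32 array of shape (3, 231, 231)
            overfeat.fprop(img)
            feats.append(np.asarray(overfeat.get_output(self.feature_layer)).flatten())
        return np.asarray(feats)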
def initialize(self):
    # initialize overfeat. Note that this takes time, so do it only once if possible
    if self.debug == True:
        pdb.set_trace()
    overfeat.init('../../data/default/net_weight_' + str(self.network), self.network)
w0 = image.shape[1]
d0 = float(min(h0, w0))
h1 = int(round(231*h0/d0))
w1 = int(round(231*w0/d0))
image = imresize(image, (h1, w1)).astype(numpy.float32)
# crop the central 231x231 patch of the resized image
image = image[int(round((h1-231)/2.)):int(round((h1-231)/2.)+231),
              int(round((w1-231)/2.)):int(round((w1-231)/2.)+231), :]

# numpy loads image with colors as last dimension, transpose tensor
h = image.shape[0]
w = image.shape[1]
c = image.shape[2]
image = image.reshape(w*h, c)
image = image.transpose()
image = image.reshape(c, h, w)
print "Image size :", image.shape

# initialize overfeat. Note that this takes time, so do it only once if possible
overfeat.init('../../data/default/net_weight_0', 0)

# run overfeat on the image
b = overfeat.fprop(image)

# display top 5 classes
b = b.flatten()
top = [(b[i], i) for i in xrange(len(b))]
top.sort()
print "\nTop classes :"
for i in xrange(5):
    print(overfeat.get_class_name(top[-(i+1)][1]))
        # numpy loads image with colors as last dimension, transpose tensor
        h = image.shape[0]
        w = image.shape[1]
        c = image.shape[2]
        image = image.reshape(w*h, c)
        image = image.transpose()
        image = image.reshape(c, h, w)
    except:
        return None
    return image

def print_features(image_path, features):
    line = image_path + '\t' + '\t'.join(map(str, features))
    print(line)

# initialize overfeat. Note that this takes time, so do it only once if possible
overfeat.init(network_path, 0)

for image_path in sys.stdin:
    image_path = image_path[:-1]  # remove \n char
    image = read_image(image_path)
    # run overfeat on the image
    features = []
    if image is not None:
        b = overfeat.fprop(image)
        features = overfeat.get_output(layer_index).flatten()
    print_features(image_path, features)
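# A hypothetical, self-contained version of read_image (an assumption based on
# the fragment above, not the original code): load, convert to float32 RGB,
# resize to the network input, and move channels first.
def read_image_sketch(image_path, size=231):
    import numpy
    from scipy.ndimage import imread
    from scipy.misc import imresize
    try:
        image = imread(image_path)
        if image.ndim == 2:
            # replicate grayscale into three channels
            image = numpy.repeat(image[:, :, numpy.newaxis], 3, 2)
        image = imresize(image, (size, size)).astype(numpy.float32)
        # numpy loads image with colors as last dimension, transpose tensor
        image = numpy.ascontiguousarray(image.transpose((2, 0, 1)))
    except Exception:
        return None
    return image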
'''train and test scikit.svm on photo features extracted using overfeat.'''

# read in and reshape photos
photo_files = get_photo_files(get_photo_path())
photos = [read_in_photo(path) for path in photo_files]

# split photos into training and test
test_photo = photos.pop(-2)
train_photos = photos
# (TODO don't hard code targets)
test_target = np.array([1])
train_targets = np.array([0,0,0,0,0,0,0,0,1,1,1,1,1,1,1])
# TODO make this cleaner
photo_files.pop(-2)

# initialize overfeat weights. fast net: 0. large net: 1.
overfeat.init("./data/default/net_weight_0", 0)

# concurrently extract photo features and predictions using overfeat
pool = Pool()
likelihoods, train_features = [np.array(tup) for tup in
                               zip(*pool.map(process_through_net, train_photos))]
pool.close()
pool.join()

# print overfeat predictions
print_overfeat_predictions(photo_files, likelihoods)

# train svm on extracted photo features
classifier = svm.SVC()
classifier.fit(train_features, train_targets)

# test classification
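# A minimal sketch of what process_through_net might do (the helper is not
# shown in the snippet above, so this is an assumption): run one preprocessed
# photo through OverFeat and return the class likelihoods together with a
# hidden-layer feature vector.
def process_through_net_sketch(photo, feature_layer=19):
    import numpy as np
    import overfeat
    # photo: float32 array of shape (3, 231, 231), channels first
    likelihoods = np.asarray(overfeat.fprop(photo)).flatten()
    features = np.asarray(overfeat.get_output(feature_layer)).flatten()
    return likelihoods, features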
from collections import namedtuple
from datetime import datetime
from multiprocessing import Process, Pool

from sensor_msgs.msg import Image
from rospy.numpy_msg import numpy_msg
from scipy.misc import imresize, imsave
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold, cross_val_score
from skimage.feature import hog

import overfeat

OVERFEAT_DIM = 231
KINECT_WIDTH = 640
KINECT_HEIGHT = 480

# initialize overfeat. 0 = fast net, 1 = accurate net.
overfeat.init("../data/weights/net_weight_0", 0)

# list of unique classes as strings.
# indices correspond to the 'target' of each 'classification'.
classes = []

# list of training samples.
training = []

# each Sample is features data and the target of a classification.
Sample = namedtuple('Sample', ['features', 'depth', 'classification', 'time', 'image'])

# will train svm classifier on features extracted using overfeat
svm_clf = svm.LinearSVC()

# use `image` and `depth` in other functions to access kinect frames.
        testing_images.append(index_mat[i][0, j][0])

#net_types = ["OverFeat", "GoogLeNet", "CaffeNet", "Cifar10"]
net_types = ["GoogLeNet", "CaffeNet", "Cifar10"]
num_images = [10, 50, 100, 500, 1000]
timing_data = {}

for net_type in net_types:
    timing_data[net_type] = {}
    for num_image in num_images:
        print("Timing %s for %s images" % (net_type, num_image))
        # OverFeat does not use caffe
        if net_type == 'OverFeat':
            overfeat.init(overfeat_root + 'data/default/net_weight_0', 0)
            start = time.clock()
            for i in range(num_image):
                image = smush_overfeat_images(training_images[i])
                b = overfeat.fprop(image)
            end = time.clock()
            elapsed = end - start
            print(elapsed)
            timing_data[net_type][num_image] = elapsed
        # Use caffe for all other models
        else:
from PIL import Image
import numpy
import skimage.color
from skimage.feature import daisy, hog
import subprocess
import overfeat
from scipy.ndimage import imread
from scipy.misc import imresize
import database_file
import time
from find_obj import init_feature
from asift import affine_detect

print "Initializing overfeat"
overfeat.init('overfeat/overfeat/data/default/net_weight_0', 0)
#overfeat_initialized = False

def extract_overfeat(image_path):
    # if overfeat_initialized == None or not overfeat_initialized:
    #     overfeat_initialized = True
    print "Overfeat: ", image_path
    image = imread(image_path)
    print "Image shape: ", image.shape
    if len(image.shape) == 2 or image.shape[2] == 2:
        image = skimage.color.gray2rgb(image)
    elif image.shape[2] == 4:
        # drop the alpha channel by copying RGB into a fresh array
        image_rgb = numpy.zeros((image.shape[0], image.shape[1], 3), numpy.uint8)
        image_rgb[:, :, 0] = image[:, :, 0]