Example #1
def __init__(self, modelText):
    # store the model name and build the matching feature extractor
    self.modelText = modelText
    print("[INFO] loading {}...".format(modelText))
    if modelText in ("inception", "xception", "vgg16", "vgg19", "resnet"):
        Network = MODELS[modelText]
        self.model = Network(include_top=False)
    elif modelText == "googlenet":
        self.model = GoogLeNetTransformer()
    elif modelText == "overfeat":
        self.model = OverfeatTransformer(output_layers=[-3])
    elif modelText == "lab888":
        self.model = LABModel()
    elif modelText == "lab444":
        # 4-bin LAB variant (bins argument assumed to mirror HSVModel's)
        self.model = LABModel(bins=[4, 4, 4])
    elif modelText == "hsv888":
        self.model = HSVModel(bins=[8, 8, 8])
    elif modelText == "hsv444":
        self.model = HSVModel(bins=[4, 4, 4])
    elif modelText == "haralick":
        self.model = Haralick()
    elif modelText == "lbp":
        self.model = LBP()
    elif modelText == "hog":
        self.model = HOG()
    elif modelText == "haarhog":
        self.model = HaarHOG()
    elif modelText == "densenet":
        self.model = DenseNet()
    elif "annulus" in modelText:
        # names such as "annulus_<bags>_<segments>" encode their parameters
        bags = int(modelText[modelText.find('_') + 1:modelText.rfind('_')])
        p_segments = int(modelText[modelText.rfind('_') + 1])
        self.model = HistogramsSeveralMasksAnnulusLabSegments(
            plainImagePath=
            "/home/joheras/Escritorio/Research/Fungi/FungiImages/plain.jpg",
            bags=[bags, bags, bags],
            p_segments=p_segments)
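For comparison, the same name-to-extractor dispatch could be expressed as a lookup table of factory callables. This is only a sketch covering a few of the names above, and build_extractor is a hypothetical helper; the extractor classes are the ones the snippet already references.

# Sketch only: factory-style dispatch for a few of the model names above.
EXTRACTOR_FACTORIES = {
    "overfeat": lambda: OverfeatTransformer(output_layers=[-3]),
    "lab888": lambda: LABModel(),
    "hsv444": lambda: HSVModel(bins=[4, 4, 4]),
    "haralick": lambda: Haralick(),
}

def build_extractor(modelText):
    # Raises KeyError for names that are not registered.
    return EXTRACTOR_FACTORIES[modelText]()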
Example #2
import cv2
import decimal
import json
import ijson.backends.yajl2_cffi as ijson
from sklearn_theano.feature_extraction import OverfeatTransformer

tr = OverfeatTransformer(output_layers=[8])


class DecimalEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return float(o)
        return super(DecimalEncoder, self).default(o)


with open('../workspace/ds.json') as inh:
    with open('../workspace/ds_deep.json', 'w') as outh:
        # stream the items of the (potentially large) JSON array one at a time
        ds = ijson.items(inh, 'item')
        outh.write('[')

        for i, item in enumerate(ds):
            print('running', i + 1)
            if i > 0:
                outh.write(',')
            # load the image, resize it to the 231x231 input the transformer
            # expects, and attach the extracted feature vector to the record
            img = cv2.imread('set1/' + item['file'])
            img = cv2.resize(img, (231, 231))
            item['deep'] = tr.transform(img)[0].tolist()
            json.dump(item, outh, cls=DecimalEncoder)

        outh.write(']')
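ijson yields JSON numbers as decimal.Decimal, which the standard json encoder refuses to serialize; that is what DecimalEncoder above works around. A quick standalone check, assuming the class definition from this snippet:

import decimal
import json

# The stock encoder raises TypeError on Decimal values;
# DecimalEncoder converts them to float before serializing.
print(json.dumps({"score": decimal.Decimal("0.25")}, cls=DecimalEncoder))
# -> {"score": 0.25}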
Example #3
        print('y>x', crop.shape)
    else:
        h = 231
        w = sz[1] * h // sz[0]
        mid = w // 2
        rimg = cv2.resize(img, (w, h))
        crop = rimg[:, mid - h // 2:mid + h // 2 + 1]
        print('y<x', crop.shape)

    return crop


if __name__ == '__main__':
    catlists = json.load(open('catimagelist.json'))
    #print catlists
    tranformer = OverfeatTransformer(output_layers=[-2])

    features = None
    ctype = []
    cmap = {}

    ct = 0
    for k, v in catlists.items():
        print(v['name'])
        cnt = 0
        cmap[ct] = {'id': k, 'name': v['name']}
        for fn in v['images']:
            img = cv2.imread(fn)
            if img is None:
                continue
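The top of the cropping helper in this example is cut off (the branch for images taller than they are wide is missing). A minimal self-contained sketch of the same center-crop-to-231 idea, written for Python 3 and assuming only OpenCV, is:

import cv2

def center_crop_231(img, side=231):
    # Resize the shorter side to `side` pixels, then crop a centered square.
    h0, w0 = img.shape[:2]
    if h0 >= w0:                      # taller than wide: crop rows
        w, h = side, int(h0 * side / w0)
        rimg = cv2.resize(img, (w, h))
        mid = h // 2
        return rimg[mid - side // 2:mid + side // 2 + 1, :]
    else:                             # wider than tall: crop columns
        h, w = side, int(w0 * side / h0)
        rimg = cv2.resize(img, (w, h))
        mid = w // 2
        return rimg[:, mid - side // 2:mid + side // 2 + 1]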
Example #4
from sklearn_theano.datasets import fetch_asirra
from sklearn_theano.feature_extraction import OverfeatTransformer
from sklearn_theano.utils import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.metrics import classification_report, accuracy_score
import matplotlib.pyplot as plt
import time

asirra = fetch_asirra(image_count=20)
X = asirra.images.astype('float32')
y = asirra.target
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    train_size=.6,
                                                    random_state=1999)
tf = OverfeatTransformer(output_layers=[-3])
clf = LogisticRegression()
pipe = make_pipeline(tf, clf)
t0 = time.time()
pipe.fit(X_train, y_train)
print("Total transform time")
print("====================")
print(time.time() - t0)
print()
y_pred = pipe.predict(X_test)
print(classification_report(y_test, y_pred))
print()
print("Accuracy score")
print("==============")
print(accuracy_score(y_test, y_pred))
f, axarr = plt.subplots(1, 2)
Example #5
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from sklearn_theano.datasets import load_sample_image
from sklearn_theano.feature_extraction import OverfeatTransformer, OverfeatLocalizer

# Show the original image
f, axarr = plt.subplots(2, 3)
X = load_sample_image("sloth.jpg")
axarr[0, 0].imshow(X)
axarr[0, 0].axis('off')

# Show a single box
axarr[0, 1].imshow(X)
axarr[0, 1].axis('off')
r = Rectangle((0, 0), 231, 231, fc='yellow', ec='black', alpha=.8)
axarr[0, 1].add_patch(r)

# Show all the boxes being processed
axarr[0, 2].imshow(X)
axarr[0, 2].axis('off')
clf = OverfeatTransformer(force_reshape=False)
X_tf = clf.transform(X)
x_points = np.linspace(0, X.shape[1] - 231, X_tf[0].shape[3])
y_points = np.linspace(0, X.shape[0] - 231, X_tf[0].shape[2])
xx, yy = np.meshgrid(x_points, y_points)
for x, y in zip(xx.flat, yy.flat):
    axarr[0, 2].add_patch(
        Rectangle((x, y), 231, 231, fc='yellow', ec='black', alpha=.4))

# Get all points with sloth in the top 5 labels
sloth_label = "three-toed sloth, ai, Bradypus tridactylus"
clf = OverfeatLocalizer(match_strings=[sloth_label])
sloth_points = clf.predict(X)[0]
axarr[1, 0].imshow(X)
axarr[1, 0].axis('off')
axarr[1, 0].autoscale(enable=False)
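The snippet stops before the matched points are drawn. Assuming sloth_points is an N x 2 array of (x, y) image coordinates, a hedged continuation could simply overlay them on the panel prepared above:

# Sketch only: overlay the matched points on the panel set up above.
axarr[1, 0].scatter(sloth_points[:, 0], sloth_points[:, 1],
                    color='orange', s=15, alpha=.6)
plt.show()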
Example #6
def __init__(self, layerNum):
    # store the layer number and initialize the Overfeat transformer
    self.layerNum = layerNum
    self.of = OverfeatTransformer(output_layers=[layerNum])
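The wrapper above only stores the transformer. A hypothetical describe method, mirroring how transform is called in Example #2, might look like this:

# Hypothetical companion method (not part of the original snippet):
def describe(self, image):
    # Forward one image through Overfeat and return the selected layer's
    # activations as a flat feature vector.
    return self.of.transform(image)[0].flatten()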
Example #7
                              shape=(len(imagePaths), ),
                              dtype=h5py.special_dtype(vlen=str))
featuresDB = db.create_dataset("features",
                               shape=(len(imagePaths), output_size),
                               dtype="float")

# seed the RNG so the shuffle (and thus the results) is reproducible
random.seed(42)
random.shuffle(imagePaths)

print("[INFO] encoding labels...")
le = LabelEncoder()
le.fit([p.split("/")[-2] for p in imagePaths])

print("[INFO] initializing network...")
overfeat = OverfeatTransformer(output_layers=config["layer_num"])

print("[INFO] extracting features...")

for start in range(0, len(imagePaths), batch_size):
    end = start + batch_size

    #read and resize the images
    images = [cv2.imread(impath) for impath in imagePaths[start:end]]
    images = [cv2.cvtColor(image, cv2.COLOR_BGR2RGB) for image in images]
    images = np.array(
        [cv2.resize(image, tuple(config["image_size"])) for image in images],
        dtype="float")
    #dump the image ID and features to the hdf5 database
    imageIDDB[start:end] = [
        ":".join(impath.split("/")[-2:]) for impath in imagePaths[start:end]
Example #8
def __init__(self, configPath):
    # load the experiment configuration, the trained classifier, the label
    # encoder, and the Overfeat feature extractor
    self.config = json.load(open(configPath))
    self.model = pickle.load(open(self.config["classifier_path"], "rb"))
    self.le = pickle.load(open(self.config["label_encoder_path"], "rb"))
    self.overfeat = OverfeatTransformer(output_layers=self.config["layer_num"])
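The constructor above only wires the pieces together. A hypothetical predict method that uses them (the method name and flow are assumptions, not taken from the source) could be:

# Hypothetical companion method (names and flow are assumptions):
def predict(self, image):
    # Extract Overfeat features, classify them, and decode the label.
    features = self.overfeat.transform(image)[0]
    prediction = self.model.predict([features])[0]
    return self.le.inverse_transform([prediction])[0]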
Example #9
def __init__(self, layerNum):
    # store the layer number and initialize the Overfeat transformer
    self.layerNum = layerNum
    self.of = OverfeatTransformer(output_layers=[layerNum])