Exemplo n.º 1
0
def getMoreFeatures():
    net = DecafNet()

    features = []
    labels = []
    counter = 0

    for participant in os.listdir(os.path.join(data_dir, image_dir)):
        for sequence in os.listdir(
                os.path.join(data_dir, image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(
                    os.listdir(
                        os.path.join(data_dir, image_dir, participant,
                                     sequence)))
                cutoff = len(image_files) / 2
                image_files = image_files[cutoff::]
                label_file = open(
                    os.path.join(data_dir, label_dir, participant, sequence,
                                 image_files[-1][:-4] + "_emotion.txt"))
                label = eval(label_file.read())
                label_file.close()
                for image_file in image_files:
                    print counter, image_file
                    imarray = numpy.asarray(
                        Image.open(
                            os.path.join(data_dir, image_dir, participant,
                                         sequence, image_file)))
                    scores = net.classify(imarray, center_only=True)
                    features.append(net.feature(feature_level))
                    labels.append(label)
                    counter += 1

    numpy.save("featuresMore", numpy.array(features))
    numpy.save("labelsMore", numpy.array(labels))
Exemplo n.º 2
0
def full_check_decaf(win_slide=5, win_size=1024, blob_name='fc6_cudanet_out'):
    """Slide a win_size x win_size window over the raster in win_slide
    steps, classify each window with DecafNet features + the pickled
    classifier, save windows predicted as class 2, and return a
    (len(rows), len(cols)) array of ones with detected cells set to 2.

    blob_name must match the blob used when the classifier was trained.
    """
    from decaf.scripts.imagenet import DecafNet
    net = DecafNet()
    clf = joblib.load("420_decaf/classifier_decaf.pkl")
    g_raster = gdal.Open('20-21-22-part2.tif') # test.tif
    result = {}
    cols = range(0, g_raster.RasterXSize - win_size, win_slide)
    rows = range(0, g_raster.RasterYSize - win_size, win_slide)
    full = len(rows) * len(cols)
    count = 0
    pbar = progressbar.ProgressBar(maxval=full).start()
    # Reuse the precomputed window offsets instead of rebuilding the
    # identical range() objects in the loop headers.
    for i in cols:
        for j in rows:
            img = get_sample(g_raster, i, j, win_size)
            net.classify(img, True)
            tmp = net.feature(blob_name)  # keep consistent with training
            result[(j, i)] = clf.predict(tmp)
            if result[(j, i)] == 2:
                io.imsave("420_decaf/slide_target/%s_%s_%s_%s.png" % (j, i, j+win_size, i+win_size), img)
            pbar.update(count + 1)
            count = count + 1
    pbar.finish()

    arr = np.ones((len(rows), len(cols)))
    for k, v in result.items():
        if v != 0 and v[0] == 2:
            arr[k[0]/win_slide, k[1]/win_slide] = v[0]
    return arr
Exemplo n.º 3
0
def getPeakFeatures():
    net = DecafNet()

    features = numpy.zeros((number_sequences, feature_length))
    labels = numpy.zeros((number_sequences, 1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir, image_dir)):
        for sequence in os.listdir(
                os.path.join(data_dir, image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(
                    os.listdir(
                        os.path.join(data_dir, image_dir, participant,
                                     sequence)))
                image_file = image_files[-1]
                print counter, image_file
                imarray = cv2.imread(
                    os.path.join(data_dir, image_dir, participant, sequence,
                                 image_file))
                imarray = cv2.cvtColor(imarray, cv2.COLOR_BGR2GRAY)
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level)  #.flatten()
                label_file = open(
                    os.path.join(data_dir, label_dir, participant, sequence,
                                 image_file[:-4] + "_emotion.txt"))
                labels[counter] = eval(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeak5", features)
    numpy.save("labelsPeak5", labels)
Exemplo n.º 4
0
 def __init__(
         self,
         layer_name,
         model_path='dist/decaf-release/model/imagenet.decafnet.epoch90',
         meta_path='dist/decaf-release/model/imagenet.decafnet.meta'):
     """Wrap a pretrained DecafNet and remember which blob to extract.

     layer_name: name of the Decaf blob to read features from later.
     model_path / meta_path: pretrained ImageNet DecafNet files.
     """
     self.layer_name = layer_name
     self.net = DecafNet(model_path, meta_path)
     # Starts with a single no-op transform; presumably augmentation
     # hooks are appended elsewhere -- confirm against callers.
     self.transforms = [NopTransform()]
Exemplo n.º 5
0
    def fit(self, X=None, y=None):
        """Lazily instantiate the DecafNet; X and y are ignored.

        Follows the scikit-learn convention of returning self so the
        transformer can be used inside a Pipeline.
        """
        from decaf.scripts.imagenet import DecafNet  # soft dep

        if self.net_ is not None:
            return self
        self.net_ = DecafNet(self.pretrained_params, self.pretrained_meta)
        return self
Exemplo n.º 6
0
 def __init__(self, decaf_folder=None, classifer_file=None):
     """Load a pretrained DecafNet plus a pickled classifier.

     decaf_folder: directory holding imagenet.decafnet.epoch90/.meta
         (defaults to ../models/imagenet_pretrained/).
     classifer_file: pickled classifier path
         (defaults to ../models/lg_classifier_public).
     """
     if decaf_folder is None:
         decaf_folder = '../models/imagenet_pretrained/'
     if classifer_file is None:
         classifer_file = "../models/lg_classifier_public"
     self.net = DecafNet(
         path.join(decaf_folder, 'imagenet.decafnet.epoch90'),
         path.join(decaf_folder, 'imagenet.decafnet.meta'))
     self.feat_layer = 'fc6_cudanet_out'
     # Close the classifier file deterministically instead of leaking
     # the handle returned by the inline open().
     with open(classifer_file, "r") as f:
         self.classifier = cPickle.load(f)
Exemplo n.º 7
0
 def __init__(self, model_spec_filename, model_filename=None,\
              wnid_words_filename=None, center_only=False, wnid_subset = []):
     """
     *** PRIVATE CONSTRUCTOR ***

     Loads a DecafNet from a spec + parameter file and builds the
     label <-> description <-> label-id lookup tables from the
     wnid-words file (tab-separated lines; field 1 = wnid, field 2 =
     comma-separated descriptions). The first positional argument may
     instead be a NetworkDecafParams bundle (retro-compatibility).

     NOTE(review): the mutable default wnid_subset=[] is shared across
     calls; it is only read here, but callers should not mutate it.
     """
     # the following is just an hack to allow retro-compatibility
     # with existing code
     if isinstance(model_spec_filename, NetworkDecafParams):
         params = model_spec_filename
         model_spec_filename = params.model_spec_filename
         model_filename = params.model_filename
         wnid_words_filename = params.wnid_words_filename
         center_only = params.center_only
         wnid_subset = params.wnid_subset
         if wnid_subset != []:
             print 'Warning: subset of labels not supported yet'
     else:
         # Explicit-filenames path: all three files are mandatory.
         assert isinstance(model_spec_filename, str)
         assert model_filename != None
         assert wnid_words_filename != None
     # load Decaf model
     self.net_ = DecafNet(model_filename, model_spec_filename)
     self.center_only_ = center_only
     # build a dictionary label --> description
     self.dict_label_desc_ = {}
     dict_desc_label = {}
     fd = open(wnid_words_filename)
     for line in fd:
         temp = line.strip().split('\t')
         wnid = temp[1].strip()
         self.dict_label_desc_[wnid] = temp[2].strip()
         # Only the first comma-separated description maps back to the
         # wnid, matching the names DecafNet reports in label_names.
         dict_desc_label[temp[2].split(',')[0]] = wnid
     fd.close()
     # build a dictionary label --> label_id
     self.dict_label_id_ = {}
     self.labels_ = []
     for i, desc in enumerate(self.net_.label_names):
         self.dict_label_id_[dict_desc_label[desc]] = i
         self.labels_.append(dict_desc_label[desc])
     # Load the mean vector from file
     # mean of 3 channels
     self.net_.mean_img = np.mean(np.mean(self.net_._data_mean, axis=1),
                                  axis=0)
Exemplo n.º 8
0
def getPeakFaceFeatures():
    net = DecafNet()
    cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')

    features = numpy.zeros((number_sequences, feature_length))
    labels = numpy.zeros((number_sequences, 1))
    counter = 0
    # Maybe sort them
    for participant in os.listdir(os.path.join(data_dir, image_dir)):
        for sequence in os.listdir(
                os.path.join(data_dir, image_dir, participant)):
            if sequence != ".DS_Store":
                image_files = sorted(
                    os.listdir(
                        os.path.join(data_dir, image_dir, participant,
                                     sequence)))
                image_file = image_files[-1]
                print counter, image_file
                imarray = cv2.imread(
                    os.path.join(data_dir, image_dir, participant, sequence,
                                 image_file))
                imarray = cv2.cvtColor(imarray, cv2.COLOR_BGR2GRAY)
                rects = cascade.detectMultiScale(imarray, 1.3, 3,
                                                 cv2.cv.CV_HAAR_SCALE_IMAGE,
                                                 (150, 150))
                if len(rects) > 0:
                    facerect = rects[0]
                    imarray = imarray[facerect[1]:facerect[1] + facerect[3],
                                      facerect[0]:facerect[0] + facerect[2]]
                scores = net.classify(imarray, center_only=True)
                features[counter] = net.feature(feature_level).flatten()
                label_file = open(
                    os.path.join(data_dir, label_dir, participant, sequence,
                                 image_file[:-4] + "_emotion.txt"))
                labels[counter] = eval(label_file.read())
                label_file.close()
                counter += 1

    numpy.save("featuresPeakFace5", features)
    numpy.save("labelsPeakFace5", labels)
Exemplo n.º 9
0
from classify.ttypes import *

# Thrift files
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer

#*******************************************
import sys
sys.path.append("decaf")
from decaf.scripts.imagenet import DecafNet
import cStringIO as StringIO
import Image
import numpy as np
# Global DecafNet instance shared by the Thrift handler; loaded once at
# server startup so each request reuses the same model.
net = DecafNet('imagenet.decafnet.epoch90', 'imagenet.decafnet.meta')
from time import time as tic

#********************************************


# Server implementation
class ClassifyHandler:
    ## return current time stamp
    #def showCurrentTimestamp(self):
    #    timeStamp = time.time()
    #    return str(timeStamp)

    ## print something to string, wait 10 secs, than print something again
    #def asynchronousJob(self):
    #    print 'Assume that this work takes 10 seconds'
from sklearn.base import TransformerMixin
from skimage.io import imread
import numpy as np
#import operator
import logging
logging.getLogger().setLevel(logging.ERROR)

from PIL import Image
img_size = (256,256,3)
def resize(img, size=None):
    """Resize an image array via PIL and return it as a numpy array.

    img: image as a numpy array (any shape PIL.Image.fromarray accepts).
    size: optional (width, height) target; defaults to the module-level
        img_size's first two entries, preserving the original behavior.
    """
    if size is None:
        size = img_size[0:2]
    tmp = Image.fromarray(img)
    tmp = tmp.resize(size)
    return np.array(tmp)

from decaf.scripts.imagenet import DecafNet
NET = DecafNet()

class DecafFeature(TransformerMixin):
    """ 
    Extract Decaf Feature
        
    Parameters
    ----------
    layer_name : str
      Decaf layer name, default:fc6_cudanet_out
    
    img_size : tuple
      the size of X, default: (256, 256, 3)
      
    """
    def __init__(self, layer='fc6_cudanet_out', img_size=(256, 256, 3)):
from decaf.scripts.imagenet import DecafNet
from skimage import io
import numpy as np
import scipy.io as sio
import os
import sys

# Batch DeCAF feature extraction: read up to <num_imgs> sequentially
# numbered JPEGs from <image_dir>, extract fc6 features with a
# pretrained DecafNet and save them as a MATLAB .mat file.
if len(sys.argv) != 5:
    print "Usage ", sys.argv[0], "<model_root_dir> <image_dir> <output_feature_path> <num_imgs>"
    exit(1)

model_root = sys.argv[1]
net = DecafNet(model_root + 'imagenet.decafnet.epoch90', model_root + 'imagenet.decafnet.meta')
img_dir = sys.argv[2]
feature_path = sys.argv[3]
NUM_IMGS = int(sys.argv[4])
FEATURE_DIM = 4096 #fc6_cudanet_out's dimension

# Rows for missing image files stay all-zero.
features = np.zeros((NUM_IMGS,FEATURE_DIM))
for i in range(NUM_IMGS):
    # Images are named 00001.jpg, 00002.jpg, ... (1-based).
    filename = img_dir + "/%05d.jpg"  %(i+1)
    if os.path.exists(filename):
        sys.stdout.write("Extracting DeCAF feature from image %d\n" %(i+1))
        img = io.imread(filename)
        net.classify(img, center_only=True)
        features[i,:] = net.feature('fc6_cudanet_out')

sio.savemat(feature_path,{'features':features})


from decaf.scripts.imagenet import DecafNet
import numpy, scipy, PIL, csv, glob
import numpy as np
from PIL import Image
from sklearn.decomposition import PCA
import os
import cv2, scipy
import pickle
from mlabwrap import mlab

# Data/model locations, relative to the working directory.
ucfloc = 'decaf/KitchenData/'
imgnetPath = 'decaf/imagenet_pretrained/'
flowdir = 'flowdata/'

# Pretrained ImageNet DecafNet used as a fixed feature extractor.
net = DecafNet(imgnetPath + 'imagenet.decafnet.epoch90',
               imgnetPath + 'imagenet.decafnet.meta')
# PCA used downstream to reduce features to 20 dimensions.
pca = PCA(n_components=20)


class Feature():
    """Lightweight record tying a decaf feature vector to its category
    label and the path/id of the sample it was extracted from."""

    def __init__(self, decaf, category, _id):
        self.path = _id
        self.category = category
        self.decaf = decaf


def imToNumpy(img):
    """Open the image at path *img* with PIL and return it as a numpy array."""
    pil_image = PIL.Image.open(img)
    return numpy.asarray(pil_image)


def getFeature(img):
Exemplo n.º 13
0
    try:
        return img_as_ubyte(resize(img, (256,256), mode='wrap')) # resize 后是float64
    except:
        #保存检测过程图像
        io.imsave("420_decaf/tmp/%s_%s_%s_%s.png" % \
                 (lu_offset_x, lu_offset_y, w, h), img)
        tmp = cv2.imread("420_decaf/tmp/%s_%s_%s_%s.png" % \
                        (lu_offset_x, lu_offset_y, w, h))
        
        return cv2.resize(img, (256,256), interpolation=cv2.INTER_LINEAR)
        #return resize(img, (256,256))


# Load decaf and the classifier
from decaf.scripts.imagenet import DecafNet
net = DecafNet()
clf = joblib.load("420_decaf/classifier_svc.pkl")
blob_name='fc6_cudanet_out'


# Command-line arguments
if len(sys.argv) < 3:
    print "usage: object_classify.py path_to_image path_to_segmentation_folder..."
    sys.exit()
else:
    img_path = sys.argv[1]
    segmentation_folder = sys.argv[2:]
# Open the raster image
g_raster = gdal.Open(img_path) # original raster matching the segmentation files
    
# Read the segmentation result shp files
def main():
    """Emotion-controlled dodging game.

    A webcam feed is scanned for a face; the cropped face is handed to a
    background ComputeFeatures thread (DecafNet-based) that emits emotion
    labels. The smoothed emotion drives the enemy spawn rate: "happy"
    speeds spawning up, anything else slows it down. The player dodges
    falling enemies with the arrow keys; collisions cost health.
    """
    net = DecafNet()

    video = cv2.VideoCapture(0)
    cascade = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')

    # LIFO queues so the worker always sees the most recent face first.
    arrays = Queue.LifoQueue()
    results = Queue.LifoQueue()

    # Background thread converting face crops into emotion labels.
    detector = ComputeFeatures(net, [], arrays, results)
    detector.daemon = True
    detector.start()

    pygame.init()

    # width/height/white/framerate are module-level globals -- assumed
    # defined elsewhere in this file; confirm before reuse.
    screen = pygame.display.set_mode((width, height))
    pygame.display.set_caption('This is a video game')

    background = pygame.Surface((width, height))
    background.fill(white)
    screen.blit(background, (0, 0))
    pygame.display.flip()

    allsprites = pygame.sprite.RenderUpdates()

    # Some parameters #

    size = 10
    enemy_surface = pygame.Surface((size, size))
    speed = 200.0
    playersize = 44

    # # # # # # # # # #

    player = Unit([256.0, 256.0], pygame.Surface((playersize, playersize)))
    allsprites.add(player)

    # Seconds of accumulated time between enemy spawns.
    enemy_counter = 1.0
    clock = pygame.time.Clock()
    elapsed = 0.0
    accumulator = 0.0

    run = True
    face = None

    # Sliding window of the last six detections; the active emotion is
    # the mode of this window, smoothing out single-frame glitches.
    emotion_window = [
        "neutral", "neutral", "neutral", "neutral", "neutral", "neutral"
    ]
    #emotion_accumulator = 0.0
    current_emotion = "neutral"
    emotion = "neutral"
    health = 50
    game_time = 0.0

    while run:
        # clock.tick returns milliseconds; convert to seconds.
        seconds = elapsed / 1000.0
        accumulator += seconds
        game_time += seconds
        #emotion_accumulator += seconds

        # Quit on window close or Escape.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
            elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                run = False

            #elif event.type == pygame.KEYDOWN and event.key == pygame.K_w:
            #    current = Unit((random.randint(0,512), random.randint(0,512)))
            #    allsprites.add(current)
            #elif event.type == pygame.KEYDOWN and event.key == pygame.K_s:
            #    for sprite in allsprites:
            #        sprite.position = [sprite.position[0]+random.randint(-5,5),sprite.position[1]+random.randint(-5,5)]

        # Spawn a new enemy at a random x at the top of the screen
        # whenever enough time has accumulated.
        if accumulator > enemy_counter:
            allsprites.add(Unit([random.randint(0, 512), 0], enemy_surface))
            accumulator = 0.0

        # Move enemies down; drop the ones that leave the screen.
        for sprite in allsprites.sprites():
            if sprite.image == enemy_surface:
                sprite.position[1] += speed * seconds
            if sprite.position[1] > height - 10:
                allsprites.remove(sprite)

        pressed = pygame.key.get_pressed()

        if pressed[pygame.K_RIGHT]:
            player.position[0] += speed * seconds
        if pressed[pygame.K_LEFT]:
            player.position[0] -= speed * seconds
        if pressed[pygame.K_DOWN]:
            player.position[1] += speed * seconds
        if pressed[pygame.K_UP]:
            player.position[1] -= speed * seconds

        allsprites.update()

        # Temporarily remove the player so spritecollide only counts
        # enemy hits (and destroys the enemies that collided).
        allsprites.remove(player)
        health -= len(pygame.sprite.spritecollide(player, allsprites, True))
        allsprites.add(player)

        allsprites.clear(screen, background)
        changed = allsprites.draw(screen)
        pygame.display.update(changed)

        # Grab a webcam frame and look for a face.
        frame = video.read()[1]
        rects = cascade.detectMultiScale(frame, 1.3, 3,
                                         cv2.cv.CV_HAAR_SCALE_IMAGE,
                                         (150, 150))

        #arrays.put(frame)

        # Idea: increase the size of the rectangle
        if len(rects) > 0:
            facerect = rects[0]
            #facerect[0] -= (rectangle_margin-30)
            #facerect[2] += rectangle_margin
            #facerect[1] -= (rectangle_margin-20)
            #facerect[3] += rectangle_margin
            # Crop to (x, y, w, h), grayscale it and queue it for the
            # background emotion detector.
            face = frame[facerect[1]:facerect[1] + facerect[3],
                         facerect[0]:facerect[0] + facerect[2]]
            face = cv2.cvtColor(face, cv2.COLOR_BGR2GRAY)
            arrays.put(face)
            if True:
                for (x, y, w, h) in rects:
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0),
                                  2)

        # Consume the latest emotion result, if any, and adapt difficulty.
        if not results.empty():
            emotion = results.get()
            emotion_window.append(emotion)
            emotion_window.pop(0)
            current_emotion = max(set(emotion_window),
                                  key=emotion_window.count)
            print "Current emotion:", current_emotion, "- Last detected:", emotion  #, emotion_window
            if current_emotion == "happy":
                enemy_counter += 0.03
                enemy_counter = min(0.7, enemy_counter)
            else:
                enemy_counter += -0.02
                enemy_counter = max(0.01, enemy_counter)
            print "Health:", health, "- Time:", game_time

        if health < 1:
            run = False
            print "Game over! Score:", game_time

        # NOTE(review): prefer "face is not None" -- comparing a numpy
        # array to None with != is deprecated/ambiguous; confirm.
        if face != None:
            cv2.imshow("face", face)
        cv2.imshow("frame", frame)
        c = cv2.waitKey(1)
        if c == 27:
            cv2.destroyWindow("frame")
            cv2.destroyWindow("face")
            break

        elapsed = clock.tick(framerate)

    video.release()
    cv2.destroyAllWindows()

    pygame.quit()