Example #1
def progress(port, mode, record):
    #    print(cap)

    cameList, number = CameraList(
        record=record)  # set up the input sources, either cameras or a video file

    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            detect = FaceDetect(sess)  # face detect class
            classifier = Classifier(sess, mode=mode)  # face recognition class
    if mode == 2:
        classifier.train()  # if mode is 2, train the SVM
        return 0
    client = Client(port)  # if we need to send data over UDP, set up the client
    while True:
        for cam in cameList:  # grab a frame from each camera and run face detection/recognition on it
            ret, frame = cam.capFrame()  # read a frame from the camera wrapper
            if not ret:
                cam.release()
                continue  # skip sources that no longer return frames
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # BGR to RGB
            res = reg_face(frame, detect, classifier, client, cam,
                           mode)  # main processing step
            res = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)  # RGB back to BGR
            if record:
                cam.writeFrame(res)

            cv2.imshow(cam.getLabel(), res)  # show result
            cv2.waitKey(1)
Example #2
def index(request):
    if 'text' not in request.POST:
        return HttpResponse(json.dumps([]), content_type='application/json')

    clf = cache.get('clf')
    if clf is None:
        clf = Classifier()
        # cache the classifier
        cache.set('clf', clf)
    
    emails = request.POST.getlist('text')
    response = clf.classify(emails)
    return HttpResponse(json.dumps(response), content_type='application/json')
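The view above reads a repeated 'text' form field, classifies each submitted email, and returns the results as JSON, caching the classifier so it is only built once. A minimal client sketch, assuming the view is wired to a hypothetical /classify/ URL:

import requests

# hypothetical URL for the index view above; the repeated 'text' fields
# map to request.POST.getlist('text') on the server side
resp = requests.post('http://localhost:8000/classify/',
                     data=[('text', 'free money, click this link now'),
                           ('text', 'meeting minutes attached')])
print(resp.json())  # list of classification results, one per email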
Example #3
 def classifier(self):
     with lock:
         if getattr(self, '_classifier', None) is None:
             print(" - Building new classifier - might take a while.")
             self._classifier = Classifier().build()
             print(" - Done!")
         return self._classifier
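The method above is presumably a property on some owning class, guarded by a module-level lock so the expensive build runs at most once. A minimal self-contained sketch of that lazy-initialization pattern, with a hypothetical Classifier stub standing in for the real, slow-to-build model:

import threading

lock = threading.Lock()

class Classifier:
    def build(self):
        # stand-in for an expensive model build
        return self

class Service:
    @property
    def classifier(self):
        with lock:
            # first caller pays the build cost; later callers get the cached instance
            if getattr(self, '_classifier', None) is None:
                self._classifier = Classifier().build()
            return self._classifier

svc = Service()
assert svc.classifier is svc.classifier  # built once, then reused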
Example #4
                    exit(1)
                else:
                    cap = cv2.VideoCapture(video)
                detect.translatevideo(cap)
            else:
                detect.translateface()


if __name__ == '__main__':
    import os
    path = os.path.join(os.path.dirname(os.path.dirname(__file__)),
                        'cera/1.avi')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                                log_device_placement=False))
        with sess.as_default():
            detect = FaceDetect(sess)
            classifier = Classifier(sess)
    # cap = cv2.VideoCapture(0)  # use the webcam instead of the sample video
    cap = cv2.VideoCapture(path)
    ret, frame = cap.read()
    client = Client()
    while ret:
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        res = reg_face(frame, detect, classifier, client, "demo")
        res = cv2.cvtColor(res, cv2.COLOR_RGB2BGR)
        cv2.imshow("demo", res)
        cv2.waitKey(1)
        ret, frame = cap.read()
Example #5
import json

from lib.featurizer import Featurizer
from lib.categorizer import Categorizer
from lib.classifier import Classifier

testdata = [
    {"text": "local chamber of commerce takes action on legislation"},
    {"text": "consumption of food is estimated to have increased twofold in the past year"},
    {"text": "banking company offers settlement in long running financial case"},
    {"text": "negotiations on foreign affairs between china and australia enter into a new phase"},
]

f = Featurizer.load('model/reuters')
x = f.transform(testdata)
x_inv = f.transform_inv(x)
del f

m = Classifier.load('model/reuters')
y = m.predict(x)
del m

c = Categorizer.load('model/reuters_y')
y = c.transform_inv(y)

for i, line in enumerate(testdata):
    print("Input:", json.dumps(line))
    print("Features:", json.dumps(x_inv[i]))
    print("Prediction:", y[i])
    # too bad we don't know the category name - https://stackoverflow.com/questions/45138290
Example #6
#!/usr/bin/env python3
import os
import json
from flask import Flask, jsonify, render_template, request
from lib.featurizer import Featurizer
from lib.categorizer import Categorizer
from lib.classifier import Classifier

app = Flask(__name__)

featurizer = Featurizer.load('model/reuters')
model = Classifier.load('model/reuters')
categorizer = Categorizer.load('model/reuters_y')

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/api/v1/predict', methods=['POST', 'GET']) # GET for easy debugging
def predict():
    data = request.get_json() or request.args
    x = featurizer.transform([data])
    y = model.predict(x)
    r = categorizer.transform_inv(y)
    return jsonify({ 'result': int(r[0]) })

if __name__ == '__main__':
    port = int(os.environ.get('PORT', 5000))
    app.run(port=port)
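A quick way to exercise the endpoint above, assuming the app is running locally on its default port 5000; the payload mirrors the {'text': ...} documents used in the earlier test script:

import requests

resp = requests.post(
    'http://localhost:5000/api/v1/predict',
    json={'text': 'banking company offers settlement in long running financial case'})
print(resp.json())  # {'result': <predicted category index>}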
Example #7
import os

from lib.classifier import Classifier

if __name__ == "__main__":

    dir_path = os.path.dirname(os.path.realpath(__file__))

    mobnet_model_file = dir_path + "/saved_models/mobilenet/mobilenet_output_graph.pb"
    mobnet_label_file = dir_path + "/saved_models/mobilenet/mobilenet_output_labels.txt"
    mobnet_input_layer = "input"
    mobnet_output_layer = "final_result"
    mobnet_input_name = "import/" + mobnet_input_layer
    mobnet_output_name = "import/" + mobnet_output_layer
    print "Initializing Mobilenet..."
    mobnet = Classifier(mobnet_model_file,
                        mobnet_label_file,
                        mobnet_input_name,
                        mobnet_output_name,
                        net="mobilenet")

    incptn_model_file = dir_path + "/saved_models/inception/inception_output_graph.pb"
    incptn_label_file = dir_path + "/saved_models/inception/inception_output_labels.txt"
    incptn_input_layer = "Mul"
    incptn_output_layer = "final_result"
    incptn_input_name = "import/" + incptn_input_layer
    incptn_output_name = "import/" + incptn_output_layer
    print "Initializing Inception..."
    incptn = Classifier(incptn_model_file,
                        incptn_label_file,
                        incptn_input_name,
                        incptn_output_name,
                        net="inception")
Example #8
def dict_inv(d):
    '''Invert a dictionary'''
    return {v: k for k, v in d.items()}

def x2text(x, word_index_inv):
    '''Return text from an x vector and inverted word index'''
    words = [word_index_inv.get(i) for i in x]
    words = [w for w in words if w]
    return ' '.join(words)

# we use our own featurizer, so first reconstruct input text
word_index_inv = dict_inv(word_index)
texts = [{'text': x2text(a, word_index_inv)} for a in x]
del x, word_index, word_index_inv

# extract features, save results
f = Featurizer()
x = f.fit_transform(texts)
f.save('model/reuters')
del f

# build class mapping
m = Categorizer()
y = m.fit_transform(y)
m.save('model/reuters_y')
del m

# train the classifier, save results
c = Classifier(x.shape[1], y.shape[1])
c.train(x, y, epochs=12, batch_size=128)
c.save('model/reuters')
Example #9
def dict_inv(d):
    '''Invert a dictionary'''
    return {v: k for k, v in d.items()}

def x2text(x, word_index_inv):
    '''Return text from an x vector and inverted word index'''
    words = [word_index_inv.get(i) for i in x]
    words = [w for w in words if w]
    return ' '.join(words)


# we use our own featurizer, so first reconstruct input text
word_index_inv = dict_inv(word_index)
texts = [{'text': x2text(a, word_index_inv)} for a in x]
del x, word_index, word_index_inv

# extract features, save results
f = Featurizer()
x = f.fit_transform(texts)
f.save('model/reuters')
del f

# build class mapping
m = Categorizer()
y = m.fit_transform(y)
m.save('model/reuters_y')
del m

# train the classifier, save results
c = Classifier(x.shape[1], y.shape[1])
c.train(x, y, epochs=12, batch_size=128)
c.save('model/reuters')
Example #10
import os
import time

import picamera
import picamera.array

from lib.classifier import Classifier
from lib.restful import Restful


if __name__ == "__main__":
    dir_path = os.path.dirname(os.path.realpath(__file__))
    model_file = dir_path + "/saved_models/mobilenet/mobilenet_output_graph.pb"
    label_file = dir_path + "/saved_models/mobilenet/mobilenet_output_labels.txt"
    input_layer = "input"
    output_layer = "final_result"
    input_name = "import/" + input_layer
    output_name = "import/" + output_layer

    with picamera.PiCamera() as camera:
        with picamera.array.PiRGBArray(camera) as output:
            c = Classifier(model_file, label_file, input_name, output_name, net="mobilenet")
            c.start()

            time.sleep(.1)
            try:
                while True:
                    camera.resolution = (640, 480)
                    # TODO: MAKE BUTTON HERE
                    print("Press 'Enter' to capture an image.")
                    dummy = input()

                    print("Capturing...")
                    start = time.time()
                    camera.capture(output, 'rgb')
                    print("Done! Labeling image...")
                    checkpoint = time.time()
Example #11
 def __init__(self, classifier_list: list[Classifier]):
     self._classifier_list = classifier_list
     self._classifier_list.append(Classifier())
Example #12
def get_inputs_dict(args):
    """Gets the input dict for the current model and dataset.
    """
    if cfg.CONST.DATASET == 'shapenet':
        if (args.text_encoder is True) or (args.end2end is
                                           True) or (args.classifier is True):
            inputs_dict = utils.open_pickle(cfg.DIR.TRAIN_DATA_PATH)
            val_inputs_dict = utils.open_pickle(cfg.DIR.VAL_DATA_PATH)
            test_inputs_dict = utils.open_pickle(cfg.DIR.TEST_DATA_PATH)
        else:  # Learned embeddings
            inputs_dict = utils.open_pickle(
                cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_TRAIN)
            val_inputs_dict = utils.open_pickle(
                cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_VAL)
            test_inputs_dict = utils.open_pickle(
                cfg.DIR.SHAPENET_METRIC_EMBEDDINGS_TEST)
    elif cfg.CONST.DATASET == 'primitives':
        if ((cfg.CONST.SYNTH_EMBEDDING is True) or (args.text_encoder is True)
                or (args.classifier is True)):
            if args.classifier and not cfg.CONST.REED_CLASSIFIER:  # Train on all splits for classifier
                tf.logging.info(
                    'Using all (train/val/test) splits for training')
                inputs_dict = utils.open_pickle(
                    cfg.DIR.PRIMITIVES_ALL_SPLITS_DATA_PATH)
            else:
                tf.logging.info('Using train split only for training')
                inputs_dict = utils.open_pickle(
                    cfg.DIR.PRIMITIVES_TRAIN_DATA_PATH)
            val_inputs_dict = utils.open_pickle(
                cfg.DIR.PRIMITIVES_VAL_DATA_PATH)
            test_inputs_dict = utils.open_pickle(
                cfg.DIR.PRIMITIVES_TEST_DATA_PATH)
        else:  # Learned embeddings
            inputs_dict = utils.open_pickle(
                cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_TRAIN)
            val_inputs_dict = utils.open_pickle(
                cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_VAL)
            test_inputs_dict = utils.open_pickle(
                cfg.DIR.PRIMITIVES_METRIC_EMBEDDINGS_TEST)
    else:
        raise ValueError('Please use a valid dataset (shapenet, primitives).')

    if args.tiny_dataset is True:
        if ((cfg.CONST.DATASET == 'primitives'
             and cfg.CONST.SYNTH_EMBEDDING is True)
                or (args.text_encoder is True)):
            raise NotImplementedError(
                'Tiny dataset not supported for synthetic embeddings.')

        ds = 5  # New dataset size
        if cfg.CONST.BATCH_SIZE > ds:
            raise ValueError(
                'Please use a smaller batch size than {}.'.format(ds))
        inputs_dict = utils.change_dataset_size(inputs_dict,
                                                new_dataset_size=ds)
        val_inputs_dict = utils.change_dataset_size(val_inputs_dict,
                                                    new_dataset_size=ds)
        test_inputs_dict = utils.change_dataset_size(test_inputs_dict,
                                                     new_dataset_size=ds)

    # Select the validation/test split
    if args.split == 'train':
        split_str = 'train'
        val_inputs_dict = inputs_dict
    elif (args.split == 'val') or (args.split is None):
        split_str = 'val'
        val_inputs_dict = val_inputs_dict
    elif args.split == 'test':
        split_str = 'test'
        val_inputs_dict = test_inputs_dict
    else:
        raise ValueError('Please select a valid split (train, val, test).')
    print('Validation/testing on {} split.'.format(split_str))

    if (cfg.CONST.DATASET
            == 'shapenet') and (cfg.CONST.SHAPENET_CT_CLASSIFIER is True):
        category_model_list, class_labels = Classifier.set_up_classification(
            inputs_dict)
        val_category_model_list, val_class_labels = Classifier.set_up_classification(
            val_inputs_dict)
        assert class_labels == val_class_labels

        # Update inputs dicts
        inputs_dict['category_model_list'] = category_model_list
        inputs_dict['class_labels'] = class_labels
        val_inputs_dict['category_model_list'] = val_category_model_list
        val_inputs_dict['class_labels'] = val_class_labels

    return inputs_dict, val_inputs_dict