Example #1
def serve_client(socket, graph):
    client = socket[0]  # socket is a (connection, address, index) tuple; element 0 is the connected socket
    logging.info("Received Client {}.".format(socket[1]))  # element 1 is the client address
    N = socket[2]  # element 2 is a per-request counter used to name the saved image
    data = client.recv(1024)
    data = data.decode()
    # print(data)
    url = data[:data.find('[END]')]  # strip the "[END]" terminator appended by the client
    logging.info("Client submitted URL {}".format(url))
    print_local = "image/images000" + str(N + 1) + ".jpg"
    local = "./" + print_local
    request.urlretrieve(url, local)
    logging.info("Image saved to {}".format(print_local))
    with graph.as_default():
        model = SqueezeNet()
        img = image.load_img(local, target_size=(227, 227))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)  #type of preds: <class 'numpy.ndarray'>
        preds = decode_predictions(preds)
    result = "(\"{}\", {})".format(preds[0][0][1], preds[0][0][2])
    logging.info("SqueezeNet result: {}".format(result))
    sendit = result.encode()  # send the result back as a plain UTF-8 string
    # print(sendit)
    client.sendall(sendit)
    client.shutdown(2)  # 2 == socket.SHUT_RDWR: stop both directions before closing
    logging.info("Client connection closed")
    client.close()
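The handler above expects the client to send a URL terminated by the literal string "[END]" and replies with a '("label", score)' string. A minimal client sketch under those assumptions (the host, port, and URL below are placeholders, not part of the original code):

import socket

def submit_url(url, host="127.0.0.1", port=8000):
    # Send one "[END]"-terminated URL and read back the classification result.
    with socket.create_connection((host, port)) as sock:
        sock.sendall((url + "[END]").encode())
        reply = sock.recv(1024).decode()  # e.g. '("tabby", 0.61)'
    return reply

# print(submit_url("http://example.com/cat.jpg"))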
Example #2
    def set_tags(self):
        """
            Run a convolutional neural network on the server-side per image
            to associate tags (doesn't take more than a few seconds), and
            runs in parallel since thread is started per client request
        """
        import numpy as np
        import keras
        from keras.preprocessing import image
        from keras_squeezenet import SqueezeNet
        from keras.applications.imagenet_utils import preprocess_input, decode_predictions
        # dtype must be uint8
        # Encode image to ascii for Python 3 compatibility
        # base64.decodestring() is deprecated (removed in Python 3.9); decodebytes() is the replacement
        server_buffer = np.frombuffer(base64.decodebytes(
            self.image.encode('ascii')),
                                      dtype="uint8")
        server_frame = cv2.imdecode(server_buffer, cv2.IMREAD_UNCHANGED)

        keras.backend.clear_session()
        model = SqueezeNet()
        resized = cv2.resize(server_frame,
                             dsize=(227, 227),
                             interpolation=cv2.INTER_CUBIC)
        x = image.img_to_array(resized)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        preds = decode_predictions(model.predict(x))
        # preds[0] is a list of (class_id, label, score) tuples for the top predictions
        print("Tags: {}".format(preds[0]))
        for _, pred, score in preds[0]:
            self.tags.append(pred)
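As the docstring notes, tagging is meant to run in a thread per client request. A minimal sketch of that driver, assuming a hypothetical handler object that already carries the base64-encoded image in self.image and collects labels in self.tags:

import threading

def handle_request(handler):
    # Run the CNN tagging in the background so the request thread is not blocked.
    t = threading.Thread(target=handler.set_tags)
    t.start()
    return t  # join() later if the caller needs the tags synchronously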
Example #3
def squeezenet_featurize(imagename, imagedir):
	''' 
	This network model has AlexNet-level accuracy with a small footprint (5.1 MB).
	The pretrained weights are converted from the original Caffe network.

	This may be useful for production purposes if the accuracy is similar to other
	types of featurizations.

	See https://github.com/rcmalli/keras-squeezenet
	'''

	model = SqueezeNet()

	img = image.load_img(imagedir+'/'+imagename, target_size=(227, 227))
	x = image.img_to_array(img)
	x = np.expand_dims(x, axis=0)
	x = preprocess_input(x)

	preds = model.predict(x)
	print('Predicted:', decode_predictions(preds))
	features = preds[0]
	labels=list()

	for i in range(len(features)):
		label='squeezenet_feature_%s'%(str(i))
		labels.append(label)

	return features, labels 
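A brief usage sketch; the directory and file name below are placeholders, and the imports (SqueezeNet, image, np, preprocess_input, decode_predictions) are assumed to be in scope as in the other examples:

# Hypothetical call: featurize one image and pair each feature value with its label.
features, labels = squeezenet_featurize('cat.jpg', './images')
feature_dict = dict(zip(labels, features))  # {'squeezenet_feature_0': ..., ...}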
Example #4
class Learner:
    """Incremental classifier: SqueezeNet's 1000-dimensional ImageNet output is used
    as a feature vector for an L1-penalised logistic-regression SGDClassifier."""

    def __init__(self, n_classes=3):
        self._fitted = False
        self.X = []
        self.y = []
        self.X_conv = []
        self._lr = SGDClassifier(
            loss='log',
            penalty='l1',
            #warm_start=True,
            max_iter=50,
            tol=1e-3,
            random_state=2 + 3)
        self._features = SqueezeNet(weights='imagenet',
                                    input_shape=(image_size, image_size, 3))

    def add_image(self, img, klass):
        self.X.append(img)
        self.y.append(klass)
        X = np.array(self.X[-1:], dtype=np.float32)
        X = preprocess_input(X)
        X_conv = self._features.predict(X)
        print(decode_predictions(X_conv))
        X_conv = X_conv[0]
        print(X_conv.shape)
        X_conv = X_conv / np.linalg.norm(X_conv)
        self.X_conv.append(X_conv)
        print(Counter(self.y))

    def fit(self):
        X_conv = np.array(self.X_conv)
        y = np.array(self.y)
        print(X_conv.shape, y.shape)
        if len(set(self.y)) > 1:
            self._lr.fit(X_conv, y)
            self._fitted = True

    def predict(self, img):
        if not self._fitted:
            return 0
        # a batch of one image
        X = np.array(img, dtype=np.float32)
        X = X[np.newaxis]
        X = preprocess_input(X)
        X_conv = self._features.predict(X)[0]
        X_conv = X_conv / np.linalg.norm(X_conv)
        return self._lr.predict([X_conv])[0]
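A minimal usage sketch with random placeholder frames; image_size is assumed to be defined at module level (e.g. 227, SqueezeNet's input size), as the original snippet implies:

import numpy as np

image_size = 227  # assumption for illustration
frames = [np.random.randint(0, 255, (image_size, image_size, 3)).astype(np.float32)
          for _ in range(3)]

learner = Learner(n_classes=2)
learner.add_image(frames[0], 0)
learner.add_image(frames[1], 1)
learner.fit()                      # fits once at least two distinct classes are present
print(learner.predict(frames[2]))  # returns 0 until fit() has succeeded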
Example #5
def make_predictions(image_file):
    model = SqueezeNet()
    img = image.load_img(image_file, target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    return decode_predictions(preds)
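A short usage sketch; the file name is a placeholder:

# decode_predictions() returns one list per input image; each entry is (class_id, label, score).
top5 = make_predictions('cat.jpg')[0]
print(top5[0][1], top5[0][2])  # top-1 label and its score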
Example #6
 def testTHPrediction(self):
     keras.backend.set_image_dim_ordering('th')
     model = SqueezeNet()
     img = image.load_img('images/cat.jpeg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     preds = model.predict(x)
     decoded_preds = decode_predictions(preds)
     #print('Predicted:', decoded_preds)
     self.assertIn(decoded_preds[0][0][1], 'tabby')
Example #7
def server_worker(client_socket, address, tf_graph, pic_num):

    logger = logging.getLogger('logger' + str(pic_num))
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    formatter = logging.Formatter(
        "[%(asctime)s], [%(levelname)s], [%(processName)s], [%(threadName)s] :  %(message)s"
    )
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    logger.info("Recieve Client ('%s', %s)." % (address[0], address[1]))

    recieve_string = ""

    # Read data from a client and send it response back
    while True:
        #print(client_socket)
        data = client_socket.recv(2048)
        data_decode = data.decode(("utf-8"))
        recieve_string = recieve_string + data_decode
        # All the message from client is recieved completely
        if recieve_string.find("[END]") != -1:
            break
    recieve_string = recieve_string[:-5]
    logger.info("Client submitted URL %s" % recieve_string)
    request.urlretrieve(recieve_string, "images/%d.jpg" % pic_num)
    print("get the pic")

    logger.info("finish download")

    with tf_graph.as_default():
        model = SqueezeNet()
        img = image.load_img("images/%d.jpg" % pic_num, target_size=(227, 227))  # use this request's image
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        result = decode_predictions(preds)
        #print('Predicted:', result)
        result = result[0][0]

    logger.info("SqueezeNet result: (\"%s\", %.3f)" % (result[1], result[2]))
    result = [result[1], str(result[2])]
    result_string = ';'.join(result)
    client_socket.sendall(result_string.encode("utf-8"))
    client_socket.shutdown(socket.SHUT_RDWR)
    client_socket.close()
    logger.info("Client connection closed")
    del logger
Example #8
 def new_state(self, futur_state0, state0, state_1, state_2):
     """Build a (1, 4, 1000) state tensor from SqueezeNet prediction vectors of the
     four most recent frames (read from fixed image paths), newest first."""
     sq_model = SqueezeNet()
     start = time.time()
     #img = state_2
     img = image.load_img('/home/i16djell/Bureau/state_2.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature1 = sq_model.predict(x)
     
     #img = state_1
     img = image.load_img('/home/i16djell/Bureau/state_1.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature2 = sq_model.predict(x)
     
     #img = state0
     img = image.load_img('/home/i16djell/Bureau/state0.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature3 = sq_model.predict(x)  
     
     #img = futur_state0
     img = image.load_img('/home/i16djell/Bureau/futur_state0.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature4 = sq_model.predict(x)
     
     features = [feature4, feature3, feature2, feature1]
     features = np.stack(features, axis=0)
     features = features.reshape(1, 4, 1000)
     
     return features
Example #9
 def initial_state(self, state0):
     """Initialise the (1, 4, 1000) state by repeating the SqueezeNet prediction
     vector of the first frame four times."""
     #turn the image into gray then resize it to (84,84) then compress the data to 8bits
     #state0=np.uint8(resize(rgb2gray(state0),(frame_width,frame_height))*255)
     
     
     sq_model = SqueezeNet()
     start = time.time()
     #img = state0
     img = image.load_img('/home/i16djell/Bureau/state0.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature = sq_model.predict(x)
     
     features = [feature, feature, feature, feature]
     features = np.stack(features, axis=0)
     features = features.reshape(1, 4, 1000)
     return features
Example #10
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
import time

if __name__ == '__main__':

    model = SqueezeNet()
    # model.load_weights('../model/squeezenet_weights_tf_dim_ordering_tf_kernels.h5', by_name=True)

    start = time.time()

    img = image.load_img('blah/train/cats/cat.3.jpg', target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    duration = time.time() - start
    print('Predicted:', decode_predictions(preds))
    print('Inference took %.3f seconds' % duration)
Example #11
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg


model = SqueezeNet()

img = image.load_img('pexels-photo-280207.jpeg', target_size=(227, 227))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
all_results = decode_predictions(preds)
for results in all_results:
    for result in results:
        print('Probability %0.2f%% => [%s]' % (100*result[2], result[1]))
        #result_text= 'Probability %0.2f%% => [%s]' % (100*result[2], result[1])
        #break
#plt.figure(num=1,figsize=(8, 6), dpi=80)
#plt.imshow(img)
#plt.text(130,90,result_text,horizontalalignment='center', verticalalignment='center',fontsize=16,color='black')
#plt.axis('off')
#plt.show()

Example #12
    dataset[i] = cv2.resize(dataset[i],
                            dimension,
                            interpolation=cv2.INTER_AREA)

#get embeddings from image
#model =VGG16(weights='imagenet',include_top=False)

model = SqueezeNet()
plot_model(model, to_file='vgg.png')  # note: the plotted architecture is SqueezeNet despite the file name

embeddings = []
for img in dataset:
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = preprocess_input(img)
    embeddings.append(model.predict(img))

#final_dataset=np.reshape(embeddings,(2377,1*7*7*512))
final_dataset = np.reshape(embeddings, (2377, 1000))

#split dataset

from sklearn.model_selection import train_test_split

train, test, train_label, test_label = train_test_split(final_dataset,
                                                        labels,
                                                        test_size=0.2,
                                                        random_state=50)

from sklearn.ensemble import RandomForestClassifier
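The snippet imports RandomForestClassifier but stops before using it. A plausible continuation, written here as an assumption rather than the original author's code:

# Hypothetical follow-up: fit a random forest on the SqueezeNet embeddings.
clf = RandomForestClassifier(n_estimators=100, random_state=50)
clf.fit(train, train_label)
print("Test accuracy:", clf.score(test, test_label))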
Example #13
model = SqueezeNet()

# Save the model architecture as JSON (structure only, without weights or training configuration)
model_json = model.to_json()
# with open("model.json", "w") as json_file:
#     json_file.write(model_json)

# Save only the weights
model.save_weights("model.h5")

# Save the full model (architecture + weights)
model.save('my_model.h5')  # creates an HDF5 file 'my_model.h5'
del model  # delete the existing model
model = load_model(
    'my_model.h5')  # returns a compiled model identical to the previous one

#img = image.load_img(img_path, target_size=(224, 224))  # load the image and resize it
img = image.load_img('images/cat.jpeg', target_size=(227, 227))
x = image.img_to_array(img)  # convert to a NumPy array
x = np.expand_dims(x, axis=0)  # add a batch dimension
x = preprocess_input(x)  # apply ImageNet preprocessing

preds = model.predict(x)  # prediction: a 1000-dimensional class-score vector
print(
    'Predicted:',
    decode_predictions(preds))  # decode_predictions returns the 5 most probable entries: (class name, label, probability)
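The JSON architecture produced above is never read back in the snippet. As a sketch (an assumption, not part of the original code), the weights-only file could be restored by rebuilding the architecture from that JSON first:

from keras.models import model_from_json

# Hypothetical counterpart to save_weights(): rebuild the layers from JSON, then load model.h5.
restored = model_from_json(model_json)
restored.load_weights("model.h5")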
Example #14
    def execute_model(self, qn):
        # Get data
        with tf.device('/cpu:0'):
            (train_data, train_labels), (eval_data,
                                         eval_labels) = self.get_data()

        # Build model
        model = SqueezeNet(input_shape=train_data[0].shape,
                           weights=None,
                           classes=10)
        # model = get_model(train_data[0].shape)
        model.summary()

        model.compile(optimizer='adam',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        # Train model
        if self.log_dir is not None:
            checkpoint_folder = self.log_dir + "/" + qn
        else:
            checkpoint_folder = "./" + qn

        last_epoch_ran = 0
        from os import path, makedirs
        print("****Checkpoint folder:", checkpoint_folder)
        checkpoint_path = checkpoint_folder + "/train.ckpt"
        if path.exists(checkpoint_path):
            print("\tRestoring checkpoint from %s" % checkpoint_path)
            model.load_weights(checkpoint_path)
            with open(path.join(checkpoint_folder, "epoch.txt"), "r") as f:
                last_epoch_ran = f.read()
            last_epoch_ran = int(last_epoch_ran)
            print("\tInitial epoch: %d" % last_epoch_ran)
        else:
            print("****Creating folder", checkpoint_folder)
            makedirs(checkpoint_folder, exist_ok=True)
        # checkpoint_path = checkpoint_folder + "/train-{epoch:04d}.ckpt"

        class SaveCheckpoint(keras.callbacks.ModelCheckpoint):
            def __init__(self,
                         filepath,
                         monitor='val_loss',
                         verbose=0,
                         save_best_only=False,
                         save_weights_only=False,
                         mode='auto',
                         period=1):
                super(SaveCheckpoint,
                      self).__init__(filepath,
                                     monitor=monitor,
                                     verbose=verbose,
                                     save_best_only=save_best_only,
                                     save_weights_only=save_weights_only,
                                     mode=mode,
                                     period=period)

            def on_epoch_end(self, epoch, logs=None):
                super(SaveCheckpoint, self).on_epoch_end(epoch, logs)
                with open(path.join(path.dirname(self.filepath), "epoch.txt"),
                          "w") as f:
                    f.write(str(epoch))

        save_checkpoint = SaveCheckpoint(checkpoint_path,
                                         save_weights_only=True,
                                         verbose=1)
        callbacks = [save_checkpoint]

        history = model.fit(train_data,
                            train_labels,
                            batch_size=self.batch_size,
                            epochs=self.epochs,
                            initial_epoch=last_epoch_ran,
                            verbose=1,
                            shuffle=True,
                            validation_split=self.validation_split,
                            callbacks=callbacks)
        # history = model.fit(train_data, train_labels, epochs=self.epochs)
        # Test model
        print("Training done. Evaluating model")
        test_loss, test_acc = model.evaluate(eval_data,
                                             eval_labels,
                                             batch_size=self.batch_size,
                                             verbose=1)

        print("test_loss: {}. test_acc: {}".format(test_loss, test_acc))

        # confusion matrix
        preds = model.predict(eval_data, batch_size=self.batch_size, verbose=1)
        preds = np.argmax(preds, 1)
        model.summary()

        print("eval_labels: {}. max: {}.\npreds: {}. max: {}.".format(
            eval_labels.shape, np.max(eval_labels), preds.shape,
            np.max(preds)))
        # with keras.backend.get_session() as sess:
        #     conf_mat = tf.confusion_matrix(eval_labels, preds)
        #     conf_mat = sess.run(conf_mat)
        from sklearn.metrics import confusion_matrix
        conf_mat = confusion_matrix(eval_labels, preds)  # assign so conf_mat can be returned below
        # clear memory
        keras.backend.clear_session()

        return history, conf_mat, test_loss, test_acc
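A short sketch of consuming the returned values, assuming matplotlib is available and that the surrounding object was constructed elsewhere (its constructor is not shown here, so 'runner' and 'q1' are placeholders):

import matplotlib.pyplot as plt

# Hypothetical follow-up: plot the training curve and report the confusion matrix.
history, conf_mat, test_loss, test_acc = runner.execute_model("q1")
plt.plot(history.history["loss"], label="train loss")
plt.plot(history.history["val_loss"], label="val loss")
plt.legend()
plt.show()
print("test acc: %.4f" % test_acc)
print(conf_mat)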