Example #1
def serve_client(socket, graph):
    # 'socket' here is a (connection, address, image_index) tuple handed to the worker
    client = socket[0]
    logging.info("Received Client {}.".format(socket[1]))
    N = socket[2]
    data = client.recv(1024)
    data = data.decode()
    # print(data)
    url = data[:data.find('[END]')]  # strip the [END] terminator appended by the client
    logging.info("Client submitted URL {}".format(url))
    print_local = "image/images000" + str(N + 1) + ".jpg"
    local = "./" + print_local
    request.urlretrieve(url, local)
    logging.info("Image saved to {}".format(print_local))
    with graph.as_default():
        model = SqueezeNet()
        img = image.load_img(local, target_size=(227, 227))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)  #type of preds: <class 'numpy.ndarray'>
        preds = decode_predictions(preds)
    result = "(\"{}\", {})".format(preds[0][0][1], preds[0][0][2])
    logging.info("SqueezeNet result: {}".format(result))
    sendit = result.encode()  # the reply is sent as a plain encoded string rather than JSON
    # print(sendit)
    client.sendall(sendit)
    client.shutdown(2)  # 2 == socket.SHUT_RDWR: stop further sends/receives before close() releases the socket
    logging.info("Client connection closed")
    client.close()
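A minimal client-side sketch of the protocol serve_client expects (the host, port, and example URL below are assumptions, not part of the original): the client sends a URL terminated by the "[END]" marker and reads back the '("label", score)' reply string.

import socket

def send_url(url, host="127.0.0.1", port=8000):  # host/port are placeholders
    with socket.create_connection((host, port)) as sock:
        # send the URL followed by the [END] terminator the server looks for
        sock.sendall((url + "[END]").encode())
        # read the reply until the server shuts the connection down
        chunks = []
        while True:
            data = sock.recv(1024)
            if not data:
                break
            chunks.append(data)
    return b"".join(chunks).decode()

# e.g. print(send_url("https://example.com/cat.jpg"))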
Example #2
def squeezenet_featurize(imagename, imagedir):
	''' 
	This network has AlexNet-level accuracy with a small footprint (5.1 MB).
	The pretrained weights are converted from the original Caffe network.

	This may be useful for production purposes if the accuracy is similar to
	other types of featurizations.

	See https://github.com/rcmalli/keras-squeezenet
	'''

	model = SqueezeNet()

	img = image.load_img(imagedir+'/'+imagename, target_size=(227, 227))
	x = image.img_to_array(img)
	x = np.expand_dims(x, axis=0)
	x = preprocess_input(x)

	preds = model.predict(x)
	print('Predicted:', decode_predictions(preds))
	features = preds[0]
	labels=list()

	for i in range(len(features)):
		label='squeezenet_feature_%s'%(str(i))
		labels.append(label)

	return features, labels 
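A short usage sketch for squeezenet_featurize (the file name and directory are placeholders): it returns the 1000-dimensional SqueezeNet prediction vector for one image together with generated feature labels.

# features is a 1-D numpy array of length 1000, labels is ['squeezenet_feature_0', ...]
features, labels = squeezenet_featurize('cat.jpg', './images')
print(len(features), labels[:3])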
Example #3
    def set_tags(self):
        """
            Run a convolutional neural network server-side for each image to
            associate tags (takes no more than a few seconds). Requests run in
            parallel, since a thread is started per client request.
        """
        import base64  # base64 and cv2 may be module-level imports in the original source
        import cv2
        import numpy as np
        import keras
        from keras.preprocessing import image
        from keras_squeezenet import SqueezeNet
        from keras.applications.imagenet_utils import preprocess_input, decode_predictions
        # dtype must be uint8
        # Encode image to ascii for Python 3 compatibility
        # (base64.decodestring was removed in Python 3.9; decodebytes is the replacement)
        server_buffer = np.frombuffer(base64.decodebytes(
            self.image.encode('ascii')),
                                      dtype="uint8")
        server_frame = cv2.imdecode(server_buffer, cv2.IMREAD_UNCHANGED)

        keras.backend.clear_session()
        model = SqueezeNet()
        resized = cv2.resize(server_frame,
                             dsize=(227, 227),
                             interpolation=cv2.INTER_CUBIC)
        x = image.img_to_array(resized)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        preds = decode_predictions(model.predict(x))
        # decode_predictions returns one list per image of (class_id, class_name, score) tuples
        print("Tags: {}".format(preds[0]))
        for _, pred, score in preds[0]:
            self.tags.append(pred)
Example #4
def make_predictions(image_file):
    model = SqueezeNet()
    img = image.load_img(image_file, target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    return decode_predictions(preds)
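decode_predictions returns one list per input image of (class_id, class_name, score) tuples, best first, so the top label can be read as sketched here ('photo.jpg' is a placeholder path).

preds = make_predictions('photo.jpg')
class_id, class_name, score = preds[0][0]  # best guess for the single input image
print(class_name, score)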
Example #5
def model_train(x_train, y_train):
    model = SqueezeNet(weights='imagenet',
                       include_top=False,
                       input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

    top_model = Sequential()
    top_model.add(Flatten(input_shape=model.output_shape[1:]))
    top_model.add(Dense(256, activation='relu'))
    top_model.add(Dropout(0.5))
    top_model.add(Dense(NUM_LABELS, activation='softmax'))

    model = Model(inputs=model.input, outputs=top_model(model.output))

    for layer in model.layers[:15]:
        layer.trainable = False

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    callback = EarlyStopping(monitor='val_loss',
                             patience=5,
                             verbose=1,
                             mode='auto')
    model.fit(x_train,
              y_train,
              batch_size=BATCH_SIZE,
              epochs=EPOCH,
              validation_split=0.1,
              callbacks=[callback])  # Keras expects a list of callbacks

    return model
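model_train relies on module-level constants (IMAGE_SIZE, NUM_LABELS, BATCH_SIZE, EPOCH) and on labels that are already one-hot encoded. A minimal call sketch, assuming those constants live in the same module and using random arrays purely to illustrate the expected shapes:

import numpy as np
from keras.utils import to_categorical

IMAGE_SIZE, NUM_LABELS, BATCH_SIZE, EPOCH = 227, 4, 4, 1  # placeholder values
x_train = np.random.rand(8, IMAGE_SIZE, IMAGE_SIZE, 3)    # dummy images
y_train = to_categorical(np.random.randint(NUM_LABELS, size=8), NUM_LABELS)
model = model_train(x_train, y_train)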
Example #6
 def testTHPrediction(self):
     keras.backend.set_image_dim_ordering('th')
     model = SqueezeNet()
     img = image.load_img('images/cat.jpeg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     preds = model.predict(x)
     decoded_preds = decode_predictions(preds)
     #print('Predicted:', decoded_preds)
     self.assertIn(decoded_preds[0][0][1], 'tabby')
Example #7
def get_RCNN_face_model():
    num_outputs = 2
    print("Building SqueezeNet...")
    model = SqueezeNet()
    print("Building New Layers...")
    x = model.get_layer('drop9').output
    x = Convolution2D(num_outputs, (1, 1), padding='valid', name='conv10')(x)
    x = Activation('relu', name='relu_conv10')(x)
    x = GlobalAveragePooling2D()(x)
    out = Activation('softmax', name='loss')(x)
    model = Model(inputs=model.input, outputs=out)
    print("Model has been generated")
    return model
Example #8
 def __init__(self, n_classes=3):
     self._fitted = False
     self.X = []
     self.y = []
     self.X_conv = []
     self._lr = SGDClassifier(
         loss='log',
         penalty='l1',
         #warm_start=True,
         max_iter=50,
         tol=1e-3,
         random_state=2 + 3)
     self._features = SqueezeNet(weights='imagenet',
                                 input_shape=(image_size, image_size, 3))
Example #9
def server_worker(client_socket, address, tf_graph, pic_num):

    logger = logging.getLogger('logger' + str(pic_num))
    logger.setLevel(logging.DEBUG)
    ch = logging.StreamHandler()
    formatter = logging.Formatter(
        "[%(asctime)s], [%(levelname)s], [%(processName)s], [%(threadName)s] :  %(message)s"
    )
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    logger.info("Received Client ('%s', %s)." % (address[0], address[1]))

    receive_string = ""

    # Read data from the client and send a response back
    while True:
        #print(client_socket)
        data = client_socket.recv(2048)
        data_decode = data.decode("utf-8")
        receive_string = receive_string + data_decode
        # The full message from the client has been received
        if receive_string.find("[END]") != -1:
            break
    receive_string = receive_string[:-5]  # strip the trailing "[END]" marker
    logger.info("Client submitted URL %s" % receive_string)
    request.urlretrieve(receive_string, "images/%d.jpg" % pic_num)
    print("get the pic")

    logger.info("finish download")

    with tf_graph.as_default():
        model = SqueezeNet()
        img = image.load_img('images/%d.jpg' % pic_num, target_size=(227, 227))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = model.predict(x)
        result = decode_predictions(preds)
        #print('Predicted:', result)
        result = result[0][0]

    logger.info("SqueezeNet result: (\"%s\", %.3f)" % (result[1], result[2]))
    result = [result[1], str(result[2])]
    result_string = ';'.join(result)
    client_socket.sendall(result_string.encode("utf-8"))
    client_socket.shutdown(socket.SHUT_RDWR)
    client_socket.close()
    logger.info("Client connection closed")
    del logger
Example #10
def train_model():
    model = Sequential([
        # Image size is 300 x 300 pixels; 3 is for the RGB channels.
        # include_top lets you select whether you want SqueezeNet's final dense layers or not.
        # Dense layers interpret the found patterns in order to classify, e.g. "this image contains rock".
        # Set to False because we append our own classification layers below.
        SqueezeNet(input_shape=(300, 300, 3), include_top=False),

        # To prevent over-fitting, 20% dropout rate
        Dropout(0.2),

        # Add this layer to end of Squeeze NN
        Convolution2D(LABELS_COUNT, (1, 1), padding='valid'),

        # ReLU keeps positive values and turns any negative value into 0:
        # f(x) = max{0, x}, so the output of a ReLU unit is non-negative.
        # It has become the default activation function for many types of neural networks
        # because a model that uses it is easier to train and often achieves better performance.
        Activation('relu'),

        # Perform classification, calculates average output of each feature map in previous layer
        # i.e data reduction layer, prepares model for Activation('softmax')
        GlobalAveragePooling2D(),

        # softmax gives the probability of each hand sign
        # (4 image classes here; softmax handles multi-class problems, i.e. anything more than 2 classes)
        Activation('softmax')
    ])
    return model
Example #11
class Learner:
    def __init__(self, n_classes=3):
        self._fitted = False
        self.X = []
        self.y = []
        self.X_conv = []
        self._lr = SGDClassifier(
            loss='log',
            penalty='l1',
            #warm_start=True,
            max_iter=50,
            tol=1e-3,
            random_state=2 + 3)
        self._features = SqueezeNet(weights='imagenet',
                                    input_shape=(image_size, image_size, 3))

    def add_image(self, img, klass):
        self.X.append(img)
        self.y.append(klass)
        X = np.array(self.X[-1:], dtype=np.float32)
        X = preprocess_input(X)
        X_conv = self._features.predict(X)
        print(decode_predictions(X_conv))
        X_conv = X_conv[0]
        print(X_conv.shape)
        X_conv = X_conv / np.linalg.norm(X_conv)
        self.X_conv.append(X_conv)
        print(Counter(self.y))

    def fit(self):
        X_conv = np.array(self.X_conv)
        y = np.array(self.y)
        print(X_conv.shape, y.shape)
        if len(set(self.y)) > 1:
            self._lr.fit(X_conv, y)
            self._fitted = True

    def predict(self, img):
        if not self._fitted:
            return 0
        # a batch of one image
        X = np.array(img, dtype=np.float32)
        X = X[np.newaxis]
        X = preprocess_input(X)
        X_conv = self._features.predict(X)[0]
        X_conv = X_conv / np.linalg.norm(X_conv)
        return self._lr.predict([X_conv])[0]
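A usage sketch for Learner (hedged: image_size is assumed to be a module-level constant, as the class itself assumes, and the random array stands in for a real camera frame):

import numpy as np

image_size = 227                      # assumed module-level constant
learner = Learner(n_classes=2)
frame = np.random.randint(0, 255, (image_size, image_size, 3)).astype(np.float32)
learner.add_image(frame, klass=1)     # accumulate labelled frames
learner.fit()                         # only fits once at least two classes are present
print(learner.predict(frame))         # returns 0 until the classifier has been fitted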
Example #12
 def initial_state(self,state0):
     #turn the image into gray then resize it to (84,84) then compress the data to 8bits
     #state0=np.uint8(resize(rgb2gray(state0),(frame_width,frame_height))*255)
     
     
     sq_model = SqueezeNet()
     start = time.time()
     #img = state0
     img = image.load_img('/home/i16djell/Bureau/state0.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature = sq_model.predict(x)
     
     features = [feature, feature, feature, feature]  # the initial state repeats the first frame's features
     features = np.stack(features, axis=0)
     features = features.reshape(1, 4, 1000)
     return features
Example #13
def child_process(queue, lock):
    graph = tf.get_default_graph()
    model = SqueezeNet()
    with ThreadPoolExecutor(max_workers=4) as executor:
        while 1:
            if not queue.empty():
                with lock:
                    client_socket, client_address = queue.get()
                executor.submit(thread_worker, client_socket, client_address,
                                graph, model)
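A sketch of how a parent process might wire up child_process (hedged: the worker count and names are assumptions; the accept loop itself is not shown in this example): each child receives the shared queue and lock, and the parent pushes accepted connections onto the queue.

import multiprocessing

if __name__ == '__main__':
    queue = multiprocessing.Queue()
    lock = multiprocessing.Lock()
    workers = [multiprocessing.Process(target=child_process, args=(queue, lock))
               for _ in range(2)]
    for w in workers:
        w.start()
    # the accept loop would then hand connections over with:
    # queue.put((client_socket, client_address))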
Example #14
def get_model():
    model = Sequential([
        SqueezeNet(input_shape=(227, 227, 3), include_top=False),
        Dropout(0.5),
        Convolution2D(NUM_CLASSES, (1, 1), padding='valid'),
        Activation('relu'),
        GlobalAveragePooling2D(),
        Activation('softmax')
    ])
    return model
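The same classification head appears in Examples #15, #16, #21 and #22. A minimal sketch of compiling and querying the returned model (hedged: NUM_CLASSES is a placeholder and the zero array only illustrates the input shape):

import numpy as np

NUM_CLASSES = 4   # placeholder; get_model reads this module-level constant
model = get_model()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
probs = model.predict(np.zeros((1, 227, 227, 3)))  # shape (1, NUM_CLASSES), rows sum to 1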
Example #15
def def_model_param():
    gesture_categories = len(CATEGORY_MAP)
    base_model = Sequential()
    base_model.add(
        SqueezeNet(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3), include_top=False))
    base_model.add(Dropout(0.5))
    base_model.add(Convolution2D(gesture_categories, (1, 1), padding='valid'))
    base_model.add(Activation('relu'))
    base_model.add(GlobalAveragePooling2D())
    base_model.add(Activation('softmax'))
    return base_model
Example #16
def def_model_param():
    GESTURE_CATEGORIES = len(CATEGORY_MAP)
    base_model = Sequential()
    base_model.add(SqueezeNet(input_shape=(IMG_SIZE[0], IMG_SIZE[1], 3), include_top=False))
    base_model.add(Dropout(0.5))
    base_model.add(Convolution2D(GESTURE_CATEGORIES, (1, 1), padding='valid'))
    base_model.add(Activation('relu'))
    base_model.add(GlobalAveragePooling2D())
    base_model.add(Activation('softmax'))

    return base_model
Example #17
    def _encoder_model(self, input_shape, hyperparameters):
        squeezenet = SqueezeNet(
            input_shape=(self.input_shape[0], self.input_shape[1], 3),
            include_top=False,
        )
        x = Flatten()(squeezenet.output)
        embedding = Dense(np.prod(hyperparameters['embedding_dim']), activation='relu')(x)

        encoder = Model(squeezenet.input, embedding)
        utils.freeze_layers(squeezenet)
        return encoder
Example #18
def f(queue):
    graph = tf.get_default_graph()
    model = SqueezeNet()
    executor = ThreadPoolExecutor(max_workers=4)  # create a pool of 4 worker threads
    while True:
        if not queue.empty():
            l.acquire()
            s = queue.get()
            l.release()
            client_socket, client_address = (s[0], s[1])
            executor.submit(thread_pool, client_socket, client_address, graph,
                            model)
Example #19
def handle_request(queue):
    """
    A function that will be executed by a child process.
    It will keep on getting client connection from a queue.
    """
    g = tf.get_default_graph()
    model = SqueezeNet()
    with ThreadPoolExecutor(max_workers=4) as executor:
        while True:
            soc, address = queue.get()
            logging.info("Received {}".format(str(address)))
            executor.submit(handle_client, g, model, soc, address)
Example #20
def network(mean_train):
    shape = 227
    label_num = 10
    mean_train = np.expand_dims(mean_train, axis=0)
    mean_train = np.expand_dims(mean_train, axis=0)
    mean_train = np.expand_dims(mean_train, axis=0)
    input_tensor = Input((shape, shape, 3))
    mean_train_tensor = Input(tensor=tf.constant(mean_train, dtype=tf.float32))
    subtracted = Subtract()([input_tensor, mean_train_tensor])
    squeezeNet = SqueezeNet(subtracted)
    output = Dense(label_num, activation=None)(squeezeNet)

    return Model(inputs=[input_tensor, mean_train_tensor], outputs=output)
Example #21
def get_model():
    model = Sequential([
        SqueezeNet(input_shape=(227, 227, 3),
                   include_top=False),  # 227x227 images with 3 channels (RGB)
        Dropout(0.5),  # to prevent overfitting, we use a 50% dropout rate
        Convolution2D(NUM_CLASSES, (1, 1), padding='valid'),
        Activation('relu'),  # rectified linear unit
        GlobalAveragePooling2D(),  # calculates the average of each feature map
        Activation('softmax')  # softmax turns the outputs into probabilities that sum to one
    ])
    return model
Example #22
def get_model():
    model = Sequential([
        SqueezeNet(
            input_shape=(227, 227, 3), include_top=False
        ),  # image size is 227x227 and 3 stands for the three RGB channels
        Dropout(0.5),  # 50% dropout in order to prevent overfitting
        Convolution2D(
            NUM_CLASSES, (1, 1), padding='valid'
        ),  # appending these few layers to the very end of the SqueezeNet NN
        Activation('relu'),
        GlobalAveragePooling2D(),  # data-reduction layer: averages the output of each feature map from the previous layer
        Activation('softmax')  # probabilities for each class
    ])
    return model
Example #23
def squeezenet_build(
        input_shape=(224, 224, 3), num_classes=2, weights="imagenet"):
    print("Building squeezenet", input_shape, "- num_classes", num_classes,
          "- weights", weights)
    sys.path.append('keras-squeezenet')
    from keras_squeezenet import SqueezeNet
    m1 = SqueezeNet(input_shape=input_shape, weights=weights, include_top=True)
    features = m1.layers[-2].output
    x = keras.layers.Dense(num_classes,
                           activation='softmax',
                           use_bias=True,
                           name='Logits')(features)
    model = keras.Model(m1.input, x)
    for l in model.layers:
        l.trainable = True
    return model, features
Example #24
def handle_request(queue):
    """
    A function that will be executed by a child process.
    It will keep on getting client connection from a queue.
    """
    model = SqueezeNet()
    g = tf.get_default_graph()
    thread_queue = Queue()
    for i in range(4):
        t = ClientThread(g, model, thread_queue)
        t.start()

    # Get client from the process queue,
    # and then put it into the thread queue
    while True:
        soc, address = queue.get()
        logging.info("Received {}".format(str(address)))
        thread_queue.put((soc, address))
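ClientThread is not shown in this example; a minimal sketch of what it could look like (hedged: handle_client and its (graph, model, socket, address) signature are carried over from Example #19, not from this snippet) is a daemon thread that keeps pulling connections from thread_queue:

import threading

class ClientThread(threading.Thread):
    def __init__(self, graph, model, thread_queue):
        super().__init__(daemon=True)
        self.graph = graph
        self.model = model
        self.thread_queue = thread_queue

    def run(self):
        while True:
            soc, address = self.thread_queue.get()  # blocks until a client arrives
            handle_client(self.graph, self.model, soc, address)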
Example #25
def get_all_nets(network_name, include_top=True):
    if (network_name == "ResNet50"):
        model = resnet50.ResNet50(weights='imagenet',
                                  include_top=include_top,
                                  input_shape=(224, 224, 3))
        # if(include_top==False):
        # 	model.pop()
    elif (network_name == "MobileNetV2"):
        model = mobilenetv2.MobileNetV2(weights='imagenet',
                                        include_top=include_top,
                                        input_shape=(224, 224, 3))
    elif (network_name == "VGG19"):
        model = vgg19.VGG19(weights='imagenet', include_top=include_top)
    elif (network_name == "SqueezeNet"):
        model = SqueezeNet(weights='imagenet', include_top=include_top)
        # if(include_top==False):
        # 	model.pop()
        # 	model.pop()
        # 	model.pop()
        # 	model.pop()
    return model
Example #26
 def new_state(self,futur_state0,state0,state_1,state_2):
     
     sq_model = SqueezeNet()
     start = time.time()
     #img = state_2
     img = image.load_img('/home/i16djell/Bureau/state_2.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature1 = sq_model.predict(x)
     
     #img = state_1
     img = image.load_img('/home/i16djell/Bureau/state_1.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature2 = sq_model.predict(x)
     
     #img = state0
     img = image.load_img('/home/i16djell/Bureau/state0.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature3 = sq_model.predict(x)  
     
     #img = futur_state0
     img = image.load_img('/home/i16djell/Bureau/futur_state0.jpg', target_size=(227, 227))
     x = image.img_to_array(img)
     x = np.expand_dims(x, axis=0)
     x = preprocess_input(x)
     feature4 = sq_model.predict(x)
     
     features = [feature4, feature3, feature2, feature1]
     features = np.stack(features, axis=0)
     features = features.reshape(1, 4, 1000)
     
     return features
Example #27
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
import time

if __name__ == '__main__':

    model = SqueezeNet()
    # model.load_weights('../model/squeezenet_weights_tf_dim_ordering_tf_kernels.h5', by_name=True)

    start = time.time()

    img = image.load_img('blah/train/cats/cat.3.jpg', target_size=(227, 227))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    preds = model.predict(x)
    duration = time.time() - start
    print('Predicted:', decode_predictions(preds))
        (3, 3),
        #kernel_regularizer=regularization,
        padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Activation('softmax', name='predictions')(x)

    model = Model(img_input, output)
elif (MODELS == 'squeezenet'):
    IMAGE_SIZE = 227
    EPOCS = 50
    import sys
    sys.path.append('../keras-squeezenet-master')
    from keras_squeezenet import SqueezeNet
    input_tensor = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    model = SqueezeNet(weights=None,
                       classes=N_CATEGORIES,
                       input_tensor=input_tensor)
else:
    raise Exception('invalid model name')

if (MODELS == 'vgg16'):
    #for fine tuning
    from keras.optimizers import SGD
    model.compile(optimizer=SGD(lr=0.0001, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
else:
    #for full training
    from keras.optimizers import Adagrad
    model.compile(optimizer=Adagrad(lr=0.01, epsilon=1e-08, decay=0.0),
                  loss='categorical_crossentropy',
Example #29
                       input_tensor=input_tensor)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(N_CATEGORIES, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
    for layer in base_model.layers[:15]:
        layer.trainable = False
elif (MODELS == 'squeezenet'):
    IMAGE_SIZE = 227
    import sys
    sys.path.append('../keras-squeezenet-master')
    from keras_squeezenet import SqueezeNet
    input_tensor = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    base_model = SqueezeNet(weights="imagenet",
                            include_top=False,
                            input_tensor=input_tensor)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    x = Dense(1024, activation='relu')(x)
    predictions = Dense(N_CATEGORIES, activation='softmax')(x)
    model = Model(inputs=base_model.input, outputs=predictions)
elif (MODELS == 'squeezenet2'):
    IMAGE_SIZE = 64
    import sys
    sys.path.append('../keras-squeezenet-master')
    from keras_squeezenet import SqueezeNet
    input_tensor = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    base_model = SqueezeNet(include_top=False, input_tensor=input_tensor)
    x = base_model.output
    x = Dropout(0.5, name='drop9')(x)
class_number = 4

train_data_dir = 'data_EE_2F/4class_v2/train'
validation_data_dir = 'data_EE_2F/4class_v2/validation'
nb_train_samples = 4200
nb_validation_samples = 1450

epochs = 15
batch_size = 50

if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

model = SqueezeNet(weights=None, input_shape=input_shape, classes=class_number)

top2_acc = functools.partial(metrics.top_k_categorical_accuracy, k=2)
top2_acc.__name__ = 'top2_acc'

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy', top2_acc])

# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(rescale=1. / 255)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
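The rest of this script is not shown. A typical continuation, sketched under the assumption that train_data_dir/validation_data_dir follow the usual one-folder-per-class layout and that img_width/img_height are defined earlier in the original file, would wire the generators to training like this:

train_generator = train_datagen.flow_from_directory(
    train_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')

validation_generator = test_datagen.flow_from_directory(
    validation_data_dir,
    target_size=(img_width, img_height),
    batch_size=batch_size,
    class_mode='categorical')

model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size)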
import numpy as np
from keras_squeezenet import SqueezeNet
from keras.applications.imagenet_utils import preprocess_input, decode_predictions
from keras.preprocessing import image
#import matplotlib.pyplot as plt
#import matplotlib.image as mpimg


model = SqueezeNet()

img = image.load_img('pexels-photo-280207.jpeg', target_size=(227, 227))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)

preds = model.predict(x)
all_results = decode_predictions(preds)
for results in all_results:
    for result in results:
        print('Probability %0.2f%% => [%s]' % (100*result[2], result[1]))
        #result_text= 'Probability %0.2f%% => [%s]' % (100*result[2], result[1])
        #break
#plt.figure(num=1,figsize=(8, 6), dpi=80)
#plt.imshow(img)
#plt.text(130,90,result_text,horizontalalignment='center', verticalalignment='center',fontsize=16,color='black')
#plt.axis('off')
#plt.show()