def mobilenet_prediction(model_path, query_path):

    model = MobileNet(weights='imagenet', include_top=False)

    #Extract features of Query Image
    imgq = image.load_img(query_path, target_size=(224, 224))
    img_dataq = image.img_to_array(imgq)
    img_dataq = np.expand_dims(img_dataq, axis=0)
    img_dataq = preprocess_input(img_dataq)
    mnet_feature_query = model.predict(img_dataq)
    mnet_feature_np_query = np.array(mnet_feature_query)
    mnet_feature_np_query = mnet_feature_np_query.flatten()

    listOfInput = [mnet_feature_np_query]

    loaded_model = load(model_path)

    probs = loaded_model.predict_proba(listOfInput)

    print("probs", probs)
    #loaded_model.predict(listOfInput)

    #probs = model.predict_proba(listOfInput)

    category = []
    classes = loaded_model.classes_

    for index in range(len(classes)):
        category.append((probs[0][index], classes[index]))

    final_category = sorted(category, key=lambda x: x[0], reverse=True)

    return final_category[:3]
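
# A usage sketch (my addition, not from the original source). It assumes the
# usual imports are present in the full file (numpy, keras.applications.MobileNet,
# keras.preprocessing.image, preprocess_input) and that `load` is joblib.load;
# the two paths below are hypothetical placeholders.
top_matches = mobilenet_prediction('models/classifier.joblib', 'queries/sample.jpg')
for score, class_name in top_matches:
    print('{}: {:.3f}'.format(class_name, score))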
Example No. 2
def example_predict():

    CNN = MobileNet()
    #CNN = Xception()

    img = cv2.imread('./data/ex_natural_images/dog/dog_0011.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (224, 224)).astype(numpy.float32)

    prob = CNN.predict(preprocess_input(numpy.array([img])))
    idx = numpy.argsort(-prob[0])[0]
    print(class_names[idx], prob[0, idx])

    return
    # NOTE: as written, everything below this return statement never executes.

    # mobilenet features
    base_model = MobileNet(input_shape=(224, 224, 3),
                           weights='imagenet',
                           include_top=False)

    # load the dataset
    print("Data loading started")
    index_img = 1
    for image in imagePaths:
        label = os.path.split(os.path.split(image)[0])[1]
        labels.append(label)

        img = cv2.imread(image)
        img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
        outp = base_model.predict(
            img.reshape(1, img.shape[0], img.shape[1], img.shape[2]))
        outp = outp.reshape(
            1, outp.shape[0] * outp.shape[1] * outp.shape[2] * outp.shape[3])
        data.append(outp)
        print("Loading Image Num = " + str(index_img))
        index_img = index_img + 1

    data = np.array(data)
    labels = np.array(labels)

    le = LabelEncoder()
    labels = le.fit_transform(labels)
    data = data.reshape(data.shape[0], -1)
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25)
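
    # The original snippet is cut off after the train/test split. One plausible
    # next step (an assumption on my part, not the original trainer) is to fit a
    # simple scikit-learn classifier on the flattened MobileNet features:
    from sklearn.linear_model import LogisticRegression

    clf = LogisticRegression(max_iter=1000)
    clf.fit(trainX, trainY)
    print('Validation accuracy: {:.3f}'.format(clf.score(testX, testY)))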
model.summary()

model.fit_generator(train_datagen,
                    steps_per_epoch=STEPS,
                    # initial_epoch=initial_epoch,
                    epochs=EPOCHS,
                    verbose=1,
                    validation_data=(x_valid, y_valid),
                    callbacks=callbacks)


model.load_weights(MODEL_WEIGHTS_FILE)

valid_predictions = model.predict(x_valid, batch_size=128, verbose=1)
map3 = mapk(valid_df[['y']].values, preds2catids(valid_predictions).values)
print('Map3: {:.3f}'.format(map3))

test = pd.read_csv(os.path.join(INPUT_DIR, 'test_simplified.csv'))
test.head()
x_test = df_to_image_array_xd(test, size)
print(test.shape, x_test.shape)
print('Test array memory {:.2f} GB'.format(x_test.nbytes / 1024.**3 ))

test_predictions = model.predict(x_test, batch_size=128, verbose=1)

top3 = preds2catids(test_predictions)
top3.head()
top3.shape
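
# A plausible continuation (mirroring the commented-out mapping further down
# this page): convert the top-3 class ids into category names for a submission.
cats = list_all_categories()
id2cat = {k: cat.replace(' ', '_') for k, cat in enumerate(cats)}
top3cats = top3.replace(id2cat)
top3cats.head()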
Example No. 5
labels_dict = {0: 'yash', 1: 'mom'}
color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}


while (True):

    ret, img = cap.read()
    faces = face_clsfr.detectMultiScale(img)

    for (x, y, w, h) in faces:
        face_img = img[y:y + h, x:x + w]  # crop the detected face region
        reshaped = cv2.resize(face_img, (224, 224))
        reshaped = image.img_to_array(reshaped)
        reshaped = np.expand_dims(reshaped, axis=0)
        result = model.predict(reshaped)

        label = np.argmax(result, axis=1)[0]

        cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)
        cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)
        cv2.putText(img, labels_dict[label], (x, y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)

    cv2.imshow('LIVE', img)
    key = cv2.waitKey(1)

    if (key == 27):
        break

cap.release()
cv2.destroyAllWindows()
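
# The cap, face_clsfr and model objects used above are not defined in this
# excerpt. A minimal setup sketch that would precede the loop; the file paths
# and model filename are assumptions, not from the original source:
from keras.models import load_model

cap = cv2.VideoCapture(0)  # default webcam
face_clsfr = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
model = load_model('face_classifier.h5')  # hypothetical filename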
Example No. 6
from mycbk import *
import os

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "12"

def draw_predict(img, y_pred):
    plt.rcParams['figure.figsize'] = [16, 10]
    plt.rcParams['font.size'] = 14

    n = 5
    fig, axs = plt.subplots(nrows=n, ncols=n, sharex=True, sharey=True,
                            figsize=(16, 6))
    for i in range(n**2):
        ax = axs[i // n, i % n]
        ax.imshow(img[i].astype(np.uint8))
        ax.text(130, 6, pred2text(y_pred[i]), fontsize=15, color='blue',
                bbox=dict(boxstyle="square", facecolor='wheat'))
        ax.axis('off')
    plt.tight_layout()
    fig.savefig(address_predict, dpi=300)
    plt.show()
    
length = get_char_length()
input_shape = (IMAGE_HEIGHT, IMAGE_WIDTH, CHANNEL)
model = MobileNet(input_shape=input_shape, alpha=1., weights=None,
                  classes=CHAR_NUM * length)
model.load_weights(address_model)

[img, x, y] = Generate_Data().test()
y_pred = model.predict(x)

draw_predict(img, y_pred)
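
# pred2text() is imported from mycbk and not shown here. A hypothetical decoder
# (my guess at its behaviour, not the original implementation): reshape the flat
# score vector into one block of scores per character position and take the
# argmax of each block. CHARSET and the (length, CHAR_NUM) layout are assumptions.
CHARSET = '0123456789abcdefghijklmnopqrstuvwxyz'  # hypothetical character set

def pred2text_sketch(y_vec):
    blocks = y_vec.reshape(length, CHAR_NUM)  # assumed: one row of scores per position
    return ''.join(CHARSET[int(i)] for i in blocks.argmax(axis=1))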
import numpy as np
import matplotlib.pyplot as plt
from keras.applications import MobileNet
from keras.preprocessing.image import load_img
from keras.preprocessing.image import img_to_array
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.mobilenet import preprocess_input
%matplotlib inline

mobilenet = MobileNet(weights='imagenet')

filename = 'image.jpg'
original = load_img(filename, target_size=(224, 224))
plt.imshow(original)
plt.show()

numpy_image = img_to_array(original)
image_batch = np.expand_dims(numpy_image, axis=0)

processed_image = preprocess_input(image_batch.copy())
predictions = mobilenet.predict(processed_image)
label = decode_predictions(predictions)
print(label)
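
# decode_predictions returns, for each input image, a list of
# (imagenet_id, class_name, score) tuples ordered by score. A small sketch
# printing the top predictions for the single image more readably:
for imagenet_id, name, score in label[0]:
    print('{}: {:.4f}'.format(name, score))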
Example No. 8
                        verbose=1,
                        validation_data=valid_datagen , #(x_valid, y_valid),
                        validation_steps = valSTEPS,
                        max_q_size=10,
                        callbacks = callbacks
)


model.load_weights(MODEL_WEIGHTS_FILE)

k = 100

test = pd.read_csv(os.path.join(INPUT_DIR, 'test_simplified.csv'))
test1 = test[:10000]
x_test = df_to_image_array_xd(test1,size=size)
test_predictions1 = model.predict(x_test, batch_size=128, verbose=1)
if (k == 100):
    gc.collect()
    
test2 = test[10000:20000]
x_test = df_to_image_array_xd(test2,size=size)
test_predictions2 = model.predict(x_test, batch_size=128, verbose=1)
if (k == 100):
    gc.collect()
        
test3 = test[20000:30000]
x_test = df_to_image_array_xd(test3,size=size)
test_predictions3 = model.predict(x_test, batch_size=128, verbose=1)
if (k == 100):
    gc.collect()
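
# The excerpt stops after the third 10k chunk; assuming the remaining chunks
# are produced the same way, the per-chunk predictions would then be stacked:
test_predictions = np.concatenate(
    [test_predictions1, test_predictions2, test_predictions3], axis=0)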

test_df = pd.read_csv("../input/test_simplified.csv")

n_samples = test_df.shape[0]
pick_per_epoch = math.ceil(n_samples / batch_size)
pick_order = np.arange(test_df.shape[0])

all_preds = []

for i in trange(pick_per_epoch):
    c_pick = pick_order[i * batch_size:(i + 1) * batch_size]
    dfs = test_df.iloc[c_pick]
    out_imgs = list(map(strokes_to_img, dfs["drawing"]))
    X = np.array(out_imgs)[:, :, :, :3].astype(np.float32)
    preds = model.predict(X)
    for x in preds:
        all_preds.append(to_class[np.argmax(x)])
#        if i == 50:  # TODO: let it run till completion
#            break

#test_predictions = model.predict(x_test, batch_size=128, verbose=1)
#
#top3 = preds2catids(test_predictions)
#top3.head()
#top3.shape
#
#cats = list_all_categories()
#id2cat = {k: cat.replace(' ', '_') for k, cat in enumerate(cats)}
#top3cats = top3.replace(id2cat)
#top3cats.head()
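
# The excerpt ends after collecting all_preds. A minimal sketch (my assumption,
# not the original code) of writing them out, assuming the test CSV exposes a
# key_id column as in the Quick, Draw! competition data:
submission = pd.DataFrame({'key_id': test_df['key_id'],
                           'word': all_preds})
submission.to_csv('submission.csv', index=False)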
def convertMobileNetWeights(out_path, input_size=224, alpha=0.25, include_top=True):
    if not os.path.isdir(out_path):
        os.mkdir(out_path)

    model_k = MobileNet(input_shape=(input_size, input_size, 3), alpha=alpha, weights='imagenet', include_top=include_top, pooling='avg')
    model_k.summary()
    model_t = MobileNet_v1(1000, alpha=alpha, input_size=input_size, include_top=include_top)

    res_t = dict()
    st = model_t.state_dict()
    for i, el in enumerate(st):
        arr = el.split('.')
        print(arr)
        if 'model' in el:
            key = (int(arr[1]), int(arr[2]))
            if key not in res_t:
                res_t[key] = []
            res_t[key].append((el, st[el].numpy().shape))
        elif 'fc.' in el:
            key = (100, )
            if key not in res_t:
                res_t[key] = []
            res_t[key].append((el, st[el].numpy().shape))

    res_torch = dict()
    for i, el in enumerate(sorted(list(res_t.keys()))):
        print(i, el, res_t[el])
        res_torch[i] = res_t[el]
        print(res_torch[i])

    total = 0
    res_k = dict()
    for level_id in range(len(model_k.layers)):
        layer = model_k.layers[level_id]
        layer_type = layer.__class__.__name__
        if layer_type in ['Conv2D', 'BatchNormalization', 'DepthwiseConv2D', 'Dense']:
            w = layer.get_weights()
            print('{} {} {} {}'.format(total, level_id, layer_type, w[0].shape))
            res_k[total] = [level_id, layer_type, w[0].shape]

            # Modify state_dict
            if layer_type == 'Conv2D':
                weigths_t = w[0].transpose((3, 2, 1, 0))
                torch_name = res_torch[total][0][0]
                torch_shape = res_torch[total][0][1]
                print('Modify: {}'.format(torch_name))
                # Check shape
                if weigths_t.shape != torch_shape:
                    print('Shape mismatch: {} != {}'.format(weigths_t.shape, torch_shape))
                st[torch_name] = torch.from_numpy(weigths_t)
                if len(res_torch[total]) == 2:
                    print('Store bias...')
                    weigths_t = w[1]
                    torch_name = res_torch[total][1][0]
                    torch_shape = res_torch[total][1][1]
                    print('Modify: {}'.format(torch_name))
                    # Check shape
                    if weigths_t.shape != torch_shape:
                        print('Shape mismatch: {} != {}'.format(weigths_t.shape, torch_shape))
                    st[torch_name] = torch.from_numpy(weigths_t)

            elif layer_type == 'DepthwiseConv2D':
                weigths_t = w[0].transpose((2, 3, 1, 0))
                torch_name = res_torch[total][0][0]
                torch_shape = res_torch[total][0][1]
                print('Modify: {}'.format(torch_name))
                # Check shape
                if weigths_t.shape != torch_shape:
                    print('Shape mismatch: {} != {}'.format(weigths_t.shape, torch_shape))
                st[torch_name] = torch.from_numpy(weigths_t)
            elif layer_type == 'BatchNormalization':
                for i in range(4):
                    weigths_t = w[i]
                    torch_name = res_torch[total][i][0]
                    torch_shape = res_torch[total][i][1]
                    print('Modify: {}'.format(torch_name))
                    # Check shape
                    if weigths_t.shape != torch_shape:
                        print('Shape mismatch: {} != {}'.format(weigths_t.shape, torch_shape))
                    st[torch_name] = torch.from_numpy(weigths_t)

            total += 1

    model_t.load_state_dict(st)

    data_k = np.random.uniform(-1, 1, (100, input_size, input_size, 3)).astype(np.float32)
    data_t = data_k.transpose((0, 3, 2, 1))
    print(data_k.shape, data_t.shape)

    pred_k = model_k.predict(data_k)
    data_t = torch.from_numpy(data_t)
    model_t.eval()
    with torch.no_grad():
        pred_t = model_t(data_t)
    if include_top:
        pred_t = pred_t.numpy()
    else:
        pred_t = pred_t.permute(0, 3, 2, 1).squeeze().numpy()

    print(pred_k.shape, pred_t.shape)
    diff = (pred_t - pred_k)
    print(diff.min(), diff.max(), diff.mean())

    if np.abs(diff).max() > 0.001:
        print('Large error!')
        exit()

    top = '_no_top'
    if include_top:
        top = '_top'
    torch.save(model_t.state_dict(), out_path + "mobilenet_v1_size_{}_alpha_{}{}.pth".format(input_size, alpha, top))
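
# Example invocation (my addition; the output directory is a placeholder).
# Note that out_path is concatenated directly with the weight file name, so a
# trailing path separator is expected.
if __name__ == '__main__':
    convertMobileNetWeights('converted_weights/', input_size=224, alpha=0.25, include_top=True)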