Example #1
import numpy as np

def transform_image(image):
    # Subtract the (hardcoded) per-channel ImageNet mean and divide by the
    # per-channel standard deviation; the values are in BGR order.
    image = np.array(image) - np.array([103.939, 116.779, 123.68])
    image = image / np.array([57.375, 57.12, 58.395])
    # HWC -> CHW, then add a leading batch dimension.
    image = image.transpose((2, 0, 1))
    image = image[np.newaxis, :]
    return image
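A minimal usage sketch for this preprocessing step; the file name and input size are assumptions:

# 'sample.jpg' is a hypothetical file; any RGB image works.
from PIL import Image

img = Image.open('sample.jpg').resize((224, 224))
batch = transform_image(img)
print(batch.shape)  # (1, 3, 224, 224)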
Example #2
import numpy as np
from PIL import Image
from matplotlib import cm


def highlightBackProp(image, model2):
    # Transpose if the model expects the axes the other way around.
    count, h, w, ch = model2.inputs[0].get_shape()
    ih, iw, ich = image.shape
    if h == ich and ch == ih:
        # Reverse the axis order to match the model's input layout.
        image = image.transpose()

    # Predict a single-channel saliency map, then drop the batch and
    # channel axes to get a 2-D array.
    m1d = model2.predict(image[None, :, :, :], batch_size=1)
    m1d = np.squeeze(m1d, axis=0)
    m1d = np.squeeze(m1d, axis=2)

    # Map the normalized saliency values onto the 'Reds' colormap.
    overlay = Image.fromarray(cm.Reds(m1d / m1d.max(), bytes=True))

    # Make near-white pixels fully transparent and everything else
    # semi-transparent, so only the salient regions are highlighted.
    # almostEquals is assumed to be defined elsewhere (see sketch below).
    pixeldata = list(overlay.getdata())
    for i, pixel in enumerate(pixeldata):
        if almostEquals(pixel[:3], (255, 255, 255)):
            pixeldata[i] = (255, 255, 255, 0)
        else:
            pixeldata[i] = (pixel[0], pixel[1], pixel[2], 128)
    overlay.putdata(pixeldata)

    # Composite the overlay on top of the input image.
    carimg = Image.fromarray(np.uint8(image)).convert("RGBA")
    new_img2 = Image.alpha_composite(carimg, overlay)
    return np.array(new_img2.convert("RGB"))
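almostEquals is not defined in this example. A minimal sketch of a plausible implementation (the tolerance value is an assumption):

def almostEquals(pixel, target, tolerance=10):
    # Hypothetical helper: True when every channel of `pixel` is within
    # `tolerance` of the corresponding channel of `target`.
    return all(abs(p - t) <= tolerance for p, t in zip(pixel, target))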
Example #3
    def _evaluate_model(self, generator, model, iou_threshold, score_threshold,
                        max_detections, save_path):
        """ 
        Evaluate a given dataset using a given model.

        Parameters
        ----------
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
        
        Returns
        -------
        all_detections  : A list containing the predicted boxes for each image in the generator.
        """

        box_list = []
        score_list = []
        for i in range(generator.size()):
            raw_image = generator.load_image(i)
            image = generator.preprocess_image(raw_image.copy())
            image, scale = generator.resize_image(image)

            if keras.backend.image_data_format() == 'channels_first':
                image = image.transpose((2, 0, 1))

            # run network
            boxes, scores, labels = model.predict_on_batch(
                np.expand_dims(image, axis=0))[:3]

            # correct boxes for image scale
            boxes /= scale

            for box, score in zip(boxes[0], scores[0]):
                # Scores come back sorted in descending order, so stop at the
                # first score below the cutoff (hardcoded 0.5, not score_threshold).
                if score < 0.5:
                    break

                b = box.astype(int)
                box_list.append(b)
                score_list.append(score)

            ### !!! SAVE_PATH CURRENTLY NOT IMPLEMENTED !!!
            ### This optional feature can be added later (perhaps for TA3) to write
            ### images with drawn bounding boxes to a specified directory after evaluation.
            # if save_path is True:
            #     draw_annotations(raw_image, generator.load_annotations(i), label_to_name = generator.label_to_name)
            #     draw_detections(raw_image, image_boxes, image_scores, image_labels, label_to_name = generator.label_to_name, score_threshold = score_threshold)

            #     cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)

        return box_list, score_list
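A hedged usage sketch for the evaluation loop above, assuming a keras-retinanet-style generator and model (all names here are illustrative):

# Illustrative call; `evaluator`, `val_generator`, and `detection_model` are
# hypothetical objects matching the signature above.
boxes, scores = evaluator._evaluate_model(
    generator=val_generator,
    model=detection_model,
    iou_threshold=0.5,
    score_threshold=0.05,
    max_detections=100,
    save_path=None,
)
print(len(boxes), 'boxes kept across the dataset')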
Example #4
    def on_post(self, req, res):
        """Handles POST requests"""

        data = req.get_param('file').file.read()

        pilimg = Image.open(io.BytesIO(data))

        # Classify the uploaded image with the model.

        ### load model and weights
        model = model_from_json(open('apple_orange_model.json').read())
        model.load_weights('apple_orange_weights.h5')

        # Resize the image to 25x25 pixels and convert it to a numpy array;
        # each pixel channel is a value in 0-255.
        image = np.array(pilimg.resize((25, 25)))
        # Move channels first, then flatten to a single 1-D float32 vector.
        image = image.transpose(2, 0, 1)
        image = image.reshape(1, image.shape[0] * image.shape[1] *
                              image.shape[2]).astype("float32")[0]
        x = np.array([image / 255.])
        result = model.predict_classes(x)
        proba = model.predict_proba(x)
        # Class index 0 is "apple" and 1 is "orange".
        predict = result[0].tolist()
        result = classes[predict]
        predict_proba = proba[0].tolist()
        result_proba = predict_proba[predict]

        # Return the top-1 predicted class and its probability.
        res.status = falcon.HTTP_200
        res.body = json.dumps({'result': result, 'probability': result_proba})
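A hedged sketch of how this handler might be wired into a Falcon app; the resource class, route, and class labels are assumptions:

# Illustrative wiring; 'PredictResource' is a hypothetical class containing
# the on_post method above.
import falcon

classes = ['apple', 'orange']  # assumed labels (0: apple, 1: orange)

app = falcon.App()  # falcon.API() on Falcon < 3
app.add_route('/predict', PredictResource())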
Example #5
def rotate_images(image):
    '''
    rotate_images(image):
    This function rotates an image on its center 7 times (45, 90, 135, 180,
    225, 270, and the mirror image)
    Input:
        image: one PIL image
    Returns:
        A list of images containing the original image and the rotated ones
    '''
    rotated_images = [image]

    # Rotate about the center; expand=True keeps the whole rotated frame.
    for angle in (45, 90, 135, 180, 225, 270):
        rotated_images.append(image.rotate(angle, expand=True))

    # Mirror image (flip left-right).
    rotated_images.append(image.transpose(Image.FLIP_LEFT_RIGHT))

    return rotated_images
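A quick usage sketch (the input file name is hypothetical):

# 'cat.png' is a hypothetical input file.
from PIL import Image

augmented = rotate_images(Image.open('cat.png'))
print(len(augmented))  # 8: the original, six rotations, and the mirror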
Example #6
def upload_file():
  if request.method == 'GET':
    return render_template('index.html')
  if request.method == 'POST':
    # Read the uploaded file and classify it with the model.
    data = request.files['file'].read()
    pilimg = Image.open(io.BytesIO(data))

    ### load model and weights
    model = model_from_json(open('apple_orange_model.json').read())
    model.load_weights('apple_orange_weights.h5')

    # Resize to 25x25, move channels first, and flatten to one float32 vector.
    image = np.array(pilimg.resize((25, 25)))
    image = image.transpose(2, 0, 1)
    image = image.reshape(1, image.shape[0] * image.shape[1] * image.shape[2]).astype("float32")[0]
    x = np.array([image / 255.])
    result = model.predict_classes(x)
    proba = model.predict_proba(x)

    predict = result[0].tolist()
    result = classes[predict]
    predict_proba = proba[0].tolist()
    result_proba = predict_proba[predict]

    response = jsonify({'result': result, 'probability': result_proba})
    response.status_code = 200

    return response
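A hedged sketch of the Flask wiring this view assumes; the route, labels, and registration style are assumptions:

# Illustrative wiring; upload_file is the view defined above.
from flask import Flask

classes = ['apple', 'orange']  # assumed labels (0: apple, 1: orange)

app = Flask(__name__)
app.add_url_rule('/', 'upload_file', upload_file, methods=['GET', 'POST'])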
Example #7
def MetaFunct(i):
    # Flatten one JSON metadata file (path i) into a single-row DataFrame of
    # top label scores, image properties, and crop hints.
    df = readJson(i)  # readJson and flatten_json are helpers defined elsewhere
    df = flatten_json(df)
    
    a = pd.DataFrame.from_dict(df, orient='index')
    a.columns = ['Values']
    
    label = a[a.index.str.match('label')]
    
    if label.shape[0]>0:
        label.index = label.index.str.split('_',expand=True)
        label.reset_index(inplace=True)
        label.drop(columns=['level_0'], inplace=True)
        label = label[label.level_2.str.match('score')]
        label.sort_values(by=['Values'], ascending=[0], inplace=True)
        label.reset_index(inplace=True)
        label.drop(columns=['index'], inplace=True)
   
        if label.shape[0] > 4:
            label = label.iloc[:5,:]
        
        label['level_1'] = label['level_2'] + '_' + label['level_1'].astype(str)
        label.drop(columns=['level_2'], inplace = True)
        label = pd.melt(label, id_vars=['level_1'])
        label.drop(columns=['variable'], inplace = True)
        label.set_index('level_1', inplace=True)
        label = label.transpose()
        label.reset_index(inplace=True, drop=True)
    
    image = a[a.index.str.match('image')]
    
    if image.shape[0]>0:
        image.index = image.index.str.split('_',expand=True)
        image.reset_index(inplace=True)        
        image.reset_index(inplace=True)
        image.drop(columns= ['index','level_0','level_1','level_2'], inplace=True)
        image['level_4'] = image['level_4'] + '_' + image['level_5'].astype(str)
        image.drop(columns=['level_5'], inplace=True)
        image = pd.pivot_table(image, index='level_3', columns='level_4', values='Values',aggfunc='first' ).reset_index()
                
        if image.shape[0] > 4:
            image = image.iloc[:5,:]
        image = pd.melt(image, id_vars=['level_3'])
        image['level_3'] = image['level_4'] + '_' + image['level_3'].astype(str)
        image.drop(columns=['level_4'], inplace=True)
        image.set_index('level_3', inplace=True)
        image = image.transpose()
        image.reset_index(inplace=True, drop=True)
    
    # Initialize so the final concat works even when no crop data is present.
    bounding = pd.DataFrame()
    confidence = pd.DataFrame()
    importance = pd.DataFrame()

    crop = a[a.index.str.match('crop')]
    
    if crop.shape[0]>0:
        crop.index = crop.index.str.split('_',expand=True)
        crop.reset_index(inplace=True)
        crop.drop(columns=['level_0','level_1', 'level_2'], inplace=True)
        crop['level_3'] = crop['level_3'] + '_' + crop['level_6'].astype(str)
        crop.drop(columns=['level_4'], inplace=True)
        crop.drop(columns=['level_6'], inplace=True)
       
        bounding = crop[crop.level_3.str.match('bounding')]
        if bounding.shape[0] > 0:
            bounding['level_3'] = bounding['level_3'] + '_' + bounding['level_5']
            bounding.drop(columns=['level_5'], inplace=True)
            bounding.set_index('level_3', inplace=True)
            bounding = bounding.transpose()
            bounding.reset_index(inplace=True, drop=True)
        
        
        confidence = crop[crop.level_3.str.match('confidence')]
        if confidence.shape[0] > 0:
            confidence = pd.DataFrame(confidence)
            confidence = confidence['Values']
            confidence = pd.DataFrame(confidence)
            confidence.columns = np.array(['Confidence'])
            confidence.reset_index(inplace=True, drop=True)

        importance = crop[crop.level_3.str.match('importance')]
     
        if importance.shape[0]>0 :
            importance = crop[crop.level_3.str.match('importance')]
            importance = importance['Values'] 
            importance.reset_index(inplace=True, drop=True)
            importance = pd.DataFrame(importance)
            importance.columns = np.array(['Importance'])
            
    crophints = pd.concat([bounding, confidence, importance], axis=1)
    print(i)
    document = pd.DataFrame({"PetID": [(i.split('.')[0]).split('/')[2]]})
    final = pd.concat([document, label, image, crophints], axis=1)
    return final
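A usage sketch; the directory layout is inferred from the PetID parsing above, (i.split('.')[0]).split('/')[2], and the glob pattern is an assumption:

# Hypothetical driver over a folder of metadata JSON files.
import glob
import pandas as pd

files = glob.glob('input/metadata/*.json')
meta = pd.concat([MetaFunct(f) for f in files], ignore_index=True)
print(meta.shape)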
Example #8
    def _evaluate_model(self, generator, model, iou_threshold, score_threshold, max_detections, save_path):
        """ 
        Evaluate a given dataset using a given model.

        Parameters
        ----------
        generator       : The generator that represents the dataset to evaluate.
        model           : The model to evaluate.
        iou_threshold   : The threshold used to consider when a detection is positive or negative.
        score_threshold : The score confidence threshold to use for detections.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save images with visualized detections to.
        
        Returns
        -------
        all_detections   : A list containing the predicted boxes for each image in the generator.
        """

        box_list = []
        score_list = []
        # NOTE: only the first three images are evaluated here.
        for i in range(0, 3):
            raw_image    = generator.load_image(i)
            image        = generator.preprocess_image(raw_image.copy())
            image, scale = generator.resize_image(image)

            if keras.backend.image_data_format() == 'channels_first':
                image = image.transpose((2, 0, 1))

            # run network
            boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis = 0))[:3]

            # correct boxes for image scale
            boxes /= scale
            
            for box, score in zip(boxes[0], scores[0]):
                # Scores come back sorted in descending order, so stop at the
                # first score below the cutoff (hardcoded 0.5, not score_threshold).
                if score < 0.5:
                    break
    
                b = box.astype(int)
                box_list.append(b)
                score_list.append(score)

            # The original keras-retinanet logic (filtering by score_threshold,
            # sorting by score, capping at max_detections, and grouping the
            # detections per class) was removed in favor of the flat lists above.

        return box_list, score_list