Code example #1
import numpy as np

# do_preprocess, PREPROCESS_IMG_SIZE, my_gen_img_tensor and dicts_models are
# assumed to be defined elsewhere in this module.
def predict_softmax(img1, preprocess=False):
    if preprocess:
        img1 = do_preprocess(img1, crop_size=PREPROCESS_IMG_SIZE)

    prob_np = []
    prob = []
    pred = []  # only one class here; the multi-class multi-label multi_models variant has multiple classes

    for dict1 in dicts_models:
        # real-time image augmentation
        img_tensor = my_gen_img_tensor(img1, image_shape=dict1['input_shape'])
        prob1 = dict1['model'].predict_on_batch(img_tensor)
        prob1 = np.mean(prob1, axis=0)  # mean over the test-time augmentation batch
        pred1 = prob1.argmax(axis=-1)

        prob_np.append(prob1)  # keep the numpy array for the weighted average (prob_total)

        prob.append(prob1.tolist())  # XMLRPC cannot serialize numpy arrays
        pred.append(int(pred1))  # convert numpy.int64 to plain int for XMLRPC

    list_weights = []  # the prediction weight of each model
    for dict1 in dicts_models:
        list_weights.append(dict1['model_weight'])

    prob_total = np.average(prob_np, axis=0, weights=list_weights)
    pred_total = prob_total.argmax(axis=-1)

    prob_total = prob_total.tolist()  # the RPC service cannot pass numpy variables
    pred_total = int(pred_total)  # convert numpy.int64 to plain int for XMLRPC

    # correct_model_no chooses which model is used to generate the CAM;
    # in the extreme case, the class predicted by the weighted average is not
    # predicted by every individual model.
    correct_model_no = 0
    for i, pred1 in enumerate(pred):
        if pred1 == pred_total:
            correct_model_no = i  # model indices start from 0
            break

    return prob, pred, prob_total, pred_total, correct_model_no
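
The function reads the module-level list dicts_models. A minimal usage sketch follows, assuming standalone Keras models saved as .h5 files; only the dict keys 'model', 'input_shape' and 'model_weight' come from the code above, while the file names, weight values and the sample image path are illustrative assumptions:

from keras.models import load_model

# Hypothetical setup: predict_softmax() expects dicts_models in the same module.
dicts_models = [
    {'model': load_model('model_a.h5'), 'input_shape': (299, 299, 3), 'model_weight': 1.0},
    {'model': load_model('model_b.h5'), 'input_shape': (299, 299, 3), 'model_weight': 0.8},
]

prob, pred, prob_total, pred_total, correct_model_no = predict_softmax('sample_image.jpg', preprocess=True)
print(pred_total, correct_model_no)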
Code example #2
    # snippet from a heatmap-export routine: dir_dest, heatmap_type, filename_csv,
    # dir_preprocess, model1 and analyzer are assumed to be defined by the enclosing function
    save_dir = os.path.join(dir_dest, heatmap_type)

    df = pd.read_csv(filename_csv)
    for _, row in df.iterrows():
        image_file = row['images']
        image_label = int(row['labels'])
        assert dir_preprocess in image_file, 'preprocess directory error'

        preprocess = False
        image_size = 299
        if preprocess:
            img_preprocess = my_preprocess.do_preprocess(image_file,
                                                         crop_size=384)
            img_input = my_gen_img_tensor(img_preprocess,
                                          image_shape=(image_size, image_size,
                                                       3))
        else:
            img_source = image_file
            img_input = my_gen_img_tensor(image_file,
                                          image_shape=(image_size, image_size,
                                                       3))

        probs = model1.predict(img_input)
        class_predict = np.argmax(probs)

        if class_predict == 1:
            # Apply analyzer w.r.t. maximum activated output-neuron
            data_heatmap = analyzer.analyze(img_input)

            # Aggregate along color channels and normalize to [-1, 1]
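
The example breaks off here. As a hedged sketch only, the aggregation and normalization described in the last comment is commonly written as below, assuming data_heatmap is a numpy array of shape (1, height, width, 3) as returned by an iNNvestigate-style analyzer (the variable names are illustrative, not from the source):

import numpy as np

# Sum the relevance over the color channel and drop the batch dimension, then
# divide by the maximum absolute value so the heatmap lies in [-1, 1].
heatmap = data_heatmap.sum(axis=-1)[0]
heatmap = heatmap / np.max(np.abs(heatmap))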
Code example #3
import os

import pandas as pd
from LIBS.ImgPreprocess.my_image_helper import my_gen_img_tensor

# filename_csv, dicts_models, heatmap_type, blend_original_image and
# server_grad_cam are assumed to be defined earlier in the script.
df = pd.read_csv(filename_csv)

for _, row in df.iterrows():
    image_file = row['images']
    image_label = int(row['labels'])

    preprocess = False
    input_shape = (299, 299, 3)

    img_source = image_file
    file_dir, filename = os.path.split(img_source)
    file_basename, file_ext = os.path.splitext(filename)

    img_input = my_gen_img_tensor(image_file, image_shape=input_shape)

    for model_no in range(len(dicts_models)):
        model1 = dicts_models[model_no]['model_original']
        probs = model1.predict(img_input)

        for j in range(len(probs[0])):  # number of classes
            if probs[0][j] > 0.5:
                print(j)

                if heatmap_type == 'grad_cam':
                    filename_CAM1 = server_grad_cam(dicts_models=dicts_models, model_no=model_no,
                                                    img_source=img_source, pred=j,
                                                    preprocess=False, blend_original_image=blend_original_image,
                                                    base_dir_save='/tmp/temp_cam/')
                if heatmap_type == 'CAM':