コード例 #1
0
ファイル: data_handler.py プロジェクト: skymatte/rtex
def load_image(image_path):
    """Load the image at *image_path*, resize it to the 224x224 network
    input size, and return the preprocessed array."""
    pil_img = image.load_img(image_path, target_size=(224, 224))
    arr = preprocess_input(image.img_to_array(pil_img))
    # release the underlying PIL file handle before returning
    pil_img.close()
    return arr
コード例 #2
0
# load model
model = load_model(model_path)

# create colormap
image_list = os.listdir(original_dir)
# image_list = ['bamba.jpg', 'kurikara.jpg', 'matii.jpg', 'takemata.jpg', 'togi.jpg', 'urii.jpg']
s_time = time.time()
for image_n in image_list:
    image_name = pathlib.Path(image_n)
    img_s_time = time.time()
    #read images dir
    image_path = original_dir + image_n
    src = img_to_array(load_img(image_path))
    white_in_image = cv2.inRange(src, (255, 255, 255), (255, 255, 255))
    src = preprocess_input(src)
    # src = src / 255.0
    if src is None:
        print(("Failed to load image file :" + image_path))
        continue
    height, width, channels = src.shape[:3]
    predicted_keeper = np.zeros((height, width, 4))
    predicted_num = np.zeros((height, width, 1))  #推論回数

    for r_y in range(0, height, slide):
        X = []
        r = []
        if r_y + size >= height:
            break
        for r_x in range(0, width, slide):
            sys.stdout.write("\r y : %d" % r_y)
コード例 #3
0
    samples_per_batch = 50000 // n_batch

    n_start_batch = 5
    n_selected_batch = 45
    n_ensemble = 10

    df = pd.read_csv(
        os.path.join(output_folder, 'golden_optimization_simgas.csv'))
    sigma_1 = df[df.iteration == 6].sigma_1.values[0]
    sigma_2 = df[df.iteration == 6].sigma_2.values[0]
    sigma_optimum = sigma_2 + (sigma_1 - sigma_2) / 2
    for i in range(n_start_batch, n_start_batch + n_selected_batch):
        images_path = os.path.join(input_folder,
                                   'x_val_' + str(i).zfill(3) + '.npy')
        array = np.load(images_path)
        X_test_batch = preprocess_input(array)
        y_test_batch = y_test[i * samples_per_batch:(i + 1) *
                              samples_per_batch]
        y_test_pred_ensemble = np.zeros((n_ensemble, *y_test_batch.shape),
                                        dtype=np.float32)
        y_pred_base = model.predict(X_test_batch, verbose=1)
        nll_base = log_loss(y_test_batch, y_pred_base)
        print('-' * 100)
        print('nll base {0:.4f}'.format(nll_base))
        print('-' * 100)
        for seed_index, seed in enumerate(range(17, 17 + n_ensemble)):
            np.random.seed(seed)
            model.set_weights(ws)
            wp = np.copy(ws)
            for index2, w in enumerate(ws):
                shape = w.shape
コード例 #4
0
def similar_images(imatge_test, classe_test, sexe_test): # image given in the same format obrir_imatge returns (a plain array)
    """Return the stored image most similar to *imatge_test*.

    Loads the precomputed CNN2 candidate metadata (image paths, classes,
    genders, prediction vectors and findings), keeps only candidates that
    share the test image's class and gender, ranks them by Canberra
    distance between prediction vectors, collects the three closest
    candidates that pass the distance (> 0.03) and SSIM (>= 0.03,
    non-duplicate) filters, and returns the one of those three with the
    highest SSIM, together with its finding.

    Returns a tuple (most_similar_image, finding_most_similar).

    NOTE(review): if fewer than three candidates survive the filters, the
    while-loop exhausts the candidate arrays and np.argmin is then called
    on an empty array — confirm the candidate pool is always big enough.
    """

    # Candidate metadata pickled by the CNN2 pipeline.
    output1 = open(ROOT_DIR +'dataSimilarImages/images_path_CNN2.pkl', 'rb')
    images_path = pickle.load(output1)
    output1.close()

    output2 = open(ROOT_DIR +'dataSimilarImages/classes_CNN2.pkl', 'rb')
    classes = pickle.load(output2)
    output2.close()

    output3 = open(ROOT_DIR +'dataSimilarImages/genders_CNN2.pkl', 'rb')
    genders = pickle.load(output3)
    output3.close()

    output4 = open(ROOT_DIR +'dataSimilarImages/predictions_CNN2.pkl', 'rb')
    predictions = pickle.load(output4)
    output4.close()

    output5 = open(ROOT_DIR +'dataSimilarImages/finding_array_CNN2.pkl', 'rb')
    finding_array = pickle.load(output5)
    output5.close()

    print('Predictions shape:', predictions.shape)

    # Float copy scaled to [0, 1] for the SSIM comparison below
    # (assumes imatge_test is already grayscale, per the name — confirm).
    imatge_grayscale= imatge_test.astype(float)
    imatge_grayscale= imatge_grayscale/255

    # Build a 1 x trainshape x trainshape x 3 batch and run CNN2 on it.
    img_test= resize(imatge_test, (trainshape, trainshape), mode = 'reflect')
    image = np.stack((img_test,) * 3, -1)
    img_train= image.reshape([1, trainshape, trainshape, 3])
    img_train= preprocess_input(img_train)
    prediction_test= loaded_model2.predict(img_train)


    # Boolean masks: same class and same gender as the test image.
    same_class = np.equal(classe_test, classes)
    same_class= np.array(same_class)
    print('same class shape', same_class.shape)
    print('same class array', same_class[0:10])


    genders=np.array(genders)
    same_gender= np.core.defchararray.equal(sexe_test, genders)
    same_gender= np.array(same_gender)

    print('same_gender shape', same_gender.shape)
    print('same_gender array', same_gender[0:10])

    same_class_same_gender= np.logical_and(same_class, same_gender)
    same_class_same_gender= np.array(same_class_same_gender)

    print(type(same_class_same_gender))
    print('same_class_same_gender shape', same_class_same_gender.shape)
    print('same_class_same_gender', same_class_same_gender[0:10])
    #print('Total imatges a comparar:', np.sum(same_class_same_gender))


    # Restrict every per-candidate array to the surviving candidates.
    images_path_similars= images_path[same_class_same_gender]
    predictions_similars= predictions[same_class_same_gender]
    finding_array_similars= finding_array[same_class_same_gender]
    print('images_path_similars', images_path_similars.shape)
    print('predictions_similars', predictions_similars.shape )
    print('finding_array_similars', finding_array_similars.shape)

    # Canberra distance between each candidate's prediction vector and
    # the test image's prediction (smaller = more similar).
    vector_distancies=[]

    for i in range(predictions_similars.shape[0]):
        distancia= distance.canberra(predictions_similars[i,:], prediction_test)
        vector_distancies.append(distancia)

    vector_distancies= np.array(vector_distancies)
    vector_distancies=vector_distancies.reshape([-1,1])
    print('Vector_distancies shape', vector_distancies.shape)


    similar_image_path_list=[]
    finding_array_similars_list=[]
    ssim_values_array=[]
    images_done=0

    # Pop candidates in order of increasing distance until three valid
    # ones are collected; the three parallel arrays are always trimmed
    # together so indices stay aligned.
    while images_done < 3 :

        minimum_position= np.argmin(vector_distancies)

        # Suspiciously small distances (likely the same image) are skipped.
        if vector_distancies[minimum_position] <= 0.03:
            vector_distancies= np.delete(vector_distancies, [minimum_position], axis= 0)
            images_path_similars= np.delete(images_path_similars, [minimum_position])
            finding_array_similars= np.delete(finding_array_similars, [minimum_position])

        else:
            print('Min distance value position', minimum_position , ': ' ,vector_distancies[minimum_position])

            similar_image_path = images_path_similars[minimum_position]
            finding_similar = finding_array_similars[minimum_position]
            temp= obrir_imatge(similar_image_path)
            print('similar_image_path', type(similar_image_path))
            print('obrir_imatge(similar_image_path)', obrir_imatge(similar_image_path).shape)
            print('imatge_test', type(imatge_test))

            ssim_value= compare_sim(imatge_grayscale, temp) # both floats scaled by /255

            # Reject consecutive duplicates and structurally dissimilar images.
            if (images_done != 0 and similar_image_path_list[-1] == similar_image_path) or ssim_value < 0.03:
                vector_distancies= np.delete(vector_distancies, [minimum_position], axis= 0)
                images_path_similars= np.delete(images_path_similars, [minimum_position])
                finding_array_similars= np.delete(finding_array_similars, [minimum_position])

            else:
                ssim_values_array.append(ssim_value)
                similar_image_path_list.append(similar_image_path)
                finding_array_similars_list.append(finding_similar)
                vector_distancies= np.delete(vector_distancies, [minimum_position], axis= 0)
                images_path_similars= np.delete(images_path_similars, [minimum_position] )
                finding_array_similars= np.delete(finding_array_similars, [minimum_position])

                images_done+=1


    # Of the three kept candidates, return the one with the highest SSIM.
    max_ssim_position= np.argmax(ssim_values_array)
    print(similar_image_path_list[0], 'Finding:', finding_array_similars_list[0])
    print(similar_image_path_list[1], 'Finding:', finding_array_similars_list[1])
    print(similar_image_path_list[2], 'Finding:', finding_array_similars_list[2])
    most_similar_image= obrir_imatge(similar_image_path_list[max_ssim_position])
    finding_most_similar= finding_array_similars_list[max_ssim_position]
    print('Most similar', similar_image_path_list[max_ssim_position], 'Finding:', finding_array_similars_list[max_ssim_position])
    #similar_image1= obrir_imatge(similar_image_path_list[0])
    #similar_image2= obrir_imatge(similar_image_path_list[1])
    #similar_image3= obrir_imatge(similar_image_path_list[2])

    return (most_similar_image, finding_most_similar)
コード例 #5
0

# Extract a 1024-d re-ID feature vector for every query image and save
# both the image-name order and the feature matrix to text files.
# Fix: the original used Python 2 `print` statements (a SyntaxError under
# Python 3) and the `range(len(...))` indexing anti-idiom.
query_path = '/home/niruhan/PycharmProjects/DensenetReID/market_for_keras/query/query_images/'

query_names = os.listdir(query_path)

# persist the on-disk name order so rows of the feature matrix stay aligned
np.savetxt('query_names_epoch10.txt', query_names, fmt='%s')

reid_feature_list = np.empty([3368, 1024])

print('hi')

for name_index, query_name in enumerate(query_names):
    if name_index % 500 == 0:
        print(name_index)  # progress indicator
    img_path = query_path + query_name
    img = image.load_img(img_path, target_size=(224, 224))
    img_data = image.img_to_array(img)
    img_data = np.expand_dims(img_data, axis=0)  # single image -> batch of one
    img_data = preprocess_input(img_data)

    reid_feature_list[name_index] = feature_extraction_model.predict(img_data)

np.savetxt('query_reid_feature_list_epoch10.txt', reid_feature_list, fmt='%f')

# for feature in reid_feature_list:
#     dist = np.linalg.norm(feature - reid_feature_list[3])
#     print(dist)

print('hi')
コード例 #6
0
        raw_img = load_img(image_file, target_size=(ROWS, COLS))
        img = img_to_array(raw_img)
        data[i] = img

    return data


train_array = image_array(train_images, len(train_images))
validation_array = image_array(validation_images, len(validation_images))
test_array = image_array(test_images, len(test_images))

np.save('train_array_size256.npy', train_array)
np.save('validation_array_size256.npy', validation_array)
np.save('test_array_size256.npy', test_array)

train_preprocessed = preprocess_input(train_array)
validation_preprocessed = preprocess_input(validation_array)
test_preprocessed = preprocess_input(test_array)

base_model = DenseNet121(input_shape=(256, 256, 3),
                         include_top=False,
                         weights='imagenet')

### Train Bottlenecks

bottleneck_features_train = base_model.predict(train_preprocessed, verbose=1)
#save as numpy array,
np.save('bottleneck_features_train_256size_densenet.npy',
        bottleneck_features_train)

bottleneck_features_validation = base_model.predict(validation_preprocessed,
コード例 #7
0
def preprocess(img):
    """Shrink *img* to the 224x224 network input size (INTER_AREA, good
    for downscaling) and apply the model's preprocessing."""
    resized = cv2.resize(img, (224, 224), interpolation=cv2.INTER_AREA)
    return preprocess_input(resized)
コード例 #8
0
 def preprocess(self, image):
     """Run DenseNet's preprocess_input on *image*, using the data
     format configured on this preprocessor instance."""
     prepared = densenet.preprocess_input(image, data_format=self.dataFormat)
     return prepared
コード例 #9
0
print('Predicting...')
#with CustomObjectScope({'relu6': keras.applications.mobilenet.relu6,'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D}):
#    model = keras.models.load_model(model_name+'.h5')
model = keras.models.load_model(model_path)

# test_path = './test.csv'
test_data = pd.read_csv(test_path)
predict = []
batch_size = 1000
# run the model over the test set in fixed-size batches
for start in range(0, len(test_data), batch_size):
    real_len = len(test_data[start:start + batch_size])
    batch_data = np.array([
        ReadImage(img_folder_path + test_data['Image Index'][idx])
        for idx in range(start, start + real_len)
    ])
    reshaped = batch_data.reshape(-1, image_size, image_size, 3)
    batch_predict = model.predict(preprocess_input(reshaped))
    predict.extend(batch_predict[k] for k in range(real_len))
    if start % 100 == 0:
        print(start, '/', len(test_data))
predict = np.array(predict)
result = pd.DataFrame({'Id': test_data['Image Index']})
#print(result)
# one output column per pathology label
for d in range(len(columns)):
    print(columns[d], predict[:, d])
    result[columns[d]] = predict[:, d]
result.to_csv(output_path, index=False)
コード例 #10
0
def save_bottlebeck_features(model_name):
    """Extract per-patch CNN features for every .svs slide in wsi_dir.

    For each slide, runs the chosen ImageNet backbone (avg-pooled, no
    top) over all of the slide's PNG patches found in data_dir and, when
    save_features is truthy, saves the feature list plus patch names to
    ./step2_output/<patient_id>_feat.mat.

    Fixes vs. original: the two duplicated if/elif dispatch chains
    (model construction and per-model preprocessing) are collapsed into
    one mapping, and the `save_features==True` comparison is replaced by
    a plain truthiness test.
    """
    # backbone name -> (constructor, preprocess fn, pass input_shape?).
    # Feature dims: resnet50 2048, nasnet_large 4032, xception 2048,
    # inceptionv3 2048, inceptionresnetv2 1536, densenet 1920.
    backbones = {
        'resnet50': (resnet50.ResNet50, resnet50.preprocess_input, False),
        'nasnet_large': (nasnet.NASNetLarge, nasnet.preprocess_input, True),
        'xception': (xception.Xception, xception.preprocess_input, True),
        'inceptionv3': (inception_v3.InceptionV3, inception_v3.preprocess_input, True),
        'inceptionresnetv2': (inception_resnet_v2.InceptionResNetV2,
                              inception_resnet_v2.preprocess_input, True),
        'densenet': (densenet.DenseNet201, densenet.preprocess_input, True),
    }
    # unknown names fall back to VGG19 (512-d), matching the original chain
    constructor, preprocess, with_shape = backbones.get(
        model_name, (vgg19.VGG19, vgg19.preprocess_input, False))
    kwargs = dict(weights='imagenet', include_top=False, pooling='avg')
    if with_shape:
        kwargs['input_shape'] = (img_height, img_width, 3)
    model = constructor(**kwargs)

    images = os.listdir(wsi_dir)
    for image_name in images:
        if '.svs' in image_name:
            # first 23 chars of the slide filename identify the patient
            patient_id = image_name[0:23]
            image_features = []
            image_names = []
            patches = glob.glob(data_dir + patient_id + '*.png')

            for patch_name in patches:
                # NOTE(review): backslash split is Windows-specific and
                # takes element [1], not the basename — confirm the
                # intended platform/path depth before changing.
                patch_split = patch_name.split("\\")
                img = image.load_img(patch_name, target_size=(img_height, img_width))
                # convert image to numpy array, then expand
                # (H, W, 3) -> (1, H, W, 3): predict expects a batch
                x = image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess(x)

                # extract the features for this patch
                features = model.predict(x)[0]

                image_features.append(features)
                image_names.append(patch_split[1])

            if save_features:
                scipy.io.savemat('./step2_output/' + patient_id + '_feat.mat',
                                 mdict={'image_features': image_features,
                                        'image_names': image_names})
コード例 #11
0
                          include_top=True,
                          weights=None,
                          pooling='avg',
                          input_shape=(img_rows, img_cols, img_channels),
                          classes=10)
model.compile(
    loss='categorical_crossentropy',
    optimizer=AdaBound(),  #keras.optimizers.SGD(momentum=0.9),
    metrics=['acc'])
model.summary()

(trainX, trainY), (testX, testY) = keras.datasets.cifar10.load_data()

trainX = trainX.astype('float32')
testX = testX.astype('float32')

trainX = densenet.preprocess_input(trainX)
testX = densenet.preprocess_input(testX)

Y_train = keras.utils.to_categorical(trainY, 10)
Y_test = keras.utils.to_categorical(testY, 10)

history = model.fit(trainX,
                    Y_train,
                    batch_size=batch_size,
                    epochs=nb_epoch,
                    validation_split=0.1)

with open('./cifar10_densenet_adabound.hist', 'w') as fp:
    json.dump(history.history, fp)
コード例 #12
0
def main():
    """Extract 1024-d DenseNet121 'avg_pool' features for all train and
    test images of one superclass and pickle features, labels and image
    names together.

    Usage: script.py <superclass> <model_weight_path>

    Fixes vs. original: `exit()` (site-module helper, status 0) replaced
    by `sys.exit(1)` on the usage error; the loop variable no longer
    shadows the builtin `iter`; the pickle file is written via `with` so
    the handle is closed even on error.
    """
    # Parameters
    if len(sys.argv) == 3:
        superclass = sys.argv[1]
        model_weight = sys.argv[2]
    else:
        print('Parameters error')
        sys.exit(1)

    # The constants
    classNum = {'A': 40, 'F': 40, 'V': 40, 'E': 40, 'H': 24}
    testName = {'A': 'a', 'F': 'a', 'V': 'b', 'E': 'b', 'H': 'b'}
    date = '20180321'

    # Feature extraction model: full DenseNet121 classifier re-targeted
    # to its global-average-pool layer so predict() yields 1024-d vectors.
    base_model = DenseNet121(include_top=True, weights=None,
                           input_tensor=None, input_shape=None,
                           pooling=None, classes=classNum[superclass[0]])
    base_model.load_weights(model_weight)
    model = Model(inputs=base_model.inputs,
                  outputs=base_model.get_layer('avg_pool').output)

    # directory names share a common prefix; build it once
    prefix = 'zsl_' + testName[superclass[0]] + '_' + str(superclass).lower()
    imgdir_train = '../' + prefix + '_train_' + date \
                   + '/' + prefix + '_train_images_' + date
    imgdir_test = '../' + prefix + '_test_' + date
    categories = os.listdir(imgdir_train)
    categories.append('test')

    def _classpath(eachclass):
        # 'test' images live in their own directory; every other class is
        # a subdirectory of the training root
        return imgdir_test if eachclass == 'test' else imgdir_train + '/' + eachclass

    # count images up front so the feature matrix can be pre-allocated
    num = 0
    for eachclass in categories:
        if eachclass[0] == '.':
            continue
        num += len(os.listdir(_classpath(eachclass)))

    print('Total image number = ' + str(num))

    features_all = np.ndarray((num, 1024))
    labels_all = list()
    images_all = list()
    idx = 0

    # Feature extraction
    for eachclass in tqdm(categories):
        if eachclass[0] == '.':
            continue
        classpath = _classpath(eachclass)
        imgs = os.listdir(classpath)

        for eachimg in imgs:
            if eachimg[0] == '.':
                continue

            img_path = classpath + '/' + eachimg
            img = image.load_img(img_path, target_size=(224, 224))
            x = image.img_to_array(img)
            x = np.expand_dims(x, axis=0)  # single image -> batch of one
            x = preprocess_input(x)
            feature = model.predict(x)

            features_all[idx, :] = feature
            labels_all.append(eachclass)
            images_all.append(eachimg)
            idx += 1

    # trim to the number of images actually processed (dot-files skipped)
    features_all = features_all[:idx, :]
    labels_all = labels_all[:idx]
    images_all = images_all[:idx]
    data_all = {'features_all': features_all, 'labels_all': labels_all,
                'images_all': images_all}

    # Save features
    savename = 'features_' + superclass + '.pickle'
    with open(savename, 'wb') as fsave:
        pickle.dump(data_all, fsave)
コード例 #13
0
    #load keras model
    init_model = load_model(
        '../input/densenet-8020/densenet169_one_cycle_model.h5')

    class_label = ['no_tumor', 'tumor']

    random_int = np.random.choice(len(list_tumor))
    img_no_tumor = load_img(
        '/kaggle/input/histopathologic-cancer-detection/train/' +
        list_no_tumor[random_int] + '.tif')
    img_tumor = load_img(
        '/kaggle/input/histopathologic-cancer-detection/train/' +
        list_tumor[random_int] + '.tif')

    img_no_tumor = img_to_array(img_no_tumor)
    img_no_tumor = preprocess_input(img_no_tumor)
    y_pred_no_tumor = init_model.predict(img_no_tumor[np.newaxis, ...])

    img_tumor = img_to_array(img_tumor)
    img_tumor = preprocess_input(img_tumor)
    y_pred_tumor = init_model.predict(img_tumor[np.newaxis, ...])

    layer_idx = utils.find_layer_idx(init_model, 'dense_3')
    # Swap softmax with linear
    init_model.layers[layer_idx].activation = keras.activations.linear
    model = utils.apply_modifications(init_model)

    penultimate_layer_idx = utils.find_layer_idx(model, "relu")

    seed_input = img_no_tumor
    grad_top1_no_tumor = visualize_cam(
コード例 #14
0
ファイル: predict.py プロジェクト: DenisFromRussia/PetFinder
def load_image(path):
    """Read the image at *path*, pad/resize it to a square with
    resize_to_square, and return the network-preprocessed result.

    Fix: the original wrapped the path in a no-op f-string (f'{path}');
    str(path) keeps the same behavior for non-str path objects.
    """
    image = cv2.imread(str(path))
    new_image = resize_to_square(image)
    new_image = preprocess_input(new_image)
    return new_image
コード例 #15
0
def preproc_input_classifcation(img):
    """Apply DenseNet's preprocess_input to *img*; the import is kept
    local so the heavy keras import happens only when needed."""
    from keras.applications.densenet import preprocess_input
    preprocessed = preprocess_input(img)
    return preprocessed
コード例 #16
0
DIRECTORY = 'D:/diabetes/models/flagship'
model_names = listdir(DIRECTORY)
model_path_names = [
    '{}/{}'.format(DIRECTORY, name) for name in model_names
    if isfile(join(DIRECTORY, name))
]

file = h5py.File('D:/diabetes/kaggle/data_check_384.h5', 'r')

x_test = file['x_check']
y_test = file['y_check']

x_test = np.asarray(x_test)
y_test = np.asarray(y_test)

x_test = preprocess_input(x_test)

kappas = []
for path in model_path_names:
    model = load_model(path,
                       custom_objects={
                           'f1_loss': f1_loss,
                           'multi_label_acc': multi_label_acc,
                           'f1_m': f1_m
                       })
    predictions = model.predict(x_test, batch_size=5, verbose=1)
    predictions_bool = predictions > 0.5
    predictions_int = predictions_bool.astype(int).sum(axis=1) - 1
    val = {
        'path': path,
        'kappa': cohen_kappa_score(predictions_int,
コード例 #17
0
def PreprocessValidationData(X, Y, imageDim=224):
    """Stack X into an (N, imageDim, imageDim, 3) batch, run the network
    preprocessing on it, and return it together with Y as an array."""
    batch = np.array(X).reshape(-1, imageDim, imageDim, 3)
    return preprocess_input(batch), np.array(Y)
コード例 #18
0
def load_image(path, pet_id):
    """Read the first photo of *pet_id* from *path*, square it with
    resize_to_square, and return the network-preprocessed array."""
    raw = cv2.imread(f'{path}{pet_id}-1.jpg')
    squared = resize_to_square(raw)
    return preprocess_input(squared)
コード例 #19
0
# iteration count
_iter = 1
"""
    Main
"""
if __name__ == '__main__':
    # load the model
    model = DenseNet201()
    # load an image from file
    image = load_img('mug.jpg', target_size=(224, 224))
    # convert the image pixels to a numpy array
    image = img_to_array(image)
    # reshape data for the model: (H, W, C) -> batch of one
    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
    # prepare the image for the DenseNet model (comment previously said VGG)
    image = preprocess_input(image)

    # predict the probability across all output classes
    for i in range(_iter):
        # fix: raw_input is Python 2 only; input() is the Python 3 equivalent
        input('{} iteration, press any key to perform...'.format(str(i)))
        yhat = model.predict(image)

    # return if not iter
    if not _iter: exit()
    # convert the probabilities to class labels
    label = decode_predictions(yhat)
    # retrieve the most likely result, e.g. highest probability
    label = label[0][0]
    # print the classification
    print('%s (%.2f%%)' % (label[1], label[2] * 100))
    # done.
コード例 #20
0
ファイル: feature.py プロジェクト: kkmonster/aXeleRate
 def normalize(self, image):
     """Normalize *image* with NASNet's preprocess_input (imported
     lazily to avoid the keras import at module load time)."""
     from keras.applications.nasnet import preprocess_input
     normalized = preprocess_input(image)
     return normalized
 def load_image(path):
     """Read `<path>.png` without channel conversion (alpha preserved)
     and return the network-preprocessed array."""
     raw = cv2.imread(path + '.png', cv2.IMREAD_UNCHANGED)
     return preprocess_input(raw)
コード例 #22
0
def load_test_image(pet_id):
    """Load the first test photo of *pet_id* from the project's
    data\\test_images directory, square it, and preprocess it."""
    path = f'{Path(os.getcwd()).parents[0]}\\data\\test_images\\'
    raw = cv2.imread(f'{path}{pet_id}-1.jpg')
    squared = resize_to_square(raw)
    return preprocess_input(squared)
コード例 #23
0
import os
import sys

import numpy as np
from keras.applications.densenet import DenseNet201
from keras.applications.densenet import preprocess_input, decode_predictions
from keras.preprocessing import image

# directory of images to classify, given on the command line
data_dir = sys.argv[1]

model = DenseNet201(weights='imagenet')

# classify every file in the directory and print its top-5 labels
for filename in os.listdir(data_dir):
    imgfile = data_dir + '/' + filename
    img = image.load_img(imgfile, target_size=(224, 224))
    batch = np.expand_dims(image.img_to_array(img), axis=0)
    batch = preprocess_input(batch)

    preds = model.predict(batch)
    print(filename + ' --- ' + str(decode_predictions(preds, top=5)[0]))
コード例 #24
0
import os
from collections import Counter

import numpy as np
from keras.applications.densenet import DenseNet121, preprocess_input, decode_predictions
from keras.preprocessing.image import load_img, img_to_array

if __name__ == "__main__":
    # classify every validation bee image and tally the top-1 labels
    img_dir = "data/val/bees"
    paths = [os.path.join(img_dir, name) for name in os.listdir(img_dir)]

    model = DenseNet121()
    arrays = [
        img_to_array(load_img(p, target_size=(224, 224))) for p in paths
    ]
    batch = preprocess_input(np.stack(arrays, axis=0))
    preds = model.predict(batch)
    top1 = decode_predictions(preds, top=1)
    print(Counter(entry[0][1] for entry in top1))
コード例 #25
0
def fn_reader_val(d, path, size):
    """Read one validation sample: the patient's PNG resized to *size*
    and preprocessed, paired with a 2-class one-hot target."""
    filename = path + "/" + d["patientId"] + ".png"
    pixels = np.array(Image.open(filename).resize(size))
    inputs = [preprocess_input(pixels.astype(np.float32))]
    targets = [to_categorical(d["Target"], num_classes=2)]
    return inputs, targets
コード例 #26
0
 def compute_image_embedding(self, image_path):
     """Load the image at *image_path* and return it as a single-item,
     DenseNet-preprocessed 224x224 batch ready for the embedding model."""
     pil_img = image.load_img(image_path, target_size=(224, 224))
     vec = np.expand_dims(image.img_to_array(pil_img), axis=0)
     return densenet.preprocess_input(vec)
コード例 #27
0
def chexnet_preprocess_input(value):
    """Thin CheXNet-named alias around the backbone's preprocess_input."""
    result = preprocess_input(value)
    return result
def preprocess_image(img):
    """DenseNet preprocessing with a lazy keras import so the dependency
    is only paid when this helper is actually called."""
    from keras.applications.densenet import preprocess_input as densenet_pp
    return densenet_pp(img)
コード例 #29
0
ファイル: generator.py プロジェクト: 347001569/msloss
 def cv_imread(self, file_path):
     """Read *file_path* with OpenCV, resize it to the generator's square
     img_scale, and apply the network preprocessing."""
     raw = cv2.imread(file_path)
     scaled = cv2.resize(raw, (self.img_scale, self.img_scale))
     return preprocess_input(scaled)
コード例 #30
0
def process(x):
    """Pass *x* straight through the model's preprocess_input."""
    processed = preprocess_input(x)
    return processed