Example 1
def train():

    # Load Dataset
    (train_data, train_label), (test_data, test_label) = cifar10.load_data()
    train_data = preprocess_input(train_data.astype('float32'))
    test_data = preprocess_input(test_data.astype('float32'))
    train_label = to_categorical(train_label, 10)
    test_label = to_categorical(test_label, 10)

    # Init Classifier
    inception_model = create_inceptionv3(32, 256, 10)
    inception_model.compile(optimizer='sgd',
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])

    # Train Classifier
    if not os.path.isdir('./checkpoint'):
        os.makedirs('./checkpoint')
    earlystopping = EarlyStopping(patience=2)
    modelcheckpoint = ModelCheckpoint('./checkpoint/best.h5',
                                      save_best_only=True,
                                      save_weights_only=True)
    inception_model.fit(train_data,
                        train_label,
                        batch_size=64,
                        epochs=10,
                        validation_data=(test_data, test_label),
                        callbacks=[earlystopping, modelcheckpoint])
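
After training, the weights saved by the checkpoint callback can be restored for evaluation; a minimal sketch using the standard Keras API:

inception_model.load_weights('./checkpoint/best.h5')
loss, acc = inception_model.evaluate(test_data, test_label, batch_size=64)
print('test accuracy: %.4f' % acc)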
Example 2
def main():

    args = parser.parse_args()
    print(args)

    # prepare the inception v3 model
    model = InceptionV3(include_top=False,
                        pooling='avg',
                        input_shape=(299, 299, 3))

    pos = ['top_right', 'top_left', 'bottom_left', 'bottom_right']
    # pos = ['center']
    fid = np.zeros(len(pos), dtype=np.float64)  # float array, so FID scores are not truncated to ints

    for i, p in enumerate(pos):
        path1 = pathlib.Path(args.path[0])
        files1 = list(path1.glob('*.png'))
        images1 = np.array([imread(str(fn)) for fn in files1])
        images1 = crop_images(images1, 299, 299, p)
        images1 = preprocess_input(images1)

        path2 = pathlib.Path(args.path[1])
        files2 = list(path2.glob('*.png'))
        images2 = np.array([imread(str(fn)) for fn in files2])
        images2 = crop_images(images2, 299, 299, p)
        images2 = preprocess_input(images2)

        # fid between images1 and images2
        fid[i] = calculate_fid(model, images1, images2)

    fid = np.mean(fid)
    print('FID: %.3f' % fid)

    return 0
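
calculate_fid is not defined in this snippet. A minimal sketch of a common implementation, assuming scipy is installed and that the model above (built with pooling='avg') returns one feature vector per image:

from scipy.linalg import sqrtm

def calculate_fid(model, images1, images2):
    # Inception activations for each image set
    act1 = model.predict(images1)
    act2 = model.predict(images2)
    # per-set mean and covariance of the activations
    mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
    mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)
    # Frechet distance: ||mu1 - mu2||^2 + Tr(S1 + S2 - 2 sqrt(S1 S2))
    ssdiff = np.sum((mu1 - mu2) ** 2.0)
    covmean = sqrtm(sigma1.dot(sigma2))
    if np.iscomplexobj(covmean):
        covmean = covmean.real  # discard tiny imaginary parts from sqrtm
    return ssdiff + np.trace(sigma1 + sigma2 - 2.0 * covmean)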
Example 3
def timedPreprocessInput(x, **kwargs):
    global preprocessingTime
    t0 = time.perf_counter_ns()
    result = preprocess_input(x, **kwargs)
    elapsed_ns = time.perf_counter_ns() - t0
    preprocessingTime += elapsed_ns
    return result
Example 4
    def fid_score(self, images1, images2):
        ''' Calculates the FID score between two sets of images. '''
        images1 = images1.astype('float32')
        images2 = images2.astype('float32')

        images1 = preprocess_input(images1)
        images2 = preprocess_input(images2)
        fid = self.calculate_fid(images1, images2)

        return fid
Example 5
    def evaluate(self, images1, images2):
        images1 = preprocess_input(self.scale_images(images1))
        images2 = preprocess_input(self.scale_images(images2))

        act1 = self.model.predict(images1)
        act2 = self.model.predict(images2)

        mu1, sigma1 = act1.mean(axis=0), np.cov(act1, rowvar=False)
        mu2, sigma2 = act2.mean(axis=0), np.cov(act2, rowvar=False)

        return self.calculate_fid(mu1, sigma1, mu2, sigma2)
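
The scale_images helper used above is not shown; a plausible implementation with scikit-image (the default target shape here is an assumption):

    def scale_images(self, images, new_shape=(299, 299, 3)):
        # resize each image to the Inception input size (scikit-image assumed)
        from skimage.transform import resize
        return np.asarray([resize(img, new_shape, 0) for img in images])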
Example 6
def train_v2(xtrain, ytrain, xval, yval, xtest, ytest):
    # preprocess
    xtrain = preprocess_input(xtrain)
    xval = preprocess_input(xval)
    xtest = preprocess_input(xtest)
    extractor_model = InceptionV3(weights='imagenet', include_top=False)

    xtrain = extractor_model.predict(xtrain)
    xval = extractor_model.predict(xval)
    xtest = extractor_model.predict(xtest)

    # linear softmax model
    num_categories = ytrain.shape[1]
    input_shape = xtrain.shape[1:]

    inputs = Input(shape=input_shape)  # input layer
    flat = Flatten()(inputs)
    outputs = Dense(num_categories, activation='softmax')(flat)  # output layer
    model = Model(inputs, outputs)

    # For a multi-class classification problem
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Distribute the neural network over multiple GPUs if available.
    gpu_count = len(available_gpus())
    if gpu_count > 1:
        print(f"\n\nModel parallelized over {gpu_count} GPUs.\n\n")
        parallel_model = keras.utils.multi_gpu_model(model, gpus=gpu_count)
    else:
        print("\n\nModel not parallelized over GPUs.\n\n")
        parallel_model = model

    parallel_model.compile(
        optimizer='adam',
        loss='categorical_crossentropy',
        metrics=['accuracy'],
    )

    # create a checkpoint to save the model history
    csv_logger = keras.callbacks.CSVLogger(
        opt.output_dir + "history_" + opt.model + ".csv", )

    # train
    print("ephochs2:", opt.epoch2)
    parallel_model.fit(xtrain,
                       ytrain,
                       validation_data=(xval, yval),
                       epochs=opt.epoch2,
                       callbacks=[csv_logger])

    results2(xtrain, ytrain, xval, yval, xtest, ytest, parallel_model)
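
available_gpus is defined elsewhere in the source; one possible implementation for TensorFlow 2 (an assumption, since multi_gpu_model dates from earlier tf.keras releases):

import tensorflow as tf

def available_gpus():
    # GPUs visible to TensorFlow; an empty list disables parallelization
    return tf.config.list_physical_devices('GPU')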
Example 7
def get_fid_accuracy(path1, prefix1, path2, prefix2, samples_number, dim):
    inception_model = InceptionV3(include_top=False,
                                  pooling='avg',
                                  input_shape=(dim, dim, 3))
    sample_set1 = get_samples(path1, prefix1, samples_number - 1)
    sample_set2 = get_samples(path2, prefix2, samples_number - 1)

    sample_set1 = sample_set1.astype('float32')
    sample_set1 = preprocess_input(scale_samples(sample_set1, (dim, dim, 3)))
    sample_set2 = sample_set2.astype('float32')
    sample_set2 = preprocess_input(scale_samples(sample_set2, (dim, dim, 3)))

    fid = compute_fid(inception_model, sample_set1, sample_set2)
    return fid
Example 8
def extractBottlenecks(cv2img, augcount):

    rgb_img = cv2.cvtColor(cv2img, cv2.COLOR_BGR2RGB)
    x = rgb_img.astype(np.float32)

    # add a batch dimension
    x = np.expand_dims(x, axis=0)
    # call model-specific preprocessing function
    x = preprocess_input(x)

    # WARNING: predict_generator did not work in this Keras version, so it was replaced
    #features = bottleneck_creator.predict_generator(datagen.flow(x, batch_size=1), augcount)
    #new_df = pd.DataFrame(features, columns=np.arange(2048))
    #return new_df

    # features = bottleneck_creator.predict(x)
    i = 0
    df = pd.DataFrame(columns=np.arange(2048))
    for xi in datagen.flow(x, batch_size=augcount):
        features = bottleneck_creator.predict(xi)
        new_df = pd.DataFrame(features, columns=np.arange(2048))
        df = pd.concat([df, new_df], axis=0)
        #print(df.shape)
        i = i + 1
        if i == augcount:
            break
    return df
Example 9
from math import floor
from numpy import expand_dims, log, mean, std, exp

def calculate_inception_score(images, n_split=10, eps=1E-16):
    # load inception v3 model
    model = InceptionV3()
    # convert from uint8 to float32
    processed = images.astype('float32')
    # pre-process raw images for inception v3 model
    processed = preprocess_input(processed)
    # predict class probabilities for images
    yhat = model.predict(processed)
    # enumerate splits of images/predictions
    scores = list()
    n_part = floor(images.shape[0] / n_split)
    for i in range(n_split):
        # retrieve p(y|x)
        ix_start, ix_end = i * n_part, i * n_part + n_part
        p_yx = yhat[ix_start:ix_end]
        # calculate p(y)
        p_y = expand_dims(p_yx.mean(axis=0), 0)
        # calculate KL divergence using log probabilities
        kl_d = p_yx * (log(p_yx + eps) - log(p_y + eps))
        # sum over classes
        sum_kl_d = kl_d.sum(axis=1)
        # average over images
        avg_kl_d = mean(sum_kl_d)
        # undo the log
        is_score = exp(avg_kl_d)
        # store
        scores.append(is_score)
    # average across images
    is_avg, is_std = mean(scores), std(scores)
    return is_avg, is_std
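
Example usage on random data (real use would pass generated samples already sized for Inception's 299x299 input):

import numpy as np

images = np.random.randint(0, 255, (50, 299, 299, 3), dtype='uint8')
is_avg, is_std = calculate_inception_score(images, n_split=5)
print('inception score: %.3f +/- %.3f' % (is_avg, is_std))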
Example 10
    def calculate_inception_score(self, images, n_split=10, eps=1E-16):
        # load inception v3 model
        model = InceptionV3()
        # enumerate splits of images/predictions
        scores = list()
        n_part = floor(images.shape[0] / n_split)
        for i in range(n_split):
            # retrieve images
            ix_start, ix_end = i * n_part, (i + 1) * n_part
            subset = images[ix_start:ix_end]
            # convert from uint8 to float32
            subset = subset.astype('float32')
            # scale images to the required size
            subset = self.scale_images(subset, (299, 299, 3))
            # pre-process images, scale to [-1,1]
            subset = preprocess_input(subset)
            # predict p(y|x)
            p_yx = model.predict(subset)
            # calculate p(y)
            p_y = expand_dims(p_yx.mean(axis=0), 0)
            # calculate KL divergence using log probabilities
            kl_d = p_yx * (log(p_yx + eps) - log(p_y + eps))
            # sum over classes
            sum_kl_d = kl_d.sum(axis=1)
            # average over images
            avg_kl_d = mean(sum_kl_d)
            # undo the log
            is_score = exp(avg_kl_d)
            # store
            scores.append(is_score)
        # average across images
        is_avg, is_std = mean(scores), std(scores)
        return is_avg, is_std
Example 11
def build_model(classes=2):
    inputs = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, 3))
    x = preprocess_input(inputs)
    x = InceptionV3(weights=None, classes=classes)(x)
    model = Model(inputs=inputs, outputs=x)
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'])
    return model
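
A quick smoke test of build_model (IMAGE_SIZE is defined elsewhere in the source; 299 is assumed here):

IMAGE_SIZE = 299  # assumed; the original defines it elsewhere
model = build_model(classes=2)
model.summary()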
Example 12
    def call(self, inputs, **kwargs):
        """
        Takes the given input, runs a single forward pass, and returns the output.

        :param inputs: Tensor representing a single video. Should be of the shape [300, 299, 299, 3]
        :return: A tensor representing the probability that the video is fake at any one frame.
        """

        # Preprocess input
        processed_input = preprocess_input(inputs)

        # Run the inputs through the inception model. Output is of size [frames, 8, 8, 2048]
        out = self.inception_model(processed_input)

        # Normalize the input to the RNN. Output is shape [frames, 2048]
        out = self.pooling(out)
        out = self.reshaping(out)

        # Run the inputs through the RNN. Adds a dummy batch dimension, which is 1. Output is of shape [1,300,1]
        out = tf.expand_dims(out, axis=0)
        out = self.RNN(out)

        # Squeeze the output down to a single column vector of shape [frames]
        out = tf.squeeze(out)

        return out
Example 13
def classifyImage(request):
    if request.method == 'POST':
        image = request.FILES['image']
        fs = FileSystemStorage()
        imageName = fs.save(image.name, image)
        imageName = fs.url(imageName)
        loc = '.' + imageName
        
        img = load_img(loc, target_size=(100, 100))
        img_arry = img_to_array(img)
        to_pred = np.expand_dims(img_arry, axis = 0)
        prep = preprocess_input(to_pred)
        prediction = model.predict(prep)
        percentage = np.max(prediction)
        prediction = np.argmax(prediction)

        if percentage > 0.5:
            ans = label['0'][str(prediction)].split()[0]
        else:
            ans = "Sorry, unable to classify"

        context = {
            'imageName': imageName,
            'label': ans
        }
        return render(request, "classifier_app/home.html", context)
Example 14
def dream_on(original_img, feature_extractor, output_dir, iterations=1000, save_every=10, downscale_factor=2):

    # processed_img = preprocess_image(original_img)
    processed_img = original_img
    processed_img = tf.image.resize(
        processed_img,
        (int(processed_img.shape[1] / downscale_factor),
         int(processed_img.shape[2] / downscale_factor)))
    img = processed_img

    x_size, y_size = int(processed_img.shape[1]), int(processed_img.shape[2])
    print(f"x_size: {x_size}, y_size:{y_size}")

    for i in range(iterations):
        files = os.listdir(f"{output_dir}")
        files = sorted(files, key=lambda x: int(x.split("_")[3].split(".")[0]))
        print(f"recent saves: {files[-2:]}")
    
        if os.path.isfile(f"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg"):
            print(f"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg Exist")

        elif len(os.listdir(f"{output_dir}"))==0:
            img = processed_img
            #img = tf.keras.preprocessing.image.img_to_array(img)
            tf.keras.preprocessing.image.save_img(f"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg", deprocess_image(img.numpy()))
        else:
            lastfile = files[-1]
        
            img = tf.keras.preprocessing.image.load_img(f"{output_dir}/{lastfile}")
            img = tf.keras.preprocessing.image.img_to_array(img)
            
            x_trim = 2
            y_trim = 2

            print(img.shape)
            #img = img[0:x_size-x_trim, 0:y_size-y_trim]
            img = tf.image.central_crop(img, central_fraction=0.99)
            img = tf.image.resize(img, (x_size, y_size))
            print(img.shape)

            #kernel = np.ones((5,5),np.float32)/25
            #img = cv2.filter2D(np.array(img),-1,kernel)
            #img = cv2.GaussianBlur(np.array(img), (9, 9), 0)
            #img = cv2.resize(img, (y_size, x_size))

            print(img.shape)
            img = tf.expand_dims(img, axis=0)
            img = inception_v3.preprocess_input(img)
            print(i%save_every)

            img = gradient_ascent_loop(img, feature_extractor, optim_steps, step_size, max_loss=None)

            if save_every>0 and i%save_every==0:
                deproc_img = deprocess_image(img.numpy())

                deproc_img = cv2.GaussianBlur(deproc_img, (3, 3), 0)

                tf.keras.preprocessing.image.save_img(f"{output_dir}/dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg", deproc_img)
                print(f"-------dream_{img.shape[1]}_{img.shape[2]}_{i}" + ".jpg-------")
Example 15
def preprocess_image(image_path):
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    # add a leading batch dimension
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img
Example 16
    def extractImgFeature(self, filename, modelType):
        if modelType == 'inceptionv3':
            from tensorflow.keras.applications.inception_v3 import preprocess_input
            target_size = (299, 299)
            model = InceptionV3()
        elif modelType == 'xception':
            from tensorflow.keras.applications.xception import preprocess_input
            target_size = (299, 299)
            model = Xception()
        elif modelType == 'vgg16':
            from tensorflow.keras.applications.vgg16 import preprocess_input
            target_size = (224, 224)
            model = VGG16()
        elif modelType == 'resnet50':
            from tensorflow.keras.applications.resnet50 import preprocess_input
            target_size = (224, 224)
            model = ResNet50()
        # drop the final classification layer and expose the penultimate output
        model = Model(inputs=model.inputs, outputs=model.layers[-2].output)
        image = load_img(filename, target_size=target_size)  # load and resize image
        image = img_to_array(image)  # convert the image pixels to a numpy array
        image = image.reshape((1,) + image.shape)  # add a batch dimension
        image = preprocess_input(image)  # prepare the image for the CNN model
        features = model.predict(image, verbose=0)  # encoded features
        return features
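
Example call, assuming an instance of the surrounding class is available as extractor (a hypothetical name) and using a placeholder file name:

features = extractor.extractImgFeature('example.jpg', modelType='inceptionv3')
print(features.shape)  # (1, 2048) for InceptionV3 without its classification head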
Example 17
    def transform_input(self, X, names, meta):
        logger.info("Transform called")
        t = preprocess_input(X)
        logger.info("Data type is %s", t.dtype)
        t = np.float32(t)
        logger.info("Data type is %s", t.dtype)
        return t
Example 18
def load_image(image_path, max_dim=512):
    img = Image.open(image_path)
    img = img.convert("RGB")
    img.thumbnail([max_dim, max_dim])
    img = np.array(img, dtype=np.uint8)
    img = np.expand_dims(img, axis=0)
    return inception_v3.preprocess_input(img)
Example 19
def preprocess_image(image_path):
    # Utility function to open, resize, and format pictures into appropriate arrays
    img = keras.preprocessing.image.load_img(image_path)
    img = keras.preprocessing.image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img
Example 20
def detect_and_predict_mask(frame, faceNet, maskNet):
	# grab the dimensions of the frame and then construct a blob
	# from it
	(h, w) = frame.shape[:2]
	blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
		(104.0, 177.0, 123.0))

	# pass the blob through the network and obtain the face detections
	faceNet.setInput(blob)
	detections = faceNet.forward()

	# initialize our list of faces, their corresponding locations,
	# and the list of predictions from our face mask network
	faces = []
	locs = []
	preds = []

	# loop over the detections
	for i in range(0, detections.shape[2]):
		# extract the confidence (i.e., probability) associated with
		# the detection
		confidence = detections[0, 0, i, 2]

		# filter out weak detections by ensuring the confidence is
		# greater than the minimum confidence
		if confidence > args["confidence"]:
			# compute the (x, y)-coordinates of the bounding box for
			# the object
			box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
			(startX, startY, endX, endY) = box.astype("int")

			# ensure the bounding boxes fall within the dimensions of
			# the frame
			(startX, startY) = (max(0, startX), max(0, startY))
			(endX, endY) = (min(w - 1, endX), min(h - 1, endY))

			# extract the face ROI, convert it from BGR to RGB channel
			# ordering, resize it to 224x224, and preprocess it
			face = frame[startY:endY, startX:endX]
			face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
			face = cv2.resize(face, (224, 224))
			face = img_to_array(face)
			face = preprocess_input(face)

			# add the face and bounding boxes to their respective
			# lists
			faces.append(face)
			locs.append((startX, startY, endX, endY))

	# only make predictions if at least one face was detected
	if len(faces) > 0:
		# for faster inference we'll make batch predictions on *all*
		# faces at the same time rather than one-by-one predictions
		# in the above `for` loop
		faces = np.array(faces, dtype="float32")
		preds = maskNet.predict(faces, batch_size=32)

	# return a 2-tuple of the face locations and their corresponding
	# predictions
	return (locs, preds)
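
A hedged sketch of how the two networks passed into this function are typically loaded (all file paths are placeholders):

from tensorflow.keras.models import load_model

# OpenCV DNN face detector (Caffe prototxt + weights; paths are placeholders)
faceNet = cv2.dnn.readNet('deploy.prototxt',
                          'res10_300x300_ssd_iter_140000.caffemodel')
# Keras mask classifier trained separately (placeholder file name)
maskNet = load_model('mask_detector.model')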
Example 21
def examine_cat_breeds(image, model, train_list):
    # convert the image to an array
    img_array = img_to_array(image)
    # 3 dims -> 4 dims (add a batch dimension)
    img_dims = np.expand_dims(img_array, axis=0)
    # predict classes (preds: per-class probabilities, num_classes x 1)
    preds = model.predict(preprocess_input(img_dims))
    preds_reshape = preds.reshape(-1, preds.shape[0])
    # convert train_list to a len(train_list) x 1 array
    cat_array = np.array(train_list).reshape(len(train_list), -1)
    # sort in descending order of probability
    preds_sort = preds_reshape[np.argsort(preds_reshape[:, 0])[::-1]]
    # reorder the breed names to match the descending probabilities
    cat_sort = cat_array[np.argsort(preds_reshape[:, 0])[::-1]]
    # join the sorted breed names with their probabilities
    set_result = np.concatenate([cat_sort, preds_sort], 1)
    return set_result
Example 22
    def check_face(self, image):
        if len(self.face_vectors) == 0:
            return False

        self.get_roi(image)

        if self.method == 0:
            sift = cv2.xfeatures2d.SIFT_create(128)
            kp, des = sift.detectAndCompute(self.roi, None)
            keypoints = cv2.drawKeypoints(self.roi, kp, self.roi)
            cv2.imshow("Keypoints", keypoints)
            cv2.waitKey(0)
        elif self.method == 1:
            pca = PCA(n_components=128)
            pca.fit(np.array(self.roi))
            des = pca.singular_values_
        elif self.method == 2:
            des = self.model.predict(np.array(self.roi).reshape(-1, 1))
        elif self.method == 3:
            img = cv2.resize(self.roi, (128, 128))
            img = img.reshape(1, 128, 128, 3)
            des = self.model.predict(preprocess_input(img))

        distribution = {}

        for key, face in self.face_vectors.items():
            similarity = cosine_similarity(face, des)
            print("Testing against: %s, similarity: %s" %
                  (key, str(similarity)))

            distribution[key] = similarity[0][0]

        print(distribution)

        return distribution
Example 23
def image_classifier():
    model_ip = os.environ['inception_ip']

    address = 'http://%s:8501/v1/models/inception:predict' % (model_ip)

    #content = request.get_json()
    # from json get img path

    #img_path = content['instances']
    img_path = request.files['file']
    # img loading from path

    img = image.load_img(img_path, target_size=(224, 224))
    # img preprocessing
    x = image.img_to_array(img)

    x = preprocess_input(x)
    data = {"instances": [{'input_1': x.tolist()}]}

    # Making a POST request to the TensorFlow Serving address
    result = requests.post(address, json=data)

    # Decoding results from TensorFlow Serving server
    pred = json.loads(result.content.decode('utf-8'))

    # Returning JSON response to the frontend
    return jsonify(decode_predictions(np.array(pred['predictions']))[0])
Example 24
def path_to_tensor(img_path):
    img = image.load_img(img_path, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    return x
Example 25
    def call(self, input_x):
        x = tf.image.resize(input_x, (299, 299))
        x = preprocess_input(255 * x)
        x = self.model(x)
        x = tf.expand_dims(x, 1)

        return x
Example 26
def preprocess_image(image_path):
	# Util function to open, resize and format pictures into appropriate tensors.
	img = load_img(image_path)
	img = img_to_array(img)
	img = np.expand_dims(img, axis=0)
	img = inception_v3.preprocess_input(img)
	return img
Example 27
def load_img_to_array(*, imgfile: str) -> np.ndarray:
    img = load_img(imgfile, target_size=(299, 299))
    img = img_to_array(img)
    img = img.reshape((1, img.shape[0], img.shape[1], img.shape[2]))
    img = preprocess_input(img)

    return img
Example 28
def preprocess_image(img_path):
    """ process images for inceptionV3 shape """
    img = image.load_img(img_path)
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img
Example 29
def test_anchor_images():
    os.environ.clear()
    alibi_model = os.path.join(
        kfserving.Storage.download(IMAGENET_EXPLAINER_URI), EXPLAINER_FILENAME)
    with open(alibi_model, "rb") as f:
        model = InceptionV3(weights="imagenet")
        predictor = lambda x: model.predict(x)  # pylint:disable=unnecessary-lambda
        alibi_model = dill.load(f)
        anchor_images = AnchorImages(predictor,
                                     alibi_model,
                                     batch_size=25,
                                     stop_on_first=True)
        category = "Persian cat"
        image_shape = (299, 299, 3)
        data, _ = fetch_imagenet(category,
                                 nb_images=10,
                                 target_size=image_shape[:2],
                                 seed=2,
                                 return_X_y=True)
        images = preprocess_input(data)
        print(images.shape)
        np.random.seed(0)
        explanation = anchor_images.explain(images[0:1])
        exp_json = json.loads(explanation.to_json())
        assert exp_json["data"]["precision"] > 0.9
Example 30
def InceptionV3(image_bytes):

    image_batch = np.expand_dims(image_bytes, axis=0)
    processed_imgs = inception_v3.preprocess_input(image_batch)
    inception_v3_features = inception_v3_extractor.predict(processed_imgs)
    flattened_features = inception_v3_features.flatten()
    # normalized_features = flattened_features / norm(flattened_features)
    return flattened_features
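
inception_v3_extractor is not defined in the snippet; a plausible construction, consistent with the flatten() call above (an assumption, not the original author's code):

from tensorflow.keras.applications import inception_v3

# headless InceptionV3; its (1, 8, 8, 2048) output is flattened by the caller
inception_v3_extractor = inception_v3.InceptionV3(weights='imagenet',
                                                  include_top=False)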