Example #1
    def extractGlobalReferences(self, img):
        image = cv2.imread(img)
        filename = utils.getFilename(img)

        H, W, _ = image.shape
        base = np.zeros((H, W), 'uint8')
        segmented = np.zeros((H, W, 3), 'uint8')
        segmented[..., 0] = base.copy()
        segmented[..., 1] = base.copy()
        segmented[..., 2] = base.copy() + 255

        segmentedPath = config.temp_folder + 'global_segmented.png'
        cv2.imwrite(segmentedPath, segmented)

        illuminantMaps.estimateGrayEdge(img, segmentedPath, config.temp_folder,
                                        self.verbose)
        illuminantMaps.estimateGrayWorld(img, segmentedPath,
                                         config.temp_folder, self.verbose)
        illuminantMaps.estimateMaxRGB(img, segmentedPath, config.temp_folder,
                                      self.verbose)
        illuminantMaps.estimateShadesOfGray(img, segmentedPath,
                                            config.temp_folder, self.verbose)
        illuminantMaps.estimateSecondGrayEdge(img, segmentedPath,
                                              config.temp_folder, self.verbose)

        global_references = {}
        for alg in self.algorithms:
            global_references[alg] = utils.evaluateRGBMedian(
                config.temp_folder + filename + "_gge_map_" + alg + ".png")[0]
        return global_references
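
Note: utils.evaluateRGBMedian is a project helper not shown on this page. A
minimal stand-in sketch, assuming it returns the channel-wise median of the
map as a 1x3 row (which is why the caller above indexes [0]):

import cv2
import numpy as np

def evaluate_rgb_median(map_path):
    # Hypothetical stand-in for utils.evaluateRGBMedian: channel-wise
    # median over all pixels, returned as a 1x3 array so row 0 holds
    # the (B, G, R) reference color
    img = cv2.imread(map_path)
    median = np.median(img.reshape(-1, 3), axis=0)
    return median.reshape(1, 3)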
Example #2
def extractDescriptor(img, descriptor, space=0, channel=3):
    filename = utils.getFilename(img)

    descriptorName = (config.faces_folder + filename + "-" +
                      descriptor.lower() + "-desc.txt")
    nname = img
    newName = nname[:-3] + "ppm"

    sourceImg = cv2.imread(img)
    # Color-space codes: 0 = HSV, 1 = RGB, 2 = YCrCb, 4 = Lab;
    # anything else (including 3, which is unassigned) keeps BGR
    if space == 0:
        destImg = cv2.cvtColor(sourceImg, cv2.COLOR_BGR2HSV)
    elif space == 1:
        destImg = cv2.cvtColor(sourceImg, cv2.COLOR_BGR2RGB)
    elif space == 2:
        destImg = cv2.cvtColor(sourceImg, cv2.COLOR_BGR2YCR_CB)
    elif space == 4:
        destImg = cv2.cvtColor(sourceImg, cv2.COLOR_BGR2LAB)
    else:
        destImg = sourceImg

    cv2.imwrite(nname, destImg)

    command = config.convertBinary + " " + nname + " " + newName
    subprocess.call([command], stdout=subprocess.DEVNULL, shell=True)

    command = ("descriptors/" + descriptor.lower() + "/source/bin/./" +
               descriptor.lower() + "_extraction " + newName + " " +
               descriptorName)
    # The SASI, LAS and Unser extractors expect an extra trailing argument
    if descriptor.lower() in ('sasi', 'las', 'unser'):
        command += ' 1'
    subprocess.call([command], stdout=subprocess.DEVNULL, shell=True)
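
The space codes above form a sparse lookup table (3 is unassigned). A sketch
of the same branch written table-driven, which makes the gap explicit:

import cv2

# Table-driven color conversion; code 3 is intentionally absent, and any
# unknown code falls through to the original BGR image
COLOR_SPACES = {
    0: cv2.COLOR_BGR2HSV,
    1: cv2.COLOR_BGR2RGB,
    2: cv2.COLOR_BGR2YCR_CB,
    4: cv2.COLOR_BGR2LAB,
}

def convertSpace(sourceImg, space):
    code = COLOR_SPACES.get(space)
    return cv2.cvtColor(sourceImg, code) if code is not None else sourceImg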
Example #3
    def extractIlluminationMaps(self, img):
        filename = utils.getFilename(img)
        illuminantMaps.prepareImageIlluminants(
            img, config.seg_sigma, config.seg_k, config.seg_min_size,
            config.min_intensity, config.max_intensity, self.verbose)
        if 'GGE' in config.illuminantTypes:
            illuminantMaps.extractGGEMap(
                img, config.maps_folder + filename + "_segmented.png",
                config.gge_sigma, config.gge_n, config.gge_p, self.verbose)
        if 'IIC' in config.illuminantTypes:
            illuminantMaps.extractIICMap(
                img, config.maps_folder + filename + "_segmented.png",
                self.verbose)
Example #4
    def getTrainingData(self, images, descriptor, illum='GGE'):
        trainingData = []
        trainingLabels = []
        trainingSource = []
        for i in range(len(images)):
            filename = utils.getFilename(images[i])
            path = (config.features_folder + filename + '_' + illum + "_" +
                    descriptor.lower() + ".txt")
            if os.path.isfile(path):
                with open(path, "r") as imageFeatures:
                    features = [
                        FaceTrainingSample.fromTxt(pair)
                        for pair in imageFeatures.read().splitlines()
                    ]
                for sample in features:
                    trainingLabels.append(sample.label)
                    trainingData.append(
                        np.array(sample.feature.split(), dtype=float))
                    trainingSource.append(utils.getFilename(path))

        return (np.asarray(trainingData), np.asarray(trainingLabels),
                np.asarray(trainingSource))
Example #5
    def evaluateEuclideanDistances(self, images):
        # Extract image features from each image in the training set

        for i in range(len(images)):
            img = images[i]
            #if self.extract_features:
            #    self.processImage(img, True, self.verbose, self.heat_map)

            print(img)
            filename = utils.getFilename(img)

            #Reads IMs
            first_map = cv2.imread(config.maps_folder + filename +
                                   '_gge_map_0_1.png')
            second_map = cv2.imread(config.maps_folder + filename +
                                    '_gge_map_1_1.png')

            #first_map = self.resizeImage(first_map, 200)
            #second_map = self.resizeImage(second_map, 200)
            if first_map is None or second_map is None:
                continue

            gge_b, gge_g, gge_r = cv2.split(first_map.astype(np.float64))
            iic_b, iic_g, iic_r = cv2.split(second_map.astype(np.float64))
            # Get map dimensions
            rows, cols, _ = first_map.shape
            # Build the heat map; the cast to float above keeps the channel
            # differences from wrapping around in uint8 arithmetic
            heat_map = np.sqrt(
                pow(gge_b[0:rows - 1, 0:cols - 1] -
                    iic_b[0:rows - 1, 0:cols - 1], 2) +
                pow(gge_g[0:rows - 1, 0:cols - 1] -
                    iic_g[0:rows - 1, 0:cols - 1], 2) +
                pow(gge_r[0:rows - 1, 0:cols - 1] -
                    iic_r[0:rows - 1, 0:cols - 1], 2))
            # Recover heat map max value
            max_value = np.ndarray.max(heat_map)
            # Normalization
            heat_map = heat_map / max_value
            heat_sum = np.sum(heat_map)

            if self.heat_map:
                heat_map = heat_map * 255
                heat_map = heat_map.astype(np.uint8)
                #Display color map
                color_map = cv2.applyColorMap(heat_map, cv2.COLORMAP_JET)
                cv2.imshow('img', utils.resizeImage(color_map, 500))
                cv2.waitKey(0)

            print(heat_sum)
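
The heat map above is a per-pixel Euclidean distance between two BGR maps.
An equivalent vectorized sketch of the same computation:

import numpy as np

def illuminantDistanceMap(first_map, second_map):
    # Per-pixel Euclidean distance between two BGR illuminant maps,
    # computed in float to avoid uint8 wrap-around, normalized to [0, 1]
    diff = first_map.astype(np.float64) - second_map.astype(np.float64)
    heat_map = np.sqrt((diff ** 2).sum(axis=2))
    max_value = heat_map.max()
    return heat_map / max_value if max_value > 0 else heat_map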
Example #6
def PlayFolder(folder):
    videos = utils.parseFolder(folder, subfolders=False)

    realVideos = []
    for video in videos:
        video = video[1]
        if utils.isFilePlayable(video):
            if utils.getExtension(video) == SRC:
                playlist = utils.getPlaylistFromLocalSrc(video)
                for item in playlist:
                    realVideos.append(item)
            else:
                realVideos.append(video)

    if len(realVideos) == 0:
        utils.DialogOK(GETTEXT(30102))
        return

    videos = {}
    for video in realVideos:
        filename = utils.getFilename(video)
        if filename in videos:
            # Prefer local duplicates over Amazon
            if 'AMAZON@' in videos[filename]:
                videos[filename] = video
        else:
            videos[filename] = video

    isFirst = True

    repeatMode = GetRepeatMode()

    for video in videos.values():
        title, image = utils.GetTitleAndImage(video)
        AddToPlaylist(title, image, video, LOCAL_FILE + 10000, isFirst)
        isFirst = False

    xbmc.executebuiltin('PlayerControl(%s)' % repeatMode)
Example #7
    def train(self, images, labels, direction='vertical'):
        for i in range(len(images)):
            featureFile = open('trained_features' + direction + '.txt', 'a')
            img = images[i]
            label = labels[i]
            self.filename = utils.getFilename(img)
            print('Processing ' + self.filename)
            # Reads image
            image = cv2.imread(img)
            rows, cols, _ = image.shape
            maskImage = np.zeros((rows, cols))

            try:
                if direction == 'vertical':
                    maskBand = np.ones((rows, label[2])) * 255
                    maskImage[:, label[0]:label[1]] = maskBand
                elif direction == 'horizontal':
                    maskBand = np.ones((label[2], cols)) * 255
                    maskImage[label[0]:label[1], :] = maskBand

            except Exception:
                print("Error evaluating band " + img)
                continue

            global_references = self.extractGlobalReferences(img)

            self.evaluateIlluminantMaps(image, maskImage, direction)

            # Evaluate distances
            medians = {}
            for i in range(self.verticalBands):
                filename = (config.temp_folder + 'vertical_band_' + str(i) +
                            "_gge_map_")
                for alg in self.algorithms:
                    medians[alg] = utils.evaluateRGBMedian(filename + alg +
                                                           ".png")

                band_feature = []  # feature vector evaluated on the medians
                band_feature_global = []  # feature vector evaluated on the global references
                band = self.bands['vertical'][i]

                for alg in self.algorithms:
                    distance = utils.euclideanDistanceRGB(
                        medians[alg], self.verticalReferences[alg])
                    band_feature.append(distance)
                    distance = utils.euclideanDistanceRGB(
                        medians[alg], global_references[alg])
                    band_feature_global.append(distance)

                print('\tEvaluated ' + str(i + 1) + '/' +
                      str(self.verticalBands) + ' vertical bands')

                featureFile.write(str(band.label) + ": ")
                for feat in band_feature:
                    featureFile.write(str(feat) + " ")
                featureFile.write(":")
                for feat in band_feature_global:
                    featureFile.write(str(feat) + " ")
                featureFile.write(":" + str(band.colorAvg))
                featureFile.write("\n")

            for i in range(self.horizontalBands):
                filename = (config.temp_folder + 'horizontal_band_' + str(i) +
                            "_gge_map_")
                for alg in self.algorithms:
                    medians[alg] = utils.evaluateRGBMedian(filename + alg +
                                                           ".png")

                band = self.bands['horizontal'][i]
                band_feature = []  # feature vector evaluated on the medians
                band_feature_global = []  # feature vector evaluated on the global references
                for alg in self.algorithms:
                    distance = utils.euclideanDistanceRGB(
                        medians[alg], self.horizontalReferences[alg])
                    band_feature.append(distance)
                    distance = utils.euclideanDistanceRGB(
                        medians[alg], global_references[alg])
                    band_feature_global.append(distance)

                print('\tEvaluated ' + str(i + 1) + '/' +
                      str(self.horizontalBands) + ' horizontal bands')

                featureFile.write(str(band.label) + ": ")
                for feat in band_feature:
                    featureFile.write(str(feat) + " ")
                featureFile.write(":")
                for feat in band_feature_global:
                    featureFile.write(str(feat) + " ")
                featureFile.write(":" + str(band.colorAvg))
                featureFile.write("\n")

            featureFile.close()
Example #8
def estimateSecondGrayEdge(img, segmentedImg, outputFolder, verbose):
    filename = utils.getFilename(img)
    output = outputFolder + filename + "_gge_map_secondgrayedge.png"
    extractGGEMap(img, segmentedImg, 1, 2, 1, verbose, output=output)
Example #9
def estimateShadesOfGray(img, segmentedImg, outputFolder, verbose):
    filename = utils.getFilename(img)
    output = outputFolder + filename + "_gge_map_shadesofgray.png"
    extractGGEMap(img, segmentedImg, 1, 0, 5, verbose, output=output)
Example #10
def estimateMaxRGB(img, segmentedImg, outputFolder, verbose):
    filename = utils.getFilename(img)
    output = outputFolder + filename + "_gge_map_maxrgb.png"
    extractGGEMap(img, segmentedImg, 1, 0, 20, verbose, output=output)
Example #11
def estimateGrayWorld(img, segmentedImg, outputFolder, verbose):
    filename = utils.getFilename(img)
    output = outputFolder + filename + "_gge_map_grayworld.png"
    extractGGEMap(img, segmentedImg, 1, 0, 1, verbose, output=output)
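
Taken together, Examples #8-#11 are all instances of the generalized
gray-edge framework: each wrapper fixes a (sigma, n, p) triple for
extractGGEMap, where n is the derivative order and p the Minkowski norm
(gray world: n=0, p=1; shades of gray: n=0, p=5; max-RGB approximated with
a large p=20; second-order gray edge: n=2, p=1). A table-driven sketch of
the same family; the 'grayedge' triple is an assumption, since its wrapper
is not shown on this page:

GGE_PARAMS = {
    'grayworld': (1, 0, 1),
    'maxrgb': (1, 0, 20),
    'shadesofgray': (1, 0, 5),
    'grayedge': (1, 1, 1),  # assumed (n=1, p=1); wrapper not shown above
    'secondgrayedge': (1, 2, 1),
}

def estimate(alg, img, segmentedImg, outputFolder, verbose):
    # Dispatch to extractGGEMap with the per-estimator (sigma, n, p) triple
    sigma, n, p = GGE_PARAMS[alg]
    output = outputFolder + utils.getFilename(img) + "_gge_map_" + alg + ".png"
    extractGGEMap(img, segmentedImg, sigma, n, p, verbose, output=output)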
Example #12
def load(name=None):
    if name is None:
        name = config.dataset

    images = []
    labels = []

    if name == 'DSO-1' or name == 'DSI-1':
        print('Loading ' + name + ' dataset')
        #Retrieving file list
        files = os.listdir(config.imagesFolder)
        for i in files:
            filename = utils.getFilename(i)
            try:
                img = config.imagesFolder + i
                if os.path.isfile(img) and not i.startswith('.'):
                    images.append(img)
                    #Retrieving label
                    with open(config.labelsFolder + filename + '.txt',
                              'r') as labelFile:
                        imageLabels = [
                            re.split(r'\t+', s.strip())
                            for s in labelFile.readlines()
                        ]
                    labels.append(imageLabels)
            except Exception:
                print("Error on processing image: " + i)

        print('Loaded ' + name + ' dataset')

    if name == 'COLORCHECKER':
        print('Loading color checker dataset')
        files = os.listdir(config.imagesFolder)
        for i in files:
            try:
                img = config.imagesFolder + i
                if os.path.isfile(img) and not i.startswith('.'):
                    images.append(img)
                    labels.append(1)
            except Exception:
                print("Error on processing image: " + i)

        print('Loaded color checker dataset')

    if name == 'SPLICED_COLORCHECKER' or name == 'SPLICED_DSO1':
        print('Loading spliced color checker dataset')
        files = os.listdir(config.imagesFolder)
        for i in files:
            filename = utils.getFilename(i)
            try:
                img = config.imagesFolder + i
                _, file_extension = os.path.splitext(img)
                if (os.path.isfile(img) and not i.startswith('.')
                        and file_extension == '.png'):
                    images.append(img)
                    label = np.loadtxt(config.imagesFolder + filename +
                                       '.txt').tolist()
                    labels.append(label)
                    print('added ' + str(img))
            except Exception:
                print("Error on processing image: " + i)

        print('Loaded spliced color checker dataset')

    return images, labels
Example #13
def process(input):
    # Assumed wrapper: the listing begins mid-function, so the header and
    # the parsing of number_of_cases / sheeps are reconstructed from the
    # process() call below and from how solve() indexes sheeps[0]
    number_of_cases = int(input[0])
    sheeps = [line.split() for line in input[1:]]
    solution = [
        solve(case, sheeps[case]) for case in range(0, number_of_cases)
    ]
    return solution


def solve(case, sheeps):
    N = int(sheeps[0])
    if N == 0:
        # 0, 0, 0, ... never shows all ten digits
        return "Case #" + str(case + 1) + ": INSOMNIA"
    digits = set()
    i = 1
    while len(digits) < 10:
        M = N * i
        for digit in getDigits(M):
            digits.add(digit)
        i += 1
    return "Case #" + str(case + 1) + ": " + str(M)


def getDigits(N):
    Ns = [int(d) for d in str(N)]
    return Ns
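
This is Google Code Jam 2016's "Counting Sheep": keep taking multiples of N
until every digit 0-9 has appeared. For N = 2 the last missing digit (9)
arrives at 90; for N = 0 the digits never grow past {0}. A quick check of
solve() against the problem's sample answers:

assert solve(0, ['0']) == "Case #1: INSOMNIA"
assert solve(0, ['1']) == "Case #1: 10"
assert solve(0, ['2']) == "Case #1: 90"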


filename = utils.getFilename()
input = utils.read_input(filename)
output = process(input)
utils.print_output(filename, output)
Example #14
    def train(self, images, labels):
        # Extract image features from each image in the training set
        if self.extract_features or self.extract_maps:
            for i in range(len(images)):
                #Extract the faces in the image
                faces, _ = self.extractFaces(images[i], labels[i])
                if len(faces) > 1:
                    #If there are two or more faces, process the image
                    filename = utils.getFilename(images[i])
                    print('Processing ' + filename)
                    #Extract maps
                    if self.extract_maps:
                        self.extractIlluminationMaps(images[i])
                    #Extract face paired features
                    if self.extract_features:
                        # Extract image descriptors and features
                        for illum in config.illuminantTypes:
                            for desc in self.descriptors:
                                self.extractFeatures(images[i],
                                                     labels[i],
                                                     faces=faces,
                                                     illum=illum,
                                                     descriptor=desc)
                else:
                    #else discard the current image
                    print('Not enough faces found in the image')

        # Sample training
        if not self.cross_validation:
            # Train one model for each descriptor
            for illum in config.illuminantTypes:
                for desc in self.descriptors:
                    trainingData, trainingLabels, _ = self.getTrainingData(
                        images, desc, illum=illum)
                    if len(trainingData) > 0:
                        # Creates an instance of Neighbours Classifier and fit the data.
                        clf = KNNClassifier(config.KNeighbours, 'uniform')
                        clf.train(trainingData, trainingLabels)
                        clf.store(config.classification_folder + 'model_' +
                                  illum + '_' + desc.lower() + '.pkl')
                        print(illum + "/" + desc.upper() +
                              ' classification model created correctly')

        else:
            # Cross-validate the dataset with K-fold cross-validation
            print('Evaluate dataset with crossvalidation')
            trainingDesc = {}

            for illum in config.illuminantTypes:
                for desc in self.descriptors:
                    key = illum + "_" + desc
                    trainingDesc[key] = self.getTrainingData(images,
                                                             desc,
                                                             illum=illum)

            #Counting misclassified samples for accuracy score
            misclassified = 0
            refKey = config.illuminantTypes[0] + "_" + self.descriptors[0]
            #Splits dataset in train and test for crossvalidation
            splits = splitDataset(trainingDesc[refKey][0], config.folds)

            outputs_labels = trainingDesc[refKey][1]
            outputs_scores = np.zeros(len(trainingDesc[refKey][0]))

            for (trainIndex, testIndex) in splits:
                classifiers = {}
                for illum in config.illuminantTypes:
                    for desc in self.descriptors:
                        key = illum + "_" + desc
                        trainingData, trainingLabels, _ = trainingDesc[key]
                        if len(trainingData) > 0 and len(trainingLabels) > 0:
                            trainingData = trainingData[trainIndex]
                            trainingLabels = trainingLabels[trainIndex]
                            #Training model for illum type and descriptor
                            clf = KNNClassifier(config.KNeighbours, 'uniform')
                            clf.train(trainingData, trainingLabels)
                            classifiers[key] = clf

                outputs = []
                outputs_probs = []

                for illum in config.illuminantTypes:
                    for desc in self.descriptors:
                        key = illum + "_" + desc
                        testData, _, _ = trainingDesc[key]
                        if len(testData) > 0:
                            testData = testData[testIndex]
                            prediction = classifiers[key].predict(
                                testData) * config.descriptors_weights[desc]
                            outputs.append(prediction)
                            prediction = classifiers[key].predict(
                                testData,
                                True) * config.descriptors_weights[desc]
                            outputs_probs.append(prediction)

                output = np.zeros(len(testIndex))
                output_prob = np.zeros(len(testIndex))

                for i in range(len(outputs)):
                    predictions = outputs[i]
                    output += predictions
                    predictions_prob = outputs_probs[i]
                    for j in range(len(predictions)):
                        output_prob[j] += predictions_prob[j][1]

                #If voting is majority, classify as fake
                outputs_scores[testIndex] = output_prob
                counter = 0
                _, trainingLabels, _ = trainingDesc[refKey]
                testLabels = trainingLabels[testIndex]
                totalModels = len(classifiers)

                for val in np.nditer(output):
                    if val > totalModels / 2:
                        if testLabels[counter] != 1:
                            misclassified += 1
                    else:
                        if testLabels[counter] != 0:
                            misclassified += 1
                    counter += 1

            scipy.io.savemat(
                'classification_output.mat',
                dict(labels=outputs_labels, scores=outputs_scores))

            print('Number of classifiers: ' + str(totalModels))
            totalSamples = len(trainingDesc[refKey][0])
            print('Misclassified: ' + str(misclassified) + '/' +
                  str(totalSamples))
            accuracy = (totalSamples - misclassified) / totalSamples
            print('Accuracy: ' + str(accuracy))
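
splitDataset is a project helper not shown on this page. A minimal sketch of
what the cross-validation loop above expects from it, assuming it yields
(trainIndex, testIndex) pairs, built on scikit-learn's KFold:

from sklearn.model_selection import KFold

def splitDataset(data, folds):
    # Hypothetical stand-in: one (trainIndex, testIndex) pair of index
    # arrays per fold, matching how the loop above unpacks the splits
    kf = KFold(n_splits=folds, shuffle=True)
    return list(kf.split(data))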
Example #15
def testGGEMaps():
    images, labels = loadDatasets.load()
    length = len(algs)
    if evaluate:
        for img in images:
            filename = utils.getFilename(img)
            print('Processing ' + filename)
            illuminantMaps.prepareImageIlluminants(img, config.seg_sigma,
                                                   config.seg_k,
                                                   config.seg_min_size,
                                                   config.min_intensity,
                                                   config.max_intensity, False)
            # The estimate* helpers take (img, segmentedImg, outputFolder,
            # verbose); the maps are written to and read back from
            # config.maps_folder
            segmented = config.maps_folder + filename + "_segmented.png"
            illuminantMaps.estimateGrayEdge(img, segmented,
                                            config.maps_folder, verbose)
            illuminantMaps.estimateGrayWorld(img, segmented,
                                             config.maps_folder, verbose)
            illuminantMaps.estimateMaxRGB(img, segmented,
                                          config.maps_folder, verbose)
            illuminantMaps.estimateShadesOfGray(img, segmented,
                                                config.maps_folder, verbose)
            illuminantMaps.estimateSecondGrayEdge(img, segmented,
                                                  config.maps_folder, verbose)

            distanceMap = np.zeros((length, length), dtype=np.float64)
            for i in range(length):
                first = cv2.imread(config.maps_folder + filename +
                                   "_gge_map_" + algs[i] + ".png")
                for j in range(i + 1, length):
                    second = cv2.imread(config.maps_folder + filename +
                                        "_gge_map_" + algs[j] + ".png")
                    dist = utils.evaluateEuclideanDistances(
                        first, second, display)
                    distanceMap[i, j], distanceMap[j, i] = dist, dist

            np.savetxt(config.maps_folder + filename + '.out', distanceMap)
    # Plot the per-pair distance distributions as scatter plots
    for i in range(length):
        for j in range(i + 1, length):
            normal_x = []
            spliced_x = []
            normal_y = []
            spliced_y = []

            for k in range(len(images)):
                filename = utils.getFilename(images[k])
                label = 0
                if filename.find("splicing") > -1:
                    label = 1
                distanceMap = np.loadtxt(config.maps_folder + filename +
                                         '.out')
                if label == 0:
                    normal_x.append(k)
                    normal_y.append(distanceMap[i, j])
                else:
                    spliced_x.append(k)
                    spliced_y.append(distanceMap[i, j])

            normal_x = np.asarray(normal_x)
            spliced_x = np.asarray(spliced_x)
            normal_y = np.asarray(normal_y)
            spliced_y = np.asarray(spliced_y)

            plt.scatter(normal_x, normal_y, color='blue')
            plt.scatter(spliced_x, spliced_y, color='red')
            plt.savefig(config.maps_folder + algs[i] + '_' + algs[j])
            plt.close()
Example #16
    def detect(self, img, groundtruth=True):
        self.filename = utils.getFilename(img)
        print('Processing ' + self.filename)

        # Reads image
        image = cv2.imread(img)
        if image is None:
            print('Error processing ' + self.filename + ': image not found')
            return

        image = utils.resizeImage(image, 1200)

        # Mask
        if groundtruth:
            maskImage = cv2.imread(
                config.masks_folder + self.filename + '.png',
                cv2.IMREAD_GRAYSCALE)
            if maskImage is not None:
                maskImage = np.invert(maskImage)
                maskImage = utils.resizeImage(maskImage, 1200)
        else:
            maskImage = None

        if image is not None:

            self.evaluateIlluminantMaps(image, maskImage)
            rows, cols, _ = image.shape
            detectionMap = np.zeros((rows, cols))

            global_references = self.extractGlobalReferences(img)

            # Evaluate distances
            medians = {}
            for i in range(self.verticalBands):
                filename = (config.temp_folder + 'vertical_band_' + str(i) +
                            "_gge_map_")
                for alg in self.algorithms:
                    medians[alg] = utils.evaluateRGBMedian(filename + alg +
                                                           ".png")

                band = self.bands['vertical'][i]
                band_feature = []

                for alg in self.algorithms:
                    if config.referenceColorType == 'median':
                        distance = utils.euclideanDistanceRGB(
                            medians[alg], self.verticalReferences[alg])
                    elif config.referenceColorType == 'global':
                        distance = utils.euclideanDistanceRGB(
                            medians[alg], global_references[alg])
                    band_feature.append(distance)

                    if config.regionalTrainingType is None:
                        detectionMap = band.incrementDetection(
                            detectionMap, distance)

                if config.regionalTrainingType == 'svm':
                    clf_type = 'vertical'
                    if config.referenceColorType == 'global':
                        clf_type += '_global'

                    prediction = self.clf[clf_type].predict_proba(
                        np.asarray(band_feature).reshape((1, -1)))
                    detectionMap = band.incrementDetection(
                        detectionMap, prediction[0][1])

                print('\tEvaluated ' + str(i + 1) + '/' +
                      str(self.verticalBands) + ' vertical bands')

            for i in range(self.horizontalBands):
                filename = (config.temp_folder + 'horizontal_band_' +
                            str(i) + "_gge_map_")
                for alg in self.algorithms:
                    medians[alg] = utils.evaluateRGBMedian(filename + alg +
                                                           ".png")

                band = self.bands['horizontal'][i]
                band_feature = []

                for alg in self.algorithms:
                    if config.referenceColorType == 'median':
                        distance = utils.euclideanDistanceRGB(
                            medians[alg], self.horizontalReferences[alg])
                    elif config.referenceColorType == 'global':
                        distance = utils.euclideanDistanceRGB(
                            medians[alg], global_references[alg])
                    band_feature.append(distance)

                    if config.regionalTrainingType is None:
                        detectionMap = band.incrementDetection(
                            detectionMap, distance)

                if config.regionalTrainingType == 'svm':
                    clf_type = 'horizontal'
                    if config.referenceColorType == 'global':
                        clf_type += '_global'

                    prediction = self.clf[clf_type].predict_proba(
                        np.asarray(band_feature).reshape((1, -1)))
                    detectionMap = band.incrementDetection(
                        detectionMap, prediction[0][1])

                print('\tEvaluated ' + str(i + 1) + '/' +
                      str(self.horizontalBands) + ' horizontal bands')

            # Fraction of pixels whose detection score exceeds the threshold
            splicedPercent = (
                len(np.where(detectionMap > config.fakeThreshold)[0]) /
                detectionMap.size)
            print('Spliced area (%): ' + str(splicedPercent))

            if splicedPercent > config.splicedTolerance:
                print('Image is SPLICED - Score: ' + str(splicedPercent))
                outputMask = detectionMap.copy()
                outputMask[outputMask < config.fakeThreshold] = 0
                outputMask[outputMask >= config.fakeThreshold] = 1

                max_value = np.ndarray.max(detectionMap)
                detectionMap = detectionMap / max_value
                detectionMap *= 255

                if self.display_result:
                    # Display color map
                    color_map = detectionMap
                    color_map = color_map.astype(np.uint8)
                    color_map = cv2.applyColorMap(color_map, cv2.COLORMAP_JET)
                    out = np.concatenate((utils.resizeImage(
                        image, 500), utils.resizeImage(color_map, 500)),
                                         axis=1)
                    cv2.imshow('output', out)
                    cv2.waitKey(0)

                    # Display spliced regions
                    regionMask = np.zeros(image.shape, 'uint8')
                    regionMask[..., 0] = outputMask.copy()
                    regionMask[..., 1] = outputMask.copy()
                    regionMask[..., 2] = outputMask.copy()
                    splicedRegions = np.multiply(image, regionMask)
                    out = np.concatenate((utils.resizeImage(
                        image, 500), utils.resizeImage(splicedRegions, 500)),
                                         axis=1)
                    cv2.imshow('output', out)
                    cv2.waitKey(0)

                # Write output mask
                outputMask *= 255
                cv2.imwrite(config.regionOutputDetectionImage, outputMask)

            else:
                print('Image is ORIGINAL - Score: ' + str(splicedPercent))

        else:
            print('No image found: ' + img)
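
The spliced-area ratio above counts above-threshold pixels with np.where. A
compact equivalent sketch, since the mean of a boolean mask is exactly the
fraction of True entries:

import numpy as np

def splicedFraction(detectionMap, threshold):
    # Fraction of pixels whose detection score exceeds the threshold
    return np.mean(detectionMap > threshold)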
Example #17
    def extractFeatures(self,
                        img,
                        label=None,
                        faces=None,
                        descriptor="ACC",
                        space=0,
                        illum='GGE',
                        output=False):
        # Avoid a mutable default argument for faces
        if faces is None:
            faces = []
        filename = utils.getFilename(img)

        if illum == 'GGE':
            map_path = (config.maps_folder + filename + '_' + illum.lower() +
                        '_map_' + str(config.gge_n) + "_" +
                        str(config.gge_p) + ".png")
        else:
            map_path = (config.maps_folder + filename + '_' + illum.lower() +
                        '_map.png')

        illuminantMap = cv2.imread(map_path)

        # Store the faces extracted from the illuminant map
        count = 0
        for (x, y, w, h) in faces:
            face = illuminantMap[y:y + h, x:x + w]
            path = (config.faces_folder + "face-" + illum.upper() + "-" +
                    str(count) + ".png")
            cv2.imwrite(path, face)
            descriptors.extractDescriptor(path, descriptor, space)
            count += 1

        # Image features
        features = []
        first = 0
        while first < len(faces) - 1:
            second = first + 1
            firstFaceFeat = (config.faces_folder + 'face-' + illum + '-' +
                             str(first) + "-" + descriptor.lower() +
                             "-desc.txt")
            while second < len(faces):
                secondFaceFeat = (config.faces_folder + 'face-' + illum +
                                  '-' + str(second) + "-" +
                                  descriptor.lower() + "-desc.txt")
                # Concatenate the two feature vectors
                facePairFeature = descriptors.buildFaceFeatureVector(
                    firstFaceFeat, secondFaceFeat, descriptor)

                if config.inverseFacePosition:
                    inverseFacePairFeature = descriptors.buildFaceFeatureVector(
                        secondFaceFeat, firstFaceFeat, descriptor)

                pairLabel = None
                if label is not None and not isinstance(label, str):
                    pairLabel = 0
                    if label[first][1] != config.positiveLabel or label[
                            second][1] != config.positiveLabel:
                        pairLabel = 1

                sample = FaceTrainingSample(facePairFeature, pairLabel, first,
                                            second, filename)
                features.append(sample)

                if config.inverseFacePosition:
                    sample = FaceTrainingSample(inverseFacePairFeature,
                                                pairLabel, second, first,
                                                filename)
                    features.append(sample)

                second += 1
            first += 1

        if self.verbose:
            print('\tFeatures extracted with ' + descriptor +
                  ' descriptor in ' + illum + ' map')

        if not output:
            # Store the image features in a file
            nameFile = (config.features_folder + filename + '_' + illum +
                        "_" + descriptor.lower() + ".txt")
            with open(nameFile, "w") as featFile:
                for sample in features:
                    featFile.write(str(sample))
        else:
            return features
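
The nested while loops enumerate every unordered face pair (0,1), (0,2), ...
A sketch of the same enumeration with itertools.combinations, which makes
the pairing explicit:

from itertools import combinations

def facePairs(nFaces):
    # All unordered index pairs (first, second) with first < second,
    # matching the nested while loops above
    return list(combinations(range(nFaces), 2))

# e.g. facePairs(3) -> [(0, 1), (0, 2), (1, 2)]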
Example #18
import sys
from utils import getFilename, readFile

filename = getFilename(sys.argv)
output = readFile(filename, str)

output = [list(o) for o in output]
originalOutput = output[:]


def getResult(output, printOutput=True):
    result = []
    for o in output:
        result.append("".join(o))
        if printOutput:
            print(result[-1])
    result = "".join(result)

    return result


### PART 1 ###


def adjOccupiedNum(output, row, col):
    check = []
    for i in range(max(row - 1, 0), min(row + 2, len(output))):
        for j in range(max(col - 1, 0), min(col + 2, len(output[0]))):
            if not i == row or not j == col:
                check.append((i, j))
    # Assumed completion (the listing is cut off here): count occupied
    # ('#') seats among the adjacent cells collected above
    return sum(1 for (i, j) in check if output[i][j] == '#')
Example #19
    def detect(self, img, detected_faces=None):
        filename = utils.getFilename(img)

        config.maps_folder = config.temp_folder
        config.faces_folder = config.temp_folder
        config.descriptors_folder = config.temp_folder

        print('Processing ' + filename)

        image = cv2.imread(img)
        if image is None:
            print('ERROR processing ' + filename + ': image not found')
            return -1

        # Check that the image is in color (grayscale images unpack to a
        # 2-tuple shape, which also raises ValueError)
        try:
            _, _, channels = image.shape
            if channels < 3:
                raise ValueError()
        except ValueError:
            print('ERROR: image is grayscale. Illuminant maps analysis '
                  'cannot be performed.')
            return -1

        # Extract the faces in the image
        faces, _ = self.extractFaces(img, detected_faces)

        #Prediction map
        predictions = {}
        counters = {}
        for i in range(len(faces)):
            predictions[i], counters[i] = 0, 0

        # The image can be processed only if it contains two or more faces
        if len(faces) > 1:
            # Extract maps
            self.extractIlluminationMaps(img)
            # Extract image descriptors and features
            for illum in config.illuminantTypes:
                for desc in self.descriptors:
                    clfPath = (config.classification_folder + 'model_' +
                               illum + '_' + desc.lower() + '.pkl')
                    if os.path.isfile(clfPath):
                        clf = KNNClassifier.load(clfPath)
                        features = self.extractFeatures(img,
                                                        label=detected_faces,
                                                        faces=faces,
                                                        illum=illum,
                                                        descriptor=desc,
                                                        output=True)
                        #Predict over sample
                        for sample in features:
                            prediction = clf.predict(
                                np.array(sample.feature.split(),
                                         dtype=float).reshape((1, -1)),
                                True)[0][1]
                            predictions[sample.first] += prediction
                            predictions[sample.second] += prediction
                            counters[sample.first] += 1
                            counters[sample.second] += 1

            #Majority voting
            threshold = config.majorityVotingThreshold
            score = 0
            detected = False
            fakeFaces = []

            for i in predictions:
                if score < predictions[i] / counters[i]:
                    score = predictions[i] / counters[i]

                if predictions[i] / counters[i] > threshold:
                    fakeFaces.append(i)
                    print('\tFace ' + str(i + 1) + ' is FAKE. Score ' +
                          str(score))

                    if not detected:
                        detected = not detected
                else:
                    print('\tFace ' + str(i + 1) + ' is NORMAL. Score ' +
                          str(predictions[i] / counters[i]))

            if detected:
                print('Image is FAKE')
            else:
                print('Image is ORIGINAL')

            # cv2.imread expects an imread flag, not a cvtColor code; read in
            # color so the rectangles and scores below can be drawn in color
            orig = cv2.imread(img)
            # Display spliced faces
            rows, cols, _ = orig.shape
            outputMask = np.zeros((rows, cols))

            faceScores = orig.copy()
            idx = 0
            font = cv2.FONT_HERSHEY_SIMPLEX
            for (x, y, w, h) in faces:
                face_score = predictions[idx] / counters[idx]
                if idx not in fakeFaces:
                    cv2.rectangle(faceScores, (x, y), (x + w, y + h),
                                  (0, 255, 0), 8)
                    cv2.putText(faceScores, str("{:.3f}".format(face_score)),
                                (x, y + h + 80), font, 1.8, (0, 255, 0), 3)
                else:
                    cv2.rectangle(faceScores, (x, y), (x + w, y + h),
                                  (0, 0, 255), 8)
                    cv2.putText(faceScores, str("{:.3f}".format(face_score)),
                                (x, y + h + 80), font, 1.8, (0, 0, 255), 3)

                #Set score in detection map
                outputMask[y:y + h, x:x + w] = face_score
                idx += 1
            faceScores = utils.resizeImage(faceScores, 1000)

            if self.display_result:
                cv2.imshow('output', faceScores)
                cv2.waitKey()

            regionMask = np.zeros(orig.shape)
            regionMask[..., 0] = outputMask.copy()
            regionMask[..., 1] = outputMask.copy()
            regionMask[..., 2] = outputMask.copy()
            # Write output mask
            outputMask *= 255
            cv2.imwrite(config.faceOutputDetectionImage, outputMask)

            return score

        else:
            # discard the current image
            print('Not enough faces found in the image')
            return -1