Example #1
import glob
import os

from PIL import Image
from imutils import paths  # assumed: paths.list_images comes from imutils


def changeName(pasta):

    # rename every image in the folder to <number>_<folder name> (e.g. 1_garamond.png)
    path1 = "/Users/fegvilela/Documents/Unb/TCC/baseDados/" + str(pasta) + "Random/letrasSeparadas/"

    os.chdir(path1)


    # convert every .jpeg file to .png
    jpegFiles = glob.glob("*.jpeg")
    for file in jpegFiles:
        im = Image.open(file)  # open the image by its file name
        im.save("%s.png" % file[:-5])  # strip the ".jpeg" extension
        os.remove(file)  # remove the original jpeg

    # convert every .jpg file to .png
    jpgFiles = glob.glob("*.jpg")
    for file in jpgFiles:
        im = Image.open(file)  # open the image by its file name
        im.save("%s.png" % file[:-4])  # strip the ".jpg" extension
        os.remove(file)  # remove the original jpg


    # convert every .gif file to .png
    gifFiles = glob.glob("*.gif")
    for file in gifFiles:
        im = Image.open(file)  # open the image by its file name
        im.save("%s.png" % file[:-4])  # strip the ".gif" extension
        os.remove(file)  # remove the original gif

    # all png files in the directory (i.e. every image in the directory)
    pngFiles = glob.glob("*.png")

    # check whether the files in this directory were already named with "_"
    # (so that the renaming below does not reuse existing names and overwrite files)
    tem = 0
    if '_' in pngFiles[0]:
        tem = 1

    i = 1
    for filename in pngFiles:
        # if the files already carry "_" names, rename to a temporary name first,
        # so the target names do not collide with files that still exist
        if tem == 1:
            os.rename(filename, (str(i) + str(pasta) + ".png"))
        else:
            os.rename(filename, (str(i) + "_" + str(pasta) + ".png"))
        i = i + 1

    # second pass: move the temporary names to the final <number>_<folder>.png form
    if tem == 1:
        i = 1
        for filename in sorted(glob.glob("*.png")):
            os.rename(filename, (str(i) + "_" + str(pasta) + ".png"))
            i = i + 1

    imagePaths2 = list(paths.list_images(path1))

    print(len(imagePaths2))
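A minimal usage sketch for changeName (the folder names other than garamond are hypothetical placeholders):

# hypothetical driver: rename the images of a few font folders in one pass
for pasta in ["garamond", "arial", "times"]:
    changeName(pasta)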
Example #2
                    filename='run_ML_classify_run_model.log',
                    filemode='w')

# write run parameters to log file
logging.info("log level : %s", loglevel)

logging.info("test files : %s", args.test)
logging.info("model : %s", args.model)

# -------------------------------------------
# Classify the images using the input model
# -------------------------------------------
logging.info("Classifying images ...")

# grab set of test images
imagePaths2 = sorted(paths.list_images(args.test))

#------------------------------------
# Open up the results output file
#------------------------------------
# add date & time to results filename
# get current date/time and convert to a string
now = datetime.datetime.now()
nowStr = now.strftime("%Y-%m-%d_%H-%M")
logging.info("nowStr = %s", nowStr)

# ----------------------------------------------
# Set the results output filename from the args
# ----------------------------------------------
#outFileName = r'C:\Users\lrmayer\Documents\Mayer\CatalogImages\classify_output\ML_results_' + nowStr + '.csv'
Example #3
#!/usr/bin/env python
# coding: utf-8

import paths
import face_recognition
import pickle
import cv2
import os

dataset_path = './dataset'
detection_method = 'hog'

imagePaths = list(paths.list_images(dataset_path))
data = []

#imagePaths

# loop over the image paths
for (i, imagePath) in enumerate(imagePaths):
    # load the input image and convert it from BGR (OpenCV ordering)
    # to dlib ordering (RGB)
    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    print(imagePath)
    image = cv2.imread(imagePath)
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # detect the (x, y)-coordinates of the bounding boxes
    # corresponding to each face in the input image
    boxes = face_recognition.face_locations(rgb, model=detection_method)
    # compute the facial embedding for the face
    encodings = face_recognition.face_encodings(rgb, boxes)
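The listing stops right after the encodings are computed; a sketch of how a loop like this is usually finished, pairing each encoding with its image path and serializing everything with the already-imported pickle (the output file name is an assumption):

    # pair each encoding with the path and box it came from and keep it
    d = [{"imagePath": imagePath, "loc": box, "encoding": enc}
         for (box, enc) in zip(boxes, encodings)]
    data.extend(d)

# serialize the collected encodings to disk (file name is an assumption)
with open("encodings.pickle", "wb") as f:
    f.write(pickle.dumps(data))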
Example #4
# lrm -------------------------------------------------------------------------------
# assuming loglevel is bound to the string value obtained from the
# command line argument. Convert to upper case to allow the user to
# specify --log=DEBUG or --log=debug
loglevel = args["log"]
numeric_level = getattr(logging, loglevel.upper(), None)
if not isinstance(numeric_level, int):
    raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(level=numeric_level, filename='example.log', filemode='w')
# lrm -------------------------------------------------------------------------------


# grab the set of training image paths and initialize the list of training labels and matrix of features 
print("[INFO] extracting features...")
imagePaths = sorted(paths.list_images(args["train"]))
Labels = []
Data = []

# loop over images in training directory 
for imagePath in imagePaths:
    # extract the label and load image from disk
    label = imagePath[imagePath.rfind("/") + 1:].split("_")[0]
    logging.info("reading the image : %s", imagePath)
    image = cv2.imread(imagePath)
    
    # extract features from the image, then update the list of
    # labels and features 
    features = describe(image)
    Labels.append(label)
    Data.append(features)
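describe() is defined elsewhere in the project; a purely illustrative stand-in, assuming the descriptor returns a flattened, normalized 3D HSV color histogram (this is not the author's actual feature extractor):

import cv2

def describe(image):
    # illustrative descriptor: 3D HSV color histogram, normalized and flattened
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hist = cv2.calcHist([hsv], [0, 1, 2], None, [8, 8, 8],
                        [0, 180, 0, 256, 0, 256])
    return cv2.normalize(hist, hist).flatten()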
Example #5
args = vars(ap.parse_args())

# load the configuration and grab all image paths in the dataset
conf = Conf(args["conf"])

datagen = ImageDataGenerator(rotation_range=40,
                             width_shift_range=0.2,
                             height_shift_range=0.2,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode='nearest')

for subdir in r_list_dirs(conf["dataset_path"]):

    for fname in list_images(conf["dataset_path"], subdir):

        fpath = os.path.join(conf["dataset_path"], subdir, fname)

        label = os.path.basename(os.path.dirname(fpath))
        out_dir = os.path.join('generated', subdir)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        img = cv2.imread(fpath)  # NumPy array with shape (H, W, 3), BGR order
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        x = np.swapaxes(img, 0, 2)  # NumPy array with shape (3, W, H)
        x = x.reshape(
            (1, ) +
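The fragment is cut off in the middle of the reshape; the conventional way to finish this kind of Keras augmentation loop (a sketch, assuming a leading batch dimension is added and that roughly 20 augmented copies per source image are wanted) is:

        x = x.reshape((1, ) + x.shape)  # add a batch dimension

        # write a handful of augmented copies of this image to out_dir
        i = 0
        for _ in datagen.flow(x, batch_size=1,
                              save_to_dir=out_dir,
                              save_prefix=label,
                              save_format='jpeg'):
            i += 1
            if i >= 20:  # 20 copies per image is an assumption
                break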
Example #6
numeric_level = getattr(logging, args.log.upper(), None)
if not isinstance(numeric_level, int):
    raise ValueError('Invalid log level: %s' % args.log)
logging.basicConfig(level=numeric_level,
                    filename='run_ML_classify_create_model.log',
                    filemode='w')

# write run parameters to log file
logging.info("log level : %s", args.log)
logging.info("training files : %s", args.train)

#  -------------------------------------------------------------------------------

# grab the set of training image paths and initialize the list of training labels and matrix of features
imagePaths = sorted(paths.list_images(args.train))
Labels = []
Data = []

#-----------------------------
# Set up the image descriptor
#-----------------------------
desc = HSVColorTexture()

# -----------------------------------------------------------
# Loop over images in training directory to create the model
# -----------------------------------------------------------
for imagePath in imagePaths:
    # get the basename of the file, use that to get the target name
    label = os.path.basename(imagePath).split("_")[0]
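HSVColorTexture is defined elsewhere in the project; a purely illustrative stand-in, assuming the descriptor combines per-channel HSV statistics with a simple texture measure (not the author's actual implementation):

import cv2
import numpy as np

class HSVColorTexture:
    # illustrative descriptor: HSV channel means/std-devs plus Laplacian variance
    def describe(self, image):
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        means, stds = cv2.meanStdDev(hsv)
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        texture = np.var(cv2.Laplacian(gray, cv2.CV_64F))
        return np.append(np.concatenate([means, stds]).flatten(), texture)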
Example #7
    eroded = cv2.cvtColor(eroded, cv2.COLOR_HSV2BGR)
    cv2.imwrite(os.path.join(out_dir, fname), eroded)

    hist = cv2.calcHist([eroded], [0], None, [256], [0, 256])
    hist /= hist.max()

    fig = plt.figure(figsize=(4.5, 1.8))
    ax = fig.add_subplot(111)
    ax.set_title("Histogram")
    ax.set_xlabel("Bins")
    ax.set_ylabel("# of Pixels")
    #	ax.set_ylim([0, 300])
    ax.plot(hist)
    ax.set_xlim([0, 256])
    #	print os.path.join(out_hist_dir, fname)
    plt.savefig(os.path.join(out_hist_dir, fname), dpi=(50))
    plt.close(fig)


#	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

for subdir in r_list_dirs(in_dir):

    for fname in list_images(in_dir, subdir):
        #		print in_dir, dpath, fname
        process_image(fname, os.path.join(in_dir, subdir),
                      os.path.join(out_dir, subdir),
                      os.path.join(out_hist_dir, subdir))

print('Finished')
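r_list_dirs and list_images are project helpers that are not part of this listing; a minimal sketch of what they might do (assumptions: r_list_dirs yields sub-directories relative to the root, list_images yields the image file names inside one sub-directory):

import os

def r_list_dirs(root):
    # yield every sub-directory of root, relative to root
    for dirpath, dirnames, _ in os.walk(root):
        for d in dirnames:
            yield os.path.relpath(os.path.join(dirpath, d), root)

def list_images(root, subdir):
    # yield the image file names found directly inside root/subdir
    exts = ('.png', '.jpg', '.jpeg', '.gif', '.bmp')
    for fname in os.listdir(os.path.join(root, subdir)):
        if fname.lower().endswith(exts):
            yield fname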
Example #8
# imports required by this fragment (paths.list_images is assumed to come from imutils)
import argparse
import os
import pickle

import cv2
import face_recognition
from imutils import paths

# region Initializing command argument parser
argsParser = argparse.ArgumentParser()
argsParser.add_argument("-i",
                        "--dataset",
                        required=True,
                        help="path to input directory of faces + images")
argsParser.add_argument("-e",
                        "--encodings",
                        required=True,
                        help="path to serialized db of facial encodings")
args = vars(argsParser.parse_args())
# endregion

# region Prepare images and data
print("[INFO] quantifying faces...")
imagePaths = list(paths.list_images(args["dataset"]))

knownEncodings = []
knownNames = []
# endregion

# region Encoding
for (i, imagePath) in enumerate(imagePaths):
    print("[INFO] processing image {}/{}".format(i + 1, len(imagePaths)))
    name = imagePath.split(os.path.sep)[-2]

    image = cv2.imread(imagePath)
    rgbImage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    boxes = face_recognition.face_locations(rgbImage)
    encodings = face_recognition.face_encodings(rgbImage, boxes)
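The listing ends right after the encodings are computed; a sketch of the usual way to finish this region, accumulating the results and serializing them to the --encodings path (assumes pickle is imported at the top of the file):

    for encoding in encodings:
        knownEncodings.append(encoding)
        knownNames.append(name)

# region Serialize encodings
print("[INFO] serializing {} encodings...".format(len(knownEncodings)))
data = {"encodings": knownEncodings, "names": knownNames}
with open(args["encodings"], "wb") as f:
    f.write(pickle.dumps(data))
# endregion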