def main(pathfeats1,
         pathfeats2,
         dataset,
         output=None,
         mean=False,
         different=False):
    pathfeats1 = fh.is_file(pathfeats1)
    pathfeats2 = fh.is_file(pathfeats2)
    dataset = fc.Configuration().has_dataset(dataset)
    if output:
        if fh.is_folder(output, boolean=True):
            fileout = join(output, 'merged_feats.txt')
        else:
            fileout = fh.is_file(output)
    else:
        dirin = dirname(pathfeats1)
        fileout = join(dirin, 'merged_feats.txt')
    if different:
        fh.merge_features_different_files(pathfeats1,
                                          pathfeats2,
                                          fileout,
                                          dataset,
                                          mean=mean)
    else:
        fh.merge_features_equal_files(pathfeats1,
                                      pathfeats2,
                                      fileout,
                                      mean=mean)
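# The merge helpers called above (fh.merge_features_*) are not shown in this
# snippet. A minimal sketch of the "equal files" case, assuming each line of a
# feature file reads "<path> <feat_1> ... <feat_n> <label>" (the layout implied
# by fh.load_features further below) and that both files list the same paths in
# the same order; mean=True averages the two vectors instead of concatenating.
import numpy as np

def merge_features_equal_files_sketch(file1, file2, fileout, mean=False):
    with open(file1) as f1, open(file2) as f2, open(fileout, 'w') as fout:
        for line1, line2 in zip(f1, f2):
            parts1, parts2 = line1.split(), line2.split()
            path, label = parts1[0], parts1[-1]
            feats1 = np.array(parts1[1:-1], dtype=float)
            feats2 = np.array(parts2[1:-1], dtype=float)
            merged = (feats1 + feats2) / 2.0 if mean else np.concatenate((feats1, feats2))
            fout.write('%s %s %s\n' % (path, ' '.join('%g' % v for v in merged), label))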
def resize_pathfile(inputfile, outputfolder, dataset, size):
    """
    Receives the path of a file and resize all images in this
    file to size=`size`
    
    Parameters:
    -----------
    input : string
        path to the input file containing multiple images
    output : string
        path to the output folder
    size : int
        new size of the image
    """
    inputfile = filehandler.is_file(inputfile)
    outputfolder = filehandler.is_folder(outputfolder)
    fname = filehandler.add_text2path(inputfile, size, withfolder=False)
    fout = open(join(outputfolder, fname), 'w')

    logger.info('resizing images to: %dx%d' % (size, size))
    logger.info('saving output file at: %s' % join(outputfolder, fname))

    pf = filehandler.ImagePaths(inputfile, dataset)
    for impath, label in pf:
        #logger.info('processing file: %s' % impath)
        _, fimg = pf.extract_root()
        outpath = join(outputfolder, fimg)

        #logger.info('saving file: %s' % fimg)
        imfolder = dirname(outpath)
        if not exists(imfolder):
            os.makedirs(imfolder)

        resize_file(impath, outpath, size)
        fout.write('%s %s\n' % (outpath, str(label)))
    fout.close()
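# `resize_file` is called in the loop above but not defined in this snippet.
# A minimal sketch of what it likely does, assuming OpenCV is available; the
# real helper may use another library or keep the aspect ratio.
import cv2

def resize_file_sketch(impath, outpath, size):
    """Read the image at `impath`, resize it to size x size and save it at `outpath`."""
    img = cv2.imread(impath)
    resized = cv2.resize(img, (size, size), interpolation=cv2.INTER_AREA)
    cv2.imwrite(outpath, resized)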
def grid_svm(trainfile,
             valfile,
             outputdir,
             kernel='rbf',
             gamma_min=2e-15,
             gamma_max=2e3,
             gamma_step=1e2,
             c_min=2e-5,
             c_max=2e15,
             c_step=1e2):
    """
    Perform a grid search over SVM parameters

    Parameters:
    -----------
    trainfile: string
        path to the file containing training features
    valfile: string
        path to the file containing validation features
    outputdir: string
        path to the folder to save tests
    kernel: string
        type of kernel (scikit-learn names)
    gamma_min: float
        minimum value of gamma
    gamma_max: float
        maximum value of gamma
    gamma_step: float
        step between consecutive gamma values
    c_min: float
        minimum value of C
    c_max: float
        maximum value of C
    c_step: float
        step between consecutive C values
    """
    trainfile = fh.is_file(trainfile)
    _, X_train, y_train = fh.load_features(trainfile)
    valfile = fh.is_file(valfile)
    vpaths, X_val, y_val = fh.load_features(valfile)
    outputdir = fh.is_folder(outputdir)

    vgamma = create_range(gamma_min, gamma_max, gamma_step)
    vc = create_range(c_min, c_max, c_step)
    for c in vc:
        for g in vgamma:
            logger.info('Running C: %E :: Gamma: %E' % (c, g))
            clf = svm.SVC(kernel=kernel, C=c, gamma=g)
            clf.fit(X_train, y_train)
            pred = clf.predict(X_val)

            fileout = join(outputdir,
                           str(kernel) + '_' + str(c) + '_' + str(g) + '.txt')
            logger.info('saving output file in: %s' % fileout)
            with open(fileout, 'w') as fout:
                for path, y, p in zip(vpaths, y_val, pred):
                    fout.write('%s %d %d\n' % (path, y, p))
    logger.info('Finished!')
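# `create_range` is used above but not defined in this snippet. A plausible
# sketch, assuming the step is multiplicative (so gamma runs 2e-15, 2e-13, ...
# up to 2e3 when gamma_step=1e2); the original implementation may differ.
def create_range_sketch(vmin, vmax, step):
    """Return the values from vmin to vmax obtained by multiplying by `step`."""
    values = []
    value = vmin
    while value <= vmax:
        values.append(value)
        value *= step
    return values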
def main(inputfile, size, dataset, output=None):
    if output:
        dirout = filehandler.is_folder(output)
    else:
        dirout = join(dirname(inputfile), str(size))
        if not exists(dirout):
            os.makedirs(dirout)
    imresize.resize_pathfile(inputfile, dirout, dataset, size)
def mean_pixel(input, dirout):
    """
    Generate a pixelwise mean for a file containing paths to images.

    Parameters:
    -----------
    input : string
        File containing the path to all images and their true labels
    dirout : string
        Path to the output folder

    Notes:
    -------
    The function generates three files:
        fname.binaryproto : contains the pixelwise mean of the images
        fname.npy : numpy array containing the mean
        fname.png : image resulting from the mean
    """
    caffe = True
    try:
        from caffe.io import array_to_blobproto
    except ImportError:
        logger.warning(
            'The system does not contain caffe.io to save as binaryproto')
        caffe = False
    from skimage import io

    input = realpath(input)
    fname = fh.filename(input, extension=False)
    dirout = fh.is_folder(dirout)
    fnameout = join(dirout, fname + '_mean')

    pf = fh.PathfileHandler(input)
    n = pf.nb_lines
    logger.info('Calculating mean for %d files.' % n)

    for nbline, arr in enumerate(pf):
        path = arr[0]
        img = io.imread(path)
        if nbline == 0:
            size = img.shape[1]
            mean = np.zeros((1, 3, size, size))
        mean[0][0] += img[:, :, 0]
        mean[0][1] += img[:, :, 1]
        mean[0][2] += img[:, :, 2]

    mean[0] /= n
    if caffe:
        blob = array_to_blobproto(mean)
        logger.info('Saving data into: %s' % (fnameout + '.binaryproto'))
        with open(fnameout + '.binaryproto', 'wb') as f:
            f.write(blob.SerializeToString())

    logger.info('Saving numpy matrix into: %s' % (fnameout + '.npy'))
    np.save(fnameout + '.npy', mean[0])
    mean_img = np.transpose(mean[0].astype(np.uint8), (1, 2, 0))
    logger.info('Saving mean image into: %s' % (fnameout + '.png'))
    io.imsave(fnameout + '.png', mean_img)
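# A hedged example of consuming the files written above: load the saved .npy
# mean (shape 3 x H x W) and subtract it from an image before feeding it to a
# network. The function name and paths are illustrative only.
import numpy as np
from skimage import io

def subtract_mean_sketch(image_path, mean_path):
    """Subtract the saved pixelwise mean from the image at `image_path`."""
    mean = np.load(mean_path)                       # shape (3, H, W)
    img = io.imread(image_path).astype(np.float32)  # shape (H, W, 3)
    return img - np.transpose(mean, (1, 2, 0))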
def main(fileinput, output=None, dataset="PENN"):
    fileinput = fh.is_file(fileinput)
    if output:
        dirout = fh.is_folder(output)
    else:
        dirin = dirname(fileinput)
        dirout = join(dirin, 'JPG')
        if not isdir(dirout):
            os.makedirs(dirout)
    cvr.convert_files(fileinput, dirout, dataset, to='jpg')
def main(fileinput, dirout, output=None, window=2, dataset="PENN"):
    fileinput = fh.is_file(fileinput)
    dirout = fh.is_folder(dirout)
    if output:
        fileout = output
    else:
        fileout = join(dirout, 'bronx_paths.txt')

    utils.bronx_file(fileinput, dirout, fileout, dataset, window=window)
def calculate_from_file(inputfile, by_pixel=True, channels='RGB', output=None):
    inputfile = fh.is_file(inputfile)
    if output:
        outfolder = fh.is_folder(output)
    else:
        outfolder = dirname(inputfile)
    if by_pixel:
        mean_pixel(inputfile, outfolder)
    else:
        mean_channel(inputfile, mode=channels)
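# `mean_channel` is called above but not shown in this snippet. A minimal
# sketch of a channel-wise mean, assuming the same "<path> <label>" pathfile
# layout used by `mean_pixel`; the real helper may handle other modes and
# save its result to disk.
import numpy as np
from skimage import io

def mean_channel_sketch(inputfile, mode='RGB'):
    """Return the per-channel mean over every image listed in `inputfile`."""
    total = np.zeros(3, dtype=np.float64)
    count = 0
    with open(inputfile) as fin:
        for line in fin:
            path = line.split()[0]
            img = io.imread(path)
            total += img.reshape(-1, 3).mean(axis=0)
            count += 1
    mean = total / count
    return mean[::-1] if mode == 'BGR' else mean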
def main(inputfile, by_pixel=False, channels='RGB', output=None):
    inputfile = filehandler.is_file(inputfile)
    if output:
        dirout = filehandler.is_folder(output)
    else:
        dirin = dirname(inputfile)
        dirout = join(dirin, 'mean')
        if not exists(dirout):
            os.makedirs(dirout)
    rgbmean.calculate_from_file(inputfile,
                                by_pixel=by_pixel,
                                channels=channels,
                                output=dirout)
def main(datainput, imsize, output=None):
    """
    Parameters:
    -----------
    datainput : string
        path to the root folder or file containing the dataset
    imsize : int
        size of the new images
    output : string
        path to the folder where the new dataset will be saved
    """
    if fh.is_folder(datainput, boolean=True):
        # create a pathfile to the dataset
        pennaction.create_pathfile(datainput)
    else:
        datainput = fh.is_file(datainput)
        if output:
            dirout = fh.is_folder(output)
        else:
            dirout = join(dirname(datainput), str(imsize))
            if not exists(dirout):
                os.makedirs(dirout)
def main(frame1, frame2, output=None, channels=False):
    image_1 = fh.is_file(frame1)
    image_2 = fh.is_file(frame2)
    if output:
        dirout = fh.is_folder(output)
    else:
        dirout = dirname(frame1)

    flow = of.optical_flow(image_1, image_2, channels=channels)
    if channels:
        outflowX = join(dirout, 'optflow_x.jpg')
        outflowY = join(dirout, 'optflow_y.jpg')
        cv2.imwrite(outflowX, flow[0])
        cv2.imwrite(outflowY, flow[1])
    else:
        fileout = join(dirout, 'optflow.jpg')
        cv2.imwrite(fileout, flow)
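# `of.optical_flow` is used above but not shown in this snippet. A hedged
# sketch based on OpenCV's Farneback dense flow: with channels=True it returns
# the x and y flow components as 8-bit images, otherwise a single magnitude
# image. The real helper may use a different algorithm or encoding.
import cv2
import numpy as np

def optical_flow_sketch(path1, path2, channels=False):
    prev = cv2.imread(path1, cv2.IMREAD_GRAYSCALE)
    curr = cv2.imread(path2, cv2.IMREAD_GRAYSCALE)
    flow = cv2.calcOpticalFlowFarneback(prev, curr, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)
    if channels:
        fx = cv2.normalize(flow[..., 0], None, 0, 255, cv2.NORM_MINMAX)
        fy = cv2.normalize(flow[..., 1], None, 0, 255, cv2.NORM_MINMAX)
        return fx.astype(np.uint8), fy.astype(np.uint8)
    mag, _ = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    mag = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    return mag.astype(np.uint8)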
def main(filetrain, fileval, output, kernel, gamma_min, gamma_max, gamma_step,
         c_min, c_max, c_step):
    filetrain = fh.is_file(filetrain)
    fileval = fh.is_file(fileval)
    if output:
        dirout = fh.is_folder(output)
    else:
        dirin = dirname(filetrain)
        dirout = join(dirin, 'GridSVM')
        if not isdir(dirout):
            os.makedirs(dirout)
    clr.grid_svm(filetrain,
                 fileval,
                 dirout,
                 kernel=kernel,
                 gamma_min=gamma_min,
                 gamma_max=gamma_max,
                 gamma_step=gamma_step,
                 c_min=c_min,
                 c_max=c_max,
                 c_step=c_step)
def main(inputfile, output=None, window=1, channels=False):
    inputfile = fh.is_file(inputfile)
    if output:
        dirout = fh.is_folder(output)
    else:
        dirout = dirname(inputfile)

    # generate pairs of images to the optical flow
    dic = fh.imgpath2dic(inputfile)
    seqs = fh.pairs_of_paths(sorted(dic.keys()), window)

    # create optical flow for each pair
    for id1, id2 in seqs:
        flow = of.optical_flow(dic[id1], dic[id2], channels=channels)
        if channels:
            outflowX = join(dirout, str(id1)+'-'+str(id2)+'_x.jpg')
            outflowY = join(dirout, str(id1)+'-'+str(id2)+'_y.jpg')
            cv2.imwrite(outflowX, flow[0])
            cv2.imwrite(outflowY, flow[1])
        else:
            fileout = join(dirout, str(id1)+'-'+str(id2)+'.jpg')
            cv2.imwrite(fileout, flow)
def extract(inputfile, dataset, output=None):
    """
    Extract frames corresponding to videos in ``inputfile``
    
    Parameters
    ----------
    inputfile : string
        file containing paths and true labels
    dataset : string (dogs|kitchen|ucf11)
        name of the dataset
    output : string
        folder to save output files 

    Output
    -------
        Saves files containing the name of the action (from the path) and its
        respective frames. Also creates a file named ``videos.txt`` containing
        a list of all generated files.
    """
    inputfile = filehandler.is_file(inputfile)
    if output:
        dirout = filehandler.is_folder(output)
    else:
        dirout = dirname(inputfile)

    fh = filehandler.Videos(inputfile, dataset)
    dvideos = fh.extract_videos()
    fvideos = open(join(dirout, 'videos.txt'), 'w')
    for video in dvideos:
        fname = video + '.txt'
        vname = join(dirout, fname)
        fvideos.write('%s\n' % vname)
        logger.info('Creating file: %s' % fname)
        with open(vname, 'w') as fout:
            for path, y in sorted(dvideos[video]):
                fout.write('%s %s\n' % (path, y))
    fvideos.close()
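# A hedged follow-up sketch: after `extract` runs, 'videos.txt' lists one
# per-video file per line, and each of those files holds "<frame_path> <label>"
# lines (see the loop above). The function name is illustrative only.
from os.path import join

def iterate_videos_sketch(dirout):
    """Yield (video_file, [(frame_path, label), ...]) for every generated file."""
    with open(join(dirout, 'videos.txt')) as fvideos:
        for vname in fvideos:
            vname = vname.strip()
            with open(vname) as fin:
                frames = [tuple(line.split()) for line in fin]
            yield vname, frames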