예제 #1
0
def create(dataset, work_dir):
    """Segment every page of *dataset* into characters.

    For each page image / annotation pair: the page is preprocessed and
    written to ``<work_dir>/pages`` as a .ppm, then segmented into
    ``<work_dir>/segments``.  Accumulated statistics are printed at the end.

    Parameters
    ----------
    dataset : str
        Dataset name; selects the image directory and annotation files.
    work_dir : str or unipath.Path
        Root directory for the generated pages and segments.
    """
    # Find all the pages in the dataset
    img_dir = Path(Path.cwd().ancestor(1), 'data/hwr_data/pages', dataset)
    ann_dir = Path(Path.cwd().ancestor(1), 'data/charannotations')
    images = img_dir.listdir('*.jpg')
    annotations = ann_dir.listdir(dataset + '*.words')
    # Pair each image with its annotation file (merge is defined elsewhere).
    files = merge(images, annotations)

    # Create character segmentations
    stats = {}
    for f in files:
        # Preprocess: f[0] is the page image, f[1] its annotation file.
        logging.info("Preprocessing %s", str(f[0]))
        pagesPathFolder = Path(work_dir, 'pages')
        pagesPathFolder.mkdir()
        pagePath = Path(pagesPathFolder, f[0].stem + '.ppm')
        img = cv2.imread(f[0], cv2.IMREAD_GRAYSCALE)
        img = preprocess(img)
        cv2.imwrite(pagePath, img)

        # Segment the preprocessed page using the XML word annotations.
        segmentPathFolder = Path(work_dir, 'segments')
        segmentPathFolder.mkdir()
        e = ET.parse(f[1]).getroot()
        logging.info("Segmenting %s", str(f[0]))
        segment(img, e, segmentPathFolder, stats)

    print_statistics(stats, dataset)
예제 #2
0
def get(project=None):
    """Get the data from different experiments.

    Warning! Experiment directories are expected in a particular location
    outside of this (words-in-transition) directory.

    Options are:

        telephone-app
        acoustic-similarity
        learning-sound-names

    """
    if project is None or project == 'telephone-app':
        # Refresh the local snapshot of the telephone-app data.
        app_dir = Path('../telephone-app')
        snapshot_dir = Path(app_dir, 'words-in-transition')
        # NOTE(review): src_dir is not defined in this function; presumably
        # a module-level destination Path -- confirm before relying on it.
        if src_dir.exists():
            src_dir.rmtree()
        copytree(snapshot_dir, src_dir)

    if project is None or project == 'acoustic-similarity':
        # src
        proj_dir = Path('../acoustic-similarity/data')
        judgments = Path(proj_dir, 'judgments')

        # dst (data_raw is presumably a module-level Path -- verify)
        acoustic_similarity_dir = Path(data_raw, 'acoustic-similarity')
        if not acoustic_similarity_dir.isdir():
            acoustic_similarity_dir.mkdir()

        # copy the csvs in the root proj data dir
        for csv in proj_dir.listdir('*.csv'):
            csv.copy(Path(acoustic_similarity_dir, csv.name))

        # concat and save judgments files
        judgments_csv = Path(acoustic_similarity_dir, 'judgments.csv')
        # `judgments` is rebound here from a Path to a list of DataFrames.
        judgments = [pd.read_csv(x) for x in judgments.listdir('*.csv')]
        if judgments:
            (pd.concat(judgments, ignore_index=True).to_csv(judgments_csv,
                                                            index=False))

    if project is None or project == 'learning-sound-names':
        src = Path('../learning-sound-names/data')
        dst = Path(data_raw, 'learning_sound_names.csv')
        data = pd.concat([pd.read_csv(x) for x in src.listdir('LSN*.csv')])
        data['is_correct'] = data.is_correct.astype(int)
        data.to_csv(dst, index=False)

        # also get subject info and questionnaire data
        to_get = ['questionnaire_v1', 'subject_info']
        for x in to_get:
            src_file = Path(src, '{}.csv'.format(x))
            dst_file = Path(data_raw, 'learning_sound_names_{}.csv'.format(x))
            run('cp {} {}'.format(src_file, dst_file))
예제 #3
0
def get(project=None):
    """Get the data from different experiments.

    Warning! Experiment directories are expected in a particular location
    outside of this (words-in-transition) directory.

    Options are:

        telephone-app
        acoustic-similarity
        learning-sound-names

    """
    if project is None or project == 'telephone-app':
        # Refresh the local snapshot of the telephone-app data.
        app_dir = Path('../telephone-app')
        snapshot_dir = Path(app_dir, 'words-in-transition')
        # NOTE(review): src_dir is not defined in this function; presumably
        # a module-level destination Path -- confirm before relying on it.
        if src_dir.exists():
            src_dir.rmtree()
        copytree(snapshot_dir, src_dir)

    if project is None or project == 'acoustic-similarity':
        # src
        proj_dir = Path('../acoustic-similarity/data')
        judgments = Path(proj_dir, 'judgments')

        # dst (data_raw is presumably a module-level Path -- verify)
        acoustic_similarity_dir = Path(data_raw, 'acoustic-similarity')
        if not acoustic_similarity_dir.isdir():
            acoustic_similarity_dir.mkdir()

        # copy the csvs in the root proj data dir
        for csv in proj_dir.listdir('*.csv'):
            csv.copy(Path(acoustic_similarity_dir, csv.name))

        # concat and save judgments files
        judgments_csv = Path(acoustic_similarity_dir, 'judgments.csv')
        # `judgments` is rebound here from a Path to a list of DataFrames.
        judgments = [pd.read_csv(x) for x in judgments.listdir('*.csv')]
        if judgments:
            (pd.concat(judgments, ignore_index=True)
               .to_csv(judgments_csv, index=False))

    if project is None or project == 'learning-sound-names':
        src = Path('../learning-sound-names/data')
        dst = Path(data_raw, 'learning_sound_names.csv')
        data = pd.concat([pd.read_csv(x) for x in src.listdir('LSN*.csv')])
        data['is_correct'] = data.is_correct.astype(int)
        data.to_csv(dst, index=False)

        # also get subject info and questionnaire data
        to_get = ['questionnaire_v1', 'subject_info']
        for x in to_get:
            src_file = Path(src, '{}.csv'.format(x))
            dst_file = Path(data_raw, 'learning_sound_names_{}.csv'.format(x))
            run('cp {} {}'.format(src_file, dst_file))
예제 #4
0
class JarBuilder(object):
    """Holds the information needed for building a Java Archive (`.jar`)
file.

    """

    def __init__(self, jarfile, sourcedir, tsa):
        # tsa: timestamping-authority URL passed to jarsigner (may be falsy).
        self.libjars = []
        self.tsa = tsa
        self.jarfile = Path(jarfile)
        self.sourcedir = Path(sourcedir)
        # All .java sources; compiled by build_classes().
        self.sources = list(self.sourcedir.listdir('*.java'))

        # Files packed into the jar: the manifest plus every .class file.
        self.jarcontent = [Path('Manifest.txt')]
        self.jarcontent += [
            Path(x) for x in self.sourcedir.listdir('*.class')]

    def add_lib(self, pth):
        """Register an external jar to be signed and put on the classpath."""
        self.libjars.append(Path(pth))

    def build_jar(self, ctx, outdir, alias):
        """Build (if outdated) and sign the jar plus all registered libs.

        The keystore password is read by the shell via backticks at
        jarsigner invocation time, so it never appears in this process.
        """
        flags = '-storepass "`cat ~/.secret/.keystore_password`"'
        if self.tsa:
            flags += ' -tsa {0}'.format(self.tsa)

        def run_signer(jarfile):
            # Sign, then verify the signature.
            ctx.run("jarsigner %s %s %s" % (flags, jarfile, alias), pty=True)
            ctx.run("jarsigner -verify %s" % jarfile, pty=True)

        outdir = Path(outdir)
        jarfile = outdir.child(self.jarfile)
        if jarfile.needs_update(self.jarcontent):
            # Escape '$' (inner-class file names) for the shell command.
            # NOTE: relies on unipath.Path subclassing str (.replace/join).
            jarcontent = [x.replace("$", r"\$") for x in self.jarcontent]
            ctx.run("jar cvfm %s %s" % (jarfile, ' '.join(jarcontent)), pty=True)
        run_signer(jarfile)
        for libfile in self.libjars:
            # Copy each lib next to the jar when missing/outdated, then sign.
            jarfile = outdir.child(libfile.name)
            if not jarfile.exists() or libfile.needs_update([jarfile]):
                libfile.copy(jarfile)
            run_signer(jarfile)

    def build_classes(self, ctx):
        """Compile every source file with javac (libs on the classpath)."""
        flags = "-Xlint:unchecked"
        if len(self.libjars):
            cp = ':'.join(self.libjars)
            flags += " -classpath %s" % cp
        for src in self.sources:
            ctx.run("javac %s %s" % (flags, src), pty=True)
예제 #5
0
def get_local(code, ktype='d', path='.'):
    """Locate the single local demo file for *code*.

    Parameters
    ----------
    code : str
    ktype : {'d', '5', '15', '30', '60', 'w', 'm'}
    path : str
        Directory searched for the file.

    Returns
    -------
    filename of the demo, or the result of ``warn`` when nothing matches.

    Raises
    ------
    Exception
        If more than one file matches.
    """
    sep = CONSTS.filename_sep
    suffix = sep.join([ktype, CONSTS.demo])
    glob_pattern = '%s%s*%s%s' % (code, sep, sep, suffix)
    matches = Path(path).listdir(glob_pattern, names_only=True)
    if not matches:  # empty means False
        return warn('NO ITEM')
    if len(matches) > 1:
        raise Exception('MORE THAN 1 ITEM')
    return matches[0]
예제 #6
0
class JarBuilder(object):
    """Holds the information needed for building a Java Archive (`.jar`)
file.

    """
    def __init__(self, jarfile, sourcedir, tsa):
        # tsa: timestamping-authority URL passed to jarsigner (may be falsy).
        self.libjars = []
        self.tsa = tsa
        self.jarfile = Path(jarfile)
        self.sourcedir = Path(sourcedir)
        # All .java sources; compiled by build_classes().
        self.sources = list(self.sourcedir.listdir('*.java'))

        # Files packed into the jar: the manifest plus every .class file.
        self.jarcontent = [Path('Manifest.txt')]
        self.jarcontent += [Path(x) for x in self.sourcedir.listdir('*.class')]

    def add_lib(self, pth):
        """Register an external jar to be signed and put on the classpath."""
        self.libjars.append(Path(pth))

    def build_jar(self, ctx, outdir, alias):
        """Build (if outdated) and sign the jar plus all registered libs.

        The keystore password is read by the shell via backticks at
        jarsigner invocation time, so it never appears in this process.
        """
        flags = '-storepass "`cat ~/.secret/.keystore_password`"'
        if self.tsa:
            flags += ' -tsa {0}'.format(self.tsa)

        def run_signer(jarfile):
            # Sign, then verify the signature.
            ctx.run("jarsigner %s %s %s" % (flags, jarfile, alias), pty=True)
            ctx.run("jarsigner -verify %s" % jarfile, pty=True)

        outdir = Path(outdir)
        jarfile = outdir.child(self.jarfile)
        if jarfile.needs_update(self.jarcontent):
            # Escape '$' (inner-class file names) for the shell command.
            # NOTE: relies on unipath.Path subclassing str (.replace/join).
            jarcontent = [x.replace("$", r"\$") for x in self.jarcontent]
            ctx.run("jar cvfm %s %s" % (jarfile, ' '.join(jarcontent)),
                    pty=True)
        run_signer(jarfile)
        for libfile in self.libjars:
            # Copy each lib next to the jar when missing/outdated, then sign.
            jarfile = outdir.child(libfile.name)
            if not jarfile.exists() or libfile.needs_update([jarfile]):
                libfile.copy(jarfile)
            run_signer(jarfile)

    def build_classes(self, ctx):
        """Compile every source file with javac (libs on the classpath)."""
        flags = "-Xlint:unchecked"
        if len(self.libjars):
            cp = ':'.join(self.libjars)
            flags += " -classpath %s" % cp
        for src in self.sources:
            ctx.run("javac %s %s" % (flags, src), pty=True)
예제 #7
0
파일: info.py 프로젝트: hallaj/inupypi
    def __init__(self, path):
        """Record *path* and the names of its directory entries.

        On any listing failure both attributes are left as None instead
        of being undefined, so later attribute access cannot raise.
        """
        self.__parents__ = None
        self.__contents__ = None

        try:
            path = Path(path)

            self.__parents__ = path
            self.__contents__ = [d.name for d in path.listdir()]
        except Exception:
            # Narrowed from a bare except: listing errors are still
            # swallowed, but SystemExit/KeyboardInterrupt now propagate.
            pass
예제 #8
0
def load_sounds(sound_dir, pattern):
    """Load every sound file in *sound_dir* whose name matches *pattern*.

    Returns a dict mapping each file's stem (as str) to a ``sound.Sound``
    built from that file.
    """
    directory = Path(sound_dir)
    return {
        str(snd_path.stem): sound.Sound(snd_path)
        for snd_path in directory.listdir(pattern=pattern)
    }
    def install_skeletton(self):
        """Install the selected flavor by moving its files into place.

        Moves every entry of ``<repo_dir>/<flavor>`` into
        ``self.install_path``, then removes the temporary checkout.
        """
        logger.info('Installing %s' % self.flavor)

        source = Path(self.repo_dir, self.flavor)

        # Move all items in the directory into the install_path.
        for item in source.listdir():
            item.move(self.install_path)
        self.delete_tmp_dir()
        logger.info('...done')
예제 #10
0
def find_labels():
    """Collect the segmented character images grouped by label.

    Returns
    -------
    dict
        Maps each label directory name (str) to the list of '*.ppm'
        files it contains.

    Raises
    ------
    Exception
        If the segments directory has not been created yet.
    """
    work_dir = Path("tmp/segments")
    if not work_dir.exists():
        raise Exception("You must first create the labels")

    # (The original initialised `files` twice; once is enough.)
    files = {}
    labels = work_dir.listdir(filter=DIRS_NO_LINKS)
    for label in labels:
        files[str(label.name)] = label.listdir(pattern='*.ppm')
    return files
예제 #11
0
def find_labels():
    """Collect the segmented character images grouped by label.

    Returns
    -------
    dict
        Maps each label directory name (str) to the list of '*.ppm'
        files it contains.

    Raises
    ------
    Exception
        If the segments directory has not been created yet.
    """
    work_dir = Path("tmp/segments")
    if not work_dir.exists():
        raise Exception("You must first create the labels")

    # (The original initialised `files` twice; once is enough.)
    files = {}
    labels = work_dir.listdir(filter=DIRS_NO_LINKS)
    for label in labels:
        files[str(label.name)] = label.listdir(pattern='*.ppm')
    return files
예제 #12
0
def dump_path(path, prefix="", tab="    ", file=None):
    """Recursively print a directory tree (Python 2 print syntax).

    Symlinks print as ``name -> target``, directories recurse with one
    extra *tab* of indentation, plain files print their size in bytes.
    *file* is the output stream (default: sys.stdout).
    """
    if file is None:
        file = sys.stdout
    p = Path(path)
    if   p.islink():
        print >>file, "%s%s -> %s" % (prefix, p.name, p.read_link())
    elif p.isdir():
        print >>file, "%s%s:" % (prefix, p.name)
        for p2 in p.listdir():
            dump_path(p2, prefix+tab, tab, file)
    else:
        print >>file, "%s%s  (%d)" % (prefix, p.name, p.size())
예제 #13
0
def dream_on_dir(images_dir, out_dir):
    """Run deep-dream over every file in *images_dir*.

    A fresh, fully randomised NightmareConfig is built for each image;
    each produced output path is printed as it appears.
    """
    source_dir = Path(images_dir)
    target_dir = Path(out_dir)

    if not target_dir.exists():
        os.mkdir(target_dir)

    for image in source_dir.listdir():
        # New randomised settings per image.
        settings = NightmareConfig(out_dir=target_dir)
        settings.force_all_random()
        deep_dream(image, settings)
        for produced in settings.list_output_for_image(image):
            print("  New dream: {}".format(produced))
예제 #14
0
class JarBuilder(object):
    """
    Used by my Java projects :ref:`davlink` and :ref:`eidreader`.
    """
    def __init__(self, jarfile, sourcedir):
        self.jarfile = Path(jarfile)
        self.sourcedir = Path(sourcedir)
        # All .java sources; compiled by build_classes().
        self.sources = list(self.sourcedir.listdir('*.java'))

        # Files packed into the jar: the manifest plus every .class file,
        # with '$' (inner-class names) escaped up-front for the shell.
        self.jarcontent = [Path('Manifest.txt')]
        self.jarcontent += list(self.sourcedir.listdir('*.class'))
        self.jarcontent = [
            Path(x.replace("$", "\\$")) for x in self.jarcontent]
        self.libjars = []

    def add_lib(self, pth):
        """Register an external jar to be signed and put on the classpath."""
        self.libjars.append(Path(pth))

    def build_jar(self, outdir, alias):
        """Build (if outdated) and sign the jar plus all registered libs.

        The keystore password is read by the shell via backticks when
        jarsigner runs; a fixed GlobalSign TSA URL timestamps signatures.
        """
        flags = '-storepass "`cat ~/.secret/.keystore_password`"'
        flags += ' -tsa http://timestamp.globalsign.com/scripts/timestamp.dll'
        outdir = Path(outdir)
        jarfile = outdir.child(self.jarfile)
        if jarfile.needs_update(self.jarcontent):
            # NOTE: relies on unipath.Path subclassing str (' '.join works).
            local("jar cvfm %s %s" % (jarfile, ' '.join(self.jarcontent)))
        local("jarsigner %s %s %s" % (flags, jarfile, alias))
        for libfile in self.libjars:
            # Copy each lib next to the jar when outdated, then sign it.
            jarfile = outdir.child(libfile.name)
            if libfile.needs_update([jarfile]):
                libfile.copy(jarfile)
            local("jarsigner %s %s %s" % (flags, jarfile, alias))

    def build_classes(self):
        """Compile every source file with javac (libs on the classpath)."""
        flags = "-Xlint:unchecked"
        if len(self.libjars):
            cp = ':'.join(self.libjars)
            flags += " -classpath %s" % cp
        for src in self.sources:
            local("javac %s %s" % (flags, src))
예제 #15
0
파일: __init__.py 프로젝트: hallaj/inupypi
    def __init__(self, path):
        """Record *path* and its version-sorted directory contents.

        Both attributes stay None when listing fails.
        """
        self.__parents__ = None
        self.__contents__ = None

        try:
            path = Path(path)

            self.__parents__ = path
            # Keep only files and directories, sorted as version numbers.
            self.__contents__ = sorted(
                [entry.absolute() for entry in path.listdir()
                 if entry.isdir() or entry.isfile()],
                key=parse_version)
        except Exception:
            # Narrowed from a bare except: listing errors are still
            # swallowed, but SystemExit/KeyboardInterrupt now propagate.
            pass
예제 #16
0
def parse(path='./flat_files/'):
    """Parse every GenBank flat file under *path* (Python 2 print syntax).

    Files that fail to parse are reported and skipped; the successfully
    parsed records are returned as a list.
    """
    path = Path(path)
    print "parsing records at {}".format(path.absolute())

    records = []
    for p in path.listdir():
        try:
            gbr = GenBank.read(open(p))
            records.append(gbr)
        except:
            # NOTE(review): bare except also hides KeyboardInterrupt, and
            # the handle from open(p) is never explicitly closed.
            print 'error with file', p
    print "parsed %s records.." % len(records)

    return records
예제 #17
0
def parse(path='./flat_files/'):
    """Parse every GenBank flat file under *path* (Python 2 print syntax).

    Files that fail to parse are reported and skipped; the successfully
    parsed records are returned as a list.
    """
    path = Path(path)
    print "parsing records at {}".format(path.absolute())

    records = []
    for p in path.listdir():
        try:
            gbr = GenBank.read(open(p))
            records.append(gbr)
        except:
            # NOTE(review): bare except also hides KeyboardInterrupt, and
            # the handle from open(p) is never explicitly closed.
            print 'error with file', p
    print "parsed %s records.." % len(records)

    return records
예제 #18
0
def cli(darknet_dir, images_dir, temp_dir):
    """Run YOLO detection over every image in *images_dir* in parallel.

    Prints a message and returns early when *temp_dir* does not exist.
    """
    darknet_dir = Path(darknet_dir)
    images_dir = Path(images_dir)
    temp_dir = Path(temp_dir)

    if not temp_dir.exists():
        print('Dir "{}" does not exist'.format(temp_dir))
        return

    # One task per image, fanned out over PROCESSES workers.
    task_args = [(image_file, temp_dir, darknet_dir)
                 for image_file in images_dir.listdir()]
    with multiprocessing.Pool(PROCESSES) as pool:
        pool.starmap(yolo_detect_image_task, task_args)
예제 #19
0
def recursive_listdir(path):
    """
    Walk a directory tree and collect every file found in it.
    :param path: (unipath.Path or string) directory to walk
    :return: (list of unipath.Path) absolute paths of all files beneath
        the given directory
    """
    collected = []
    for entry in Path(path).listdir():
        if entry.isdir():
            # Descend into subdirectories.
            collected += recursive_listdir(entry)
        else:
            collected.append(entry.absolute())
    return collected
예제 #20
0
    def __init__(self, path):
        """Record *path* and its version-sorted directory contents.

        Both attributes stay None when listing fails.
        """
        self.__parents__ = None
        self.__contents__ = None

        try:
            path = Path(path)

            self.__parents__ = path
            # Keep only files and directories, sorted as version numbers.
            self.__contents__ = sorted(
                [entry.absolute() for entry in path.listdir()
                 if entry.isdir() or entry.isfile()],
                key=parse_version)
        except Exception:
            # Narrowed from a bare except: listing errors are still
            # swallowed, but SystemExit/KeyboardInterrupt now propagate.
            pass
예제 #21
0
def parse_auth_log(log_folder='/var/log/'):
    """Parse auth log files and build AuthAttempt records from SSH entries.

    Iterates all ``auth*.*`` files (gzipped or plain) in *log_folder*,
    grouping consecutive lines by their attempt ID and saving an attempt
    when a new ID starts and the previous attempt has all its keys.

    NOTE(review): the attempt in progress when a file ends is never
    saved -- only the transition to a new ID triggers .save().
    NOTE(review): gzip.open(..., 'rb') yields bytes on Python 3, which
    would break line.strip()/find -- presumably Python 2 code; confirm.
    """
    directory = Path(log_folder)
    for auth_file in directory.listdir('auth*.*', FILES):
        if auth_file.ext == '.gz':
            auth = gzip.open(auth_file, 'rb')
        else:
            auth = open(auth_file, 'rU')

        # State carried across lines of one file.
        prev_id = ''
        attempt = None
        for line in auth.readlines():
            line = line.strip()

            # Skip sudo entries
            if line.find('sudo') >= 0:
                continue
            # Grab the date/time of the attempt and remove that from line
            attempted, line = get_date_from_line(line)

            # Grab the attempt ID
            attempt_id = get_id_from_line(line)

            # Is this a dup? (already stored in a previous run)
            if SshHackAttempt.objects.filter(ssh_id=attempt_id).count() > 0:
                continue

            # Is this a new attempt?
            new_attempt = prev_id != attempt_id

            if new_attempt:
                if getattr(attempt, 'has_all_keys', None):
                    # Store the previous attempt and start a new one
                    attempt.save()
                attempt = AuthAttempt()
                attempt.ssh_id = attempt_id
                attempt.attempted = attempted

            # Grab the IP address
            attempt.set_ip_from_line(line)

            # Is the username in this line?
            attempt.set_username_from_line(line)

            # Does the ID need updated?
            if prev_id != attempt_id:
                prev_id = attempt_id

        auth.close()
예제 #22
0
def convert_pdfs(pdfs_dir, output_dir):
    """Convert every PDF in *pdfs_dir* to PNG pages under *output_dir*.

    A pdf is skipped when its first output page ('<stem>-1.png') already
    exists; the remaining conversions run in a process pool.
    """
    output_dir = Path(output_dir)
    if not output_dir.exists():
        os.makedirs(output_dir)

    pdfs_dir = Path(pdfs_dir)
    args = []
    for pdf in tqdm.tqdm(pdfs_dir.listdir("*.pdf")):
        f_name = pdf.name.split('.')[0]
        out = output_dir.child(f_name)
        # BUG FIX: `out + '-1.png'` returns a plain str (unipath.Path is a
        # str subclass and does not override __add__), so `.exists()` would
        # raise AttributeError; wrap the result back into a Path.
        outfile = Path(out + '-1.png')
        if not outfile.exists():
            args.append((pdf, out))

    print('Converting {} pdfs'.format(len(args)))
    with Pool(NUM_OF_PROCESSES) as pool:
        pool.starmap(convert_to_png, args)
예제 #23
0
def get_package_files(eggbasket, package):
    """Return Package objects for every file of *package*, newest first.

    The package directory is matched case-insensitively against the
    entries of the egg basket.

    Raises
    ------
    ValueError
        If no basket entry matches *package* (from list.index).
    """
    package_path = get_package_path(eggbasket)
    # List the basket once (the original listed it twice) and resolve the
    # requested name case-insensitively to the real on-disk entry.
    entries = package_path.listdir()
    package = entries[[x.name.lower() for x in entries].index(package.lower())]

    package_dir = Path(package_path, package)
    files = []

    for package_file in package_dir.listdir():
        p = Package()
        p.filepath = package_file
        p.eggbasket = eggbasket
        try:
            # Parse the sdist once for both name and author.
            sdist = SDist(package_file)
            p.name = sdist.name
            p.author = sdist.metadata['PKG-INFO']['Author']
        except Exception:
            # Non-sdist files simply keep their defaults.
            pass
        files.append(p)
    return sorted(files, reverse=True)
예제 #24
0
def create_own_lexicon():
    """Build a word-frequency lexicon from the KNMP and Stanford datasets.

    Returns
    -------
    dict
        Maps each annotated word's text to its number of occurrences.
    """
    lexicon = {}

    for dataset in ['KNMP', 'Stanford']:
        # Find all the annotated pages in the dataset
        ann_dir = Path(Path.cwd().ancestor(1), 'data/hwr_data/words/' + dataset)
        annotations = ann_dir.listdir('*.words')

        for f in annotations:
            # Count every <Word> element's text attribute.
            annotation = ET.parse(f).getroot()
            for word in annotation.iter('Word'):
                text = word.get('text')

                # dict.get replaces the Python-2-only dict.has_key while
                # keeping the exact same counting behaviour.
                lexicon[text] = lexicon.get(text, 0) + 1
    return lexicon
def check_circular_imports(path, prefix):
    """Report pairs of modules in *path* that import each other (Python 2).

    Scans every .py file for ``from <prefix> import X`` style lines and
    prints each pair of modules that mutually import one another.
    """
    path = Path(path)
    python_files_paths = path.listdir(filter=FILES, pattern='*.py')
    # module stem -> list of imported module names found in that file.
    relative_import_modules = {}
    pattern = re.compile(
        r'(from {0} import )?(\w+),?[\s+]?(as\s\w+)?'.format(prefix),
        re.IGNORECASE)
    for pyf in python_files_paths:
        with open(pyf, 'r') as f:
            for line in f.read().splitlines():
                matchs = pattern.findall(line)
                if not matchs:
                    continue
                # Group 1 of each match is the bare module name.
                modules_names = [m[1] for m in matchs]
                relative_import_modules[pyf.stem] = modules_names

    for module, next_module in combinations(relative_import_modules.keys(), 2):
        module_modules = relative_import_modules[module]
        next_module_modules = relative_import_modules[next_module]
        # NOTE(review): `module`/`next_module` are the dict keys (pyf.stem);
        # `.name` on them only works if stem returns a Path-like object,
        # not a plain str -- confirm against unipath's stem type.
        if module.name in next_module_modules and next_module.name in module_modules:
            print "Circular imports in modules {0} and {1}".format(
                module, next_module)
예제 #26
0
from __future__ import print_function
from unipath import Path
import numpy as np

if __name__ == '__main__':
    # Report per-dataset and overall size statistics for window CSV files.
    work_dir = Path('tmp/')
    stat_files = work_dir.listdir('window_stats_*.csv')
    total_widths = []
    total_heights = []

    for stats_file in stat_files:
        widths = []
        heights = []
        with open(stats_file, 'r') as handle:
            for row in handle.readlines():
                letter, width, height = row.strip().split(',')
                # Skip the CSV header row.
                if letter == 'label' and width == 'width' and height == 'height':
                    continue
                widths.append(int(width))
                heights.append(int(height))
                total_widths.append(int(width))
                total_heights.append(int(height))

            print(str(stats_file), 'statistics:')
            print('Mean  width: ', np.mean(widths))
            print('Stdev width: ', np.std(widths))
            print('Mean  height:', np.mean(heights))
            print('Stdev height:', np.std(heights))
            print('Median width:', np.median(widths))
            print('Median height:', np.median(heights))
            print()
예제 #27
0
class ReleaseSorter(object):
    """Sorts TV release files in a directory into series/season folders.

    NOTE(review): Python 2 code -- uses ``unicode`` and ``iterkeys``, and
    relies on ``dict.keys()`` returning a list (safe deletion in
    sort_files only under Python 2).
    """

    def __init__(self, sort_dir):
        self.sort_dir = Path(sort_dir)

        # Make sure the sort dir is a dir and cd into it
        if not self.sort_dir.isdir():
            raise ReleaseSorterError('Invalid sort-dir {}'.format(sort_dir))
        os.chdir(sort_dir)

        # Maps series name -> {unicode(sorter_file): SorterFile}.
        self.files_to_sort = {}

    def relative_path(self, path, root_path):
        """Return *path* relative to *root_path* (leading '/' stripped)."""
        relative_path = path.replace(root_path, '')
        if relative_path[0:1] == '/':
            return relative_path[1:]
        else:
            return relative_path

    def check_extension(self, extension):
        """True when *extension* is a supported media container."""
        if extension in ('.mkv', '.avi'):
            return True
        else:
            return False

    def check_modified_time(self, time_since_modified):
        """True when the file is old enough (>= 20 min) to be processed."""
        if time_since_modified < timedelta(minutes=20):
            return False
        else:
            return True

    def create_series_folders(self, sorter_file):
        """Create the series and season directories if they are missing."""
        if sorter_file.series_dir and not sorter_file.series_dir.exists():
            log.info('Creating series dir {}'.format(
                sorter_file.relative_path(sorter_file.series_dir)))
            sorter_file.series_dir.mkdir()

        if sorter_file.season_dir and not sorter_file.season_dir.exists():
            log.info('Creating season dir {}'.format(
                sorter_file.relative_path(sorter_file.season_dir)))
            sorter_file.season_dir.mkdir()

    def move_subtitle_files(self, sorter_file):
        """Check for existing subtitle files matching media file and move
        them to sort folder too.
        """

        for ext in ('.srt', '.sub', '.idx'):
            # Sibling subtitle file with the same stem as the media file.
            subtitle_path = Path(sorter_file.path.parent, '{}{}'.format(
                sorter_file.path.stem, ext))

            if subtitle_path.exists():
                log.info('Moving subtitle file {} to {}'.format(
                    self.relative_path(subtitle_path, self.sort_dir),
                    sorter_file.season_dir))
                subtitle_path.move(Path(self.sort_dir,
                                   sorter_file.season_dir))

    def move_sorter_file(self, sorter_file):
        """Move the media file into its season directory."""
        log.info('Moving {} to {}'.format(sorter_file.relative_path(),
                                          sorter_file.season_dir))
        sorter_file.path.move(Path(self.sort_dir, sorter_file.season_dir))

    def get_sorter_files(self):
        """List sort dir and find all files to sort"""

        log.debug('Sorting dir {}'.format(self.sort_dir))

        file_list = self.sort_dir.listdir(filter=FILES)

        for file in file_list:
            sorter_file = SorterFile(file, self.sort_dir)

            # File extension
            if not self.check_extension(sorter_file.extension):
                log.debug('Skipping {}, wrong file extension'.format(
                    sorter_file.relative_path()))
                continue

            # Modified time: only process files that haven't been modified
            # in the last 20 min (still being written/downloaded).
            time_since_modified = datetime.now() - sorter_file.mtime
            if not self.check_modified_time(time_since_modified):
                log.debug('Skipping {}, has been modified in the last 20 min '
                          '({})'.format(sorter_file.relative_path(),
                                        human(time_since_modified)))
                continue

            # Skip if file is not a TV release
            if not sorter_file.release.tv_release:
                log.debug('Skipping {}, not a TV release'.format(
                    sorter_file.relative_path()))
                continue

            # Add file to sorter list, grouped by series name.
            series_name = sorter_file.release.tv_series_data['series_name']
            series_episodes = self.files_to_sort.get(series_name)
            if not series_episodes:
                series_episodes = {}
            series_episodes[unicode(sorter_file)] = sorter_file
            self.files_to_sort[series_name] = series_episodes

    def sort_files(self):
        """Create folders and move each remaining episode file.

        If a season dir already exists use that when sorting. Else, if
        there is only one file found for the series, skip it entirely.
        """
        for series in self.files_to_sort.keys():
            series_episodes = self.files_to_sort[series]

            for episode_file in series_episodes:
                sorter_file = series_episodes[episode_file]
                # Episode already has a season dir
                if sorter_file.season_dir.exists():
                    log.info('Season dir for {} already exists {}'.format(
                        episode_file, sorter_file.season_dir))
                # No season dir for episode. Skip if only one episode was found
                else:
                    # Skip if only one episode was found
                    if len(series_episodes) < 2:
                        log.debug('Skipping {}, only one episode found'.format(
                            series_episodes.iterkeys().next()))

                        del(self.files_to_sort[series])

        # Loop remaining files for folder creating and moving
        for series in self.files_to_sort:
            series_episodes = self.files_to_sort[series]

            for episode_file in series_episodes:
                sorter_file = series_episodes[episode_file]

                # Create series folder if needed
                self.create_series_folders(sorter_file)

                # Move the file
                self.move_sorter_file(sorter_file)

                # Move subtitle files
                self.move_subtitle_files(sorter_file)

    def sort(self):
        """Scan the sort dir, then sort everything that was found."""
        self.get_sorter_files()
        self.sort_files()

    def cleanup_empty_folders(self):
        """Remove empty directories under the sort dir (skipping '_' dirs)."""
        log.debug('Cleanup empty folders in {}'.format(self.sort_dir))

        dirs_to_check_for_removal = []
        for dir in self.sort_dir.walk(filter=DIRS, top_down=False):
            # Skip all dirs in _ dir
            if '/_' in dir:
                log.debug('Skipping cleanup on {}, _ dir'.format(dir))
                continue

            dirs_to_check_for_removal.append(dir)

        for dir in dirs_to_check_for_removal:
            # If dir is empty, remove it
            if dir.isdir() and len(dir.listdir()) == 0:
                log.info('Removing empty dir {}'.format(self.relative_path(
                    dir, self.sort_dir)))
                dir.rmtree()
예제 #28
0
from unipath import Path
import os

# CircleCI will run this script 3rd

# This will add an "_" to all the fresh .md files in v3-1

# All markdown files in the 'includes' folder in Slate portion of the Dev Center repo
#os.chdir(r"C:\Users\I860605\Desktop\Dev_Center_New\dev_concur\src\api-explorer\v3-1")

v3_1 = Path(r"./src/api-explorer/v3-1")
# Prefix every markdown filename with "_" if it lacks one.
# NOTE(review): `markdown` is whatever v3_1.listdir() yields; if these are
# full paths, '"_" in markdown' tests the whole path and the rename target
# lands relative to the CWD -- confirm listdir() returns bare names here.
for markdown in v3_1.listdir():
    if "_" not in markdown:
        new_name = "_" + markdown
        os.rename(markdown, new_name)
        print("Adding '_' to file...Filename has just been updated to: ",
              new_name)
# NOTE(review): this prints unconditionally, even after renames happened.
print("All filenames already have correct formatting")
예제 #29
0
from unipath import Path
import os

# CircleCI will run this script 4th

# This will add an "_" to all the fresh .md files in v3-2

# All markdown files in the 'includes' folder in Slate portion of the Dev Center repo
#os.chdir(r"C:\Users\I860605\Desktop\Dev_Center_New\dev_concur\src\api-explorer\v3-2")

v3_2 = Path(r"./src/api-explorer/v3-2")

# Prefix every markdown filename with "_" if it lacks one.
# NOTE(review): `markdown` is whatever v3_2.listdir() yields; if these are
# full paths, '"_" in markdown' tests the whole path and the rename target
# lands relative to the CWD -- confirm listdir() returns bare names here.
for markdown in v3_2.listdir():
    if "_" not in markdown:
        new_name = "_" + markdown
        os.rename(markdown, new_name)
        print("Adding '_' to file...Filename has just been updated to: ",
              new_name)
# NOTE(review): this prints unconditionally, even after renames happened.
print("All filenames already have correct formatting")
예제 #30
0
class Decoder(object):
    def __init__(self,base_dir, cal=None, cal_each_mask=False):
        '''
        A Decoder for a Vector Coded Aperture Measurement System
        
        Parameters 
        -----------
        base_dir : str, or `unipath.Path`
            the base directory which holds all data; each child entry is
            expected to be named with the decimal value of a mask
            
        cal: `skrf.Calibration` object or None
            the calibration template that is copied for each mask's 
            calibration. The `measurements` attribute provided by 
            the calibration template is never used, only the `ideals`.
            if None, then no calibration is performed
        
        cal_each_mask : bool
            should a calibration be performed for each mask? If True, this 
            requires that `cal` represents a calibration template, for 
            which the `measurements` are provided for each mask dir 
        '''
        self.base_dir = Path( base_dir)
        self.cal = cal
        self.cal_each_mask = cal_each_mask
        # determine rank: the largest decimal mask value needs rank**2 bits,
        # so the (square) mask rank is the square root of its bit-length
        max_dec = max([int(d) for d in self.decs.keys()])
        self.rank = int(sqrt(len('{0:b}'.format(max_dec))))
        
        # Frequency axis, taken from the first network found in the first
        # mask directory.
        # NOTE(review): `dict.values()[0]` is Python-2 only -- on Python 3 a
        # dict view is not indexable (would need `next(iter(...))`). Also
        # `rf.ran` is not an obvious public skrf name -- verify it exists.
        self.frequency = rf.ran(str(self.decs.values()[0])).values()[0].frequency
        
            
        
    @property
    def decs(self):
        '''
        mapping of decimal mask values to their data directories
        
        A dictionary with key:values as string:Path for each dec value
        '''
        return {str(k.name):k for k in self.base_dir.listdir()}
    
    
    
            
    def pixel2decs(self, m, n, half_on_only=False):
        '''
        list of the masks which have a given pixel `on`.  

        the masks are given in decimal representations

        Parameters
        -----------
        m, n : int
            pixel row and column indices into the mask
        half_on_only : bool
            if True, only include masks whose total on-pixel count
            equals `self.rank`
        '''
        out=[]
        for d in self.decs.keys():
            mask = dec2mask(d, rank=self.rank)
            if mask[m,n] ==1:
                # pixel is on
                if half_on_only:
                    # NOTE(review): assumes `sum(mask)` yields the on-pixel
                    # count; for a 2D ndarray the builtin sum() returns an
                    # array (per-column sums) -- confirm dec2mask's return
                    # type makes this comparison valid.
                    if sum(mask) == self.rank:
                        out.append(d)
                else:
                    out.append(d)
        return out
    
    
        
    def cal_of(self,dec):
        '''
        Calibration for a given mask, or pixel

        If `self.cal` is None a pass-through (ideal) one-port calibration
        is synthesized; if `cal_each_mask` is False the shared template is
        returned as-is; otherwise a copy of the template is populated with
        this mask's (or pixel's) measurements.
        '''
        
        ##TODO: for no-cal or static cal this could be only calculated
        # once to improve performance
        if self.cal is None:
            # Ideal error coefficients: zero directivity/source match,
            # unity reflection tracking -> correction is a no-op.
            freq = self.frequency
            n = len(freq)
            coefs ={'directivity':zeros(n),
                    'source match': zeros(n),
                    'reflection tracking':ones(n)}
            cal =OnePort.from_coefs(frequency=freq, coefs=coefs)
            return cal
            
        if not self.cal_each_mask:
            return self.cal
        else:
            # we want a calibration for each mask, so create the calbration
            # for this mask,or pixel
            cal = deepcopy(self.cal)
            ideals = cal.ideals

            if isinstance(dec, tuple):
                # `dec` is a (m, n) pixel: decode the measurements for each
                # ideal by averaging over the masks containing that pixel
                measured =[]
                for ideal in ideals:
                    m = self.raw_ntwk_of(dec,ideal.name)
                    measured.append(m)
                
            else:
                # `dec` is a single mask: read its measurement directory
                measured = rf.ran(self.decs[dec]).values()
            

            cal.measured, cal.ideals = rf.align_measured_ideals(measured,ideals)
            cal.name = str(dec)
            return cal

    def error_ntwk_of(self,dec):
        '''
        error ntwk for a given mask, or pixel

        For a (m, n) pixel tuple, averages the error networks of all masks
        that have that pixel on.
        '''
        if isinstance(dec, tuple):
            ntwks = [self.error_ntwk_of(k) for k in self.pixel2decs(*dec)]
            return rf.average(ntwks)
        
        ntwk = self.cal_of(dec).error_ntwk
        ntwk.name = dec
        return ntwk
    
    def raw_ntwk_of(self,dec,name):
        '''
        raw ntwk for a given mask, or pixel

        For a (m, n) pixel tuple, averages the raw networks of all masks
        that have that pixel on.
        '''
        if isinstance(dec, tuple):
            ntwks = [self.raw_ntwk_of(k,name) for k in self.pixel2decs(*dec)]
            return rf.average(ntwks)
        # NOTE(review): `.values()[0]` is Python-2 only (see __init__).
        ntwk = rf.ran(str(self.decs[dec]), contains=name).values()[0]
        
        return ntwk
        
    def cor_ntwk_of(self,dec, name, loc='corrected'):
        '''
        corrected ntwk for a given mask, or pixel

        `loc` selects the space in which pixel decoding happens:
        'corrected' averages already-corrected mask networks, 'measured'
        averages raw networks first and then applies the calibration.
        '''
        if isinstance(dec, tuple):
            if loc  == 'corrected':
                # decode in corrected-space
                ntwks = [self.cor_ntwk_of(k,name) for k in self.pixel2decs(*dec)]
                return rf.average(ntwks)
            elif loc =='measured':
                # decode in measured space
                m = self.raw_ntwk_of(dec,name)
                return self.cal_of(dec).apply_cal(m)
        
        # correct a measurement for a single mask
        return self.cal_of(dec).apply_cal(self.raw_ntwk_of(dec,name))
    
    
    
    def cor_cube(self,name,attr='s_db'):
        '''
        a corrected datacube
        
        constructs a `corrected`  3D data cube with dimensions 
        (FxMXN), where F is frequency axis, M and N are pixels 
        starting from upper left. 
        
        Parameters
        --------------
        name : str
            name of network
        attr: 's', 's_db', 's_deg', any skrf.Network property
            the attribute to put in the cube
        
        '''
        rank = self.rank
        # Gather per-pixel traces (pixel-major), then transpose/reshape so
        # frequency becomes the leading axis.
        z = array([getattr(self.cor_ntwk_of((m,n),name),attr) \
            for m in range(rank) for n in range(rank)])
        z = z.T.reshape(-1,rank,rank)
        return z
    
    def interact_cor_cube(self,name,attr='s_db', clims=None):
        '''
        an interactive image projection of the cor_cube

        Returns an ipywidgets `interactive` widget with a frequency-index
        slider; default color limits depend on `attr`.
        '''
        z = self.cor_cube(name=name, attr=attr)
        if clims == None:
            if attr =='s_db':
                clims = (-20,10)
            elif attr=='s_deg':
                clims = (-180,180)
        freq = self.frequency    
        def func(n):
            # plot the (rank x rank) pixel image at frequency index n
            plt.matshow(z[n])
            plt.title('%i%s'%(freq.f_scaled[n],freq.unit)) 
            plt.grid(0)
            plt.colorbar()
            if clims is not None:
                plt.clim(clims)
        return interactive (func,n =(0,len(freq)) )
예제 #31
0
from __future__ import print_function
from unipath import Path
import numpy as np

if __name__ == "__main__":
    workdir = Path("tmp/")
    datasets = workdir.listdir("window_stats_*.csv")
    total_widths = []
    total_heights = []

    for data in datasets:
        with open(data, "r") as f:
            widths = []
            heights = []
            for line in f.readlines():
                letter, width, height = line.strip().split(",")
                if letter == "label" and width == "width" and height == "height":
                    continue  # skip header
                widths.append(int(width))
                heights.append(int(height))
                total_widths.append(int(width))
                total_heights.append(int(height))

            print(str(data), "statistics:")
            print("Mean  width: ", np.mean(widths))
            print("Stdev width: ", np.std(widths))
            print("Mean  height:", np.mean(heights))
            print("Stdev height:", np.std(heights))
            print("Median width:", np.median(widths))
            print("Median height:", np.median(heights))
            print()
예제 #32
0
from unipath import Path
import os

# CircleCI will run this script 5th

# This will add an "_" to all the fresh .md files in v4-0
# (Slate only renders include files whose names start with "_").

# For local testing, point this at wherever v4-0 lives on your machine,
# e.g. Path(r"C:\...\dev_concur\src\api-explorer\v4-0"); the relative path
# below matches the repository layout used by CircleCI.
v4_0 = Path(r"./src/api-explorer/v4-0")

# Add "_" to the beginning of every markdown file that does not have it.
# Fix: listdir() yields full Path objects, so the old `"_" not in markdown`
# test matched an underscore anywhere in the path, and `"_" + markdown`
# prepended the underscore to the whole path (an invalid rename target).
# Operate on the basename and rename inside the file's own directory.
for markdown in v4_0.listdir():
    if not markdown.name.startswith("_"):
        new_path = Path(markdown.parent, "_" + markdown.name)
        os.rename(markdown, new_path)
        print("Adding '_' to file...Filename has just been updated to: ",
              new_path)
print("Finished: all filenames now have correct formatting")
예제 #33
0
def get_current_package(eggbasket, package):
    """Return the newest entry in the package's directory.

    Entries are compared lexicographically (the maximum is the first item
    of a reverse-sorted listing); returns 'unknown' for an empty directory.
    """
    entries = Path(get_package_path(eggbasket), package).listdir()
    return max(entries) if entries else 'unknown'
예제 #34
0
# Fix: `Path` was used below without being imported (NameError at runtime).
from unipath import Path
import os

# CircleCI will run this script 2nd

# This will add an "_" to all the fresh .md files in v3-0
# (Slate only renders include files whose names start with "_").

# For local testing, point this at wherever v3-0 lives on your machine,
# e.g. Path(r"C:\...\dev_concur\src\api-explorer\v3-0"); the relative path
# below matches the repository layout used by CircleCI.
v3_0 = Path(r"./src/api-explorer/v3-0")

# Add "_" to the beginning of every markdown file that does not have it.
# Fix: listdir() yields full Path objects, so the old `"_" not in markdown`
# test matched an underscore anywhere in the path, and `"_" + markdown`
# prepended the underscore to the whole path (an invalid rename target).
# Operate on the basename and rename inside the file's own directory.
for markdown in v3_0.listdir():
    if not markdown.name.startswith("_"):
        new_path = Path(markdown.parent, "_" + markdown.name)
        os.rename(markdown, new_path)
        print("Adding '_' to file...Filename has just been updated to: ",
              new_path)
print("Finished: all filenames now have correct formatting")
예제 #35
0
for app in app_list:
    log.info("Retrieving log data for {}".format(app.name))

    # Set up the directory containing all the logs
    log_directory = Path(app.log_location)

    log.debug("Log Location = {}".format(log_directory))

    # Cycle through all retrieved files looking for files
    # matching the file name regex
    log.debug("File name regex = {}".format(app.file_name))

    name_regex = r"{}".format(app.file_name)
    app_logs = []

    for file in log_directory.listdir(names_only=True):
        log.debug("File name in log directory = {}".format(file))

        if re.match(name_regex, file):
            log.debug("Regex matches file name")

            app_logs.append(log_directory.child(file))

    # Open the log(s) and form them into a single json file
    json_log = []

    # Cycle through all the log files and create a list of the entries
    for app_log in app_logs:
        log.debug("Opening log file: {}".format(app_log))

        try: