Example #1
def gnomekeyring_get_all_entries():
    """Returns a dict of all keyring containers with a list of all their
    entries.
    """
    out = {}
    tried_to_unlock = False
    for c in gnomekeyring.list_keyring_names_sync():
        out[c] = []
        for i in gnomekeyring.list_item_ids_sync(c):
            item = None
            try:
                item = gnomekeyring.item_get_info_sync(c, i)
            except gnomekeyring.IOError as e:
                logging.info('%s', e)
                if not tried_to_unlock:
                    tried_to_unlock = True
                    if gnomekeyring_unlock():
                        # Try again.
                        try:
                            item = gnomekeyring.item_get_info_sync(c, i)
                        except gnomekeyring.IOError as e:
                            logging.info('%s', e)
            if item:
                out[c].append(GnomeKeyringItem(c, i, item))
            else:
                logging.error('Failed to access %s-%-2d: %s', c, i, e)
        natsort.natsort(out[c], key=str)
    return out
Example #2
def gnomekeyring_get_all_entries():
    """Returns a dict of all keyring containers with a list of all their
    entries.
    """
    out = {}
    tried_to_unlock = False
    for c in gnomekeyring.list_keyring_names_sync():
        out[c] = []
        for i in gnomekeyring.list_item_ids_sync(c):
            item = None
            try:
                item = gnomekeyring.item_get_info_sync(c, i)
            except gnomekeyring.IOError as e:
                logging.info('%s', e)
                if not tried_to_unlock:
                    tried_to_unlock = True
                    if gnomekeyring_unlock():
                        # Try again.
                        try:
                            item = gnomekeyring.item_get_info_sync(c, i)
                        except gnomekeyring.IOError as e:
                            logging.info('%s', e)
            if item:
                out[c].append(GnomeKeyringItem(c, i, item))
            else:
                logging.error('Failed to access %s-%-2d: %s', c, i, e)
        natsort.natsort(out[c], key=str)
    return out
Example #3
    def runTest(self):

        # This is plain old sorting (what we don't want).
        assert sorted(["1", "5", "10", "50"]) == ["1", "10", "5", "50"]

        # This is version sorting (what we're after).
        assert natsort(["1", "5", "10", "50"]) == ["1", "5", "10", "50"]

        # This is version sorting reversed.
        assert natsort(["1", "5", "10", "50"], reverse=True) == ["50", "10", "5", "1"]

        # This covers a previously fixed bug. I've purposefully shuffled the
        # order on the left side to avoid false positives caused by stable
        # sorting.
        assert natsort(["1.5", "1.0"]) == ["1.0", "1.5"]
Example #4
 def __init__(self, directoryName, fileSuffix, revertOrder=False):
     self.directoryName = directoryName
     self.fileSuffix = fileSuffix
     self.fileNames = os.listdir(directoryName)
     self.fileNames = natsort(self.fileNames)
     if revertOrder:
         self.fileNames = self.fileNames[::-1]
Example #5
    def collect_backups(self, bucketname, prefix):
        """
        Collect the backups in the given s3 bucket.

        :param bucketname: The name of the S3 backup bucket (a string).
        :param prefix: The key prefix to scan for backups (a string).
        :returns: A sorted :class:`list` of :class:`Backup` objects (the
                  backups are sorted by their date).
        """
        backups = []
        
        bucket = self.conn.get_bucket(bucketname)

        logger.info("Scanning for backups: s3://%s/%s", bucketname, prefix)

        for entry in natsort([key.name for key in bucket.list(prefix)]):
            # Check for a time stamp in the directory entry's name.
            match = TIMESTAMP_PATTERN.search(entry)
            if match:
                # Make sure the entry matches the given include/exclude patterns.
                if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                    logger.debug("Excluded %r (it matched the exclude list).", entry)
                elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                    logger.debug("Excluded %r (it didn't match the include list).", entry)
                else:
                    backups.append(S3Backup(
                        pathname=entry,
                        timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                    ))
            else:
                logger.debug("Failed to match time stamp in filename: %s", entry)
        if backups:
            logger.info("Found %i timestamped backups in %s.", len(backups), bucket)
        return sorted(backups)
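The TIMESTAMP_PATTERN regular expression is not shown in these examples. As a minimal sketch (the pattern below is a hypothetical stand-in, not necessarily the one these projects use), this is how match.groups('0') feeds datetime.datetime(): any optional group that did not match is replaced by '0', so the constructor always receives a complete set of integers.

import datetime
import re

# Hypothetical timestamp pattern: year, month and day required, time optional.
TIMESTAMP_PATTERN = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})'
    r'(?:[_T](?P<hour>\d{2})(?P<minute>\d{2})(?P<second>\d{2}))?'
)

match = TIMESTAMP_PATTERN.search('daily/backup-2024-05-01.tar.gz')
# Missing optional groups become '0', e.g. ('2024', '05', '01', '0', '0', '0').
timestamp = datetime.datetime(*(int(group, 10) for group in match.groups('0')))
assert timestamp == datetime.datetime(2024, 5, 1, 0, 0, 0)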
Example #6
def calc_feat_df(cough_dir_list,feat_col_Names,N_SPLITS):


	df = pd.DataFrame(columns = feat_col_Names)


	k = 0.0

	# Init progress bar
	printProgress(k,len(cough_dir_list),prefix = "Progress:",suffix = "Complete",barLength = 50)
	
	for dir in cough_dir_list:

		coughs_list = glob(dir + "/*.wav")
	
		# first numerically sort the coughs list
		coughs_list = natsort(coughs_list)

		# extract spectral features for all coughs in dir
		feat_matrix = extract_features(dir,coughs_list, meta_data,N_SPLITS)

		temp_df = pd.DataFrame(feat_matrix,columns = feat_col_Names)

		df = df.append(temp_df)

		k += 1
		printProgress(k,len(cough_dir_list),prefix = "Progress:",suffix = "Complete",barLength = 50)


	return df
Example #7
    def test_python_3_compatibility(self):
        """
        Test the Python 3 incompatibility reported in `issue 2`_.

        .. _issue 2: https://github.com/xolox/python-naturalsort/issues/2
        """
        assert natsort(['1', 'a']) == ['1', 'a']
Example #8
def readStreamVid(dataDir='data'):
    """
    Reads the streamline time sequence.
    
    call signature:
    
        readStreamVid(dataDir = 'data')
    
    Keyword arguments:
    
    *dataDir*:
      Data directory.
    """

    files = listdir(dataDir)
    files = natsort.natsort(files)
    nFiles = 0
    for i in range(len(files)):
        if ((str.find(files[i], 'stream') == 0) and (files[i] != 'stream.vtk')
                and (str.find(files[i], 'Init') == -1)):
            nFiles += 1

    # initialize the array of streamline objects
    s = []

    for f in files:
        if ((str.find(f, 'stream') == 0) and (f != 'stream.vtk')
                and (str.find(f, 'Init') == -1)):
            s.append(gm.readStream(dataDir=dataDir, streamFile=f))

    return s
Example #9
    def collect_backups(self, location):
        """
        Collect the backups at the given location.

        :param location: Any value accepted by :func:`coerce_location()`.
        :returns: A sorted :class:`list` of :class:`Backup` objects (the
                  backups are sorted by their date).
        :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
                 exist or isn't readable.
        """
        backups = []
        location = coerce_location(location)
        logger.info("Scanning %s for backups ..", location)
        location.ensure_readable()
        pattern = re.compile(self.timestamp, re.VERBOSE)
        for entry in natsort(location.context.list_entries(location.directory)):
            match = pattern.search(entry)
            if match:
                if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                    logger.verbose("Excluded %s (it matched the exclude list).", entry)
                elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                    logger.verbose("Excluded %s (it didn't match the include list).", entry)
                else:
                    try:
                        backups.append(Backup(
                            pathname=os.path.join(location.directory, entry),
                            timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                        ))
                    except ValueError as e:
                        logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
            else:
                logger.debug("Failed to match time stamp in filename: %s", entry)
        if backups:
            logger.info("Found %i timestamped backups in %s.", len(backups), location)
        return sorted(backups)
Example #10
    def update_file(self, force=None):
        """
        Update the file with the contents of the files in the ``.d`` directory.

        :param force: Override the value of :attr:`force` (a boolean or
                      :data:`None`).
        :raises: :exc:`RefuseToOverwrite` when :attr:`force` is :data:`False`
                 and the contents of :attr:`filename` were modified.
        """
        if force is None:
            force = self.force
        if not self.context.is_directory(self.directory):
            # Create the .d directory.
            logger.info("Creating directory %s ..", format_path(self.directory))
            self.context.execute('mkdir', '-p', self.directory, tty=False)
            # Move the original file into the .d directory.
            local_file = os.path.join(self.directory, 'local')
            logger.info("Moving %s to %s ..", format_path(self.filename), format_path(local_file))
            self.context.execute('mv', self.filename, local_file, tty=False)
        # Read the modular configuration file(s).
        blocks = []
        for entry in natsort(self.context.list_entries(self.directory)):
            if not entry.startswith('.'):
                filename = os.path.join(self.directory, entry)
                if self.context.is_executable(filename):
                    blocks.append(self.execute_file(filename))
                else:
                    blocks.append(self.read_file(filename))
        contents = b"\n\n".join(blocks)
        # Make sure the generated file was not modified? We skip this on the
        # first run, when the original file was just moved into the newly
        # created directory (see above).
        if all(map(self.context.is_file, (self.filename, self.checksum_file))):
            logger.info("Checking for local changes to %s ..", format_path(self.filename))
            if self.new_checksum != self.old_checksum:
                if force:
                    logger.warning(compact(
                        """
                        The contents of the file to generate ({filename})
                        were modified but --force was used so overwriting
                        anyway!
                        """,
                        filename=format_path(self.filename),
                    ))
                else:
                    raise RefuseToOverwrite(compact(
                        """
                        The contents of the file to generate ({filename})
                        were modified and I'm refusing to overwrite it! If
                        you're sure you want to proceed, use the --force
                        option or delete the file {checksum_file} and
                        retry.
                        """,
                        filename=format_path(self.filename),
                        checksum_file=format_path(self.checksum_file),
                    ))
        # Update the generated configuration file.
        self.write_file(self.filename, contents)
        # Update the checksum file.
        self.context.write_file(self.checksum_file, self.new_checksum)
Example #11
def calc_feat_df(cough_dir_list, WIN_SIZE):

    feat_col_Names = get_colNames()

    df = pd.DataFrame(columns=feat_col_Names)

    k = 0.0

    for dir in cough_dir_list:

        coughs_list = glob(dir + "/*.wav")

        coughs_list = natsort(coughs_list)

        feat_matrix = extract_features(dir, coughs_list, meta_data, WIN_SIZE)

        temp_df = pd.DataFrame(feat_matrix, columns=feat_col_Names)

        df = df.append(temp_df)

        k += 1

        printProgress(k, len(cough_dir_list), prefix="Progress:", suffix="Complete", barLength=50)

    return df
Example #12
def rename(file, folder):
    file_content = pd.read_excel(file)
    files = natsort(os.listdir(folder))

    i = 0
    for file in files:

        file_format = PurePosixPath(file).suffix
        file_name = file.split(str(file_format))[0]

        old_name = file_content['nome_atual'].tolist()[i]
        new_name = file_content['novo_nome'].tolist()[i] + str(file_format)

        if not os.path.exists(folder + '/Renomeados'):
            os.mkdir(folder + '/Renomeados')
            destino = folder + '/Renomeados'

        if str(file) == str(old_name):
            old_name = os.path.join(folder, old_name)
            new_name = os.path.join(destino, new_name)
            os.rename(old_name, new_name)
            i += 1
        else:
            sg.PopupError(
                "A lista de arquivos da planilha nao coincide com a pasta indicada!"
            )
            break
Example #13
File: poincare.py  Project: SimonCan/glemur
def readPoincareVid(dataDir='data'):
    """
    Reads the Poincare map time sequence.
    
    call signature:
    
        readPoincareVid(dataDir = 'data')
    
    Keyword arguments:
    
    *dataDir*:
      Data directory.
    """

    # find the number of dump files and do some sorting
    files = listdir(dataDir)
    files = natsort.natsort(files)
    nFiles = 0
    for i in range(len(files)):
        if ((str.find(files[i], 'poincare') == 0)
                and (files[i] != 'poincare.vtk')
                and (str.find(files[i], 'Init') == -1)):
            nFiles += 1

    # initialize the array of Poincare objects
    po = []

    for f in files:
        if ((str.find(f, 'poincare') == 0) and (f != 'poincare.vtk')
                and (str.find(f, 'Init') == -1)):
            print(f)
            po.append(gm.readPoincare(dataDir=dataDir, poincare=f))

    return po
Example #14
    def collect_backups(self, location):
        """
        Collect the backups at the given location.

        :param location: Any value accepted by :func:`coerce_location()`.
        :returns: A sorted :class:`list` of :class:`Backup` objects (the
                  backups are sorted by their date).
        :raises: :exc:`~exceptions.ValueError` when the given directory doesn't
                 exist or isn't readable.
        """
        backups = []
        location = coerce_location(location)
        logger.info("Scanning %s for backups ..", location)
        location.ensure_readable()
        for entry in natsort(location.context.list_entries(location.directory)):
            match = TIMESTAMP_PATTERN.search(entry)
            if match:
                if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                    logger.verbose("Excluded %s (it matched the exclude list).", entry)
                elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                    logger.verbose("Excluded %s (it didn't match the include list).", entry)
                else:
                    try:
                        backups.append(Backup(
                            pathname=os.path.join(location.directory, entry),
                            timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                        ))
                    except ValueError as e:
                        logger.notice("Ignoring %s due to invalid date (%s).", entry, e)
            else:
                logger.debug("Failed to match time stamp in filename: %s", entry)
        if backups:
            logger.info("Found %i timestamped backups in %s.", len(backups), location)
        return sorted(backups)
Example #15
def readMapVid(dataDir = 'data'):
    """
    Reads the integration map time sequence.
    
    call signature:
    
        readMapVid(dataDir = 'data')
    
    Keyword arguments:
    
    *dataDir*:
      Data directory.
    """
    
    files = listdir(dataDir)
    files = natsort.natsort(files)
    nFiles = 0
    for i in range(len(files)):
        if ((str.find(files[i], 'map') == 0) and (files[i] != 'map.vtk') and (str.find(files[i], 'Init') == -1)):
            nFiles += 1
    
    # initialize the array of streamline objects
    m = []

    for f in files:
        if ((str.find(f, 'map') == 0) and (f != 'map.vtk') and (str.find(f, 'Init') == -1)):
            m.append(gm.readMap(dataDir = dataDir, mapFile = f))
    
    return m
Example #16
    def collect_backups(self, directory, rotate_type):
        """
        Collect the backups in the given directory, either locally or on Google Drive.

        :param directory: The pathname of an existing directory (a string).
        :param rotate_type: The rotate type: local, or remote on Google Drive.
        :returns: A sorted :class:`list` of :class:`Backup` objects (the
                  backups are sorted by their date).
        """
        backups = []
        # directory = os.path.abspath(directory)
        directory = os.path.abspath(directory) if not rotate_type == 'remote' else directory
        logger.info("Scanning %s directory for backups: %s", rotate_type, self.custom_format_path(directory))
        # get files from local if rotate_type is local else get files from GoogleDrive
        files = os.listdir(directory) if not rotate_type == 'remote' else self.gdrivecm.get_files(directory)

        for entry in natsort(files):
            # Check for a time stamp in the directory entry's name.
            match = TIMESTAMP_PATTERN.search(entry)
            if match:
                # Make sure the entry matches the given include/exclude patterns.
                if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                    logger.debug("Excluded %r (it matched the exclude list).", entry)
                elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                    logger.debug("Excluded %r (it didn't match the include list).", entry)
                else:
                    backups.append(Backup(
                        pathname=os.path.join(directory, entry) if not rotate_type == 'remote' else entry,
                        datetime=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                    ))
            else:
                logger.debug("Failed to match time stamp in filename: %s", entry)
        if backups:
            logger.info("Found %i timestamped backups in %s.", len(backups), self.custom_format_path(directory))
        return sorted(backups)
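The final `return sorted(backups)` works because the Backup objects themselves compare by date, as the docstrings promise. The Backup class is not defined in these snippets; one way to get that behavior, shown here only as an illustrative stand-in, is an order-enabled dataclass whose comparison uses the timestamp alone:

import datetime
from dataclasses import dataclass, field

# Hypothetical stand-in for the Backup class used above.
@dataclass(order=True)
class Backup:
    timestamp: datetime.datetime
    pathname: str = field(compare=False, default='')

backups = [
    Backup(pathname='/backups/2024-03-01', timestamp=datetime.datetime(2024, 3, 1)),
    Backup(pathname='/backups/2024-01-15', timestamp=datetime.datetime(2024, 1, 15)),
]
# sorted() orders the backups by timestamp, oldest first.
assert [b.pathname for b in sorted(backups)] == ['/backups/2024-01-15', '/backups/2024-03-01']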
Example #17
    def test_dotted_sorting(self):
        """
        Test a previously fixed bug to prevent regressions.

        I've purposefully shuffled the order on the left side to avoid false
        positives caused by stable sorting.
        """
        assert natsort(['1.5', '1.0']) == ['1.0', '1.5']
Example #18
    def update_file(self, force=None):
        """
        Update the file with the contents of the files in the ``.d`` directory.

        :param force: Override the value of :attr:`force` (a boolean or
                      :data:`None`).
        :raises: :exc:`RefuseToOverwrite` when :attr:`force` is :data:`False`
                 and the contents of :attr:`filename` were modified.
        """
        if force is None:
            force = self.force
        if not self.context.is_directory(self.directory):
            # Create the .d directory.
            logger.info("Creating directory %s", format_path(self.directory))
            self.context.execute('mkdir', '-p', self.directory, tty=False)
            # Move the original file into the .d directory.
            local_file = os.path.join(self.directory, 'local')
            logger.info("Moving %s to %s", format_path(self.filename), format_path(local_file))
            self.context.execute('mv', self.filename, local_file, tty=False)
        # Read the modularized configuration file(s).
        blocks = []
        for filename in natsort(self.context.list_entries(self.directory)):
            if not filename.startswith('.'):
                blocks.append(self.read_file(os.path.join(self.directory, filename)))
        contents = b"\n\n".join(blocks)
        # Make sure the generated file was not modified? We skip this on the
        # first run, when the original file was just moved into the newly
        # created directory (see above).
        if all(map(self.context.is_file, (self.filename, self.checksum_file))):
            logger.info("Checking for local changes to %s ..", format_path(self.filename))
            if self.hash_contents() != self.context.read_file(self.checksum_file):
                if force:
                    logger.warning(compact(
                        """
                        The contents of the file to generate ({filename})
                        were modified but --force was used so overwriting
                        anyway!
                        """,
                        filename=format_path(self.filename),
                    ))
                else:
                    raise RefuseToOverwrite(compact(
                        """
                        The contents of the file to generate ({filename})
                        were modified and I'm refusing to overwrite it! If
                        you're sure you want to proceed, use the --force
                        option or delete the file {checksum_file} and
                        retry.
                        """,
                        filename=format_path(self.filename),
                        checksum_file=format_path(self.checksum_file),
                    ))
        # Update the generated configuration file.
        self.write_file(self.filename, contents)
        # Update the checksum file.
        self.context.write_file(self.checksum_file, self.hash_contents())
Example #19
    def test_more_complex_versions(self):
        """
        Test the implementation of the ``NaturalOrderKey`` class.

        This test uses some more complex version strings that were sorted
        incorrectly by the initial (way too naive) implementation in 1.4.
        """
        sorted_versions = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']
        random_versions = ['1534-44658', '1536-44935', '1532-44349', '1538-44920', '1536-44582', '1538-44874']
        assert sorted_versions == natsort(random_versions)
Example #20
    def test_input_order_irrelevant(self):
        """
        Test that order of input does not adversely affect order of output.

        Works by shuffling the input and checking that all 10.000 iterations
        result in the same output.
        """
        sorted_strings = ['1532-44349', '1534-44658', '1536-44582', '1536-44935', '1538-44874', '1538-44920']
        mutable_copy = list(sorted_strings)
        for i in range(10000):
            random.shuffle(mutable_copy)
            assert natsort(mutable_copy) == sorted_strings
Example #21
def lines_for_timestamp(request):
    try:
        try:
            d = dateutil.parser.parse(request.params['datetime'])
        except:
            d = DBSession.query(BusDelay).order_by(BusDelay.time.desc()).first().time
        lines = DBSession.query(BusDelay.line).filter(BusDelay.time == d).group_by(BusDelay.line).all()
        output=[]
        for l in lines:
            output.append(l[0])
    except DBAPIError:
        return Response("a problem occured", content_type='text/plain', status_int=500)
    return {'lines': natsort(output)}
Example #22
 def entries(self):
     """A list of :class:`PasswordEntry` objects."""
     timer = Timer()
     passwords = []
     logger.info("Scanning %s ..", format_path(self.directory))
     listing = self.context.capture("find", "-type", "f", "-name", "*.gpg", "-print0")
     for filename in split(listing, "\0"):
         basename, extension = os.path.splitext(filename)
         if extension == ".gpg":
             # We use os.path.normpath() to remove the leading `./' prefixes
             # that `find' adds because it searches the working directory.
             passwords.append(PasswordEntry(name=os.path.normpath(basename), store=self))
     logger.verbose("Found %s in %s.", pluralize(len(passwords), "password"), timer)
     return natsort(passwords, key=lambda e: e.name)
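Note that natsort() here is called with a key function, and example #3 passed reverse=True, so this natsort() supports the same key and reverse keyword arguments as the built-in sorted(). A small usage sketch with made-up filenames:

from natsort import natsort

files = ['shot_10.png', 'shot_2.png', 'shot_1.png']
# Digits are compared numerically: shot_1, shot_2, shot_10.
print(natsort(files))

# Sorting arbitrary objects by a string attribute, largest number first.
records = [{'name': name} for name in files]
print(natsort(records, key=lambda record: record['name'], reverse=True))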
Example #23
    def sorted(self, objects):
        """
        Sort the given objects in a human friendly way.

        :param objects: The objects to sort (an iterable).
        :returns: The sorted objects (a list).

        If all of the objects are strings they are sorted using natural
        order sorting, otherwise the :func:`sorted()` function is used.
        """
        if all(isinstance(o, string_types) for o in objects):
            return natsort(objects)
        else:
            return sorted(objects)
Example #24
    def load_training_dict(self, folder_name='Brats17TrainingData'):
        """Creating a dictionary of training HGG and LGG scans."""
        """
            Arguments:
                folder_name: folder where the training data is stored
        """
        grades = ['HGG', 'LGG']

        for g in grades:
            scans = os.listdir(os.path.join(self.db_path, folder_name, g))
            scans = ns.natsort(scans)
            for s in scans:
                if s not in self.train_dict:
                    s_relative_path = os.path.join(folder_name, g, s)
                    self.train_dict[s] = ScanBRATS(s, s_relative_path, 'train')
Example #25
    def available_files(self):
        """
        The filenames of the available configuration files (a list of strings).

        The value of :attr:`available_files` is computed the first time it's
        needed by searching for available configuration files that match
        :attr:`filename_patterns` using :func:`~glob.glob()`. If you set
        :attr:`available_files` this effectively disables searching for
        configuration files.
        """
        matches = []
        for pattern in self.filename_patterns:
            logger.debug("Matching filename pattern: %s", pattern)
            matches.extend(natsort(glob.glob(parse_path(pattern))))
        return matches
Example #26
File: poincare.py  Project: SimonCan/glemur
def poincareVid(dataDir='data',
                poincareInit='poincareInit.vtk',
                interpolation='weighted'):
    """
    Creates a Poincare map time sequence.
    
    call signature:
    
        poincareVid(dataDir = 'data', poincareInit = 'poincareInit.vtk', interpolation = 'weighted')
    
    Keyword arguments:
    
    *dataDir*:
      Data directory.
        
    *poincareInit*:
        Read the initial Poincare maps from this file.
        
    *interpolation*:
      Interpolation of the vector field.
      'mean': takes the mean of the adjacent grid point.
      'weighted': weights the adjacent grid points according to their distance.
    """

    # find the number of dump files and do some sorting
    files = listdir(dataDir)
    files = natsort.natsort(files)
    nFiles = 0
    for i in range(len(files)):
        if (str.find(files[i], 'dump') == 0):
            nFiles += 1

    # initialize the array of Poincare objects
    po = []

    for f in files:
        if (str.find(f, 'dump') == 0):
            poincareFile = f.replace('dump', 'poincare')
            print(f)
            po.append(
                gm.mapPoincare(dataDir=dataDir,
                               poincareInit=poincareInit,
                               poincare=poincareFile,
                               dumpFile=f,
                               interpolation=interpolation))
            print(po[-1].p.t)

    return po
Example #27
def streamVid(dataDir='data',
              streamFileInit='streamInit.vtk',
              interpolation='weighted'):
    """
    Creates a streamline time sequence.
    
    call signature:
    
        streamVid(dataDir = 'data', streamFileInit = 'streamInit.vtk', interpolation = 'weighted')
    
    Keyword arguments:
    
    *dataDir*:
      Data directory.
        
    *streamFileInit*:
      Read the initial streamline in this file.
        
    *interpolation*:
      Interpolation of the vector field.
      'mean': takes the mean of the adjacent grid point.
      'weighted': weights the adjacent grid points according to their distance.
    """

    # find the number of dump files
    files = listdir(dataDir)
    files = natsort.natsort(files)
    nFiles = 0
    for i in range(len(files)):
        if (str.find(files[i], 'dump') == 0):
            nFiles += 1

    # initialize the array of streamline objects
    s = []

    for f in files:
        if (str.find(f, 'dump') == 0):
            streamFile = f.replace('dump', 'stream')
            print(f)
            s.append(
                gm.mapStream(dataDir=dataDir,
                             streamFileInit=streamFileInit,
                             streamFile=streamFile,
                             dumpFile=f,
                             interpolation=interpolation))
            print(s[-1].p.t)

    return s
Example #28
def lines_for_timestamp(request):
    try:
        try:
            d = dateutil.parser.parse(request.params['datetime'])
        except:
            d = DBSession.query(BusDelay).order_by(
                BusDelay.time.desc()).first().time
        lines = DBSession.query(BusDelay.line).filter(
            BusDelay.time == d).group_by(BusDelay.line).all()
        output = []
        for l in lines:
            output.append(l[0])
    except DBAPIError:
        return Response("a problem occured",
                        content_type='text/plain',
                        status_int=500)
    return {'lines': natsort(output)}
Example #29
def TsToMp4(Folder, OutPutFileName=""):
    Files = os.listdir(Folder)
    Files = natsort(Files)
    fullPathFiles = []
    for i in Files:
        path = os.path.join(Folder, i)
        if os.path.isfile(path):
            if i.endswith(".ts"):
                fullPathFiles.append(path)
    if fullPathFiles == []:
        print "Thu muc %s khong co file .ts" % Folder
        return

    DirComplete = os.path.join(Folder[0:Folder.rfind("\\")], 'complete')
    if not os.path.exists(DirComplete): os.mkdir(DirComplete)
    DirLog = os.path.join(DirComplete, 'log')
    if not os.path.exists(DirLog): os.mkdir(DirLog)
    concatFile = os.path.join(DirComplete, 'concat.txt')
    with open(concatFile, 'w') as f:
        for i in fullPathFiles:
            if os.path.isfile(i):
                f.write("file '%s'\n" % i.replace("'", "'\\''"))

    if OutPutFileName:
        outputFile = os.path.join(DirComplete, OutPutFileName)
        FileLog = os.path.join(DirLog, OutPutFileName + ".log")
    else:
        outputFile = os.path.join(DirComplete, 'output.mp4')
        FileLog = os.path.join(DirLog, 'output.log')

    args = [
        FFMPEG_PATH, '-f', 'concat', '-i', concatFile, '-c', 'copy', '-bsf:a',
        'aac_adtstoasc', outputFile
    ]

    with open(FileLog, 'w') as f:
        process = subprocess.Popen(args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
        for line in iter(lambda: process.stdout.read(1), ''):
            sys.stdout.write(line)
            f.write(line.rstrip('\n'))

    os.remove(concatFile)
Example #30
    def train_valid_split(self, folder_name='ISLES2017_Training'):
        """Splitting training data into train and valid subsets."""
        """
            Arguments:
                folder_name: name of the folder where training data is stored
        """

        np.random.seed(123456)
        scan_list = ns.natsort(self.train_dict.keys())

        n_scans = len(self.train_dict)
        select_valid = np.random.choice(n_scans,
                                        int(np.round(n_scans *
                                                     self.valid_p)),
                                        replace=False)

        for s_idx, s in enumerate(scan_list):
            if s_idx in select_valid:
                self.valid_scans.append(s)
            else:
                self.train_scans.append(s)
Example #31
    def _summarize_digit_list(list_or_set):
        # In some cases there are hundreds of "missing" boxes. To make the output more human-readable, the code below
        # contracts sequential series of missing numbers into a range, e.g. [1, 2, 3, 4] into "1-4"

        # First extract each series of sequential numbers
        # (code from the python docs, see https://docs.python.org/2.6/library/itertools.html#examples)
        range_lists = []
        for k, g in groupby(enumerate(list_or_set), lambda ix: ix[0] - ix[1]):
            range_lists.append(map(itemgetter(1), g))

        # make each sequence human-readable, by grabbing the first and last values of each sequence list
        ranges = []
        for range in range_lists:
            if len(range) > 1:
                ranges.append(u"{0}-{1}".format(range[0], range[-1]))
            elif len(range) == 1:
                ranges.append(unicode(range[0]))

        # Since this is a list of strings that sometimes contain non-numeric characters, we use the naturalsort library to
        # return the results in the expected order. Otherwise ["2-10", "11-19", "20"] would be sorted ["11-19", "2-10", "20"]
        return natsort(ranges)
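The enumerate()/groupby() idiom described in the comments above can be hard to picture. Here is a small standalone walk-through (written for Python 3, so each group iterator is materialized before indexing):

from itertools import groupby

numbers = [1, 2, 3, 7, 8, 10]

# For consecutive values, index - value is constant, so grouping on that
# difference splits the list into runs of sequential integers.
ranges = []
for _, group in groupby(enumerate(numbers), lambda ix: ix[0] - ix[1]):
    run = [value for _, value in group]
    ranges.append('{0}-{1}'.format(run[0], run[-1]) if len(run) > 1 else str(run[0]))

assert ranges == ['1-3', '7-8', '10']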
Example #32
    def _summarize_digit_list(list_or_set):
        # In some cases there are hundreds of "missing" boxes. To make the output more human-readable, the code below
        # contracts sequential series of missing numbers into a range, e.g. [1, 2, 3, 4] into "1-4"

        # First extract each series of sequential numbers
        # (code from the python docs, see https://docs.python.org/2.6/library/itertools.html#examples)
        range_lists = []
        for k, g in groupby(enumerate(list_or_set), lambda ix: ix[0] - ix[1]):
            range_lists.append(map(itemgetter(1), g))

        # make each sequence human-readable, by grabbing the first and last values of each sequence list
        ranges = []
        for range in range_lists:
            if len(range) > 1:
                ranges.append(u"{0}-{1}".format(range[0], range[-1]))
            elif len(range) == 1:
                ranges.append(unicode(range[0]))

        # Since this is a list of strings that sometimes contain non-numeric characters, we use the naturalsort library to
        # return the results in the expected order. Otherwise ["2-10", "11-19", "20"] would be sorted ["11-19", "2-10", "20"]
        return natsort(ranges)
Example #33
def create_matriz(folder):
    try:
        linhas_planilha = []
        files = natsort(os.listdir(folder))

        for file in files:
            linha = {}

            linha['nome_atual'] = file
            linha['nome_atual 2'] = str(file.split("-")[0])
            linha['novo_nome'] = '[INSIRA O NOME NOVO]'

            linhas_planilha.append(linha)
            try:
                data = pd.DataFrame(linhas_planilha)
                data.to_excel('Matriz.xlsx', index=False)
            except ValueError as err:
                error_matriz = "Erro ao salvar Matriz: " + str(err)
                sg.PopupError(error_matriz)
    except ValueError as err:
        error_matriz = "Erro ao Criar Matriz: " + str(err)
        sg.PopupError(error_matriz)
Example #34
def get_missing_numbers(list_or_set):
    # set the maximum and minimum box numbers found in the given box-number list
    min_number = min(list_or_set)
    max_number = max(list_or_set)

    missing_numbers = []

    # For each integer between the minimum and maximum value, check whether it is in the box list.
    # If it is not, that box is missing.
    i = min_number
    while i < max_number:
        if i not in list_or_set:
            missing_numbers.append(i)
        i += 1

    # sort the list of missing numbers
    missing_numbers.sort()

    # In some cases there are hundreds of "missing" boxes. To make the output more human-readable, the code below
    # contracts sequential series of missing numbers into a range, e.g. [1, 2, 3, 4] into "1-4"

    # First extract each series of sequential numbers
    # (code from the python docs, see https://docs.python.org/2.6/library/itertools.html#examples)
    range_lists = []
    for k, g in groupby(enumerate(missing_numbers), lambda ix: ix[0]-ix[1]):
        range_lists.append(map(itemgetter(1), g))

    # make each sequence human-readable, by grabbing the first and last values of each sequence list
    ranges = []
    for range in range_lists:
        if len(range) > 1:
            ranges.append("{0}-{1}".format(range[0], range[-1]))
        elif len(range) == 1:
            ranges.append(str(range[0]))

    # Since this is a list of strings that sometimes contain non-numeric characters, we use the naturalsort library to
    # return the results in the expected order. Otherwise ["2-10", "11-19", "20"] would be sorted ["11-19", "2-10", "20"]
    return natsort(ranges)
Example #35
def get_missing_numbers(list_or_set):
    # set the maximum and minimum box numbers found in the given box-number list
    min_number = min(list_or_set)
    max_number = max(list_or_set)

    missing_numbers = []

    # For each integer between the minimum and maximum value, check whether it is in the box list.
    # If it is not, that box is missing.
    i = min_number
    while i < max_number:
        if i not in list_or_set:
            missing_numbers.append(i)
        i += 1

    # sort the list of missing numbers
    missing_numbers.sort()

    # In some cases there are hundreds of "missing" boxes. To make the output more human-readable, the code below
    # contracts sequential series of missing numbers into a range, e.g. [1, 2, 3, 4] into "1-4"

    # First extract each series of sequential numbers
    # (code from the python docs, see https://docs.python.org/2.6/library/itertools.html#examples)
    range_lists = []
    for k, g in groupby(enumerate(missing_numbers), lambda ix: ix[0] - ix[1]):
        range_lists.append(map(itemgetter(1), g))

    # make each sequence human-readable, by grabbing the first and last values of each sequence list
    ranges = []
    for range in range_lists:
        if len(range) > 1:
            ranges.append("{0}-{1}".format(range[0], range[-1]))
        elif len(range) == 1:
            ranges.append(str(range[0]))

    # Since this is a list of strings that sometimes contain non-numeric characters, we use the naturalsort library to
    # return the results in the expected order. Otherwise ["2-10", "11-19", "20"] would be sorted ["11-19", "2-10", "20"]
    return natsort(ranges)
Example #36
 def test_version_sorting(self):
     """Test version sorting (what we're after)."""
     assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']
Example #37
    try:
        os.makedirs(dbdir)
    except:
        print("ERROR: Unable to create directory: {}".format(dbdir),
              file=sys.stderr)
        exit(1)

# Output data to country-specific files
dbdir = config.get('DBDIR')
for cc in ccdata:
    alldata = []
    typedata = ccdata[cc]
    types = sorted(typedata.keys())
    for iptype in types:
        filepath = os.path.join(dbdir, cc + '_' + iptype)
        if iptype == "IPV6":
            data = sorted(typedata[iptype])
        else:
            data = natsort.natsort(typedata[iptype])
        alldata += data

        with open(filepath, 'w+') as f:
            for item in data:
                print(item, file=f)

    # Create a combined file as well
    filepath = os.path.join(dbdir, cc + '_ALL')
    with open(filepath, 'w+') as f:
        for item in alldata:
            print(item, file=f)
Example #38
    try:
        os.makedirs(dbdir)
    except:
        print("ERROR: Unable to create directory: {}".format(dbdir),
              file=sys.stderr)
        exit(1)

# Output data to country-specific files
dbdir = config.get('DBDIR')
for cc in ccdata:
    alldata = []
    typedata = ccdata[cc]
    types = sorted(typedata.keys())
    for iptype in types:
        filepath = os.path.join(dbdir, cc + '_' + iptype)
        if iptype == "IPV6":
            data = sorted(typedata[iptype])
        else:
            data = natsort.natsort(typedata[iptype])
        alldata += data

        with open(filepath, 'w+') as f:
            for item in data:
                print(item, file=f)

    # Create a combined file as well
    filepath = os.path.join(dbdir, cc + '_ALL')
    with open(filepath, 'w+') as f:
        for item in alldata:
            print(item, file=f)
Example #39
def run(root_dir, list_file_path, n):
  # check list file for selection
  list_file = open(list_file_path, "r")
  list_content = list_file.readlines()
  list_file.close()

  filter_dict = {}

  for i in range(0, len(list_content)):
    filter_dict[list_content[i].split(".avi")[0]] = True

  # print list_content

  # read the content of the root directory and filter all directories
  directory_names = map(lambda f: os.path.join(root_dir, f), os.listdir(root_dir))
  # directory_names = filter(lambda f: os.path.basename(f).startswith('M'), directory_names)
  directories = filter(os.path.isdir, directory_names)

  # assign each 'top-level' directory to a topic id
  topics = { dir : topicId for (topicId, dir) in enumerate(directories) }

  # open output files
  train_name = os.path.join(root_dir, OUTPUT_FILE) % os.path.basename(list_file_path)
  train_file = open(train_name, "w")

  videos = []
  # for every topic read all its image files
  for topic_dir in directories:
    for parent_dir, sub_dirs, files in os.walk(topic_dir):

      # only take folder if it is in list
      parent_dir_split = parent_dir.split("/")
      identifier = parent_dir_split[-2] + "/" + parent_dir_split[-1]
      if (not identifier in filter_dict):
        continue

      if len(files) == 0:
        continue

      def writeFiles(sub_files):
        sub_file_count = len(sub_files)
        # all frames
        if (n == -1):
          count = 1
          start = 0
        # every 'count' frames
        else:
          count = sub_file_count / n
          start = int(ceil((sub_file_count - count * n) / 2.0))

        current_sequence = []
        for i in range(start, len(sub_files), count):
          if len(current_sequence) != n:
            absolute_file = os.path.join(parent_dir, sub_files[i])

            if (sub_files[i].endswith(("jpeg", "jpg", "png"))):
              # write absolute file path and the corresponding topic Id to the output file
              line = '{} {}\n'.format(absolute_file, topics[topic_dir])
              current_sequence.append(line)

        if n >= 0 and len(current_sequence) != n:
          print("sub_file_count")
          print(sub_file_count)
          print("count")
          print(count)
          print("n")
          print(n)
          print("len(current_sequence)")
          print(len(current_sequence))

        assert(n == -1 or len(current_sequence) == n)
        videos.append(current_sequence)
      # end writeFiles()

      # sort files
      files = natsort(files)
      file_count = len(files)
      sub_video_count = 10
      assert(file_count % sub_video_count == 0)
      nrFramesPerCrop = file_count / sub_video_count
      writeFiles(files[0 * nrFramesPerCrop:1 * nrFramesPerCrop])
      writeFiles(files[1 * nrFramesPerCrop:2 * nrFramesPerCrop])
      writeFiles(files[2 * nrFramesPerCrop:3 * nrFramesPerCrop])
      writeFiles(files[3 * nrFramesPerCrop:4 * nrFramesPerCrop])
      writeFiles(files[4 * nrFramesPerCrop:5 * nrFramesPerCrop])
      writeFiles(files[5 * nrFramesPerCrop:6 * nrFramesPerCrop])
      writeFiles(files[6 * nrFramesPerCrop:7 * nrFramesPerCrop])
      writeFiles(files[7 * nrFramesPerCrop:8 * nrFramesPerCrop])
      writeFiles(files[8 * nrFramesPerCrop:9 * nrFramesPerCrop])
      writeFiles(files[9 * nrFramesPerCrop:10 * nrFramesPerCrop])

      if (len(videos) % 5000 == 0):
        print "%d videos processed..." % (len(videos) / 10)

  # shuffle the video lists, not the frames inside one video
  shuffle(videos)
  video_number = len(videos)
  split_point = int(video_number * TRAIN_PERCENTAGE)

  for sequence in videos:
    for line in sequence:
      train_file.write(line)

  # close output file
  train_file.close()

  # just some logging
  sys.stdout.write("Done. Exported %s/%s \n\n" % (root_dir, OUTPUT_FILE))
  sys.stdout.write("Please, call Caffee's 'convert_image' tool now:\n")
  sys.stdout.write("$CAFFE_ROOT/build/tools/convert_image \"\" %s UCF101\n" % OUTPUT_FILE)
Example #40
def run(root_dir, list_file_path, n):
    # check list file for selection
    list_file = open(list_file_path, "r")
    list_content = list_file.readlines()
    list_file.close()

    filter_dict = {}

    for i in range(0, len(list_content)):
        filter_dict[list_content[i].split(".avi")[0]] = True

    # print list_content

    # read the content of the root directory and filter all directories
    directory_names = map(lambda f: os.path.join(root_dir, f),
                          os.listdir(root_dir))
    # directory_names = filter(lambda f: os.path.basename(f).startswith('M'), directory_names)
    directories = filter(os.path.isdir, directory_names)

    # assign each 'top-level' directory to a topic id
    topics = {dir: topicId for (topicId, dir) in enumerate(directories)}

    # open output files
    train_name = os.path.join(root_dir,
                              OUTPUT_FILE) % os.path.basename(list_file_path)
    train_file = open(train_name, "w")

    videos = []
    # for every topic read all its image files
    for topic_dir in directories:
        for parent_dir, sub_dirs, files in os.walk(topic_dir):

            # only take folder if it is in list
            parent_dir_split = parent_dir.split("/")
            identifier = parent_dir_split[-2] + "/" + parent_dir_split[-1]
            if (not identifier in filter_dict):
                continue

            if len(files) == 0:
                continue

            def writeFiles(sub_files):
                sub_file_count = len(sub_files)
                # all frames
                if (n == -1):
                    count = 1
                    start = 0
                # every 'count' frames
                else:
                    count = sub_file_count / n
                    start = int(ceil((sub_file_count - count * n) / 2.0))

                current_sequence = []
                for i in range(start, len(sub_files), count):
                    if len(current_sequence) != n:
                        absolute_file = os.path.join(parent_dir, sub_files[i])

                        if (sub_files[i].endswith(("jpeg", "jpg", "png"))):
                            # write absolute file path and the corresponding topic Id to the output file
                            line = '{} {}\n'.format(absolute_file,
                                                    topics[topic_dir])
                            current_sequence.append(line)

                if n >= 0 and len(current_sequence) != n:
                    print("sub_file_count")
                    print(sub_file_count)
                    print("count")
                    print(count)
                    print("n")
                    print(n)
                    print("len(current_sequence)")
                    print(len(current_sequence))

                assert (n == -1 or len(current_sequence) == n)
                videos.append(current_sequence)

            # end writeFiles()

            # sort files
            files = natsort(files)
            file_count = len(files)
            sub_video_count = 10
            assert (file_count % sub_video_count == 0)
            nrFramesPerCrop = file_count / sub_video_count
            writeFiles(files[0 * nrFramesPerCrop:1 * nrFramesPerCrop])
            writeFiles(files[1 * nrFramesPerCrop:2 * nrFramesPerCrop])
            writeFiles(files[2 * nrFramesPerCrop:3 * nrFramesPerCrop])
            writeFiles(files[3 * nrFramesPerCrop:4 * nrFramesPerCrop])
            writeFiles(files[4 * nrFramesPerCrop:5 * nrFramesPerCrop])
            writeFiles(files[5 * nrFramesPerCrop:6 * nrFramesPerCrop])
            writeFiles(files[6 * nrFramesPerCrop:7 * nrFramesPerCrop])
            writeFiles(files[7 * nrFramesPerCrop:8 * nrFramesPerCrop])
            writeFiles(files[8 * nrFramesPerCrop:9 * nrFramesPerCrop])
            writeFiles(files[9 * nrFramesPerCrop:10 * nrFramesPerCrop])

            if (len(videos) % 5000 == 0):
                print "%d videos processed..." % (len(videos) / 10)

    # shuffle the video lists, not the frames inside one video
    shuffle(videos)
    video_number = len(videos)
    split_point = int(video_number * TRAIN_PERCENTAGE)

    for sequence in videos:
        for line in sequence:
            train_file.write(line)

    # close output file
    train_file.close()

    # just some logging
    sys.stdout.write("Done. Exported %s/%s \n\n" % (root_dir, OUTPUT_FILE))
    sys.stdout.write("Please, call Caffee's 'convert_image' tool now:\n")
    sys.stdout.write("$CAFFE_ROOT/build/tools/convert_image \"\" %s UCF101\n" %
                     OUTPUT_FILE)
Example #41
        '--array',
        metavar='NS',
        type=int,
        default=-1,
        help='Submit the jobs as an array, with <=NS running simultaneously. '
        'Good for avoiding using all the Abaqus licenses. NS=0 runs all array jobs simultaneously.'
    )
    args = parser.parse_args()

    tC = 0
    tA = 0
    # Handle inputs.
    if args.sort:
        try:
            from natsort import natsort
            args.inp = natsort(args.inp)
        except:
            print 'WARNING: no natsort module found, sorting not available.'
    if args.cluster: cp = cluster_properties(args.cluster)
    if not args.procs:
        if args.cluster: np = cp['np']
        else: np = 8
    else: np = args.procs

    # Print a summary.
    print 'Tasks to run:', len(args.inp)
    for inp in args.inp:
        print '\t', inp

    # Run tasks.
    jobs = list()
Example #42
 def test_zero_padding(self):
     """Test that zero padding semantics are respected."""
     assert natsort(['1.5.1', '1.5']) == ['1.5', '1.5.1']
Example #43
    if args.labels and len(args.labels)<len(args.data):
        utility.print_error('Number of given labels must be the same as the number of given files.',True)


    # Separate the data files into FEM and EXP.
    femdata = []
    expdata = []
    for df in args.data:
        if df.endswith('.rpt'): femdata.append(df)
        else:                   expdata.append(df)

    # Try to sort the data.
    if args.sort or args.sort_reverse:
        try:
            from natsort import natsort
            femdata = natsort(femdata)
            expdata = natsort(expdata)
            if args.sort_reverse:
                femdata = list(reversed(femdata))
                expdata = list(reversed(expdata))
        except:
            print 'WARNING: no natsort module found, sorting not available.'

    # Prepare the plots.
    fig,ax = pyplot.subplots()
    cm = pyplot.get_cmap('gist_rainbow')
    # cm = pyplot.get_cmap('jet')
    if len(femdata)==len(expdata):
        colors = [cm(float(i)/len(femdata)) for i in range(len(femdata))]
    else:
        colors = [cm(float(i)/len(args.data)) for i in range(len(args.data))]
Example #44
                somecounter+=1

                cigartup= read.cigartuples
                cigarstr = read.cigarstring
                unfiltreadidx = read.get_reference_positions(full_length=True)
                unfiltread = list(read.query_sequence)
                readqual = read.query_qualities
                cigarseq = cigarTranslate(cigartup)
                wee = analyzeRead(cigartup,cigarstr,unfiltreadidx,unfiltread,readqual,cigarseq)

            bigconsensus = []
            keylist = []
            for boop in CONSENSUSDICT:
                keylist.append(boop)

            a = natsort(keylist)
            coveragechecker = 0
            for i in a:
                # print i,CONSENSUSDICT[i]
                if 'I' in i: #and sum(CONSENSUSDICT[i].values()) < 200: #Include insertion!?!?!?
                    pass
                else:
                    x = CONSENSUSDICT[i]
                    maxkey = max(x.iteritems(), key=operator.itemgetter(1))[0]
                    # for key, value in sorted(x.iteritems(), key=lambda (k,v): (v,k)):
                    #     maxkey = key
                    #     break

                    # print>>readreportfile,i,maxkey,refdict[SEGMENT][int(i)],CONSENSUSDICT[i] #use me for snpcheck
                    if maxkey != refdict[SEGMENT][int(i)]:
                        
Example #45
if __name__ == '__main__':

	meta_data = np.loadtxt(labels_csv,delimiter=";",dtype = str,skiprows = 1)

	coughs_dir_list = glob(coughs_dir + "C*")

	recordings_list = [os.path.basename(f) for f in coughs_dir_list ]

	
	for dir in coughs_dir_list:

		if EXTRACT:
			print "Extracting features for coughs in:",dir,"...",
			# List of cough wav files sorted numerically
			cough_wavs = natsort(list(glob(dir + "/*.wav")))
			extract_features(cough_wavs,dir,meta_data)
			print "done\n"

	# Copy all generated feature files to
	# the all folder
	if COPY_ALL:
		print "Copying features to all folder...",
		copy_to_all()
		print "done!\n"

		print "\ndone\n"


	if CONVERT_SUPERVECTORS:
		print "Converting data into supervectors..."
Example #46
    pyplot.rc('mathtext', default='regular')  # Don't use italics for mathmode.
    #  pyplot.rc('font',size=16)
    pyplot.rc('axes', grid=True)
    pyplot.rc('figure', dpi=300)
    pyplot.rc('savefig', dpi=300)

    # Remove any datafiles that have already been processed.
    datafiles = []
    for dfile in args.datafiles:
        if not path.splitext(dfile)[0].endswith('--CLEAN'):
            datafiles.append(dfile)

    # Try to sort the data.
    try:
        from natsort import natsort
        datafiles = natsort(datafiles)
    except:
        print 'WARNING: no natsort module found, sorting not available.'

    # Prepare the multiplots.
    if len(datafiles) > 1:
        cm = pyplot.get_cmap('gist_rainbow')
        colors = [cm(float(i) / len(datafiles)) for i in range(len(datafiles))]
        fig1, ax1 = pyplot.subplots(3, 1, figsize=(15, 11))
        fig2, ax2 = pyplot.subplots(1, 3, figsize=(15, 6))
        if args.title:
            fig1.suptitle(args.title, fontweight="bold")
            fig2.suptitle(args.title, fontweight="bold")
        ax1[2].axis('off')
        ax2[2].axis('off')
        ax1[0].set_color_cycle(colors)
Example #47
def create_window():
    file_list_column = [
        [
            sg.Text("Selecione o Diretório de Origem: "),
            sg.In(size=(25, 1), enable_events=True, key="-FOLDER-"),
            sg.FolderBrowse(),
        ],
        [
            sg.Listbox(values=[],
                       enable_events=True,
                       size=(57, 20),
                       key="-FILE LIST-")
        ],
        [sg.Button("Baixar Planilha Matriz")],
        [
            sg.Text("Selecione o Arquivo Matriz:        "),
            sg.In(size=(25, 1), enable_events=True, key="-FILE-"),
            sg.FileBrowse(),
        ],
        [sg.Button("Rename")],
        [sg.Button(" Cancel ")],
    ]

    layout = [[sg.Column(file_list_column)]]

    window = sg.Window("Rename Files", layout)

    # Run the Event Loop
    while True:
        event, values = window.read()
        if event == " Cancel " or event == sg.WIN_CLOSED:
            break
        # Folder name was filled in, make a list of files in the folder
        if event == "-FOLDER-":
            folder = values["-FOLDER-"]

            try:
                # Get list of files in folder
                file_list = natsort(os.listdir(folder))

            except:
                file_list = []

            fnames = [
                f for f in file_list
                if os.path.isfile(os.path.join(folder, f)) and
                f.lower().endswith((".rar", ".pdf", ".xls", ".xlsx", ".doc",
                                    ".docx", ".txt", ".png", ".jpg", ".bmp"))
            ]
            window["-FILE LIST-"].update(fnames)
        elif event == "-FILE LIST-":  # A file was chosen from the listbox
            try:
                filename = os.path.join(values["-FOLDER-"],
                                        values["-FILE LIST-"][0])
            except:
                pass

        if event == "-FILE-":
            file = values["-FILE-"]

        if event == "Baixar Planilha Matriz":
            create_matriz(folder)
            sg.PopupOK("Arquivo Matriz Criado.")

        if event == "Rename":
            try:
                backup(folder)
                try:
                    rename(file, folder)
                    sg.PopupOK(
                        'Processo concluido: Verifique a pasta indicada!')
                except ValueError as err:
                    error_rename = "Erro ao Renomear: " + str(err)
                    sg.PopupError(error_rename)
            except ValueError as err:
                error_backup = "Erro ao criar backup: " + str(err)
                sg.PopupError(error_backup)

    window.close()
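
A minimal sketch of the directory-listing step above, independent of the GUI. The folder path and extension set are assumptions; it relies on the same naturalsort-style natsort callable used in the example.

import os
from natsort import natsort

def list_files(folder, extensions=('.pdf', '.xlsx', '.txt')):
    """Return the files in folder, natural-sorted and filtered by extension."""
    try:
        names = natsort(os.listdir(folder))
    except OSError:
        return []
    return [n for n in names
            if os.path.isfile(os.path.join(folder, n))
            and n.lower().endswith(extensions)]

# Hypothetical usage:
# print(list_files('/tmp/input'))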
예제 #48
0
def parse_xml(filename, hostname):
    mini_graph_path, root = reconcile_mini_graph_locations(filename, hostname)

    u_neighbors = None
    u_devices = None
    hwsku = None
    bgp_sessions = None
    bgp_asn = None
    intfs = None
    vlan_intfs = None
    pc_intfs = None
    mgmt_intf = None
    lo_intf = None
    neighbors = None
    devices = None

    hwsku_qn = QName(ns, "HwSku")
    for child in root:
        if child.tag == str(hwsku_qn):
            hwsku = child.text

    # port_alias_map maps NGS port name to SONiC port name
    if hwsku == "Force10-S6000":
        for i in range(0, 128, 4):
            port_alias_map["fortyGigE0/%d" % i] = "Ethernet%d" % i
    elif hwsku == "Force10-S6100":
        for i in range(0, 4):
            for j in range(0, 16):
                port_alias_map["fortyGigE1/%d/%d" % (i+1, j+1)] = "Ethernet%d" % (i * 16 + j)
    elif hwsku == "Arista-7050-QX32":
        for i in range(1, 25):
            port_alias_map["Ethernet%d/1" % i] = "Ethernet%d" % ((i - 1) * 4)
        for i in range(25, 33):
            port_alias_map["Ethernet%d" % i] = "Ethernet%d" % ((i - 1) * 4)
    else:
        for i in range(0, 128, 4):
            port_alias_map["Ethernet%d" % i] = "Ethernet%d" % i

    for child in root:
        if child.tag == str(QName(ns, "DpgDec")):
            (intfs, lo_intfs, mgmt_intf, vlan_intfs, pc_intfs) = parse_dpg(child, hostname)
        elif child.tag == str(QName(ns, "CpgDec")):
            (bgp_sessions, bgp_asn) = parse_cpg(child, hostname)
        elif child.tag == str(QName(ns, "PngDec")):
            (neighbors, devices, console_dev, console_port, mgmt_dev, mgmt_port) = parse_png(child, hostname)
        elif child.tag == str(QName(ns, "UngDec")):
            (u_neighbors, u_devices, _, _, _, _) = parse_png(child, hostname)

    # Replace port with alias in Vlan interfaces members
    if vlan_intfs is not None:
        for vlan in vlan_intfs:
            for i,member in enumerate(vlan['members']):
                vlan['members'][i] = port_alias_map[member]
            # Convert vlan members into a space-delimited string
            vlan['members'] = " ".join(vlan['members'])

    # Replace port with alias in port channel interfaces members
    if pc_intfs is not None:
        for pc in pc_intfs:
            for i,member in enumerate(pc['members']):
                pc['members'][i] = port_alias_map[member]


    # Create port index map. Since we currently output a mix of NGS names
    # and SONiC mapped names, we include both in this map.
    # SONiC aliases, when sorted in natural sort order, match the physical port
    # index order, so we sort by SONiC port alias, and map
    # back to NGS names after sorting using this inverted map
    #
    # TODO: Move all alias-related code out of minigraph_facts.py and into
    # its own module to be used as another layer after parsing the minigraph.
    inverted_port_alias_map = {v: k for k, v in port_alias_map.iteritems()}

    # Start by creating a list of all port aliases
    port_alias_list = []
    for k, v in port_alias_map.iteritems():
        port_alias_list.append(v)

    # Sort the list in natural order
    port_alias_list_sorted = natsort(port_alias_list)

    # Create map from SONiC alias to physical index and NGS name to physical index
    port_index_map = {}
    for idx, val in enumerate(port_alias_list_sorted):
        port_index_map[val] = idx
        port_index_map[inverted_port_alias_map[val]] = idx



    # Generate results
    Tree = lambda: defaultdict(Tree)

    results = Tree()
    results['minigraph_hwsku'] = hwsku
    # sorting by lambdas is not easily done without custom filters.
    # TODO: add jinja2 filter to accept a lambda to sort a list of dictionaries by attribute.
    # TODO: alternatively (preferred), implement class containers for multiple-attribute entries, enabling sort by attr
    results['minigraph_bgp'] = sorted(bgp_sessions, key=lambda x: x['addr'])
    results['minigraph_bgp_asn'] = bgp_asn
    # TODO: sort does not work properly on all interfaces of varying lengths. Need to sort by integer group(s).
    results['minigraph_interfaces'] = sorted(intfs, key=lambda x: x['name'])
    results['minigraph_vlan_interfaces'] = vlan_intfs
    results['minigraph_portchannel_interfaces'] = pc_intfs
    results['minigraph_mgmt_interface'] = mgmt_intf
    results['minigraph_lo_interfaces'] = lo_intfs
    results['minigraph_neighbors'] = neighbors
    results['minigraph_devices'] = devices
    results['minigraph_underlay_neighbors'] = u_neighbors
    results['minigraph_underlay_devices'] = u_devices
    # note - this may include files under acs/ansible/minigraph, or those under the default cache folder.
    # (see ANSIBLE_USER_MINIGRAPH_PATH at the top of the module)
    results['minigraph_as_xml'] = mini_graph_path
    results['minigraph_console'] = get_console_info(devices, console_dev, console_port)
    results['minigraph_mgmt'] = get_mgmt_info(devices, mgmt_dev, mgmt_port)
    results['minigraph_port_indices'] = port_index_map

    return results
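
A minimal sketch of the port-index construction above, with a toy port_alias_map (an assumption): natural-sort the SONiC aliases so indices follow physical port order, then map both the alias and the original NGS name to the same index.

from natsort import natsort

# Hypothetical NGS-name -> SONiC-alias map.
port_alias_map = {
    'fortyGigE0/8': 'Ethernet8',
    'fortyGigE0/0': 'Ethernet0',
    'fortyGigE0/4': 'Ethernet4',
}
inverted_port_alias_map = {v: k for k, v in port_alias_map.items()}

port_index_map = {}
for idx, alias in enumerate(natsort(list(port_alias_map.values()))):
    port_index_map[alias] = idx
    port_index_map[inverted_port_alias_map[alias]] = idx

# Both 'Ethernet4' and 'fortyGigE0/4' now map to index 1.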
예제 #49
0
 def entries(self):
     """A list of :class:`PasswordEntry` objects."""
     passwords = []
     for store in self.stores:
         passwords.extend(store.entries)
     return natsort(passwords, key=lambda e: e.name)
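
A minimal sketch of the key-based sort above, with a stand-in for PasswordEntry (an assumption): natural sorting by a name attribute puts 'entry2' before 'entry10'.

from collections import namedtuple
from natsort import natsort

Entry = namedtuple('Entry', 'name')  # stand-in for PasswordEntry
entries = [Entry('entry10'), Entry('entry2'), Entry('entry1')]
ordered = natsort(entries, key=lambda e: e.name)
print([e.name for e in ordered])  # ['entry1', 'entry2', 'entry10']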
예제 #50
0
 def test_reversed_version_sorting(self):
     """Test reversed version sorting."""
     assert natsort(['1', '5', '10', '50'], reverse=True) == ['50', '10', '5', '1']
예제 #51
0
            consensus = []
            for read in samplebamdict[SAMPLENAME].fetch(SEGMENT):
                cigartup = read.cigartuples
                cigarstr = read.cigarstring
                unfiltreadidx = read.get_reference_positions(full_length=True)
                unfiltread = list(read.query_sequence)
                readqual = read.query_qualities
                cigarseq = cigarTranslate(cigartup)
                analyzeRead(cigartup,cigarstr,unfiltreadidx,unfiltread,readqual,cigarseq,read.is_reverse)

            bigconsensus = []
            keylist = []
            for boop in CONSENSUSDICT:
                keylist.append(boop)

            a = natsort(keylist)  # natural-sort the nucleotide position keys
            coveragechecker = 0


            tempconsensus = []
            for i in a:  # TODO: move the consensus construction earlier; it is needed again later.
                if 'I' in i: #and sum(CONSENSUSDICT[i].values()) < 200: #Include insertion!?!?!?
                    pass
                else:
                    if sum(CONSENSUSDICT[i].values()) < 200: 
                        coveragechecker+=1

                    if sum(CONSENSUSDICT[i].values()) < args.covercutoff: #Do we want to filter?!?!?!
                        bigconsensus.append('N')
                        tempconsensus.append('N')
                    else:
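
A minimal sketch of the key-ordering step above, with a toy CONSENSUSDICT and a hypothetical coverage cutoff of 200 (both assumptions): natural-sort the per-position keys so position '10' follows '9' rather than '1', skip insertion keys, and call low-coverage positions 'N'.

from natsort import natsort

# Hypothetical per-position base counts; keys containing 'I' mark insertions.
CONSENSUSDICT = {'1': {'A': 250}, '2': {'C': 180}, '2I': {'T': 5}, '10': {'G': 300}}

for key in natsort(list(CONSENSUSDICT)):
    if 'I' in key:  # skip insertion positions, as in the example above
        continue
    counts = CONSENSUSDICT[key]
    base = max(counts, key=counts.get) if sum(counts.values()) >= 200 else 'N'
    print(key, base)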
예제 #52
0
                        help='Optional min and max for y-axis.')
    parser.add_argument('--title', help='Optional title for plot.')
    parser.add_argument(
        "--paper",
        action='store_true',
        help="Create plots designed for the paper, rather than for general use."
    )
    args = parser.parse_args()
    pyplot.rc('mathtext', default='regular')  # Don't use italics for mathmode.

    # Try to sort the data.
    expdata = args.data
    if args.sort:
        try:
            from natsort import natsort
            expdata = natsort(expdata)
        except:
            print 'WARNING: no natsort module found, sorting not available.'

    # Prepare the plots.
    fig, ax = pyplot.subplots()

    # TODO - remove longest common prefix from label strings.
    # Plot the EXP data.
    lines = ['-', '--', '-.', ':']
    if args.test == 'linU':
        xlabel = 'Time (s)'
        ylabel = 'Displacement (mm)'
        title = 'Displacements vs Time'
    elif args.test == 'linF':
        xlabel = 'Time (s)'