Code example #1
File: lsanc.py Project: sderose/Shell
import stat                    # for stat.filemode
from os import stat as osstat  # project alias for os.stat

def printFileInfo(path):
    statResult = osstat(path)
    warning1("statResult: " + repr(statResult))  # warning1: project logging helper
    stMode = statResult.st_mode
    warning1("stMode: " + repr(stMode))
    perms = stat.filemode(stMode)
    print("%9s %s" % (perms, path))
Code example #2
import stat
from os import stat as osstat
from grp import getgrall, getgrgid
from pwd import getpwnam, getpwuid

def is_DirectoryWriteable(dirname, use_username):
#   code snippet from stackoverflow
#   get all group ids which can be used by use_username
  gids = [g.gr_gid for g in getgrall() if use_username in g.gr_mem]
#   get the primary group id of use_username
  gid = getpwnam(use_username).pw_gid
  gids.append(getgrgid(gid).gr_gid)  # getgrgid(gid).gr_gid is simply gid again
#   get the group names
  usergroups = [getgrgid(gid).gr_name for gid in gids]

  dirstat = osstat(dirname)
  dirusername = getpwuid(dirstat.st_uid)[0]
  dirusergroup = getgrgid(dirstat.st_gid)[0]
#   check if directory belongs to user and writeable
  if dirusername == use_username and bool(dirstat.st_mode & stat.S_IWUSR):
#     module_logger.info('{0}: "{1}" writeable'.format(getframe().f_code.co_name, dirname))
    return True
#   check if group of the directory in usergroup and writeable
  elif dirusergroup in usergroups and bool(dirstat.st_mode & stat.S_IWGRP):
#     module_logger.info('{0}: "{1}" writeable'.format(getframe().f_code.co_name, dirname))
    return True
#   check if writeable for anyone
  elif bool(dirstat.st_mode & stat.S_IWOTH):
#     module_logger.info('{0}: "{1}" writeable'.format(getframe().f_code.co_name, dirname))
    return True
  else:
#     module_logger.error('{0}: "{1}" not writeable for "{2}"'.format(getframe().f_code.co_name, dirname, use_username))
    return False
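
Note: when the question is just "can the current process write to this directory?", os.access performs the whole uid/gid/mode check in one call (against the real, not effective, ids). A minimal sketch with a hypothetical path:

import os

if os.access("/tmp/somedir", os.W_OK):
    print("writable for the current user")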
Code example #3
File: files.py Project: weweyes/inpanel
# Python 2 code: anydbm and the cmp()-style sort below were removed in Python 3
import anydbm
from os import stat as osstat
from os.path import join, exists
from stat import S_ISDIR, S_ISREG, S_ISLNK

def tlist():
    mounts = _getmounts()   # _getmounts, _inittrash, ftime: project helpers
    _inittrash(mounts)
    # gather information in each mount point's trash
    items = []
    for mount in mounts:
        db = anydbm.open(join(mount, '.deleted_files', '.fileinfo'), 'c')
        for uuid, info in db.items():
            fields = info.split('\t')
            item = {
                'uuid': uuid,
                'name': fields[0],
                'path': fields[1],
                'time': ftime(float(fields[2])),
                'mount': mount
            }
            filepath = join(mount, '.deleted_files', uuid)
            if exists(filepath):
                stat = osstat(filepath)
                item['isdir'] = S_ISDIR(stat.st_mode)
                item['isreg'] = S_ISREG(stat.st_mode)
                item['islnk'] = S_ISLNK(stat.st_mode)
            items.append(item)
        db.close()
    items.sort(lambda x, y: cmp(y['time'], x['time']))  # Python 2 cmp sort, newest first
    return items
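
For Python 3, where cmp-based sorting no longer exists, the final sort would be spelled with a key function:

# Python 3 equivalent of the cmp lambda above (newest first)
items.sort(key=lambda x: x['time'], reverse=True)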
Code example #4
    # module-level imports assumed: from os import path, makedirs, listdir, stat as osstat
    # plus: import shutil; from twisted.internet import threads
    def copytree(self, src, dst, symlinks=False, ignore=None):
        if not src.startswith("/"):
            src = self._app_dir + "/" + src
        if not dst.startswith("/"):
            dst = self._working_dir + "/" + dst

        if not path.exists(dst):
            makedirs(dst)
        for item in listdir(src):
            s = path.join(src, item)
            d = path.join(dst, item)
            if path.isdir(s):
                self.copytree(s, d, symlinks, ignore)
            else:
                if not path.exists(d) or osstat(s).st_mtime - osstat(d).st_mtime > 1:
                    yield threads.deferToThread(shutil.copy2, s, d)
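
The one-second tolerance on the st_mtime difference is the usual guard against filesystems with coarse timestamps (FAT, for instance, stores modification times in 2-second steps). The freshness test in isolation, with hypothetical paths:

from os import path, stat as osstat

def needs_copy(src, dst):
    # copy when dst is missing or more than 1 second older than src
    return not path.exists(dst) or osstat(src).st_mtime - osstat(dst).st_mtime > 1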
Code example #5
File: methods.py Project: Sanix-Darker/crapo
from os import stat as osstat, remove as osremove

import pyAesCrypt

bufferSize = 64 * 1024  # assumed module-level constant in the original project


def decrypt(path, key):
    print("> Decrypting ", path)
    # get encrypted file size
    enc_file_size = osstat(path).st_size
    out_path = path.replace(".crp0", "")
    # decrypt
    with open(path, "rb") as fIn:
        with open(out_path, "wb") as fOut:
            try:
                # decrypt file stream
                pyAesCrypt.decryptStream(fIn, fOut, str(key), bufferSize,
                                         enc_file_size)
                print(">", path, " decrypted!")
            except ValueError:
                # remove the partial output file on error
                fOut.close()
                osremove(out_path)
                exit("> Error Groulps!!! Verify the key")
Code example #6
from os import stat as osstat

def get_file_creation_time(full_path):
    # IS_UNIX: project-level platform flag (assumed defined elsewhere).
    # st_birthtime exists only on macOS/BSD; on other Unixes the attribute
    # lookup raises AttributeError and we fall back to st_ctime.
    try:
        return osstat(full_path).st_birthtime if IS_UNIX else osstat(
            full_path).st_ctime
    except Exception:
        return osstat(full_path).st_ctime
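
A dependency-free variant of the same fallback, letting getattr supply the default instead of catching an exception:

import os

def creation_time(path):
    st = os.stat(path)
    # st_birthtime exists only on macOS/BSD; st_ctime is the inode change time
    return getattr(st, "st_birthtime", st.st_ctime)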
Code example #7
import stat
from os import stat as osstat, chmod

def nix_check_permissions(bin):
    st = osstat(bin)
    # S_IEXEC is the owner-execute bit (same value as S_IXUSR)
    if not bool(st.st_mode & stat.S_IEXEC):
        chmod(bin, st.st_mode | stat.S_IEXEC)
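
S_IEXEC only covers the owner; to mirror chmod +x for everyone, OR in the group and other bits as well. A sketch with a hypothetical path:

import os
import stat

st = os.stat("script.sh")
os.chmod("script.sh", st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)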
Code example #8
File: easy_diff.py Project: MSublime/EasyDiff
    def set_view(self, view):
        """Set the view."""

        # assumes: import time; from os import stat as osstat
        # stores the file name as f1/f2 and its mtime as t1/t2 for this side
        setattr(self, "f%d" % self.side, view.file_name())
        setattr(self, "t%d" % self.side, time.ctime(osstat(view.file_name()).st_mtime))
Code example #9
File: easy_diff.py Project: skeptycal/EasyDiff
    def set_view(self, view):
        """Set the view."""

        setattr(self, "f%d" % self.side, view.file_name())
        setattr(self, "t%d" % self.side,
                time.ctime(osstat(view.file_name()).st_mtime))
Code example #10
# assumed aliases: from os import stat as osstat, remove as osremove
# and: from os.path import join as opj, exists as ope
def run_spades(se_fastq_files, pe_fastq_files, dir_spades_assemblies,
               spades, dir_temp, ss, threads, ram):

    if len(se_fastq_files) > 0 or len(pe_fastq_files) > 0:
        if spades is None:
            Log.err('SPAdes is not available. Cannot continue. Exiting.')
            exit(0)

    for se in se_fastq_files:
        dir_results = opj(dir_spades_assemblies, se + '__' + ss)
        fq_path = se_fastq_files[se]['vsearch_results_path' + '__' + ss]
        se_fastq_files[se]['spades_assembly' + '__' + ss] = None

        if ope(dir_results):
            Log.msg('SPAdes assembly already exists:', se)
        else:
            make_dirs(dir_results)
            Log.msg('Running SPAdes on:', se)
            run_spades_se(spades,
                          out_dir=dir_results,
                          input_file=fq_path,
                          threads=threads,
                          memory=ram,
                          rna=True)

        assmbl_path = opj(dir_results, 'transcripts.fasta')
        if ope(assmbl_path):
            count = len(read_fasta(assmbl_path, SEQ_TYPE_NT))
            tr_str = ' transcripts.'
            if count == 1:
                tr_str = ' transcript.'
            Log.msg('SPAdes produced ' + str(count) + tr_str, False)
            se_fastq_files[se]['spades_assembly' + '__' + ss] = assmbl_path
        else:
            Log.wrn('SPAdes produced no transcripts.', False)

    for pe in pe_fastq_files:
        dir_results = opj(dir_spades_assemblies, pe + '__' + ss)
        fq_paths = pe_fastq_files[pe]['vsearch_results_path' + '__' + ss]
        pe_fastq_files[pe]['spades_assembly' + '__' + ss] = None

        if ope(dir_results):
            Log.msg('SPAdes assembly already exists:', pe)
        else:
            make_dirs(dir_results)
            Log.msg('Running SPAdes on: ' + pe)

            if osstat(fq_paths[0]).st_size > 0 and \
               osstat(fq_paths[1]).st_size > 0:

                run_spades_pe(spades,
                              out_dir=dir_results,
                              input_files=fq_paths,
                              threads=threads,
                              memory=ram,
                              rna=True)

            else:
                _ = opj(dir_temp, 'temp.fasta')
                combine_text_files(fq_paths, _)
                run_spades_se(spades,
                              out_dir=dir_results,
                              input_file=_,
                              threads=threads,
                              memory=ram,
                              rna=True)
                osremove(_)

        assmbl_path = opj(dir_results, 'transcripts.fasta')
        if ope(assmbl_path):
            count = len(read_fasta(assmbl_path, SEQ_TYPE_NT))
            tr_str = ' transcripts.'
            if count == 1:
                tr_str = ' transcript.'
            Log.msg('SPAdes produced ' + str(count) + tr_str, False)
            pe_fastq_files[pe]['spades_assembly' + '__' + ss] = assmbl_path
        else:
            Log.wrn('SPAdes produced no transcripts.', False)
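
The st_size > 0 test above is the standard quick check for non-empty input files; os.path.getsize spells the same thing without the osstat alias. In isolation:

from os.path import getsize

def both_non_empty(paths):
    # True only when every file contains at least one byte
    return all(getsize(p) > 0 for p in paths)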
Code example #11
File: files.py Project: weweyes/inpanel
        'uid': stat.st_uid,
        'gid': stat.st_gid,
        'uname': uname,
        'gname': gname,
        'size': b2h(stat.st_size),
        'atime': ftime(stat.st_atime),
        'mtime': ftime(stat.st_mtime),
        'ctime': ftime(stat.st_ctime),
    }
    if item['islnk']:
        linkfile = readlink(path)
        item['linkto'] = linkfile
        if not linkfile.startswith('/'):
            linkfile = abspath(join(basepath, linkfile))
        try:
            stat = osstat(linkfile)
            item['link_isdir'] = S_ISDIR(stat.st_mode)
            item['link_isreg'] = S_ISREG(stat.st_mode)
            item['link_broken'] = False
        except OSError:  # dangling or unreadable link target
            item['link_broken'] = True
    return item
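
Resolving a relative symlink target against the link's own directory, as the try block above does, is what os.path.realpath automates; a compact sketch with a hypothetical link:

import os

link = "/tmp/example-link"
resolved = os.path.realpath(link)       # absolute, fully resolved target path
broken = not os.path.exists(resolved)   # exists() is False for a dangling link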


def rename(oldpath, newname):
    # path = abspath(oldpath)
    if not exists(oldpath):
        return False
    try:
        basepath = dirname(oldpath)
        newpath = join(basepath, newname)
Code example #12
""" https://stackoverflow.com/questions/60208/replacements-for-switch-statement-in-python """


@static_vars(scopes={
    'u': 'USR',
    'g': 'GRP',
    'o': 'OTH'
},
             perms={
                 'r': 'R',
                 'w': 'W',
                 'x': 'X'
             },
             ops={
                 '=': lambda p, path: 0 | p,
                 '+': lambda p, path: osstat(path).st_mode | p,
                 '-': lambda p, path: osstat(path).st_mode & ~p
             })
def ezChmod(path, scope, op, perm, issuid):
    myScopes = map(lambda s: ezChmod.scopes[s], scope)
    myPerms = map(lambda p: ezChmod.perms[p], perm)
    pool = Pool()
    cons = pool.map(ezChmodHelper, product(myPerms, myScopes))
    pool.close()
    pool.join()
    #cons     = map   (lambda a: getattr(stat, "S_I%s%s" % a), product(myPerms, myScopes))
    P = reduce(lambda a, b: a | b, cons, S_ISUID if issuid else 0)
    f = ezChmod.ops[op]
    fp = f(P, path)
    chmod(path, fp)
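
A hedged usage sketch (path hypothetical): the call below ORs the owner and group read/execute bits into the file's current mode, like chmod ug+rx. A multiprocessing Pool for at most nine (perm, scope) lookups is heavy machinery; the commented-out plain map() is the lightweight equivalent.

ezChmod("script.sh", "ug", "+", "rx", False)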
Code example #13
                        continue
                    print(level_three + " ---- " + str(r.headers.get('content-length')) + ' bytes')

                    # URL for each filename
                    get_it = site_url + level_one + level_two + level_three

                    # setup
                    r = requests.get(get_it, stream=True)
                    total = r.headers.get('content-length')

                    # if remote file is accessible
                    if r.status_code == 200:

                        # if filename exists and size is different
                        if Path(level_three).exists():

                            if str(osstat(level_three).st_size) != str(total):
                                download(total)

                        # otherwise the file doesn't exist yet, so download it
                        else:
                            download(total)

                    else:
                        print("File '%s' not found!" % level_three)


print('##################')
print('#### COMPLETE ####')
print('##################')
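
Comparing the local st_size against the server's Content-Length header, as above, is a cheap change detector, though it misses same-size changes and fails when the server omits Content-Length (chunked or compressed responses). The check in isolation, with a hypothetical URL:

import os
import requests

url = "https://example.com/file.bin"
local = "file.bin"
total = requests.head(url).headers.get('content-length')
needs_download = (not os.path.exists(local) or total is None
                  or os.stat(local).st_size != int(total))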
Code example #14
def update_fields(atcf_stormfilename, cc, conn, process=False):
    # Must be of form similar to 
    # Gal912016.dat

    import re
    from datetime import datetime, timedelta
    from os.path import basename as pathbasename
    from os import stat as osstat

    updated_files = []

    LOG.info('Checking '+atcf_stormfilename+' ... process '+str(process))

    # Check if we match Gxxdddddd.dat filename format. If not just return and don't do anything.
    if not re.compile(r'G\D\D\d\d\d\d\d\d\.\d\d\d\d\d\d\d\d\d\d\.dat').match(pathbasename(atcf_stormfilename)) and \
       not re.compile(r'G\D\D\d\d\d\d\d\d\.dat').match(pathbasename(atcf_stormfilename)):
        LOG.info('')
        LOG.warning('    DID NOT MATCH REQUIRED FILENAME FORMAT, SKIPPING: '+atcf_stormfilename)
        return []

    # Get all fields for the database entry for the current filename
    cc.execute("SELECT * FROM atcf_deck_stormfiles WHERE filename = ?", (atcf_stormfilename,))
    data=cc.fetchone()

    file_timestamp = datetime.fromtimestamp(osstat(atcf_stormfilename).st_mtime)
    # Reads timestamp out as string - convert to datetime object.
    # Check if timestamp on file is newer than timestamp in database - if not, just return and don't do anything.
    if data: 
        database_timestamp = datetime.strptime(cc.execute("SELECT last_updated from atcf_deck_stormfiles WHERE filename = ?", (atcf_stormfilename,)).fetchone()[0],'%Y-%m-%d %H:%M:%S.%f')
        if file_timestamp < database_timestamp:
            LOG.info('')
            LOG.info(atcf_stormfilename+' already in '+ATCF_DECKS_DB+' and up to date, not doing anything.')
            return []

    with open(atcf_stormfilename, 'r') as stormfile:
        lines = stormfile.readlines()
    start_line = lines[0].split(',')
    # Start 24h prior to start in sectorfile, for initial processing
    #storm_start_datetime = datetime.strptime(start_line[2],'%Y%m%d%H')
    start_datetime = datetime.strptime(start_line[2],'%Y%m%d%H') - timedelta(hours=24)
    end_datetime = datetime.strptime(lines[-1].split(',')[2],'%Y%m%d%H')
    start_vmax= start_line[8]
    vmax=0
    for line in lines:
        currv = line.split(',')[8]
        track = line.split(',')[4]
        if currv and track == 'BEST' and float(currv) > vmax:
            vmax = float(currv)

    if data and database_timestamp < file_timestamp:
        LOG.info('')
        LOG.info('Updating start/end datetime and last_updated fields for '+atcf_stormfilename+' in '+ATCF_DECKS_DB)
        old_start_datetime,old_end_datetime,old_vmax = cc.execute("SELECT start_datetime,end_datetime,vmax from atcf_deck_stormfiles WHERE filename = ?", (atcf_stormfilename,)).fetchone()
        # Eventually add in storm_start_datetime
        #old_storm_start_datetime,old_start_datetime,old_end_datetime,old_vmax = cc.execute("SELECT storm_start_datetime,start_datetime,end_datetime,vmax from atcf_deck_stormfiles WHERE filename = ?", (atcf_stormfilename,)).fetchone()
        if old_start_datetime == start_datetime.strftime('%Y-%m-%d %H:%M:%S'):
            LOG.info('    UNCHANGED start_datetime: '+old_start_datetime)
        else:
            LOG.info('    Old start_datetime: '+old_start_datetime+' to new: '+start_datetime.strftime('%Y-%m-%d %H:%M:%S'))
            updated_files += [atcf_stormfilename]
        #if old_storm_start_datetime == storm_start_datetime.strftime('%Y-%m-%d %H:%M:%S'):
        #    LOG.info('    UNCHANGED storm_start_datetime: '+old_storm_start_datetime)
        #else:
        #    LOG.info('    Old storm_start_datetime: '+old_storm_start_datetime+' to new: '+storm_start_datetime.strftime('%Y-%m-%d %H:%M:%S'))
        if old_end_datetime == end_datetime.strftime('%Y-%m-%d %H:%M:%S'):
            LOG.info('    UNCHANGED end_datetime: '+old_end_datetime)
        else:
            LOG.info('    Old end_datetime: '+old_end_datetime+' to new: '+end_datetime.strftime('%Y-%m-%d %H:%M:%S'))
            updated_files += [atcf_stormfilename]
        if database_timestamp == file_timestamp:
            LOG.info('    UNCHANGED last_updated: '+database_timestamp.strftime('%Y-%m-%d %H:%M:%S'))
        else:
            LOG.info('    Old last_updated: '+database_timestamp.strftime('%Y-%m-%d %H:%M:%S')+' to new: '+file_timestamp.strftime('%Y-%m-%d %H:%M:%S'))
            updated_files += [atcf_stormfilename]
        if old_vmax == vmax:
            LOG.info('    UNCHANGED vmax: '+str(old_vmax))
        else:
            LOG.info('    Old vmax: '+str(old_vmax)+' to new: '+str(vmax))
            updated_files += [atcf_stormfilename]
        cc.execute('''UPDATE atcf_deck_stormfiles SET 
                        last_updated=?,
                        start_datetime=?,
                        end_datetime=?,
                        vmax=? 
                      WHERE filename = ?''', 
                      #Eventually add in ?
                      #storm_start_datetime=?,
                        (file_timestamp,
                        #storm_start_datetime,
                        start_datetime,
                        end_datetime,
                        str(vmax),
                        atcf_stormfilename,))
        conn.commit()
        return updated_files

    start_lat = start_line[6]
    start_lon = start_line[7]
    storm_basin = start_line[0]
    storm_num = start_line[1]
    try:
        start_name= start_line[48]+start_line[49]
    except IndexError:
        start_name= start_line[41]

    if data is None:
        #print '    Adding '+atcf_stormfilename+' to '+ATCF_DECKS_DB
        cc.execute('''insert into atcf_deck_stormfiles(
                        filename,
                        last_updated,
                        vmax,
                        storm_num,
                        storm_basin,
                        start_datetime,
                        start_lat,
                        start_lon,
                        start_vmax,
                        start_name,
                        end_datetime) values(?, ?,?, ?,?,?,?,?,?,?,?)''', 
                        # Eventually add in ?
                        #end_datetime) values(?, ?, ?,?, ?,?,?,?,?,?,?,?)''', 
                        #storm_start_datetime,
                        (atcf_stormfilename,
                            file_timestamp,
                            str(vmax),
                            storm_num,
                            storm_basin,
                            #storm_start_datetime,
                            start_datetime,
                            start_lat,
                            start_lon,
                            start_vmax,
                            start_name,
                            end_datetime,))
        LOG.info('')
        LOG.info('    Adding '+atcf_stormfilename+' to '+ATCF_DECKS_DB) 
        updated_files += [atcf_stormfilename]
        conn.commit()

        # This ONLY runs if it is a brand new storm file and we requested 
        # processing.
        if process:
            reprocess_storm(atcf_stormfilename)
    return updated_files
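
The core freshness test here, comparing the file's st_mtime (as a datetime) against a stored timestamp, is a common skip-unchanged-input pattern. In isolation:

from datetime import datetime
from os import stat as osstat

def modified_since(path, last_seen):
    # last_seen: datetime recorded after the previous successful run
    return datetime.fromtimestamp(osstat(path).st_mtime) > last_seen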
Code example #15
File: modio.py Project: nbuchwitz/revpimodio2
    # module-level imports assumed: import warnings; from threading import Event, Lock;
    # from os import stat as osstat; from stat import S_ISCHR; acheck: project helper
    def __init__(self,
                 autorefresh=False,
                 monitoring=False,
                 syncoutputs=True,
                 procimg=None,
                 configrsc=None,
                 simulator=False,
                 debug=True,
                 replace_io_file=None,
                 shared_procimg=False,
                 direct_output=False):
        """
        Instantiiert die Grundfunktionen.

        :param autorefresh: Wenn True, alle Devices zu autorefresh hinzufuegen
        :param monitoring: In- und Outputs werden gelesen, niemals geschrieben
        :param syncoutputs: Aktuell gesetzte Outputs vom Prozessabbild einlesen
        :param procimg: Abweichender Pfad zum Prozessabbild
        :param configrsc: Abweichender Pfad zur piCtory Konfigurationsdatei
        :param simulator: Laedt das Modul als Simulator und vertauscht IOs
        :param debug: Gibt alle Warnungen inkl. Zyklusprobleme aus
        :param replace_io_file: Replace IO Konfiguration aus Datei laden
        :param shared_procimg: Share process image with other processes (insecure for automation, little slower)
        :param direct_output: Deprecated, use shared_procimg
        """
        # Parameter validation
        acheck(bool,
               autorefresh=autorefresh,
               monitoring=monitoring,
               syncoutputs=syncoutputs,
               simulator=simulator,
               debug=debug,
               shared_procimg=shared_procimg,
               direct_output=direct_output)
        acheck(str,
               procimg_noneok=procimg,
               configrsc_noneok=configrsc,
               replace_io_file_noneok=replace_io_file)

        # TODO: Remove in next release
        if direct_output:
            warnings.warn(
                DeprecationWarning(
                    "direct_output is deprecated - use shared_procimg instead!"
                ))

        self._autorefresh = autorefresh
        self._configrsc = configrsc
        self._direct_output = shared_procimg or direct_output
        self._monitoring = monitoring
        self._procimg = "/dev/piControl0" if procimg is None else procimg
        self._simulator = simulator
        self._syncoutputs = syncoutputs

        # TODO: with simulator and procimg, check whether the file exists / create it?

        # Private variables
        self.__cleanupfunc = None
        self._buffedwrite = False
        self._debug = 1
        self._exit = Event()
        self._exit_level = 0
        self._imgwriter = None
        self._ioerror = 0
        self._length = 0
        self._looprunning = False
        self._lst_devselect = []
        self._lst_refresh = []
        self._maxioerrors = 0
        self._myfh = None
        self._myfh_lck = Lock()
        self._replace_io_file = replace_io_file
        self._th_mainloop = None
        self._waitexit = Event()

        # Module variables
        self.core = None

        # piCtory classes
        self.app = None
        self.device = None
        self.io = None
        self.summary = None

        # Event for user actions
        self.exitsignal = Event()

        # Set the value via its setter
        self.debug = debug

        try:
            self._run_on_pi = S_ISCHR(osstat(self._procimg).st_mode)
        except Exception:
            self._run_on_pi = False

        # Only configure when not subclassed
        if type(self) == RevPiModIO:
            self._configure(self.get_jconfigrsc())
            self._configure_replace_io(self._get_cpreplaceio())
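
The S_ISCHR call near the end is how the module detects real RevPi hardware: /dev/piControl0 is a character device, while a plain process-image file is not. The same check in isolation:

import os
from stat import S_ISCHR

# True on any Unix system: /dev/null is a character device
print(S_ISCHR(os.stat("/dev/null").st_mode))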