Example #1
    def _check_if_tg_required(self, tg_symbol):
        """Checks if the rule associated with tg_symbol should be ran or not. It's true if:
        - The target isn't an existing file (then it's a rule, or a file that needs to be created)
        - One of the rule's dependencies isn't a file
        - One of the rule's dependency is a file, and has a more recent timestamp
        """

        # checking if it's a file
        tg_filepath = join(self.makefile_folder, tg_symbol.name)
        if not isfile(tg_filepath):
            return True

        # checking the dependency
        for dependency in self.rule_table[tg_symbol].dependencies:
            dep_filepath = join(self.makefile_folder, dependency.name)

            if isfile(dep_filepath):
                # the dependency has a more recent timestamp than the target
                if getctime(dep_filepath) > getctime(tg_filepath):
                    return True
            else:
                # Assuming that the Makefile is valid and the required file "dependency" is a target of another rule
                return True

        return False
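The method above implements a make-style staleness check. Below is a minimal standalone sketch of the same idea, with illustrative names; it uses getmtime rather than getctime, since build tools usually compare modification times (ctime also changes on metadata-only updates):

from os.path import getmtime, isfile

def needs_rebuild(target, dependencies):
    """Return True if target is missing or any existing dependency is newer."""
    if not isfile(target):
        return True
    target_time = getmtime(target)
    # rebuild as soon as any dependency file was modified after the target
    return any(isfile(dep) and getmtime(dep) > target_time for dep in dependencies)

# usage: needs_rebuild('app.o', ['app.c', 'app.h'])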
Example #2
    def execute(self, context):
        # 1. Save scene, and set environment variables.
        if bpy.data.filepath == '':
            bpy.ops.wm.save_mainfile('INVOKE_AREA')
        else:
            bpy.ops.wm.save_as_mainfile('EXEC_AREA')
            self.full_file_path = bpy.data.filepath
            self.file_name = split(self.full_file_path)[1][:-6]
            self.file_directory = dirname(self.full_file_path)

            # 2. Check whether 5 minutes have passed since the last save. If so, save a new version. Else, just save.
            
            self.version_number = '-0001'
            self.old_version_number = self.version_number
            while exists(join(self.file_directory, self.file_name + self.version_number + '.blend')):
                self.old_version_number = self.version_number
                # bump the numeric suffix, keeping the zero-padded '-NNNN' form
                self.version_number = '-' + str(int(self.version_number[1:]) + 1).zfill(4)

            try:
                self.previous_time = getctime(join(self.file_directory, self.file_name + self.old_version_number + '.blend'))

            except FileNotFoundError:
                self.previous_time = getctime(self.full_file_path)

            if (time() - self.previous_time) >= 300: # Check if 5 minutes have passed (300 seconds).
                self.new_file_name = self.file_name + self.version_number
                copyfile(join(self.file_directory, self.file_name + '.blend'), join(self.file_directory, self.new_file_name + '.blend'))

        return {'FINISHED'}
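A hedged sketch of the same "save a new version only if the latest one is old enough" pattern outside Blender, with illustrative names; the zero-padded '-NNNN' suffix matches what the loop above constructs:

from os.path import exists, getctime, splitext
from shutil import copyfile
from time import time

def save_version(filepath, min_age=300):
    """Copy filepath to name-0001.ext, name-0002.ext, ... at most every min_age seconds."""
    base, ext = splitext(filepath)
    n = 1
    while exists('{}-{:04d}{}'.format(base, n, ext)):
        n += 1
    # compare against the newest existing version, or the file itself if none exist
    latest = '{}-{:04d}{}'.format(base, n - 1, ext) if n > 1 else filepath
    if time() - getctime(latest) >= min_age:
        copyfile(filepath, '{}-{:04d}{}'.format(base, n, ext))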
Example #3
def bgzip_and_tabix(fpath, reuse=False, tabix_parameters='', **kwargs):
    gzipped_fpath = fpath + '.gz'
    tbi_fpath = gzipped_fpath + '.tbi'

    if reuse and \
           file_exists(gzipped_fpath) and (getctime(gzipped_fpath) >= getctime(fpath) if file_exists(fpath) else True) and \
           file_exists(tbi_fpath) and getctime(tbi_fpath) >= getctime(gzipped_fpath):
        info('Actual compressed file and index exist, reusing')
        return gzipped_fpath

    info('Compressing and tabixing file, writing ' + gzipped_fpath + '(.tbi)')
    bgzip = which('bgzip')
    tabix = which('tabix')
    if not bgzip:
        err('Cannot index file because bgzip is not found')
    if not tabix:
        err('Cannot index file because tabix is not found')
    if not bgzip or not tabix:
        return fpath

    if isfile(gzipped_fpath):
        os.remove(gzipped_fpath)
    if isfile(tbi_fpath):
        os.remove(tbi_fpath)

    info('BGzipping ' + fpath)
    cmdline = '{bgzip} {fpath}'.format(**locals())
    call_process.run(cmdline)

    info('Tabixing ' + gzipped_fpath)
    cmdline = '{tabix} {tabix_parameters} {gzipped_fpath}'.format(**locals())
    call_process.run(cmdline)

    return gzipped_fpath
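The reuse guard above boils down to an "output at least as new as its input" predicate. A minimal sketch of that check under the same getctime convention (file_exists in the original is assumed to behave like os.path.exists):

from os.path import exists, getctime

def up_to_date(output, source):
    """True if output exists and is at least as new as source (when source exists)."""
    if not exists(output):
        return False
    return not exists(source) or getctime(output) >= getctime(source)

# the guard above is roughly:
# reuse if up_to_date(gzipped_fpath, fpath) and up_to_date(tbi_fpath, gzipped_fpath)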
Example #4
def createDict(path, root=None):
    # avoid the mutable default argument pitfall: a dict default is shared across calls
    if root is None:
        root = {}
    pathList = listdir(path)
    for item in pathList:
        file_path = path_join(path, item)
        if item not in ignore_dir and exists(file_path):
            if isdir(file_path):
                if not root.get(item, False):
                    root[item] = {"type": "dir", "files": {}}
                createDict(file_path, root[item]["files"])
            else:
                if not root.get(item, False):
                    log("new file " + file_path)
                    root[item] = {"type": "file",
                                  "file_size": getsize(file_path),
                                  "mtime": getmtime(file_path), 
                                  "ctime": getctime(file_path),
                                  "md5": md5(file_path),
                                  "sha256": sha256(file_path)}
                else:
                    if root[item]["mtime"] != getmtime(file_path):
                        log("rehashing " + file_path)
                        root[item] = {"type": "file",
                                      "file_size": getsize(file_path),
                                      "mtime": getmtime(file_path), 
                                      "ctime": getctime(file_path),
                                      "md5": md5(file_path),
                                      "sha256": sha256(file_path)}
                        
                                    
    return root
Example #5
def is_modified ( abs_path, is_file, max_age=48, feature_enabled=False,
    image_file_pattern=compile('^.*$') ):
    """Check if a file was created between now and now minus the given
    max age in hours. Return false if this feature is not configured."""

    if not feature_enabled:
        return False, 0

    oldest_epoch = time() - ( max_age * 60.0 * 60.0 )
    is_modified = False
    last_change = 0

    # on files just check the file ..
    if is_file:
        if (path.getctime(abs_path) >= oldest_epoch or
            path.getmtime(abs_path) >= oldest_epoch):
            is_modified = True
        last_change = max(path.getctime(abs_path), path.getmtime(abs_path))
    # on folders, find all image files and check those for changes
    # (if we just inspected the folder itself we would get spurious updates,
    # e.g. simply because the folder was touched).
    else:
        files = findfiles( abs_path, image_file_pattern, doprint=False)
        for subfile in files:
            if (path.getctime(subfile) >= oldest_epoch or
                path.getmtime(subfile) >= oldest_epoch):
                is_modified = True
            last_change = max(
                last_change, path.getctime(subfile), path.getmtime(subfile))

    return is_modified, last_change
Example #6
def needs_update(arg,dirname,names):
    last_built = path.getctime('%s.root'%dirname)

    times = []
    for name in names:
        times.append(path.getctime(path.join(dirname,name)))
    
    arg[0] = (last_built < max(times))    
Example #7
 def filesort(file1, file2):
     """ sort by create time """
     ctime1 = getctime(file1)
     ctime2 = getctime(file2)
     if ctime1 < ctime2:
         return -1
     elif ctime1 == ctime2:
         return 0
     else:
         return 1
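filesort is a Python 2 cmp-style comparator. Python 3's sorted() dropped the cmp argument, so, assuming filesort is available as a plain function, it would be adapted with functools.cmp_to_key, or replaced outright by a key function:

from functools import cmp_to_key
from os.path import getctime

files = ['a.log', 'b.log']  # illustrative paths

ordered = sorted(files, key=cmp_to_key(filesort))  # adapting the comparator
ordered = sorted(files, key=getctime)              # the idiomatic equivalent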
Example #8
def infos_ogr(shapepath):
    u""" Uses gdal/ogr functions to extract basic informations about shapefile
    given as parameter and store into the corresponding dictionary. """
    global dico_infos_couche, dico_champs, liste_chps
    source = ogr.Open(shapepath, 0)     # OGR driver
    couche = source.GetLayer()          # get the layer
    objet = couche.GetFeature(0)        # get the first object (index 0)
    geom = objet.GetGeometryRef()       # get the geometry
    def_couche = couche.GetLayerDefn()  # get the layer definitions
    srs = couche.GetSpatialRef()        # get spatial system reference
    srs.AutoIdentifyEPSG()              # try to determine the EPSG code
    # Storing into the dictionary
    dico_infos_couche[u'nom'] = path.basename(shapepath)
    dico_infos_couche[u'titre'] = dico_infos_couche[u'nom'][:-4].replace('_', ' ').capitalize()
    dico_infos_couche[u'nbr_objets'] = couche.GetFeatureCount()
    dico_infos_couche[u'nbr_attributs'] = def_couche.GetFieldCount()
    dico_infos_couche[u'proj'] = unicode(srs.GetAttrValue("PROJCS")).replace('_', ' ')
    dico_infos_couche[u'EPSG'] = unicode(srs.GetAttrValue("AUTHORITY", 1))
    '''dico_infos_couche[u'EPSG'] = u"Projection : " + \
                                 unicode(srs.GetAttrValue("PROJCS")).replace('_', ' ') + \
                                 u" - Code EPSG : " + \
                                 unicode(srs.GetAttrValue("AUTHORITY", 1))'''
    # geometry type
    if geom.GetGeometryName() == u'POINT':
        dico_infos_couche[u'type_geom'] = u'Point'
    elif u'LINESTRING' in geom.GetGeometryName():
        dico_infos_couche[u'type_geom'] = u'Ligne'
    elif u'POLYGON' in geom.GetGeometryName():
        dico_infos_couche[u'type_geom'] = u'Polygone'
    else:
        dico_infos_couche[u'type_geom'] = geom.GetGeometryName()
    # Spatial extent (bounding box)
    dico_infos_couche[u'Xmin'] = round(couche.GetExtent()[0],2)
    dico_infos_couche[u'Xmax'] = round(couche.GetExtent()[1],2)
    dico_infos_couche[u'Ymin'] = round(couche.GetExtent()[2],2)
    dico_infos_couche[u'Ymax'] = round(couche.GetExtent()[3],2)

    # Fields
    for i in range(def_couche.GetFieldCount()):
        liste_chps.append(def_couche.GetFieldDefn(i).GetName())
        dico_champs[def_couche.GetFieldDefn(i).GetName()] = def_couche.GetFieldDefn(i).GetTypeName(),\
                                                            def_couche.GetFieldDefn(i).GetWidth(),\
                                                            def_couche.GetFieldDefn(i).GetPrecision()

    dico_infos_couche[u'date_actu'] = unicode(localtime(path.getmtime(shapepath))[2]) +\
                                   u'/'+ unicode(localtime(path.getmtime(shapepath))[1]) +\
                                   u'/'+ unicode(localtime(path.getmtime(shapepath))[0])
    dico_infos_couche[u'date_creation'] = unicode(localtime(path.getctime(shapepath))[2]) +\
                                   u'/'+ unicode(localtime(path.getctime(shapepath))[1]) +\
                                   u'/'+ unicode(localtime(path.getctime(shapepath))[0])
    # end of function
    return dico_infos_couche, dico_champs, liste_chps
Example #9
    def upgrade_static_files(self):
        """This method allows for updating a selection of static files
        with corresponding files residing in a hidden .pyntrest  folder
        in your main image folder. This comes in handy when you want  to
        update the CSS or favicon without touching the core implementation."""

        pyn_config_folder = path.join(self.main_images_path, '.pyntrest')

        # these files can be overridden
        changeable_files = [
           path.join('res', 'favicon.png'),
           path.join('res', 'favicon-apple.png'),
           path.join('css', 'pyntrest-main.css'),
           path.join('index.html'),
           path.join('bookify.html'),
        ]

        for ch_file in changeable_files:

            # the changeable file at its final destination
            if 'index' in ch_file:
                exis_file = path.join(TEMPLATE_DIRS[0], 'pyntrest', ch_file)
            else:
                exis_file = path.join(self.static_path, ch_file)
            # the candidate file from the main images folder
            cand_file = path.join(pyn_config_folder, ch_file)

            if not file_exists(exis_file) and not file_exists(cand_file):
                # no target file and no custom file --> copy from default
                print ('Creating file \'{}\' from default.'.format(exis_file))
                copyfile(exis_file + '.default', exis_file)
            elif not file_exists(exis_file) and file_exists(cand_file):
                # no target file but custom file --> copy from custom
                print ('Creating file \'{}\' from version at \'{}\'.'
                       .format(exis_file, cand_file))
                copyfile(cand_file, exis_file)

            #print 'staticfile candidate = {}'.format(cand_file)

            if not file_exists(cand_file):
                continue # nothing to compare

            # get modified / created dates
            efile_ts = max( path.getctime(exis_file), path.getmtime(exis_file))
            cfile_ts = max( path.getctime(cand_file), path.getmtime(cand_file))

            if cfile_ts >= efile_ts:
                print (
                'Updating file \'{}\' with newer version at \'{}\' [{} >> {}].'
                .format(ch_file, cand_file, efile_ts, cfile_ts))
                copyfile(cand_file, exis_file)
            else:
                pass
Example #10
def generate_time_df():
    cols = ["vehicles", "capacity", "waiting_time", "day", "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for v, c, wt, d in product(vehicles, caps, waiting_times, range(1, 8)):
        s, e = get_comp_filenames(v, c, wt, 0, d)
        diff = (path.getctime(e) - path.getctime(s)) / 2878
        if diff > 500:
            diff = 2.9
        data.loc[counter] = [v, c, wt, d - 1, diff]
        counter += 1
    return data
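This function, and the three variants that follow, all derive a per-step computation time from the creation times of a "start" and an "end" marker file. A hedged generic helper for that shared pattern (the filename getters above are project-specific):

from os.path import getctime

def per_step_time(start_file, end_file, steps):
    """Average seconds per step, taken from the ctimes of two marker files."""
    return (getctime(end_file) - getctime(start_file)) / steps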
Example #11
def generate_hour_time_df():
    cols = ["hour", "day", "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for i, d in product(["same", "t12", "t19"], range(1, 8)):
        s, e = get_hour_comp_filenames(i, d)
        diff = (path.getctime(e) - path.getctime(s)) / 2878
        if diff > 500:
            diff = 2.9
        data.loc[counter] = [i, d - 1, diff]
        counter += 1
    return data
Example #12
def generate_demand_time_df():
    cols = ["demand", "capacity", "day", "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for i, c, d in product(demands, [1, 4], range(1, 8)):
        s, e = get_demand_comp_filenames(i, c, d)
        diff = (path.getctime(e) - path.getctime(s)) / 2878
        if diff > 500:
            diff = 2.9
        data.loc[counter] = [i, c, d - 1, diff]
        counter += 1
    return data
Example #13
def generate_interval_time_df():
    cols = ["interval", "day", "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for i, d in product(intervals, range(1, 8)):
        s, e = get_interval_comp_filenames(i, d)
        total_secs = (24 * 60 * 60) / i
        diff = (path.getctime(e) - path.getctime(s)) / total_secs
        if diff > 500:
            diff = 2.9
        data.loc[counter] = [i, d - 1, diff]
        counter += 1
    return data
Example #14
def _lvm_pickle(filename):
    """ Reads pickle file (for local use)

    :param filename: filename of lvm file
    :return lvm_data: dict with lvm data
    """
    p_file = '{}.pkl'.format(filename)
    lvm_data = False
    # if the pickle file exists and is up to date, just load it
    if path.exists(p_file) and path.getctime(p_file) > path.getctime(filename):
        with open(p_file, 'rb') as f:
            lvm_data = pickle.load(f)
    return lvm_data
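For this cache check to ever succeed, something must write the pickle after parsing the .lvm file. A minimal sketch of that companion step; the name _lvm_dump is illustrative, not from the source:

import pickle

def _lvm_dump(lvm_data, filename):
    """Pickle parsed lvm data next to the source file, as '<filename>.pkl'."""
    with open('{}.pkl'.format(filename), 'wb') as f:
        pickle.dump(lvm_data, f, protocol=pickle.HIGHEST_PROTOCOL)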
Example #15
	def oggenc(self,wavfile,oggfile):
		"""
			encodes wav to ogg if the ogg file does not exist or if the wav file is newer than the ogg file
                        1. sox trims silence from beginning and end of audio
                        2. sox pads audio with 10ms silence before and 50ms silence after audio
                        3. oggenc encodes trimmed and padded audio to ogg

			@param string wavfile => full path to the input wav file
			@param string oggfile => full path to the output ogg file
			@return integer/string

			returns 0 if the conversion was successful,
			otherwise returns a string containing the command used to convert the audio
		"""
		trimmed_wav_file = path.split(wavfile)[0] + '/trimmed.wav'
		trimcommand = 'sox -q -t wav ' + wavfile + ' ' + trimmed_wav_file + ' silence 1 1 0.05% reverse silence 1 1 0.05% reverse pad 0.010 0.050'
		encodecommand = 'oggenc -Q --resample 44100 ' + trimmed_wav_file + ' -o ' + oggfile

		# ogg file does not exist
		if not path.exists(oggfile):
			print 'Info: ogg file ' + oggfile + ' does not exist, encoding wav to ogg'

			child = popen(trimcommand)
			err = child.close()
			if err != None:
				return 'Error: ' + wavfile + ' could not trim audio using command: ' + trimcommand + '\n'

			child = popen(encodecommand)
			err = child.close()
			if err != None:
				return 'Error: ' + wavfile + ' could not be encoded to ogg using command: ' + encodecommand + '\n'
			return 0

		# wav file is newer than ogg file
		if path.getctime(wavfile) > path.getctime(oggfile):
			remove(oggfile)
			print 'Info: wav file ' + wavfile + ' is updated, re-encoding wav to ogg'

			child = popen(trimcommand)
			err = child.close()
			if err != None:
				return 'Error: ' + wavfile + ' could not trim audio using command: ' + trimcommand + '\n'

			child = popen(encodecommand)
			err = child.close()
			if err != None:
				return 'Error: ' + wavfile + ' could not be encoded to ogg using command: ' + encodecommand + '\n'
			return 0
		return 0
Example #16
File: DB.py Project: gwely/Redownr
	def add_existing_album(self, user, oldalbum, oldpath):
		newalbum = path.join(ImageUtils.get_root(), 'content', user, oldalbum)
		if path.exists(newalbum):
			self.debug('album already exists: %s' % newalbum)
			return

		(post, comment, imgid) = self.get_post_comment_id(oldalbum)
		url = 'http://imgur.com/a/%s' % imgid
		try:
			album_id = self.add_album(newalbum, user, url, post, comment)
		except Exception as e:
			self.debug('add_existing_album: failed: %s' % str(e))
			return

		for image in listdir(oldpath):
			self.debug('add_existing_album: image=%s' % path.join(oldpath, image))
			fakeimage = post
			if comment != None:
				fakeimage = '%s-%s' % (fakeimage, comment)
			fakeimage = '%s_%s' % (fakeimage, image.split('_')[-1])
			self.add_existing_image(user, fakeimage, path.join(oldpath, image), subdir=oldalbum, album_id=album_id)

			# Add post
			p = Post()
			p.id = post
			p.author = user
			if comment == None: p.url = url
			p.created = path.getctime(oldpath)
			p.subreddit = ''
			p.title = ''
			try:
				self.add_post(p, legacy=1)
			except Exception as e:
				#self.debug('add_existing_image: %s' % str(e))
				pass

			# Add comment
			if comment != None:
				c = Comment()
				c.id = comment
				c.post_id = post
				c.author = user
				if comment != None: c.body = url
				c.created = path.getctime(oldpath)
				try:
					self.add_comment(c, legacy=1)
				except Exception as e:
					#self.debug('add_existing_image: %s' % str(e))
					pass
Example #17
    def load_from_cache(self):
        """ Loads the package dict from a cache file """
        try:
            ctime = getctime(self._cache)

            if getctime(self._db) > ctime or getctime(__file__) > ctime:
                raise CacheError(_("Cache is outdated: {0}").format(self._cache))
        except OSError:
            raise CacheError(_("Cache is outdated: {0}").format(self._cache))

        try:
            with open(self._cache, "rb") as f:
                return unpickle(f)
        except Exception:
            raise CacheError(_("Could not load cache: {0}").format(self._cache))
Example #18
    def __init__(self, src_path, dico_qgs, tipo, txt=''):
        u"""Parse QGS files which are XML based files.

        src_path = path to the qgs file
        dico_qgs = dictionary for global informations
        tipo = format
        text = dictionary of text in the selected language
        """
        # changing working directory to layer folder
        chdir(path.dirname(src_path))
        # initializing errors counter
        self.alert = 0

        # context metadata
        dico_qgs['name'] = path.basename(src_path)
        dico_qgs['folder'] = path.dirname(src_path)
        dico_qgs['date_crea'] = strftime("%Y/%m/%d",
                                         localtime(path.getctime(src_path)))
        dico_qgs['date_actu'] = strftime("%Y/%m/%d",
                                         localtime(path.getmtime(src_path)))
        dico_qgs["total_size"] = youtils.sizeof(src_path)
        # opening qgs
        with open(qgspath, "r") as fd:
            in_xml = xmltodict.parse(fd.read())
            logger.debug("QGIS file opened.")
            xml_qgis = in_xml.get("qgis", {})
            print(xml_qgis.keys())
            # BASICS
            dico_qgs['title'] = xml_qgis.get('title')
            dico_qgs['description'] = xml_qgis.get('@projectname')
            dico_qgs['version'] = xml_qgis.get('@version')
            # MAP CANVAS
            qgs_map = xml_qgis.get("mapcanvas")
            if len(qgs_map) > 1:
                logging.info("QGS file has more than 1 mapcanvas markup.")
                qgs_map = qgs_map[0]
            else:
                pass
            dico_qgs['units'] = qgs_map.get("units")

            qgs_extent = qgs_map.get('extent')
            dico_qgs["Xmin"] = round(float(qgs_extent.get("xmin")), 2)
            dico_qgs["Xmax"] = round(float(qgs_extent.get("xmax")), 2)
            dico_qgs["Ymin"] = round(float(qgs_extent.get("ymin")), 2)
            dico_qgs["Ymax"] = round(float(qgs_extent.get("ymax")), 2)

            # SRS
            qgs_srs = qgs_map.get("destinationsrs").get("spatialrefsys")
            print(qgs_srs.keys())
            dico_qgs[u'srs'] = qgs_srs.get("description")
            if qgs_srs.get("geographicflag") == "false":
                dico_qgs[u'srs_type'] = u"Projected"
            else:
                dico_qgs[u'srs_type'] = u"Geographic"
            dico_qgs[u'EPSG'] = qgs_srs.get("authid")

            # LAYERS
            # print(xml_qgis.get("projectlayers").get("maplayer"))
            qgs_lyrs = xml_qgis.get("projectlayers").get("maplayer")
            dico_qgs[u'layers_count'] = len(qgs_lyrs)
Example #19
def generate_time_df():
    cols = ["vehicles", "capacity", "waiting_time", "day", "predictions",
            "comp_time"]
    data = pd.DataFrame(columns=cols)
    counter = 0
    for v, c, wt, p, d in product(vehicles, caps, waiting_times, preds, days):
        s, e = get_comp_filenames(v, c, wt, p, d)
        try:
            diff = (path.getctime(e) - path.getctime(s)) / 2878
            if diff > 500:
                diff = 2.9
            data.loc[counter] = [v, c, wt, d, p, diff]
            counter += 1
        except OSError:
            print s, e
    return data
Example #20
def cmd_install(opt, slist):
    num_installation = 0
    update_targets = []
    failed_targets = []
    uptodate_targets = []

    for i, s in util.next_target(opt, slist):
        f_compiled = path.join(s.code, Config.target_name(s.target))
        f_current = path.join(s.run, Config.target_name(s.target))
        if path.exists(f_compiled) and (not path.exists(f_current) or path.getctime(f_compiled) > path.getctime(f_current)):
            num_installation = num_installation + 1
            mv_stat = 'mv {0} {1}'.format(f_compiled, f_current)
            if 0 == call(mv_stat, shell=True):
                update_targets.append(s.target)
            else:
                failed_targets.append(s.target)
        else:
            uptodate_targets.append(s.target)

    logutil.debug('num of installation: ' + str(num_installation))
    logutil.debug('updated targets:\n\t' + '\n\t'.join(update_targets))
    logutil.debug('up-to-date targets:\n\t' + '\n\t'.join(uptodate_targets))
    logutil.debug('failed targets:\n\t' + '\n\t'.join(failed_targets))

    return len(failed_targets)
Example #21
    def _dump(self):
        if _subprocess.call('which mongodump', stdout=_subprocess.DEVNULL, stderr=_subprocess.DEVNULL, shell=True) != 0:
            raise RuntimeError('Cannot find mongodump executable.')

        _maintenance.enable()

        db_name = _reg.get('db.database')
        target_dir = _path.join(_reg.get('paths.root'), 'misc', 'dbdump')
        target_subdir = _path.join(target_dir, db_name)

        if _path.exists(target_subdir):
            ctime = _datetime.fromtimestamp(_path.getctime(target_subdir))
            target_subdir_move = '{}-{}'.format(target_subdir, ctime.strftime('%Y%m%d-%H%M%S'))
            _shutil.move(target_subdir, target_subdir_move)

        from . import _api
        config = _api.get_config()

        command = 'mongodump -h {}:{} --gzip -o {} -d {}'.format(config['host'], config['port'], target_dir, db_name)

        if config['user']:
            command += ' -u {} -p {}'.format(config['user'], config['password'])
        if config['ssl']:
            command += ' --ssl --sslAllowInvalidCertificates'

        r = _subprocess.call(command, shell=True)

        _maintenance.disable()

        return r
Example #22
    def _delete_old_files(ct):

        # define the expiration time constant
        expired_at = time.time()-3600*config.LIFETIME
        
        # delete old files (currently they live for %life_hours% hours)
        import glob
        files, ks, ds, kept, deleted = [], [], [], 0, 0
        for path in ('static/*/images/*session*.*', 'static/*/csv/*session*.*',
                     config.UPLOAD_FOLDER + '/*session*.*'):
            files += glob.glob(path)
        fmt_str = r'(cdi|fth|xpcs)data_session([0-9]*)_id([0-9]*)(.*).(fits|zip|csv|png|jpg)'

        for f in files:
            
            # get the session id for the file
            try:
                project, session_id, data_id, extra, fmt = re.match(fmt_str,f).groups()
            except AttributeError:
                project, session_id, data_id, extra, fmt = None, None, None, None, None
            
            # see how old the file is. if too old, delete it.
            if getctime(f) < expired_at and extra != '_crashed':
                os.remove(f)
                deleted += 1
                ds.append(session_id)
            else:
                kept += 1
                ks.append(session_id)
                
        print "kept %s files from %s distinct sessions"%(kept,len(set(ks)))
        print "deleted %s files from %s distinct sessions"%(deleted,len(set(ds)))
Example #23
def getinfo(path):
    typ = 'inny'
    size = ''
    count = ''
    way = path
    ct = datetime.datetime.fromtimestamp(mktime(gmtime(getctime(path)))).date()
    mt = datetime.datetime.fromtimestamp(mktime(gmtime(getmtime(path)))).date()
    if isfile(path):
        typ = 'plik'
        size = (str(getsize(path)))+('B')
        return '\n'.join(['typ: '+typ, 'sciezka: '+(way),
                          'rozmiar: '+(size), 
                          'ctime '+('-'.join([str(ct.year), str(ct.month), str(ct.day)])),
                          'mtime '+('-'.join([str(mt.year), str(mt.month), str(mt.day)]))])
    if isdir(path):
        typ = 'katalog'
        size, count = get_size(path)
        return '\n'.join(['typ: '+(typ), 'sciezka: '+(way),
                          'rozmiar: '+(size), 'liczba_plikow: '+(count),
                          'ctime '+('-'.join([str(ct.year), str(ct.month), str(ct.day)])),
                          'mtime '+('-'.join([str(mt.year), str(mt.month), str(mt.day)]))])
    
    return '\n'.join(
        ['typ: '+(typ), 'sciezka: '+(way),'rozmiar: '+(size), 
                          'ctime '+('-'.join([str(ct.year), str(ct.month), str(ct.day)])),
                          'mtime '+('-'.join([str(mt.year), str(mt.month), str(mt.day)]))])
Example #24
    def _load_offsets(self):
        """load frame offsets from file, reread them from the trajectory if that
        fails"""
        fname = offsets_filename(self.filename)

        if not isfile(fname):
            self._read_offsets(store=True)
            return

        data = read_numpy_offsets(fname)
        ctime_ok = size_ok = n_atoms_ok = False

        try:
            ctime_ok = getctime(self.filename) == data['ctime']
            size_ok = getsize(self.filename) == data['size']
            n_atoms_ok = self._xdr.n_atoms == data['n_atoms']
        except KeyError:
            # we tripped over an old-format offsets file
            pass

        if not (ctime_ok and size_ok and n_atoms_ok):
            warnings.warn("Reload offsets from trajectory\n "
                          "ctime or size or n_atoms did not match")
            self._read_offsets(store=True)
        else:
            self._xdr.set_offsets(data['offsets'])
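A hedged sketch of the matching writer: persist the trajectory's ctime, size, and atom count alongside the offsets so the checks above can detect a stale cache. The .npz layout is assumed from the read_numpy_offsets name, not confirmed by the source:

import numpy as np
from os.path import getctime, getsize

def write_numpy_offsets(fname, traj_filename, offsets, n_atoms):
    """Persist frame offsets plus the metadata used for staleness detection."""
    np.savez(fname,
             offsets=offsets,
             ctime=getctime(traj_filename),
             size=getsize(traj_filename),
             n_atoms=n_atoms)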
Example #25
File: app.py Project: debatem1/pyll
    def _get_default_headers(self, path):
        """
        Returns a dict with the default headers for `path`.

        `path` - the relative path from the project dir to the file
        `title` - titleized version of the filename
        `date` - set to ctime. On unix this is the time of the most recent
                 metadata change; on windows the creation time. If ctime
                 cannot be accessed (due to permissions), the current 
                 time is used.
        `status` - set to 'live'
        `template` - set to 'default.html'
        `url` - set to "default" rule
        `slug` - filename or, if the filename is "index", the basename
                 of the parent directory unless it's the top-level dir.
        `output_ext` - the extension of the parsed file
        """
        output_ext = splitext(path)[1][1:]
        root, filename = split(splitext(path)[0])
        if filename == 'index' and root != self.settings['project_dir']:
            slug = basename(root)
        else:
            slug = filename
        title = filename.title()
        try:
            date = datetime.fromtimestamp(getctime(path))
        except OSError:
            # use the current date if the ctime cannot be accessed
            date = datetime.now()
        return dict(path=relpath(path, self.settings['project_dir']),
                    title=title, date=date, status='live',
                    slug=slug, template='default.html', url='default',
                    output_ext=output_ext)
Example #26
 def get_update_time(self):
     try:
         from os.path import getctime
         from datetime import datetime
         self.update_utc_time = datetime.utcfromtimestamp(getctime(self.ANSIBLE_PULL_LOG_FILE))
     except Exception:
         pass
Example #27
 def read_container(root_dir):
     if not op.isdir(root_dir):
         raise Exception('%s should be a directory, but it is not' % root_dir)
     container_file = op.join(root_dir, CONTAINER_FILE_NAME)
     if op.exists(container_file):
         with open(container_file) as reader:
             this_container = pickle.load(reader)
     else:
         this_container = generate_container_dict(
             title=op.basename(root_dir),
             dateAdded=datetime2prtime(datetime.datetime.fromtimestamp(op.getmtime(root_dir))),
             lastModified=datetime2prtime(datetime.datetime.fromtimestamp(op.getctime(root_dir))))
     ff_children = []
     fs_children = [op.join(root_dir, fn) for fn in os.listdir(root_dir)]
     fs_children.sort(key=op.getmtime)
     for fs_child in fs_children:
         if op.isdir(fs_child):
             ff_children.append(read_container(fs_child))
         elif fs_child.endswith('.ffurl'):
             with open(fs_child) as reader:
                 bookmark = pickle.load(reader)
                 update_title(fs_child, bookmark)
                 ff_children.append(bookmark)
     this_container['children'] = ff_children
     update_title(root_dir, this_container)
     return this_container
Example #28
    def __init__(self, pid):

        self.pid = pid
        self.running = True
        self.ended_datetime = None

        # Mapping of each status_fields to value from the status file.
        # Initialize fields to zero in case info() is called.
        self.status = {field: 0 for field in self.status_fields}

        self.path = path = P.join(PROC_DIR, str(pid))
        if not P.exists(path):
            raise NoProcessFound(pid)

        self.status_path = P.join(path, 'status')

        # Get the command that started the process
        with open(P.join(path, 'cmdline'), encoding='utf-8') as f:
            cmd = f.read()
            # args are separated by \x00 (Null byte)
            self.command = cmd.replace('\x00', ' ').strip()

            if self.command == '':
                # Some processes (such as kworker) have nothing in cmdline, read comm instead
                with open(P.join(path, 'comm')) as comm_file:
                    self.command = self.executable = comm_file.read().strip()

            else:
                # Just use 1st arg instead of reading comm
                self.executable = self.command.split()[0]

        # Get the start time (/proc/PID file creation time)
        self.created_datetime = datetime.fromtimestamp(P.getctime(path))

        self.check()
Example #29
 def updateGames(self):
     files = [f for f in listdir(self.gamesPath) if isfile(join(self.gamesPath, f))]
     self.gameSem.acquire()
     self.gameList = {}
     for f in files:
         self.gameList[f] = getctime(join(self.gamesPath, f))
     self.gameSem.release()
Example #30
 def fillFileInfo(self, name):
     if os.path.splitext(name)[1] in ('.wav', '.pcm'):
         fileName = name.decode("GBK").encode("utf8")
         fullName = join(self.path, fileName)
         fileSize = getsize(unicode(fullName, 'utf8'))
         createTime = time.ctime(getctime(unicode(fullName, 'utf8')))
         self.files.append(FileInfo(fileName, fullName, fileSize, createTime))
Example #31
# Python program to retrieve file properties.

from os import path
import time
# return current file path
print('File         :', __file__)
# return access time of file
print('Access time  :', time.ctime(path.getatime(__file__)))
# return Modified time of file
print('Modified time:', time.ctime(path.getmtime(__file__)))
# return the system's ctime: on some systems (like Unix) the time of the last metadata change, on others (like Windows) the creation time
print('Change time  :', time.ctime(path.getctime(__file__)))
# return size of file
print('Size         :', path.getsize(__file__))
# return the absolute path of the file
print('Absolute path:', path.abspath(__file__))
# return the containing directory's name
print('Directory    :', path.dirname(__file__))
# return the base name of pathname path
print('Base name    :', path.basename(__file__))
# return True if the path exists
print('Exists       :', path.exists(__file__))
# return True if path is an existing regular file
print('Is file      :', path.isfile(__file__))
# return True if path is an existing directory
print('Is directory :', path.isdir(__file__))
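One caveat for the "Change time" line above: on Unix, getctime reports metadata-change time, not creation time. A sketch of a best-effort creation time, assuming st_birthtime where the platform provides it (macOS and some BSDs; generally absent on Linux):

import os
import time

def creation_time(filepath):
    """Best-effort creation time: st_birthtime where available, else ctime."""
    st = os.stat(filepath)
    return getattr(st, 'st_birthtime', st.st_ctime)

print('Created      :', time.ctime(creation_time(__file__)))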



Example #32
    def import_mesh(self, update=False):
        curr_asset_collection, _ = H.create_asset_collection(
            self.context, self.asset_name)

        H.set_active_collection(self.context, self.asset_name)

        # register the mesh in scene variable
        self.scn_asset = self.context.scene.lm_asset_list.add()
        self.scn_asset.name = self.asset_name
        self.scn_asset.collection = curr_asset_collection
        self.scn_asset.asset_root = path.dirname(self.meshes[0].path)

        global_import_date = 0.0

        for m in self.meshes:
            if not m.is_valid:
                self.log.info(
                    'Mesh "{}" doesn\'t exist or file format "{}" is not compatible'
                    .format(m.name, m.ext))
                continue

            self.update_json_values(m, self.scn_asset)

            mesh_path = m.path
            file = m.file
            ext = m.ext
            name = m.name

            # Store the list of material in the blender file before importing the mesh
            initial_scene_materials = list(bpy.data.materials)

            if update:
                self.log.info('Updating file "{}" : {}'.format(
                    file, time.ctime(path.getmtime(mesh_path))))
            else:
                self.log.info('Importing mesh "{}"'.format(name))

            m.import_mesh()

            # Store the list of material in the blender file after importing the mesh
            new_scene_materials = list(bpy.data.materials)

            # Get the imported materials
            self.imported_materials[name] = H.get_different_items(
                initial_scene_materials, new_scene_materials)

            self.log.info('{} new materials imported'.format(
                len(self.imported_materials[name])))
            for mat in self.imported_materials[name]:
                self.log.info('		"{}"'.format(mat.name))

            # Feed scene mesh list
            curr_mesh_list = self.scn_asset.mesh_list.add()
            curr_mesh_list.name = name
            curr_mesh_list.file_path = mesh_path
            curr_mesh_list.import_date = path.getmtime(mesh_path)
            curr_mesh_list.file_size = path.getsize(mesh_path)

            global_import_date += path.getctime(mesh_path)

            # Updating Materials
            for o in curr_asset_collection.objects:
                curr_mesh_object_list = curr_mesh_list.mesh_object_list.add()
                curr_mesh_object_list.mesh_name = o.name.lower()
                curr_mesh_object_list.mesh = o

                # use a distinct loop variable so the outer mesh loop's `m` is not shadowed
                for slot in o.material_slots:

                    if slot.material.name not in self.scn_asset.material_list:
                        material_list = self.scn_asset.material_list.add()
                        material_list.name = slot.material.name
                        material_list.material = slot.material

                        material_list = curr_mesh_list.material_list.add()
                        material_list.name = slot.material.name
                        material_list.material = slot.material

                    curr_mesh_material_list = curr_mesh_object_list.material_list.add()
                    curr_mesh_material_list.name = slot.material.name
                    curr_mesh_material_list.material = slot.material

        # Set scene import date
        self.scn_asset.import_date = global_import_date

        # Feed assets
        self.asset = self.get_asset()

        # Add newly imported asset to renderqueue
        bpy.ops.scene.lm_add_asset_to_render_queue(asset_name=self.asset_name)
Example #33

# Cleans the list of extraneous punctuation for output
def beauty_output(arg_list):
    return '\n'.join(arg_list)

# "Если файл для пользователя уже существует,
# то существующий файл переименовать, добавив в него время составления
# этого старого отчёта в формате "old_Antonette_2020-09-23T15:25.txt""
if len(listdir()) > 0:
    for file in listdir():
        if 'old_' not in file:
            # The assignment asks for the time in HH:MM format, but Windows
            # does not allow the ':' character in file or folder names.
            date = dt.fromtimestamp(getctime(file)).strftime('%Y-%m-%dT%H_%M')
            new_name = f"old_{file[:-4]}_{date}.txt"
            try:
                rename(file, new_name)
                # Not required by the assignment, but I added the ability to
                # refresh the reports multiple times. For some reason, though,
                # the reports only update once because of FileExistsError,
                # even though I checked that the old and new names differ when
                # renaming. The official documentation for rename and the
                # forums did not answer my question about this problem.
            except FileExistsError:
                print(file, '- An old version of this file already exists.')

for user in users:
Example #34
    def infos_dataset(self, source_path, dico_dataset, txt=dict(), tipo=None):
        """Use OGR functions to extract basic informations.

        source_path = path to the File Geodatabase Esri
        dico_dataset = dictionary for global informations
        tipo = format
        txt = dictionary of text in the selected language
        """
        dico_dataset["format"] = tipo

        # opening GDB
        try:
            driver = ogr.GetDriverByName(str("OpenFileGDB"))
            src = driver.Open(source_path, 0)
            print(driver.GetName())
            # print(type(src), dir(src.GetDriver()), len(dir(src)))
            # src = gdal.OpenEx(source_path, 0)  # GDAL driver
            # print(type(src), dir(src), len(dir(src)))
            if not tipo:
                dico_dataset["format"] = driver.GetName()
            else:
                dico_dataset["format"] = tipo
                pass
        except Exception as e:
            logger.error(e)
            youtils.erratum(dico_dataset, source_path, "err_corrupt")
            self.alert = self.alert + 1
            return None

        # GDB name and parent folder
        try:
            dico_dataset["name"] = path.basename(src.GetName())
            dico_dataset["folder"] = path.dirname(src.GetName())
        except AttributeError as err:
            logger.warning(err)
            dico_dataset["name"] = path.basename(source_path)
            dico_dataset["folder"] = path.dirname(source_path)
        # layers count and names
        dico_dataset["layers_count"] = src.GetLayerCount()
        li_layers_names = []
        li_layers_idx = []
        dico_dataset["layers_names"] = li_layers_names
        dico_dataset["layers_idx"] = li_layers_idx

        # cumulated size
        dico_dataset["total_size"] = youtils.sizeof(source_path)

        # global dates
        crea, up = path.getctime(source_path), path.getmtime(source_path)
        dico_dataset["date_crea"] = strftime("%Y/%m/%d", localtime(crea))
        dico_dataset["date_actu"] = strftime("%Y/%m/%d", localtime(up))
        # total fields count
        total_fields = 0
        dico_dataset["total_fields"] = total_fields
        # total objects count
        total_objs = 0
        dico_dataset["total_objs"] = total_objs
        # parsing layers
        for layer_idx in range(src.GetLayerCount()):
            # dictionary where will be stored informations
            dico_layer = OrderedDict()
            # parent GDB
            dico_layer["src_name"] = path.basename(src.GetName())
            # getting layer object
            layer = src.GetLayerByIndex(layer_idx)
            # layer globals
            li_layers_names.append(layer.GetName())
            dico_layer["title"] = georeader.get_title(layer)
            li_layers_idx.append(layer_idx)

            # features
            layer_feat_count = layer.GetFeatureCount()
            dico_layer["num_obj"] = layer_feat_count
            if layer_feat_count == 0:
                """ if layer doesn't have any object, return an error """
                dico_layer["error"] = "err_nobjet"
                self.alert = self.alert + 1
            else:
                pass

            # fields
            layer_def = layer.GetLayerDefn()
            dico_layer["num_fields"] = layer_def.GetFieldCount()
            dico_layer["fields"] = georeader.get_fields_details(layer_def)

            # geometry type
            dico_layer["type_geom"] = georeader.get_geometry_type(layer)

            # SRS
            srs_details = georeader.get_srs_details(layer, txt)
            dico_layer["srs"] = srs_details[0]
            dico_layer["epsg"] = srs_details[1]
            dico_layer["srs_type"] = srs_details[2]

            # spatial extent
            extent = georeader.get_extent_as_tuple(layer)
            dico_layer["xmin"] = extent[0]
            dico_layer["xmax"] = extent[1]
            dico_layer["ymin"] = extent[2]
            dico_layer["ymax"] = extent[3]

            # storing layer into the GDB dictionary
            dico_dataset["{0}_{1}".format(
                layer_idx, dico_layer.get("title"))] = dico_layer
            # summing fields number
            total_fields += dico_layer.get("num_fields", 0)
            # summing objects number
            total_objs += dico_layer.get("num_obj", 0)
            # deleting dictionary to ensure having cleared space
            del dico_layer
        # storing fileds and objects sum
        dico_dataset["total_fields"] = total_fields
        dico_dataset["total_objs"] = total_objs

        # warnings messages
        if self.alert:
            dico_dataset["err_gdal"] = gdal_err.err_type, gdal_err.err_msg
        else:
            pass
        # clean exit
        del src
Example #35
 def created_time(self, name):
     return datetime.fromtimestamp(path.getctime(self.path(name)))
Example #36
    def __init__(self, layerpath, dico_layer, tipo, txt=''):
        u""" Uses OGR functions to extract basic informations about
        geographic vector file (handles shapefile or MapInfo tables)
        and store into dictionaries.

        layerpath = path to the geographic file
        dico_layer = dictionary for global informations
        dico_fields = dictionary for the fields' informations
        li_fieds = ordered list of fields
        tipo = format
        text = dictionary of text in the selected language

        """
        # handling ogr specific exceptions
        errhandler = gdal_err.handler
        gdal.PushErrorHandler(errhandler)
        # gdal.UseExceptions()
        ogr.UseExceptions()
        self.alert = 0

        # changing working directory to layer folder
        chdir(path.dirname(layerpath))

        # raising corrupt files
        try:
            source = ogr.Open(layerpath)  # OGR driver
        except Exception as e:
            logging.error(e)
            self.alert = self.alert + 1
            youtils.erratum(dico_layer, layerpath, u'err_corrupt')
            dico_layer['err_gdal'] = gdal_err.err_type, gdal_err.err_msg
            return None

        # raising incompatible files
        if not source:
            u""" if file is not compatible """
            self.alert += 1
            dico_layer['err_gdal'] = gdal_err.err_type, gdal_err.err_msg
            youtils.erratum(dico_layer, layerpath, u'err_nobjet')
            return None
        else:
            layer = source.GetLayer()  # get the layer
            pass

        # dataset name, title and parent folder
        try:
            dico_layer['name'] = path.basename(layerpath)
            dico_layer['folder'] = path.dirname(layerpath)
        except AttributeError as e:
            dico_layer['name'] = path.basename(layer.GetName())
            dico_layer['folder'] = path.dirname(layer.GetName())
        dico_layer['title'] = dico_layer.get('name')[:-4]\
                                        .replace('_', ' ')\
                                        .capitalize()

        # dependencies and total size
        dependencies = youtils.list_dependencies(layerpath, "auto")
        dico_layer[u'dependencies'] = dependencies
        dico_layer[u"total_size"] = youtils.sizeof(layerpath, dependencies)
        # Getting basic dates
        crea, up = path.getctime(layerpath), path.getmtime(layerpath)
        dico_layer[u'date_crea'] = strftime('%Y/%m/%d', localtime(crea))
        dico_layer[u'date_actu'] = strftime('%Y/%m/%d', localtime(up))

        # features
        layer_feat_count = layer.GetFeatureCount()
        dico_layer['num_obj'] = layer_feat_count
        if layer_feat_count == 0:
            u""" if layer doesn't have any object, return an error """
            self.alert += 1
            youtils.erratum(dico_layer, layerpath, u'err_nobjet')
            return None
        else:
            pass

        # fields
        layer_def = layer.GetLayerDefn()
        dico_layer['num_fields'] = layer_def.GetFieldCount()
        dico_layer['fields'] = georeader.get_fields_details(layer_def)

        # geometry type
        dico_layer[u'type_geom'] = georeader.get_geometry_type(layer)

        # SRS
        srs_details = georeader.get_srs_details(layer, txt)
        dico_layer[u'srs'] = srs_details[0]
        dico_layer[u'EPSG'] = srs_details[1]
        dico_layer[u'srs_type'] = srs_details[2]

        # spatial extent
        extent = georeader.get_extent_as_tuple(layer)
        dico_layer[u'Xmin'] = extent[0]
        dico_layer[u'Xmax'] = extent[1]
        dico_layer[u'Ymin'] = extent[2]
        dico_layer[u'Ymax'] = extent[3]

        # warnings messages
        if self.alert:
            dico_layer['err_gdal'] = gdal_err.err_type, gdal_err.err_msg
        else:
            pass

        # safe exit
        del source
Example #37
def directory_index(req, path):
    """Returns directory index as html page."""
    if not isdir(path):
        log.error(
            "Only directory_index can be send with directory_index handler. "
            "`%s' is not directory.", path)
        raise HTTPException(HTTP_INTERNAL_SERVER_ERROR)

    index = os.listdir(path)
    if req.document_root != path[:-1]:
        index.append("..")  # parent directory

    index.sort()

    diruri = req.uri.rstrip('/')
    content = ("<!DOCTYPE html>\n"
               "<html>\n"
               " <head>\n"
               "  <title>Index of %s</title>\n"
               '  <meta http-equiv="content-type" '
               'content="text/html; charset=utf-8"/>\n'
               "  <style>\n"
               "   body { width: 98%%; margin: auto; }\n"
               "   table { font: 90%% monospace; text-align: left; }\n"
               "   td, th { padding: 0 1em 0 1em; }\n"
               "   .size { text-align:right; white-space:pre; }\n"
               "  </style>\n"
               " </head>\n"
               " <body>\n"
               "  <h1>Index of %s</h1>\n"
               "  <hr>\n"
               "  <table>\n"
               "   <tr><th>Name</th><th>Last Modified</th>"
               "<th class=\"size\">Size</th><th>Type</th></tr>\n" %
               (diruri, diruri))

    for item in index:
        # dot files
        if item[0] == "." and item[1] != ".":
            continue
        # backup files (~)
        if item[-1] == "~":
            continue

        fpath = "%s/%s" % (path, item)
        if not os.access(fpath, os.R_OK):
            continue

        fname = item + ('/' if isdir(fpath) else '')
        ftype = ""

        if isfile(fpath):
            # pylint: disable=unused-variable
            (ftype, encoding) = mimetypes.guess_type(fpath)
            if not ftype:
                ftype = 'application/octet-stream'
            size = "%.1f%s" % hbytes(getsize(fpath))
        elif isdir(fpath):
            ftype = "Directory"
            size = "-"
        else:
            size = ftype = '-'

        content += (
            "   <tr><td><a href=\"%s\">%s</a></td><td>%s</td>"
            "<td class=\"size\">%s</td><td>%s</td></tr>\n" %
            (diruri + '/' + fname, fname,
             strftime("%d-%b-%Y %H:%M", gmtime(getctime(fpath))), size, ftype))

    content += ("  </table>\n" "  <hr>\n")

    if req.debug:
        content += ("  <small><i>%s / Poor WSGI for Python, "
                    "webmaster: %s </i></small>\n" %
                    (req.server_software, req.server_admin))
    else:
        content += ("  <small><i>webmaster: %s </i></small>\n" %
                    req.server_admin)

    content += ("  </body>\n" "</html>")

    return content
Example #38
async def lst(event):
    if event.fwd_from:
        return
    cat = event.pattern_match.group(1)
    path = cat if cat else getcwd()
    if not exists(path):
        await event.edit(
            f"There is no such directory or file with the name `{cat}` check again!"
        )
        return
    if isdir(path):
        if cat:
            msg = f"**Folders and Files in `{path}`** :\n\n"
        else:
            msg = "**Folders and Files in Current Directory** :\n\n"
        lists = listdir(path)
        files = ""
        folders = ""
        for contents in os_sorted(lists):
            catpath = path + "/" + contents
            if not isdir(catpath):
                size = stat(catpath).st_size
                if contents.endswith((".mp3", ".flac", ".wav", ".m4a")):
                    files += "🎵 "
                elif contents.endswith(".opus"):
                    files += "🎙 "
                elif contents.endswith(
                    (".mkv", ".mp4", ".webm", ".avi", ".mov", ".flv")):
                    files += "🎞 "
                elif contents.endswith(
                    (".zip", ".tar", ".tar.gz", ".rar", ".7z", ".xz")):
                    files += "🗜 "
                elif contents.endswith((".jpg", ".jpeg", ".png", ".gif",
                                        ".bmp", ".ico", ".webp")):
                    files += "🖼 "
                elif contents.endswith((".exe", ".deb")):
                    files += "⚙️ "
                elif contents.endswith((".iso", ".img")):
                    files += "💿 "
                elif contents.endswith((".apk", ".xapk")):
                    files += "📱 "
                elif contents.endswith(".py"):
                    files += "🐍 "
                else:
                    files += "📄 "
                files += f"`{contents}` (__{humanbytes(size)}__)\n"
            else:
                folders += f"📁 `{contents}`\n"
        msg = msg + folders + files if files or folders else msg + "__empty path__"
    else:
        size = stat(path).st_size
        msg = "The details of given file :\n\n"
        if path.endswith((".mp3", ".flac", ".wav", ".m4a")):
            mode = "🎵 "
        elif path.endswith(".opus"):
            mode = "🎙 "
        elif path.endswith((".mkv", ".mp4", ".webm", ".avi", ".mov", ".flv")):
            mode = "🎞 "
        elif path.endswith((".zip", ".tar", ".tar.gz", ".rar", ".7z", ".xz")):
            mode = "🗜 "
        elif path.endswith(
            (".jpg", ".jpeg", ".png", ".gif", ".bmp", ".ico", ".webp")):
            mode = "🖼 "
        elif path.endswith((".exe", ".deb")):
            mode = "⚙️ "
        elif path.endswith((".iso", ".img")):
            mode = "💿 "
        elif path.endswith((".apk", ".xapk")):
            mode = "📱 "
        elif path.endswith(".py"):
            mode = "🐍 "
        else:
            mode = "📄 "
        time1 = time.ctime(getctime(path))
        time2 = time.ctime(getmtime(path))
        time3 = time.ctime(getatime(path))
        msg += f"**Location :** `{path}`\n"
        msg += f"**Icon :** `{mode}`\n"
        msg += f"**Size :** `{humanbytes(size)}`\n"
        msg += f"**Creation Time:** `{time1}`\n"
        msg += f"**Last Modified Time:** `{time2}`\n"
        msg += f"**Last Accessed Time:** `{time3}`"

    if len(msg) > MAX_MESSAGE_SIZE_LIMIT:
        with io.BytesIO(str.encode(msg)) as out_file:
            out_file.name = "ls.txt"
            await event.client.send_file(
                event.chat_id,
                out_file,
                force_document=True,
                allow_cache=False,
                caption=path,
            )
            await event.delete()
    else:
        await event.edit(msg)
Example #39
 def getMetaData(self, filename):
     ts = op.getctime(filename)
     # build the timestamp directly in UTC instead of tagging local time as UTC
     return datetime.fromtimestamp(ts, tz=pytz.utc)
Example #40
            thumbnail = path.join(ImageUtils.get_root(), 'images',
                                  'nothumb.png')
        try:
            self.add_image(newimage, user, url, dims[0], dims[1], size,
                           thumbnail, 'image', album_id, post, comment)
        except Exception, e:
            self.debug('add_existing_image: failed: %s' % str(e))
            return

        if subdir == '' and album_id == -1:  # Not an album
            # Add post
            p = Post()
            p.id = post
            p.author = user
            if comment == None: p.url = url
            p.created = path.getctime(oldpath)
            p.subreddit = ''
            p.title = ''
            try:
                self.add_post(p, legacy=1)
            except Exception, e:
                self.debug('add_existing_image: create post failed: %s' %
                           str(e))

            # Add comment
            if comment != None:
                c = Comment()
                c.id = comment
                c.post_id = post
                c.author = user
                if comment != None: c.body = url
Example #41
def sync_entry(self, meta, cls, datafunc, metafunc):
    task = meta["task"]
    path = meta["path"]
    root = meta["root"]
    target = meta["target"]
    config = meta["config"]
    logging_config = config["log"]
    ignore_cache = meta["ignore_cache"]
    logger = sync_logging.get_sync_logger(logging_config)

    max_retries = get_max_retries(logger, meta)

    lock = None
    logger.info("synchronizing " + cls + ". path = " + path)

    if is_unicode_encode_error_path(path):
        abspath = os.path.abspath(path)
        path = os.path.dirname(abspath)
        utf8_escaped_abspath = abspath.encode('utf8', 'surrogateescape')
        b64_path_str = base64.b64encode(utf8_escaped_abspath)

        unicode_error_filename = 'irods_UnicodeEncodeError_' + str(
            b64_path_str.decode('utf8')).rstrip('/')

        logger.warning(
            'sync_entry raised UnicodeEncodeError while syncing path:' +
            str(utf8_escaped_abspath))

        meta['path'] = path
        meta['b64_path_str'] = b64_path_str
        meta['unicode_error_filename'] = unicode_error_filename

        sync_key = str(b64_path_str.decode('utf8')) + ":" + target
    else:
        sync_key = path + ":" + target

    try:
        r = get_redis(config)
        lock = redis_lock.Lock(r, "sync_" + cls + ":" + sync_key)
        lock.acquire()

        if not ignore_cache:
            sync_time = get_with_key(r, sync_time_key, sync_key, float)
        else:
            sync_time = None

        mtime = meta.get("mtime")
        if mtime is None:
            mtime = getmtime(path)

        ctime = meta.get("ctime")
        if ctime is None:
            ctime = getctime(path)

        if sync_time is not None and mtime < sync_time and ctime < sync_time:
            logger.info("succeeded_" + cls + "_has_not_changed",
                        task=task,
                        path=path)
        else:
            t = datetime.now().timestamp()
            logger.info("synchronizing " + cls,
                        path=path,
                        t0=sync_time,
                        t=t,
                        ctime=ctime)
            meta2 = meta.copy()
            if path == root:
                if 'unicode_error_filename' in meta:
                    target2 = join(target, meta['unicode_error_filename'])
                else:
                    target2 = target
            else:
                if meta.get('s3_keypair') is not None:
                    # Strip prefix from S3 path
                    prefix = meta['s3_prefix']
                    reg_path = path[path.index(prefix) +
                                    len(prefix):].strip('/')
                    # Construct S3 "logical path"
                    target2 = join(target, reg_path)
                    # Construct S3 "physical path" as: /bucket/objectname
                    meta2['path'] = '/' + join(root, path)
                else:
                    target2 = join(target, relpath(path, start=root))
            meta2["target"] = target2
            if sync_time is None or mtime >= sync_time:
                datafunc(meta2, logger, True)
                logger.info("succeeded", task=task, path=path)
            else:
                metafunc(meta2, logger)
                logger.info("succeeded_metadata_only", task=task, path=path)
            set_with_key(r, sync_time_key, sync_key, str(t))
    except Exception as err:
        retry_countdown = get_delay(logger, meta, self.request.retries + 1)
        raise self.retry(max_retries=max_retries,
                         exc=err,
                         countdown=retry_countdown)
    finally:
        if lock is not None:
            lock.release()
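The freshness rule driving sync_entry can be shown in isolation; a minimal sketch (the function and its return values are illustrative, not from the snippet) of how mtime and ctime are compared against the recorded sync time:

def choose_sync_action(sync_time, mtime, ctime):
    # nothing changed since the last recorded sync -> skip entirely
    if sync_time is not None and mtime < sync_time and ctime < sync_time:
        return "skip"
    # data modified (or never synced before) -> full data sync
    if sync_time is None or mtime >= sync_time:
        return "data"
    # only the change time moved (e.g. permissions) -> metadata-only sync
    return "metadata"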
Example #42
0
def get_remote_file(fname, directory=None, force_download=False, auto=True):
    """ Get a the filename for the local version of a file from the web

    Parameters
    ----------
    fname : str
        The relative filename on the remote data repository to download.
        These correspond to paths on
        ``https://github.com/imageio/imageio-binaries/``.
    directory : str | None
        The directory where the file will be cached if a download was
        required to obtain the file. By default, the appdata directory
        is used. This is also the first directory that is checked for
        a local version of the file. If the directory does not exist,
        it will be created.
    force_download : bool | str
        If True, the file will be downloaded even if a local copy exists
        (and this copy will be overwritten). Can also be a YYYY-MM-DD date
        to ensure a file is up-to-date (modified date of a file on disk,
        if present, is checked).
    auto : bool
        Whether to auto-download the file if it's not present locally. Default
        True. If False and a download is needed, raises NeedDownloadError.

    Returns
    -------
    fname : str
        The path to the file on the local system.
    """
    _url_root = 'https://github.com/imageio/imageio-binaries/raw/master/'
    url = _url_root + fname
    nfname = op.normcase(fname)  # convert to native
    # Get dirs to look for the resource
    given_directory = directory
    directory = given_directory or appdata_dir('imageio')
    dirs = resource_dirs()
    dirs.insert(0, directory)  # Given dir has preference
    # Try to find the resource locally
    for dir in dirs:
        filename = op.join(dir, nfname)
        if op.isfile(filename):
            if not force_download:  # we're done
                if given_directory and given_directory != dir:
                    filename2 = os.path.join(given_directory, nfname)
                    # Make sure the output directory exists
                    if not op.isdir(op.dirname(filename2)):
                        os.makedirs(op.abspath(op.dirname(filename2)))
                    shutil.copy(filename, filename2)
                    return filename2
                return filename
            if isinstance(force_download, string_types):
                ntime = time.strptime(force_download, '%Y-%m-%d')
                ftime = time.gmtime(op.getctime(filename))
                if ftime >= ntime:
                    if given_directory and given_directory != dir:
                        filename2 = os.path.join(given_directory, nfname)
                        # Make sure the output directory exists
                        if not op.isdir(op.dirname(filename2)):
                            os.makedirs(op.abspath(op.dirname(filename2)))
                        shutil.copy(filename, filename2)
                        return filename2
                    return filename
                else:
                    print('File older than %s, updating...' % force_download)
                    break
    
    # If we get here, we're going to try to download the file
    if os.getenv('IMAGEIO_NO_INTERNET', '').lower() in ('1', 'true', 'yes'):
        raise InternetNotAllowedError('Will not download resource from the '
                                      'internet because environment variable '
                                      'IMAGEIO_NO_INTERNET is set.')
    
    # Can we proceed with auto-download?
    if not auto:
        raise NeedDownloadError()
    
    # Get filename to store to and make sure the dir exists
    filename = op.join(directory, nfname)
    if not op.isdir(op.dirname(filename)):
        os.makedirs(op.abspath(op.dirname(filename)))
    # let's go get the file
    if os.getenv('CONTINUOUS_INTEGRATION', False):  # pragma: no cover
        # On Travis, we retry a few times ...
        for i in range(2):
            try:
                _fetch_file(url, filename)
                return filename
            except IOError:
                time.sleep(0.5)
        else:
            _fetch_file(url, filename)
            return filename
    else:  # pragma: no cover
        _fetch_file(url, filename)
        return filename
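A possible call pattern for get_remote_file, where the resource path is an assumed example: passing a date string as force_download re-downloads only when the on-disk copy's getctime() predates that date.

# hypothetical usage; 'images/chelsea.png' is an assumed resource path
local_path = get_remote_file('images/chelsea.png', force_download='2020-01-01')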
Example #43
0
        break
    except:
        print("waiting for json folder to be created")

print("found the folder")



while True:
    sleep(0.09)
    print(listdir())
    #delete old files
    newest_file = ""
    largest_time = 0
    for file in listdir():
        time_of_file = path.getctime(file)

        # if this file is newer than the current newest, remember it
        if time_of_file > largest_time:
            newest_file = file
            largest_time = time_of_file

        # delete the file if it is older than three seconds
        if time() - time_of_file > 3:
            remove(file)
    
    data = []
    key_codes = [17,23,31,36,17,38]
    kb._listener.start_if_necessary()
    try:
Example #44
0
    def infos_dataset(
        self, source_path: str, dico_dataset, txt: dict = dict(), tipo=None
    ):
        """Use OGR functions to extract basic informations about
        geographic vector file (handles shapefile or MapInfo tables)
        and store into dictionaries.

        source_path = path to the geographic file
        dico_dataset = dictionary for global informations
        tipo = format
        txt = dictionary of text in the selected language
        """
        # changing working directory to layer folder
        # chdir(path.dirname(source_path))

        # raising corrupt files
        try:
            src = gdal.OpenEx(source_path, 0)  # GDAL driver
            if not tipo:
                dico_dataset["format"] = src.GetDriver().LongName
            else:
                dico_dataset["format"] = tipo
                pass
        except Exception as e:
            logger.error(e)
            self.alert = self.alert + 1
            dico_dataset["format"] = tipo
            youtils.erratum(dico_dataset, source_path, "err_corrupt")
            dico_dataset["err_gdal"] = gdal_err.err_type, gdal_err.err_msg
            return 0

        # raising incompatible files
        if not src:
            """ if file is not compatible """
            self.alert += 1
            dico_dataset["err_gdal"] = gdal_err.err_type, gdal_err.err_msg
            youtils.erratum(dico_dataset, source_path, "err_nobjet")
            return 0
        else:
            layer = src.GetLayer()  # get the layer
            pass

        # dataset name, title and parent folder
        try:
            dico_dataset["name"] = path.basename(source_path)
            dico_dataset["folder"] = path.dirname(source_path)
        except AttributeError as err:
            logger.warning(err)
            dico_dataset["name"] = path.basename(layer.GetName())
            dico_dataset["folder"] = path.dirname(layer.GetName())
        dico_dataset["title"] = (
            dico_dataset.get("name")[:-4].replace("_", " ").capitalize()
        )

        # dependencies and total size
        dependencies = youtils.list_dependencies(source_path, "auto")
        dico_dataset["dependencies"] = dependencies
        dico_dataset["total_size"] = youtils.sizeof(source_path, dependencies)
        # Getting basic dates
        crea, up = path.getctime(source_path), path.getmtime(source_path)
        dico_dataset["date_crea"] = strftime("%Y/%m/%d", localtime(crea))
        dico_dataset["date_actu"] = strftime("%Y/%m/%d", localtime(up))

        # features
        layer_feat_count = layer.GetFeatureCount()
        dico_dataset["num_obj"] = layer_feat_count
        if layer_feat_count == 0:
            """ if layer doesn't have any object, return an error """
            self.alert += 1
            youtils.erratum(dico_dataset, source_path, "err_nobjet")
            return 0
        else:
            pass

        # fields
        layer_def = layer.GetLayerDefn()
        dico_dataset["num_fields"] = layer_def.GetFieldCount()
        dico_dataset["fields"] = georeader.get_fields_details(layer_def)

        # geometry type
        dico_dataset["type_geom"] = georeader.get_geometry_type(layer)

        # SRS
        srs_details = georeader.get_srs_details(layer, txt)
        dico_dataset["srs"] = srs_details[0]
        dico_dataset["epsg"] = srs_details[1]
        dico_dataset["srs_type"] = srs_details[2]

        # spatial extent
        extent = georeader.get_extent_as_tuple(layer)
        dico_dataset["xmin"] = extent[0]
        dico_dataset["xmax"] = extent[1]
        dico_dataset["ymin"] = extent[2]
        dico_dataset["ymax"] = extent[3]

        # warnings messages
        if self.alert:
            dico_dataset["err_gdal"] = gdal_err.err_type, gdal_err.err_msg
        else:
            pass

        # clean & exit
        del src
        return 1, dico_dataset
Example #45
0
def getctime(filename):
    return time.ctime(path.getctime(filename))
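Example #46
0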
# this script takes a directory that needs to be cleaned as the command line argument
# Leaves files where they are if they are not standard, i.e. have no extension or have digits in the extension

from os import listdir, path, rename, mkdir
from time import ctime
from sys import argv

directory = argv[1]


def HasNumbers(string: str) -> bool:
    return any(i.isdigit() for i in string)


for file_name in listdir(directory):
    try:
        file_year = ctime(
            path.getctime(f'{directory}{file_name}')).split(' ')[-1]
        file_ext = file_name.split('.')[-1].upper()
        if HasNumbers(file_ext) or '.' not in file_name:
            continue
        if not path.exists(f'{directory}{file_ext}_{file_year}'):
            mkdir(f'{directory}{file_ext}_{file_year}')
        rename(f'{directory}{file_name}',
               f'{directory}{file_ext}_{file_year}/{file_name}')
    except IndexError as error:
        continue
    except OSError as error:
        print(error)
Example #47
0
    def get_img(self, request, ident, region, size, rotation, quality,
                target_fmt, base_uri):
        '''Get an Image.
        Args:
            request (Request):
                Forwarded by dispatch_request
            ident (str):
                The identifier portion of the IIIF URI syntax

        '''
        r = LorisResponse()
        r.set_acao(request, self.cors_regex)
        # ImageRequest's Parameter attributes, i.e. RegionParameter etc. are
        # decorated with @property and not constructed until they are first
        # accessed, which mean we don't have to catch any exceptions here.
        image_request = img.ImageRequest(ident, region, size, rotation,
                                         quality, target_fmt)

        self.logger.debug('Image Request Path: %s', image_request.request_path)

        if self.enable_caching:
            in_cache = image_request in self.img_cache
        else:
            in_cache = False

        try:
            # We need the info to check authorization,
            # ... still cheaper than always resolving as likely to be cached
            info = self._get_info(ident, request, base_uri)[0]
        except ResolverException as re:
            return NotFoundResponse(str(re))

        if self.authorizer and self.authorizer.is_protected(info):
            authed = self.authorizer.is_authorized(info, request)

            if authed['status'] != 'ok':
                # Images don't redirect, they just deny out
                r.status_code = 401
                return r

        if in_cache:
            fp, img_last_mod = self.img_cache[image_request]
            ims_hdr = request.headers.get('If-Modified-Since')
            # The stamp from the FS needs to be rounded using the same precision
            # as when we sent it, so for an accurate comparison turn it into
            # an http date and then parse it again :-( :
            img_last_mod = parse_date(http_date(img_last_mod))
            self.logger.debug("Time from FS (default, rounded): %s",
                              img_last_mod)
            self.logger.debug("Time from IMS Header (parsed): %s",
                              parse_date(ims_hdr))
            # ims_hdr = parse_date(ims_hdr) # catch parsing errors?
            if ims_hdr and parse_date(ims_hdr) >= img_last_mod:
                self.logger.debug('Sent 304 for %s ', fp)
                r.status_code = 304
                return r
            else:
                r.content_type = constants.FORMATS_BY_EXTENSION[target_fmt]
                r.status_code = 200
                r.last_modified = img_last_mod
                r.headers['Content-Length'] = path.getsize(fp)
                r.response = open(fp, 'rb')

                # hand the Image object its info
                info = self._get_info(ident, request, base_uri)[0]

                self._set_canonical_link(request=request,
                                         response=r,
                                         image_request=image_request,
                                         image_info=info)
                return r
        else:
            try:
                # 1. Get the info
                info = self._get_info(ident, request, base_uri)[0]

                # 2. Check that we can make the quality requested
                if image_request.quality not in info.profile.description[
                        'qualities']:
                    return BadRequestResponse(
                        '"%s" quality is not available for this image' %
                        (image_request.quality, ))

                # 3. Check if requested size is allowed
                if image_request.request_resolution_too_large(
                        max_size_above_full=self.max_size_above_full,
                        image_info=info):
                    return NotFoundResponse('Resolution not available')

                # 4. Redirect if appropriate
                if self.redirect_canonical_image_request:
                    if not image_request.is_canonical(info):
                        self.logger.debug(
                            'Attempting redirect to %s',
                            image_request.canonical_request_path,
                        )
                        r.headers[
                            'Location'] = image_request.canonical_request_path
                        r.status_code = 301
                        return r

                # 5. Make an image
                fp = self._make_image(image_request=image_request,
                                      image_info=info)

            except ResolverException as re:
                return NotFoundResponse(str(re))
            except TransformException as te:
                return ServerSideErrorResponse(te)
            except (RequestException, SyntaxException) as e:
                return BadRequestResponse(str(e))
            except ImageInfoException as ie:
                # 500s!
                # ImageInfoException is only raised when
                # ImageInfo.from_image_file() can't determine the format of the
                # source image. It results in a 500, but isn't necessarily a
                # developer error.
                return ServerSideErrorResponse(ie)
            except (CalledProcessError, IOError) as e:
                # CalledProcessError and IOError typically happen when there are
                # permissions problems with one of the files or directories
                # used by the transformer.
                msg = '''%s \n\nThis is likely a permissions problem, though it\'s
possible that there was a problem with the source file
(%s).''' % (str(e), info.src_img_fp)
                return ServerSideErrorResponse(msg)
        r.content_type = constants.FORMATS_BY_EXTENSION[target_fmt]
        r.status_code = 200
        r.last_modified = datetime.utcfromtimestamp(path.getctime(fp))
        r.headers['Content-Length'] = path.getsize(fp)
        self._set_canonical_link(request=request,
                                 response=r,
                                 image_request=image_request,
                                 image_info=info)
        r.response = open(fp, 'rb')

        if not self.enable_caching:
            r.call_on_close(lambda: unlink(fp))

        return r
Example #48
0
 def get_created_time(self, name):
     if self.keep_original:
         return super().get_created_time(name)
     return self._datetime_from_timestamp(
         getctime(self.get_alternate_compressed_path(name)))
Example #49
0
 def date_created(self):
     if self.___source is None or self.___source == "":
         raise NoDataException("No file not set")
     return ctime(getctime(self.___source))
Example #50
0
    def __init__(self, source_path, dico_dataset, tipo, txt=""):
        """Uses OGR functions to extract basic informations about
        geographic vector file (handles shapefile or MapInfo tables)
        and store into dictionaries.

        source_path = path to the DXF file
        dico_dataset = dictionary for global informations
        tipo = format
        text = dictionary of text in the selected language
        """
        # handling ogr specific exceptions
        errhandler = gdal_err.handler
        gdal.PushErrorHandler(errhandler)
        gdal.UseExceptions()
        self.alert = 0

        # changing working directory to layer folder
        chdir(path.dirname(source_path))

        # opening DXF
        try:
            # driver_dxf = ogr.GetDriverByName(str("DXF"))
            # dxf = driver_dxf.Open(source_path, 0)
            src = gdal.OpenEx(source_path, 0)
        except Exception as err:
            logging.error(err)
            youtils.erratum(dico_dataset, source_path, "err_corrupt")
            self.alert = self.alert + 1
            return None

        # raising incompatible files
        if not src:
            """if file is not compatible"""
            self.alert += 1
            dico_dataset["err_gdal"] = gdal_err.err_type, gdal_err.err_msg
            youtils.erratum(dico_dataset, source_path, "err_nobjet")
            return None
        else:
            layer = src.GetLayer()  # get the layer
            pass

        # DXF name and parent folder
        try:
            dico_dataset["name"] = path.basename(src.GetName())
            dico_dataset["folder"] = path.dirname(src.GetName())
        except AttributeError as err:
            logger.warning(err)
            dico_dataset["name"] = path.basename(source_path)
            dico_dataset["folder"] = path.dirname(source_path)

        # specific AutoDesk informations
        douxef = dxfgrabber.readfile(source_path)
        dico_dataset["version_code"] = douxef.dxfversion
        # see: http://dxfgrabber.readthedocs.org/en/latest/#Drawing.dxfversion
        if douxef.dxfversion == "AC1009":
            dico_dataset["version_name"] = "AutoCAD R12"
        elif douxef.dxfversion == "AC1015":
            dico_dataset["version_name"] = "AutoCAD R2000"
        elif douxef.dxfversion == "AC1018":
            dico_dataset["version_name"] = "AutoCAD R2004"
        elif douxef.dxfversion == "AC1021":
            dico_dataset["version_name"] = "AutoCAD R2007"
        elif douxef.dxfversion == "AC1024":
            dico_dataset["version_name"] = "AutoCAD R2010"
        elif douxef.dxfversion == "AC1027":
            dico_dataset["version_name"] = "AutoCAD R2013"
        else:
            dico_dataset["version_name"] = "douxef.dxfversion"

        # layers count and names
        dico_dataset["layers_count"] = src.GetLayerCount()
        li_layers_names = []
        li_layers_idx = []
        dico_dataset["layers_names"] = li_layers_names
        dico_dataset["layers_idx"] = li_layers_idx

        # dependencies and total size
        dependencies = youtils.list_dependencies(source_path, "auto")
        dico_dataset["dependencies"] = dependencies
        dico_dataset["total_size"] = youtils.sizeof(source_path, dependencies)
        # global dates
        crea, up = path.getctime(source_path), path.getmtime(source_path)
        dico_dataset["date_crea"] = strftime("%Y/%m/%d", localtime(crea))
        dico_dataset["date_actu"] = strftime("%Y/%m/%d", localtime(up))
        # total fields count
        total_fields = 0
        dico_dataset["total_fields"] = total_fields
        # total objects count
        total_objs = 0
        dico_dataset["total_objs"] = total_objs
        # parsing layers
        for layer_idx in range(src.GetLayerCount()):
            # dictionary where will be stored informations
            dico_layer = OrderedDict()
            dico_layer["src_name"] = dico_dataset.get("name")
            # getting layer object
            layer = src.GetLayerByIndex(layer_idx)
            # layer globals
            li_layers_names.append(layer.GetName())
            dico_layer["title"] = georeader.get_title(layer)
            li_layers_idx.append(layer_idx)
            # features
            layer_feat_count = layer.GetFeatureCount()
            dico_layer["num_obj"] = layer_feat_count
            if layer_feat_count == 0:
                """if layer doesn't have any object, return an error"""
                dico_layer["error"] = "err_nobjet"
                self.alert = self.alert + 1
            else:
                pass

            # fields
            layer_def = layer.GetLayerDefn()
            dico_layer["num_fields"] = layer_def.GetFieldCount()
            dico_layer["fields"] = georeader.get_fields_details(layer_def)

            # geometry type
            dico_layer["type_geom"] = georeader.get_geometry_type(layer)

            # SRS
            srs_details = georeader.get_srs_details(layer, txt)
            dico_layer["srs"] = srs_details[0]
            dico_layer["epsg"] = srs_details[1]
            dico_layer["srs_type"] = srs_details[2]

            # spatial extent
            extent = georeader.get_extent_as_tuple(layer)
            dico_layer["xmin"] = extent[0]
            dico_layer["xmax"] = extent[1]
            dico_layer["ymin"] = extent[2]
            dico_layer["ymax"] = extent[3]

            # storing layer into the GDB dictionary
            dico_dataset["{0}_{1}".format(
                layer_idx, dico_layer.get("title"))] = dico_layer
            # summing fields number
            total_fields += dico_layer.get("num_fields", 0)
            # summing objects number
            total_objs += dico_layer.get("num_obj", 0)
            # deleting dictionary to ensure having cleared space
            del dico_layer

        # storing fields and objects sums
        dico_dataset["total_fields"] = total_fields
        dico_dataset["total_objs"] = total_objs

        # warnings messages
        if self.alert:
            dico_dataset["err_gdal"] = gdal_err.err_type, gdal_err.err_msg
        else:
            pass

        # clean exit
        del src
Example #51
0
def GetStats(path: str) -> tuple:
    path = _convert_path(path)
    return (_get_dir_size(path), ospath.getatime(path),
            ospath.getctime(path), ospath.getmtime(path))
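Example #52
0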
def get_c_datecode(filename):
    return datetime.fromtimestamp(path.getctime(filename)).strftime('%Y%m%d')
Example #53
0
 def _get_ctime(self):
     try:
         return strftime("%m/%d/%Y %I:%M:%S %p",
                         localtime(getctime(self.file_name)))
     except Exception:
         return ""
Example #54
0
    def get(
        self,
        url,
        post=None,
        caching=None,
        is_ref_url=True,
        md5_file_cache=None,
        time_out=None,
        headers=None,
        detect_charsets=True,
        content_type=None,
        files=None,
    ):
        prefix = "local-file:"
        if url.startswith(prefix):
            with open(url[len(prefix):], "r") as fo:
                page = fo.read().decode("utf8")
                self.last_page = page
            return page

        if not url.startswith('http') and self.last_url:
            url = urllib.parse.urljoin(self.last_url, url)
        if caching is None:
            caching = self.caching
        url = url.replace('&amp;', '&')
        url = url.replace(' ', '%20')

        makedirs(self.dir_cache, mode=0o777, exist_ok=True)

        files = files or isinstance(post, dict) and post.pop('files__', None)
        post_urlencoded = urllib.parse.urlencode(post).encode(
            'utf-8') if post and isinstance(post, dict) else post

        try:
            file_cache = ''.join((
                self.dir_cache,
                md5((md5_file_cache
                     or url + (post_urlencoded or "")).encode()).hexdigest(),
                ("/" + url[url.find("//") + 2:].split("?", 2)[0]).replace(
                    "/", "_"),
                ".html",
            ))
        except Exception:
            file_cache = None

        caching = file_cache and caching and self.cache_timeout > 0

        from_cache = caching
        if caching:
            if not path.isfile(file_cache):
                from_cache = False
            else:
                diff_time = datetime.now() - datetime.fromtimestamp(
                    path.getctime(file_cache))
                from_cache = diff_time.total_seconds() < self.cache_timeout
        self.print("[cache]" if from_cache else "", url)
        self.error = None
        self.response = None
        if from_cache:
            with open(file_cache, "r") as f:
                page = f.read().encode('utf8')
        else:
            if self.time_sleep:
                v_time_sleep = min(1, abs(gauss(0, 1)) * self.time_sleep)
                sleep(v_time_sleep)
            if not headers:
                headers = {}
            if self.ref_url and 'Referer' not in headers:
                headers.update({"Referer": self.ref_url})
            if not self.last_url or urllib.parse.urlparse(
                    self.last_url).netloc != urllib.parse.urlparse(url).netloc:
                self.opener.addheaders = self._init_opener_headers
            if headers:
                h = dict(self.opener.addheaders)
                h.update(headers)
                self.opener.addheaders = list(h.items())

            if content_type == 'multipart/form-data' and post or files:
                post_urlencoded, multipart_headers = encode_multipart(
                    fields=post, files=files)
                headers.update(multipart_headers)

            try:
                if headers:
                    request = urllib.request.Request(url, headers=headers)
                else:
                    request = url

                time_start = datetime.utcnow()
                response = self.opener.open(
                    request,
                    post_urlencoded if post else None,
                    timeout=time_out or self.time_out,
                )
                if response.info().get("Content-Encoding", None) == "gzip":
                    buf = BytesIO(response.read())
                    page = GzipFile(fileobj=buf).read()
                else:
                    page = response.read()
                self.response = response
                self.time_response = datetime.utcnow() - time_start
                if self.verify_word and self.verify_word not in page:
                    raise NoVerifyWord("No verify word '%s', size page = %d" %
                                       (self.verify_word, len(page)))
            except Exception as err:
                self.error = err
                if self.assert_on_fail:
                    if self.proxer:
                        self.proxer.fail()
                    raise FailOnGetResponse(err)
                else:
                    traceback.print_exc()
                return

            try:
                if file_cache and caching:
                    cookie_write = True
                    if self.response.info().get("Content-Type").startswith(
                            "application/json"):
                        page = dumps(loads(page), indent=4)
                        cookie_write = False
                    if self.response.info().get("Content-Type").startswith(
                            "image/"):
                        cookie_write = False
                    with open(file_cache, "w") as f:
                        f.write(page.decode('utf8'))
                        if cookie_write:
                            f.write("\n\n" +
                                    dumps(self.get_cookies(), indent=4))
            except Exception:
                traceback.print_exc()
                self.print("[cache] ERROR: write to", file_cache)

            if self.proxer:
                if not self.error:
                    self.proxer.ok(self.time_response)
                else:
                    self.proxer.fail()

        if detect_charsets:
            matches = re.findall(
                r'charset=["\']?(?P<charset>[^"\'\s\.>;]{3,}\b)', str(page),
                re.IGNORECASE)
            if matches:
                charsets = [c.lower() for c in matches]
                if len(charsets) > 1 and len(set(charsets)) > 1:
                    self.print(
                        f'[WARNING] set multi charset values: {charsets}')
                charset = charsets[-1].lower()
            else:
                charset = 'utf-8'
            try:
                charset_detect = chardet.detect(page)
                if charset_detect and charset_detect['confidence'] > 0.98:
                    charset = charset_detect['encoding']
            except Exception as e:
                self.print('exception on charset detect:', str(e))
            if charset in ('utf-8', 'utf8'):
                page = page.decode('utf-8', 'replace')
            elif charset in ('windows-1251', 'cp1251'):
                page = page.decode('cp1251', 'replace')
            else:
                page = page.decode(charset, 'replace')

        self.last_page = page
        self.last_url = self.response.geturl() if self.response else url
        if is_ref_url:
            self.ref_url = self.last_url
        self.file_cache_clear()
        return page
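The from_cache test above is a time-to-live check built on path.getctime(); a minimal self-contained sketch of the same pattern, with illustrative names:

from datetime import datetime
from os import path

def is_cache_fresh(cache_path, ttl_seconds):
    # fresh only if the cache file exists and is younger than the TTL
    if not path.isfile(cache_path):
        return False
    age = datetime.now() - datetime.fromtimestamp(path.getctime(cache_path))
    return age.total_seconds() < ttl_seconds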
Example #55
0
def getFileAgeInSeconds(fileName):
    return time() - getctime(fileName)
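One way the helper above might be used, pruning stale files from an assumed cache directory:

from os import listdir, remove
from os.path import isfile, join

CACHE_DIR = "/tmp/cache"  # assumed example path
for name in listdir(CACHE_DIR):
    fpath = join(CACHE_DIR, name)
    if isfile(fpath) and getFileAgeInSeconds(fpath) > 3600:  # older than an hour
        remove(fpath)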
Example #56
0
 def creation_date(self, filename):
     t = path.getctime(filename)
     return datetime.datetime.fromtimestamp(t)
Example #57
0
    def __init__(self, lyr_path, dico_lyr, tipo, txt=''):
        u""" Uses OGR functions to extract basic informations about
        geographic vector file (handles shapefile or MapInfo tables)
        and store into dictionaries.

        lyr_path = path to the LYR file
        dico_lyr = dictionary for global informations
        tipo = format
        text = dictionary of text in the selected language

        see: http://resources.arcgis.com/fr/help/main/10.2/index.html#//00s300000008000000
        """
        # changing working directory to layer folder
        chdir(path.dirname(lyr_path))

        # raising arcpy specific exceptions
        self.alert = 0

        # opening LYR
        try:
            layer_obj = Layer(lyr_path)
        except Exception:
            logging.error("Unable to open this file: %s", lyr_path)
            return None

        # ------------ Basics ----------------
        dico_lyr[u'name'] = layer_obj.name
        dico_lyr[u'description'] = layer_obj.description
        dico_lyr[u'folder'] = path.dirname(lyr_path)
        # by default let's start considering there is only one layer
        dico_lyr[u'layers_count'] = 1
        dico_lyr['broken'] = layer_obj.isBroken

        # ------------ LYR type ----------------
        if layer_obj.isFeatureLayer:
            dico_lyr[u'type'] = txt.get('lyr_featL')
            self.infos_geos(layer_obj, dico_lyr)
            self.infos_basics(layer_obj, dico_lyr)
            # features
            # dico_lyr[u'num_obj'] = int(obj_count(lyr_path).getOutput(0))
            # fields
            dico_fields = OrderedDict()
            if layer_obj.isBroken:
                self.erratum(dico_lyr, lyr_path, u'err_corrupt')
                self.alert = self.alert + 1
                return None
            else:
                pass

            try:
                self.infos_fields(layer_obj, dico_lyr, dico_fields)
                dico_lyr[u'fields'] = dico_fields
            except RuntimeError:
                self.erratum(dico_lyr, lyr_path, u'err_corrupt')
                self.alert = self.alert + 1
                return None

            # count features
            with SearchCursor(lyr_path, [list(dico_fields)[0]]) as cursor:
                rows = {row[0] for row in cursor}

            dico_lyr[u'num_obj'] = len(rows)

        elif layer_obj.isRasterLayer:
            dico_lyr[u'type'] = txt.get('lyr_rastL')
            self.infos_geos(layer_obj, dico_lyr)
            self.infos_basics(layer_obj, dico_lyr)
        elif layer_obj.isRasterizingLayer:
            dico_lyr[u'type'] = txt.get('lyr_rastzL')
            self.infos_basics(layer_obj, dico_lyr)
        elif layer_obj.isServiceLayer:
            dico_lyr[u'type'] = txt.get('lyr_servL')
            self.infos_basics(layer_obj, dico_lyr)
            if layer_obj.supports("SERVICEPROPERTIES"):
                self.infos_service(layer_obj.serviceProperties, dico_lyr)
            else:
                self.erratum(dico_lyr, lyr_path, u'err_incomp')
                self.alert = self.alert + 1
                return None
        elif layer_obj.isNetworkAnalystLayer:
            dico_lyr['type'] = txt.get('lyr_netwaL')
            self.infos_basics(layer_obj, dico_lyr)
        elif layer_obj.isGroupLayer:
            dico_lyr['type'] = txt.get('lyr_groupL')
            self.infos_basics(layer_obj, dico_lyr)
            # layers inside
            sublayers = ListLayers(layer_obj)
            dico_lyr['layers_count'] = len(sublayers) - 1
            dico_lyr['layers_names'] = [
                sublyr.name for sublyr in sublayers[1:]
            ]
            dico_lyr['layers_sources'] = [
                sublyr.dataSource for sublyr in sublayers[1:]
                if sublyr.supports("DATASOURCE")
            ]
        else:
            self.erratum(dico_lyr, lyr_path, u'err_incomp')
            self.alert = self.alert + 1
            return None

        # scale
        dico_lyr['maxScale'] = layer_obj.maxScale
        dico_lyr['minScale'] = layer_obj.minScale

        # secondary
        dico_lyr['license'] = layer_obj.credits
        dico_lyr['broken'] = layer_obj.isBroken

        # dependencies
        dependencies = [
            f for f in listdir(path.dirname(lyr_path))
            if path.splitext(path.abspath(f))[0] == path.splitext(lyr_path)[0]
            and not path.splitext(path.abspath(f).lower())[1] == ".lyr"
            or path.isfile('%s.xml' % f[:-4])
        ]
        dico_lyr[u'dependencies'] = dependencies

        # cumulated size
        dependencies.append(lyr_path)
        total_size = sum([path.getsize(f) for f in dependencies])
        dico_lyr[u"total_size"] = self.sizeof(total_size)
        dependencies.pop(-1)

        # global dates
        dico_lyr[u'date_actu'] = strftime('%d/%m/%Y',
                                          localtime(path.getmtime(lyr_path)))
        dico_lyr[u'date_crea'] = strftime('%d/%m/%Y',
                                          localtime(path.getctime(lyr_path)))

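Example #58
0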
# Python program that gets timestamps and converts them to dates

from os import path
from datetime import date

# Get access, modification, and creation time.
a = path.getatime("/enable1.txt")
m = path.getmtime("/enable1.txt")
c = path.getctime("/enable1.txt")

# Display the times.
print(a, m, c)

# Convert timestamps to dates.
a2 = date.fromtimestamp(a)
m2 = date.fromtimestamp(m)
c2 = date.fromtimestamp(c)
print(a2, m2, c2)
Example #59
0
from os import path
from datetime import datetime, timedelta
user = '******'
user_project = 'Finch'
home_dir = "/home/modlin/modlin/"

week_ago = datetime.now() - timedelta(minutes=15)  # NB: a 15-minute threshold, despite the name
filetime = datetime.fromtimestamp(
    path.getctime('/home/modlin/modlin/zoulFinch.zip'))

if filetime < week_ago:
    print("more than a week old")
else:
    print("less than a week old")
Example #60
0
 def get_revision_time_string(self):
     rev_time = getctime(OUTPUT_DIR / f'{self.period}-{self.subperiod_id}.sqlite')
     dt = str(datetime.datetime.fromtimestamp(rev_time) \
              .astimezone(timezone('Australia/Melbourne'))).split('.')[0]
     return dt