Example 1
    def settimes(self, path, accessed_time=None, modified_time=None):
        """Set the accessed time and modified time of a file

        :param path: path to a file
        :type path: string
        :param accessed_time: the datetime the file was accessed (defaults to current time)
        :type accessed_time: datetime
        :param modified_time: the datetime the file was modified (defaults to current time)
        :type modified_time: datetime

        """

        sys_path = self.getsyspath(path, allow_none=True)
        if sys_path is not None:
            now = datetime.datetime.now()
            if accessed_time is None:
                accessed_time = now
            if modified_time is None:
                modified_time = now
            accessed_time = int(time.mktime(accessed_time.timetuple()))
            modified_time = int(time.mktime(modified_time.timetuple()))
            os.utime(sys_path, (accessed_time, modified_time))
            return True
        else:
            raise UnsupportedError("settimes")
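Note how the method converts `datetime` objects to integer epoch seconds before calling `os.utime`, which only accepts numeric timestamps. A minimal standalone sketch of the same conversion (the path is illustrative and must already exist):

import datetime
import os
import time

dt = datetime.datetime(2020, 1, 1, 12, 0, 0)
stamp = int(time.mktime(dt.timetuple()))  # naive local datetime -> epoch seconds
os.utime("example.txt", (stamp, stamp))   # set atime and mtime together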
Example 2
def TouchStamp(stamp_path):
    dir_name = os.path.dirname(stamp_path)
    if not os.path.isdir(dir_name):
        os.makedirs(dir_name)

    with open(stamp_path, "a"):
        os.utime(stamp_path, None)
Example 3
def test_decoder_cache_shrinking(tmpdir):
    cache_dir = str(tmpdir)
    solver_mock = SolverMock()
    another_solver = SolverMock('another_solver')

    cache = DecoderCache(cache_dir=cache_dir)
    cache.wrap_solver(solver_mock)(**get_solver_test_args())

    # Ensure differing time stamps (depending on the file system the timestamp
    # resolution might be as bad as 1 day).
    for path in cache.get_files():
        timestamp = os.stat(path).st_atime
        timestamp -= 60 * 60 * 24 * 2  # 2 days
        os.utime(path, (timestamp, timestamp))

    cache.wrap_solver(another_solver)(**get_solver_test_args())

    cache_size = cache.get_size_in_bytes()
    assert cache_size > 0

    cache.shrink(cache_size - 1)

    # check that older cached result was removed
    assert SolverMock.n_calls[solver_mock] == 1
    cache.wrap_solver(another_solver)(**get_solver_test_args())
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    assert SolverMock.n_calls[solver_mock] == 2
    assert SolverMock.n_calls[another_solver] == 1
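The backdating trick above sidesteps coarse filesystem timestamp resolution by pushing a file's times two days into the past. The same idiom works in isolation (the path is illustrative and must already exist):

import os

path = "cache_entry.bin"
two_days = 60 * 60 * 24 * 2
st = os.stat(path)
os.utime(path, (st.st_atime - two_days, st.st_mtime - two_days))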
Example 4
def test_fetch_file(url, tmpdir):
    """Test URL retrieval."""
    tempdir = str(tmpdir)
    archive_name = op.join(tempdir, "download_test")
    with catch_logging() as log:
        _fetch_file(url, archive_name, timeout=30., verbose='debug')
    log = log.getvalue()
    assert 'Resuming at' not in log
    with open(archive_name, 'rb') as fid:
        data = fid.read()
    stop = len(data) // 2
    assert 0 < stop < len(data)
    with open(archive_name + '.part', 'wb') as fid:
        fid.write(data[:stop])
    with catch_logging() as log:
        _fetch_file(url, archive_name, timeout=30., verbose='debug')
    log = log.getvalue()
    assert 'Resuming at %s' % stop in log
    with pytest.raises(Exception, match='unknown url type'):
        _fetch_file('NOT_AN_ADDRESS', op.join(tempdir, 'test'), verbose=False)
    resume_name = op.join(tempdir, "download_resume")
    # touch file
    with open(resume_name + '.part', 'w'):
        os.utime(resume_name + '.part', None)
    _fetch_file(url, resume_name, resume=True, timeout=30.,
                verbose=False)
    with pytest.raises(ValueError, match='Bad hash value'):
        _fetch_file(url, archive_name, hash_='a', verbose=False)
    with pytest.raises(RuntimeError, match='Hash mismatch'):
        _fetch_file(url, archive_name, hash_='a' * 32, verbose=False)
Example 5
def test_decoder_cache_shrink_threadsafe(monkeypatch, tmpdir):
    """Tests that shrink handles files deleted by other processes."""
    cache_dir = str(tmpdir)
    solver_mock = SolverMock()
    another_solver = SolverMock('another_solver')

    cache = DecoderCache(cache_dir=cache_dir)
    cache.wrap_solver(solver_mock)(**get_solver_test_args())
    limit = cache.get_size()

    # Ensure differing time stamps (depending on the file system the timestamp
    # resolution might be as bad as 1 day).
    for filename in os.listdir(cache.cache_dir):
        path = os.path.join(cache.cache_dir, filename)
        timestamp = os.stat(path).st_atime
        timestamp -= 60 * 60 * 24 * 2  # 2 days
        os.utime(path, (timestamp, timestamp))

    cache.wrap_solver(another_solver)(**get_solver_test_args())

    cache_size = cache.get_size_in_bytes()
    assert cache_size > 0

    def raise_file_not_found(*args, **kwargs):
        raise OSError(errno.ENOENT, "File not found.")

    monkeypatch.setattr(cache, 'get_size_in_bytes', lambda: cache_size)
    monkeypatch.setattr('os.stat', raise_file_not_found)
    monkeypatch.setattr('os.remove', raise_file_not_found)
    monkeypatch.setattr('os.unlink', raise_file_not_found)

    cache.shrink(limit)
Example 6
 def can_retrieve_cache(self):
     if not getattr(self, "outputs", None):
         return None
     sig = self.signature()
     ssig = Utils.to_hex(self.uid()) + Utils.to_hex(sig)
     dname = os.path.join(self.generator.bld.cache_global, ssig)
     try:
         t1 = os.stat(dname).st_mtime
     except OSError:
         return None
     for node in self.outputs:
         orig = os.path.join(dname, node.name)
         try:
             shutil.copy2(orig, node.abspath())
             os.utime(orig, None)
         except (OSError, IOError):
             Logs.debug("task: failed retrieving file")
             return None
     try:
         t2 = os.stat(dname).st_mtime
     except OSError:
         return None
     if t1 != t2:
         return None
     for node in self.outputs:
         node.sig = sig
         if self.generator.bld.progress_bar < 1:
             self.generator.bld.to_log("restoring from cache %r\n" % node.abspath())
     self.cached = True
     return True
Example 7
 def create_file_if_not_available(file_path):
     if not path.exists(file_path):
         try:
             with open(file_path, 'w'):
                 utime(file_path, None)
         except FileNotFoundError:
             print(__file__)
Example 8
 def touch(self, path=".", mtime=None):
     absPath = self.absolutePath(path)
     if mtime is None:
         mtime = time.time()
     os.utime(absPath, (mtime, mtime))
     return mtime
Example 9
    def test_4_Autoreload(self):
        # If test_3 has not been executed, the server won't be stopped,
        # so we'll have to do it.
        if engine.state != engine.states.EXITING:
            engine.exit()

        # Start the demo script in a new process
        p = helper.CPProcess(ssl=(self.scheme.lower() == "https"))
        p.write_conf(extra='test_case_name: "test_4_Autoreload"')
        p.start(imports="cherrypy.test._test_states_demo")
        try:
            self.getPage("/start")
            start = float(self.body)

            # Give the autoreloader time to cache the file time.
            time.sleep(2)

            # Touch the file
            os.utime(os.path.join(thisdir, "_test_states_demo.py"), None)

            # Give the autoreloader time to re-exec the process
            time.sleep(2)
            host = cherrypy.server.socket_host
            port = cherrypy.server.socket_port
            cherrypy._cpserver.wait_for_occupied_port(host, port)

            self.getPage("/start")
            if not (float(self.body) > start):
                raise AssertionError("start time %s not greater than %s" % (float(self.body), start))
        finally:
            # Shut down the spawned process
            self.getPage("/exit")
        p.join()
Example 10
File: path.py Project: RichDijk/eXe
 def touch(self):
     """ Set the access/modified times of this file to the current time.
     Create the file if it does not exist.
     """
     fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0o666)
     os.close(fd)
     os.utime(self, None)
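Passing `None` as the second argument sets both access and modification time to the current time, which is what makes this a faithful `touch`. On Python 3 the standard library offers the same semantics directly:

from pathlib import Path

Path("marker.txt").touch()  # creates the file if missing, otherwise bumps its timestamps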
Example 11
def fat_recover_slot(boot, fat, slot, destdir=''):
	"Recupera un file da uno slot di directory già decodificato"
	if slot.IsDirectory:
		return
	if not os.path.exists(os.path.join(destdir,slot.Parent)):
		try:
			os.makedirs(os.path.join(destdir,slot.Parent))
		except OSError:
			pass
	if slot.LongName:
		dest = os.path.join(destdir,slot.Parent,slot.LongName)
	else:
		dest = os.path.join(destdir,slot.Parent,slot.ShortName)

	# Open the source chain as a file
	chain = Chain(boot, fat, slot.Start, size=slot.Size)
	out = open(dest, 'wb')
	buf = 1
	while buf:
		buf = chain.read(boot.cluster)
		out.write(buf)
	out.truncate(slot.Size)
	out.close()
	os.utime(dest, (slot.ATime, slot.MTime))
	logging.info("Recuperato %s (%d byte, cluster %d).", dest, slot.Size, slot.Start)
Example 12
 def test_dump_delete(self):
     time_for_temp = (time.time() - DUMPED_APPS_WRITTEN - 1, time.time() - DUMPED_APPS_WRITTEN - 1)
     os.utime(DUMPED_APPS_PATH, time_for_temp)
     sum_test = len(os.listdir(DUMPED_APPS_PATH))
     self.assertEqual(sum_test, 1)
     mkt_gc()
     self.assertEqual(len(os.listdir(DUMPED_APPS_PATH)), 0)
Example 13
 def set_time(self):
     for root, dirs, files in os.walk(self.working_dir):
         for file_ in files:
             os.utime(os.path.join(root, file_), (0, 0))
         for dir_ in dirs:
             os.utime(os.path.join(root, dir_), (0, 0))
     LOG.debug('Set atime and mtime to 0 for all content in working dir')
Example 14
    def get_contents_to_filename(self, filename, headers=None,
                                 cb=None, num_cb=10,
                                 torrent=False,
                                 version_id=None,
                                 res_download_handler=None,
                                 response_headers=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3.  Store contents of the object to a file named by 'filename'.
        See get_contents_to_file method for details about the
        parameters.
        
        :type filename: string
        :param filename: The filename of where to put the file contents
        
        :type headers: dict
        :param headers: Any additional headers to send in the request
        
        :type cb: function
        :param cb: a callback function that will be called to report
                   progress on the upload.  The callback should accept
                   two integer parameters, the first representing the
                   number of bytes that have been successfully
                   transmitted to S3 and the second representing the
                   size of the to be transmitted object.
                    
        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with
                       the cb parameter this parameter determines the
                       granularity of the callback by defining
                       the maximum number of times the callback will
                       be called during the file transfer.  
             
        :type torrent: bool
        :param torrent: If True, returns the contents of a torrent file
                        as a string.

        :type res_download_handler: ResumableDownloadHandler
        :param res_download_handler: If provided, this handler will
                                     perform the download.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP headers/values
                                 that will override any headers associated with
                                 the stored object in the response.
                                 See http://goo.gl/EWOPb for details.
        """
        fp = open(filename, 'wb')
        self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
                                  version_id=version_id,
                                  res_download_handler=res_download_handler,
                                  response_headers=response_headers)
        fp.close()
        # if last_modified date was sent from s3, try to set file's timestamp
        if self.last_modified is not None:
            try:
                modified_tuple = rfc822.parsedate_tz(self.last_modified)
                modified_stamp = int(rfc822.mktime_tz(modified_tuple))
                os.utime(fp.name, (modified_stamp, modified_stamp))
            except Exception: pass
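The `rfc822` module used here exists only on Python 2; on Python 3 the same Last-Modified handling would go through `email.utils` (a sketch of an equivalent, not boto's actual code):

import os
from email.utils import mktime_tz, parsedate_tz

last_modified = "Wed, 21 Oct 2015 07:28:00 GMT"   # example header value
modified_stamp = mktime_tz(parsedate_tz(last_modified))
os.utime("downloaded.bin", (modified_stamp, modified_stamp))  # illustrative path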
Example 15
def download():
    f = env._ui.file
    s = env._ui.url

    if os.path.isfile(f):
        newf = False
    else:
        puts('[{0}]: "{1}" does not exist'.format(env.msgid, f))
        newf = download_file(f, s)

    mtime = file_timestamp(f)

    if mtime < time.time() - env.ACCEPTABLE:
        # if mtime is less than now - n days, it may be stale.

        newtime = time.time() - (env.ACCEPTABLE / 2)

        if newf is True:
            # if we just downloaded the file it isn't stale yet
            os.utime(f, (newtime, newtime))
        else:
            # definitely stale, must download it again.
            newf = download_file(f, s)
            if mtime == file_timestamp(f):
                # if the source is stale, modify mtime so we don't
                # download it for a few days.
                os.utime(f, (newtime, newtime))
    else:
        # otherwise, mtime is within the window of n days, and we can do nothing.
        puts('[{0}]: "{1}" is up to date'.format(env.msgid, f))
Example 16
def set_folder_dates_to_median(path):
    logging.info('Setting timestamps for {0}'.format(path))

    dirpath, dirnames, filenames = next(os.walk(path))

    timestamps = [get_photo_timestamp(os.path.join(dirpath, fname)) for fname in filenames]
    timestamps = sorted([tstamp.replace(tzinfo=None) for tstamp in timestamps if tstamp])

    if timestamps:

        median_timestamp = timestamps[int(len(timestamps)/2)]

        """Generate integers (number of seconds since epoch) to represent
        the Accessed and Modified timestamps, to be set for this folder.
        """
        epoch = datetime.datetime.utcfromtimestamp(0)
        atime = (datetime.datetime.today() - epoch).total_seconds()
        mtime = (median_timestamp - epoch).total_seconds()

        os.utime(path, (atime, mtime))
        logging.debug("Set Accessed time for {0} to {1}".format(path, atime))
        logging.debug("Set Modified time for {0} to {1}".format(path, mtime))

    else:
        logging.warning("No valid timestamps found for {0}".format(path))
Example 17
    def test_reloadAfterPrecompile(self):
        """
        """
        # Get a filename
        temp = self.mktemp()

        # Write some content
        f = file(temp, 'w')
        f.write('<p>foo</p>')
        f.close()

        # Precompile the doc
        ctx = context.WovenContext()
        doc = loaders.htmlfile(temp)
        pc = flat.precompile(flat.flatten(doc), ctx)

        before = ''.join(flat.serialize(pc, ctx))


        # Write the file with different content and make sure the
        # timestamp changes
        f = file(temp, 'w')
        f.write('<p>bar</p>')
        f.close()
        os.utime(temp, (os.path.getatime(temp), os.path.getmtime(temp)+5))

        after = ''.join(flat.serialize(pc, ctx))

        self.assertIn('foo', before)
        self.assertIn('bar', after)
        self.failIfEqual(before, after)
Example 18
    def set_file_attr(filename, attr):
        """
        Change a file's attributes on the local filesystem.  The contents of
        C{attr} are used to change the permissions, owner, group ownership,
        and/or modification & access time of the file, depending on which
        attributes are present in C{attr}.

        This is meant to be a handy helper function for translating SFTP file
        requests into local file operations.
        
        @param filename: name of the file to alter (should usually be an
            absolute path).
        @type filename: str
        @param attr: attributes to change.
        @type attr: L{SFTPAttributes}
        """
        if sys.platform != 'win32':
            # mode operations are meaningless on win32
            if attr._flags & attr.FLAG_PERMISSIONS:
                os.chmod(filename, attr.st_mode)
            if attr._flags & attr.FLAG_UIDGID:
                os.chown(filename, attr.st_uid, attr.st_gid)
        if attr._flags & attr.FLAG_AMTIME:
            os.utime(filename, (attr.st_atime, attr.st_mtime))
        if attr._flags & attr.FLAG_SIZE:
            open(filename, 'w+').truncate(attr.st_size)
Example 19
    def test_get_unstaged_changes(self):
        """Unit test for get_unstaged_changes."""

        repo_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, repo_dir)
        with Repo.init(repo_dir) as repo:

            # Commit a dummy file then modify it
            foo1_fullpath = os.path.join(repo_dir, 'foo1')
            with open(foo1_fullpath, 'wb') as f:
                f.write(b'origstuff')

            foo2_fullpath = os.path.join(repo_dir, 'foo2')
            with open(foo2_fullpath, 'wb') as f:
                f.write(b'origstuff')

            repo.stage(['foo1', 'foo2'])
            repo.do_commit(b'test status', author=b'', committer=b'')

            with open(foo1_fullpath, 'wb') as f:
                f.write(b'newstuff')

            # modify access and modify time of path
            os.utime(foo1_fullpath, (0, 0))

            changes = get_unstaged_changes(repo.open_index(), repo_dir)

            self.assertEqual(list(changes), [b'foo1'])
Example 20
 def downloadDidFinish_(self, downloader):
     # the downloader sets the mtime to be the web server's idea of
     # when the file was last updated. Which is cute. But useless to us.
     # I want to know when I fetched it.
     os.utime( self.filename, None )
     data = file( self.filename ).read()
     self.callback( data, False )
Example 21
    def test11_purge_multDirMultElement(self):
        'QueueSimple.purge() multiple directories & elements'
        qs = QueueSimple(self.qdir, granularity=1)

        qs.add('foo')
        assert qs.count() == 1
        time.sleep(2)
        qs.add('bar')
        assert qs.count() == 2
        assert len(os.listdir(self.qdir)) == 2
        qs.purge()
        assert qs.count() == 2

        elem = qs.first()
        qs.lock(elem)
        qs.remove(elem)
        assert qs.count() == 1
        qs.purge()
        assert len(os.listdir(self.qdir)) == 1

        time.sleep(2)
        qs.add('baz')
        assert len(os.listdir(self.qdir)) == 2
        for elem in qs:
            qs.lock(elem)
        elem1 = qs.first()
        lock_path1 = self.qdir + '/' + elem1 + LOCKED_SUFFIX
        assert os.path.exists(lock_path1) is True
        os.utime(lock_path1, (time.time() - 25, time.time() - 25))
        qs.purge(maxlock=10)
        assert os.path.exists(lock_path1) is False

        elem2 = qs.next()
        lock_path2 = self.qdir + '/' + elem2 + LOCKED_SUFFIX
        assert os.path.exists(lock_path2) is True
Example 22
    def test_status(self):
        """Integration test for `status` functionality."""

        # Commit a dummy file then modify it
        fullpath = os.path.join(self.repo.path, 'foo')
        with open(fullpath, 'w') as f:
            f.write('origstuff')

        porcelain.add(repo=self.repo.path, paths=['foo'])
        porcelain.commit(repo=self.repo.path, message=b'test status',
            author=b'', committer=b'')

        # modify access and modify time of path
        os.utime(fullpath, (0, 0))

        with open(fullpath, 'wb') as f:
            f.write(b'stuff')

        # Make a dummy file and stage it
        filename_add = 'bar'
        fullpath = os.path.join(self.repo.path, filename_add)
        with open(fullpath, 'w') as f:
            f.write('stuff')
        porcelain.add(repo=self.repo.path, paths=filename_add)

        results = porcelain.status(self.repo)

        self.assertEqual(results.staged['add'][0], filename_add.encode('ascii'))
        self.assertEqual(results.unstaged, [b'foo'])
Example 23
def create_cached_tile(tile, cache, timestamp=None):
    loc = cache.tile_location(tile, create_dir=True)
    with open(loc, 'w') as f:
        f.write('foo')
    
    if timestamp:
        os.utime(loc, (timestamp, timestamp))
Example 24
    def create_file(self, filename, contents, mtime=None, mode='w'):
        """Creates a file in a tmpdir

        ``filename`` should be a relative path, e.g. "foo/bar/baz.txt"
        It will be translated into a full path in a tmp dir.

        If the ``mtime`` argument is provided, then the file's
        mtime will be set to the provided value (must be an epoch time).
        Otherwise the mtime is left untouched.

        ``mode`` is the mode the file should be opened with, either ``w`` or
        ``wb``.

        Returns the full path to the file.

        """
        full_path = os.path.join(self.rootdir, filename)
        if not os.path.isdir(os.path.dirname(full_path)):
            os.makedirs(os.path.dirname(full_path))
        with open(full_path, mode) as f:
            f.write(contents)
        current_time = os.path.getmtime(full_path)
        # Subtract a few years off the last modification date.
        os.utime(full_path, (current_time, current_time - 100000000))
        if mtime is not None:
            os.utime(full_path, (mtime, mtime))
        return full_path
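A hedged usage sketch of `create_file`, assuming the surrounding test fixture provides `self.rootdir` as described (the epoch value 1234567890 is arbitrary):

full_path = self.create_file('foo/bar/baz.txt', 'contents', mtime=1234567890)
assert os.path.getmtime(full_path) == 1234567890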
Example 25
	def _update_date(self, series):

		_date_aired = series.episode(series.fileDetails.seasonNum, series.fileDetails.episodeNums)[0].first_aired
		cur_date = time.localtime(os.path.getmtime(series.fileDetails.newName))
		if _date_aired:
			_date_aired = datetime.datetime.combine(_date_aired, datetime.time())
			tt = _date_aired.timetuple()
			log.debug('Current File Date: %s  Air Date: %s' % (time.asctime(cur_date), time.asctime(tt)))
			tup_cur = [cur_date[0],
					   cur_date[1],
					   cur_date[2],
					   cur_date[3],
					   cur_date[4],
					   cur_date[5],
					   cur_date[6],
					   cur_date[7],
					   -1]
			tup = [tt[0], tt[1], tt[2], 20, 0, 0, tt[6], tt[7], tt[8]]
			if tup != tup_cur:
				time_epoc = time.mktime(tup)
				try:
					log.info("Updating First Aired: %s" % _date_aired)
					os.utime(series.fileDetails.newName, (time_epoc, time_epoc))
				except (OSError, IOError), exc:
					log.error("Skipping, Unable to update time: %s" % series.fileDetails.newName)
					log.error("Unexpected error: %s" % exc)
			else:
					log.info("First Aired Correct: %s" % _date_aired)
Example 26
def fetcher(metadata, iasid, file_base, quality,  silent=False):
		sts = 1
		trycount = 0
		while sts != 0:
				print "Try number %d" % trycount

				wget_args = [
								"wget",
								"-c", "-O", file_base+".mp4",
								"--no-check-certificate",
								cdn_url(metadata["iasid"],
										metadata["eid"],
										metadata["sources2"][quality],
										0,
										"en")
								]

				kwargs = { }

				if silent:
						kwargs.update({
								"stdout" : PIPE,
								"stdin" : None,
								"stderr" : PIPE,
								"close_fds" : True,
								})

				#print wget_args
				p = Popen( wget_args, **kwargs )
				pid, sts = os.waitpid(p.pid, 0)

				trycount += 1
				if trycount > config.max_fetch_retry: break
		os.utime(file_base+".mp4", None)
Example 27
def download_file(url, filename, if_modified_since=None):
    """
    if_modified_since (optional) -- None or a timestamp expressed in
        seconds since the epoch
    """
    headers = {}
    if if_modified_since:
        headers['If-Modified-Since'] = timestamp_to_rfc1123(if_modified_since)

    try:
        r = requests.get(url, headers=headers)
    # TODO Improve error handling
    except requests.exceptions.ConnectionError as e:
        deeper_error = e.args[0]
        print_error('Connection error: %s - %s' % (deeper_error, url))
        return

    if r.status_code == requests.codes.OK:  # 200
        with open(filename, 'wb') as stream:
            stream.write(r.content)

        # If 'Last-Modified' is set, set it as the modification time of the
        # file we wrote
        if 'Last-Modified' in r.headers:
            mtime = datetime_from_rfc1123(r.headers['Last-Modified']).timestamp()
            os.utime(filename, (mtime, mtime))

    elif r.status_code == requests.codes.NOT_MODIFIED:  # 304
        # No action...
        print('... File is up to date:', filename)
        return

    else:
        print_error('HTTP Error: %d %s - %s' % (r.status_code, r.reason, url))
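`timestamp_to_rfc1123` and `datetime_from_rfc1123` are not shown above; minimal stand-ins built on the standard library might look like this (an assumption, not the project's actual helpers):

from datetime import timezone
from email.utils import formatdate, parsedate_to_datetime

def timestamp_to_rfc1123(ts):
    # e.g. 1445412480.0 -> 'Wed, 21 Oct 2015 07:28:00 GMT'
    return formatdate(ts, usegmt=True)

def datetime_from_rfc1123(value):
    # Inverse direction; returns an aware datetime (UTC for GMT dates).
    return parsedate_to_datetime(value).astimezone(timezone.utc)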
Example 28
def test_update():
    NAME_PNG = 'abc.png'
    path_png = os.path.join(TESTDIR, NAME_PNG)

    NAME_PPT = 'testppt.pptx'
    path_ppt = os.path.join(TESTDIR, NAME_PPT)

    directive = pptshape.directive.PPTShape(None, None, None, 
             None, None, None, None, None, None)
    if os.path.exists(path_png):
        os.unlink(path_png)

    directive.arguments = [NAME_PNG]
    directive.state = mock.Mock()
    directive.state.document.current_source = os.path.join(TESTDIR, 'test.rst')
    directive.options = {
        'pptfilename': NAME_PPT,
        'shapename': 'shape-title',
    }
    directive.run()
    assert os.path.exists(path_png)
    
    os.utime(path_png, (0, 0))
    directive.run()

    assert os.stat(path_png).st_mtime != 0

    # Make png file empty.
    open(path_png, 'w').close()

    directive.run()
    assert os.path.getsize(path_png) == 0
Example 29
def touch(files, tn=""):
    if os.path.exists(files):
        logger.logVV(tn, logger.I, _("Touching file") + " " + str(files))
        os.utime(files, None)
    else:
        logger.logVV(tn, logger.I, _("Creating file") + " " + str(files))
        open(files, "w").close()
Example 30
 def test_already_watermarked_stale(self):
     if not os.path.exists(os.path.dirname(self.dest)):
         os.makedirs(os.path.dirname(self.dest))
     open(self.dest, 'w')
     old_time = time.time() - settings.WATERMARK_REUSE_SECONDS - 5
     os.utime(self.dest, (old_time, old_time))
     assert self.file.watermark(self.user)
Example 31
def touch(fname, times=None):
    try:
        with file(fname, 'a'):
            os.utime(fname, times)
    except IOError:
        check_call(shlex.split('touch %s' % fname))
Example 32
def ugoira2webm(ugoira_file,
                exportname,
                ffmpeg=u"ffmpeg",
                codec="libvpx-vp9",
                param="-lossless 1 -vsync 2 -r 999 -pix_fmt yuv420p",
                extension="webm",
                image=None):
    ''' modified based on https://github.com/tsudoko/ugoira-tools/blob/master/ugoira2webm/ugoira2webm.py '''
    d = tempfile.mkdtemp(prefix="ugoira2webm")
    d = d.replace(os.sep, '/')

    if exportname is None or len(exportname) == 0:
        name = '.'.join(ugoira_file.split('.')[:-1])
        exportname = f"{os.path.basename(name)}.{extension}"

    tempname = d + "/temp." + extension

    cmd = f"{ffmpeg} -y -i \"{d}/i.ffconcat\" -c:v {codec} {param} \"{tempname}\""
    if codec is None:
        cmd = f"{ffmpeg} -y -i \"{d}/i.ffconcat\" {param} \"{tempname}\""

    try:
        frames = {}
        ffconcat = "ffconcat version 1.0\n"

        with zipfile.ZipFile(ugoira_file) as f:
            f.extractall(d)

        with open(d + "/animation.json") as f:
            frames = json.load(f)['frames']

        for i in frames:
            ffconcat += "file " + i['file'] + '\n'
            ffconcat += "duration " + str(float(i['delay']) / 1000) + '\n'
        # Fix ffmpeg concat demuxer as described in issue #381
        # this will increase the frame count, but will fix the last frame timestamp issue.
        ffconcat += "file " + frames[-1]['file'] + '\n'

        with open(d + "/i.ffconcat", "w") as f:
            f.write(ffconcat)

        ffmpeg_args = shlex.split(cmd)
        get_logger().info(f"[ugoira2webm()] running with cmd: {cmd}")
        p = subprocess.Popen(ffmpeg_args, stderr=subprocess.PIPE)

        # progress report
        chatter = ""
        print_and_log('info', f"Start encoding {exportname}")
        while p.stderr:
            buff = p.stderr.readline().decode('utf-8').rstrip('\n')
            chatter += buff
            if buff.endswith("\r"):
                if chatter.find("frame=") > 0:
                    print_and_log(None, chatter.strip(), os.linesep, end=' ')
                chatter = ""
            if len(buff) == 0:
                break

        ret = p.wait()
        shutil.move(tempname, exportname)

        if ret is not None:
            print_and_log(None, f"- Done with status = {ret}")

        # set last-modified and last-accessed timestamp
        if image is not None and _config.setLastModified and exportname is not None and os.path.isfile(
                exportname):
            ts = time.mktime(image.worksDateDateTime.timetuple())
            os.utime(exportname, (ts, ts))
    except FileNotFoundError:
        print_and_log("error",
                      f"Failed when converting, ffmpeg command used: {cmd}")
        raise

    finally:
        shutil.rmtree(d)
Example 33
def touch(fname, times=None):
    with file(fname, 'a'):
        os.utime(fname, times)
Example 34
 def set_last_access_modification_datetime(full_path, m_date_time):
     m = time.mktime(m_date_time.timetuple())
     os.utime(full_path, (m, m))
Example 35
    def set_remote_uri_times(self, uri, atime, mtime):
        scheme, parts = uriparse(uri)
        remote_path = parts.path

        os.utime(remote_path, (atime, mtime))
Example 36
def process_from_group(caller,
                       config,
                       group_id,
                       limit=0,
                       process_external=True):
    br = caller.__br__
    try:
        print("Download by Group Id")
        if limit != 0:
            print("Limit: {0}".format(limit))
        if process_external:
            print("Include External Image: {0}".format(process_external))

        max_id = 0
        image_count = 0
        flag = True
        while flag:
            url = "https://www.pixiv.net/group/images.php?format=json&max_id={0}&id={1}".format(
                max_id, group_id)
            PixivHelper.print_and_log('info',
                                      "Getting images from: {0}".format(url))
            json_response = br.open(url)
            group_data = PixivGroup(json_response)
            json_response.close()
            max_id = group_data.maxId
            if group_data.imageList is not None and len(
                    group_data.imageList) > 0:
                for image in group_data.imageList:
                    if image_count > limit and limit != 0:
                        flag = False
                        break
                    print("Image #{0}".format(image_count))
                    print("ImageId: {0}".format(image))
                    result = PixivImageHandler.process_image(caller,
                                                             config,
                                                             image_id=image)
                    image_count = image_count + 1
                    PixivHelper.wait(result, config)

            if process_external and group_data.externalImageList is not None and len(
                    group_data.externalImageList) > 0:
                for image_data in group_data.externalImageList:
                    if image_count > limit and limit != 0:
                        flag = False
                        break
                    print("Image #{0}".format(image_count))
                    print("Member Id   : {0}".format(
                        image_data.artist.artistId))
                    PixivHelper.safePrint("Member Name  : " +
                                          image_data.artist.artistName)
                    print("Member Token : {0}".format(
                        image_data.artist.artistToken))
                    print("Image Url   : {0}".format(image_data.imageUrls[0]))

                    filename = PixivHelper.make_filename(
                        config.filenameFormat,
                        imageInfo=image_data,
                        tagsSeparator=config.tagsSeparator,
                        tagsLimit=config.tagsLimit,
                        fileUrl=image_data.imageUrls[0],
                        useTranslatedTag=config.useTranslatedTag,
                        tagTranslationLocale=config.tagTranslationLocale)
                    filename = PixivHelper.sanitize_filename(
                        filename, config.rootDirectory)
                    PixivHelper.safePrint("Filename  : " + filename)
                    (result, filename) = PixivDownloadHandler.download_image(
                        caller,
                        image_data.imageUrls[0],
                        filename,
                        url,
                        config.overwrite,
                        config.retry,
                        backup_old_file=config.backupOldFile)
                    PixivHelper.get_logger().debug("Download %s result: %s",
                                                   filename, result)
                    if config.setLastModified and filename is not None and os.path.isfile(
                            filename):
                        ts = time.mktime(
                            image_data.worksDateDateTime.timetuple())
                        os.utime(filename, (ts, ts))

                    image_count = image_count + 1

            if (group_data.imageList is None or len(group_data.imageList) == 0) and \
               (group_data.externalImageList is None or len(group_data.externalImageList) == 0):
                flag = False
            print("")

    except BaseException:
        PixivHelper.print_and_log(
            'error',
            'Error at process_from_group(): {0}'.format(sys.exc_info()))
        raise
Example 37
	def _restore_backup(cls, path,
	                    settings=None,
	                    plugin_manager=None,
	                    datafolder=None,
	                    on_install_plugins=None,
	                    on_report_unknown_plugins=None,
	                    on_invalid_backup=None,
	                    on_log_progress=None,
	                    on_log_error=None,
	                    on_restore_start=None,
	                    on_restore_done=None,
	                    on_restore_failed=None):
		if not is_os_compatible(["!windows"]):
			if callable(on_log_error):
				on_log_error("Restore is not supported on this operating system")
			if callable(on_restore_failed):
				on_restore_failed(path)
			return False

		restart_command = settings.global_get(["server", "commands", "serverRestartCommand"])

		basedir = settings._basedir
		cls._clean_dir_backup(basedir,
		                       on_log_progress=on_log_progress)

		plugin_repo = dict()
		repo_url = settings.global_get(["plugins", "pluginmanager", "repository"])
		if repo_url:
			plugin_repo = cls._get_plugin_repository_data(repo_url)

		if callable(on_restore_start):
			on_restore_start(path)

		try:

			with zipfile.ZipFile(path, "r") as zip:
				# read metadata
				try:
					metadata_zipinfo = zip.getinfo("metadata.json")
				except KeyError:
					if callable(on_invalid_backup):
						on_invalid_backup("Not an OctoPrint backup, lacks metadata.json")
					if callable(on_restore_failed):
						on_restore_failed(path)
					return False

				metadata_bytes = zip.read(metadata_zipinfo)
				metadata = json.loads(metadata_bytes)

				backup_version = get_comparable_version(metadata["version"], base=True)
				if backup_version > get_octoprint_version(base=True):
					if callable(on_invalid_backup):
						on_invalid_backup("Backup is from a newer version of OctoPrint and cannot be applied")
					if callable(on_restore_failed):
						on_restore_failed(path)
					return False

				# unzip to temporary folder
				temp = tempfile.mkdtemp()
				try:
					if callable(on_log_progress):
						on_log_progress("Unpacking backup to {}...".format(temp))

					abstemp = os.path.abspath(temp)
					dirs = {}
					for member in zip.infolist():
						abspath = os.path.abspath(os.path.join(temp, member.filename))
						if abspath.startswith(abstemp):
							date_time = time.mktime(member.date_time + (0, 0, -1))

							zip.extract(member, temp)

							if os.path.isdir(abspath):
								dirs[abspath] = date_time
							else:
								os.utime(abspath, (date_time, date_time))

					# set time on folders
					for abspath, date_time in dirs.items():
						os.utime(abspath, (date_time, date_time))

					# sanity check
					configfile = os.path.join(temp, "basedir", "config.yaml")
					if not os.path.exists(configfile):
						if callable(on_invalid_backup):
							on_invalid_backup("Backup lacks config.yaml")
						if callable(on_restore_failed):
							on_restore_failed(path)
						return False

					import yaml

					with io.open(configfile, "rt", encoding="utf-8") as f:
						configdata = yaml.safe_load(f)

					if configdata.get("accessControl", dict()).get("enabled", True):
						userfile = os.path.join(temp, "basedir", "users.yaml")
						if not os.path.exists(userfile):
							if callable(on_invalid_backup):
								on_invalid_backup("Backup lacks users.yaml")
							if callable(on_restore_failed):
								on_restore_failed(path)
							return False

					if callable(on_log_progress):
						on_log_progress("Unpacked")

					# install available plugins
					plugins = []
					plugin_list_file = os.path.join(temp, "plugin_list.json")
					if os.path.exists(plugin_list_file):
						with io.open(os.path.join(temp, "plugin_list.json"), 'rb') as f:
							plugins = json.load(f)

					known_plugins = []
					unknown_plugins = []
					if plugins:
						if plugin_repo:
							for plugin in plugins:
								if plugin["key"] in plugin_manager.plugins:
									# already installed
									continue

								if plugin["key"] in plugin_repo:
									# not installed, can be installed from repository url
									known_plugins.append(plugin_repo[plugin["key"]])
								else:
									# not installed, not installable
									unknown_plugins.append(plugin)

						else:
							# no repo, all plugins are not installable
							unknown_plugins = plugins

						if callable(on_log_progress):
							if known_plugins:
								on_log_progress("Known and installable plugins: {}".format(", ".join(map(lambda x: x["id"], known_plugins))))
							if unknown_plugins:
								on_log_progress("Unknown plugins: {}".format(", ".join(map(lambda x: x["key"], unknown_plugins))))

						if callable(on_install_plugins):
							on_install_plugins(known_plugins)

						if callable(on_report_unknown_plugins):
							on_report_unknown_plugins(unknown_plugins)

					# move config data
					basedir_backup = basedir + ".bck"
					basedir_extracted = os.path.join(temp, "basedir")

					if callable(on_log_progress):
						on_log_progress("Renaming {} to {}...".format(basedir, basedir_backup))
					shutil.move(basedir, basedir_backup)

					try:
						if callable(on_log_progress):
							on_log_progress("Moving {} to {}...".format(basedir_extracted, basedir))
						shutil.move(basedir_extracted, basedir)
					except Exception:
						if callable(on_log_error):
							on_log_error("Error while restoring config data", exc_info=sys.exc_info())
							on_log_error("Rolling back old config data")

						shutil.move(basedir_backup, basedir)

						if callable(on_restore_failed):
							on_restore_failed(path)
						return False

					if unknown_plugins:
						if callable(on_log_progress):
							on_log_progress("Writing info file about unknown plugins")

						if not os.path.isdir(datafolder):
							os.makedirs(datafolder)

						unknown_plugins_path = os.path.join(datafolder, UNKNOWN_PLUGINS_FILE)
						try:
							with io.open(unknown_plugins_path, mode='wb') as f:
								f.write(to_bytes(json.dumps(unknown_plugins)))
						except Exception:
							if callable(on_log_error):
								on_log_error("Could not persist list of unknown plugins to {}".format(unknown_plugins_path),
								             exc_info = sys.exc_info())

				finally:
					if callable(on_log_progress):
						on_log_progress("Removing temporary unpacked folder")
					shutil.rmtree(temp)

		except Exception:
			exc_info = sys.exc_info()
			try:
				if callable(on_log_error):
					on_log_error("Error while running restore", exc_info=exc_info)
				if callable(on_restore_failed):
					on_restore_failed(path)
			finally:
				del exc_info
			return False

		finally:
			# remove zip
			if callable(on_log_progress):
				on_log_progress("Removing temporary zip")
			os.remove(path)

		# restart server
		if not restart_command:
			restart_command = configdata.get("server", dict()).get("commands", dict()).get("serverRestartCommand")

		if restart_command:
			import sarge

			if callable(on_log_progress):
				on_log_progress("Restarting...")
			if callable(on_restore_done):
				on_restore_done(path)

			try:
				sarge.run(restart_command, close_fds=True, async_=True)
			except Exception:
				if callable(on_log_error):
					on_log_error("Error while restarting via command {}".format(restart_command),
					             exc_info=sys.exc_info())
					on_log_error("Please restart OctoPrint manually")
				return False

		else:
			if callable(on_restore_done):
				on_restore_done(path)
			if callable(on_log_error):
				on_log_error("No restart command configured. Please restart OctoPrint manually.")

		return True
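`ZipInfo.date_time` is a 6-tuple (year, month, day, hour, minute, second) with no weekday, yearday, or DST fields, hence the `+ (0, 0, -1)` padding before `time.mktime`; the trailing `-1` lets `mktime` decide whether DST applies. The restoration idiom in isolation (archive and destination names are illustrative):

import os
import time
import zipfile

with zipfile.ZipFile("backup.zip") as zf:
    for member in zf.infolist():
        extracted = zf.extract(member, "dest")
        stamp = time.mktime(member.date_time + (0, 0, -1))
        if not member.is_dir():
            os.utime(extracted, (stamp, stamp))
# Directory timestamps are deferred in the example above because extracting
# files into a directory would update its mtime again.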
Example 38
def get_files(project):
    frs = make_client(options.api_url, 'FrsApp')
    valid_pfs_filename = re.compile(
        r'(?![. ])[-_ +.,=#~@!()\[\]a-zA-Z0-9]+(?<! )$')
    pfs_output_dir = os.path.join(os.path.abspath(options.output_dir), 'PFS',
                                  convert_project_shortname(project.path))
    sql_updates = ''

    def handle_path(obj, prev_path):
        path_component = obj.title.strip().replace('/', ' ').replace(
            '&', '').replace(':', '')
        path = os.path.join(prev_path, path_component)
        if not valid_pfs_filename.match(path_component):
            log.error('Invalid filename: "%s"' % path)
        save(json.dumps(dict(obj), default=str), project, 'frs',
             path + '.json')
        return path

    frs_mapping = {}

    for pkg in frs.service.getPackageList(s, project.id).dataRows:
        pkg_path = handle_path(pkg, '')
        pkg_details = frs.service.getPackageData(s, pkg.id)  # download count
        save(json.dumps(dict(pkg_details), default=str), project, 'frs',
             pkg_path + '_details.json')

        for rel in frs.service.getReleaseList(s, pkg.id).dataRows:
            rel_path = handle_path(rel, pkg_path)
            frs_mapping[rel['id']] = rel_path
            # download count
            rel_details = frs.service.getReleaseData(s, rel.id)
            save(json.dumps(dict(rel_details), default=str), project, 'frs',
                 rel_path + '_details.json')

            for file in frs.service.getFrsFileList(s, rel.id).dataRows:
                details = frs.service.getFrsFileData(s, file.id)

                file_path = handle_path(file, rel_path)
                save(
                    json.dumps(dict(
                        file,
                        lastModifiedBy=details.lastModifiedBy,
                        lastModifiedDate=details.lastModifiedDate,
                    ),
                               default=str), project, 'frs',
                    file_path + '.json')
                if not options.skip_frs_download:
                    download_file('frs', rel.path + '/' + file.id,
                                  pfs_output_dir, file_path)
                    mtime = int(mktime(details.lastModifiedDate.timetuple()))
                    os.utime(os.path.join(pfs_output_dir, file_path),
                             (mtime, mtime))

            # releases
            created_on = int(mktime(rel.createdOn.timetuple()))
            mtime = int(mktime(rel.lastModifiedOn.timetuple()))
            if os.path.exists(os.path.join(pfs_output_dir, rel_path)):
                os.utime(os.path.join(pfs_output_dir, rel_path),
                         (mtime, mtime))
            sql_updates += _dir_sql(created_on, project, rel.title.strip(),
                                    pkg_path)
        # packages
        created_on = int(mktime(pkg.createdOn.timetuple()))
        mtime = int(mktime(pkg.lastModifiedOn.timetuple()))
        if os.path.exists(os.path.join(pfs_output_dir, pkg_path)):
            os.utime(os.path.join(pfs_output_dir, pkg_path), (mtime, mtime))
        sql_updates += _dir_sql(created_on, project, pkg.title.strip(), '')
    # save pfs update sql for this project
    with open(os.path.join(options.output_dir, 'pfs_updates.sql'), 'a') as out:
        out.write('/* %s */' % project.id)
        out.write(sql_updates)
    save(json.dumps(frs_mapping), project, 'frs_mapping.json')
Example 39
        if UserDictKeys.query.filter_by(key=upload.key, user_id=current_user.id).first() or UploadsUserAuth.query.filter_by(uploads_indexno=file_number, user_id=current_user.id).first() or db.session.query(UploadsRoleAuth.id).join(UserRoles, and_(UserRoles.user_id == current_user.id, UploadsRoleAuth.role_id == UserRoles.role_id)).first():
            return True
    elif session and 'tempuser' in session:
        temp_user_id = int(session['tempuser'])
        if UserDictKeys.query.filter_by(key=upload.key, temp_user_id=temp_user_id).first() or UploadsUserAuth.query.filter_by(uploads_indexno=file_number, temp_user_id=temp_user_id).first():
            return True
    return False

if in_celery:
    LOGFILE = daconfig.get('celery flask log', '/tmp/celery-flask.log')
else:
    LOGFILE = daconfig.get('flask log', '/tmp/flask.log')

if not os.path.exists(LOGFILE):
    with open(LOGFILE, 'a'):
        os.utime(LOGFILE, None)

error_file_handler = logging.FileHandler(filename=LOGFILE)
error_file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(error_file_handler)

#sys.stderr.write("__name__ is " + str(__name__) + " and __package__ is " + str(__package__) + "\n")

def flask_logger(message):
    #app.logger.warning(message)
    sys.stderr.write(str(message) + "\n")
    return

def pad(the_string):
    return the_string + bytearray((16 - len(the_string) % 16) * chr(16 - len(the_string) % 16), encoding='utf-8')
Example 40
def touch_file(path):
    with open(path, 'a'):
        os.utime(path, None)
    return True
Example 41
def _touch(fname):
    open(fname, 'a').close()
    os.utime(fname, None)
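The `os.utime(fname, None)` call is what refreshes an existing file; opening in mode `'a'` without writing does not update the mtime of a file that already exists. A quick demonstration using the helper above (the file name is illustrative):

import os

_touch('stamp.txt')              # create or refresh
os.utime('stamp.txt', (0, 0))    # backdate to the epoch
_touch('stamp.txt')              # refresh both timestamps to "now"
assert os.path.getmtime('stamp.txt') > 0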
Example 42
 def adjustTimeStampsToMatchName(self: 'SubmissionFile') -> None:
     s = self.datetime_total_seconds
     os.utime(str(self.path), (s, s))
Example 43
File: ifs.py Project: mon/ifstools
    def extract(self,
                progress=True,
                recurse=True,
                tex_only=False,
                extract_manifest=False,
                path=None,
                rename_dupes=False,
                **kwargs):
        if path is None:
            path = self.folder_out
        if tex_only:
            kwargs['use_cache'] = False
        utils.mkdir_silent(path)
        utime(path, (self.time, self.time))

        if extract_manifest and self.manifest and not tex_only:
            with open(join(path, 'ifs_manifest.xml'), 'wb') as f:
                f.write(self.manifest.to_text().encode('utf8'))

        # build the tree
        for folder in self.tree.all_folders:
            if tex_only and folder.name == 'tex':
                self.tree = folder
                # make it root to discourage repacking
                folder.name = ''
                for f in folder.all_files:
                    f.path = ''
                break
            elif tex_only:
                continue
            f_path = join(path, folder.full_path)
            utils.mkdir_silent(f_path)
            utime(f_path, (self.time, self.time))

            # handle different-case-but-same-name for Windows
            same_name = defaultdict(list)
            for name, obj in folder.files.items():
                same_name[name.lower()].append(obj)

            for files in same_name.values():
                # common base case of "sane ifs file"
                if len(files) == 1:
                    continue

                # make them 'a (1)', 'a (2)' etc
                if rename_dupes:
                    for i, f in enumerate(files[1:]):
                        base, ext = splitext(f.name)
                        f.name = base + ' ({})'.format(i + 1) + ext
                elif progress:  # warn if not silenced
                    all_names = ', '.join([f.name for f in files])
                    tqdm.write(
                        'WARNING: Files with same name and differing case will overwrite on Windows ({}). '
                        .format(all_names) +
                        'Use --rename-dupes to extract without loss')
                # else just do nothing

        # extract the files
        for f in tqdm(self.tree.all_files, disable=not progress):
            # allow recurse + tex_only to extract ifs files
            if tex_only and not isinstance(f, ImageFile) and not isinstance(
                    f, ImageCanvas) and not (recurse
                                             and f.name.endswith('.ifs')):
                continue
            f.extract(path, **kwargs)
            if progress:
                tqdm.write(f.full_path)
            if recurse and f.name.endswith('.ifs'):
                rpath = join(path, f.full_path)
                i = IFS(rpath)
                i.extract(progress=progress,
                          recurse=recurse,
                          tex_only=tex_only,
                          extract_manifest=extract_manifest,
                          path=rpath.replace('.ifs', '_ifs'),
                          rename_dupes=rename_dupes,
                          **kwargs)

        # you can't pickle open files, so this won't work. Perhaps there is a way around it?
        '''to_extract = (f for f in self.tree.all_files if not(tex_only and not isinstance(f, ImageFile) and not isinstance(f, ImageCanvas)))
Example 44
    sys.exit(0)

if arguments.data_format not in supported_formats:
    print "'"+arguments.data_format+"' is not a supported format."
    print "Supported formats are the following: ",supported_formats
    sys.exit(0)

if arguments.custom_config:
    arguments.data_format += "_CUSTOM"

os.system('sed -i "s/#define DATA_FORMAT .*/#define DATA_FORMAT %s/g" $CMSSW_BASE/src/OSUT3Analysis/AnaTools/interface/DataFormat.h' % (arguments.data_format))

# CMSSW_X_Y_Z
# Ignore patchN and preN
# Interpret things like X_Y_ROOT6 or _CLANG or _THREADED as X_Y_0
CMSSWVersionCode = 0
versionWords = os.environ["CMSSW_VERSION"].split("_")[1:]
if len(versionWords) >= 3:
    CMSSWVersionCode = int(versionWords[0]) << 16 # X
    CMSSWVersionCode += int(versionWords[1]) << 8 # Y
    if versionWords[2].isdigit():
        CMSSWVersionCode += int(versionWords[2])  # Z
os.system('sed -i "s/#define CMSSW_VERSION_CODE .*/#define CMSSW_VERSION_CODE %d/g" $CMSSW_BASE/src/OSUT3Analysis/AnaTools/interface/CMSSWVersion.h' % (CMSSWVersionCode))

if arguments.custom_config:
    os.system('sed -i "s:.*CustomDataFormat.h.*:%s:" $CMSSW_BASE/src/OSUT3Analysis/AnaTools/interface/DataFormat.h' % ('  #include \\\"' + arguments.custom_config + '\\\"'))

os.utime(os.environ["CMSSW_BASE"] + "/src/OSUT3Analysis/AnaTools/interface/DataFormat.h", None)
print "Data format changed to " + arguments.data_format + "."
print "Do not forget to recompile."
Example 45
 def _set_times_for_all_po_files(self):
     """
     Set access and modification times to the Unix epoch time for all the .po files.
     """
     for locale in self.LOCALES:
         os.utime(self.PO_FILE % locale, (0, 0))
Example 46
def copydata(settings, blkindex, blkset):
    inFn = 1
    inF = None
    outFn = 0
    outsz = 0
    outF = None
    outFname = None
    blkCount = 0

    lastDate = datetime.datetime(2000, 1, 1)
    highTS = 1408893517 - 315360000
    timestampSplit = False
    fileOutput = True
    setFileTime = False
    maxOutSz = settings['max_out_sz']
    if 'output' in settings:
        fileOutput = False
    if settings['file_timestamp'] != 0:
        setFileTime = True
    if settings['split_timestamp'] != 0:
        timestampSplit = True

    while True:
        if not inF:
            fname = "%s/blk%04d.dat" % (settings['input'], inFn)
            print("Input file" + fname)
            try:
                inF = open(fname, "rb")
            except IOError:
                print "Done"
                return

        inhdr = inF.read(8)
        if (not inhdr or (inhdr[0] == "\0")):
            inF.close()
            inF = None
            inFn = inFn + 1
            continue

        inMagic = inhdr[:4]
        if (inMagic != settings['netmagic']):
            print("Invalid magic:" + inMagic)
            return
        inLenLE = inhdr[4:]
        su = struct.unpack("<I", inLenLE)
        inLen = su[0]
        rawblock = inF.read(inLen)
        blk_hdr = rawblock[:80]

        hash_str = 0
        if blkCount > 319000:
            hash_str = calc_hash_str(blk_hdr)
        else:
            hash_str = calc_scrypt_hash_str(blk_hdr)

        if hash_str not in blkset:
            print("Skipping unknown block " + hash_str)
            continue

        if blkindex[blkCount] != hash_str:
            print("Out of order block.")
            print("Expected " + blkindex[blkCount])
            print("Got " + hash_str)
            sys.exit(1)

        if not fileOutput and ((outsz + inLen) > maxOutSz):
            outF.close()
            if setFileTime:
                os.utime(outFname, (int(time.time()), highTS))
            outF = None
            outFname = None
            outFn = outFn + 1
            outsz = 0

        (blkDate, blkTS) = get_blk_dt(blk_hdr)
        if timestampSplit and (blkDate > lastDate):
            print("New month " + blkDate.strftime("%Y-%m") + " @ " + hash_str)
            lastDate = blkDate
            if outF:
                outF.close()
                if setFileTime:
                    os.utime(outFname, (int(time.time()), highTS))
                outF = None
                outFname = None
                outFn = outFn + 1
                outsz = 0

        if not outF:
            if fileOutput:
                outFname = settings['output_file']
            else:
                outFname = "%s/blk%05d.dat" % (settings['output'], outFn)
            print("Output file" + outFname)
            outF = open(outFname, "wb")

        outF.write(inhdr)
        outF.write(rawblock)
        outsz = outsz + inLen + 8

        blkCount = blkCount + 1
        if blkTS > highTS:
            highTS = blkTS

        if (blkCount % 1000) == 0:
            print("Wrote " + str(blkCount) + " blocks")
Example 47
 def touch_pymods(self):
     # force a rebuild of all modules that use OpenSSL APIs
     for fname in self.module_files:
         os.utime(fname)
Example 48
 def _touch(self, fname, times=None):
     with open(fname, 'a'):
         utime(fname, times)
Example 49
    def test_bind_ports_cache(self):
        test_policies = [StoragePolicy(0, 'aay', True),
                         StoragePolicy(1, 'bee', False),
                         StoragePolicy(2, 'cee', False)]

        my_ips = ['1.2.3.4', '2.3.4.5']
        other_ips = ['3.4.5.6', '4.5.6.7']
        bind_ip = my_ips[1]
        devs_by_ring_name1 = {
            'object': [  # 'aay'
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
                 'port': 6006},
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
                 'port': 6007},
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
                 'port': 6008},
                None,
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
                 'port': 6009}],
            'object-1': [  # 'bee'
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
                 'port': 6006},  # dupe
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
                 'port': 6010},
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
                 'port': 6011},
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
                 'port': 6012}],
            'object-2': [  # 'cee'
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
                 'port': 6010},  # on our IP and a not-us IP
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0],
                 'port': 6013},
                None,
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
                 'port': 6014},
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
                 'port': 6015}],
        }
        devs_by_ring_name2 = {
            'object': [  # 'aay'
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
                 'port': 6016},
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
                 'port': 6019}],
            'object-1': [  # 'bee'
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1],
                 'port': 6016},  # dupe
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
                 'port': 6022}],
            'object-2': [  # 'cee'
                {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0],
                 'port': 6020},
                {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1],
                 'port': 6025}],
        }
        ring_files = [ring_name + '.ring.gz'
                      for ring_name in sorted(devs_by_ring_name1)]

        def _fake_load(gz_path, stub_objs, metadata_only=False):
            return RingData(
                devs=stub_objs[os.path.basename(gz_path)[:-8]],
                replica2part2dev_id=[],
                part_shift=24)

        with mock.patch(
            'swift.common.storage_policy.RingData.load'
        ) as mock_ld, \
                patch_policies(test_policies), \
                mock.patch('swift.common.storage_policy.whataremyips') \
                as mock_whataremyips, \
                temptree(ring_files) as tempdir:
            mock_whataremyips.return_value = my_ips

            cache = BindPortsCache(tempdir, bind_ip)

            self.assertEqual([
                mock.call(bind_ip),
            ], mock_whataremyips.mock_calls)
            mock_whataremyips.reset_mock()

            mock_ld.side_effect = partial(_fake_load,
                                          stub_objs=devs_by_ring_name1)
            self.assertEqual(set([
                6006, 6008, 6011, 6010, 6014,
            ]), cache.all_bind_ports_for_node())
            self.assertEqual([
                mock.call(os.path.join(tempdir, ring_files[0]),
                          metadata_only=True),
                mock.call(os.path.join(tempdir, ring_files[1]),
                          metadata_only=True),
                mock.call(os.path.join(tempdir, ring_files[2]),
                          metadata_only=True),
            ], mock_ld.mock_calls)
            mock_ld.reset_mock()

            mock_ld.side_effect = partial(_fake_load,
                                          stub_objs=devs_by_ring_name2)
            self.assertEqual(set([
                6006, 6008, 6011, 6010, 6014,
            ]), cache.all_bind_ports_for_node())
            self.assertEqual([], mock_ld.mock_calls)

            # but when all the file mtimes are made different, it'll
            # reload
            for gz_file in [os.path.join(tempdir, n)
                            for n in ring_files]:
                os.utime(gz_file, (88, 88))

            self.assertEqual(set([
                6016, 6020,
            ]), cache.all_bind_ports_for_node())
            self.assertEqual([
                mock.call(os.path.join(tempdir, ring_files[0]),
                          metadata_only=True),
                mock.call(os.path.join(tempdir, ring_files[1]),
                          metadata_only=True),
                mock.call(os.path.join(tempdir, ring_files[2]),
                          metadata_only=True),
            ], mock_ld.mock_calls)
            mock_ld.reset_mock()

            # Don't do something stupid like crash if a ring file is missing.
            os.unlink(os.path.join(tempdir, 'object-2.ring.gz'))

            self.assertEqual(set([
                6016, 6020,
            ]), cache.all_bind_ports_for_node())
            self.assertEqual([], mock_ld.mock_calls)

        # whataremyips() is only called in the constructor
        self.assertEqual([], mock_whataremyips.mock_calls)
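
Example no. 49 exercises a cache keyed on ring-file mtimes: results are reused until a file's mtime changes, and a vanished file must not crash the lookup. A minimal sketch of that pattern, under hypothetical names (MtimeCache, loader):

import os

class MtimeCache(object):
    # Re-run an expensive loader only when the backing file's mtime changes.

    def __init__(self, path, loader):
        self._path = path
        self._loader = loader
        self._mtime = None
        self._value = None

    def get(self):
        try:
            mtime = os.stat(self._path).st_mtime
        except OSError:
            return self._value  # file missing: keep serving the last good value
        if mtime != self._mtime:
            self._value = self._loader(self._path)
            self._mtime = mtime
        return self._value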
Example no. 50
    # Remove the copied py file
    os.remove(mainscript)
    resource = "dist/" + name + ".app/Contents/Resources/"

    # Try to locate qt_menu
    # Let's try the port version first!
    if os.path.isfile("/opt/local/lib/Resources/qt_menu.nib"):
        qt_menu_location = "/opt/local/lib/Resources/qt_menu.nib"
    else:
        # No dice? Then let's try the brew version
        if os.path.exists("/usr/local/Cellar"):
            qt_menu_location = os.popen(
                "find /usr/local/Cellar -name qt_menu.nib | tail -n 1").read()
        # no brew, check /opt/local
        else:
            qt_menu_location = os.popen(
                "find /opt/local -name qt_menu.nib | tail -n 1").read()
        qt_menu_location = re.sub('\n', '', qt_menu_location)

    if len(qt_menu_location) == 0:
        print("Sorry, couldn't find your qt_menu.nib; this probably won't work")
    else:
        print("Found your qt_menu.nib: " + qt_menu_location)

    # Need to include a copy of qt_menu.nib
    shutil.copytree(qt_menu_location, resource + "qt_menu.nib")
    # Need to touch qt.conf to avoid loading 2 sets of Qt libraries
    fname = resource + "qt.conf"
    with open(fname, 'a'):
        os.utime(fname, None)
Example no. 51
 def utime(self, file_path, times):
     os.utime(file_path, times)
Example no. 52
def main():

    argument_spec = openstack_full_argument_spec(
        id=dict(required=True),
        ttl=dict(default=7, type='int', min=1),
        state=dict(default='present', choices=['expired', 'present']),
        _cache_dir=dict(required=True),
        _cache_file=dict(required=True),
        _chunk_size=dict(default=64 * 1024, type='int'),
        _prefetched_path=dict(default=None),
        scp_continue_on_error=dict(default=False, type='bool'))
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)

    image_id = module.params['id']
    cache_dir = module.params['_cache_dir']
    cache_file = module.params['_cache_file']
    chunk_size = module.params['_chunk_size']
    prefetched_path = module.params['_prefetched_path']
    scp_continue = module.params['scp_continue_on_error']

    ttl_days = module.params['ttl']

    result = dict(changed=False,
                  actions=[],
                  image=None,
                  cache_file='',
                  exists_in_cache=False,
                  mtime=0)

    sdk, cloud = openstack_cloud_from_module(module, min_version='0.11.3')

    try:
        result['exists_in_cache'] = exists_in_cache = os.path.exists(
            cache_file)
        if exists_in_cache:
            result['cache_file'] = cache_file

        image = cloud.image.find_image(name_or_id=image_id)
        exists_in_glance = image is not None
        if exists_in_glance:
            result['image'] = image.to_dict()

        if module.params['state'] == 'present':
            if not exists_in_cache:

                if not exists_in_glance:
                    module.fail_json(msg="Image not found in glance: %s" %
                                     image_id)

                md5 = hashlib.md5()
                if prefetched_path:
                    result['actions'].append(
                        {'name': 'Verify pre-fetched image checksum'})
                    with open(prefetched_path, 'rb') as prefetched_image_file:
                        while True:
                            chunk = prefetched_image_file.read(chunk_size)
                            if not chunk:
                                break
                            md5.update(chunk)
                    prefetched_checksum = md5.hexdigest()
                    if prefetched_checksum == image.checksum:
                        result['actions'].append({
                            'name':
                            'Verify pre-fetched image',
                            'result':
                            True,
                            'expected_md5':
                            image.checksum,
                            'actual_md5':
                            prefetched_checksum
                        })
                        # FIXME: chown to the container nova uid (42436)
                        # until we can run within the container
                        os.chown(prefetched_path, 42436, 42436)
                        os.rename(prefetched_path, cache_file)
                    else:
                        result['actions'].append({
                            'name':
                            'Verify pre-fetched image',
                            'result':
                            False,
                            'expected_md5':
                            image.checksum,
                            'actual_md5':
                            prefetched_checksum
                        })
                        if not scp_continue:
                            module.fail_json(
                                msg="Pre-fetched image checksum failed")
                        # Ignore it and download direct from glance.
                        # As we did not create it we should not remove it.
                        prefetched_path = ''

                if not prefetched_path:
                    with tempfile.NamedTemporaryFile(
                            'wb', dir=cache_dir,
                            delete=False) as temp_cache_file:
                        try:
                            md5 = hashlib.md5()
                            image_stream = cloud.image.download_image(
                                image, stream=True)
                            try:
                                for chunk in image_stream.iter_content(
                                        chunk_size=chunk_size):
                                    md5.update(chunk)
                                    temp_cache_file.write(chunk)
                            finally:
                                image_stream.close()
                                temp_cache_file.close()

                            download_checksum = md5.hexdigest()
                            if download_checksum != image.checksum:
                                result['actions'].append({
                                    'name':
                                    'Verify downloaded image',
                                    'result':
                                    False,
                                    'expected_md5':
                                    image.checksum,
                                    'actual_md5':
                                    download_checksum
                                })
                                module.fail_json(
                                    msg="Image data does not match checksum")
                            result['actions'].append({
                                'name':
                                'Verify downloaded image',
                                'result':
                                True,
                                'expected_md5':
                                image.checksum,
                                'actual_md5':
                                download_checksum
                            })

                            # FIXME: chown to the container nova uid (42436)
                            #        until we can run within the container
                            os.chown(temp_cache_file.name, 42436, 42436)
                            os.rename(temp_cache_file.name, cache_file)
                            result['changed'] = True
                        finally:
                            try:
                                os.unlink(temp_cache_file.name)
                            except Exception:
                                pass

            # Set the mtime in the future to prevent nova cleanup
            cache_file_stat = os.stat(cache_file)
            expiry_ts = ttl_to_ts(ttl_days)
            now = time.time()
            if cache_file_stat.st_mtime != expiry_ts:
                os.utime(cache_file, (now, expiry_ts))
                result['actions'].append({
                    'name': 'Update mtime',
                    'from': cache_file_stat.st_mtime,
                    'to': expiry_ts
                })
                result['changed'] = True

        else:  # expired
            if not exists_in_cache:
                result['changed'] = False
            else:
                # Set the mtime to epoch to enable nova cleanup
                now = time.time()
                ts = 0
                cache_file_stat = os.stat(cache_file)
                if cache_file_stat.st_mtime > ts:
                    os.utime(cache_file, (now, ts))
                    result['actions'].append({
                        'name': 'Update mtime',
                        'from': cache_file_stat.st_mtime,
                        'to': ts
                    })
                    result['changed'] = True

                cache_file_stat = os.stat(cache_file)
                result['mtime'] = cache_file_stat.st_mtime
                result['expires'] = time.strftime(
                    "%a, %d %b %Y %H:%M:%S %z",
                    time.localtime(cache_file_stat.st_mtime))

        module.exit_json(**result)

    except sdk.exceptions.OpenStackCloudException as e:
        module.fail_json(msg=str(e), extra_data=e.extra_data)
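
The module above repurposes mtime as a time-to-live marker: a timestamp in the future tells the periodic cleaner to leave the cached image alone, while an epoch timestamp flags it for deletion. A minimal sketch of that convention, with a hypothetical ttl_to_ts helper standing in for the one the module calls:

import os
import time

def ttl_to_ts(ttl_days):
    # hypothetical helper: an expiry point ttl_days from now, as an epoch timestamp
    return int(time.time()) + ttl_days * 24 * 60 * 60

def mark_fresh(path, ttl_days):
    os.utime(path, (time.time(), ttl_to_ts(ttl_days)))  # mtime in the future

def mark_expired(path):
    os.utime(path, (time.time(), 0))  # mtime at the epoch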
Example no. 53
 def _touch(self, key):
     path = self._get_data_path(key)
     if os.path.exists(path):
         os.utime(path, None)
     else:
         open(path, 'a').close()
Example no. 54
    def _send_message(self, filename):
        fromaddr = ''
        toaddrs = ()
        head, tail = os.path.split(filename)
        tmp_filename = os.path.join(head, '.sending-' + tail)
        rejected_filename = os.path.join(head, '.rejected-' + tail)
        try:
            # perform a series of operations in an attempt to ensure
            # that no two threads/processes send this message
            # simultaneously as well as attempting to not generate
            # spurious failure messages in the log; a diagram that
            # represents these operations is included in a
            # comment above this class
            try:
                # find the age of the tmp file (if it exists)
                mtime = os.stat(tmp_filename)[stat.ST_MTIME]
            except OSError as e:
                if e.errno == errno.ENOENT: # file does not exist
                    # the tmp file could not be stat'ed because it
                    # doesn't exist, that's fine, keep going
                    age = None
                else: #pragma NO COVER
                    # the tmp file could not be stat'ed for some reason
                    # other than not existing; we'll report the error
                    raise
            else:
                age = time.time() - mtime

            # if the tmp file exists, check its age
            if age is not None:
                try:
                    if age > MAX_SEND_TIME:
                        # the tmp file is "too old"; this suggests
                        # that during an attempt to send it, the
                        # process died; remove the tmp file so we
                        # can try again
                        os.remove(tmp_filename)
                    else:
                        # the tmp file is "new", so someone else may
                        # be sending this message, try again later
                        return
                    # if we get here, the file existed, but was too
                    # old, so it was unlinked
                except OSError as e: #pragma NO COVER
                    if e.errno == errno.ENOENT: # file does not exist
                        # it looks like someone else removed the tmp
                        # file, that's fine, we'll try to deliver the
                        # message again later
                        return

            # now we know that the tmp file doesn't exist, we need to
            # "touch" the message before we create the tmp file so the
            # mtime will reflect the fact that the file is being
            # processed (there is a race here, but it's OK for two or
            # more processes to touch the file "simultaneously")
            try:
                os.utime(filename, None)
            except OSError as e: #pragma NO COVER
                if e.errno == errno.ENOENT: # file does not exist
                    # someone removed the message before we could
                    # touch it, no need to complain, we'll just keep
                    # going
                    return
                else:
                    # Some other error, propagate it
                    raise

            # creating this hard link will fail if another process is
            # also sending this message
            try:
                _os_link(filename, tmp_filename)
            except OSError as e: #pragma NO COVER
                if e.errno == errno.EEXIST: # file exists, *nix
                    # it looks like someone else is sending this
                    # message too; we'll try again later
                    return
                else:
                    # Some other error, propagate it
                    raise

            # FIXME: Need to test in Windows.  If
            # test_concurrent_delivery passes, this stanza can be
            # deleted.  Otherwise we probably need to catch
            # WindowsError and check for corresponding error code.
            #except error as e:
            #    if e[0] == 183 and e[1] == 'CreateHardLink':
            #        # file exists, win32
            #        return

            # read message file and send contents
            with open(filename) as f:
                fromaddr, toaddrs, message = self._parseMessage(f)
            try:
                self.mailer.send(fromaddr, toaddrs, message)
            except smtplib.SMTPResponseException as e:
                if 500 <= e.smtp_code <= 599:
                    # permanent error, ditch the message
                    self.log.error(
                        "Discarding email from %s to %s due to"
                        " a permanent error: %s",
                        fromaddr, ", ".join(toaddrs), e.args)
                    _os_link(filename, rejected_filename)
                else:
                    # Log an error and retry later
                    raise

            try:
                os.remove(filename)
            except OSError as e: #pragma NO COVER
                if e.errno == errno.ENOENT: # file does not exist
                    # someone else unlinked the file; oh well
                    pass
                else:
                    # something bad happened, log it
                    raise

            try:
                os.remove(tmp_filename)
            except OSError as e: #pragma NO COVER
                if e.errno == errno.ENOENT: # file does not exist
                    # someone else unlinked the file; oh well
                    pass
                else:
                    # something bad happened, log it
                    raise

            # TODO: maybe log the Message-Id of the message sent
            self.log.info("Mail from %s to %s sent.",
                          fromaddr, ", ".join(toaddrs))

        # Catch errors and log them here
        except:
            if fromaddr != '' or toaddrs != ():
                self.log.error(
                    "Error while sending mail from %s to %s.",
                    fromaddr, ", ".join(toaddrs), exc_info=True)
            else:
                self.log.error(
                    "Error while sending mail : %s ",
                    filename, exc_info=True)
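
The delivery loop above combines two filesystem tricks: os.utime refreshes the message's mtime so concurrent workers treat it as freshly claimed, and os.link serves as an atomic mutex, failing with EEXIST when another process holds the claim. A minimal sketch of the hard-link claim on POSIX, with a hypothetical claim() helper:

import errno
import os

def claim(path):
    # Try to claim `path` by hard-linking it to a lock name; link() is
    # atomic, so at most one process can succeed.
    try:
        os.link(path, path + '.lock')
        return True
    except OSError as e:
        if e.errno == errno.EEXIST:
            return False  # someone else holds the claim
        raise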
Example no. 55
def touch(fname, times=None):
    """
    Modify the update and last modification times for a file.
    """
    with open(fname, 'a', encoding="utf-8", errors="ignore"):
        os.utime(fname, times)
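
For illustration, two hypothetical calls; with times left as None, os.utime sets both timestamps to the current time:

touch('notes.txt')           # create if missing, atime/mtime -> now
touch('notes.txt', (0, 0))   # rewind both timestamps to the epoch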
Example no. 56
    def run_case_once(self,
                      testcase: DataDrivenTestCase,
                      incremental_step: int = 0) -> None:
        find_module_clear_caches()
        original_program_text = '\n'.join(testcase.input)
        module_data = self.parse_module(original_program_text,
                                        incremental_step)

        if incremental_step:
            if incremental_step == 1:
                # In run 1, copy program text to program file.
                for module_name, program_path, program_text in module_data:
                    if module_name == '__main__':
                        with open(program_path, 'w') as f:
                            f.write(program_text)
                        break
            elif incremental_step > 1:
                # In runs 2+, copy *.[num] files to * files.
                for dn, dirs, files in os.walk(os.curdir):
                    for file in files:
                        if file.endswith('.' + str(incremental_step)):
                            full = os.path.join(dn, file)
                            target = full[:-2]
                            shutil.copy(full, target)

                            # In some systems, mtime has a resolution of 1 second which can cause
                            # annoying-to-debug issues when a file has the same size after a
                            # change. We manually set the mtime to circumvent this.
                            new_time = os.stat(target).st_mtime + 1
                            os.utime(target, times=(new_time, new_time))

        # Parse options after moving files (in case mypy.ini is being moved).
        options = self.parse_options(original_program_text, testcase,
                                     incremental_step)
        options.use_builtins_fixtures = True
        options.show_traceback = True
        if 'optional' in testcase.file:
            options.strict_optional = True
        if incremental_step:
            options.incremental = True
        else:
            options.cache_dir = os.devnull  # Don't waste time writing the cache

        sources = []
        for module_name, program_path, program_text in module_data:
            # Always set to none so we're forced to reread the module in incremental mode
            sources.append(
                BuildSource(program_path, module_name,
                            None if incremental_step else program_text))
        res = None
        try:
            res = build.build(sources=sources,
                              options=options,
                              alt_lib_path=test_temp_dir)
            a = res.errors
        except CompileError as e:
            a = e.messages
        a = normalize_error_messages(a)

        # Make sure error messages match
        if incremental_step == 0:
            # Not incremental
            msg = 'Unexpected type checker output ({}, line {})'
            output = testcase.output
        elif incremental_step == 1:
            msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
            output = testcase.output
        elif incremental_step > 1:
            msg = ('Unexpected type checker output in incremental, run {}'.
                   format(incremental_step) + ' ({}, line {})')
            output = testcase.output2.get(incremental_step, [])
        else:
            raise AssertionError()

        if output != a and self.update_data:
            update_testcase_output(testcase, a)
        assert_string_arrays_equal(output, a,
                                   msg.format(testcase.file, testcase.line))

        if incremental_step and res:
            if options.follow_imports == 'normal' and testcase.output is None:
                self.verify_cache(module_data, a, res.manager)
            if incremental_step > 1:
                suffix = '' if incremental_step == 2 else str(
                    incremental_step - 1)
                self.check_module_equivalence(
                    'rechecked' + suffix,
                    testcase.expected_rechecked_modules.get(incremental_step -
                                                            1),
                    res.manager.rechecked_modules)
                self.check_module_equivalence(
                    'stale' + suffix,
                    testcase.expected_stale_modules.get(incremental_step - 1),
                    res.manager.stale_modules)
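
The comment inside run_case_once names a real pitfall: on filesystems with one-second mtime resolution, rewriting a file with same-sized content can be invisible to mtime-based change detection. A standalone sketch of the workaround, under a hypothetical bump_mtime name:

import os

def bump_mtime(path, seconds=1):
    # Nudge mtime forward so change detectors that compare timestamps
    # cannot miss a rewrite on coarse-resolution filesystems.
    new_time = os.stat(path).st_mtime + seconds
    os.utime(path, times=(new_time, new_time))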
Example no. 57
    def process_file(self, _file, destination, media, **kwargs):
        move = False
        if ('move' in kwargs):
            move = kwargs['move']

        allow_duplicate = False
        if ('allowDuplicate' in kwargs):
            allow_duplicate = kwargs['allowDuplicate']

        if (not media.is_valid()):
            print('%s is not a valid media file. Skipping...' % _file)
            return

        media.set_original_name()
        metadata = media.get_metadata()

        directory_name = self.get_folder_path(metadata)

        dest_directory = os.path.join(destination, directory_name)
        file_name = self.get_file_name(media)
        dest_path = os.path.join(dest_directory, file_name)

        db = Db()
        checksum = db.checksum(_file)
        if (checksum is None):
            log.info('Could not get checksum for %s. Skipping...' % _file)
            return

        # If duplicates are not allowed then we check if we've seen this file
        #  before via checksum. We also check that the file exists at the
        #   location we believe it to be.
        # If we find a checksum match but the file doesn't exist where we
        #  believe it to be then we write a debug log and proceed to import.
        checksum_file = db.get_hash(checksum)
        if (allow_duplicate is False and checksum_file is not None):
            if (os.path.isfile(checksum_file)):
                log.info('%s already exists at %s. Skipping...' %
                         (_file, checksum_file))
                return
            else:
                log.info(
                    '%s matched checksum but file not found at %s. Importing again...'
                    % (  # noqa
                        _file, checksum_file))

        # If source and destination are identical then
        #  we should not write the file. gh-210
        if (_file == dest_path):
            print('Final source and destination path should not be identical')
            return

        self.create_directory(dest_directory)

        if (move is True):
            stat = os.stat(_file)
            shutil.move(_file, dest_path)
            os.utime(dest_path, (stat.st_atime, stat.st_mtime))
        else:
            compatability._copyfile(_file, dest_path)
            self.set_utime_from_metadata(media.get_metadata(), dest_path)

        db.add_hash(checksum, dest_path)
        db.update_hash_db()

        return dest_path
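
When moving, the importer above captures the source file's stat record first and re-applies its access and modification times afterwards, since a cross-filesystem shutil.move can otherwise reset them. The idiom in isolation, with a hypothetical move_preserving_times name:

import os
import shutil

def move_preserving_times(src, dst):
    st = os.stat(src)                          # capture times before the move
    shutil.move(src, dst)
    os.utime(dst, (st.st_atime, st.st_mtime))  # restore them on the destination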
Example no. 58
def write_workchain(outlines, directory=None, filename=None):
    """
    Given a list of string formatted outlines, write the corresponding workchains to file
    """
    dirpath = os.path.dirname(os.path.realpath(__file__))
    template_dir = os.path.join(dirpath, 'template')
    template_file_base = os.path.join(template_dir, 'base.tpl')
    template_file_workchain = os.path.join(template_dir, 'workchain.tpl')

    if directory is None:
        directory = os.path.join(dirpath, 'polish_workchains')

    if filename is None:
        filename = os.path.join(directory, 'polish.py')
    else:
        filename = os.path.join(directory, filename)

    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    try:
        init_file = os.path.join(directory, '__init__.py')
        os.utime(init_file, None)
    except OSError:
        open(init_file, 'a').close()

    with open(template_file_base, 'r') as handle:
        template_base = handle.readlines()

    with open(template_file_workchain, 'r') as handle:
        template_workchain = Template(handle.read())

    with open(filename, 'w') as handle:

        for line in template_base:
            handle.write(line)
        handle.write('\n\n')

        counter = len(outlines) - 1
        for outline in outlines:

            outline_string = ''
            for subline in outline.split('\n'):
                outline_string += '\t\t\t{}\n'.format(subline)

            if counter == len(outlines) - 1:
                child_class = None
            else:
                child_class = 'Polish{:02d}WorkChain'.format(counter + 1)

            subs = {
                'class_name': 'Polish{:02d}WorkChain'.format(counter),
                'child_class': child_class,
                'outline': outline_string,
            }
            handle.write(template_workchain.substitute(**subs))
            handle.write('\n\n')

            counter -= 1

    return filename
Example no. 59
 def set_version(self, bundle, env, filename, version):
     # Update the mtime of the newly created file with the version
     os.utime(filename, (-1, version))
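
Example no. 59 encodes a version number directly in the file's mtime, so the read side is just a stat call. A minimal sketch with a hypothetical get_version:

import os

def get_version(filename):
    # complementary read side: the version is whatever mtime was set to
    return int(os.stat(filename).st_mtime)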
Example no. 60
 def touch(self):
     curtime = time.time()
     os.utime(self._picklepath, (curtime, curtime))