def test_should_delete_files_and_folders_and_files_withing_sub_folders(self):
    """
    folder_delete_content() should recursively remove files and
    sub-folders, but keep the folder itself in place.
    """
    # build a small directory tree inside the temp folder
    root = os.path.join(tempfile.gettempdir(), 'base_folder')
    nested = os.path.join(root, 'sub_folder')
    os.mkdir(root)
    os.mkdir(nested)
    file_put_contents(os.path.join(root, 'test.txt'), 'Foo')
    file_put_contents(os.path.join(nested, 'subtest.txt'), 'Bar')

    try:
        # delete content of folder
        folder_delete_content(root)

        # the folder itself survives...
        self.assertTrue(os.path.isdir(root))

        # ...but nothing remains inside of it
        self.assertTrue(folder_is_empty(root))
    finally:
        if os.path.isdir(root):
            shutil.rmtree(root)
def test_cleanup_should_remove_all_files_and_folders_that_are_not_within_the_cache(self):
    """
    cleanup() should wipe every file and folder from the cache root that
    the cache system did not create itself.
    """
    # create cache with a single known page
    self.cache.add('index.html', None, None, '<h1>Foo</h1>', minify_html=False)
    self.cache.write()

    # plant files and folders the cache system is not aware of
    stray_file = os.path.join(settings.CACHE_ROOT, 'foo.html')
    stray_dir = os.path.join(settings.CACHE_ROOT, 'test')
    stray_sub_dir = os.path.join(settings.CACHE_ROOT, 'test', 'foo')
    stray_sub_file = os.path.join(settings.CACHE_ROOT, 'test', 'bar.html')
    file_put_contents(stray_file, 'Foo')
    os.mkdir(stray_dir)
    os.mkdir(stray_sub_dir)
    file_put_contents(stray_sub_file, 'Bar')

    # cleanup cache content
    self.cache.cleanup()

    # everything except the cache's own content should be gone
    self.assertFalse(os.path.isfile(stray_file))
    self.assertFalse(os.path.isfile(stray_sub_file))
    self.assertFalse(os.path.isdir(stray_dir))
    self.assertFalse(os.path.isdir(stray_sub_dir))
def _generate_file(self, filename):
    """
    Create a near-empty file with the given filename within the static
    root folder, record it for later cleanup and return the full path
    to the file that was created.
    """
    # bug fix: os.path has no makedirs() attribute -- the correct call is
    # os.makedirs(), which creates intermediate directories as required.
    # The original os.path.makedirs() raised AttributeError whenever
    # STATIC_ROOT did not exist yet.
    if not os.path.exists(settings.STATIC_ROOT):
        os.makedirs(settings.STATIC_ROOT)

    filename = os.path.join(settings.STATIC_ROOT, filename)
    file_put_contents(filename, ' ')

    # keep track of generated files so the test can remove them afterwards
    self.filenames.append(filename)
    return filename
def test_should_set_file_mod_time(self):
    """
    file_set_mtime() should change a file's modification time such that
    file_get_mtime() reads back exactly the timestamp that was set.
    """
    filename = os.path.join(tempfile.gettempdir(), 'mtime_test')
    file_put_contents(filename, 'test')

    expected = datetime(2016, 10, 8, 13, 40, 13)
    file_set_mtime(filename, expected)

    self.assertEqual(expected, file_get_mtime(filename))
def test_should_overwrite_existing_file(self):
    """
    save_deploy_timestamp() should replace whatever content the timestamp
    file already contains with the current timestamp in ISO format.
    """
    file_put_contents(get_deploy_timestamp_filename(), 'hello world')
    save_deploy_timestamp()

    timestamp = datetime.strptime(
        file_get_contents(get_deploy_timestamp_filename()),
        '%Y-%m-%dT%H:%M:%S'
    )

    # bug fix: the stored format only has second precision, so a strict
    # equality check against datetime.now() (which carries microseconds)
    # is inherently flaky. Assert the stored timestamp is current within
    # a small tolerance instead.
    delta = datetime.now() - timestamp
    self.assertTrue(0 <= delta.total_seconds() < 5)
def save_deploy_timestamp():
    """
    Persist the current timestamp (formatted via TIMESTAMP_FORMAT) to the
    file that records when the last deployment happened and return the
    timestamp that was written.
    """
    now = datetime.now()
    file_put_contents(
        get_deploy_timestamp_filename(),
        now.strftime(TIMESTAMP_FORMAT)
    )
    return now
def test_should_return_true_if_both_files_contain_the_same_content(self):
    """
    is_same_file() should be True for two distinct files holding
    identical content.
    """
    tmp = tempfile.gettempdir()
    path_a = os.path.join(tmp, 'a.txt')
    path_b = os.path.join(tmp, 'b.txt')
    file_put_contents(path_a, 'Hello World')
    file_put_contents(path_b, 'Hello World')

    self.assertTrue(is_same_file(path_a, path_b))
def test_should_return_false_if_folder_contains_files(self):
    """
    folder_is_empty() should be False for a folder that holds at least
    one file.
    """
    folder = os.path.join(tempfile.gettempdir(), 'not_empty_folder')
    os.mkdir(folder)
    file_put_contents(os.path.join(folder, 'test.txt'), 'Foo')

    try:
        self.assertFalse(folder_is_empty(folder))
    finally:
        # always remove the scratch folder again
        shutil.rmtree(folder)
def test_get_mtime_should_return_last_mod_time_of_file(self):
    """
    The cache's get_mtime() should report the modification time that was
    previously applied to the file via file_set_mtime().
    """
    filename = os.path.join(tempfile.gettempdir(), 'foo')
    file_put_contents(filename, 'test')

    try:
        expected = datetime(2016, 11, 18)
        file_set_mtime(filename, expected)
        self.assertEqual(expected, self.cache.get_mtime(filename))
    finally:
        # clean up the scratch file
        if os.path.isfile(filename):
            os.remove(filename)
def test_should_write_file_content_as_unicode(self):
    """
    file_put_contents() should store unicode text that
    file_get_contents() reads back unchanged and as unicode.
    """
    filename = os.path.join(tempfile.gettempdir(), 'test.txt')
    content = unicode('Hello World')
    file_put_contents(filename, content)

    result = file_get_contents(filename)
    self.assertEqual(result, content)
    self.assertIsInstance(result, unicode)

    os.remove(filename)
def test_should_return_false_if_both_files_do_not_contain_the_same_content(self):
    """
    is_same_file() should be False for two files whose content differs.
    """
    tmp = tempfile.gettempdir()
    path_a = os.path.join(tmp, 'a.txt')
    path_b = os.path.join(tmp, 'b.txt')
    file_put_contents(path_a, 'Hello Foo')
    file_put_contents(path_b, 'Hello Bar')

    self.assertFalse(is_same_file(path_a, path_b))
def _report_status(self):
    """
    Serialise the current task status as JSON and write it to the status
    file within the public html root.
    """
    status = {
        'totalRecords': self._total_records,
        'recordCounter': self._record_counter,
        'message': self._message,
        'stopped': self._stopped
    }
    status_filename = os.path.join(
        settings.PUBLIC_HTML_ROOT,
        self.STATUS_FILENAME
    )
    file_put_contents(status_filename, to_json(status))
def _copy_file(src_path, dst_path, context):
    """
    Copy the given file to the given destination path. For text files the
    content is substituted against the given context; binary files are
    copied verbatim.
    """
    if not is_text_file(src_path):
        # binary: straight byte-for-byte copy, no substitution
        shutil.copyfile(src_path, dst_path)
        return

    # text: substitute content against the given context
    content = _get_substituted_content(file_get_contents(src_path), context)
    file_put_contents(dst_path, content)
def handle(self, *args, **options): """ Run command. """ # load source data filename = os.path.join(settings.CUBANE_PATH, 'rawdata', 'countries.json') json = decode_json(file_get_contents(filename)) # generate xml file content xml = [] xml.append('<?xml version="1.0" encoding="utf-8"?>') xml.append('<django-objects version="1.0">') for c in json: # country name(s) name = c.get('name') # num code num_code = c.get('ccn3') # calling code calling_code = c.get('callingCode') if len(calling_code) >= 1: calling_code = calling_code[0] else: calling_code = None # generate xml xml.append(' <object pk="%s" model="cubane.country">' % c.get('cca2')) xml.append(' <field type="CharField" name="name">%s</field>' % name.get('common').upper()) xml.append(' <field type="BooleanField" name="flag_state">0</field>') xml.append(' <field type="CharField" name="printable_name">%s</field>' % name.get('common')) xml.append(' <field type="CharField" name="iso3">%s</field>' % c.get('cca3')) xml.append(' <field type="BooleanField" name="landlocked">%s</field>' % ('1' if c.get('landlocked') else '0')) if num_code: xml.append(' <field type="PositiveSmallIntegerField" name="numcode">%s</field>' % num_code) xml.append(' <field type="CharField" name="calling_code">%s</field>' % calling_code) xml.append(' </object>') xml.append('</django-objects>') # save to xml file (fixture) filename = os.path.join(settings.CUBANE_PATH, 'fixtures', 'cubane', 'country.xml') file_put_contents(filename, '\n'.join(xml)) # import fixture file call_command('loaddata', 'cubane/country.xml', interactive=False)
def notify(cls):
    """
    Signal the task runner (via its signal file) to be executed shortly.
    """
    signal_filename = cls.get_signal_filename()
    file_put_contents(signal_filename, '1')
def test_should_return_none_if_incorrect_timestamp_format(self):
    """
    load_deploy_timestamp() should yield None when the timestamp file does
    not contain a valid ISO-formatted timestamp.
    """
    file_put_contents(
        get_deploy_timestamp_filename(),
        'not an ISO formatted timestamp'
    )
    self.assertIsNone(load_deploy_timestamp())
def _store_font_data(self, font_declaration, descriptor): """ Store the given font data within the store cache. """ # get base path for font font_name = descriptor.font_name name = FontCache.get_unique_font_name(font_name) path = os.path.join(settings.CUBANE_FONT_ROOT, name) version = descriptor.version mtime = descriptor.mtime # create base path if it does not exist yet if not os.path.isdir(settings.CUBANE_FONT_ROOT): os.makedirs(settings.CUBANE_FONT_ROOT) # create folder if not exists yet if not os.path.isdir(path): os.mkdir(path) # download font files for each varient that has been declared and is # supported by the font and generate css font declaration code... css = [] for variant in descriptor.variants: # extract key information from variant family = variant.get('fontFamily') font_weight = variant.get('fontWeight') font_style = variant.get('fontStyle') local = variant.get('local') # skip if not declared if not font_declaration.supports_variant_by_components(font_weight, font_style): continue # determine list of available font files, some may not be available # due to download errors... imports = [] for ext in self.FONT_TYPES: filename = '%(name)s-%(version)s-%(weight)s-%(style)s.%(ext)s' % { 'name': name, 'version': version, 'weight': font_weight, 'style': font_style, 'ext': ext } font_path = os.path.join(path, filename) url = variant.get(ext) if self._download_font_file(url, font_path, mtime): imports.append( ( ext, '/media/fonts/%s/%s' % (name, filename) ) ) else: # unable to download file if not settings.TEST: # pragma: no cover sys.stderr.write( ('ERROR: Unable to download font ' + \ 'file from \'%s\'.\n') % url ) # generate font-declaration (css) for this variant... 
if len(imports) > 0: css.extend([ '/* %s %s %s latin */' % (font_name, font_weight, font_style), '@font-face {', ' font-family: \'%s\';' % family, ' font-style: %s;' % font_style, ' font-weight: %s;' % font_weight, ' src: ' + ', '.join([ 'local(\'%s\')' % local_name for local_name in local ]) + ',', ',\n'.join([ ' url(\'%s\') format(\'%s\')' % (url, ext) for ext, url in imports ]) + ';', '}\n' ]) # write css file for the entire font, covering all variants... if len(css) > 0: css_filename = os.path.join(path, '%s.css' % name) file_put_contents(css_filename, '\n'.join(css)) return True else: # if we failed, remove font folder and skip this font if os.path.isdir(path): folder_delete_content(path) os.rmdir(path) return False
def save_resource_version_identifier(identifier):
    """
    Persist the given resource version identifier within the website's
    deployment target folder.
    """
    version_filename = get_resource_version_filename()
    file_put_contents(version_filename, identifier)
def test_should_generate_hash_for_file_with_content(self):
    """
    get_file_hash() should produce the expected hash for a file that
    contains content.
    """
    filename = os.path.join(tempfile.gettempdir(), 'content.txt')
    file_put_contents(filename, 'Hello World')

    self.assertEqual(self.CONTENT_FILE_HASH, get_file_hash(filename))
def test_should_generate_hash_for_empty_file(self):
    """
    get_file_hash() should produce the expected hash for a file without
    any content.
    """
    filename = os.path.join(tempfile.gettempdir(), 'empty.txt')
    file_put_contents(filename, '')

    self.assertEqual(self.EMPTY_FILE_HASH, get_file_hash(filename))
def resize_svg_image(filename, dst_filename, width, height, focal_point=None, optimize=True, crop=True, prefix=None):
    """
    Scale the given vector image (SVG) to the given width and height and
    save the result as a new vector image as the given dest. filename.

    Vector images, such as SVG do not necessarily need to be resized,
    however they may contain bitmap data which we will scale accordingly.
    Any vector data and viewport information is preserved.

    In addition, we will prefix all id attribute values and internal
    references with a short prefix that is unique to the file. We do this
    in case we inline multiple SVG files into the actual websites and we
    do not want to have any identifiers and/or references colliding.

    Finally, we also remove any style from the SVG, since this may then
    become global to the document when we inline an SVG.
    """
    def _scale_image(data, width, height):
        """
        Scale given image data blob to fit the given width and height.
        Returns the scaled image blob or None if the data is not an image.
        """
        # open image data
        try:
            img = WandImage(blob=data)
        except NOT_AN_IMAGE_WAND_EXCEPTIONS:
            # not an image! -> ignore and leave as is
            return None

        # do not upscale!
        w, h = img.size
        if width > w or height > h:
            width = w
            height = h
        if width < 1:
            width = 1
        if height < 1:
            height = 1

        # calc. new width and height by fitting it into the
        # desired boundaries
        w, h = get_image_fitting(w, h, width, height)
        img.resize(w, h)

        # return image blob (metadata stripped)
        blob = io.BytesIO()
        img.strip()
        img.save(file=blob)
        return blob.getvalue()

    def _match_image(m):
        """
        Process SVG <image> tag. Only deal with base64 embedded image data
        and ignore external references.
        """
        # extract attributes from the matched tag markup
        attr = {}
        for attrname, value in re.findall(
                r'(?P<attrname>\w+)="(?P<value>.*?)"', m.group('attr')):
            attr[attrname] = value

        # get image data from a data-uri href (if present)
        data = attr.get('href', '')
        href = re.match(r'^data:image\/(?P<fmt>\w+);base64,(?P<data>.*?)$', data)
        data = None
        fmt = None
        if href:
            fmt = href.group('fmt')
            try:
                data = base64.b64decode(href.group('data'))
            except TypeError:
                # base64 decoding error! -> ignore and leave as is
                pass

        # scale embedded image down to the max. given width, height is
        # determined by aspect ratio of the image...
        if data is not None:
            data = _scale_image(data, width, height)
            if data is not None:
                # re-embed the scaled bitmap and re-emit the tag
                data = base64.b64encode(data)
                attr['href'] = 'data:image/%s;base64,%s' % (fmt, data)
                return '<image %s/>' % ' '.join([
                    '%s="%s"' % (name, value)
                    for name, value in attr.items()
                ])

        # fallback -> leave as is
        return m.group(0)

    def _replace_images(svg):
        """
        Find and replace any references to images (<image> tags).
        """
        return re.sub(
            r'<image (?P<attr>.*?)(?P<tail>(/>)|(></image>))',
            _match_image, svg)

    def _get_viewbox(xml):
        """
        Return the viewbox coordinates from the viewBox attribute on the
        svg tag. Falls back to (0, 0, 64, 64) if the attribute is missing
        or malformed.
        """
        # parse viewBox attribute
        viewbox = xml.svg.get('viewBox', '')
        components = re.split(r'[\s,]', viewbox, 4)
        x, y, w, h = (0.0, 0.0, 64.0, 64.0)
        if len(components) == 4:
            try:
                x, y, w, h = [float(c) for c in components]
            except:
                # NOTE(review): bare except silently keeps the defaults on
                # any parse error -- deliberate best-effort; consider
                # narrowing to ValueError
                pass

        # do not allow an invalid viewbox, default to square
        if h == 0.0:
            h = 1.0

        return x, y, w, h

    def _set_viewbox(xml, x, y, w, h):
        """
        Set the viewBox within the given SVG xml markup to the new
        dimensions as given.
        """
        # set viewBox
        xml.svg['viewBox'] = '%s %s %s %s' % (x, y, w, h)

        # rewrite enable-background style (not really used by many
        # browsers, but we want to be consistent)
        style = xml.svg.get('style', '')
        style = re.sub(
            r'enable-background:\s*new\s+([-.,\d]+)\s+([-.,\d]+)\s+([-.,\d]+)\s+([-.,\d]+);?',
            'enable-background:new %s %s %s %s;' % (x, y, w, h),
            style)
        xml.svg['style'] = style

    def _normalised(x):
        """
        Return integer of x, if x has no fraction component.
        """
        intx = int(x)
        return intx if intx == x else x

    def _scale_viewbox(xml, width, height):
        """
        Scale the viewBox to the new aspect ratio of the given width and
        height (cropping towards the focal point, if given).
        """
        # get viewBox from current SVG
        x, y, w, h = _get_viewbox(xml)

        # make the target width the same width as the SVG. The target
        # height is adjusted based on the target aspect ratio
        if height == 0:
            height = 1
        ar = float(width) / float(height)
        width = w
        height = width / ar

        # determine new cropping area
        cropx, cropy, cropw, croph = get_image_crop_area(
            w, h, width, height, focal_point)

        # adjust to new (cropped) viewBox
        cropx += x
        cropy += y

        # normalise values to int if we do not have a fraction
        cropx = _normalised(cropx)
        cropy = _normalised(cropy)
        cropw = _normalised(cropw)
        croph = _normalised(croph)

        # apply new viewbox
        _set_viewbox(xml, cropx, cropy, cropw, croph)

    def _inline_style(xml):
        """
        Take embedded style and inline all style information.
        """
        # extract style from all <style> tags and remove them
        style = ''
        for tag in xml.svg.find_all('style'):
            style += '\n'.join(tag.contents) + '\n'
            tag.decompose()

        # inline all collected style
        inline_style(xml.svg, parse_style(style))
        remove_attr(xml.svg, ['class'])

    def _get_prefix(filename, prefix):
        """
        Return a unique prefix to be used, if the given prefix is None.
        Since the prefix may be used as unique identifiers, they must
        start with a letter and cannot start with a number.
        """
        # generate hash based on filename
        if prefix is None:
            m = hashlib.sha224()
            m.update(filename)
            prefix = m.hexdigest()[:6]

        # if the first character is a number, it becomes a letter
        # between a-f depending on the number.
        if prefix:
            try:
                n = int(prefix[0])
                prefix = ['a', 'b', 'c', 'd', 'e', 'f'][n % 6] + prefix[1:]
            except ValueError:
                # first character is not a digit -> keep as is
                pass

        # prefix is always terminated by an underscore (unless empty)
        if prefix != '' and not prefix.endswith('_'):
            prefix += '_'

        return prefix

    def _prefix_ids(node, prefix):
        # thin wrapper around the module-level prefix_ids helper
        prefix_ids(node, prefix)

    # open svg file as text and replace embedded bitmap data
    markup = file_get_contents(filename)
    markup = _replace_images(markup)

    # load xml (only needed when we optimise and/or crop)
    if optimize or crop:
        xml = BeautifulSoup(markup, 'xml')
    else:
        xml = None

    # is SVG?
    if xml and xml.svg:
        # crop
        if crop:
            _scale_viewbox(xml, width, height)

        if optimize:
            # inline style and prefix all ids/references; the prefix used
            # (without trailing underscore) is recorded on the svg tag
            _inline_style(xml)
            _prefix = _get_prefix(dst_filename, prefix)
            _prefix_ids(xml, _prefix)
            if _prefix:
                xml.svg['data-prefix'] = _prefix[:-1]

    # render xml out
    if xml:
        markup = unicode(xml)

    # write output to target filename
    file_put_contents(dst_filename, markup)
def signal_running(cls):
    """
    Indicate that the task runner is currently running by writing the
    current process identifier (pid) to the pid file.
    """
    pid = unicode(os.getpid())
    file_put_contents(cls.get_pid_filename(), pid)