def write_filename(self, uuid, obj, filename):
    # Working filename is like
    #   a.b.c.log
    # We want to roll this over (and compress) when it reaches a size limit
    if isinstance(obj, basestring):
        jsonstr = self.clean_newlines(unicode(obj), obj)
    else:
        # Use minimal json (without excess spaces)
        jsonstr = unicode(json.dumps(obj, separators=(',', ':')))
    output_line = u"%s\t%s\n" % (uuid, jsonstr)
    dirname = os.path.dirname(filename)
    if dirname != '' and not os.path.exists(dirname):
        fileutil.makedirs_concurrent(dirname)
    # According to SO, this should be atomic on a well-behaved OS:
    # http://stackoverflow.com/questions/7561663/appending-to-the-end-of-a-file-in-a-concurrent-environment
    with io.open(filename, "a") as fout:
        fout.write(output_line)
        filesize = fout.tell()
    logging.debug("Wrote to %s: new size is %d" % (filename, filesize))
    if filesize >= self._max_log_size:
        return self.rotate(filename)
    else:
        return filename
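# The append-and-rotate logic above relies on fileutil.makedirs_concurrent,
# which is not shown here. A minimal sketch of what such a helper might look
# like follows (an assumption, not the original implementation): it tolerates
# the race where another process creates the directory between the caller's
# existence check and the mkdir call.
import errno
import os

def makedirs_concurrent(path):
    try:
        os.makedirs(path)
    except OSError, e:
        # Another writer beat us to it; that is fine. Re-raise anything else.
        if e.errno != errno.EEXIST or not os.path.isdir(path):
            raise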
def save_to_cache(self, repo, revision, contents):
    filename = os.path.join(self._cache_dir, repo, revision, "Histograms.json")
    try:
        fout = open(filename, 'w')
    except IOError:
        fu.makedirs_concurrent(os.path.dirname(filename))
        fout = open(filename, 'w')
    fout.write(contents)
    fout.close()
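# save_to_cache writes Histograms.json under cache_dir/repo/revision/. A
# hedged sketch of the matching read side follows; load_from_cache is not
# part of the original source, just an illustration of the same layout.
def load_from_cache(self, repo, revision):
    filename = os.path.join(self._cache_dir, repo, revision, "Histograms.json")
    try:
        with open(filename, 'r') as fin:
            return fin.read()
    except IOError:
        # Cache miss: the caller should fetch the file and then save_to_cache().
        return None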
def download_one(args):
    local_path, bucket, remote_key = args
    target = os.path.join(local_path, remote_key)
    target_dir = os.path.dirname(target)
    if not os.path.exists(target_dir):
        fu.makedirs_concurrent(target_dir)
    success = False
    err = None
    for retry in range(1, 4):
        try:
            k = Key(bucket)
            k.key = remote_key
            k.get_contents_to_filename(target)
            # TODO: compare md5? Note that it will fail if we switch to
            #       multipart uploads.
            success = True
            break
        except S3ResponseError, e:
            print >> sys.stderr, "S3 Error on attempt #%i:" % retry, e.status, e.reason
        except:
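# Hypothetical driver for download_one, assuming boto 2. The connection,
# bucket name, key names, and local path below are made-up examples; only the
# (local_path, bucket, remote_key) tuple shape comes from the code above.
# download_one itself needs these boto 2 imports:
#   from boto.s3.key import Key
#   from boto.exception import S3ResponseError
from boto.s3.connection import S3Connection

def download_some(local_path, bucket_name, key_names):
    conn = S3Connection()  # reads credentials from the environment/boto config
    bucket = conn.get_bucket(bucket_name)
    for name in key_names:
        download_one((local_path, bucket, name))

# Example (illustrative values only):
#   download_some("/tmp/telemetry", "telemetry-published-v1", ["a.log", "b.log"])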