def dumps(self): """ Return a JSON serialization of this blueprint. Make a best effort to prevent variance from run-to-run. """ self.normalize() return util.json_dumps(self)
def walk_files(b, **kwargs):
    """
    Walk a blueprint's files and execute callbacks.

    * `before_files():`
      Executed before files are enumerated.
    * `file(pathname, f):`
      Executed when a file is enumerated.
    * `after_files():`
      Executed after files are enumerated.
    """
    kwargs.get("before_files", lambda *args: None)()
    callback = kwargs.get("file", lambda *args: None)
    for pathname, f in sorted(b.get("files", {}).iteritems()):

        # AWS cfn-init templates may specify file content as JSON, which
        # must be converted to a string here, lest each frontend have to
        # do so.
        if "content" in f and not isinstance(f["content"], basestring):
            f["content"] = util.json_dumps(f["content"])

        callback(pathname, f)
    kwargs.get("after_files", lambda *args: None)()
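# A minimal usage sketch for walk_files. The blueprint dict `b` and its
# 'files' layout below are assumptions for illustration, not the real
# blueprint schema.
import sys

def example_walk(b):
    walk_files(b,
               before_files=lambda: sys.stdout.write("files:\n"),
               file=lambda pathname, f: sys.stdout.write("  %s\n" % pathname),
               after_files=lambda: sys.stdout.write("done\n"))

example_walk({"files": {"/etc/motd": {"content": "hello\n"}}})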
def __exit__(self, *args):
    if self._clear_called:
        os.remove(self.filepath)  # Delete timer.
    else:
        with util.OpenAndLock(self.filepath, 'w') as f:
            f.write(util.json_dumps(self._timer_obj))
    self._enter_called = False
def increment_effective(label, delta):
    if not os.path.isfile(_resource_path(label)):
        return False
    with util.OpenAndLock(_resource_path(label), 'r+') as f:
        entry = json.load(f)
        entry['effective'] += delta  # Can validly result in negative numbers.
        f.truncate(0)
        f.seek(0)
        f.write(util.json_dumps(entry))
    return True
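# Hedged usage sketch: a negative delta validly subtracts time, e.g.
# crediting back five minutes against an assumed existing 'writing' entry.
if not increment_effective('writing', -300.0):
    util.tlog("no diary entry with label `writing`")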
def dumps(self):
    """
    Return a JSON serialization of this blueprint.  Make a best effort
    to prevent variance from run-to-run.  Remove superfluous empty keys.
    """
    if 'arch' in self and self['arch'] is None:
        del self['arch']
    for key in ['files', 'packages', 'sources']:
        if key in self and 0 == len(self[key]):
            del self[key]
    return util.json_dumps(self)
def process_batch(self):
    try:
        p = util.json_dumps(self.batch_data)
        mac = hmac.new(self.key, p)
        send_bytes = struct.pack('B', mac.digest_size) + mac.digest() + p
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((self.host, self.port))
            s.send(send_bytes)
        finally:
            s.close()
    except Exception as e:
        self.logger_logger.error("Failed to send network data: %s", e)
def new_command(label):
    if os.path.isfile(_resource_path(label)):
        util.tlog("A diary entry with label `%s` already exists" % label)
        return
    now = time.time()
    entry = {
        'label': label,
        'epoch': now,
        'interval_start_time': now,
        'effective': 0.0,
    }
    with util.OpenAndLock(_resource_path(label), 'w') as f:
        f.write(util.json_dumps(entry))
    util.tlog("diary entry with label `%s` created" % label)
def __exit__(self, *args): """Signals this module that the timer is running on the given label. If a diary entry for the given label exists, this function increments its 'effective' field by (time.time() - interval_start_time). """ if os.path.isfile(_resource_path(self._label)): # TODO(alive): there's a harmless and unlikely race condition here. with util.OpenAndLock(_resource_path(self._label), 'r+') as f: entry = json.load(f) entry['effective'] += time.time( ) - entry['interval_start_time'] f.seek(0) f.truncate(0) f.write(util.json_dumps(entry))
def __enter__(self):
    # TODO(alive): Should likely hold the file lock throughout the entire
    # `with` statement.
    # TODO(alive): Explicitly create timer files. This can cause subtle bugs.
    self._enter_called = True
    if not os.path.isfile(self.filepath):
        self._timer_obj = {
            'label': self._label,
            'endtime': 0,  # This field is 0 when the timer is not running.
            'remaining': sys.maxint,
        }
        with util.OpenAndLock(self.filepath, 'w') as f:
            f.write(util.json_dumps(self._timer_obj))
    else:
        with util.OpenAndLock(self.filepath, 'r') as f:
            self._timer_obj = json.load(f)
    return self
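# For reference, the timer file written above is plain JSON; under these
# assumptions a freshly created timer serializes to something like
# (sys.maxint is 9223372036854775807 on a 64-bit Python 2 build):
# {"label": "pomodoro", "endtime": 0, "remaining": 9223372036854775807}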
def process_batch(self):
    try:
        p = util.json_dumps(self.batch_data)
        mac = hmac.new(self.key, p)
        send_bytes = struct.pack('B', mac.digest_size) + mac.digest() + p
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            try:
                s.connect((self.host, self.port))
            except socket.error:
                # Fall back to IPv6 if the IPv4 connection fails.
                s.close()
                s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
                s.connect((self.host, self.port))
            s.send(send_bytes)
        finally:
            s.close()
    except Exception:
        self.logger_logger.exception("Failed to send network data")
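# Hedged receiving-side sketch for the frame built above: one byte giving
# the digest length, then the HMAC digest, then the JSON payload. `conn`
# and `key` are illustrative assumptions; a real reader would loop on
# recv() until the peer closes the connection.
import hmac
import json
import struct

def read_batch(conn, key):
    data = conn.recv(65536)
    digest_size = struct.unpack('B', data[0])[0]
    digest, payload = data[1:1 + digest_size], data[1 + digest_size:]
    # hmac.compare_digest (Python 2.7.7+) would be the constant-time choice.
    if hmac.new(key, payload).digest() != digest:
        raise ValueError('bad HMAC; rejecting batch')
    return json.loads(payload)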
def sync_post(user_profile, post_object):
    print "SYNC_POST", post_object.uuid
    content = urllib.urlopen(post_object.entry_share_url).read()
    try:
        content_json = plistlib.readPlistFromString(content)
    except Exception:
        # Some entries declare UTF-8 but carry UTF-16 content; rewrite the
        # declaration and retry.
        content = content.replace('UTF-8', 'UTF-16')
        content_json = plistlib.readPlistFromString(content)
    util.clean_keys(content_json)

    # post_object.content = content
    # post_object.post_json = json.dumps( json )
    post_object.sync_ready = False
    post_object.sync_complete = True
    post_object.last_sync = time.time()

    post_tags = content_json.get('tags', [])
    post_tags = [each.lower() for each in post_tags]
    post_object.all_tags = ",".join(post_tags)

    if user_profile.anon_tag.lower().strip() in post_tags:
        post_object.is_anonymous = True
    if user_profile.pub_tag.lower().strip() in post_tags:
        post_object.is_public = True

    # Only store content for posts the user has tagged for sharing.
    if post_object.is_anonymous or post_object.is_public:
        post_object.content = util.json_dumps(content_json)
    else:
        post_object.content = ''

    # TODO: Think about support for multiple tags
    # post_object.is_public = False
    # user_pub_tags = user_profile.pub_tag.lower().split(',')
    # user_pub_tags = map(lambda item: item.strip(), user_pub_tags)
    # for each_pub_tag in user_pub_tags:
    #     if each_pub_tag in post_tags:
    #         post_object.is_public = True

    post_object.save()
def __enter__(self):
    """Signals this module that the timer is running on the given label.

    If a diary entry for the given label exists, this function sets its
    interval_start_time to the current time.

    Possible interactions with timer:
      Trivial orderings (no interaction):
        In these cases, new and done track all elapsed time.
        1. new, done, __enter__, __exit__
        2. __enter__, __exit__, new, done
        3. __enter__, new, done, __exit__

        In this case, __enter__ and __exit__ track all elapsed time.
        4. new, __enter__, __exit__, done

      Tricky orderings:
        5. new, __enter__, done, __exit__
           In this case, done captures the amount of time elapsed after
           __enter__.
        6. __enter__, new, __exit__, done
           In this case, __exit__ captures the amount of time elapsed after
           new.
    """
    # TODO(alive): rewrite with the paradigm used in timer_db.py.
    if os.path.isfile(_resource_path(self._label)):
        # TODO(alive): there's a harmless and unlikely race condition here.
        with util.OpenAndLock(_resource_path(self._label), 'r+') as f:
            entry = json.load(f)
            entry['interval_start_time'] = time.time()
            f.seek(0)
            f.truncate(0)
            f.write(util.json_dumps(entry))
    return self
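# Hedged usage sketch: the class name `Interval` is an assumption (the
# excerpt doesn't show it). Time spent inside the `with` block is credited
# to the diary entry's 'effective' field by __exit__ on the way out.
with Interval('writing'):
    do_focused_work()  # Hypothetical workload.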