def hadoop_feature_line(id, properties, geometry):
    ''' Convert portions of a GeoJSON feature to a single line of text.

        Allows Hadoop to stream features from the mapper to the reducer.
        See also skeletron-hadoop-mapper.py and skeletron-hadoop-reducer.py.
    '''
    line = [
        json_encode(id),
        ' ',
        b64encode(pickle(sorted(list(properties.items())))),
        '\t',
        b64encode(geometry.wkb)
        ]

    return ''.join(line)
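# A minimal sketch of the inverse of hadoop_feature_line(), assuming the
# aliases above map to the standard library (json_encode -> json.dumps,
# pickle -> pickle.dumps, b64encode -> base64.b64encode) and that Shapely is
# used for the WKB geometry; the real decoding lives in
# skeletron-hadoop-reducer.py and may differ.
from base64 import b64decode
from json import loads as json_decode
from pickle import loads as unpickle

from shapely.wkb import loads as wkb_decode  # assumption: Shapely geometries

def hadoop_line_feature(line):
    ''' Recover (id, properties, geometry) from one streamed feature line. '''
    head, wkb_b64 = line.split('\t', 1)
    id_json, props_b64 = head.rsplit(' ', 1)

    id = json_decode(id_json)
    properties = dict(unpickle(b64decode(props_b64)))
    geometry = wkb_decode(b64decode(wkb_b64))

    return id, properties, geometry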
def onEVENTS(self, event, *args, **kwargs):
    channel = event.channel

    # Skip events/channels we were told to ignore, and events that have
    # already been bridged; mark everything else so it is not re-sent.
    if event.name in self.IgnoreEvents:
        return
    elif channel in self.IgnoreChannels:
        return
    elif hasattr(event, "ignore") and event.ignore:
        return
    else:
        event.ignore = True

    event.source = self.ourself

    # Serialize the event; events that cannot be pickled are dropped.
    try:
        s = pickle(event, -1)
    except Exception:
        return

    # Send to each known node, or broadcast on the local network if none.
    if self.nodes:
        for node in self.nodes:
            self.__write__(node, s)
    else:
        self.__write__(("<broadcast>", self.port), s)
def process_message(self, msg, waiting=False):
    '''
    Process a remote message.

    <msg> is a backend.Message object.
    If <waiting> is True it indicates a main loop is waiting for something,
    in which case signals get queued.
    '''

    # Decode message
    try:
        info = pickle.loads(msg.parts[0])
        if DEBUG:
            logger.debug('Msg %s from %s: %s', info[0], msg.sender_uid, info)
    except Exception as e:
        logger.warning('Unable to decode object: %s [%r]', str(e), msg.parts[0])
        return

    if info[0] == OS_CALL:
        # In a try statement to postpone checks
        try:
            (callid, objid, funcname, args, kwargs) = info[1:6]
            objid = misc.UID(bytes=objid)
            sig = kwargs.get(OS_SIGNAL, False)

            # Store signals if waiting for a reply or event
            if waiting and sig:
                self._signal_queue.append(msg)
                return

            # Unwrap arguments
            try:
                bufs = msg.parts[1:]
                args, bufs = _unwrap(args, bufs, msg.sender_uid)
                kwargs, bufs = _unwrap(kwargs, bufs, msg.sender_uid)
            except Exception:
                ret = misc.RemoteException('Unable to unwrap objects')
                reply = [pickle.dumps((OS_RETURN, callid, ret), PICKLE_PROTO)]
                self.backend.send_msg(msg.sender_uid, reply)
                return

            if DEBUG:
                logger.debug('  Processing call %s: %s.%s(%s,%s)',
                             callid, objid, funcname, args, kwargs)

            # Call function
            obj = self.get_object(objid)
            func = getattr(obj, funcname, None)
            ret = func(*args, **kwargs)

            # If a signal, no need to return anything to caller
            if sig:
                return

            # Wrap return value
            ret, bufs = _wrap(ret)
            if DEBUG:
                logger.debug('  Returning for call %s: %s',
                             callid, misc.ellipsize(str(ret)))

        # Handle errors
        except Exception as e:
            if len(info) < 6:
                logger.error('Invalid call msg: %s', info)
                ret = misc.RemoteException('Invalid call msg')
            elif 'obj' not in locals() or obj is None:
                ret = misc.RemoteException(
                    'Object %s not available for calls' % objid)
            elif 'func' not in locals() or func is None:
                ret = misc.RemoteException(
                    'Object %s does not have function %s' % (objid, funcname))
            else:
                tb = traceback.format_exc(15)
                ret = misc.RemoteException('%s\n%s' % (e, tb))

        # Prepare return packet
        try:
            reply = [pickle.dumps((OS_RETURN, callid, ret), PICKLE_PROTO)]
            reply.extend(bufs)
        except Exception:
            ret = misc.RemoteException('Unable to pickle return %s' % str(ret))
            reply = [pickle.dumps((OS_RETURN, callid, ret), PICKLE_PROTO)]

        self.backend.send_msg(msg.sender_uid, reply)

    elif info[0] == OS_RETURN:
        if len(info) < 3:
            return Exception('Invalid return msg')

        # Get call id and unwrap return value
        callid, ret = info[1:3]
        ret, bufs = _unwrap(ret, msg.parts[1:], msg.sender_uid)

        if DEBUG:
            logger.debug('  Processing return for %s: %s', callid, ret)

        if callid in self.reply_objects:
            self.reply_objects[callid].set(ret)
            # We should not keep track of the reply object
            del self.reply_objects[callid]
        else:
            raise Exception('Reply for unknown call %s' % callid)

    elif info[0] == 'hello_from':
        msg.sender_uid = misc.UID(bytes=info[1])
        from_addr = info[2]
        logger.debug('hello_from client %s with server @ %s',
                     msg.sender_uid, from_addr)
        self.backend.connect_from(msg.sock, msg.sender_uid, from_addr)

        # This was necessary for ZMQ sockets
        # if not self.backend.connected_to(msg.sender_uid):
        #     if DEBUG:
        #         logger.debug('Initiating reverse connection...')
        #     self.backend.connect_to(from_addr, msg.sender_uid)

        if DEBUG:
            logger.debug('Sending hello_reply')
        reply = ('hello_reply', self.root_uid.b,
                 self.backend.get_server_address())
        self.backend.send_msg(msg.sender_uid,
                              [pickle.dumps(reply, PICKLE_PROTO)])
        self.request_client_proxy(msg.sender_uid, async_value=True)

    elif info[0] == 'hello_reply':
        msg.sender_uid = misc.UID(bytes=info[1])
        from_addr = info[2]
        if DEBUG:
            logger.debug('hello_reply client %s with server @ %s',
                         msg.sender_uid, from_addr)
        self.backend.connect_from(msg.sock, msg.sender_uid, from_addr)
        self.request_client_proxy(msg.sender_uid, async_value=True)

    elif info[0] == 'goodbye_from':
        if DEBUG:
            logger.debug('Goodbye client %s from %s', msg.sender_uid, info[1])
        forget_uid = self.backend.get_uid_for_addr(info[1])
        if forget_uid in self.clients:
            del self.clients[forget_uid]
            if DEBUG:
                logger.debug('deleting client %s', forget_uid)
        self.backend.forget_connection(info[1], remote=False)
        if msg.sender_uid in self.clients:
            del self.clients[msg.sender_uid]
            if DEBUG:
                logger.debug('deleting client %s', msg.sender_uid)

    # Ping - pong to check alive
    elif info[0] == 'ping':
        logger.info('PING from %s', msg.sender_uid)
        reply = pickle.dumps(('pong',), PICKLE_PROTO)
        self.backend.send_msg(msg.sender_uid, [reply])

    elif info[0] == 'pong':
        logger.info('PONG from %s', msg.sender_uid)

    else:
        logger.warning('Unknown msg: %s', info)
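# The OS_CALL branch above unpacks info[1:6] as (callid, objid, funcname,
# args, kwargs) from a pickled tuple in msg.parts[0]. The sketch below shows
# how such a call frame could be built on the sending side; OS_CALL and
# PICKLE_PROTO are placeholders here, since the real constants and the client
# code are defined elsewhere and are not part of this excerpt.
import pickle

OS_CALL = 'call'   # placeholder value
PICKLE_PROTO = 2   # placeholder protocol number

def frame_call(callid, objid_bytes, funcname, args, kwargs):
    ''' Build the first message part that process_message() expects for a
        remote call; buffers produced by _wrap() would be appended to the
        message as additional parts. '''
    return pickle.dumps((OS_CALL, callid, objid_bytes, funcname, args, kwargs),
                        PICKLE_PROTO)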
def b64_pickle(value):
    return b64encode(pickle(value, 2))
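# A minimal sketch of the matching decoder, assuming b64_pickle() above uses
# the standard base64/pickle modules under its aliases; the original module
# may already provide an equivalent helper.
from base64 import b64decode
from pickle import loads as unpickle

def b64_unpickle(encoded):
    ''' Recover the value originally passed to b64_pickle(). '''
    return unpickle(b64decode(encoded))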
def save(self, filename):
    # Pickle to a temporary file first, then copy it into place, so an
    # existing file is not clobbered if pickling fails partway through.
    tmp = tempfile.NamedTemporaryFile(delete=False)
    pickle(self, tmp)
    tmp.close()
    shutil.copyfile(tmp.name, filename)
    os.unlink(tmp.name)
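# A hypothetical counterpart to save() above, assuming pickle() there is an
# alias for pickle.dump; it is not part of the original class and is shown
# only to illustrate the round trip.
import pickle as pickle_module

def load(filename):
    ''' Restore an object previously written with save(). '''
    with open(filename, 'rb') as f:
        return pickle_module.load(f)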