Example #1
    def run_server(self):
        logging.basicConfig(level=logging.WARN, format='%(asctime)s [%(process)s] %(levelname)s %(message)s')

        socket = nanomsg.Socket(nanomsg.REP)
        socket.bind(self.endpoint)
        socket.recv_timeout = 1000

        last_check_ttr = time.time()
        while True:
            if time.time() - last_check_ttr > 1:
                self.check_ttr()
                last_check_ttr = time.time()

            try:
                msg = msgpack.loads(socket.recv())
                command = msg.pop(0)+'_cmd'
                msg = [ (_ if _ != '' else None) for _ in msg ]
                logger.debug("Command %s %s", command, msg)
                if hasattr(self,command):
                    result = getattr(self,command)(*msg)
                    socket.send(msgpack.dumps(result))
                else:
                    socket.send(msgpack.dumps(['ERR', 'Unknown command "%s"' % command[0:-4]]))
            except nanomsg.NanoMsgAPIError as error:
                if error.errno != nanomsg.EAGAIN:
                    raise
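For context, a minimal client sketch for the REP loop above (not from the original project; the endpoint and the 'status' command are assumptions) would send a msgpack-encoded [command, *args] list and read back the packed result:

import msgpack
import nanomsg

client = nanomsg.Socket(nanomsg.REQ)
client.connect('tcp://127.0.0.1:5555')  # assumed endpoint; the server binds self.endpoint
client.send(msgpack.dumps(['status']))  # 'status' is a hypothetical command dispatched as status_cmd()
print(msgpack.loads(client.recv()))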
Example #2
    def kill_workers(self, timeout=5):
        """
        Send a suicide message to all workers, with some kind of timeout.
        """
        logging.info('Killing workers, taking up to %d seconds.', int(timeout))
        poller = zmq.Poller()
        poller.register(self.results_pull, zmq.POLLIN)

        while True:
            # Seems to get stuck gevent-blocking in the work_push.send() after
            # all the workers have died.  Also, gevent.Timeout() doesn't seem
            # to work here?!
            signal.alarm(int(timeout))
            self.work_push.send(msgpack.dumps([{'type': 'PING'}]))
            socks = dict(poller.poll(timeout * 1500))
            if self.results_pull in socks \
                    and socks[self.results_pull] == zmq.POLLIN:
                result_packed = self.results_pull.recv()
                result = msgpack.loads(result_packed)
                logging.info('Heard from worker id=%d; sending SUICIDE',
                            result['worker_id'])
                self.work_push.send(msgpack.dumps([{'type': 'SUICIDE'}]))
                gevent.sleep(0.1)
            else:
                break
            signal.alarm(0)
Example #3
    def test_encode(self):
        """Test %s() can be encoded into msgpack serialised binary data."""

        # Test message can be encoded.
        msgA = self.message(self.items)
        serialised = msgA.encode()

        # Test message can be decoded.
        msgB = self.message(serialised)
        self.compare(msgA, msgB)

        # Ensure timestamp was copied.
        self.assertEqual(msgA['timestamp'], msgB['timestamp'])

        # Ensure additional fields can be sent.
        msgA['newfieldA'] = 'A'
        msgA['newfieldB'] = 'B'
        msgB.update(msgA.encode())
        self.compare(msgA, msgB)

        # Test arbitrary serialised data raises exceptions.
        with self.assertRaises(Exception):
            self.message(msgpack.dumps('invalid_type'))

        # Test serialised incomplete dictionaries raise exceptions.
        dct = self.items.copy()
        dct.popitem()
        serialised = msgpack.dumps(dct)
        with self.assertRaises(TypeError):
            self.message(serialised)
Example #4
    def handle(self, command):
        handle_dict = {}
        handle_dict['login'] = login
        handle_dict['publish_notice'] = publish_notice
        handle_dict['get_notices'] = get_notices
        handle_dict['publish_proposal'] = publish_proposal
        handle_dict['get_proposal_list'] = get_proposal_list
        handle_dict['add_comment'] = add_comment
        handle_dict['del_comment'] = del_comment
        handle_dict['get_comments'] = get_comments
        handle_dict['get_proposal'] = get_proposal
        handle_dict['get_reconsider_list'] = get_reconsider_list
        handle_dict['publish_reconsider'] = publish_reconsider

        try:
            command_dict = msgpack.loads(command)
            print "command_dict %r" % command_dict
            if "command" in command_dict:
                #TODO auto import command file call the process function
                print "command"
                if command_dict['command'] in handle_dict:
                    reply = handle_dict[command_dict['command']].process(command_dict)
                    return msgpack.dumps(reply)
                print command_dict['command']
                return msgpack.dumps({"result": False})
            else:
                print "Unsupported command"
                return msgpack.dumps({"result": False})
        except Exception, e:
            print e
            return msgpack.dumps({"result": False})
Example #5
def fake_multisignal_mainLoop(stop_flag, stream, precomputed):
    import zmq
    pos = 0
    abs_pos = pos2 = 0
    
    context = zmq.Context()
    socket = context.socket(zmq.PUB)
    socket.bind("tcp://*:{}".format(stream['port']))
    
    socket.send(msgpack.dumps(abs_pos))
    
    packet_size = stream['packet_size']
    sampling_rate = stream['sampling_rate']
    np_arr = stream['shared_array'].to_numpy_array()
    half_size = np_arr.shape[1]/2
    while True:
        t1 = time.time()
        #~ print 'pos', pos, 'abs_pos', abs_pos
        #double copy
        np_arr[:,pos2:pos2+packet_size] = precomputed[:,pos:pos+packet_size] 
        np_arr[:,pos2+half_size:pos2+packet_size+half_size] = precomputed[:,pos:pos+packet_size]
        pos += packet_size
        pos = pos%precomputed.shape[1]
        abs_pos += packet_size
        pos2 = abs_pos%half_size
        socket.send(msgpack.dumps(abs_pos))
        
        if stop_flag.value:
            print 'will stop'
            break
        t2 = time.time()
        #~ time.sleep(packet_size/sampling_rate-(t2-t1))
        
        time.sleep(packet_size/sampling_rate)
Example #6
 def handle_intern_message(self, addr, message):
     self._logger.info("received intern message %s", message)
     try:
         if message["type"] == "ssh_key":
             # copy the dict manually to ensure the correctness of the message
             yield from self.write_stdout({"type": "ssh_key", "ssh_key": message["ssh_key"]})
             yield from self.intern.send_multipart([addr, b'', msgpack.dumps({"type": "ok"}, encoding="utf8", use_bin_type=True)])
             return False
         if message["type"] == "run_student":
             # copy the dict manually to ensure the correctness of the message
             self.running_student_container[message["socket_id"]] = addr
             yield from self.write_stdout({"type": "run_student", "environment": message["environment"],
                                      "time_limit": message["time_limit"], "hard_time_limit": message["hard_time_limit"],
                                      "memory_limit": message["memory_limit"], "share_network": message["share_network"],
                                      "socket_id": message["socket_id"]})
             return False
         if message["type"] == "run_student_ask_retval":
             # ignore, just a dummy message
             return False
         if message["type"] == "done":
             yield from self.intern.send_multipart([addr, b'', msgpack.dumps({"type": "ok"}, encoding="utf8", use_bin_type=True)])
             return True
         return False
     except:
         self._logger.exception("Exception occured while handling an internal message")
Example #7
    def header(self, length):
        self._sync = self.conn.generate_sync()
        header = msgpack.dumps({IPROTO_CODE: self.request_type,
                                IPROTO_SYNC: self._sync,
                                IPROTO_SCHEMA_ID: self.conn.schema_version})

        return msgpack.dumps(length + len(header)) + header
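As a hedged illustration of the framing above (a msgpack-encoded length followed by the packed header map), a reader could split a frame with a streaming Unpacker; this sketch is not part of the original connector:

import msgpack

def split_frame(frame):
    unpacker = msgpack.Unpacker()
    unpacker.feed(frame)
    length = unpacker.unpack()   # packed total length of header + body
    header = unpacker.unpack()   # packed IPROTO header map
    return length, header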
Example #8
File: bm.py Project: cym13/bookmark
def format_tags(tags, fmt):
    verbose = None not in tags.values()

    if fmt == "msgpack":
        import msgpack
        if not verbose:
            return msgpack.dumps(list(tags.keys()))
        else:
            return msgpack.dumps(
                { tag:count for tag,count in tags.items() }
            )

    if fmt == "text":
        if not verbose:
            result = '\n'.join(map(str, tags))
        else:
            result = '\n'.join("{} {}".format(tag, count)
                               for tag,count in tags.items())

    elif fmt == "json":
        import json
        if not verbose:
            result = json.dumps(list(tags.keys()), indent=4)
        else:
            result = json.dumps({ tag:count for tag,count in tags.items() },
                                indent=4)

    elif fmt == "web":
        print("Error: web output not supported for tag listing")

    return (result + "\n").encode("utf-8")
Example #9
def redis_store(input_dir, name, server, port, **kw):
    import redis
    r = redis.StrictRedis(server, port)
    times = set()
    sensor_types = {}
    fn_to_time = lambda x: int(x.rsplit('/', 1)[-1].split('.', 1)[0])
    r.sadd('users', name)
    for fn in sorted(glob.glob(input_dir + '/*'), key=fn_to_time):
        fn_time = fn_to_time(fn) / 1000.
        if fn.endswith('.jpg'):
            times.add(fn_time)
            r.zadd(name + ':images', fn_time, os.path.basename(fn))
        else:
            try:
                data = msgpack.load(open(fn))
            except ValueError:
                print('Could not parse [%s]' % fn)
                continue
            print(data)
            for sensor_name, type_num in data[2].items():
                sensor_types[sensor_name] = msgpack.dumps(type_num)
            for sensor_name, samples in data[3].items():
                for sample in samples:
                    times.add(sample[1])
                    r.zadd(name + ':sensor:' + sensor_name, sample[1], msgpack.dumps(sample))
    r.hmset(name + ':sensors', sensor_types)
    r.zadd(name + ':times', **{msgpack.dumps(x): x for x in times})
Example #10
 def handle(self):
     print '%s: connected' % self.client_address[0]
     unpacker = msgpack.Unpacker()
     self.wfile.write(msgpack.dumps(self.server.get_next_bits()))
     self.wfile.flush()
     try:
         while True:
             try:
                 d = unpacker.unpack()
             except msgpack.OutOfData:
                 tmp = self.request.recv(4096)
                 if not tmp:
                     break
                 unpacker.feed(tmp)
                 continue
             if (not isinstance(d, list) or len(d) != 3 or
                         not isinstance(d[0], basestring) or
                         not isinstance(d[1], int) or
                         not isinstance(d[2], list)):
                 print '%s: invalid input %s' % (
                                 self.client_address[0], repr(d))
                 break
             client, bits, res = d
             self.server.register(client, bits, res)
             self.wfile.write(msgpack.dumps(self.server.get_next_bits()))
             self.wfile.flush()
     except IOError, e:
         print '%s: %s' % (self.client_address[0], e)
Example #11
    def dumps(self, msg):
        '''
        Run the correct dumps serialization format
        '''
        try:
            return msgpack.dumps(msg)
        except TypeError:
            if msgpack.version >= (0, 2, 0):
                # Should support OrderedDict serialization, so, let's
                # raise the exception
                raise

            # msgpack is < 0.2.0, let's make its life easier
            # Since OrderedDict is identified as a dictionary, we can't
            # make use of msgpack custom types, we will need to convert by
            # hand.
            # This means iterating through all elements of a dictionary or
            # list/tuple
            def odict_encoder(obj):
                if isinstance(obj, dict):
                    for key, value in obj.copy().iteritems():
                        obj[key] = odict_encoder(value)
                    return dict(obj)
                elif isinstance(obj, (list, tuple)):
                    obj = list(obj)
                    for idx, entry in enumerate(obj):
                        obj[idx] = odict_encoder(entry)
                    return obj
                return obj
            return msgpack.dumps(odict_encoder(msg))
        except SystemError as exc:
            log.critical('Unable to serialize message! Consider upgrading msgpack. '
                         'Message which failed was {0} '
                         'with exception {1}'.format(msg, exc))
Example #12
    def dumps(self, msg):
        '''
        Run the correct dumps serialization format
        '''
        if self.serial == 'pickle':
            return pickle.dumps(msg)
        else:
            try:
                return msgpack.dumps(msg)
            except TypeError:
                if msgpack.version >= (0, 2, 0):
                    # Should support OrderedDict serialization, so, let's
                    # raise the exception
                    raise

                # msgpack is < 0.2.0, let's make its life easier
                # Since OrderedDict is identified as a dictionary, we can't
                # make use of msgpack custom types, we will need to convert by
                # hand.
                # This means iterating through all elements of a dictionary or
                # list/tuple
                def odict_encoder(obj):
                    if isinstance(obj, dict):
                        for key, value in obj.copy().iteritems():
                            obj[key] = odict_encoder(value)
                        return dict(obj)
                    elif isinstance(obj, (list, tuple)):
                        obj = list(obj)
                        for idx, entry in enumerate(obj):
                            obj[idx] = odict_encoder(entry)
                        return obj
                    return obj
                return msgpack.dumps(odict_encoder(msg))
Example #13
def test_request(req_header, req_body):
    query_header = msgpack.dumps(req_header)
    query_body = msgpack.dumps(req_body)
    packet_len = len(query_header) + len(query_body)
    query = msgpack.dumps(packet_len) + query_header + query_body
    try:
        s.send(query)
    except OSError as e:
        print '   => ', 'Failed to send request'
    resp_len = ''
    resp_headerbody = ''
    resp_header = {}
    resp_body = {}
    try:
        resp_len = s.recv(5)
        resp_len = msgpack.loads(resp_len)
        resp_headerbody = s.recv(resp_len)
        unpacker = msgpack.Unpacker(use_list = True)
        unpacker.feed(resp_headerbody)
        resp_header = unpacker.unpack()
        resp_body = unpacker.unpack()
    except OSError as e:
        print '   => ', 'Failed to recv response'
    res = {}
    res['header'] = resp_header
    res['body'] = resp_body
    return res
Example #14
 def post_row(self, row, params, files):
     if files:
         bottle.abort(400)
     params = {k: base64.b64decode(v) for k, v in params.items()}
     action = params['action']
     with thrift_lock() as thrift:
         manager = PicarusManager(thrift=thrift)
         print(params)
         model_key = params['model']
         print('ModelKey[%r]' % model_key)
         # TODO: Allow io/ so that we can write back to the image too
         if action == 'i/link':
             self._row_validate(row, 'r')
             # TODO: Get this directly from model
             chain_input, model_link = _takeout_input_model_link_from_key(manager, model_key)
             binary_input = thrift.get(self.table, row, chain_input)[0].value  # TODO: Check val
             model = picarus_takeout.ModelChain(msgpack.dumps([model_link]))
             bottle.response.headers["Content-type"] = "application/json"
             return json.dumps({base64.b64encode(params['model']): base64.b64encode(model.process_binary(binary_input))})
         elif action == 'i/chain':
             self._row_validate(row, 'r')
             # TODO: Get this directly from model
             chain_inputs, model_chain = zip(*_takeout_input_model_chain_from_key(manager, model_key))
             binary_input = thrift.get(self.table, row, chain_inputs[0])[0].value  # TODO: Check val
             model_chain = list(model_chain)
             model = picarus_takeout.ModelChain(msgpack.dumps(model_chain))
             bottle.response.headers["Content-type"] = "application/json"
             v = base64.b64encode(model.process_binary(binary_input))
             return json.dumps({base64.b64encode(params['model']): v})
         else:
             bottle.abort(400)
Example #15
def my_dumps(obj, context=None):
    if type(obj).__name__ == 'MyObject':
        header = {'serializer': 'my-ser'}
        frames = [msgpack.dumps(obj.__dict__, use_bin_type=True),
                  msgpack.dumps(context, use_bin_type=True)]
        return header, frames
    else:
        raise NotImplementedError()
Example #16
    def test_to_dict_msgpack_with_data_token(self):
        token = DataToken('Host', 'www.w3af.com', ('Host',))
        headers = Headers([('Host', token)])
        freq = FuzzableRequest(URL("http://www.w3af.com/"), headers=headers)

        req = HTTPRequest.from_fuzzable_request(freq)

        msgpack.dumps(req.to_dict())
Example #17
    def dump_string(self, obj):
        try:
            msgpack.dumps(obj)
            return True
        except Exception as ex:
            log.warn('Unable to dump object: %s', ex, exc_info=True)

        return False
Example #18
def array_sha256(a):
    dtype = msgpack.dumps(str(a.dtype))
    shape = msgpack.dumps(a.shape)
    bdata = a.flatten().view(numpy.uint8)
    sha = hashlib.sha256()
    sha.update(dtype)
    sha.update(shape)
    sha.update(bdata)
    return sha.hexdigest()
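A small usage sketch (assumed, not from the original module; it reuses array_sha256 as defined above): equal arrays hash identically, while changing the dtype changes the digest, since the dtype string and shape are folded into the hash.

import hashlib  # used by array_sha256 above
import msgpack  # used by array_sha256 above
import numpy

a = numpy.arange(12, dtype=numpy.float64).reshape(3, 4)
b = numpy.arange(12, dtype=numpy.float64).reshape(3, 4)
assert array_sha256(a) == array_sha256(b)
assert array_sha256(a) != array_sha256(a.astype(numpy.float32))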
Example #19
def test_request(req_header, req_body):
    query_header = msgpack.dumps(req_header)
    query_body = msgpack.dumps(req_body)
    packet_len = len(query_header) + len(query_body)
    query = msgpack.dumps(packet_len) + query_header + query_body
    try:
        s.send(query)
    except OSError as e:
        print '   => ', 'Failed to send request'
    return receive_response()
Example #20
def brainvisionsocket_mainLoop(stop_flag, streams, brain_host, brain_port, resolutions):
    import zmq
    abs_pos = pos2 = 0
    
    context = zmq.Context()
    
    stream0 = streams[0]
    socket0 = context.socket(zmq.PUB)
    socket0.bind("tcp://*:{}".format(stream0['port']))
    socket0.send(msgpack.dumps(abs_pos))
    
    stream1 = streams[1]
    socket1 = context.socket(zmq.PUB)
    socket1.bind("tcp://*:{}".format(stream1['port']))
    
    brain_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    brain_socket.connect((brain_host, brain_port))
    
    packet_size = stream0['packet_size']
    sampling_rate = stream0['sampling_rate']
    np_arr = stream0['shared_array'].to_numpy_array()
    half_size = np_arr.shape[1]/2
    while not stop_flag.value :
        buf_header = recv_data(brain_socket, 24)
        (id1, id2, id3, id4, msgsize, msgtype) = struct.unpack('<llllLL', buf_header)
        rawdata = recv_data(brain_socket,  msgsize - 24)
        if msgtype == 1:
            pass
        elif msgtype == 4:
            block, chunk, markers = get_signal_and_markers(rawdata, stream0['nb_channel'])
            
            # Signals
            chunk *= resolutions[np.newaxis, :]
            packet_size = chunk.shape[0]
            #~ print 'packet_size', packet_size
            np_arr[:,pos2:pos2+packet_size] = chunk.transpose() 
            np_arr[:,pos2+half_size:pos2+packet_size+half_size] = chunk.transpose()
            if pos2+packet_size>half_size:
                pass
                #TODO : check packet_size
            abs_pos += packet_size
            pos2 = abs_pos%half_size
            socket0.send(msgpack.dumps(abs_pos))
            
            #Triggers
            markers['pos'] += (abs_pos-packet_size)
            for marker in markers:
                socket1.send(marker.tostring())
            

        elif msgtype == 3:
            break
    
    brain_socket.close()
Example #21
def chunker(request, response):
    chunks = yield request.read()
    try:
        chunks = int(msgpack.loads(chunks))
    except ValueError:
        chunks = int(chunks)

    for num in xrange(chunks):
        response.write(msgpack.dumps('{0:-<1024}'.format(num)))
    response.write(msgpack.dumps('Done'))
    response.close()
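A hedged reader-side sketch for the stream produced by chunker above (the transport is assumed, as is a msgpack >= 0.5 that accepts raw=False): feed the raw bytes to a streaming Unpacker and stop at the 'Done' sentinel.

import msgpack

def read_chunks(raw_bytes):
    unpacker = msgpack.Unpacker(raw=False)
    unpacker.feed(raw_bytes)
    for item in unpacker:
        if item == 'Done':
            break
        yield item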
Example #22
 def _send(self, tx_uuid, rx_uuid, data):
     data = packer.dumps(data, encoding='utf-8', use_bin_type=True)
     i = 0
     s = int(len(data)/self.packetsize)
     while data[i*self.packetsize:i*self.packetsize+self.packetsize]:
         self._nodes[tx_uuid].last_hb = time()
         self._nodes[tx_uuid].counter += 1
         packet = packer.dumps((self._nodes[tx_uuid].counter, i, s, tx_uuid, rx_uuid,
                                data[i*self.packetsize:i*self.packetsize+self.packetsize]))
         h = hmac.new(self.mcast_secret, packet, self._hashalg)
         packet = h.digest() + packet
         self.tx_socket.sendto(packet, (self.mcast_group, self.mcast_port))
         i += 1
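A hedged receiver-side sketch for the packets built by _send above (the function name and call shape are assumptions, not taken from the original class): verify the HMAC prefix, then unpack the chunk envelope.

import hmac
import msgpack as packer

def verify_and_unpack(packet, mcast_secret, hashalg):
    # the sender prepends hmac(secret, packed_envelope); split it off and verify
    digest_size = hashalg().digest_size
    digest, body = packet[:digest_size], packet[digest_size:]
    if not hmac.compare_digest(digest, hmac.new(mcast_secret, body, hashalg).digest()):
        raise ValueError('bad packet signature')
    counter, i, s, tx_uuid, rx_uuid, chunk = packer.loads(body)
    return counter, i, s, tx_uuid, rx_uuid, chunk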
Example #23
    def load(cls, context):
        if isinstance(context, dict):
            log.debug('Content specified directly by dict')
            return msgpack.dumps(context)

        if isJsonValid(context):
            log.debug('Content specified directly by string')
            content = context
        else:
            log.debug('Loading content from file ...')
            with open(context, 'rb') as fh:
                content = fh.read()
        return msgpack.dumps(json.loads(content))
Example #24
 def post_row(self, row, params, files):
     action = params['action']
     with thrift_lock() as thrift:
         manager = PicarusManager(thrift=thrift)
         if action == 'i/takeout/link':
             self._row_validate(row, 'r', thrift)
             return base64.b64encode(msgpack.dumps(_takeout_model_link_from_key(manager, row)[1]))
         elif action == 'i/takeout/chain':
             self._row_validate(row, 'r', thrift)
             o = msgpack.dumps(zip(*_takeout_model_chain_from_key(manager, row))[1])
             open('takeouthack.model', 'w').write(o)
             return base64.b64encode(o)
         else:
             bottle.abort(400)
Example #25
File: rpc.py Project: yoki123/torpc
 def _pack_request(self, msg_id, msg_type, method_name, arg):
     """
     !ibi: data_length, msg_type, msg_id
     data_length = len(buf)
     """
     buf = packer.dumps((method_name, arg))
     return struct.pack('!ibi', len(buf), msg_type, msg_id) + buf
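A hedged counterpart sketch (assumed, not part of torpc): unpacking one such frame follows the '!ibi' layout from the docstring, then msgpack-decodes the body.

import struct
import msgpack

HEADER = struct.Struct('!ibi')  # data_length, msg_type, msg_id

def unpack_request(data):
    data_length, msg_type, msg_id = HEADER.unpack_from(data)
    method_name, arg = msgpack.loads(data[HEADER.size:HEADER.size + data_length])
    return msg_id, msg_type, method_name, arg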
Example #26
    def simple_copy_and_transmit(self):

        t_in = time.time()
        pos = self.thread_pos.pos
        half = self.half_size_in
        head = pos%half
        #head2 = pos%(half+1)
        print 'last head: ', self.last_head, ' head: ', head

        #if self.last_head != head:
        # Copy data
        #self.np_arr_out[:,self.last_head2:head2] = self.np_arr_in[:,self.last_head+half:head+half]
        self.np_arr_out[:,self.last_head:head+half] = self.np_arr_in[:,self.last_head:head+half]
        #self.np_arr_out[:,self.last_head2+half:head2+half] = self.np_arr_in[:,self.last_head+half:head+half]

        self.socket_out.send(msgpack.dumps(pos))

        ## Debug mode. use head-1 instead head caus' send pos is out of the real data written (numpy way to use tables)
        #print 'Value write on pos ', head-1, ' array in: ', self.np_arr_in[1,head-1], ' array out: ', self.np_arr_out[1,head-1]
        #if self.np_arr_in[1,head-1] != self.np_arr_out[1,head-1]:
        #    print 'Error writing array out pos = ', pos

        self.last_head = head
        #self.last_head2 = head2
        t_out = time.time()

        t_wait = 1/self.sr_out - (t_out - t_in)
        #print 't wait :', t_wait
        if t_wait > 0:
            time.sleep(t_wait)
           # print 'sleep'
        else:
           # print 'Output stream sampling rate too fast for calculation'
            self.stop()
Example #27
def genrepo():
    '''
    Generate win_repo_cachefile based on sls files in the win_repo
    '''
    ret = {}
    repo = __opts__['win_repo']
    winrepo = __opts__['win_repo_mastercachefile']
    for root, dirs, files in os.walk(repo):
        for name in files:
            if name.endswith('.sls'):
                with salt.utils.fopen(os.path.join(root, name), 'r') as slsfile:
                    try:
                        config = yaml.safe_load(slsfile.read()) or {}
                    except yaml.parser.ParserError as exc:
                        # log.debug doesn't seem to be working
                        # delete the following print statement 
                        # when log.debug works
                        log.debug('Failed to compile '
                                '{0}: {1}'.format(os.path.join(root, name), exc))
                        print 'Failed to compile {0}: {1}'.format(os.path.join(root, name), exc)
                if config:
                    ret.update(config)
    with salt.utils.fopen(os.path.join(repo, winrepo), 'w') as repo:
        repo.write(msgpack.dumps(ret))
    salt.output.display_output(ret, 'pprint', __opts__)
    return ret
Example #28
def _pack_msgpack_snappy(obj):
    # print "pack", obj
    tmp = msgpack.dumps(obj, encoding='utf-8')
    if len(tmp) > 1000:
        return b'S' + snappy.compress(tmp)
    else:
        return b'\0' + tmp
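A hedged counterpart sketch (the name is an assumption, and it presumes the same pre-1.0 msgpack that accepts encoding=): undo the framing above by checking the one-byte marker before unpacking.

import msgpack
import snappy

def _unpack_msgpack_snappy(buf):
    if buf[:1] == b'S':
        return msgpack.loads(snappy.decompress(buf[1:]), encoding='utf-8')
    return msgpack.loads(buf[1:], encoding='utf-8')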
Example #29
def main(name):
   sm = generate_map(name)
   
   opcd = OPCD_Interface(sm['opcd_ctrl'])
   platform = opcd.get('platform')
   device = opcd.get(platform + '.nrf_serial')
   
   global THIS_SYS_ID
   THIS_SYS_ID = opcd.get('aircomm.id')
   key = opcd.get('aircomm.psk')
   crypt.init(key)
   mhist = MessageHistory(60)

   out_socket = sm['aircomm_out']
   in_socket = sm['aircomm_in']

   aci = Interface(device)
   acr = ACIReader(aci, out_socket, mhist)
   acr.start()

   # read from SCL in socket and send data via NRF
   while True:
      data = loads(in_socket.recv())
      if len(data) == 2:
         msg = [data[0], THIS_SYS_ID, data[1]]
      elif len(data) > 2:
         msg = [data[0], THIS_SYS_ID] + data[1:]
      else:
         continue
      crypt_data = crypt.encrypt(dumps(msg))
      mhist.append(crypt_data)
      aci.send(crypt_data)
Example #30
    def downloadBatch(self):
        self._send()        
        jids=self._downloadbatch.keys()
        self.blobstor._cmdchannel.send_multipart([msgpack.dumps([[0,"getresults",{},jids]]),"S",str(60),self.blobstor.sessionkey])
        res= self.blobstor._cmdchannel.recv_multipart()
       
        for item in res:
            if item=="":
                continue
            else:                
                jid,rcode,result=msgpack.loads(item)
                if rcode==0:
                    jid,key,dest,link,repoid,chmod,chownuid,chowngid=self._downloadbatch[jid]
                    key2=result[0]
                    if key2<>key:
                        raise RuntimeError("Keys need to be the same")
                    blob=result[2]
                    serialization=result[1]
                    
                    self._downloadFilePhase2(blob,dest,key,chmod,chownuid,chowngid,link,serialization)
                else:
                    ##TODO
                    pass

        self._downloadbatchSize=0
        self._downloadbatch={}
Example #31
    def _thread_loop(self, context, pipe):
        # Pyre helper functions
        def setup_group_member():
            group_member = Pyre(self.name)
            # set headers
            for header in self.default_headers:
                group_member.set_header(*header)
            # join active group
            group_member.join(self.active_group)

            # start group_member
            group_member.start()
            return group_member

        def shutdown_group_member(node):
            node.leave(self.active_group)
            node.stop()

        # setup sockets
        local_in = Msg_Receiver(context,
                                self.g_pool.ipc_sub_url,
                                topics=('remote_notify.', ))
        local_out = Msg_Dispatcher(context, self.g_pool.ipc_push_url)
        group_member = setup_group_member()

        # register sockets for polling
        poller = zmq.Poller()
        poller.register(pipe, zmq.POLLIN)
        poller.register(local_in.socket, zmq.POLLIN)
        poller.register(group_member.socket(), zmq.POLLIN)

        logger.info('Pupil Groups started.')

        # Poll loop
        while True:
            # Wait for next readable item
            readable = dict(poller.poll())

            # shout or whisper marked notifications
            if local_in.socket in readable:
                topic, notification = local_in.recv()
                remote_key = 'remote_notify'
                if notification[remote_key] == 'all':
                    del notification[remote_key]
                    serialized = serializer.dumps(notification)
                    group_member.shout(self.active_group, serialized)
                else:
                    peer_uuid_bytes = notification[remote_key]
                    del notification[remote_key]
                    serialized = serializer.dumps(notification)
                    peer_uuid = uuid.UUID(bytes=peer_uuid_bytes)
                    group_member.whisper(peer_uuid, serialized)

            if group_member.socket() in readable:
                event = PyreEvent(group_member)
                if event.msg:
                    for msg in event.msg:
                        try:
                            # try to unpack data
                            notification = serializer.loads(msg,
                                                            encoding='utf-8')
                            # test if dictionary and if `subject` key is present
                            notification['subject']
                            # add peer information
                            notification['groups.peer'] = {
                                'uuid_bytes': event.peer_uuid_bytes,
                                'name': event.peer_name,
                                'arrival_timestamp':
                                self.g_pool.get_timestamp(),
                                'type': event.type
                            }
                            local_out.notify(notification)
                        except Exception:
                            logger.info(
                                'Dropped garbage data by peer {} ({})'.format(
                                    event.peer_name, event.peer_uuid))
                elif event.type == 'JOIN' and event.group == self.active_group:
                    local_out.notify({
                        'subject': 'groups.member_joined',
                        'name': event.peer_name,
                        'uuid_bytes': event.peer_uuid_bytes
                    })
                elif (event.type == 'LEAVE' and event.group
                      == self.active_group) or event.type == 'EXIT':
                    local_out.notify({
                        'subject': 'groups.member_left',
                        'name': event.peer_name,
                        'uuid_bytes': event.peer_uuid_bytes
                    })

            if pipe in readable:
                command = pipe.recv_string()
                if command == '$RESTART':
                    # Restart group_member node to change name
                    poller.unregister(group_member.socket())
                    shutdown_group_member(group_member)
                    group_member = setup_group_member()
                    poller.register(group_member.socket(), zmq.POLLIN)
                elif command == '$TERM':
                    break

        del local_in
        del local_out
        shutdown_group_member(group_member)
        self.thread_pipe = None
Example #32
def export_msgpack(mesh):
    import msgpack
    blob = export_dict(mesh, encoding='binary')
    export = msgpack.dumps(blob)
    return export
Example #33
 def payload(self):
     capsules_as_bytes = [bytes(p) for p in self.capsules]
     capsule_signatures_as_bytes = [bytes(s) for s in self.capsule_signatures]
     packed_receipt_and_capsules = msgpack.dumps(
         (self.receipt_bytes, msgpack.dumps(capsules_as_bytes), msgpack.dumps(capsule_signatures_as_bytes)))
     return bytes(self.receipt_signature) + self.bob.stamp + packed_receipt_and_capsules
Example #34
 def send_waypoints(self):
     print("Sending waypoints to simulator ...")
     data = msgpack.dumps(self.waypoints)
     self.connection._master.write(data)
Example #35
    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError(
                "Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s"
                % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {
                'messaging': dict(CFG.server.amqp),
                'container-agent': self.name,
                'container-xp': bootstrap.get_sys_name()
            }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()  # cleanup the pidfile first
                self.quit()  # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)

        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # set up greenlet debugging signal handler
        gevent.signal(signal.SIGUSR2, self._handle_sigusr2)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects",
                                             DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # internal router for local transports
        self.local_router = LocalRouter(bootstrap.get_sys_name())
        self.local_router.start()
        self.local_router.ready.wait(timeout=2)
        self._capabilities.append("LOCAL_ROUTER")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.get_safe('container.sflow.enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node,
                                from_name=self.name,
                                service=self,
                                process=self)

        cleanup = lambda _: self.proc_manager._cleanup_method(self.name, rsvc)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name,
                                                listeners=[rsvc],
                                                service=self,
                                                cleanup_method=cleanup)
        self.proc_manager.proc_sup.ensure_ready(proc)
        proc.start_listeners()
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id,
                                     origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started = True
        self._status = "RUNNING"

        log.info("Container (%s) started, OK.", self.id)
Example #36
import salt.log
import salt.crypt
from salt.exceptions import SaltReqTimeoutError
from salt._compat import pickle

# Import third party libs
import zmq

log = salt.log.logging.getLogger(__name__)

try:
    # Attempt to import msgpack
    import msgpack
    # There is a serialization issue on ARM and potentially other platforms
    # for some msgpack bindings, check for it
    if msgpack.loads(msgpack.dumps([1, 2, 3]), use_list=True) is None:
        raise ImportError
except ImportError:
    # Fall back to msgpack_pure
    try:
        import msgpack_pure as msgpack
    except ImportError:
        # TODO: Come up with a sane way to get a configured logfile
        #       and write to the logfile when this error is hit also
        log_format = '[%(levelname)-8s] %(message)s'
        salt.log.setup_console_logger(log_format=log_format)
        log.fatal('Unable to import msgpack or msgpack_pure python modules')
        sys.exit(1)


def package(payload):
Example #37
def package(payload):
    '''
    This method for now just wraps msgpack.dumps, but it is here so that
    we can make the serialization a custom option in the future with ease.
    '''
    return msgpack.dumps(payload)
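A hedged counterpart sketch (not necessarily the project's own function): the matching unwrap would simply defer to msgpack.loads.

import msgpack

def unpackage(package_):
    return msgpack.loads(package_, use_list=True)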
Example #38
import struct
import msgpack

from ..utils import nbytes

BIG_BYTES_SHARD_SIZE = 2**26

msgpack_opts = {("max_%s_len" % x): 2**31 - 1
                for x in ["str", "bin", "array", "map", "ext"]}
msgpack_opts["strict_map_key"] = False

try:
    msgpack.loads(msgpack.dumps(""), raw=False, **msgpack_opts)
    msgpack_opts["raw"] = False
except TypeError:
    # Backward compat with old msgpack (prior to 0.5.2)
    msgpack_opts["encoding"] = "utf-8"


def frame_split_size(frame, n=BIG_BYTES_SHARD_SIZE) -> list:
    """
    Split a frame into a list of frames of maximum size

    This helps us to avoid passing around very large bytestrings.

    Examples
    --------
    >>> frame_split_size([b'12345', b'678'], n=3)  # doctest: +SKIP
    [b'123', b'45', b'678']
    """
    if nbytes(frame) <= n:
Example #39
 def to_token(self):
     return encode_base64(msgpack.dumps({
         self.KEY_DICT[key]: val
         for key, val in self._asdict().items()
     }))
Example #40
 def dumps(self):
     return (base64.urlsafe_b64encode(
         zlib.compress(msgpack.dumps(
             self._to_config_structure()))).decode("ascii").strip("="))
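A hedged inverse sketch (the name loads_config is an assumption): restore the stripped '=' padding, then reverse the base64 -> zlib -> msgpack pipeline used in dumps above.

import base64
import zlib
import msgpack

def loads_config(token):
    padded = token + '=' * (-len(token) % 4)
    return msgpack.loads(zlib.decompress(base64.urlsafe_b64decode(padded)))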
Example #41
 def dumps(self, value):
     return msgpack.dumps(value)
Example #42
    def submit_calculation(self,
                           data_list,
                           url_template,
                           workers=DROPQ_WORKERS,
                           increment_counter=True,
                           use_wnc_offset=True):

        if use_wnc_offset:
            wnc, created = WorkerNodesCounter.objects.get_or_create(
                singleton_enforce=1)
            dropq_worker_offset = wnc.current_offset
            if dropq_worker_offset > len(workers):
                dropq_worker_offset = 0
            if increment_counter:
                wnc.current_offset = (dropq_worker_offset +
                                      len(data_list)) % len(DROPQ_WORKERS)
                wnc.save()
        else:
            dropq_worker_offset = 0

        hostnames = workers[dropq_worker_offset:dropq_worker_offset +
                            len(data_list)]
        print("hostnames: ", hostnames)
        print("submitting data: ", data_list)
        num_hosts = len(hostnames)
        job_ids = []
        hostname_idx = 0
        max_queue_length = 0
        for data in data_list:
            year_submitted = False
            attempts = 0
            while not year_submitted:
                packed = msgpack.dumps({'inputs': data}, use_bin_type=True)
                theurl = url_template.format(hn=hostnames[hostname_idx])
                try:
                    response = self.remote_submit_job(
                        theurl,
                        data=packed,
                        timeout=TIMEOUT_IN_SECONDS,
                        headers=BYTES_HEADER)
                    if response.status_code == 200:
                        print("submitted: ", hostnames[hostname_idx])
                        year_submitted = True
                        response_d = response.json()
                        job_ids.append(
                            (response_d['job_id'], hostnames[hostname_idx]))
                        hostname_idx = (hostname_idx + 1) % num_hosts
                        if response_d['qlength'] > max_queue_length:
                            max_queue_length = response_d['qlength']
                    else:
                        print("FAILED: ", data, hostnames[hostname_idx])
                        hostname_idx = (hostname_idx + 1) % num_hosts
                        attempts += 1
                except Timeout:
                    print("Couldn't submit to: ", hostnames[hostname_idx])
                    hostname_idx = (hostname_idx + 1) % num_hosts
                    attempts += 1
                except RequestException as re:
                    print("Something unexpected happened: ", re)
                    hostname_idx = (hostname_idx + 1) % num_hosts
                    attempts += 1
                if attempts > MAX_ATTEMPTS_SUBMIT_JOB:
                    print("Exceeded max attempts. Bailing out.")
                    raise IOError()

        return job_ids, max_queue_length