Example #1
    def get(self, _type, key, key_path=None):
        k = self._key(_type, key)
        with self.index_lock:
            if k not in self.index:
                return None

        with self.locks[k]:
            if not key_path:
                # TODO: Should we do an extra copy?
                return msgpack.loads(self.caches[k])
            else:
                path = key_path.split('/')
                # Hard limit to avoid nasty lengthy requests
                if len(path) > 7 or len(path) < 1:
                    return None
                else:
                    cached = msgpack.loads(self.caches[k])
                    data = dict()
                    data['type'] = cached['type'] + '/' + key_path
                    data['key'] = cached['key']
                    data['timestamp'] = cached['data'].get('timestamp', None)

                    d = cached['data']
                    try:
                        d = reduce(lambda di, key: di.get(key, None), path, d)
                    except AttributeError:
                        d = None

                    if d:
                        data['data'] = d
                        return copy(data)
                    else:
                        return None
Example #2
def get_event_sensors(rows, row_columns, start_time, stop_time, max_samples=None):
    """
    Returns:
        (event_sensors, sensor_names)
        event_sensors: dict mapping sensor type to a list of samples
        sensor_names: dict mapping sensor type to sensor name
    """
    event_sensors = {}  # [type] = values
    sensor_types = {}  # [name] = type
    for row in rows:
        print(row_columns[row].keys())
        try:
            sensor_types.update(msgpack.loads(row_columns[row]['meta:sensor_types']))
        except KeyError:
            continue
        for sensor_name, samples in msgpack.loads(row_columns[row]['meta:sensor_samples']).items():
            sensor_type = sensor_types[sensor_name]
            for s in samples:
                if not (start_time <= s[1] <= stop_time):
                    continue
                event_sensors.setdefault(sensor_type, []).append(s)
    sensor_names = {v: k for k, v in sensor_types.items()}  # [type] = name
    for t in event_sensors:
        event_sensors[t].sort(key=lambda x: x[1])
    if max_samples is not None:
        for t in event_sensors:
            if len(event_sensors[t]) < max_samples:
                continue
            skip = len(event_sensors[t]) // (max_samples - 1)  # integer step for the slice below
            event_sensors[t] = event_sensors[t][:-1:skip] + [event_sensors[t][-1]]
    return event_sensors, sensor_names
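A small worked illustration of the stride-based downsampling above, using hypothetical numbers (10 samples, max_samples of 4) and plain integers standing in for the sample tuples:

samples = list(range(10))                  # stand-in for one per-type sample list
max_samples = 4
skip = len(samples) // (max_samples - 1)   # 10 // 3 == 3, an integer slice step
kept = samples[:-1:skip] + [samples[-1]]   # [0, 3, 6] + [9] -> [0, 3, 6, 9]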
Example #3
def my_loads(header, frames):
    obj = MyObject(**msgpack.loads(frames[0], raw=False))

    # to provide something to test against, lets just attach the context to
    # the object itself
    obj.context = msgpack.loads(frames[1], raw=False)
    return obj
Example #4
def assert_packed_msg_equal(b1, b2):
    """Assert that two packed msgpack messages are equal."""
    msg1 = msgpack.loads(b1, encoding='utf8')
    msg2 = msgpack.loads(b2, encoding='utf8')
    assert sorted(msg1.keys()) == sorted(msg2.keys())
    for (k1, v1), (k2, v2) in zip(sorted(msg1.items()), sorted(msg2.items())):
        assert k1 == k2
        assert v1 == v2
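The encoding='utf8' keyword used above was removed in msgpack 1.0; a version-tolerant sketch of the same helper, assuming the raw=False replacement that later examples probe for, might look like this:

import msgpack

def assert_packed_msg_equal_compat(b1, b2):
    """Assert that two packed msgpack messages are equal (sketch)."""
    try:
        # msgpack >= 0.5.2: decode str values via raw=False
        msg1 = msgpack.loads(b1, raw=False)
        msg2 = msgpack.loads(b2, raw=False)
    except TypeError:
        # older msgpack without the raw keyword
        msg1 = msgpack.loads(b1, encoding='utf8')
        msg2 = msgpack.loads(b2, encoding='utf8')
    assert sorted(msg1.keys()) == sorted(msg2.keys())
    for (k1, v1), (k2, v2) in zip(sorted(msg1.items()), sorted(msg2.items())):
        assert k1 == k2
        assert v1 == v2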
Example #5
    def load(data):
        r = Request()
        try:
            r.__dict__ = msgpack.loads(data, encoding="utf-8")
        except (UnicodeDecodeError):
            r.__dict__ = msgpack.loads(data, encoding="ISO-8859-1")

        return r
Example #6
 def get_notify_data(self, notify_id):
     try:
         ret = self.redis.hget(self.__hash_name__, notify_id)
         if ret is not None:
             _tmp = loads(ret)
             log.debug('ct=%d|difftm=%d', _tmp['ct'], int(time.time()*1000)-_tmp['ct'])
             return _tmp['data']
     except Exception:
         log.warn(traceback.format_exc())
Example #7
def yield_api():
    try:
        chunk = yield s.enqueue('chunkMe', num)
        msgpack.loads(chunk)
        while True:
            ch = yield
            msgpack.loads(ch)
    except ChokeEvent:
        IOLoop.current().stop()
Example #8
 def loads(self, msg):
     '''
     Deserialize msg using the configured serialization format
     '''
     if self.serial == 'msgpack':
         return msgpack.loads(msg, use_list=True)
     elif self.serial == 'pickle':
         try:
             return pickle.loads(msg)
         except Exception:
             return msgpack.loads(msg, use_list=True)
Example #9
def _unpack_msgpack_snappy(str):
    if str.startswith(b'S'):
        tmp = snappy.uncompress(str[1:])
        # print "SNAPPY: ", len(str), len(tmp)
        obj = msgpack.loads(tmp, encoding='utf-8')
    elif str.startswith(b'\0'):
        obj = msgpack.loads(str[1:], encoding='utf-8')
    else:
        return None
    
    return obj
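Only the decoder is shown here; a minimal sketch of a matching encoder, assuming the same b'S' / b'\0' prefix convention and a hypothetical size threshold before compressing:

import msgpack
import snappy

def _pack_msgpack_snappy(obj, compress_threshold=1000):
    # hypothetical counterpart to _unpack_msgpack_snappy above
    tmp = msgpack.dumps(obj)
    if len(tmp) > compress_threshold:
        return b'S' + snappy.compress(tmp)
    return b'\0' + tmp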
Example #10
 def test_compare(self):
     for x in glob.glob("picarus_takeout_models/test_models/picarus-*.msgpack.gz"):
         print(x)
         m0 = PicarusModel(x)
         m1 = PicarusCommandModel(x)
         for y in glob.glob("picarus_takeout_models/test_images/*"):
             outm0 = m0.process_binary(y)
             outm1 = m1.process_binary(y)
             if outm0 != outm1:
                 print(msgpack.loads(outm0))
                 print(msgpack.loads(outm1))
             self.assertEqual(outm0, outm1)
Example #11
def _unpack(str):

    if str[0] == 'S':
        tmp = snappy.uncompress(str[1:])
        obj = msgpack.loads(tmp)
    elif str[0] == '\0':
        obj = msgpack.loads(str[1:])
    else:
        return None

    #print "UNPACK", obj
    return obj
Example #12
    def loads(cls, data_str, msg_pack=False):
        if msg_pack:
            if isinstance(data_str, bytes):
                data = msgpack.loads(data_str, encoding='utf-8')
            else:
                data = msgpack.loads(data_str)
        else:
            data = json.loads(data_str)

        if isinstance(data, (list, tuple,)):
            return [cls.restore(d) for d in data]
        return cls.restore(data)
Example #13
 def from_token(cls, token):
     if PY3:
         # The argument raw=False is only available on new versions of
         # msgpack, and only really needed on Python 3. Gate it behind
         # a PY3 check to avoid causing issues on Debian-packaged versions.
         decoded = msgpack.loads(decode_base64(token), raw=False)
     else:
         decoded = msgpack.loads(decode_base64(token))
     return RoomListNextBatch(**{
         cls.REVERSE_KEY_DICT[key]: val
         for key, val in decoded.items()
     })
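Only the decoding direction appears here; a hedged sketch of the matching to_token, assuming RoomListNextBatch is a namedtuple, KEY_DICT is the inverse of REVERSE_KEY_DICT, and an encode_base64 helper mirrors decode_base64:

 def to_token(self):
     # shorten field names via KEY_DICT, pack with msgpack, then base64-encode
     return encode_base64(msgpack.dumps({
         self.KEY_DICT[key]: val
         for key, val in self._asdict().items()
     }))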
Example #14
def load_market_data():
    try:
        fp_bm = get_datafile('benchmark.msgpack', "rb")
    except IOError:
        print """
data msgpacks aren't distributed with source.
Fetching data from Yahoo Finance.
""".strip()
        dump_benchmarks()
        fp_bm = get_datafile('benchmark.msgpack', "rb")

    bm_list = msgpack.loads(fp_bm.read())
    bm_returns = []
    for packed_date, returns in bm_list:
        event_dt = tuple_to_date(packed_date)
        #event_dt = event_dt.replace(
        #    hour=0,
        #    minute=0,
        #    second=0,
        #    tzinfo=pytz.utc
        #)

        daily_return = risk.DailyReturn(date=event_dt, returns=returns)
        bm_returns.append(daily_return)

    fp_bm.close()

    bm_returns = sorted(bm_returns, key=attrgetter('date'))

    try:
        fp_tr = get_datafile('treasury_curves.msgpack', "rb")
    except IOError:
        print """
data msgpacks aren't distributed with source.
Fetching data from data.treasury.gov
""".strip()
        dump_treasury_curves()
        fp_tr = get_datafile('treasury_curves.msgpack', "rb")

    tr_list = msgpack.loads(fp_tr.read())
    tr_curves = {}
    for packed_date, curve in tr_list:
        tr_dt = tuple_to_date(packed_date)
        #tr_dt = tr_dt.replace(hour=0, minute=0, second=0, tzinfo=pytz.utc)
        tr_curves[tr_dt] = curve

    fp_tr.close()

    tr_curves = OrderedDict(sorted(
                            ((dt, c) for dt, c in tr_curves.iteritems()),
                            key=lambda t: t[0]))

    return bm_returns, tr_curves
Example #15
 def fetchAll():
     chunk = yield service.enqueue('chunkMe', str(sys.argv[1]))
     chunk = msgpack.loads(chunk)
     size = len(chunk)
     counter = 0
     while True:
         ch = yield
         chunk = msgpack.loads(ch)
         size += len(chunk)
         counter += 1
         print(counter, len(chunk), size)
         if chunk == 'Done':
             break
Example #16
 def _run(self, picarus_model_class):
     results = {}
     model_path = "picarus_takeout_models/test_models/"
     image_path = "picarus_takeout_models/test_images/"
     for x in glob.glob(model_path + "picarus-*.msgpack.gz"):
         model_results = {}
         model = picarus_model_class(x)
         for y in glob.glob(image_path + "*"):
             model_results[os.path.basename(y)] = base64.b64encode(model.process_binary(y))
         results[os.path.basename(x)] = model_results
     json.dump(results, gzip.GzipFile("test_model_outputs-%s.js.gz" % (picarus_model_class.__name__,), "w"))
     prev_results = json.load(gzip.GzipFile("picarus_takeout_models/test_models/test_model_outputs.js.gz"))
     num_checked = 0
     failed_models = []
     failed_images = {}
     for x in set(results).intersection(set(prev_results)):
         for y in set(results[x]).intersection(set(prev_results[x])):
             num_checked += 1
             if results[x][y] == prev_results[x][y]:
                 continue
             try:
                 a = msgpack.loads(base64.b64decode(results[x][y]))
             except Exception:
                 raise ValueError("Can't test for partial equality because output is not msgpack encoded")
             b = msgpack.loads(base64.b64decode(prev_results[x][y]))
             if not self.almostEqualAny(a, b):
                 if 1:
                     print("Current(b64msgpack)--------")
                     print(results[x][y])
                     print("Previous(b64msgpack)-------")
                     print(prev_results[x][y])
                     print("Current--------")
                     print(msgpack.loads(base64.b64decode(results[x][y])))
                     print("Previous-------")
                     print(msgpack.loads(base64.b64decode(prev_results[x][y])))
                 try:
                     failed_images[y] += 1
                 except KeyError:
                     failed_images[y] = 1
                 failed_models.append(model_path + x)
                 print(
                     "Process Failed[%s][%s][%s][%s]"
                     % (x, y, hashlib.sha1(results[x][y]).hexdigest(), hashlib.sha1(prev_results[x][y]).hexdigest())
                 )
     print("Number of models * images checked[%d][%r]" % (num_checked, picarus_model_class))
     blame_components(failed_models)
     print(failed_images)
     self.assertEqual(len(failed_models), 0)
Example #17
    def kill_workers(self, timeout=5):
        """
        Send a suicide message to all workers, with some kind of timeout.
        """
        logging.info('Killing workers, taking up to %d seconds.', int(timeout))
        poller = zmq.Poller()
        poller.register(self.results_pull, zmq.POLLIN)

        while True:
            # Seems to get stuck gevent-blocking in the work_push.send() after
            # all the workers have died.  Also, gevent.Timeout() doesn't seem
            # to work here?!
            signal.alarm(int(timeout))
            self.work_push.send(msgpack.dumps([{'type': 'PING'}]))
            socks = dict(poller.poll(timeout * 1500))
            if self.results_pull in socks \
                    and socks[self.results_pull] == zmq.POLLIN:
                result_packed = self.results_pull.recv()
                result = msgpack.loads(result_packed)
                logging.info('Heard from worker id=%d; sending SUICIDE',
                            result['worker_id'])
                self.work_push.send(msgpack.dumps([{'type': 'SUICIDE'}]))
                gevent.sleep(0.1)
            else:
                break
            signal.alarm(0)
Example #18
    def from_frame(cls, frame):
        """
        Construct a ``Message`` from a raw binary frame.

        :param frame: The binary frame.

        :returns: A constructed ``Message`` instance.
        """

        # Load the data
        data = msgpack.loads(frame)

        # A PDU must be a msgpack-encoded dict
        if not isinstance(data, dict):
            raise ValueError('invalid PDU')

        # It must always have a __version__ and a type
        try:
            version = data['__version__']
            msg_type = data['msg_type']
        except KeyError as e:
            raise ValueError("missing required PDU field %s" % e)

        # Construct a message; we pass the frame in to prime the frame
        # cache
        return cls(msg_type, __version__=version, __frame__=frame, **dict(
            (k, v) for k, v in data.items()
            if k not in ('__version__', 'msg_type')))
Example #19
def decode_value(value):
    """Decodes a cliqztionary value."""
    if value is None or len(value) == 0:
        return None
    elif len(value) > 1 and value[0] == ' ':
        value = zlib.decompress(value[1:])
    return msgpack.loads(value)
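Only the read path is shown; a minimal sketch of the writing side, assuming the leading space byte marks zlib-compressed payloads exactly as decode_value expects, and compressing only when it pays off:

import zlib
import msgpack

def encode_value(obj):
    # hypothetical counterpart to decode_value above
    packed = msgpack.dumps(obj)
    compressed = b' ' + zlib.compress(packed)
    return compressed if len(compressed) < len(packed) else packed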
Example #20
 def test_set(self):
     "success type (+OK)"
     self.query("DEL/hello")
     f = self.query("SET/hello/world.msg")
     self.assertTrue(f.headers.getheader("Content-Type") == "application/x-msgpack")
     obj = msgpack.loads(f.read())
     self.assertTrue(obj == {"SET": (True, "OK")})
Example #21
def classifier_sklearn(row_cols, params):
    label_features = {0: [], 1: []}
    for row, columns in row_cols:
        label = int(columns['meta'] == params['class_positive'])
        label_features[label].append(msgpack.loads(columns['feature'])[0])
        if np.any(np.isnan(label_features[label][-1])):
            import base64
            print('Row[%s] is NaN' % (base64.b64encode(row)))
    labels = [0] * len(label_features[0]) + [1] * len(label_features[1])
    features = label_features[0] + label_features[1]
    features = np.asfarray(features)
    print('Feature Shape[%s]' % repr(features.shape))
    import sklearn.svm
    classifier = sklearn.svm.LinearSVC()
    try:
        classifier.fit(features, np.asarray(labels))
    except:
        print('Debug info')
        for f in features:
            print(f.tolist())
        print(labels)
        raise
    model_link = {'name': 'picarus.LinearClassifier', 'kw': {'coefficients': classifier.coef_.tolist()[0],
                                                             'intercept': classifier.intercept_[0]}}
    return 'feature', 'binary_class_confidence', model_link
Example #22
    def parse(self):
        '''
        Parses body. Assumes already unpacked.
        Results in updated .data
        '''
        bk = self.packet.data['bk']

        if bk not in raeting.BODY_KIND_NAMES:
            self.packet.data['bk'] = raeting.bodyKinds.unknown
            emsg = "Unrecognizable packet body."
            raise raeting.PacketError(emsg)

        self.data = odict()

        if bk == raeting.bodyKinds.json:
            if self.packed:
                kit = json.loads(self.packed, object_pairs_hook=odict)
                if not isinstance(kit, Mapping):
                    emsg = "Packet body not a mapping."
                    raise raeting.PacketError(emsg)
                self.data = kit
        elif bk == raeting.bodyKinds.msgpack:
            if self.packed:
                if not msgpack:
                    emsg = "Msgpack not installed."
                    raise raeting.PacketError(emsg)
                kit = msgpack.loads(self.packed, object_pairs_hook=odict)
                if not isinstance(kit, Mapping):
                    emsg = "Packet body not a mapping."
                    raise raeting.PacketError(emsg)
                self.data = kit
        elif bk == raeting.bodyKinds.raw:
            self.data = self.packed # return as string
        elif bk == raeting.bodyKinds.nada:
            pass
Example #23
    def downloadBatch(self):
        self._send()        
        jids=self._downloadbatch.keys()
        self.blobstor._cmdchannel.send_multipart([msgpack.dumps([[0,"getresults",{},jids]]),"S",str(60),self.blobstor.sessionkey])
        res= self.blobstor._cmdchannel.recv_multipart()
       
        for item in res:
            if item=="":
                continue
            else:                
                jid,rcode,result=msgpack.loads(item)
                if rcode==0:
                    jid,key,dest,link,repoid,chmod,chownuid,chowngid=self._downloadbatch[jid]
                    key2=result[0]
                    if key2 != key:
                        raise RuntimeError("Keys need to be the same")
                    blob=result[2]
                    serialization=result[1]
                    
                    self._downloadFilePhase2(blob,dest,key,chmod,chownuid,chowngid,link,serialization)
                else:
                    ##TODO
                    pass

        self._downloadbatchSize=0
        self._downloadbatch={}
Example #24
def main(name):
   sm = generate_map(name)
   
   opcd = OPCD_Interface(sm['opcd_ctrl'])
   platform = opcd.get('platform')
   device = opcd.get(platform + '.nrf_serial')
   
   global THIS_SYS_ID
   THIS_SYS_ID = opcd.get('aircomm.id')
   key = opcd.get('aircomm.psk')
   crypt.init(key)
   mhist = MessageHistory(60)

   out_socket = sm['aircomm_out']
   in_socket = sm['aircomm_in']

   aci = Interface(device)
   acr = ACIReader(aci, out_socket, mhist)
   acr.start()

   # read from SCL in socket and send data via NRF
   while True:
      data = loads(in_socket.recv())
      if len(data) == 2:
         msg = [data[0], THIS_SYS_ID, data[1]]
      elif len(data) > 2:
         msg = [data[0], THIS_SYS_ID] + data[1:]
      else:
         continue
      crypt_data = crypt.encrypt(dumps(msg))
      mhist.append(crypt_data)
      aci.send(crypt_data)
Example #25
File: tcp.py Project: iquaba/salt
 def wrap_callback(body):
     if not isinstance(body, dict):
         # TODO: For some reason we need to decode here for things
         #       to work. Fix this.
         body = msgpack.loads(body)
     ret = yield self._decode_payload(body)
     callback(ret)
Example #26
    def test_published(self):
        self.data_sent = []
        with mock.patch('socket.socket',
                        self._make_fake_socket(self.data_sent)):
            publisher = udp.UDPPublisher(
                netutils.urlsplit('udp://somehost'))
        publisher.publish_samples(None,
                                  self.test_data)

        self.assertEqual(5, len(self.data_sent))

        sent_counters = []

        for data, dest in self.data_sent:
            counter = msgpack.loads(data)
            sent_counters.append(counter)

            # Check destination
            self.assertEqual(('somehost',
                              self.CONF.collector.udp_port), dest)

        # Check that counters are equal
        self.assertEqual(sorted(
            [utils.meter_message_from_counter(d, "not-so-secret")
             for d in self.test_data]), sorted(sent_counters))
Example #27
    def start_udp(self):
        address_family = socket.AF_INET
        if netutils.is_valid_ipv6(cfg.CONF.collector.udp_address):
            address_family = socket.AF_INET6
        udp = socket.socket(address_family, socket.SOCK_DGRAM)
        udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        udp.bind((cfg.CONF.collector.udp_address,
                  cfg.CONF.collector.udp_port))

        self.udp_run = True
        while self.udp_run:
            # NOTE(jd) Arbitrary limit of 64K because that ought to be
            # enough for anybody.
            data, source = udp.recvfrom(64 * units.Ki)
            try:
                sample = msgpack.loads(data, encoding='utf-8')
            except Exception:
                LOG.warn(_("UDP: Cannot decode data sent by %s"), source)
            else:
                try:
                    LOG.debug(_("UDP: Storing %s"), sample)
                    self.dispatcher_manager.map_method('record_metering_data',
                                                       sample)
                except Exception:
                    LOG.exception(_("UDP: Unable to store meter"))
Example #28
   def run(self):
      s = 0
      while True:
         try:
            # receive encrypted message:
            crypt_data = self.aci.receive()
            if crypt_data:
               
               # check if we have seen this message before:
               if not self.mhist.check(crypt_data):
                  continue
               
               # decrypt message:
               raw_msg = crypt.decrypt(crypt_data)
               
               # load msgpack contents:
               try:
                  msg = loads(raw_msg)
               except Exception, e:
                  continue
               
               addr = msg[0]
               # if message is meant for us, forward to application(s):
               if addr in [THIS_SYS_ID, BCAST, BCAST_NOFW]:
                  msg_scl = dumps(msg[1:]) # strip the type and pack again
                  self.scl_socket.send(msg_scl)

               # if the message (a) is not meant for us or (b) is NOFW, re-broadcast:
               if addr not in [THIS_SYS_ID, BCAST_NOFW]:
                  self.aci.send(crypt_data)

         except Exception, e:
            sleep(1)
Example #29
def get_repo_data(saltenv="base"):
    """
    Returns the cached winrepo data

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.get_repo_data
    """
    # if 'winrepo.data' in __context__:
    #    return __context__['winrepo.data']
    repocache_dir = _get_local_repo_dir(saltenv=saltenv)
    winrepo = "winrepo.p"
    try:
        with salt.utils.fopen(os.path.join(repocache_dir, winrepo), "rb") as repofile:
            try:
                repodata = msgpack.loads(repofile.read()) or {}
                return repodata
            except Exception as exc:
                log.exception(exc)
                return {}
    except IOError as exc:
        log.error("Not able to read repo file")
        log.exception(exc)
        return {}
Example #30
 def _read_index_entry(self, table, prev):
     table["fp"].seek(prev)
     table["fp"].seek(prev)
     data_head = struct.unpack(IND_HEAD_FMT, table["fp"].read(3))
     index = msgpack.loads(table["fp"].read(data_head[0]))
     index["_status"] = data_head[1]
     return index
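For orientation, the entry read above is a 3-byte struct header (payload length plus a status flag) followed by a msgpack body; a minimal sketch of the writing side, assuming IND_HEAD_FMT is something like '<HB' (unsigned short length, unsigned char status):

import struct
import msgpack

IND_HEAD_FMT = '<HB'  # assumed layout: 2-byte body length + 1-byte status

def _write_index_entry(fp, index, status=0):
    # hypothetical counterpart to _read_index_entry above
    body = msgpack.dumps({k: v for k, v in index.items() if k != '_status'})
    fp.write(struct.pack(IND_HEAD_FMT, len(body), status))
    fp.write(body)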
Example #31
sub_port = pupil_remote.recv_string()

# Request 'PUB_PORT' for writing data
pupil_remote.send_string('PUB_PORT')
pub_port = pupil_remote.recv_string()

# Assumes `sub_port` to be set to the current subscription port
subscriber = ctx.socket(zmq.SUB)
subscriber.connect(f'tcp://{ip}:{sub_port}')
subscriber.subscribe('surface')  # receive all surface messages
subscriber.subscribe('gaze')  # receive all gaze messages
subscriber.subscribe('pupil')  # receive all diameter messages

while True:
    topic, payload = subscriber.recv_multipart()
    message = msgpack.loads(payload)
    # print(f"{topic}: {message}")

    topic_str = topic.decode("utf-8")
    # TODO use topic_str ending to determine the eye it belongs to
    # then memorize last right and left eye; whenever both are set, average them (using function from Tobii script)
    # create and send same event

    if topic_str.startswith('pupil'):
        diameter = message[b'diameter']
        id = message[b'id']
        if id == 0:
            pupil0 = diameter
        elif id == 1:
            pupil1 = diameter
Example #32
import struct
import msgpack

from ..utils import ensure_bytes, nbytes

BIG_BYTES_SHARD_SIZE = 2**26

msgpack_opts = {("max_%s_len" % x): 2**31 - 1
                for x in ["str", "bin", "array", "map", "ext"]}
msgpack_opts["strict_map_key"] = False

try:
    msgpack.loads(msgpack.dumps(""), raw=False, **msgpack_opts)
    msgpack_opts["raw"] = False
except TypeError:
    # Backward compat with old msgpack (prior to 0.5.2)
    msgpack_opts["encoding"] = "utf-8"


def frame_split_size(frame, n=BIG_BYTES_SHARD_SIZE) -> list:
    """
    Split a frame into a list of frames of maximum size

    This helps us to avoid passing around very large bytestrings.

    Examples
    --------
    >>> frame_split_size([b'12345', b'678'], n=3)  # doctest: +SKIP
    [b'123', b'45', b'678']
    """
    if nbytes(frame) <= n:
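        return [frame]

    # Hedged completion (the original listing is truncated here): slice the frame
    # into at-most-n-byte memoryview chunks; the exact upstream implementation may differ.
    view = memoryview(frame)
    return [view[i:i + n] for i in range(0, nbytes(frame), n)]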
Example #33
    def spin(self):
        '''
        Spinning loop: receive ZMQ data and publish to ROS topics
        :return:
        '''
        if not self.ros_started:
            print 'Pupil_ZMQ_ROS: ros not started'
            return
        while True:
            # rospy.is_shutdown check inside while loop to enable Ctrl-C termination
            if rospy.is_shutdown():
                break
            # receive message from ZMQ subscriber
            zmq_multipart = self.zmq_sub.recv_multipart()
            zmq_topic, zmq_raw_msg = zmq_multipart[0], zmq_multipart[1]
            # ROS header message
            header = Header()
            header.seq = self.seq
            header.stamp = rospy.get_rostime()
            header.frame_id = "Pupil_ZMQ_ROS"
            zmq_msg = loads(zmq_raw_msg)
            if 'pupil' in zmq_topic:
                # pupil data parser
                pupil_msg = pupil_positions()
                pupil_msg.header = header
                # the pupil_info_list contains only one pupil data to keep
                # the ROS message format same as Pupil_ROS_Bridge plugin
                pupil_info_list = []
                pupil_info = pupil()
                pupil_info.diameter = zmq_msg['diameter']
                pupil_info.confidence = zmq_msg['confidence']
                pupil_info.projected_sphere_axes = tupleToPoint(
                    zmq_msg['projected_sphere'].get('axes'))
                pupil_info.projected_sphere_angle = zmq_msg[
                    'projected_sphere'].get('angle')
                pupil_info.projected_sphere_center = tupleToPoint(
                    zmq_msg['projected_sphere'].get('center'))
                pupil_info.model_id = zmq_msg['model_id']
                pupil_info.model_confidence = zmq_msg['model_confidence']
                pupil_info.pupil_timestamp = zmq_msg['timestamp']
                pupil_info.model_birth_timestamp = zmq_msg[
                    'model_birth_timestamp']
                pupil_info.topic = zmq_msg['topic']
                pupil_info.sphere_radius = zmq_msg['sphere'].get('radius')
                pupil_info.sphere_center = tupleToPoint(
                    zmq_msg['sphere'].get('center'))
                pupil_info.diameter_3d = zmq_msg['diameter_3d']
                pupil_info.ellipse_axes = tupleToPoint(
                    zmq_msg['ellipse'].get('axes'))
                pupil_info.ellipse_angle = zmq_msg['ellipse'].get('angle')
                pupil_info.ellipse_center = tupleToPoint(
                    zmq_msg['ellipse'].get('center'))
                pupil_info.norm_pos = tupleToPoint(zmq_msg['norm_pos'])
                pupil_info.phi = zmq_msg['phi']
                pupil_info.theta = zmq_msg['theta']
                pupil_info.circle_3d_radius = zmq_msg['circle_3d'].get(
                    'radius')
                pupil_info.circle_3d_center = tupleToPoint(
                    zmq_msg['circle_3d'].get('center'))
                pupil_info.circle_3d_normal = tupleToPoint(
                    zmq_msg['circle_3d'].get('normal'))
                pupil_info.id = zmq_msg['id']
                pupil_info_list.append(pupil_info)
                pupil_msg.pupils = pupil_info_list
                self.ros_pupil_publisher.publish(pupil_msg)
            if 'gaze' in zmq_topic:
                # gaze data after combining pupil data and gaze mapping plugin
                gaze_msg = gaze_positions()
                # the gaze_info_list contains only one gaze data to keep
                # the ROS message format same as Pupil_ROS_Bridge plugin
                gaze_info_list = []
                gaze_info = gaze()
                gaze_info.confidence = zmq_msg['confidence']
                gaze_info.norm_pos = tupleToPoint(zmq_msg.get('norm_pos'))
                gaze_info.gaze_point_3d = tupleToPoint(
                    zmq_msg.get('gaze_point_3d'))
                gaze_info.gaze_normal_3d = tupleToPoint(
                    zmq_msg.get('gaze_normal_3d'))
                gaze_info.eye_center_3d = tupleToPoint(
                    zmq_msg.get('eye_center_3d'))
                gaze_info.pupil_timestamp = zmq_msg['timestamp']
                gaze_info_list.append(gaze_info)
                gaze_msg.gazes = gaze_info_list
                gaze_msg.header = header
                self.ros_gaze_publisher.publish(gaze_msg)

            if 'frame.world' in zmq_topic:
                if zmq_msg['format'] == 'bgr':
                    # pupil eye.py and frame_publisher.py should be updated according to
                    # Issue: https://github.com/pupil-labs/pupil/issues/525
                    cv_img = np.frombuffer(zmq_multipart[2],
                                           dtype=np.uint8).reshape(
                                               zmq_msg['height'],
                                               zmq_msg['width'], 3)
                    world_image_msg = self.cv_bridge.cv2_to_imgmsg(
                        cv_img, encoding="bgr8")
                    world_image_msg.header = header
                    self.ros_world_img_publisher.publish(world_image_msg)

                    # for now, hardcode calibration matrix for world camera...eventually read from file
                    self.cameraInfo.height = 720
                    self.cameraInfo.width = 1280
                    self.cameraInfo.K = [
                        812.4802979544222, 0.0, 606.6808701300608, 0.0,
                        744.0427566597859, 386.0530183151042, 0.0, 0.0, 1.0
                    ]
                    self.cameraInfo.D = [0, 0, 0, 0, 0]
                    self.cameraInfo.distortion_model = 'plumb_bob'
                    self.cameraInfo.R = [
                        1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0
                    ]
                    self.cameraInfo.P = [
                        812.4802979544222, 0.0, 606.6808701300608, 0.0, 0.0,
                        744.0427566597859, 386.0530183151042, 0.0, 0.0, 0.0,
                        1.0, 0.0
                    ]

                    #self.cameraInfo.D = [0, 0, 0, 0, 0]
                    #self.cameraInfo.K = [0, 0.0, 0, 0.0, 0, 0, 0.0, 0.0, 0.0]
                    #self.cameraInfo.distortion_model = ''

                    self.cameraInfo.header = header
                    self.ros_world_calibration_publisher.publish(
                        self.cameraInfo)

            if 'frame.eye.0' in zmq_topic:
                if zmq_msg['format'] == 'bgr':
                    cv_img = np.frombuffer(zmq_multipart[2],
                                           dtype=np.uint8).reshape(
                                               zmq_msg['height'],
                                               zmq_msg['width'], 3)
                    eye0_image_msg = self.cv_bridge.cv2_to_imgmsg(
                        cv_img, encoding="bgr8")
                    eye0_image_msg.header = header
                    self.ros_eye0_img_publisher.publish(eye0_image_msg)
            if 'frame.eye.1' in zmq_topic:
                if zmq_msg['format'] == 'bgr':
                    cv_img = np.frombuffer(zmq_multipart[2],
                                           dtype=np.uint8).reshape(
                                               zmq_msg['height'],
                                               zmq_msg['width'], 3)
                    eye1_image_msg = self.cv_bridge.cv2_to_imgmsg(
                        cv_img, encoding="bgr8")
                    eye1_image_msg.header = header
                    self.ros_eye1_img_publisher.publish(eye1_image_msg)

        # Disable ROS interface
        self.ros_started = False
Example #34
import kiwi.db
from msgpack import loads

d = kiwi.db.DB('/tmp')
it = kiwi.db.DBIterator(d)
it.seek('e/')

for k, v in it:
    print "%s => %s" % (k, loads(v)), repr(v)

d.close()
Example #35
import pytest
import falcon
import msgpack
import json
import re
from falcon import testing

import mcuapi.app
from mcuapi.constants import MCUAPI_URL
from mcuapi.schema import FilmSchema, CharacterSchema
from mcuapi.content import Content
from mcuapi.utils import MEDIA_HANDLERS

MEDIA_FMT_PARAMS = (('msgpack', lambda data: msgpack.loads(data, raw=False)),
                    ('json', json.loads), ('', json.loads))


@pytest.fixture
def client(db):
    api = mcuapi.app.create_app(db, MEDIA_HANDLERS)
    return testing.TestClient(api)


@pytest.fixture
def film_re():
    return re.compile('/'.join((MCUAPI_URL, 'films', '\\d+')))


@pytest.fixture
def character_re():
    return re.compile('/'.join((MCUAPI_URL, 'characters', '\\d+')))
Example #36
 def msgpackloads(b):
     return msgpack.loads(b, **loadargs)
Example #37
 def _getSnapshot(self, stateType, key):
     raw = self._redis.get(key)
     out = []
     if raw is not None:
         out = msgpack.loads(raw)[stateType]
     return out
Example #38
# Import third party libs
import salt.ext.six as six
try:
    import zmq
except ImportError:
    # No need for zeromq in local mode
    pass

log = logging.getLogger(__name__)

try:
    # Attempt to import msgpack
    import msgpack
    # There is a serialization issue on ARM and potentially other platforms
    # for some msgpack bindings, check for it
    if msgpack.loads(msgpack.dumps([1, 2, 3]), use_list=True) is None:
        raise ImportError
except ImportError:
    # Fall back to msgpack_pure
    try:
        import msgpack_pure as msgpack  # pylint: disable=import-error
    except ImportError:
        # TODO: Come up with a sane way to get a configured logfile
        #       and write to the logfile when this error is hit also
        LOG_FORMAT = '[%(levelname)-8s] %(message)s'
        salt.log.setup_console_logger(log_format=LOG_FORMAT)
        log.fatal('Unable to import msgpack or msgpack_pure python modules')
        # Don't exit if msgpack is not available, this is to make local mode
        # work without msgpack
        #sys.exit(salt.defaults.exitcodes.EX_GENERIC)
Example #39
    def _fetch(self, url, cache=None, size=None, allow_redirects=True):
        """Fetch content from a url into a file.

        Very similar to _download but lacks any "file" management and decodes
        content

        Parameters
        ----------
        url: str
          URL to download
        cache: bool, optional
          If None, config is consulted to decide whether results should be
          cached. Caching is keyed on the url, so no verification of any kind
          is carried out.

        Returns
        -------
        bytes, dict
          content, headers
        """
        lgr.log(3, "_fetch(%r, cache=%r, size=%r, allow_redirects=%r)", url,
                cache, size, allow_redirects)
        if cache is None:
            cache = cfg.obtain('datalad.crawl.cache', default=False)

        if cache:
            cache_key = msgpack.dumps(url)
            lgr.debug("Loading content for url %s from cache", url)
            res = self.cache.get(cache_key)
            if res is not None:
                try:
                    return msgpack.loads(res, encoding='utf-8')
                except Exception as exc:
                    lgr.warning(
                        "Failed to unpack loaded from cache for %s: %s", url,
                        exc_str(exc))

        downloader_session = self.get_downloader_session(
            url, allow_redirects=allow_redirects)

        target_size = downloader_session.size
        if size is not None:
            if size == 0:
                # no download of the content was requested -- just return headers and be done
                return None, downloader_session.headers
            target_size = min(size, target_size)

        # FETCH CONTENT
        try:
            # Consider to improve to make it animated as well, or shorten here
            #pbar = ui.get_progressbar(label=url, fill_text=filepath, total=target_size)
            content = downloader_session.download(size=size)
            #pbar.finish()
            downloaded_size = len(content)

            # now that we know size based on encoded content, let's decode into string type
            if PY3 and isinstance(content, binary_type):
                content = content.decode()
            # downloaded_size = os.stat(temp_filepath).st_size

            self._verify_download(url,
                                  downloaded_size,
                                  target_size,
                                  None,
                                  content=content)

        except (AccessDeniedError, IncompleteDownloadError) as e:
            raise
        except Exception as e:
            e_str = exc_str(e, limit=5)
            lgr.error("Failed to fetch {url}: {e_str}".format(**locals()))
            raise DownloadError(exc_str(e, limit=8))  # for now

        if cache:
            # apparently requests' CaseInsensitiveDict is not serializable
            # TODO: maybe we should reuse that type everywhere, to avoid
            # our own handling of case-insensitivity
            self.cache[cache_key] = msgpack.dumps(
                (content, dict(downloader_session.headers)))

        return content, downloader_session.headers
Example #40
def msgpack_loads(header, frames):
    return msgpack.loads(
        b"".join(frames), encoding="utf8", use_list=False, **msgpack_len_opts
    )
Example #41
    def __serialize_multiple_threads(self):
        """"""

        manager = mp.Manager()
        send_queue = manager.Queue(-1)
        return_queue = manager.JoinableQueue(-1)
        self.logging_queue = mp.Queue(-1)
        self.logger_queue_handler = logging_handlers.QueueHandler(
            self.logging_queue)
        self.queue_logger = logging.getLogger("parser")
        self.queue_logger.addHandler(self.logger_queue_handler)
        self.queue_logger.setLevel(self.log_level)
        self.queue_logger.propagate = False
        self.log_writer = logging_handlers.QueueListener(
            self.logging_queue, self.logger)
        self.log_writer.start()

        line_parser = mp.Process(target=line_parser_func,
                                 args=(self._handle, self.fasta_index.filename,
                                       send_queue))
        line_parser.start()

        parsers = [
            bed12.Bed12ParseWrapper(identifier=index,
                                    rec_queue=send_queue,
                                    log_queue=self.logging_queue,
                                    level=self.log_level,
                                    return_queue=return_queue,
                                    fasta_index=None,
                                    is_gff=(not self.is_bed12),
                                    transcriptomic=True,
                                    max_regression=self._max_regression,
                                    table=self._table)
            for index in range(self.procs)
        ]
        [_.start() for _ in parsers]

        not_found = set()
        done = 0
        objects = []
        procs_done = 0
        while True:
            num = return_queue.get()
            if num in ("FINISHED", b"FINISHED"):
                procs_done += 1
                if procs_done == self.procs:
                    break
                else:
                    continue
            num, obj = num
            try:
                loaded_obj = msgpack.loads(obj, raw=False)
            except TypeError:
                raise TypeError(obj)

            if loaded_obj["id"] in self.query_cache:
                current_query = self.query_cache[loaded_obj["id"]]
            elif not self.initial_cache:
                current_query = Query(loaded_obj["id"], loaded_obj["end"])
                not_found.add(loaded_obj["id"])
                self.session.add(current_query)
                self.session.commit()
                self.query_cache[
                    current_query.query_name] = current_query.query_id
                current_query = current_query.query_id
            else:
                exc = "The provided ORFs do not match the transcripts provided and already present in the database.\
This could be due to having called the ORFs on a FASTA file different from `mikado_prepared.fasta`, the output of \
mikado prepare. If this is the case, please use mikado_prepared.fasta to call the ORFs and then restart \
`mikado serialise` using them as input."

                self.logger.critical(exc)
                raise InvalidSerialization(exc)

            loaded_obj["query_id"] = current_query
            objects.append(loaded_obj)
            if len(objects) >= self.maxobjects:
                done += len(objects)
                self.session.begin(subtransactions=True)
                self.engine.execute(Orf.__table__.insert(), objects)
                self.session.commit()
                self.logger.debug("Loaded %d ORFs into the database", done)
                objects = []

        [proc.join() for proc in parsers]
        done += len(objects)
        # self.session.begin(subtransactions=True)
        # self.session.bulk_save_objects(objects, update_changed_only=False)
        if objects:
            self.engine.execute(Orf.__table__.insert(), objects)
        self.session.commit()
        self.session.close()
        self.logger.info("Finished loading %d ORFs into the database", done)

        manager.shutdown()
        orfs = pd.read_sql_table("orf", self.engine, index_col="query_id")
        if orfs.shape[0] != done:
            raise ValueError(
                "I should have serialised {} ORFs, but {} are present!".format(
                    done, orfs.shape[0]))
Example #42
 def get(self, k, default=None):
     if self.cache.exists():
         with open(self.cache, "rb+") as fh_:
             return msgpack.loads(fh_.read()).get(k, default)
     else:
         return default
Example #43
def unpack(value):
    return loads(value, encoding='utf-8')
Example #44
def unpackage(package_):
    '''
    Unpackages a payload
    '''
    return msgpack.loads(package_, use_list=True)
Example #45
 def getSpParams(self, spid):
     params = msgpack.loads(self._redis.get(self.SP_PARAMS.format(spid)))
     return params["params"]
Example #46
def loads_msgpack(buf):
    """
    Args:
        buf: the output of `dumps`.
    """
    return msgpack.loads(buf, encoding='utf-8')
Example #47
    def __init__(self,
                 root,
                 db_path_flow,
                 db_path_rgb,
                 filename_flow,
                 filename_rgb,
                 transform=None,
                 mode='val',
                 num_frames=32,
                 ds=1,
                 return_label=False,
                 return_path=False,
                 return_source=False):
        split_mode = mode
        if mode == 'test': split_mode = 'val'

        self.root = root
        self.db_path_flow = os.path.join(
            db_path_flow, '%s_%s.lmdb' % (filename_flow, split_mode))
        self.db_path_rgb = os.path.join(
            db_path_rgb, '%s_%s.lmdb' % (filename_rgb, split_mode))
        self.transform = transform
        self.mode = mode
        self.num_frames = num_frames
        self.ds = ds
        self.return_label = return_label
        self.return_path = return_path
        self.return_source = return_source

        print('Loading flow LMDB from %s' % self.db_path_flow)
        self.env_flow = lmdb.open(self.db_path_flow,
                                  subdir=os.path.isdir(self.db_path_flow),
                                  readonly=True,
                                  lock=False,
                                  readahead=False,
                                  meminit=False)
        with self.env_flow.begin(write=False) as txn:
            self.db_length_flow = msgpack.loads(txn.get(b'__len__'))
            self.db_keys_flow = msgpack.loads(txn.get(b'__keys__'))
            self.db_order_flow = msgpack.loads(txn.get(b'__order__'))
            self.vlen_list_flow = msgpack.loads(txn.get(b'__vlen__'))

        print('Loading rgb LMDB from %s' % self.db_path_rgb)
        self.env_rgb = lmdb.open(self.db_path_rgb,
                                 subdir=os.path.isdir(self.db_path_rgb),
                                 readonly=True,
                                 lock=False,
                                 readahead=False,
                                 meminit=False)
        with self.env_rgb.begin(write=False) as txn:
            self.db_length_rgb = msgpack.loads(txn.get(b'__len__'))
            self.db_keys_rgb = msgpack.loads(txn.get(b'__keys__'))
            self.db_order_rgb = msgpack.loads(txn.get(b'__order__'))

        classes = read_file(os.path.join(root, 'ClassInd.txt'))
        if ',' in classes[0]:
            classes = [i.split(',')[-1].strip() for i in classes]
        print('Two-Stream Dataset from "%s" has #class %d' %
              (root, len(classes)))
        self.num_class = len(classes)
        self.class_to_idx = {classes[i]: i for i in range(len(classes))}
        self.idx_to_class = {i: classes[i] for i in range(len(classes))}

        video_info = pd.read_csv(os.path.join(root,
                                              '%s_split.csv' % split_mode),
                                 header=None)
        video_info[2] = video_info[0].str.split('/').str.get(-2)
        video_info[3] = video_info[0]
        video_info = video_info[video_info[2].isin(classes)]

        # load video source to id dictionary
        self.video_source = read_json(os.path.join(root, 'video_source.json'))

        # check vlen
        vname_list_rgb = [i.decode() for i in self.db_order_rgb]
        vname_list_flow = [i.decode() for i in self.db_order_flow]
        vlen_list_ordered = sorted(list(
            zip([i.decode() for i in self.db_keys_flow], self.vlen_list_flow)),
                                   key=lambda x: x[0])
        vlen_list_ordered = [i[-1] for i in vlen_list_ordered]

        vlen_df_flow = pd.DataFrame(zip(vname_list_flow, vlen_list_ordered),
                                    columns=[3, 4])
        vlen_df_flow = vlen_df_flow[vlen_df_flow[3].isin(vname_list_rgb)]

        if len(video_info.iloc[0][3].split('/')) != 2:  # long path to short path
            video_info[3] = video_info[3].str.split('/').str.slice(
                -2, None, 1).str.join('/')

        video_info = video_info.merge(vlen_df_flow, left_on=3,
                                      right_on=3).dropna()
        video_info[4] = video_info[[1, 4]].min(axis=1)

        self.get_video_id_flow = dict(
            zip([i.decode() for i in self.db_order_flow],
                ['%09d' % i for i in range(len(self.db_order_flow))]))
        self.get_video_id_rgb = dict(
            zip([i.decode() for i in self.db_order_rgb],
                ['%09d' % i for i in range(len(self.db_order_rgb))]))

        drop_idx = []
        print('filter out too short videos ...')
        for idx, row in tqdm(video_info.iterrows(),
                             total=len(video_info),
                             disable=True):
            vpath, _, _, _, vlen = row
            if vlen - self.num_frames * self.ds - 1 <= 0:
                drop_idx.append(idx)
        self.video_info = video_info.drop(drop_idx, axis=0)

        if mode == 'val':
            self.video_info = self.video_info.sample(frac=0.3,
                                                     random_state=666)
        self.video_subset = self.video_info
Example #48
 def listSpIds(self):
     return msgpack.loads(self._redis.get(self.SP_LIST))["sps"]
Example #49
 def load(data):
     o = msgpack.loads(data)
     o['requests'] = [Request.load(r) for r in o['requests']]
     b = Bin()
     b.__dict__ = o
     return b
Example #50
 def deserialize(cls, data):
     message = msgpack.loads(data)
     session, request, meta, msg = message
     return cls(message=bytearray_to_text(msg), meta=bytearray_to_text(meta), data=data)
Example #51
 def from_token(cls, token):
     return RoomListNextBatch(**{
         cls.REVERSE_KEY_DICT[key]: val
         for key, val in msgpack.loads(decode_base64(token)).items()
     })
Example #52
 def __getitem__(self, key):
     return msgpack.loads(decompress(self.txn.get(key.encode('utf-8'))),
                          raw=False)
Example #53
 def loads(self, value):
     return msgpack.loads(value, encoding="utf-8")
Example #54
def loads(buf):
    return msgpack.loads(buf)
Example #55
    def __init__(self,
                 root,
                 db_path,
                 filename,
                 transform=None,
                 mode='val',
                 num_frames=32,
                 ds=1,
                 window=False,
                 return_label=False,
                 return_path=False,
                 is_flow=False):
        split_mode = mode
        if mode == 'test': split_mode = 'val'
        self.root = root
        self.db_path = os.path.join(db_path,
                                    '%s_%s.lmdb' % (filename, split_mode))
        self.transform = transform
        self.mode = mode
        self.num_frames = num_frames
        self.window = window
        self.ds = ds
        self.return_label = return_label
        self.return_path = return_path
        self.is_flow = is_flow

        print('Loading LMDB from %s' % self.db_path)
        self.env = lmdb.open(self.db_path,
                             subdir=os.path.isdir(self.db_path),
                             readonly=True,
                             lock=False,
                             readahead=False,
                             meminit=False)
        with self.env.begin(write=False) as txn:
            self.db_length = msgpack.loads(txn.get(b'__len__'))
            self.db_keys = msgpack.loads(txn.get(b'__keys__'))
            self.db_order = msgpack.loads(txn.get(b'__order__'))
            if self.is_flow:
                self.vlen_list = msgpack.loads(txn.get(b'__vlen__'))

        classes = read_file(os.path.join(root, 'ClassInd.txt'))
        if ',' in classes[0]:
            classes = [i.split(',')[-1].strip() for i in classes]
        print('%s Dataset from "%s" has #class %d' %
              (filename, root, len(classes)))
        self.num_class = len(classes)
        self.class_to_idx = {classes[i]: i for i in range(len(classes))}
        self.idx_to_class = {i: classes[i] for i in range(len(classes))}

        video_info = pd.read_csv(os.path.join(root,
                                              '%s_split.csv' % split_mode),
                                 header=None)
        video_info[2] = video_info[0].str.split('/').str.get(-2)
        video_info[3] = video_info[2] + '/' + video_info[0].str.split(
            '/').str.get(-1)
        video_info = video_info[video_info[2].isin(classes)]

        # load video source to id dictionary
        self.video_source = read_json(os.path.join(root, 'video_source.json'))

        if self.is_flow:
            # check vlen for flow dataset
            vname_list = [i.decode() for i in self.db_order]
            vlen_list_ordered = sorted(list(
                zip([i.decode() for i in self.db_keys], self.vlen_list)),
                                       key=lambda x: x[0])
            vlen_list_ordered = [i[-1] for i in vlen_list_ordered]
            video_info = video_info.merge(pd.DataFrame(zip(
                vname_list, vlen_list_ordered),
                                                       columns=[3, 4]),
                                          left_on=3,
                                          right_on=3).dropna()

        self.get_video_id = dict(
            zip([i.decode() for i in self.db_order],
                ['%09d' % i for i in range(len(self.db_order))]))

        drop_idx = []
        print('filter out too short videos ...')
        for idx, row in tqdm(video_info.iterrows(),
                             total=len(video_info),
                             disable=True):
            if self.is_flow:
                vpath, _, _, _, vlen = row
            else:
                vpath, vlen, _, _ = row
            if vlen - self.num_frames * self.ds - 1 <= 0:
                drop_idx.append(idx)
        self.video_info = video_info.drop(drop_idx, axis=0)
        if mode == 'val':
            self.video_info = self.video_info.sample(frac=0.3,
                                                     random_state=666)
        self.video_subset = self.video_info
Example #56
 def deserialize_payload(self, payload_serialized, *extra_frames):
     payload = serializer.loads(payload_serialized, encoding="utf-8")
     if extra_frames:
         payload["__raw_data__"] = extra_frames
     return payload
Example #57
    def __init__(self,
                 root='%s/../process_data/data/ucf101' %
                 os.path.dirname(os.path.abspath(__file__)),
                 db_path=os.path.join(lmdb_root, 'UCF101/ucf101_frame.lmdb'),
                 transform=None,
                 mode='val',
                 num_frames=32,
                 ds=1,
                 which_split=1,
                 window=False,
                 return_path=False,
                 return_label=False,
                 return_source=False):
        self.root = root
        self.db_path = db_path
        self.transform = transform
        self.mode = mode
        self.num_frames = num_frames
        self.window = window
        self.ds = ds
        self.which_split = which_split
        self.return_label = return_label
        self.return_source = return_source
        self.return_path = return_path

        print('Loading LMDB from %s, split:%d' %
              (self.db_path, self.which_split))
        self.env = lmdb.open(self.db_path,
                             subdir=os.path.isdir(self.db_path),
                             readonly=True,
                             lock=False,
                             readahead=False,
                             meminit=False)
        with self.env.begin(write=False) as txn:
            self.db_length = msgpack.loads(txn.get(b'__len__'))
            self.db_keys = msgpack.loads(txn.get(b'__keys__'))
            self.db_order = msgpack.loads(txn.get(b'__order__'))

        classes = read_file(os.path.join(root, 'ClassInd.txt'))
        if ',' in classes[0]:
            classes = [i.split(',')[-1].strip() for i in classes]
        print('Frame Dataset from "%s" has #class %d' % (root, len(classes)))

        self.num_class = len(classes)
        self.class_to_idx = {classes[i]: i for i in range(len(classes))}
        self.idx_to_class = {i: classes[i] for i in range(len(classes))}

        split_mode = mode
        if mode == 'val': split_mode = 'test'
        video_info = pd.read_csv(os.path.join(
            root, '%s_split%02d.csv' % (split_mode, which_split)),
                                 header=None)
        video_info[2] = video_info[0].str.split('/').str.get(-3)
        video_info[3] = video_info[2] + '/' + video_info[0].str.split(
            '/').str.get(-2)
        assert len(pd.unique(video_info[2])) == self.num_class

        # load video source to id dictionary,
        # only useful to handle sibling videos in UCF101 pre-training
        if self.return_source:
            self.video_source = read_json(
                os.path.join(root, 'video_source.json'))

        self.get_video_id = dict(
            zip([i.decode() for i in self.db_order],
                ['%09d' % i for i in range(len(self.db_order))]))

        drop_idx = []
        print('filter out too short videos ...')
        for idx, row in tqdm(video_info.iterrows(),
                             total=len(video_info),
                             disable=True):
            vpath, vlen, _, _ = row
            if vlen - self.num_frames // 2 * self.ds - 1 <= 0:  # allow max padding = half video
                drop_idx.append(idx)
        self.video_info = video_info.drop(drop_idx, axis=0)

        if mode == 'val':
            self.video_info = self.video_info.sample(frac=0.3,
                                                     random_state=666)
        self.video_subset = self.video_info
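The constructor above only loads metadata; a __getitem__ would then fetch frames by key from the same environment. A minimal sketch of that read path, under the assumption (not shown in the snippet) that each LMDB value is a msgpack-packed list of JPEG-encoded byte strings:

import io

import lmdb
import msgpack
from PIL import Image

def load_frames(env, key, frame_indices):
    # Sketch only: fetch one record and decode a subset of its JPEG frames.
    # The per-video value layout is an assumption, not taken from the snippet above.
    with env.begin(write=False) as txn:
        raw = txn.get(key)
    packed_frames = msgpack.loads(raw)
    return [Image.open(io.BytesIO(packed_frames[i])).convert('RGB')
            for i in frame_indices]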
Ejemplo n.º 58
0
    def __init__(self,
                 root='%s/../process_data/data/ucf101' %
                 os.path.dirname(os.path.abspath(__file__)),
                 db_path_flow=os.path.join(lmdb_root,
                                           'UCF101/ucf101_tvl1.lmdb'),
                 db_path_rgb=os.path.join(lmdb_root,
                                          'UCF101/ucf101_frame.lmdb'),
                 transform=None,
                 mode='val',
                 num_frames=32,
                 ds=1,
                 which_split=1,
                 return_label=False,
                 return_path=False,
                 return_source=False):
        self.root = root
        self.db_path_flow = db_path_flow
        self.db_path_rgb = db_path_rgb
        self.transform = transform
        self.mode = mode
        self.num_frames = num_frames
        self.ds = ds
        self.which_split = which_split
        self.return_label = return_label
        self.return_path = return_path
        self.return_source = return_source

        print('Loading flow LMDB from %s, split:%d' %
              (self.db_path_flow, self.which_split))
        self.env_flow = lmdb.open(self.db_path_flow,
                                  subdir=os.path.isdir(self.db_path_flow),
                                  readonly=True,
                                  lock=False,
                                  readahead=False,
                                  meminit=False)
        with self.env_flow.begin(write=False) as txn:
            self.db_length_flow = msgpack.loads(txn.get(b'__len__'))
            self.db_keys_flow = msgpack.loads(txn.get(b'__keys__'))
            self.db_order_flow = msgpack.loads(txn.get(b'__order__'))
            self.vlen_list_flow = msgpack.loads(txn.get(b'__vlen__'))

        print('Loading rgb LMDB from %s, split:%d' %
              (self.db_path_rgb, self.which_split))
        self.env_rgb = lmdb.open(self.db_path_rgb,
                                 subdir=os.path.isdir(self.db_path_rgb),
                                 readonly=True,
                                 lock=False,
                                 readahead=False,
                                 meminit=False)
        with self.env_rgb.begin(write=False) as txn:
            self.db_length_rgb = msgpack.loads(txn.get(b'__len__'))
            self.db_keys_rgb = msgpack.loads(txn.get(b'__keys__'))
            self.db_order_rgb = msgpack.loads(txn.get(b'__order__'))

        classes = read_file(os.path.join(root, 'ClassInd.txt'))
        if ',' in classes[0]:
            classes = [i.split(',')[-1].strip() for i in classes]
        print('Two-Stream Dataset from "%s" has #class %d' %
              (root, len(classes)))
        self.num_class = len(classes)
        self.class_to_idx = {classes[i]: i for i in range(len(classes))}
        self.idx_to_class = {i: classes[i] for i in range(len(classes))}

        split_mode = mode
        if mode == 'val': split_mode = 'test'
        video_info = pd.read_csv(os.path.join(
            root, '%s_split%02d.csv' % (split_mode, which_split)),
                                 header=None)
        video_info[2] = video_info[0].str.split('/').str.get(-3)
        video_info[3] = video_info[2] + '/' + video_info[0].str.split(
            '/').str.get(-2)
        assert len(pd.unique(video_info[2])) == self.num_class

        # load video source to id dictionary
        self.video_source = read_json(os.path.join(root, 'video_source.json'))

        # check vlen
        vname_list_rgb = [i.decode() for i in self.db_order_rgb]
        vname_list_flow = [i.decode() for i in self.db_order_flow]
        vlen_list_ordered = sorted(
            zip([i.decode() for i in self.db_keys_flow], self.vlen_list_flow),
            key=lambda x: x[0])
        vlen_list_ordered = [i[-1] for i in vlen_list_ordered]

        vlen_df_flow = pd.DataFrame(zip(vname_list_flow, vlen_list_ordered),
                                    columns=[3, 4])
        vlen_df_flow = vlen_df_flow[vlen_df_flow[3].isin(vname_list_rgb)]

        video_info = video_info.merge(vlen_df_flow, left_on=3,
                                      right_on=3).dropna()
        video_info[4] = video_info[[1, 4]].min(axis=1)

        self.get_video_id_flow = dict(
            zip([i.decode() for i in self.db_order_flow],
                ['%09d' % i for i in range(len(self.db_order_flow))]))
        self.get_video_id_rgb = dict(
            zip([i.decode() for i in self.db_order_rgb],
                ['%09d' % i for i in range(len(self.db_order_rgb))]))

        drop_idx = []
        print('Filtering out videos that are too short ...')
        for idx, row in tqdm(video_info.iterrows(),
                             total=len(video_info),
                             disable=True):
            vpath, _, _, _, vlen = row
            if vlen - self.num_frames // 2 * self.ds - 1 <= 0:  # allow max padding = half video
                drop_idx.append(idx)
        self.video_info = video_info.drop(drop_idx, axis=0)

        if mode == 'val':
            self.video_info = self.video_info.sample(frac=0.3,
                                                     random_state=666)
        self.video_subset = self.video_info
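For context, a minimal sketch of how the __len__ / __keys__ / __order__ / __vlen__ metadata read above could be written when building such an LMDB. The key names mirror the reads above; everything else (record layout, map_size, the records argument) is an illustrative assumption:

import lmdb
import msgpack

def write_lmdb_metadata(db_path, records):
    # records: iterable of (video_name, num_frames) pairs in insertion order (illustrative).
    env = lmdb.open(db_path, map_size=1 << 40, subdir=False)
    names, vlens = [], []
    with env.begin(write=True) as txn:
        for name, vlen in records:
            # A real builder would also store the packed frames/flow for this
            # video here, under its zero-padded index key.
            names.append(name.encode())
            vlens.append(vlen)
        txn.put(b'__len__', msgpack.dumps(len(names)))
        txn.put(b'__keys__', msgpack.dumps(names))
        txn.put(b'__order__', msgpack.dumps(names))
        txn.put(b'__vlen__', msgpack.dumps(vlens))
    env.close()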
Ejemplo n.º 59
0
def msgpack_loads(header, frames):
    return msgpack.loads(b"".join(frames), use_list=False, **msgpack_opts)
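msgpack_opts is referenced but not defined in this snippet; a small self-contained round trip under the assumption that it only carries decode options such as raw=False:

import msgpack

msgpack_opts = {"raw": False}  # assumed contents; the real dict is not shown above

def msgpack_loads(header, frames):
    return msgpack.loads(b"".join(frames), use_list=False, **msgpack_opts)

# Hypothetical usage: one packed frame and an empty header.
frames = [msgpack.dumps({"x": [1, 2, 3]}, use_bin_type=True)]
print(msgpack_loads({}, frames))  # {'x': (1, 2, 3)} -- use_list=False yields tuples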
Ejemplo n.º 60
0
def io_player_move(dir):
    login = session['user']
    try:
        x = int(redis_store.get(login + ':player.x'))
        y = int(redis_store.get(login + ':player.y'))
        visible = msgpack.loads(redis_store.get(login + ':visible'))
    except (TypeError, ValueError):  # missing Redis keys make int()/msgpack.loads() fail
        return
    if dir not in DIRECTIONS:
        return
    dx, dy = DIRECTIONS[dir]
    nx, ny = x + dx, y + dy
    g = at(nx, ny)
    if (g not in SOLID) and (g != "D" or is_door_opened(login, nx, ny)):
        x = nx
        y = ny
        if update_visible(visible, x, y, first=True):
            redis_store.set(login + ':visible', msgpack.dumps(visible))
            emit('map.visible', msgpack.dumps(visible), room=login)
        redis_store.set(login + ':player.x', x)
        redis_store.set(login + ':player.y', y)
        emit('player.position', (x, y), room=login)
        if login not in hidden_users:
            emit('ghost.position', (login, x, y), broadcast=True)
        if g == "$" and not is_coin_picked(nx, ny):
            redis_store.incr(login + ':coins')
            redis_store.set('coin_%d,%d' % (nx, ny), 1)
    else:
        if g == 's':
            # scoreboard
            emit('scoreboard.view', msgpack.dumps(get_scoreboard()))

        if g == "D":
            # closed door
            prob_id = get_door_problem(nx, ny)
            if prob_id is None:
                return

            prob_data = problems.get(str(prob_id))
            if prob_data is None:
                return

            if prob_data['type'] in ["team_name", "answer", "guess"]:
                data = {
                    "name": prob_data['name'],
                    "type": "answer",
                    "id": prob_id,
                    "reward": prob_data['reward'],
                    "cur_reward": get_reward(prob_data['reward'])
                }
                if 'statement' in prob_data:
                    data['statement'] = prob_data['statement']
                else:
                    if 'internal_name' in prob_data:
                        data['statement_url'] = "/%s.pdf" % prob_data[
                            'internal_name']
                    else:
                        data['statement_url'] = prob_data['statement_url']
                emit('problem.view', msgpack.dumps(data))

            if prob_data['type'] in ['standard']:
                data = {
                    "name": prob_data['name'],
                    "type": "standard",
                    "short_name": prob_data['short_name'],
                    "statement_url": "/%s.pdf" % prob_data['internal_name'],
                    "id": prob_id,
                    "reward": prob_data['reward'],
                    "cur_reward": get_reward(prob_data['reward'])
                }
                emit('problem.view', msgpack.dumps(data))
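The handler above depends on several module-level names that are not shown (DIRECTIONS, SOLID, at, is_door_opened, ...). A sketch of the two constants it indexes, purely as assumed shapes rather than the game's actual data:

# Assumed shapes (illustrative only).
DIRECTIONS = {
    'up': (0, -1),
    'down': (0, 1),
    'left': (-1, 0),
    'right': (1, 0),
}
SOLID = {'#', 'W'}  # tile characters the player cannot walk through (hypothetical)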