Example #1
 def __init__(self, ctx: discord.ext.commands.Context = None, name=None):
     self.ctx = ctx
     self.name = name
     self.data = None
     if isinstance(ctx, str):
         self.filename = f"data/Bot/{self.ctx}.msgpack"
         try:
             ensure_file_existence(self.filename)
             with open(self.filename, "rb") as fp:
                 if fp.read():
                     fp.seek(0)
                     self.data = msgpack.unpack(fp, encoding="utf-8")
                 else:
                     self.data = {}
         except FileNotFoundError:
             self.data = {}
     else:
         self.filename = f"data/{self.ctx.guild.id}/{self.name}.msgpack"
         try:
             ensure_file_existence(self.filename)
             with open(self.filename, "rb") as fp:
                 if fp.read():
                     fp.seek(0)
                     self.data = msgpack.unpack(fp, encoding="utf-8")
                 else:
                     self.data = {}
         except FileNotFoundError:
             self.data = {}
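Note that the encoding="utf-8" keyword used above was removed in msgpack-python 1.0; the sketch below shows the same load-or-default pattern with the current API (the helper name is illustrative, not part of the original project):

import msgpack

def load_msgpack_dict(path):
    # Return the unpacked mapping, or {} if the file is empty or missing.
    # raw=False decodes keys and values to str, replacing the removed encoding= option.
    try:
        with open(path, "rb") as fp:
            if fp.read(1):
                fp.seek(0)
                return msgpack.unpack(fp, raw=False)
            return {}
    except FileNotFoundError:
        return {}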
Example #2
 def __init__(self, tsv_file_path, messagepack_tsv_path,
              transposed_tsv_file_path, transposed_messagepack_tsv_path):
     messagepack_tsv = open(messagepack_tsv_path + "/sample_data.msgpack",
                            "rb")
     self.tsv_map = msgpack.unpack(messagepack_tsv,
                                   max_map_len=10000000,
                                   max_array_len=10000000)
     sample_file = open(messagepack_tsv_path + "/samples.msgpack", "rb")
     self.samples = msgpack.unpack(sample_file,
                                   max_map_len=10000000,
                                   max_array_len=10000000)
     transposed_map_file = open(
         transposed_messagepack_tsv_path + "/sample_data.msgpack", "rb")
     self.transposed_map = msgpack.unpack(transposed_map_file,
                                          max_map_len=10000000,
                                          max_array_len=10000000)
     transposed_samples_file = open(
         transposed_messagepack_tsv_path + "/samples.msgpack", "rb")
     self.transposed_samples = msgpack.unpack(transposed_samples_file,
                                              max_map_len=10000000,
                                              max_array_len=10000000)
     self.tsv_file = open(tsv_file_path, "rb")
     self.transposed_tsv_file = open(transposed_tsv_file_path, "rb")
     features_file = open(messagepack_tsv_path + "/features.msgpack", "rb")
     self.features = msgpack.unpack(features_file,
                                    max_map_len=10000000,
                                    max_array_len=10000000)
Example #3
def merge(Origin, Destination):
    folders = [f[2:] for f in [i[0] for i in os.walk(Origin)][1:]]
    newWordDict = []
    newWordCount = []
    for f in range(len(folders)):
        print(''.join(("Merging folder " + str(f) + " out of ",
                       str(len(folders)) + ": ", folders[f])))
        with open(Path(Origin) / folders[f] / "WordDict.msgpack",
                  "rb") as worddict, open(
                      Path(Origin) / folders[f] / "WordCount.msgpack",
                      "rb") as wordcount:
            wd = msgpack.unpack(worddict)
            wc = msgpack.unpack(wordcount)
            for i in range(len(wd)):
                if wd[i] in newWordDict:
                    index = newWordDict.index(wd[i])
                    newWordCount[index] += wc[i]
                else:
                    newWordDict.append(wd[i])
                    newWordCount.append(wc[i])
            del wd
            del wc
    with open(Path(Destination) / "wordCount.msgpack", 'wb+') as outfile:
        msgpack.pack(newWordCount, outfile)
    with open(Path(Destination) / "wordDict.msgpack", 'wb+') as outfile:
        msgpack.pack(newWordDict, outfile)
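The list-based lookup above performs a linear newWordDict.index() scan per word; a sketch of the same merge built on collections.Counter (assuming the same WordDict/WordCount file layout) avoids that:

import os
from collections import Counter
from pathlib import Path

import msgpack

def merge_counts(origin, destination):
    # Accumulate per-folder word counts into one Counter, then write both files.
    totals = Counter()
    for folder in [p[0] for p in os.walk(origin)][1:]:
        with open(Path(folder) / "WordDict.msgpack", "rb") as wd_fp, \
             open(Path(folder) / "WordCount.msgpack", "rb") as wc_fp:
            words = msgpack.unpack(wd_fp)
            counts = msgpack.unpack(wc_fp)
        totals.update(dict(zip(words, counts)))
    with open(Path(destination) / "wordDict.msgpack", "wb") as out:
        msgpack.pack(list(totals.keys()), out)
    with open(Path(destination) / "wordCount.msgpack", "wb") as out:
        msgpack.pack(list(totals.values()), out)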
Example #4
def multiHot(Origin, Destination):
    print("Generating multihot arrays")
    folders = [f[2:] for f in [i[0] for i in os.walk(Origin)][1:]]
    for f in range(len(folders)):
        print(''.join(("Creating MultiHot of folder " + str(f) + " out of ",
                       str(len(folders)) + ": ", folders[f])))
        forumKeys.append([
            f, folders[f], [1 if (f == k) else 0 for k in range(len(folders))]
        ])
        with open(Path(folders[f]) / "Posts.msgpack", "rb") as posts, open(
                Path(Origin) / "WordDict.msgpack", "rb") as w:
            p, wD = msgpack.unpack(posts), msgpack.unpack(w)
            c, l = 0, len(p)
            r = []
            for i in range(l):
                print(''.join((str(c), '/', str(l))))
                r.append([[1 if (j in p[i][0]) else 0 for j in wD],
                          [1 if (f == k) else 0 for k in range(len(folders))]])
                c += 1
        with open(Path(Destination) / folders[f] / "OneHot.msgpack",
                  'wb+') as outfile:
            msgpack.pack(r, outfile)
        del p
        del wD
        del r
        del c
        del l

    with open(Path(Destination) / "forumKeys.msgpack", 'wb+') as outfile:
        msgpack.pack(forumKeys, outfile)
Example #5
def load_network(network_to_use):
    with open('./cache/' + network_to_use + '/vectors.msgpack', 'rb') as f:
        vectors = msgpack.unpack(f)
    with open('./cache/' + network_to_use + '/matrices.msgpack', 'rb') as f:
        matrices = msgpack.unpack(f)
    with open('./cache/' + network_to_use + '/dictionary.msgpack', 'rb') as f:
        cat_dict = msgpack.unpack(f)
    return vectors, matrices, cat_dict
Example #6
 def __init__(self, map_path, cost_weights, waypoints, directory, is_ego):
     self.limp_s = 0.
     self.is_limping = False
     self.is_ego = is_ego
     self.prev_traj = None
     self.prev_param = None
     self.prev_steer = 0.
     self.cost_weights = cost_weights
     self.waypoints = waypoints
     self.wheelbase = 0.3302
     self.max_reacquire = 10
     self.safe_speed = 2.5
     self.CORNER_ON = False
     self.track_lad = 1.0
     self.STEER_LP = 0.99
     self.CURVATURE_THRESH = 20.
     self.WINDOW_SIZE = 3.
     self.TOP_POP_NUM = 3
     lut_all = np.load(directory + 'mpc/lut_inuse.npz')
     self.lut_x = lut_all['x']
     self.lut_y = lut_all['y']
     self.lut_theta = lut_all['theta']
     self.lut_kappa = lut_all['kappa']
     self.lut = lut_all['lut']
     step_sizes = []
     step_sizes.append(self.lut_x[1] - self.lut_x[0])
     step_sizes.append(self.lut_y[1] - self.lut_y[0])
     step_sizes.append(self.lut_theta[1] - self.lut_theta[0])
     step_sizes.append(self.lut_kappa[1] - self.lut_kappa[0])
     self.lut_stepsizes = np.array(step_sizes)
     with open(directory + 'config.yaml', 'r') as yaml_stream:
         try:
             config = yaml.safe_load(yaml_stream)
             speed_lut_name = config['speed_lut_name']
             range_lut_name = config['range_lut_name']
         except yaml.YAMLError as ex:
             print(ex)
     speed_lut_temp = msgpack.unpack(open(directory + speed_lut_name, 'rb'),
                                     use_list=False)
     self.speed_lut_numba = Dict()
     for key, val in speed_lut_temp.items():
         if key == b'resolution':
             continue
         self.speed_lut_numba[key] = val
     range_lut_temp = msgpack.unpack(open(directory + range_lut_name, 'rb'),
                                     use_list=False)
     self.range_lut_numba = Dict()
     for key, val in range_lut_temp.items():
         if key == b'resolution':
             continue
         self.range_lut_numba[key] = val
     self.lut_resolution = float(speed_lut_temp[b'resolution'][0])
Example #7
def save_metadata(pr: PullRequest, transposed_data_file, transposed_map_dir,
                  out_file):
    printToLog("Saving metadata", pr)
    # In the transposed file, samples are actually features
    with open(os.path.join(transposed_map_dir, 'samples.msgpack'),
              'rb') as samples_file:
        features = msgpack.unpack(samples_file)

    # Open the transposed data file so we can read feature values
    with open(os.path.join(transposed_map_dir, 'sample_data.msgpack'),
              'rb') as map_file:
        data_map = msgpack.unpack(map_file)

    meta_dict = {}

    with open(transposed_data_file) as transposed_file:
        for i in range(len(features)):
            if i > 0 and i % 1000 == 0:
                printToLog("{}".format(i), pr)

            feature = features[i]
            feature_coordinates = data_map[feature]
            transposed_file.seek(feature_coordinates[0])

            feature_values = [
                x for x in transposed_file.read(feature_coordinates[1]).split(
                    "\t") if x != "NA"
            ]
            feature_values = sorted(list(set(feature_values)))

            # Check whether we only had missing (NA) values
            if len(feature_values) == 0:
                meta_dict[feature] = {'options': ["NA"], 'numOptions': 1}
            else:
                float_values = convert_to_floats(feature_values)

                if not float_values:
                    meta_dict[feature] = {
                        'options': feature_values,
                        'numOptions': len(feature_values)
                    }
                else:
                    meta_dict[feature] = {
                        'options': 'continuous',
                        'min': min(float_values),
                        'max': max(float_values)
                    }

    metadata = {'meta': meta_dict}
    with open(out_file, 'wb') as fp:
        pickle.dump(metadata, fp)
Example #8
File: client.py Project: iot49/iot49
 def readinto(self, buffer):
     global _stream
     _clear_rx_buffer()
     msgpack.pack(("ri", self._ext_type, len(buffer)), _stream)
     _ready_to_read()
     # get actual number of bytes read OR handle error (if any)
     sz = msgpack.unpack(_stream, ext_hook=_ext_handler, use_list=False)
     # read data; not all versions of readinto support length argument
     mv = memoryview(buffer)
     # print("urpc client.readinto [sz]")
     _stream.readinto(mv[:sz])
     # server sends an extra None
     assert msgpack.unpack(_stream) == None, "readinto expects terminating 'None'"
     return sz
Example #9
def _call_mmtf(f):
    '''Call function for mmtf files'''

    if ".mmtf.gz" in f:
        name = f.split('/')[-1].split('.')[0].upper()
        data = gzip.open(f, 'rb')
        unpack = msgpack.unpack(data, raw=False)
        decoder = MmtfStructure(unpack)
        return (name, decoder)

    elif ".mmtf" in f:
        name = f.split('/')[-1].split('.')[0].upper()
        unpack = msgpack.unpack(open(f, "rb"), raw=False)
        decoder = MmtfStructure(unpack)
        return (name, decoder)
Example #10
def unpack(package):
    """
  Unpacks system data from exported package and saves to DB
  """
    contents = msgpack.unpack(package)
    # load deps and check for missing
    deps, missing = dependency_check(contents['deps'])
    if len(missing) > 0:
        raise ImportDependencyMissing(missing)
    # start creating jobs
    for job in contents['jobs']:
        # transform directly stored object back
        job['hashes'] = list(
            map(lambda h: recreate_hash(h, job['hash_type']), job['hashes']))
        if job.get('masks'):
            job['masks'] = list(map(recreate_mask, job['masks']))
        newjob = FcJob()
        for field in JOB_EXPORTABLE_COLUMNS:
            setattr(newjob, field, job.get(field))
        # manual labor now
        #
        # !!! If you added new DEPS to export, make sure to unpack them like these !!!
        #
        dep_rule = job.get('rulesFile')
        if dep_rule:
            newjob.rulesFile = deps[dep_rule[0]]
        dep_markov = job.get('markov')
        if dep_markov:
            newjob.markov = deps[dep_markov[0]]
        dep_left_dictionaries = job.get('left_dictionaries')
        if dep_left_dictionaries:
            for index in dep_left_dictionaries:
                jd = FcJobDictionary(is_left=1, dictionary=deps[index])
                newjob.left_dictionaries.append(jd)
        dep_right_dictionaries = job.get('right_dictionaries')
        if dep_right_dictionaries:
            for index in dep_right_dictionaries:
                jd = FcJobDictionary(is_left=0, dictionary=deps[index])
                newjob.right_dictionaries.append(jd)
        dep_pcfg = job.get('pcfg')
        if dep_pcfg:
            newjob.pcfg = deps[dep_pcfg[0]]
        # adding values for useless non-null fields
        newjob.indexes_verified = 0
        newjob.current_index_2 = 0
        # add owner
        newjob.permission_records.append(
            FcUserPermission(user_id=current_user.id,
                             view=1,
                             modify=1,
                             operate=1,
                             owner=1))
        # save
        db.session.add(newjob)
    # end for loop over jobs
    try:
        db.session.commit()
    except exc.IntegrityError as e:
        db.session().rollback()
        raise e
Example #11
    def _decode(self, response):
        ct = response.getheader('Content-Type')
        ce = response.getheader('Content-Encoding')
        cl = response.getheader('Content-Length')

        if cl and not int(cl):
            LOG.debug("Empty response body")
            return ""

        if ce and ce == "gzip":
            # this reads the whole response in memory, but json.load() would do the same anyway...
            data = response.read()
            response = gzip.GzipFile(fileobj=StringIO(data))

        if ct and 'application/json' in ct:
            LOG.debug("decoding %s", ct)
            return json.load(response)
        elif ct and 'application/x-msgpack' in ct:
            if msgpack:
                LOG.debug("decoding %s", ct)
                return msgpack.unpack(response)
            else:
                LOG.debug("not decoding %s, decoder is unavailable", ct)
                return response.read()

        return response.read()
Example #12
    def __read_gdf(self):

        if not os.path.exists(self.gdfpath):
            return {}

        with open(self.gdfpath, "rb") as stream:
            return msgpack.unpack(stream, encoding="utf-8")
Example #13
File: ip2as.py Project: GOVCERT-LU/ip2as
  def __init__(self, data_path, use_msgpack=False):
    self.net_as = SubnetTree.SubnetTree()
    self.d_asn = {}
    self.net_as_map = {}
    net_as_raw = None

    net_as_file = open(data_path, 'rb')
    if use_msgpack:
      import msgpack
      net_as_raw = msgpack.unpack(net_as_file)
    else:
      net_as_raw = json.load(net_as_file)
    net_as_file.close()

    for asn, v in net_as_raw.items():
      '''
       "11542": {
          "name": "EYEMG - EYEMG - interactive media group", "cc": "US", "timestamp": "20070524", "rir": "arin",
          "nets": {"208.79.156.0/22": {"cc": "US", "timestamp": "20070621", "rir": "arin"}
                  },
          "asn": "11542"}
      '''
      self.d_asn[asn] = ASN(asn)
      self.d_asn[asn].fromdict(v)

      for net in self.d_asn[asn].nets.keys():
        '''A single net may be announced in various ASNs'''
        self.net_as.insert(str(net), net)

        if not net in self.net_as_map:
          self.net_as_map[net] = []

        if not asn in self.net_as_map[net]:
          self.net_as_map[net].append(asn)
Example #14
 def prepare_txn(self, transaction_id, do_cleanup=True):
     self._active_txn = True
     try:
         self.lock.upgrade()
     except (LockError, LockErrorT):
         # if upgrading the lock to exclusive fails, we do not have an
         # active transaction. this is important for "serve" mode, where
         # the repository instance lives on - even if exceptions happened.
         self._active_txn = False
         raise
     if not self.index or transaction_id is None:
         self.index = self.open_index(transaction_id)
     if transaction_id is None:
         self.segments = {
         }  # XXX bad name: usage_count_of_segment_x = self.segments[x]
         self.compact = set(
         )  # XXX bad name: segments_needing_compaction = self.compact
     else:
         if do_cleanup:
             self.io.cleanup(transaction_id)
         with open(os.path.join(self.path, 'hints.%d' % transaction_id),
                   'rb') as fd:
             hints = msgpack.unpack(fd)
         if hints[b'version'] != 1:
             raise ValueError('Unknown hints file version: %d' %
                              hints['version'])
         self.segments = hints[b'segments']
         self.compact = set(hints[b'compact'])
Example #15
def _load_token_statistics(file_name):
    with open_binary('akimous.resources', file_name) as f1:
        with lzma.open(f1, 'rb') as f2:
            return msgpack.unpack(f2,
                                  use_list=False,
                                  raw=False,
                                  strict_map_key=False)
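The counterpart writer for such a resource would look roughly like this (a sketch; the statistics mapping and file name are placeholders):

import lzma

import msgpack

def _dump_token_statistics(file_name, statistics):
    # Serialize the statistics mapping as LZMA-compressed MessagePack.
    with lzma.open(file_name, 'wb') as f:
        msgpack.pack(statistics, f)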
Example #16
def unpack(filename):
    """
    Open and unpack a named msgpack file.
    """
    with open(filename, 'rb') as infile:
        unpacked = msgpack.unpack(infile)
        return msgpack_transform(unpacked[1], unpacked[0])
Example #17
    def __init__(self, data=None):
        '''
        @param data: file contents or open file handle, optionally gzipped
        @type data: bytes, stream or dict
        '''
        if data is None:
            self._data = {}
            return

        if isinstance(data, dict):
            self._data = {}
            for key, value in data.items():
                # discard non-required lists of length zero
                if key not in requiredfields and hasattr(
                        value, '__len__') and len(value) == 0:
                    continue

                self.set(key, value)
            return

        if isinstance(data, bytes):
            if data[:2] != b'\x1f\x8b':  # gzip magic number
                self._set_data(msgpack.unpackb(data, **_KWARGS_UNPACK))
                return

            import io, gzip
            data = gzip.GzipFile(fileobj=io.BytesIO(data))

        self._set_data(msgpack.unpack(data, **_KWARGS_UNPACK))
Example #18
 def put(self):
     if request.content_type != "application/msgpack":
         raise InvalidUsage("Data should be sent as application/msgpack",
                            400)
     try:
         t = Timer()
         packed = BytesIO(request.get_data())
         t.debug_reset("To BytesIO")
         data = msgpack.unpack(packed, encoding='utf-8')
         t.debug_reset("Unpack")
         payload = PayloadFromImport(data)
         t.debug_reset("Read payload")
         length, blob = payload.to_np_save()
         tags = payload.tags
         t.debug_reset("Repack payload")
         new_uuid = str(uuid4())
         self.metadata.put(uuid=new_uuid,
                           network=tags['network'],
                           station=tags['station'],
                           channel=tags['channel'],
                           start=payload.start,
                           end=payload.end,
                           sampling_rate=tags['sampling_rate'])
         t.debug_reset("Put to Metadata")
         self.datastore.put(new_uuid, blob, length)
         t.debug_reset("Put to Datastore")
     except (DatastoreError, MetadataError, PayloadError) as e:
         raise InvalidUsage("Could not accept payload: {}".format(e))
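A minimal client-side sketch for posting to such an endpoint (the URL and payload fields are illustrative, not the service's actual schema):

import msgpack
import requests

payload = {"network": "XX", "station": "TEST", "channel": "HHZ", "data": [1, 2, 3]}
resp = requests.put(
    "http://localhost:5000/waveform",  # placeholder URL
    data=msgpack.packb(payload),
    headers={"Content-Type": "application/msgpack"},
)
resp.raise_for_status()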
Example #19
File: custom_prepro.py Project: MovElb/Ann
    def __init__(self, meta_file='./squad2_preprocessed/meta.msgpack'):
        with open(meta_file, 'rb') as f:
            meta = msgpack.unpack(f, encoding='utf8')

        self.vocab = meta['vocab']
        self.vocab_tag = meta['vocab_tag']
        self.vocab_ent = meta['vocab_ent']
        self.embeddings = meta['embedding']
        self.wv_cased = meta['wv_cased']

        self.w2id = {w: i for i, w in enumerate(self.vocab)}
        self.tag2id = {w: i for i, w in enumerate(self.vocab_tag)}
        self.ent2id = {w: i for i, w in enumerate(self.vocab_ent)}

        self.nlp = spacy.load('en', parser=False)
        self.annotate = partial(annotate,
                                wv_cased=self.wv_cased,
                                init_nlp=self.nlp)
        self.annotate_single = partial(annotate_single, init_nlp=self.nlp)
        self.annotate_crossed = partial(annotate_crossed,
                                        wv_cased=self.wv_cased)
        self.to_id = partial(to_id,
                             w2id=self.w2id,
                             tag2id=self.tag2id,
                             ent2id=self.ent2id)
Example #20
File: ruuster.py Project: Jille/tkbd
 def fetch_inst_id(self):
     """ Fetches the institute id of the RU """
     for d in msgpack.unpack(urllib2.urlopen(
             "%s/list/institutes?format=msgpack" % self.url)):
         if d['name'] == 'Radboud Universiteit Nijmegen':
             return d['id']
     assert False
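These ruuster.py snippets stream a urllib2 HTTP response straight into msgpack.unpack; a Python 3 sketch of the same idea with urllib.request (mirroring the URL format string above):

import urllib.request

import msgpack

def fetch_institutes(base_url):
    # The response object is file-like, so it can be handed to unpack() directly.
    with urllib.request.urlopen("%s/list/institutes?format=msgpack" % base_url) as resp:
        return msgpack.unpack(resp, raw=False)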
Example #21
 def test_1chain_simplerestore(self):
     m = andrey.Markov(1, 1)
     m.teach('alpha beta conky delta')
     dump = msgpack.dumps(m.todict())
     dic = msgpack.unpack(io.BytesIO(dump), encoding='utf-8')
     m2 = andrey.Markov.fromdict(dic)
     self.assertEqual('beta conky', m2.choose('alpha', continued=1))
Example #22
    def _load_metadata(self):
        meta_path = self.base_path / "metadata.msg"
        if not meta_path.exists():
            _logger.info("Creating metadata of Waymo dataset (%s)...",
                         self.phase)
            metadata = {}

            if self.inzip:
                for archive in self.base_path.iterdir():
                    if archive.is_dir() or archive.suffix != ".zip":
                        continue

                    with PatchedZipFile(archive,
                                        to_extract="context/stats.json") as ar:
                        metadata[archive.stem] = json.loads(
                            ar.read("context/stats.json"))
            else:
                for folder in self.base_path.iterdir():
                    if not folder.is_dir():
                        continue

                    with (folder / "context/stats.json").open() as fin:
                        metadata[folder.name] = json.load(fin)

            with open(meta_path, "wb") as fout:
                msgpack.pack(metadata, fout)

        with open(meta_path, "rb") as fin:
            self._metadata = SortedDict()
            meta_json = msgpack.unpack(fin)
            for k, v in meta_json.items():
                self._metadata[k] = edict(v)
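The same build-once-then-cache pattern in isolation (a minimal sketch with a placeholder build callable):

from pathlib import Path

import msgpack

def load_or_build_metadata(meta_path, build):
    # Unpack cached metadata if it exists; otherwise build it and cache it first.
    meta_path = Path(meta_path)
    if not meta_path.exists():
        with open(meta_path, "wb") as fout:
            msgpack.pack(build(), fout)
    with open(meta_path, "rb") as fin:
        return msgpack.unpack(fin)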
Example #23
File: msgpack.py Project: lvg01/salt.old
def fetch(bank, key):
    '''
    Fetch information from a msgpack file.

    bank
        The name of the directory, inside the configured cache directory,
        which will hold the data. If slashes are included in the name, then
        they refer to a nested directory structure.

    key
        The name of the file which holds the data. This filename will have
        ``.p`` appended to it.
    '''
    base = os.path.join(salt.syspaths.CACHE_DIR, bank)
    if not os.path.isdir(base):
        log.debug('Cache directory %s does not exist', base)
        return None

    outfile = os.path.join(base, '{0}.p'.format(key))
    try:
        with salt.utils.fopen(outfile, 'rb') as fh_:
            return msgpack.unpack(fh_)
    except IOError as exc:
        log.warn('There was an error reading the cache file, {0}: {1}'.format(
            base, exc))
        return None
Example #24
 def test_to_msgpack(self):
     m_file = os.path.join(tmp_dir, "movie_data")
     msg_data = _to_msgpack(movie_data)
     assert b"Rick Moranis" in msg_data
     _to_msgpack(movie_data, filename=m_file)
     assert b"Rick Moranis" in open(m_file, "rb").read()
     assert msgpack.unpack(open(m_file, "rb")) == msgpack.unpackb(msg_data)
Example #25
def formatGazeData(inputDir):
    """
    - load the pupil_data and timestamps
    - get the "gaze" fields from pupil data (i.e. the gaze lcoation w/r/t world camera)
    - sync gaze data with the world_timestamps array
    """

    # load pupil data
    pupil_data_path = join(inputDir, 'pupil_data')
    with open(pupil_data_path, 'rb') as fh:
        try:
            gc.disable()
            pupil_data = msgpack.unpack(fh, encoding='utf-8')
        except Exception as e:
            print(e)
        finally:
            gc.enable()
    gaze_list = pupil_data['gaze_positions']  # gaze position (world camera)

    # load timestamps
    timestamps_path = join(inputDir, 'world_timestamps.npy')
    frame_timestamps = np.load(timestamps_path)

    # align gaze with world camera timestamps
    gaze_by_frame = correlate_data(gaze_list, frame_timestamps)

    # make frame_timestamps relative to the first data timestamp
    start_timeStamp = gaze_by_frame[0][0]['timestamp']
    frame_timestamps = (frame_timestamps -
                        start_timeStamp) * 1000  # convert to ms

    return gaze_by_frame, frame_timestamps
Example #26
        def thread_function():
            with open(self.mapInfo.filename, 'rb') as f:
                self.data = msgpack.unpack(f)

            self._get_landmarks()
            self._get_trajectory()
            self.mapInfo.loading = False
Example #27
def unpack(stream, **kwargs):
    """Unpack a stream of packed bytes using MessagePack (with extended types support)
       and return unpacked object."""
    return msgpack.unpack(stream,
                          ext_hook=_decode_ext_type,
                          raw=False,
                          **kwargs)
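A sketch of what such an ext_hook can look like; the type code and payload layout here are illustrative, not the library's actual extended types:

import msgpack

def _decode_ext_type_demo(code, data):
    # Illustrative: treat ext type 1 as a big-endian unsigned integer,
    # and pass any unknown type through untouched.
    if code == 1:
        return int.from_bytes(data, "big")
    return msgpack.ExtType(code, data)

packed = msgpack.packb(msgpack.ExtType(1, (42).to_bytes(4, "big")))
print(msgpack.unpackb(packed, ext_hook=_decode_ext_type_demo, raw=False))  # 42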
Example #28
def Deserializer(stream_or_string):
    if isinstance(stream_or_string, basestring):
        stream = StringIO(stream_or_string)
    else:
        stream = stream_or_string
    for obj in PythonDeserializer(msgpack.unpack(stream, object_hook=DjangoMsgPackDecoder().decode)):
        yield obj
Example #29
File: ruuster.py Project: Jille/tkbd
 def fetch_todays_schedule(self, rooms):
     """ Fetch the schedules for the given rooms. """
     room_ids = self.fetch_room_ids(rooms)
     inst_id = self.fetch_inst_id()
     ret = {}
     now = datetime.datetime.now()
     day = DAYS[now.isoweekday() - 1]
     for room_name in room_ids:
         ret[room_name] = []
         events = msgpack.unpack(urllib2.urlopen(
             "%s/snapshot/head/%s/location/%s?format=msgpack" % (
                 self.url, inst_id, room_ids[room_name])))['events']
         for event in events:
             starttime = datetime.datetime.strptime(
                     event['starttime'], '%H:%M:%S').time()
             endtime = datetime.datetime.strptime(
                     event['endtime'], '%H:%M:%S').time()
             if event['day'] != day:
                 continue
             ok = False
             for period in event['eventperiods']:
                 startdate = datetime.datetime.strptime(
                         period['startdate'], '%Y-%m-%d %H:%M:%SZ').date()
                 enddate = datetime.datetime.strptime(
                         period['enddate'], '%Y-%m-%d %H:%M:%SZ').date()
                 if (startdate <= now.date() and now.date() <= enddate):
                     ok = True
                     break
             if not ok:
                 continue
             ret[room_name].append((starttime, endtime,
                     event['course']['name']))
     return ret
Example #30
 def getPiecemap(self, inner_path):
     file_info = self.site.content_manager.getFileInfo(inner_path)
     piecemap_inner_path = helper.getDirname(file_info["content_inner_path"]) + file_info["piecemap"]
     self.site.needFile(piecemap_inner_path, priority=20)
     piecemap = msgpack.unpack(self.site.storage.open(piecemap_inner_path))[helper.getFilename(inner_path)]
     piecemap["piece_size"] = file_info["piece_size"]
     return piecemap
Example #31
 def getPiecemap(self, inner_path):
     file_info = self.site.content_manager.getFileInfo(inner_path)
     piecemap_inner_path = helper.getDirname(file_info["content_inner_path"]) + file_info["piecemap"]
     self.site.needFile(piecemap_inner_path, priority=20)
     piecemap = msgpack.unpack(self.site.storage.open(piecemap_inner_path))[helper.getFilename(inner_path)]
     piecemap["piece_size"] = file_info["piece_size"]
     return piecemap
Example #32
def readTrainingData(dataName):
    dataFile = open(dataName, "rb")
    gc.disable()
    inputList, outputList = msgpack.unpack(dataFile)
    gc.enable()
    dataFile.close()
    return list(inputList), list(outputList)
Example #33
File: api.py Project: mobishift2011/amzn
    def _rpc_all(self, cmd):
        clients = {}
        poller = zmq.Poller()

        ret = []
        for k, d in self.workers.items():
            socket = self.context.socket(zmq.REQ)
            socket.setsockopt(zmq.LINGER, 0)
            socket.connect(d['rpc_address'])
            poller.register(socket, zmq.POLLIN)
            socket.send(cmd)
            clients[socket] = k

        t = time.time()
        while time.time() - t < 1.000:
            socks = dict(poller.poll(50)) # wait 0.05s
            for socket in socks.keys():
                status = unpack(socket.recv())
                self.workers[clients[socket]] = status
                ret.append(status)
                del clients[socket]
            if not clients:
                break

        # timeouted sockets
        for k, socket in clients.items():
            status = {"status":"no response"} 
            if k in self.workers:
                status.update( self.workers[k] )
            ret.append(status)

        return ret
Example #34
    def load_latest(self,
                    name='Megavolume',
                    filter='rff2',
                    conversion='EU60',
                    factor=1.):
        line = open(
            '/raid1/mh826/Stacking_Algorithm/CCP_stacks/CCP_volumes/' + name +
            '_' + filter + '_' + conversion + '_' + str(factor) +
            '/filenames.dat', 'r').readlines()[-1]
        runnum = int(float(line.split()[0]))
        volumefile = line.split()[1]
        print('loading', name, runnum, volumefile)

        # get last stackfile name

        # Read in volumes

        self.VOL.update(
            msgpack.unpack(open(volumefile, 'rb'),
                           use_list=False,
                           object_hook=m.decode,
                           encoding='utf-8'))
        # del self.VOL['trackRFs']

        return self
Example #35
    def load_latest(self,
                    name='trialarea_1',
                    filter='jgf1',
                    conversion='prem',
                    factor=2.):
        '''
        Loads latest volume
        '''
        print(name)
        line = open(
            root + 'CCP_Stack/Volumes/' + name + '_' + filter + '_' +
            conversion + '_' + str(factor) + '/filenames.dat',
            'r').readlines()[-1]
        runnum = int(float(line.split()[0]))
        volumefile = line.split()[1]
        print(runnum, volumefile)

        # get last stackfile name

        ####### Read in volumes

        self.VOL.update(
            msgpack.unpack(open(volumefile, 'rb'),
                           use_list=False,
                           object_hook=m.decode))
        #del self.VOL['trackRFs']

        return self
Example #36
def _init_dataset():
    """
    Load the bond dataset from MessagePack file.

    Since loading the database is computationally expensive,
    this is only done, when the bond database is actually required.
    """
    global _intra_bonds
    if _intra_bonds is not None:
        # Database is already initialized
        return

    # Bonds are taken from
    # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif
    # (2019/01/27)
    _info_dir = dirname(realpath(__file__))
    with open(join(_info_dir, "intra_bonds.msgpack"), "rb") as file:
        _intra_bonds_raw = msgpack.unpack(file, use_list=False, raw=False)
        _intra_bonds = {}
        for group, group_bonds_raw in _intra_bonds_raw.items():
            group_bonds = {
                frozenset(bond_raw): count
                for bond_raw, count in group_bonds_raw.items()
            }
            _intra_bonds[group] = group_bonds
Example #37
    def __init__(self, data_path, use_msgpack=False):
        self.net_as = SubnetTree.SubnetTree()
        self.d_asn = {}
        self.net_as_map = {}
        net_as_raw = None

        net_as_file = open(data_path, 'rb')
        if use_msgpack:
            import msgpack
            net_as_raw = msgpack.unpack(net_as_file)
        else:
            net_as_raw = json.load(net_as_file)
        net_as_file.close()

        for asn, v in net_as_raw.items():
            '''
       "11542": {
          "name": "EYEMG - EYEMG - interactive media group", "cc": "US", "timestamp": "20070524", "rir": "arin",
          "nets": {"208.79.156.0/22": {"cc": "US", "timestamp": "20070621", "rir": "arin"}
                  },
          "asn": "11542"}
      '''
            self.d_asn[asn] = ASN(asn)
            self.d_asn[asn].fromdict(v)

            for net in self.d_asn[asn].nets.keys():
                '''A single net may be announced in various ASNs'''
                self.net_as.insert(str(net), net)

                if not net in self.net_as_map:
                    self.net_as_map[net] = []

                if not asn in self.net_as_map[net]:
                    self.net_as_map[net].append(asn)
Example #38
 def __iter__(self):
     for fname in os.listdir(self.dirname):
         log('reading file ' + fname)
         with open(os.path.join(self.dirname, fname), 'rb') as f:
             sentences = msgpack.unpack(f)
             for sentence in sentences:
                 yield sentence
Example #39
	def load(self):
		self.data = {'version' : self._version}
		if os.path.exists(self.db_path):
			with open(self.db_path) as f:
				self.data = msgpack.unpack(f)
		else:
			print 'Initializing replay database...'
			self.update()
Example #40
File: remote.py Project: jrydberg/guild
 def _wait(self, unpacker, sock):
     while True:
         data = sock.read()
         unpacker.feed(data)
         try:
             return next(unpacker)  # pull the next complete object from the streaming unpacker
         except StopIteration:
             pass
Example #41
 def prepare_txn(self, transaction_id, do_cleanup=True):
     self._active_txn = True
     if not self.lock.got_exclusive_lock():
         if self.exclusive is not None:
             # self.exclusive is either True or False, thus a new client is active here.
             # if it is False and we get here, the caller did not use exclusive=True although
             # it is needed for a write operation. if it is True and we get here, something else
             # went very wrong, because we should have a exclusive lock, but we don't.
             raise AssertionError("bug in code, exclusive lock should exist here")
         # if we are here, this is an old client talking to a new server (expecting lock upgrade).
         # or we are replaying segments and might need a lock upgrade for that.
         try:
             self.lock.upgrade()
         except (LockError, LockErrorT):
             # if upgrading the lock to exclusive fails, we do not have an
             # active transaction. this is important for "serve" mode, where
             # the repository instance lives on - even if exceptions happened.
             self._active_txn = False
             raise
     if not self.index or transaction_id is None:
         try:
             self.index = self.open_index(transaction_id, False)
         except RuntimeError:
             self.check_transaction()
             self.index = self.open_index(transaction_id, False)
     if transaction_id is None:
         self.segments = {}  # XXX bad name: usage_count_of_segment_x = self.segments[x]
         self.compact = FreeSpace()  # XXX bad name: freeable_space_of_segment_x = self.compact[x]
     else:
         if do_cleanup:
             self.io.cleanup(transaction_id)
         hints_path = os.path.join(self.path, 'hints.%d' % transaction_id)
         index_path = os.path.join(self.path, 'index.%d' % transaction_id)
         try:
             with open(hints_path, 'rb') as fd:
                 hints = msgpack.unpack(fd)
         except (msgpack.UnpackException, msgpack.ExtraData, FileNotFoundError) as e:
             logger.warning('Repository hints file missing or corrupted, trying to recover')
             if not isinstance(e, FileNotFoundError):
                 os.unlink(hints_path)
             # index must exist at this point
             os.unlink(index_path)
             self.check_transaction()
             self.prepare_txn(transaction_id)
             return
         if hints[b'version'] == 1:
             logger.debug('Upgrading from v1 hints.%d', transaction_id)
             self.segments = hints[b'segments']
             self.compact = FreeSpace()
             for segment in sorted(hints[b'compact']):
                 logger.debug('Rebuilding sparse info for segment %d', segment)
                 self._rebuild_sparse(segment)
             logger.debug('Upgrade to v2 hints complete')
         elif hints[b'version'] != 2:
             raise ValueError('Unknown hints file version: %d' % hints[b'version'])
         else:
             self.segments = hints[b'segments']
             self.compact = FreeSpace(hints[b'compact'])
Example #42
File: gengraph.py Project: daureg/magnet
def load_graph(filename):
    with open(filename, 'r+b') as outfile:
        psi, rV, phi, rE = msgpack.unpack(outfile, use_list=False)
    G, E = {}, {}
    for u, v, s in zip(rE[::3], rE[1::3], rE[2::3]):
        E[(u, v)] = bool(s)
        pg.add_edge(G, u, v)
    nodes_sign = list(rV)
    return psi, phi, nodes_sign, G, E
Example #43
 def __iter__(self):
     while True:
         try:
             pos = self.buf.tell()
             yield unpack(self.buf)
         except umsgpack.InsufficientDataException:
             self.buf.seek(pos)
             self.buf = io.BytesIO(self.buf.read())
             return  # end iteration (PEP 479: raising StopIteration inside a generator is an error)
Example #44
def load_clips():
    """
    Load previous clips from DATA_FILE
    """
    try:
        with open(DATA_FILE, 'r') as f:
            return msgpack.unpack(f, encoding='utf-8')
    except IOError:
        return {}
Example #45
File: ruuster.py Project: bwesterb/tkbd
 def fetch_inst_id(self):
     """ Fetches the institute id of the RU """
     try:
         for d in msgpack.unpack(urllib2.urlopen(
                 "%s/list/institutes?format=msgpack" % self.url)):
             if d['name'] == 'Radboud Universiteit Nijmegen':
                 return d['id']
     except IOError, e: # urllib2 exceptions are a subclass of IOError
         raise RuusterError(e)
Example #46
    def __init__(self, data):
        if isinstance(data, bytes):
            if data[:2] != b'\x1f\x8b': # gzip magic number
                self._data = msgpack.unpackb(data)
                return

            import io, gzip
            data = gzip.GzipFile(fileobj=io.BytesIO(data))

        self._data = msgpack.unpack(data)
Example #47
File: ruuster.py Project: Jille/tkbd
 def fetch_room_ids(self, names):
     """ Fetches the ids of the rooms with the given names """
     ret = {}
     names_set = set(names)
     for d in msgpack.unpack(urllib2.urlopen(
             "%s/list/locations?format=msgpack" % self.url)):
         name = d['name'].upper() # normalize: Hg -> HG
         if name in names_set:
             ret[name] = d['id']
     return ret
Example #48
    def load_from_stream(self, stream, to_container, **opts):
        """
        Load JSON config from given stream `stream`.

        :param stream: Stream will provide MessagePack-ed config content string
        :param to_container: callble to make a container object
        :param opts: keyword options passed to `msgpack.unpack`

        :return: Dict-like object holding configuration
        """
        return msgpack.unpack(stream, object_hook=to_container, **opts)
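For example, passing collections.OrderedDict as the container callable (a minimal usage sketch, not taken from the library's own tests):

import io
from collections import OrderedDict

import msgpack

packed = msgpack.packb({"a": 1, "b": 2})
config = msgpack.unpack(io.BytesIO(packed), object_hook=OrderedDict)
print(type(config).__name__, dict(config))  # OrderedDict {'a': 1, 'b': 2}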
Example #49
File: api.py Project: mobishift2011/amzn
 def request(self, request):
     """ processing request
 
     {"op":"all_stats"} -> a dict of stats
     """
     response = {}
     response.update({"status":"ok"})
     try:
         req = unpack(request)
     except Exception, e:
         response.update({"status":"error","message":e.message})
Example #50
def load_msgpack_gc(_file):
    output = open(_file, 'rb')

    # disable garbage collector
    gc.disable()

    mydict = msgpack.unpack(output)

    # enable garbage collector again
    gc.enable()
    output.close()
    return mydict
Example #51
   def _init(self, terms_fn=None, bigrams_fn=None, trigrams_fn=None,
                   terms_by_root_form_fn=None,
                   is_use_emoticons=False,
                   is_dump_cls=False,
                   is_load_cached_cls=False):

      # load dictionaries of terms/bigrams/trigrams (first element)
      # and sets of all terms/bigrams/trigrams
      if terms_fn:
         terms, self._terms_set = self._csv_to_dict(terms_fn)

      if bigrams_fn:
         bigrams, self._bigrams_set = self._csv_to_dict(bigrams_fn)

      if trigrams_fn:
         trigrams, self._trigrams_set = self._csv_to_dict(trigrams_fn)

      # try to load cached classifiers
      if is_load_cached_cls:
         self.terms_cls = self._load_terms_cls()
         self.bigrams_cls = self._load_bigrams_cls()
         self.trigrams_cls = self._load_trigrams_cls()

      # train classifiers if it didn't work
      if terms_fn and not self.terms_cls:
         self.terms_cls = self._train(terms)
         if is_dump_cls:
            self._dump_terms_cls()

      if bigrams_fn and not self.bigrams_cls:
         self.bigrams_cls = self._train(bigrams)
         if is_dump_cls:
            self._dump_bigrams_cls()

      if trigrams_fn and not self.trigrams_cls:
         self.trigrams_cls = self._train(trigrams)
         if is_dump_cls:
            self._dump_trigrams_cls()

      # load dictionary for stemming purposes
      if terms_by_root_form_fn:
         w = gzip.open(terms_by_root_form_fn)
         self._terms_by_root_form = msgpack.unpack(w, encoding='utf-8')
         w.close()

         # create a set of all terms from 'self._terms_by_root_form'
         # in 'self._allterms'; this is a temporary solution of increasing
         # the speed of looking up for terms to stem
         for aroot in self._terms_by_root_form:
            self._allterms.add(aroot)
            for aterm in self._terms_by_root_form[aroot]:
               self._allterms.add(aterm)
Example #52
    def work(self):
        time.sleep(random.random())
        while True:
            task = None

            # check status, sleep longer if not processing, break if finished
            status = self.status()
            #print self.name, status
            if status == 'F':
                break
            elif status in ['?']:
                time.sleep(random.randint(15, 30))
                continue

            try:
                if self.max_workers is not None and \
                    conn.llen(self.processing.format(self.name)) >= self.max_workers:
                    time.sleep(1)
                    continue

                result = conn.lpop(self.tasks.format(self.name))
                if result is None:
                    time.sleep(3)
                    continue

                task = result
                caller, args, kwargs = unpack(task)
                conn.rpush(self.processing.format(self.name), task)
                conn.set(self.updated_at.format(self.name), time.mktime(time.gmtime()))

                print('work on {}, {}, {}'.format(caller, args[:5], kwargs))
                if '.' in caller:
                    module, method = caller.rsplit('.', 1)
                    module = __import__(module, fromlist=[method])
                    caller = getattr(module, method)
                else:
                    method = caller
                    caller = sys.modules['__builtin__'].__dict__[method]
            except:
                print("can't obtain caller, locals: {}".format(locals()))
                traceback.print_exc()
                if task is not None:
                    self.finish_task(task)

                continue

            try:
                caller(*args, **kwargs)
            except:
                traceback.print_exc()
            finally:
                self.finish_task(task)
Example #53
    def testPiecemapCreate(self, site):
        inner_path = self.createBigfile(site)
        content = site.storage.loadJson("content.json")
        assert "data/optional.any.iso" in content["files_optional"]
        file_node = content["files_optional"][inner_path]
        assert file_node["size"] == 10 * 1000 * 1000
        assert file_node["sha512"] == "47a72cde3be80b4a829e7674f72b7c6878cf6a70b0c58c6aa6c17d7e9948daf6"
        assert file_node["piecemap"] == inner_path + ".piecemap.msgpack"

        piecemap = msgpack.unpack(site.storage.open(file_node["piecemap"], "rb"))["optional.any.iso"]
        assert len(piecemap["sha512_pieces"]) == 10
        assert piecemap["sha512_pieces"][0] != piecemap["sha512_pieces"][1]
        assert piecemap["sha512_pieces"][0].encode("hex") == "a73abad9992b3d0b672d0c2a292046695d31bebdcb1e150c8410bbe7c972eff3"
Example #54
File: mirteFile.py Project: sgielen/mirte
def _parse_mirteFile(path):
    """ Open and parses the mirteFile at <path>. """
    cache_path = os.path.join(os.path.dirname(path),
                CACHE_FILENAME_TEMPLATE % os.path.basename(path))
    if (os.path.exists(cache_path) and
                os.path.getmtime(cache_path) >= os.path.getmtime(path)):
        with open(cache_path) as f:
            return msgpack.unpack(f)
    with open(path) as f:
        ret = yaml.load(f)
    with open(cache_path, 'w') as f:
        msgpack.pack(ret, f)
    return ret
Example #55
 def prepare_txn(self, transaction_id, do_cleanup=True):
     self._active_txn = True
     try:
         self.lock.upgrade()
     except (LockError, LockErrorT):
         # if upgrading the lock to exclusive fails, we do not have an
         # active transaction. this is important for "serve" mode, where
         # the repository instance lives on - even if exceptions happened.
         self._active_txn = False
         raise
     if not self.index or transaction_id is None:
         try:
             self.index = self.open_index(transaction_id, False)
         except RuntimeError:
             self.check_transaction()
             self.index = self.open_index(transaction_id, False)
     if transaction_id is None:
         self.segments = {}  # XXX bad name: usage_count_of_segment_x = self.segments[x]
         self.compact = FreeSpace()  # XXX bad name: freeable_space_of_segment_x = self.compact[x]
     else:
         if do_cleanup:
             self.io.cleanup(transaction_id)
         hints_path = os.path.join(self.path, 'hints.%d' % transaction_id)
         index_path = os.path.join(self.path, 'index.%d' % transaction_id)
         try:
             with open(hints_path, 'rb') as fd:
                 hints = msgpack.unpack(fd)
         except (msgpack.UnpackException, msgpack.ExtraData, FileNotFoundError) as e:
             logger.warning('Repository hints file missing or corrupted, trying to recover')
             if not isinstance(e, FileNotFoundError):
                 os.unlink(hints_path)
             # index must exist at this point
             os.unlink(index_path)
             self.check_transaction()
             self.prepare_txn(transaction_id)
             return
         except OSError as os_error:
             raise InternalOSError(os_error) from None
         if hints[b'version'] == 1:
             logger.debug('Upgrading from v1 hints.%d', transaction_id)
             self.segments = hints[b'segments']
             self.compact = FreeSpace()
             for segment in sorted(hints[b'compact']):
                 logger.debug('Rebuilding sparse info for segment %d', segment)
                 self._rebuild_sparse(segment)
             logger.debug('Upgrade to v2 hints complete')
         elif hints[b'version'] != 2:
             raise ValueError('Unknown hints file version: %d' % hints[b'version'])
         else:
             self.segments = hints[b'segments']
             self.compact = FreeSpace(hints[b'compact'])
Example #56
    def __init__(self, dataset, machine_id, all_machines, max_trees=None):
        with open('{}.random'.format(dataset), 'r+b') as packfile:
            data = msgpack.unpack(packfile, use_list=False)
        g_adj, g_ew, gold_signs, phi = load_real_graph(dataset)
        self.bfs_root = max(g_adj.items(), key=lambda x: len(x[1]))[0]
        nodes = list((range(len(g_adj))))
        self.gold_signs = np.array([gold_signs[u] for u in nodes])
        self.gold = np.array([gold_signs[u] for u in nodes])

        batch_orders_raw = np.array(data[b'batch_order'])
        self.batch_orders = {}
        for bo, tf in zip(batch_orders_raw, train_size):
            self.batch_orders[int(1000*tf)] = np.array([list(_) for _ in bo])

        with open('{}_extra_000.random'.format(dataset), 'r+b') as packfile:
            pertub_35 = np.array(msgpack.unpack(packfile, use_list=False))
        changed_signs = data[b'changed_signs']
        self.changed_signs = np.vstack((changed_signs, pertub_35[np.newaxis, :, :]))

        if max_trees is not None:
            max_trees *= len(all_machines)
        mst_raw = data[b'mst']
        self.mst_processed = []
        for tree_edges in mst_raw[:max_trees]:
            self.mst_processed.append(process_edges_into_tree(tree_edges, g_ew))

        rst_raw = data[b'rst']
        self.rst_processed = []
        for tree_edges in rst_raw[:max_trees]:
            self.rst_processed.append(process_edges_into_tree(tree_edges, g_ew))

        self.nodes_order = np.array(data[b'nodes_order'])

        self.mmapping = {v: i for i, v in enumerate(sorted(all_machines))}
        self.this_machine = self.mmapping[machine_id]

        self.sorted_training_set = None
        self.sorted_testing_set = None
Example #57
File: parse.py Project: jseppanen/textpile
def load(paths):
    for path in sorted(paths):
        for doc in msgpack.unpack(file(path), encoding='utf-8'):
            soup=BeautifulSoup(doc['_page'])
            desc=soup.find(id='jobDescription').text
            try:
                date=parse_publication_time(doc['publication_time'])
            except ValueError, err:
                warnings.warn(str(err))
                continue
            yield dict(title=doc['name'], desc=desc,
                       url=doc['job_url'], company=doc['employer'],
                       location=doc['location_text'],
                       published=date)
Example #58
    def wrapper(io):
        http_dict = msgpack.unpack(io)
        meta = http_dict['meta']
        headers = meta['headers']
        cookies = meta['cookies']
        request = http_dict['request']

        environ = {
            'wsgi.version':         (1, 0),
            'wsgi.url_scheme':      'https' if meta['secure'] else 'http',
            'wsgi.input':           io,
            'wsgi.errors':          Log(),
            'wsgi.multithread':     False,
            'wsgi.multiprocess':    True,
            'wsgi.run_once':        False,
            'SERVER_SOFTWARE':      "Cocaine",
            'REQUEST_METHOD':       meta['method'],
            'SCRIPT_NAME':          meta.get('script_name', ''),
            'PATH_INFO':            meta.get('path_info', ''),
            'QUERY_STRING':         meta.get('query_string', ''),
            'CONTENT_TYPE':         headers.get('CONTENT-TYPE', ''),
            'CONTENT_LENGTH':       headers.get('CONTENT_LENGTH', ''),
            'REMOTE_ADDR':          meta.get('remote_addr', ''),
            'REMOTE_PORT':          meta.get('remote_port', ''),
            'SERVER_NAME':          '',
            'SERVER_PORT':          '',
            'SERVER_PROTOCOL':      '',
            'HTTP_HOST':            meta['host']
        }

        for key, value in headers.items():
            key = 'HTTP_' + key.upper().replace('-', '_')
            if key not in ('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
                environ[key] = value

        def start_response(status, response_headers, exc_info=None):
            if exc_info:
                 try:
                     raise exc_info[0], exc_info[1], exc_info[2]
                 finally:
                     exc_info = None    # Avoid circular ref.

            pack({'code': int(status.split(' ')[0]), 'headers': response_headers}, io)

        result = function(environ, start_response)
        pack(result, io)

        if hasattr(result, 'close'):
            result.close()
Example #59
File: safe.py Project: bwesterb/pol
    def load_from_stream(stream, nworkers, use_threads):
        """ Loads a Safe form a `stream'.

            If you load from a file, use `open' for that function also
            handles locking. """
        start_time = time.time()
        l.debug("Unpacking ...")
        magic = stream.read(len(SAFE_MAGIC))
        if magic != SAFE_MAGIC:
            raise WrongMagicError
        data = msgpack.unpack(stream, use_list=True)
        l.debug(" unpacked in %.2fs", time.time() - start_time)
        if "type" not in data or not isinstance(data["type"], basestring) or data["type"] not in TYPE_MAP:
            raise SafeFormatError("Invalid `type' attribute")
        return TYPE_MAP[data["type"]](data, nworkers, use_threads)
Example #60
def merge_attached_event(mpack_event, data):
    # Merge msgpack serialized event.
    if mpack_event.size > MAX_MSGPACK_EVENT_SIZE_BYTES:
        return

    try:
        event = unpack(mpack_event)
    except (UnpackException, ExtraData) as e:
        minidumps_logger.exception(e)
        return

    for key in event:
        value = event.get(key)
        if value is not None:
            data[key] = value