Code example #1
File: flann.py  Project: Kitware/SMQTK
    def _load_flann_model(self):
        if not self._descr_cache and not self._descr_cache_elem.is_empty():
            # Load descriptor cache
            # - is copied on fork, so only need to load here.
            self._log.debug("Loading cached descriptors")
            self._descr_cache = \
                cPickle.loads(self._descr_cache_elem.get_bytes())

        # Params pickle includes the build params + our local state params
        if self._index_param_elem and not self._index_param_elem.is_empty():
            state = cPickle.loads(self._index_param_elem.get_bytes())
            self._build_autotune = state['b_autotune']
            self._build_target_precision = state['b_target_precision']
            self._build_sample_frac = state['b_sample_frac']
            self._distance_method = state['distance_method']
            self._flann_build_params = state['flann_build_params']

        # Load the binary index
        if self._index_elem and not self._index_elem.is_empty():
            # make numpy matrix of descriptor vectors for FLANN
            pts_array = [d.vector() for d in self._descr_cache]
            pts_array = numpy.array(pts_array, dtype=pts_array[0].dtype)
            pyflann.set_distance_type(self._distance_method)
            self._flann = pyflann.FLANN()
            tmp_fp = self._index_elem.write_temp()
            self._flann.load_index(tmp_fp, pts_array)
            self._index_elem.clean_temp()
            del pts_array, tmp_fp

        # Set our stored PID to the current process's PID
        self._pid = multiprocessing.current_process().pid
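
For reference, a hypothetical sketch of the pickled parameter blob the loader above expects. The key names come from the loader itself; the example values and the save path are assumptions, not part of this example:

import six.moves.cPickle as cPickle

state = {
    'b_autotune': True,              # assumed example values
    'b_target_precision': 0.95,
    'b_sample_frac': 0.1,
    'distance_method': 'hik',
    'flann_build_params': {},
}
blob = cPickle.dumps(state, protocol=2)
assert cPickle.loads(blob) == state  # the loader reads these keys back verbatim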
Code example #2
    def test_pickle(self):
        a = T.scalar()  # the a is for 'anonymous' (un-named).
        x, s = T.scalars('xs')

        f = function([x, In(a, value=1.0, name='a'), In(s, value=0.0, update=s+a*x, mutable=True)], s+a*x)

        try:
            # Note that here we also test protocol 0 on purpose, since it
            # should work (even though one should not use it).
            g = pickle.loads(pickle.dumps(f, protocol=0))
            g = pickle.loads(pickle.dumps(f, protocol=-1))
        except NotImplementedError as e:
            if e.args[0].startswith('DebugMode is not picklable'):
                return
            else:
                raise
        # if they both return, assume  that they return equivalent things.
        # print [(k,id(k)) for k in f.finder.keys()]
        # print [(k,id(k)) for k in g.finder.keys()]

        self.assertFalse(g.container[0].storage is f.container[0].storage)
        self.assertFalse(g.container[1].storage is f.container[1].storage)
        self.assertFalse(g.container[2].storage is f.container[2].storage)
        self.assertFalse(x in g.container)
        self.assertFalse(x in g.value)

        self.assertFalse(g.value[1] is f.value[1])  # should not have been copied
        self.assertFalse(g.value[2] is f.value[2])  # should have been copied because it is mutable.
        self.assertFalse((g.value[2] != f.value[2]).any())  # its contents should be identical

        self.assertTrue(f(2, 1) == g(2))  # they should be in sync, default value should be copied.
        self.assertTrue(f(2, 1) == g(2))  # they should be in sync, default value should be copied.
        f(1, 2)  # put them out of sync
        self.assertFalse(f(1, 2) == g(1, 2))  # they should not be equal anymore.
Code example #3
def test_rrulewrapper():
    r = rrulewrapper(2)
    try:
        pickle.loads(pickle.dumps(r))
    except RecursionError:
        print('rrulewrapper pickling test failed')
        raise
Code example #4
File: test_context.py  Project: sot/pyyaks
def test_store_update_context():
    src.val.nested = 'nested{{src.ccdid}}'
    src.val['ccdid'] = 2
    src['srcdir'] = 'obs{{ src.obsid }}/{{src.nested}}'
    files['srcdir'] = '{{ src.srcdir }}'
    src['obsid'] = 123
    src.val['ccdid'] = 2
    src['ra'] = 1.4343256789
    src['ra'].format = '%.4f'
    files['evt2'] = 'obs{{ src.obsid }}/{{src.nested}}/acis_evt2'
    src.val.nested = 'nested{{src.ccdid}}'

    tmp = pickle.dumps(src)
    tmp2 = pickle.dumps(files)

    src.clear()
    files.clear()

    assert src['ra'].val is None
    assert files['evt2'].val is None

    src.update(pickle.loads(tmp))
    files.update(pickle.loads(tmp2))

    assert str(src['ra']) == '1.4343'
    assert str(src['srcdir']) == 'obs123/nested2'
    assert files['srcdir'].rel == 'data/obs123/nested2'
    assert files.rel.srcdir == 'data/obs123/nested2'
    assert str(files['evt2.fits']) == 'data/obs123/nested2/acis_evt2.fits'
Code example #5
File: test_run.py  Project: manahl/pytest-plugins
def test_run_in_runclassmethod():
    class C(object):
        @classmethod
        def fn(cls, *args, **kwargs):
            return cls, args, kwargs
    C.__name__ = 'C_' + str(uuid4()).replace('-', '_')
    C.__module__ = 'pytest_shutil.run'
    with patch('pytest_shutil.run.execnet') as execnet:
        gw = execnet.makegateway.return_value
        chan = gw.remote_exec.return_value
        chan.receive.return_value = cPickle.dumps(sentinel.ret)
        c = C()
        with patch.object(run, C.__name__, C, create=True):
            run.run_in_subprocess(c.fn, python='sentinel.python')(ARG, kw=KW)
            ((s,), _) = chan.send.call_args
            if sys.version_info < (3, 0, 0):
                # Class methods are not pickleable in Python 2.
                assert cPickle.loads(s) == (run._invoke_method, (C, 'fn', ARG), {'kw': KW})
            else:
                # Class methods are pickleable in Python 3.
                assert cPickle.loads(s) == (c.fn, (ARG,), {'kw': KW})
            ((remote_fn,), _) = gw.remote_exec.call_args
            ((chan.receive.return_value,), _) = chan.send.call_args
            remote_fn(chan)
            chan.send.assert_called_with(cPickle.dumps((C, (ARG,), {'kw': KW}), protocol=0))
Code example #6
File: test_pickle.py  Project: 7924102/matplotlib
def test_polar():
    ax = plt.subplot(111, polar=True)
    fig = plt.gcf()
    result = BytesIO()
    pf = pickle.dumps(fig)
    pickle.loads(pf)
    plt.draw()
Code example #7
 def test_unpickle_wrongname(self, universe_n, pickle_str_n):
     # we change the universe's anchor_name
     universe_n.anchor_name = "test2"
     # shouldn't unpickle if no name matches, even if there's a compatible
     # universe in the unnamed anchor list.
     with pytest.raises(RuntimeError):
         cPickle.loads(pickle_str_n)
Code example #8
 def test_pickling(self):
     """intbitset - pickling"""
     from six.moves import cPickle
     for set1 in self.sets + [[]]:
         self.assertEqual(self.intbitset(set1), cPickle.loads(cPickle.dumps(self.intbitset(set1), -1)))
     for set1 in self.sets + [[]]:
         self.assertEqual(self.intbitset(set1, trailing_bits=True), cPickle.loads(cPickle.dumps(self.intbitset(set1, trailing_bits=True), -1)))
Code example #9
def test_none_Constant():
    """ Tests equals

    We had an error in the past with unpickling
    """
    o1 = Constant(NoneTypeT(), None, name='NoneConst')
    o2 = Constant(NoneTypeT(), None, name='NoneConst')
    assert o1.equals(o2)
    assert NoneConst.equals(o1)
    assert o1.equals(NoneConst)
    assert NoneConst.equals(o2)
    assert o2.equals(NoneConst)

    # This trigger equals that returned the wrong answer in the past.
    import six.moves.cPickle as pickle
    import theano
    from theano import tensor

    x = tensor.vector('x')
    y = tensor.argmax(x)
    kwargs = {}
    # We can't pickle DebugMode
    if theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
        kwargs = {'mode': 'FAST_RUN'}
    f = theano.function([x], [y], **kwargs)
    pickle.loads(pickle.dumps(f))
Code example #10
File: test_DI_memory.py  Project: Kitware/SMQTK
    def test_added_descriptor_table_caching(self):
        cache_elem = DataMemoryElement(readonly=False)
        descrs = [random_descriptor() for _ in range(3)]
        expected_table = dict((r.uuid(), r) for r in descrs)

        i = MemoryDescriptorIndex(cache_elem)
        self.assertTrue(cache_elem.is_empty())

        # Should add descriptors to table, caching to writable element.
        i.add_many_descriptors(descrs)
        self.assertFalse(cache_elem.is_empty())
        self.assertEqual(pickle.loads(i.cache_element.get_bytes()),
                         expected_table)

        # Changes to the internal table (add, remove) should be reflected in
        # the cache.
        new_d = random_descriptor()
        expected_table[new_d.uuid()] = new_d
        i.add_descriptor(new_d)
        self.assertEqual(pickle.loads(i.cache_element.get_bytes()),
                         expected_table)

        rm_d = list(expected_table.values())[0]
        del expected_table[rm_d.uuid()]
        i.remove_descriptor(rm_d.uuid())
        self.assertEqual(pickle.loads(i.cache_element.get_bytes()),
                         expected_table)
Code example #11
File: definitions.py  Project: Averroes/Mathics
 def set_user_definitions(self, definitions):
     if definitions:
         if six.PY2:
             self.user = pickle.loads(base64.decodestring(definitions.encode('ascii')))
         else:
             self.user = pickle.loads(base64.decodebytes(definitions.encode('ascii')))
     else:
         self.user = {}
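
For context, a minimal sketch of the encoding side this method reverses, assuming the stored definitions string is an ordinary base64-encoded pickled dict (the example payload is hypothetical):

import base64
import pickle

user_defs = {'Global`x': 42}  # hypothetical definitions payload
encoded = base64.b64encode(pickle.dumps(user_defs)).decode('ascii')
assert pickle.loads(base64.b64decode(encoded)) == user_defs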
Code example #12
 def test_unpickle_noanchor(self, universe, pickle_str):
     # Shouldn't unpickle if the universe is removed from the anchors
     universe.remove_anchor()
     # In the complex (parallel) testing environment there's the risk of
     # other compatible Universes being available for anchoring even after
     # this one is expressly removed.
     # assert_raises(RuntimeError, cPickle.loads, pickle_str)
     with pytest.raises(RuntimeError):
         cPickle.loads(pickle_str)
Code example #13
File: test_CE_memory.py  Project: Kitware/SMQTK
    def test_serialization(self):
        e = smqtk.representation.classification_element.memory\
            .MemoryClassificationElement('test', 0)

        e2 = cPickle.loads(cPickle.dumps(e))
        self.assertEqual(e, e2)

        e.set_classification(a=0, b=1)
        e2 = cPickle.loads(cPickle.dumps(e))
        self.assertEqual(e, e2)
Code example #14
File: py.py  Project: mbodenhamer/syn
def assert_pickle_idempotent(obj):
    '''Assert that obj does not change (w.r.t. ==) under repeated picklings
    '''
    from six.moves.cPickle import dumps, loads
    obj1 = loads(dumps(obj))
    obj2 = loads(dumps(obj1))
    obj3 = loads(dumps(obj2))
    assert_equivalent(obj, obj1)
    assert_equivalent(obj, obj2)
    assert_equivalent(obj, obj3)
    assert type(obj) is type(obj3)
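
A brief usage sketch (illustrative only; it assumes assert_pickle_idempotent and its assert_equivalent helper are importable from this module):

# Any object with value-based equality works; both calls should pass silently.
assert_pickle_idempotent({'a': [1, 2, 3], 'b': ('x', 4.5)})
assert_pickle_idempotent(frozenset(range(10)))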
Code example #15
 def get(self, id):
     envelope_raw, attempts, delivered_indexes_raw = \
         self.redis.hmget(self._get_key(id), 'envelope', 'attempts',
                          'delivered_indexes')
     if not envelope_raw:
         raise KeyError(id)
     envelope = cPickle.loads(envelope_raw)
     del envelope_raw
     if delivered_indexes_raw:
         delivered_indexes = cPickle.loads(delivered_indexes_raw)
         self._remove_delivered_rcpts(envelope, delivered_indexes)
     return envelope, int(attempts or 0)
Code example #16
 def assert_sml_written_to_db(self, sml, voter_id):
     with sqlite3.connect("sqlite_sink.db") as conn:
         c = conn.cursor()
         c.execute('SELECT sml FROM smls WHERE voter_id = "' +
                   voter_id + '"')
         fetched_sml = c.fetchone()
         if six.PY2:
             fetched_sml = cPickle.loads(str(fetched_sml[0]))
         else:
             fetched_sml = cPickle.loads(fetched_sml[0])
         self.assertEqual(len(sml), len(fetched_sml))
         self.assertTrue((sml == fetched_sml).all())
Code example #17
File: solr_index.py  Project: Kitware/SMQTK
 def iterdescriptors(self):
     """
     Return an iterator over indexed descriptor element instances.
     """
     r = self.solr.select('%s:%s %s:*'
                          % (self.index_uuid_field, self.index_uuid,
                             self.descriptor_field))
     for doc in r.results:
         yield cPickle.loads(doc[self.descriptor_field])
     for _ in range(r.numFound // 10):
         r = r.next_batch()
         for doc in r.results:
             yield cPickle.loads(doc[self.descriptor_field])
Code example #18
File: solr_index.py  Project: Kitware/SMQTK
 def iteritems(self):
     """
     Return an iterator over indexed descriptor key and instance pairs.
     """
     r = self.solr.select('%s:%s %s:* %s:*'
                          % (self.index_uuid_field, self.index_uuid,
                             self.d_uid_field, self.descriptor_field))
     for doc in r.results:
         d = cPickle.loads(doc[self.descriptor_field])
         yield d.uuid(), d
     for _ in range(r.numFound // 10):
         r = r.next_batch()
         for doc in r.results:
             d = cPickle.loads(doc[self.descriptor_field])
             yield d.uuid(), d
Code example #19
File: redis_wrapper.py  Project: JiShangShiDai/frappe
	def get_value(self, key, generator=None, user=None, expires=False):
		"""Returns cache value. If not found and generator function is
			given, it will call the generator.

		:param key: Cache key.
		:param generator: Function to be called to generate a value if `None` is returned.
		:param expires: If True, the key is stored with an expiry, so don't cache it in frappe.local
		"""
		original_key = key
		key = self.make_key(key, user)

		if key in frappe.local.cache:
			val = frappe.local.cache[key]

		else:
			val = None
			try:
				val = self.get(key)
			except redis.exceptions.ConnectionError:
				pass

			if val is not None:
				val = pickle.loads(val)

			if not expires:
				if val is None and generator:
					val = generator()
					self.set_value(original_key, val, user=user)

				else:
					frappe.local.cache[key] = val

		return val
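
A minimal usage sketch, assuming frappe.cache() returns an instance of this wrapper and that 'daily_report' is a hypothetical cache key:

import frappe

def build_report():
    # Only called on a cache miss; the result is then stored for later calls.
    return {'rows': [1, 2, 3]}

report = frappe.cache().get_value('daily_report', generator=build_report)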
Code example #20
File: TaskManager.py  Project: gunesacar/OpenWPM
    def _check_failure_status(self):
        """ Check the status of command failures. Raise exceptions as necessary

        The failure status property is used by the various asynchronous
        command execution threads which interface with the
        remote browser manager processes. If a failure status is found, the
        appropriate steps are taken to gracefully close the infrastructure
        """
        self.logger.debug("Checking command failure status indicator...")
        if self.failure_status:
            self.logger.debug(
                "TaskManager failure status set, halting command execution.")
            self._cleanup_before_fail()
            if self.failure_status['ErrorType'] == 'ExceedCommandFailureLimit':
                raise CommandExecutionError(
                    "TaskManager exceeded maximum consecutive command "
                    "execution failures.",
                    self.failure_status['CommandSequence']
                )
            elif (self.failure_status['ErrorType'] == ("ExceedLaunch"
                                                       "FailureLimit")):
                raise CommandExecutionError(
                    "TaskManager failed to launch browser within allowable "
                    "failure limit.", self.failure_status['CommandSequence']
                )
            if self.failure_status['ErrorType'] == 'CriticalChildException':
                reraise(*pickle.loads(self.failure_status['Exception']))
Code example #21
File: parallel.py  Project: KristianJensen/cameo
        def get(self, block=True, timeout=None):
            """
            Retrieves the next item in the queue.

            Parameters
            ----------
            block: bool, default is True
                If True, the call blocks until an object is retrieved or the timeout is reached.
            timeout: long
                The timeout (in seconds) to wait for the queue to return an item. If block is
                False, the timeout is ignored.

            Returns
            -------
            item: object

            """
            if block:
                item = self._db.blpop(self._key, timeout=timeout)
                if item:
                    item = item[1]
                logger.debug("Wait...")
            else:
                item = self._db.lpop(self._key)
                logger.debug("No-wait...")

            logger.debug("Item: %s" % [item])
            if item:
                return pickle.loads(item)
            else:
                raise six.moves.queue.Empty
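
A usage sketch, assuming queue is an already-constructed instance of the Redis-backed queue this method belongs to:

import six

try:
    item = queue.get(block=False)  # non-blocking: returns immediately
except six.moves.queue.Empty:
    item = None                    # nothing pending in the backing list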
Code example #22
File: mutable_dict.py  Project: douban/dpark
    def _fetch_missing(cls, key):
        result = {}
        urls = env.trackerClient.call(GetValueMessage('mutable_dict:%s' % key))
        for url in urls:
            f = urllib.request.urlopen(url)
            if f.code is not None and f.code != 200:
                raise IOError('Open %s failed:%s' % (url, f.code))

            data = f.read()
            if len(data) < 4:
                raise IOError('Transfer %s failed: %s received' % (url, len(data)))

            length, = struct.unpack('<I', data[:4])
            if length != len(data):
                raise IOError('Transfer %s failed: %s received, %s expected' % (url,
                                                                                len(data), length))

            data = cPickle.loads(decompress(data[4:]))
            for k, v in data.items():
                if k in result:
                    r = result[k]
                    if v[1] == r[1]:
                        r0 = r[0]
                        v0 = v[0]
                        merged = r0.value if isinstance(r0, ConflictValues) else [r0]
                        merged += v0.value if isinstance(v0, ConflictValues) else [v0]
                        result[k] = (ConflictValues(merged), r[1])
                    else:
                        result[k] = v if v[1] > r[1] else r
                else:
                    result[k] = v

        return result
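
A hypothetical producer-side sketch of the wire format read above: a 4-byte little-endian length header (covering the whole blob, header included) followed by a compressed pickle. The project's compress/decompress helpers are assumed here to be zlib-compatible:

import struct
import zlib
import six.moves.cPickle as cPickle

payload = zlib.compress(cPickle.dumps({'k': ('v', 1)}, protocol=2))
blob = struct.pack('<I', len(payload) + 4) + payload
# Matches the reader's check: the header stores the total blob length.
assert struct.unpack('<I', blob[:4])[0] == len(blob)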
Code example #23
def get_server_weights(master_url='localhost:5000'):
    '''
    Retrieve master weights from parameter server
    '''
    request = urllib2.Request('http://{0}/parameters'.format(master_url),
                              headers={'Content-Type': 'application/elephas'})
    return pickle.loads(urllib2.urlopen(request).read())
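
A hypothetical Python-2/3 variant of the helper above, using six.moves instead of the Python-2-only urllib2 module (the endpoint and header are taken from the original):

from six.moves.urllib.request import Request, urlopen
import six.moves.cPickle as pickle

def get_server_weights_six(master_url='localhost:5000'):
    request = Request('http://{0}/parameters'.format(master_url),
                      headers={'Content-Type': 'application/elephas'})
    return pickle.loads(urlopen(request).read())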
Code example #24
File: test_libdcd.py  Project: MDAnalysis/mdanalysis
def test_pickle(dcd):
    dcd.seek(len(dcd) - 1)
    dump = pickle.dumps(dcd)
    new_dcd = pickle.loads(dump)

    assert dcd.fname == new_dcd.fname
    assert dcd.tell() == new_dcd.tell()
Code example #25
def test_pickle_unpickle_without_reoptimization():
    mode = theano.config.mode
    if mode in ["DEBUG_MODE", "DebugMode"]:
        mode = "FAST_RUN"
    x1 = T.fmatrix('x1')
    x2 = T.fmatrix('x2')
    x3 = theano.shared(numpy.ones((10, 10), dtype=floatX))
    x4 = theano.shared(numpy.ones((10, 10), dtype=floatX))
    y = T.sum(T.sum(T.sum(x1**2 + x2) + x3) + x4)

    updates = OrderedDict()
    updates[x3] = x3 + 1
    updates[x4] = x4 + 1
    f = theano.function([x1, x2], y, updates=updates, mode=mode)

    # now pickle the compiled theano fn
    string_pkl = pickle.dumps(f, -1)

    # compute f value
    in1 = numpy.ones((10, 10), dtype=floatX)
    in2 = numpy.ones((10, 10), dtype=floatX)

    # test unpickle without optimization
    default = theano.config.reoptimize_unpickled_function
    try:
        # the default is True
        theano.config.reoptimize_unpickled_function = False
        f_ = pickle.loads(string_pkl)
        assert f(in1, in2) == f_(in1, in2)
    finally:
        theano.config.reoptimize_unpickled_function = default
Code example #26
File: diskdict.py  Project: damiendr/pytools
    def __delitem__(self, key):
        if key in self.cache:
            del self.cache[key]

        from six.moves.cPickle import loads
        for item_id, key_pickle, version_pickle in self.db_conn.execute(
                "select id, key_pickle, version_pickle from data"
                " where key_hash = ? and version_hash = ?",
                (hash(key), self.version_hash)):
            if loads(key_pickle) == key and loads(version_pickle) == self.version:
                self.db_conn.execute("delete from data where id = ?", (item_id,))

        self.commit_countdown -= 1
        if self.commit_countdown <= 0:
            self.commit_countdown = self.commit_interval
            self.db_conn.commit()
Code example #27
 def test_unpickle_named(self):
     """Test that an AtomGroup can be unpickled (Issue 293)"""
     newag = cPickle.loads(self.pickle_str_n)
     # Can unpickle
     assert_array_equal(self.ag_n.indices, newag.indices)
     assert_(newag.universe is self.universe_n,
             "Unpickled AtomGroup on wrong Universe.")
Code example #28
File: memcached.py  Project: clayg/swift
    def get(self, key):
        """
        Gets the object specified by key.  It will also unserialize the object
        before returning if it is serialized in memcache with JSON, or if it
        is pickled and unpickling is allowed.

        :param key: key
        :returns: value of the key in memcache
        """
        key = md5hash(key)
        value = None
        for (server, fp, sock) in self._get_conns(key):
            try:
                with Timeout(self._io_timeout):
                    sock.sendall('get %s\r\n' % key)
                    line = fp.readline().strip().split()
                    while line[0].upper() != 'END':
                        if line[0].upper() == 'VALUE' and line[1] == key:
                            size = int(line[3])
                            value = fp.read(size)
                            if int(line[2]) & PICKLE_FLAG:
                                if self._allow_unpickle:
                                    value = pickle.loads(value)
                                else:
                                    value = None
                            elif int(line[2]) & JSON_FLAG:
                                value = json.loads(value)
                            fp.readline()
                        line = fp.readline().strip().split()
                    self._return_conn(server, fp, sock)
                    return value
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)
Code example #29
File: diskdict.py  Project: damiendr/pytools
    def __contains__(self, key):
        if key in self.cache:
            return True
        else:
            from six.moves.cPickle import loads
            for key_pickle, version_pickle, result_pickle in self.db_conn.execute(
                    "select key_pickle, version_pickle, result_pickle from data"
                    " where key_hash = ? and version_hash = ?",
                    (hash(key), self.version_hash)):
                if loads(str(key_pickle)) == key \
                        and loads(str(version_pickle)) == self.version:
                    result = loads(str(result_pickle))
                    self.cache[key] = result
                    return True

            return False
Code example #30
File: diskdict.py  Project: damiendr/pytools
    def __getitem__(self, key):
        try:
            return self.cache[key]
        except KeyError:
            from six.moves.cPickle import loads
            for key_pickle, version_pickle, result_pickle in self.db_conn.execute(
                    "select key_pickle, version_pickle, result_pickle from data"
                    " where key_hash = ? and version_hash = ?",
                    (hash(key), self.version_hash)):
                if loads(str(key_pickle)) == key \
                        and loads(str(version_pickle)) == self.version:
                    result = loads(str(result_pickle))
                    self.cache[key] = result
                    return result

            raise KeyError(key)
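
An editorial side note on the lookup above (not from the project): distinct keys can share a key_hash row, which is why each candidate row's pickled key is unpickled and compared with == before its result is trusted. CPython even ships a built-in collision:

# In CPython, hash(-1) == hash(-2) == -2, so both keys would select the same
# key_hash rows; only the unpickled-key comparison distinguishes them.
assert hash(-1) == hash(-2) == -2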
Code example #31
 def test_pickle_cpu(self):
     s = pickle.dumps(self.fs)
     fs2 = pickle.loads(s)
     self.check_equal_fs(self.fs, fs2)
Code example #32
 def test_pickle_cpu(self):
     fs2_serialized = pickle.dumps(self.fs2)
     fs2_loaded = pickle.loads(fs2_serialized)
     self.assertTrue((self.fs2.b.p.data == fs2_loaded.b.p.data).all())
     self.assertTrue(
         (self.fs2.fs1.a.p.data == fs2_loaded.fs1.a.p.data).all())
Code example #33
def pickle_then_unpickle(obj):
    """Pickle to a temp file then un-pickle."""
    import six.moves.cPickle as pickle

    return pickle.loads(pickle.dumps(obj))
Code example #34
 def _check_output(self):
     self._assert_expected_item(pickle.loads(self.output.getvalue()))
Code example #35
File: tracker_store.py  Project: nanhaishun/Chat-bot
 def deserialise_tracker(self, sender_id, _json):
     dialogue = pickler.loads(_json)
     tracker = self.init_tracker(sender_id)
     tracker.recreate_from_dialogue(dialogue)
     return tracker
Code example #36
File: size_test.py  Project: yurchor/blivet
 def test_pickling(self):
     s = Size("10 MiB")
     self.assertEqual(s, cPickle.loads(cPickle.dumps(s)))
Code example #37
    def add_classifier(self):
        """
        Upload a **trained** classifier pickled and encoded in standard base64
        encoding, matched with a descriptive label of that classifier's topic.

        Since all classifiers have only two result classes (positive and
        negative), the topic of the classifier is encoded in the descriptive
        label the user applies to the classifier.

        Below is an example call to this endpoint via the ``requests`` python
        module, showing how base64 data is sent::

            import base64
            import requests
            data_bytes = "Load some content bytes here."
            requests.post('http://localhost:5000/classifier',
                          data={'bytes_b64': base64.b64encode(data_bytes),
                                'label': 'some_label'})

        With curl on the command line::

            $ curl -X POST localhost:5000/classifier -d label=some_label \
                --data-urlencode "bytes_b64=$(base64 -w0 /path/to/file)"

            # If this fails, you may wish to encode the file separately and
            # use the file reference syntax instead:

            $ base64 -w0 /path/to/file.pkl > /path/to/file.pkl.b64
            $ curl -X POST localhost:5000/classifier -d label=some_label \
                --data-urlencode bytes_b64@/path/to/file.pkl.b64

        To lock this classifier and guard it against deletion, add
        "lock_label=true"::

            $ curl -X POST localhost:5000/classifier \
                -d "label=some_label" \
                -d "lock_label=true" \
                --data-urlencode "bytes_b64=$(base64 -w0 /path/to/file.pkl)"

        Data/Form arguments:
            bytes_b64
                Bytes, in the standard base64 encoding, of the pickled
                classifier.
            label
                Descriptive label to apply to this classifier. This should not
                conflict with existing classifier labels.
            lock_label
                If 'true', disallow deletion of this label. If 'false', allow
                deletion of this label. Only has an effect if deletion is
                enabled for this service. (Default: 'false')

        Possible error codes:
            400
                May mean one of:
                    - No pickled classifier base64 data or label provided.
                    - Label provided is in conflict with an existing label in
                    the classifier collection.

        Returns code 201 on success and the message: {
            label: <str>
        }

        """
        clfr_b64 = flask.request.values.get('bytes_b64', default=None)
        label = flask.request.values.get('label', default=None)
        lock_clfr_str = flask.request.values.get('lock_label', default='false')

        if clfr_b64 is None or len(clfr_b64) == 0:
            return make_response_json("No state base64 data provided.", 400)
        elif label is None or len(label) == 0:
            return make_response_json("No descriptive label provided.", 400)
        try:
            # This can throw a ValueError if lock_clfr is malformed JSON
            lock_clfr = bool(flask.json.loads(lock_clfr_str))
        except JSON_DECODE_EXCEPTION:
            return make_response_json(
                "Invalid boolean value for"
                " 'lock_label'. Was given: '%s'" % lock_clfr_str, 400)

        # If the given label conflicts with one already in the collection,
        # fail.
        if label in self.classifier_collection.labels():
            return make_response_json("Label '%s' already exists in"
                                      " classifier collection." % label,
                                      400,
                                      label=label)

        clfr = pickle.loads(base64.b64decode(clfr_b64.encode('utf-8')))

        try:
            self.classifier_collection.add_classifier(label, clfr)

            # If we're allowing deletions, get the lock flag from the form
            # and set it for this classifier
            if self.enable_classifier_removal and lock_clfr:
                self.immutable_labels.add(label)

        except ValueError:
            return make_response_json("Data added for label '%s' is not a"
                                      " Classifier." % label,
                                      400,
                                      label=label)

        return make_response_json("Uploaded classifier for label '%s'." %
                                  label,
                                  201,
                                  label=label)
Code example #38
File: nodes.py  Project: yuhonghong7035/Bento
def __copy(d):
    # Faster than deepcopy - ideally remove the need for deepcopy altogether
    return cPickle.loads(cPickle.dumps(d, protocol=2))
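
An illustrative check (not from the project) that this pickle round trip behaves like copy.deepcopy for picklable values, returning an equal but independent object:

import copy

original = {'nodes': [1, 2, 3]}
cloned = __copy(original)
assert cloned == original and cloned is not original
assert cloned == copy.deepcopy(original)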
Code example #39
    def update(self, job):
        """
        High-level method that replicates a single partition.

        :param job: a dict containing info about the partition to be replicated
        """
        self.replication_count += 1
        self.logger.increment('partition.update.count.%s' % (job['device'], ))
        headers = dict(self.default_headers)
        headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
        target_devs_info = set()
        failure_devs_info = set()
        begin = time.time()
        try:
            hashed, local_hash = tpool_reraise(
                self._diskfile_mgr._get_hashes,
                job['path'],
                do_listdir=(self.replication_count % 10) == 0,
                reclaim_age=self.reclaim_age)
            self.suffix_hash += hashed
            self.logger.update_stats('suffix.hashes', hashed)
            attempts_left = len(job['nodes'])
            synced_remote_regions = set()
            random.shuffle(job['nodes'])
            nodes = itertools.chain(
                job['nodes'], job['policy'].object_ring.get_more_nodes(
                    int(job['partition'])))
            while attempts_left > 0:
                # If this throws StopIteration it will be caught way below
                node = next(nodes)
                target_devs_info.add((node['replication_ip'], node['device']))
                attempts_left -= 1
                # if we have already synced to this remote region,
                # don't sync again on this replication pass
                if node['region'] in synced_remote_regions:
                    continue
                try:
                    with Timeout(self.http_timeout):
                        resp = http_connect(node['replication_ip'],
                                            node['replication_port'],
                                            node['device'],
                                            job['partition'],
                                            'REPLICATE',
                                            '',
                                            headers=headers).getresponse()
                        if resp.status == HTTP_INSUFFICIENT_STORAGE:
                            self.logger.error(
                                _('%(ip)s/%(device)s responded'
                                  ' as unmounted'), node)
                            attempts_left += 1
                            failure_devs_info.add(
                                (node['replication_ip'], node['device']))
                            continue
                        if resp.status != HTTP_OK:
                            self.logger.error(
                                _("Invalid response %(resp)s "
                                  "from %(ip)s"), {
                                      'resp': resp.status,
                                      'ip': node['replication_ip']
                                  })
                            failure_devs_info.add(
                                (node['replication_ip'], node['device']))
                            continue
                        remote_hash = pickle.loads(resp.read())
                        del resp
                    suffixes = [
                        suffix for suffix in local_hash
                        if local_hash[suffix] != remote_hash.get(suffix, -1)
                    ]
                    if not suffixes:
                        self.stats['hashmatch'] += 1
                        continue
                    hashed, recalc_hash = tpool_reraise(
                        self._diskfile_mgr._get_hashes,
                        job['path'],
                        recalculate=suffixes,
                        reclaim_age=self.reclaim_age)
                    self.logger.update_stats('suffix.hashes', hashed)
                    local_hash = recalc_hash
                    suffixes = [
                        suffix for suffix in local_hash
                        if local_hash[suffix] != remote_hash.get(suffix, -1)
                    ]
                    self.stats['rsync'] += 1
                    success, _junk = self.sync(node, job, suffixes)
                    with Timeout(self.http_timeout):
                        conn = http_connect(node['replication_ip'],
                                            node['replication_port'],
                                            node['device'],
                                            job['partition'],
                                            'REPLICATE',
                                            '/' + '-'.join(suffixes),
                                            headers=headers)
                        conn.getresponse().read()
                    if not success:
                        failure_devs_info.add(
                            (node['replication_ip'], node['device']))
                    # add only remote region when replicate succeeded
                    if success and node['region'] != job['region']:
                        synced_remote_regions.add(node['region'])
                    self.suffix_sync += len(suffixes)
                    self.logger.update_stats('suffix.syncs', len(suffixes))
                except (Exception, Timeout):
                    failure_devs_info.add(
                        (node['replication_ip'], node['device']))
                    self.logger.exception(
                        _("Error syncing with node: %s") % node)
            self.suffix_count += len(local_hash)
        except (Exception, Timeout):
            failure_devs_info.update(target_devs_info)
            self.logger.exception(_("Error syncing partition"))
        finally:
            self.stats['success'] += len(target_devs_info - failure_devs_info)
            self._add_failure_stats(failure_devs_info)
            self.partition_times.append(time.time() - begin)
            self.logger.timing_since('partition.update.timing', begin)
Code example #40
def loadTestReactor(inputFilePath=TEST_ROOT,
                    customSettings=None,
                    inputFileName="armiRun.yaml"):
    r"""
    Loads a test reactor. Can be used in other test modules.

    Parameters
    ----------
    inputFilePath : str
        Path to the directory of the armiRun.yaml input file.

    customSettings : dict with str keys and values of any type
        For each key in customSettings, the cs which is loaded from the
        armiRun.yaml will be overwritten to the value given in customSettings
        for that key.

    Returns
    -------
    o : Operator
    r : Reactor
    """
    # TODO: it would be nice to have this be more stream-oriented. Juggling files is
    # devilishly difficult.
    global TEST_REACTOR
    isotopicDepletion.applyDefaultBurnChain()
    fName = os.path.join(inputFilePath, inputFileName)
    customSettings = customSettings or {}
    isPickeledReactor = fName == ARMI_RUN_PATH and customSettings == {}
    assemblies.resetAssemNumCounter()

    if isPickeledReactor and TEST_REACTOR:
        # return test reactor only if no custom settings are needed.
        o, r, assemNum = cPickle.loads(TEST_REACTOR)
        assemblies.setAssemNumCounter(assemNum)
        settings.setMasterCs(o.cs)
        o.reattach(r, o.cs)
        return o, r

    cs = settings.Settings(fName=fName)

    # Overwrite settings if desired
    if customSettings:
        for settingKey, settingVal in customSettings.items():
            cs[settingKey] = settingVal

    if "verbosity" not in customSettings:
        runLog.setVerbosity("error")
    settings.setMasterCs(cs)
    cs["stationaryBlocks"] = []
    cs["nCycles"] = 3

    o = operators.factory(cs)
    r = reactors.loadFromCs(cs)
    o.initializeInterfaces(r)

    # put some stuff in the SFP too.
    for a in range(10):
        a = o.r.blueprints.constructAssem(o.r.core.geomType,
                                          o.cs,
                                          name="feed fuel")
        o.r.core.sfp.add(a)

    o.r.core.regenAssemblyLists()

    if isPickeledReactor:
        # cache it for fast load for other future tests
        # protocol=2 allows for classes with __slots__ but not __getstate__ to be pickled
        TEST_REACTOR = cPickle.dumps((o, o.r, assemblies.getAssemNum()),
                                     protocol=2)
    return o, o.r
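
An aside on the protocol=2 comment above: protocol 2 can pickle new-style instances that define __slots__ without a custom __getstate__. A minimal, self-contained illustration (not ARMI code):

import pickle

class _Slotted(object):
    __slots__ = ('value',)

s = _Slotted()
s.value = 7
restored = pickle.loads(pickle.dumps(s, protocol=2))  # round-trips under protocol 2
assert restored.value == 7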
Code example #41
File: reconstructor.py  Project: peng1916/swift
    def _get_suffixes_to_sync(self, job, node):
        """
        For SYNC jobs we need to make a remote REPLICATE request to get
        the remote node's current suffix's hashes and then compare to our
        local suffix's hashes to decide which suffixes (if any) are out
        of sync.

        :param job: the job dict, with the keys defined in ``_get_part_jobs``
        :param node: the remote node dict
        :returns: a (possibly empty) list of strings, the suffixes to be
                  synced with the remote node.
        """
        # get hashes from the remote node
        remote_suffixes = None
        try:
            with Timeout(self.http_timeout):
                resp = http_connect(
                    node['replication_ip'], node['replication_port'],
                    node['device'], job['partition'], 'REPLICATE',
                    '', headers=self.headers).getresponse()
            if resp.status == HTTP_INSUFFICIENT_STORAGE:
                self.logger.error(
                    _('%s responded as unmounted'),
                    self._full_path(node, job['partition'], '',
                                    job['policy']))
            elif resp.status != HTTP_OK:
                full_path = self._full_path(node, job['partition'], '',
                                            job['policy'])
                self.logger.error(
                    _("Invalid response %(resp)s from %(full_path)s"),
                    {'resp': resp.status, 'full_path': full_path})
            else:
                remote_suffixes = pickle.loads(resp.read())
        except (Exception, Timeout):
            # all exceptions are logged here so that our caller can
            # safely catch our exception and continue to the next node
            # without logging
            self.logger.exception('Unable to get remote suffix hashes '
                                  'from %r' % self._full_path(
                                      node, job['partition'], '',
                                      job['policy']))

        if remote_suffixes is None:
            raise SuffixSyncError('Unable to get remote suffix hashes')

        suffixes = self.get_suffix_delta(job['hashes'],
                                         job['frag_index'],
                                         remote_suffixes,
                                         node['index'])
        # now recalculate local hashes for suffixes that don't
        # match so we're comparing the latest
        local_suff = self._get_hashes(job['policy'], job['path'],
                                      recalculate=suffixes)

        suffixes = self.get_suffix_delta(local_suff,
                                         job['frag_index'],
                                         remote_suffixes,
                                         node['index'])

        self.suffix_count += len(suffixes)
        return suffixes
Code example #42
File: run.py  Project: akx/manahl-pytest-plugins
def _run_in_subprocess_remote_fn(channel):
    from six.moves import cPickle  # @UnresolvedImport @Reimport # NOQA
    fn, args, kwargs = cPickle.loads(channel.receive(None))
    channel.send(cPickle.dumps(fn(*args, **kwargs), protocol=0))
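
This is the receiving end of the protocol exercised in Code example #5: the parent pickles a (fn, args, kwargs) tuple onto an execnet channel and reads back a pickled result. A hypothetical in-memory stand-in for the channel illustrates the round trip:

from six.moves import cPickle

class FakeChannel(object):
    """Stores one pickled payload and captures whatever is sent back."""
    def __init__(self, payload):
        self.payload = payload
        self.result = None
    def receive(self, timeout=None):
        return self.payload
    def send(self, data):
        self.result = data

chan = FakeChannel(cPickle.dumps((max, (3, 7), {}), protocol=0))
_run_in_subprocess_remote_fn(chan)
assert cPickle.loads(chan.result) == 7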
Code example #43
 def merge(docs, otherenv):
     # type: (List[unicode], bytes) -> None
     env = pickle.loads(otherenv)
     self.env.merge_info_from(docs, env, self.app)
Code example #44
File: tst_miller.py  Project: dials/cctbx
def exercise_bins():
  uc = uctbx.unit_cell((11,11,13,90,90,120))
  sg_type = sgtbx.space_group_type("P 3 2 1")
  anomalous_flag = False
  d_min = 1
  m = miller.index_generator(uc, sg_type, anomalous_flag, d_min).to_array()
  f = flex.double()
  for i in range(m.size()): f.append(random.random())
  n_bins = 10
  b = miller.binning(uc, n_bins, 0, d_min)
  b = miller.binning(uc, n_bins, 0, d_min, 1.e-6)
  b = miller.binning(uc, n_bins, m)
  b = miller.binning(uc, n_bins, m, 0)
  b = miller.binning(uc, n_bins, m, 0, d_min)
  b = miller.binning(uc, n_bins, m, 0, d_min, 1.e-6)
  assert b.d_max() == -1
  assert approx_equal(b.d_min(), d_min)
  assert b.bin_d_range(0) == (-1,-1)
  assert approx_equal(b.bin_d_range(1), (-1,2.1544336))
  assert approx_equal(b.bin_d_range(b.n_bins_all()-1), (1,-1))
  d_star_sq = 0.5
  r = b.bin_d_range(b.get_i_bin(d_star_sq))
  d = 1/math.sqrt(d_star_sq)
  assert r[1] <= d <= r[0]
  h = (3,4,5)
  r = b.bin_d_range(b.get_i_bin(h))
  assert r[1] <= uc.d(h) <= r[0]
  # a quick test to exercise d-spacings on fractional Miller indices:
  assert approx_equal( uc.d((3,4,5)), uc.d_frac((3.001,4,5)), eps=0.001)
  binning1 = miller.binning(uc, n_bins, m)
  assert binning1.unit_cell().is_similar_to(uc)
  assert binning1.n_bins_used() == n_bins
  assert binning1.limits().size() == n_bins + 1
  assert binning1.n_bins_all() == n_bins + 2
  s = pickle.dumps(binning1)
  l = pickle.loads(s)
  assert str(l.unit_cell()) == "(11, 11, 13, 90, 90, 120)"
  assert approx_equal(l.limits(), binning1.limits())
  #
  binner1 = miller.ext.binner(binning1, m)
  assert binner1.miller_indices().id() == m.id()
  assert binner1.count(binner1.i_bin_d_too_large()) == 0
  assert binner1.count(binner1.i_bin_d_too_small()) == 0
  counts = binner1.counts()
  for i_bin in binner1.range_all():
    assert binner1.count(i_bin) == counts[i_bin]
    assert binner1.selection(i_bin).count(True) == counts[i_bin]
  assert list(binner1.range_all()) == list(range(binner1.n_bins_all()))
  assert list(binner1.range_used()) == list(range(1, binner1.n_bins_used()+1))
  binning2 = miller.binning(uc, n_bins - 2,
    binning1.bin_d_min(2),
    binning1.bin_d_min(n_bins))
  binner2 = miller.ext.binner(binning2, m)
  assert tuple(binner1.counts())[1:-1] == tuple(binner2.counts())
  array_indices = flex.size_t(range(m.size()))
  perm_array_indices1 = flex.size_t()
  perm_array_indices2 = flex.size_t()
  for i_bin in binner1.range_all():
    perm_array_indices1.extend(array_indices.select(binner1.selection(i_bin)))
    perm_array_indices2.extend(binner1.array_indices(i_bin))
  assert perm_array_indices1.size() == m.size()
  assert perm_array_indices2.size() == m.size()
  assert tuple(perm_array_indices1) == tuple(perm_array_indices2)
  b = miller.ext.binner(miller.binning(uc, n_bins, m, 0, d_min), m)
  assert approx_equal(b.bin_centers(1),
    (0.23207956, 0.52448148, 0.62711856, 0.70311998, 0.7652538,
     0.818567, 0.86566877, 0.90811134, 0.94690405, 0.98274518))
  assert approx_equal(b.bin_centers(2),
    (0.10772184, 0.27871961, 0.39506823, 0.49551249, 0.58642261,
     0.67067026, 0.74987684, 0.82507452, 0.89697271, 0.96608584))
  assert approx_equal(b.bin_centers(3),
    (0.050000075, 0.15000023, 0.25000038, 0.35000053, 0.45000068,
     0.55000083, 0.65000098, 0.75000113, 0.85000128, 0.95000143))
  v = flex.double(range(b.n_bins_used()))
  i = b.interpolate(v, 0)
  for i_bin in b.range_used():
    assert i.select(b.selection(i_bin)).all_eq(v[i_bin-1])
  dss = uc.d_star_sq(m)
  for d_star_power in (1,2,3):
    j = b.interpolate(v, d_star_power)
    x = flex.pow(dss, (d_star_power/2.))
    r = flex.linear_correlation(x, j)
    assert r.is_well_defined()
    assert approx_equal(
      r.coefficient(), (0.946401,0.990764,1.0)[d_star_power-1],
      eps=1.e-4, multiplier=None)
  #
  s = pickle.dumps(binner2)
  l = pickle.loads(s)
  assert str(l.unit_cell()) == "(11, 11, 13, 90, 90, 120)"
  assert approx_equal(l.limits(), binner2.limits())
  assert l.miller_indices().all_eq(binner2.miller_indices())
  assert l.bin_indices().all_eq(binner2.bin_indices())
  #
  limits = flex.random_double(size=10)
  bng = miller.binning(uc, limits)
  assert bng.unit_cell().is_similar_to(uc)
  assert approx_equal(bng.limits(), limits)
Code example #45
class MujocoEngineTest(parameterized.TestCase):

  def setUp(self):
    super(MujocoEngineTest, self).setUp()
    self._physics = engine.Physics.from_xml_path(MODEL_PATH)

  def _assert_attributes_equal(self, actual_obj, expected_obj, attr_to_compare):
    for name in attr_to_compare:
      actual_value = getattr(actual_obj, name)
      expected_value = getattr(expected_obj, name)
      try:
        if isinstance(expected_value, np.ndarray):
          np.testing.assert_array_equal(actual_value, expected_value)
        else:
          self.assertEqual(actual_value, expected_value)
      except AssertionError as e:
        raise AssertionError("Attribute '{}' differs from expected value. {}"
                             "".format(name, e.message))

  @parameterized.parameters(0, 'cart', u'cart')
  def testCameraIndexing(self, camera_id):
    height, width = 480, 640
    _ = engine.Camera(
        self._physics, height, width, camera_id=camera_id)

  def testDepthRender(self):
    plane_and_box = """
    <mujoco>
      <worldbody>
        <geom type="plane" pos="0 0 0" size="2 2 .1"/>
        <geom type="box" size=".1 .1 .1" pos="0 0 .1"/>
        <camera name="top" pos="0 0 3"/>
      </worldbody>
    </mujoco>
    """
    physics = engine.Physics.from_xml_string(plane_and_box)
    pixels = physics.render(height=200, width=200, camera_id='top', depth=True)
    # Nearest pixels should be 2.8m away
    np.testing.assert_approx_equal(pixels.min(), 2.8, 3)
    # Furthest pixels should be 3m away (depth is orthographic)
    np.testing.assert_approx_equal(pixels.max(), 3.0, 3)

  @parameterized.parameters([True, False])
  def testSegmentationRender(self, enable_geom_frame_rendering):
    box_four_corners = """
    <mujoco>
      <visual>
        <scale framelength="2"/>
      </visual>
      <worldbody>
        <geom name="box0" type="box" size=".2 .2 .2" pos="-1 1 .1"/>
        <geom name="box1" type="box" size=".2 .2 .2" pos="1 1 .1"/>
        <site name="box2" type="box" size=".2 .2 .2" pos="1 -1 .1"/>
        <site name="box3" type="box" size=".2 .2 .2" pos="-1 -1 .1"/>
        <camera name="top" pos="0 0 3"/>
      </worldbody>
    </mujoco>
    """
    physics = engine.Physics.from_xml_string(box_four_corners)
    obj_type_geom = enums.mjtObj.mjOBJ_GEOM  # Geom object type
    obj_type_site = enums.mjtObj.mjOBJ_SITE  # Site object type
    obj_type_decor = enums.mjtObj.mjOBJ_UNKNOWN  # Decor object type
    scene_options = wrapper.MjvOption()
    if enable_geom_frame_rendering:
      scene_options.frame = wrapper.mjbindings.enums.mjtFrame.mjFRAME_GEOM
    pixels = physics.render(height=200, width=200, camera_id='top',
                            segmentation=True, scene_option=scene_options)

    # The pixel indices below were chosen so that toggling the frame decors do
    # not affect the segmentation results.
    with self.subTest('Center pixels should have background label'):
      np.testing.assert_equal(pixels[95:105, 95:105, 0], -1)
      np.testing.assert_equal(pixels[95:105, 95:105, 1], -1)
    with self.subTest('Geoms have correct object type'):
      np.testing.assert_equal(pixels[15:25, 0:10, 1], obj_type_geom)
      np.testing.assert_equal(pixels[15:25, 190:200, 1], obj_type_geom)
    with self.subTest('Sites have correct object type'):
      np.testing.assert_equal(pixels[190:200, 190:200, 1], obj_type_site)
      np.testing.assert_equal(pixels[190:200, 0:10, 1], obj_type_site)
    with self.subTest('Geoms have correct object IDs'):
      np.testing.assert_equal(pixels[15:25, 0:10, 0],
                              physics.model.name2id('box0', obj_type_geom))
      np.testing.assert_equal(pixels[15:25, 190:200, 0],
                              physics.model.name2id('box1', obj_type_geom))
    with self.subTest('Sites have correct object IDs'):
      np.testing.assert_equal(pixels[190:200, 190:200, 0],
                              physics.model.name2id('box2', obj_type_site))
      np.testing.assert_equal(pixels[190:200, 0:10, 0],
                              physics.model.name2id('box3', obj_type_site))
    with self.subTest('Decor elements present if and only if geom frames are '
                      'enabled'):
      contains_decor = np.any(pixels[:, :, 1] == obj_type_decor)
      self.assertEqual(contains_decor, enable_geom_frame_rendering)

  def testTextOverlay(self):
    height, width = 480, 640
    overlay = engine.TextOverlay(title='Title', body='Body', style='big',
                                 position='bottom right')

    no_overlay = self._physics.render(height, width, camera_id=0)
    with_overlay = self._physics.render(height, width, camera_id=0,
                                        overlays=[overlay])
    self.assertFalse(np.all(no_overlay == with_overlay),
                     msg='Images are identical with and without text overlay.')

  def testSceneOption(self):
    height, width = 480, 640
    scene_option = wrapper.MjvOption()
    mjlib.mjv_defaultOption(scene_option.ptr)

    # Render geoms as semi-transparent.
    scene_option.flags[enums.mjtVisFlag.mjVIS_TRANSPARENT] = 1

    no_scene_option = self._physics.render(height, width, camera_id=0)
    with_scene_option = self._physics.render(height, width, camera_id=0,
                                             scene_option=scene_option)
    self.assertFalse(np.all(no_scene_option == with_scene_option),
                     msg='Images are identical with and without scene option.')

  def testRenderFlags(self):
    height, width = 480, 640
    cam = engine.Camera(self._physics, height, width, camera_id=0)
    cam.scene.flags[enums.mjtRndFlag.mjRND_WIREFRAME] = 1  # Enable wireframe
    enabled = cam.render().copy()
    cam.scene.flags[enums.mjtRndFlag.mjRND_WIREFRAME] = 0  # Disable wireframe
    disabled = cam.render().copy()
    self.assertFalse(
        np.all(disabled == enabled),
        msg='Images are identical regardless of whether wireframe is enabled.')

  @parameterized.parameters(((0.5, 0.5), (1, 3)),  # pole
                            ((0.5, 0.1), (0, 0)),  # ground
                            ((0.9, 0.9), (None, None)),  # sky
                           )
  def testCameraSelection(self, coordinates, expected_selection):
    height, width = 480, 640
    camera = engine.Camera(self._physics, height, width, camera_id=0)

    # Test for b/63380170: Enabling visualization of body frames adds
    # "non-model" geoms to the scene. This means that the indices of geoms
    # within `camera._scene.geoms` don't match the rows of `model.geom_bodyid`.
    camera.option.frame = enums.mjtFrame.mjFRAME_BODY

    selected = camera.select(coordinates)
    self.assertEqual(expected_selection, selected[:2])

  @parameterized.parameters(
      dict(camera_id='cam0', height=200, width=300),
      dict(camera_id=1, height=300, width=200),
      dict(camera_id=-1, height=400, width=400),
  )
  def testCameraMatrix(self, camera_id, height, width):
    """Tests the camera_matrix() method.

       Creates a model with two cameras and two small geoms. We render the scene
       with one of the cameras and check that the geom locations, projected into
       pixel space, are correct, using segmentation rendering.
       xyz2pixels() shows how the transformation is used. For a description
       of the camera matrix see https://en.wikipedia.org/wiki/Camera_matrix.

    Args:
      camera_id: One of the two cameras. Can be either integer or String.
      height: The height of the image (pixels).
      width: The width of the image (pixels).
    """

    def xyz2pixels(x, y, z, camera_matrix):
      """Transforms from world coordinates to pixel coordinates."""
      xs, ys, s = camera_matrix.dot(np.array([x, y, z, 1.0]))
      return xs/s, ys/s

    two_geoms_and_two_cameras = """
    <mujoco>
      <visual>
        <global fovy="55"/>
      </visual>
      <worldbody>
        <light name="top" pos="0 0 1"/>
        <geom name="red" pos=".2 0 0" size=".005" rgba="1 0 0 1"/>
        <geom name="green" pos=".2 .2 .1" size=".005" rgba="0 1 0 1"/>
        <camera name="cam0" pos="1 .5 1" zaxis="1 .5 1" fovy="20"/>
        <camera name="cam1" pos=".1 .1 1" xyaxes="1 1 0 -1 0 0"/>
      </worldbody>
    </mujoco>
    """
    physics = engine.Physics.from_xml_string(two_geoms_and_two_cameras)
    camera = engine.Camera(physics, width=width, height=height,
                           camera_id=camera_id)
    camera_matrix = camera.matrix  # Get camera matrix.
    pixels = camera.render(segmentation=True)  # Render a segmentation frame.
    for geom_id in [0, 1]:
      # Compute the location of the geom in pixel space using the camera matrix.
      x, y = xyz2pixels(*physics.data.geom_xpos[geom_id], camera_matrix)
      row = int(round(y))
      column = int(round(x))
      # Compare segmentation values of nearest pixel to corresponding geom.
      [obj_id, obj_type] = pixels[row, column, :]
      self.assertEqual(obj_type, enums.mjtObj.mjOBJ_GEOM)
      self.assertEqual(obj_id, geom_id)

  def testMovableCameraSetGetPose(self):
    height, width = 240, 320

    camera = engine.MovableCamera(self._physics, height, width)
    image = camera.render().copy()

    pose = camera.get_pose()

    lookat_offset = np.array([0.01, 0.02, -0.03])

    # Would normally pass the new values directly to camera.set_pose instead of
    # using the namedtuple _replace method, but this makes the asserts at the
    # end of the test a little cleaner.
    new_pose = pose._replace(distance=pose.distance * 1.5,
                             lookat=pose.lookat + lookat_offset,
                             azimuth=pose.azimuth + -15,
                             elevation=pose.elevation - 10)

    camera.set_pose(*new_pose)

    self.assertEqual(new_pose.distance, camera.get_pose().distance)
    self.assertEqual(new_pose.azimuth, camera.get_pose().azimuth)
    self.assertEqual(new_pose.elevation, camera.get_pose().elevation)
    np.testing.assert_allclose(new_pose.lookat, camera.get_pose().lookat)

    self.assertFalse(np.all(image == camera.render()))

  def testRenderExceptions(self):
    max_width = self._physics.model.vis.global_.offwidth
    max_height = self._physics.model.vis.global_.offheight
    max_camid = self._physics.model.ncam - 1
    with six.assertRaisesRegex(self, ValueError, 'width'):
      self._physics.render(max_height, max_width + 1, camera_id=max_camid)
    with six.assertRaisesRegex(self, ValueError, 'height'):
      self._physics.render(max_height + 1, max_width, camera_id=max_camid)
    with six.assertRaisesRegex(self, ValueError, 'camera_id'):
      self._physics.render(max_height, max_width, camera_id=max_camid + 1)
    with six.assertRaisesRegex(self, ValueError, 'camera_id'):
      self._physics.render(max_height, max_width, camera_id=-2)

  def testPhysicsRenderMethod(self):
    height, width = 240, 320
    image = self._physics.render(height=height, width=width)
    self.assertEqual(image.shape, (height, width, 3))
    depth = self._physics.render(height=height, width=width, depth=True)
    self.assertEqual(depth.shape, (height, width))
    segmentation = self._physics.render(height=height, width=width,
                                        segmentation=True)
    self.assertEqual(segmentation.shape, (height, width, 2))

  def testExceptionIfBothDepthAndSegmentation(self):
    with self.assertRaisesWithLiteralMatch(
        ValueError, engine._BOTH_SEGMENTATION_AND_DEPTH_ENABLED):
      self._physics.render(depth=True, segmentation=True)

  def testRenderFlagOverridesAreNotPersistent(self):
    camera = engine.Camera(self._physics)
    first_rgb = camera.render().copy()
    camera.render(segmentation=True)
    second_rgb = camera.render().copy()
    np.testing.assert_array_equal(first_rgb, second_rgb)

  def testCustomRenderFlags(self):
    default = self._physics.render()
    wireframe_string_key = self._physics.render(
        render_flag_overrides=dict(wireframe=True))
    self.assertFalse((default == wireframe_string_key).all())
    wireframe_enum_key = self._physics.render(
        render_flag_overrides={enums.mjtRndFlag.mjRND_WIREFRAME: True})
    np.testing.assert_array_equal(wireframe_string_key, wireframe_enum_key)

  @parameterized.parameters(dict(depth=True), dict(segmentation=True))
  def testExceptionIfRenderFlagOverridesAndDepthOrSegmentation(self, **kwargs):
    with self.assertRaisesWithLiteralMatch(
        ValueError,
        engine._RENDER_FLAG_OVERRIDES_NOT_SUPPORTED_FOR_DEPTH_OR_SEGMENTATION):
      self._physics.render(render_flag_overrides=dict(wireframe=True), **kwargs)

  def testExceptionIfOverlaysAndDepthOrSegmentation(self):
    overlay = engine.TextOverlay()
    with self.assertRaisesWithLiteralMatch(
        ValueError, engine._OVERLAYS_NOT_SUPPORTED_FOR_DEPTH_OR_SEGMENTATION):
      self._physics.render(depth=True, overlays=[overlay])
    with self.assertRaisesWithLiteralMatch(
        ValueError, engine._OVERLAYS_NOT_SUPPORTED_FOR_DEPTH_OR_SEGMENTATION):
      self._physics.render(segmentation=True, overlays=[overlay])

  def testNamedViews(self):
    self.assertEqual((1,), self._physics.control().shape)
    self.assertEqual((2,), self._physics.position().shape)
    self.assertEqual((2,), self._physics.velocity().shape)
    self.assertEqual((0,), self._physics.activation().shape)
    self.assertEqual((4,), self._physics.state().shape)
    self.assertEqual(0., self._physics.time())
    self.assertEqual(0.01, self._physics.timestep())

  def testSetGetPhysicsState(self):
    physics_state = self._physics.get_state()
    self._physics.set_state(physics_state)

    new_physics_state = np.random.random_sample(physics_state.shape)
    self._physics.set_state(new_physics_state)

    np.testing.assert_allclose(new_physics_state,
                               self._physics.get_state())

  def testSetInvalidPhysicsState(self):
    badly_shaped_state = np.repeat(self._physics.get_state(), repeats=2)

    with self.assertRaises(ValueError):
      self._physics.set_state(badly_shaped_state)

  def testNamedIndexing(self):
    self.assertEqual((3,), self._physics.named.data.xpos['cart'].shape)
    self.assertEqual((2, 3),
                     self._physics.named.data.xpos[['cart', 'pole']].shape)

  def testReload(self):
    self._physics.reload_from_xml_path(MODEL_PATH)

  def testReset(self):
    self._physics.reset()
    self.assertEqual(self._physics.data.qpos[1], 0)
    keyframe_id = 0
    self._physics.reset(keyframe_id=keyframe_id)
    self.assertEqual(self._physics.data.qpos[1],
                     self._physics.model.key_qpos[keyframe_id, 1])
    out_of_range = [-1, 3]
    max_valid = self._physics.model.nkey - 1
    for actual in out_of_range:
      with self.assertRaisesWithLiteralMatch(
          ValueError,
          engine._KEYFRAME_ID_OUT_OF_RANGE.format(
              max_valid=max_valid, actual=actual)):
        self._physics.reset(keyframe_id=actual)

  def testLoadAndReloadFromStringWithAssets(self):
    physics = engine.Physics.from_xml_string(
        MODEL_WITH_ASSETS, assets=ASSETS)
    physics.reload_from_xml_string(MODEL_WITH_ASSETS, assets=ASSETS)

  def testFree(self):
    def mock_free(obj):
      return mock.patch.object(obj, 'free', wraps=obj.free)

    with mock_free(self._physics.model) as mock_free_model:
      with mock_free(self._physics.data) as mock_free_data:
        with mock_free(self._physics.contexts.mujoco) as mock_free_mjrcontext:
          self._physics.free()

    mock_free_model.assert_called_once()
    mock_free_data.assert_called_once()
    mock_free_mjrcontext.assert_called_once()
    self.assertIsNone(self._physics.model.ptr)
    self.assertIsNone(self._physics.data.ptr)

  @parameterized.parameters(*enums.mjtWarning._fields[:-1])
  def testDivergenceException(self, warning_name):
    warning_enum = getattr(enums.mjtWarning, warning_name)
    with self.assertRaisesWithLiteralMatch(
        control.PhysicsError,
        engine._INVALID_PHYSICS_STATE.format(warning_names=warning_name)):
      with self._physics.check_invalid_state():
        self._physics.data.warning[warning_enum].number = 1
    # Existing warnings should not raise an exception.
    with self._physics.check_invalid_state():
      pass
    self._physics.reset()
    with self._physics.check_invalid_state():
      pass

  @parameterized.parameters(float('inf'), float('nan'), 1e15)
  def testBadQpos(self, bad_value):
    with self._physics.reset_context():
      self._physics.data.qpos[0] = bad_value
    with self.assertRaises(control.PhysicsError):
      with self._physics.check_invalid_state():
        mjlib.mj_checkPos(self._physics.model.ptr, self._physics.data.ptr)
    self._physics.reset()
    with self._physics.check_invalid_state():
      mjlib.mj_checkPos(self._physics.model.ptr, self._physics.data.ptr)

  def testNanControl(self):
    with self._physics.reset_context():
      pass

    # Apply the controls.
    with self.assertRaisesWithLiteralMatch(
        control.PhysicsError,
        engine._INVALID_PHYSICS_STATE.format(warning_names='mjWARN_BADCTRL')):
      with self._physics.check_invalid_state():
        self._physics.data.ctrl[0] = float('nan')
        self._physics.step()

  def testSuppressPhysicsError(self):
    bad_value = float('nan')
    message = engine._INVALID_PHYSICS_STATE.format(
        warning_names='mjWARN_BADCTRL')

    def assert_physics_error():
      self._physics.data.ctrl[0] = bad_value
      with self.assertRaisesWithLiteralMatch(control.PhysicsError, message):
        self._physics.forward()

    def assert_warning():
      self._physics.data.ctrl[0] = bad_value
      with mock.patch.object(engine.logging, 'warn') as mock_warn:
        self._physics.forward()
      mock_warn.assert_called_once_with(message)

    assert_physics_error()
    with self._physics.suppress_physics_errors():
      assert_warning()
      with self._physics.suppress_physics_errors():
        assert_warning()
      assert_warning()
    assert_physics_error()

  @parameterized.named_parameters(
      ('_copy', lambda x: x.copy()),
      ('_pickle_and_unpickle', lambda x: cPickle.loads(cPickle.dumps(x))),
  )
  def testCopyOrPicklePhysics(self, func):
    for _ in range(10):
      self._physics.step()
    physics2 = func(self._physics)
    self.assertNotEqual(physics2.model.ptr, self._physics.model.ptr)
    self.assertNotEqual(physics2.data.ptr, self._physics.data.ptr)
    model_attr_to_compare = ('nnames', 'njmax', 'body_pos', 'geom_quat')
    self._assert_attributes_equal(
        physics2.model, self._physics.model, model_attr_to_compare)
    data_attr_to_compare = ('time', 'energy', 'qpos', 'xpos')
    self._assert_attributes_equal(
        physics2.data, self._physics.data, data_attr_to_compare)
    for _ in range(10):
      self._physics.step()
      physics2.step()
    self._assert_attributes_equal(
        physics2.model, self._physics.model, model_attr_to_compare)
    self._assert_attributes_equal(
        physics2.data, self._physics.data, data_attr_to_compare)

  def testCopyDataOnly(self):
    physics2 = self._physics.copy(share_model=True)
    self.assertEqual(physics2.model.ptr, self._physics.model.ptr)
    self.assertNotEqual(physics2.data.ptr, self._physics.data.ptr)

  def testForwardDynamicsUpdatedAfterReset(self):
    gravity = -9.81
    self._physics.model.opt.gravity[2] = gravity
    with self._physics.reset_context():
      pass
    self.assertAlmostEqual(
        self._physics.named.data.sensordata['accelerometer'][2], -gravity)

  def testActuationNotAppliedInAfterReset(self):
    self._physics.data.ctrl[0] = 1.
    self._physics.after_reset()  # Calls `forward()` with actuation disabled.
    self.assertEqual(self._physics.data.actuator_force[0], 0.)
    self._physics.forward()  # Call `forward` directly with actuation enabled.
    self.assertEqual(self._physics.data.actuator_force[0], 1.)

  def testActionSpec(self):
    xml = """
    <mujoco>
      <worldbody>
        <body>
          <geom type="sphere" size="0.1"/>
          <joint type="hinge" name="hinge"/>
        </body>
      </worldbody>
      <actuator>
        <motor joint="hinge" ctrllimited="false"/>
        <motor joint="hinge" ctrllimited="true" ctrlrange="-1 2"/>
      </actuator>
    </mujoco>
    """
    physics = engine.Physics.from_xml_string(xml)
    spec = engine.action_spec(physics)
    self.assertEqual(np.float, spec.dtype)
    np.testing.assert_array_equal(spec.minimum, [-constants.mjMAXVAL, -1.0])
    np.testing.assert_array_equal(spec.maximum, [constants.mjMAXVAL, 2.0])

  def _check_valid_rotation_matrix(self, data_field_name):
    name = 'foo'
    quat = '0.5 0.7 0 0'  # Not normalized.
    xml_string = """
    <mujoco>
      <worldbody>
        <body name='{name}' quat='{quat}'/>
        <camera name='{name}' quat='{quat}'/>
        <geom name='{name}' quat='{quat}' size='0.1'/>
        <site name='{name}' quat='{quat}' size='0.1'/>
      </worldbody>
    </mujoco>
    """.format(name=name, quat=quat)
    physics = engine.Physics.from_xml_string(xml_string)
    rotation_matrix = getattr(physics.named.data, data_field_name)[name]
    self.assertAlmostEqual(np.linalg.det(rotation_matrix.reshape(3, 3)), 1)

  @parameterized.parameters(['xmat', 'geom_xmat', 'site_xmat'])
  def testValidRotationMatrixIfQuatNotNormalizedInXML(self, field_name):
    self._check_valid_rotation_matrix(field_name)

  # TODO(b/123918714): Update this once the bug has been fixed in MuJoCo.
  @unittest.expectedFailure
  def testValidCameraRotationMatrixIfQuatNotNormalizedInXML(self):
    self._check_valid_rotation_matrix('cam_xmat')
コード例 #46
 def _loads(self, value):
     return pickle.loads(value)
コード例 #47
ファイル: pickle.py プロジェクト: jsa4000/Singularity
def deserialize(data):
    return cPickle.loads(data)
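
A companion serialize() helper is not shown in the source; a minimal sketch, assuming the same cPickle import as above, would be:

def serialize(data):
    # Inverse of deserialize(): object -> byte string.
    return cPickle.dumps(data, cPickle.HIGHEST_PROTOCOL)

# Round trip: deserialize(serialize(x)) returns an equal object.
assert deserialize(serialize({'a': 1})) == {'a': 1}
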
コード例 #48
ファイル: test_hdf5.py プロジェクト: zhoujian1210/fuel
 def test_pickling(self):
     dataset = cPickle.loads(cPickle.dumps(self.dataset))
     assert_equal(len(dataset.nodes), 1)
コード例 #49
	def hgetall(self, name):
		return {key: pickle.loads(value) for key, value in
			iteritems(super(RedisWrapper, self).hgetall(self.make_key(name)))}
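
hgetall() above unpickles every hash value on the way out, which only works if the values were pickled when written. The write path is not shown; a minimal sketch of a hypothetical counterpart, assuming the standard redis-py hset() underneath, might be:

    def hset(self, name, key, value):
        # Hypothetical counterpart (not in the source): pickle the value so
        # that hgetall() above can unpickle it on the way back out.
        return super(RedisWrapper, self).hset(self.make_key(name), key,
                                              pickle.dumps(value))
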
コード例 #50
 def decode(self, input):
     try:
         return pickle.loads(input)
     except pickle.PickleError as e:
         raise SessionSerializerException(e)
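
The encoding half is not included in the source; a minimal sketch of a mirror-image encode() method, assuming the same exception wrapping, could look like:

 def encode(self, obj):
     try:
         # Serialize the session payload; wrap any pickling failure in the
         # same custom exception type used by decode().
         return pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
     except pickle.PickleError as e:
         raise SessionSerializerException(e)
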
コード例 #51
ファイル: model_test.py プロジェクト: professorlust/odin-ai
    def test_mnist(self):
        ds = fuel.load_mnist()
        m = model.SequentialClassifier(N.Flatten(outdim=2),
                                       N.Dense(64, activation=K.relu),
                                       N.Dense(10, activation=K.softmax))
        m.set_inputs(
            K.placeholder(shape=(None, 28, 28), name='X',
                          dtype='float32')).set_outputs(
                              K.placeholder(shape=(None, ),
                                            name='y',
                                            dtype='int32'))
        # ====== query info ====== #
        m.path
        self.assertEqual(m.is_initialized, True)
        self.assertEqual(m.input_shape, (None, 28, 28))
        self.assertEqual(m.output_shape, (None, 10))
        # ====== training test ====== #
        m.set_training_info(learning_rate=0.001, n_epoch=3)
        m.fit(X=(ds['X_train'], ds['y_train']),
              X_valid=(ds['X_valid'], ds['y_valid']))
        score = m.score(ds['X_test'][:], ds['y_test'][:])
        self.assertEqual(
            score > 0.8,
            True,
            msg='Test if the model gets reasonable results: %f accuracy' %
            score)
        # ====== make prediction and transform test ====== #
        np.random.seed(12)
        _ = np.random.rand(8, 28, 28)
        self.assertEqual(m.transform(_).shape, (8, 10))
        self.assertEqual(
            np.isclose(m.predict_proba(_).sum(-1), 1.).sum() == 8, True)
        self.assertEqual(len(m.predict(_)), 8)
        # ====== pickling test ====== #
        str_old = str(m)
        p_old = m.get_params(True)

        m = cPickle.loads(cPickle.dumps(m, protocol=cPickle.HIGHEST_PROTOCOL))
        str_new = str(m)
        p_new = m.get_params(True)
        # ====== test same configurations ====== #
        self.assertEqual(str_new, str_old)
        # ====== test same params ====== #
        for i, j in p_new.items():
            k = p_old[i]
            for a, b in zip(j, k):
                self.assertEqual(np.array_equal(a, b), True)
        # ====== test set params ====== #
        params = m.get_params(deep=True)
        params_new = {}
        for n, p in params.items():
            params_new[n] = [
                np.random.rand(*i.shape).astype('float32') for i in p
            ]
        m.set_params(**params_new)
        # test that the params now equal the new ones
        for i, j in m.get_params(deep=True).items():
            k = params_new[i]
            for a, b in zip(j, k):
                self.assertEqual(np.array_equal(a, b), True)
        # ====== training test ====== #
        print('Re-train the model a second time:')
        m.fit(X=(ds['X_train'], ds['y_train']),
              X_valid=(ds['X_valid'], ds['y_valid']))
        score = m.score(ds['X_test'][:], ds['y_test'][:])
        self.assertEqual(
            score > 0.8,
            True,
            msg='Test if the model gets reasonable results: %f accuracy' %
            score)
コード例 #52
ファイル: test_hdf5.py プロジェクト: zhoujian1210/fuel
 def test_data_stream_pickling(self):
     stream = DataStream(H5PYDataset(self.h5file, which_sets=('train', )),
                         iteration_scheme=SequentialScheme(100, 10))
     cPickle.loads(cPickle.dumps(stream))
     stream.close()
コード例 #53
 def set_state(self, state):
     self._lazy_value = state[0]
     self._value = pickle.loads(state[1])
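
set_state() above expects a (lazy_value, pickled_value) pair, so the corresponding get_state() (not shown in the source) presumably produces the same shape; a minimal sketch:

 def get_state(self):
     # Hypothetical counterpart to set_state(): emit the pair it consumes.
     return (self._lazy_value, pickle.dumps(self._value))
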
コード例 #54
    def result(self,
               timeout=None,
               check_only=False,
               throw_except=True,
               storage_handler=None):
        """


        From the python docs:

        Return the value returned by the call. If the call hasn't yet
        completed then this method will wait up to timeout seconds. If
        the call hasn't completed in timeout seconds then a
        TimeoutError will be raised. timeout can be an int or float. If
        timeout is not specified or None then there is no limit to the
        wait time.

        If the future is cancelled before completing then CancelledError will be raised.

        If the call raised then this method will raise the same exception.

        """
        if self._state == JobState.new:
            raise ValueError("job not yet invoked")

        if self._state == JobState.success:
            return self._return_val

        if self._state == JobState.error:
            if throw_except:
                raise self._exception
            else:
                return None

        if storage_handler is None:
            storage_config = wrenconfig.extract_storage_config(
                wrenconfig.default())
            storage_handler = storage.Storage(storage_config)

        storage_utils.check_storage_path(storage_handler.get_storage_config(),
                                         self.storage_path)

        call_status = storage_handler.get_call_status(self.callset_id,
                                                      self.call_id)

        self.status_query_count += 1

        ## FIXME implement timeout
        if timeout is not None:
            raise NotImplementedError()

        if check_only is True:
            if call_status is None:
                return None

        while call_status is None:
            time.sleep(self.GET_RESULT_SLEEP_SECS)
            call_status = storage_handler.get_call_status(
                self.callset_id, self.call_id)

            self.status_query_count += 1
        self._invoke_metadata['status_done_timestamp'] = time.time()
        self._invoke_metadata['status_query_count'] = self.status_query_count

        self.run_status = call_status  # this is the remote status information
        self.invoke_status = self._invoke_metadata  # local status information

        if call_status['exception'] is not None:
            # the wrenhandler had an exception
            exception_str = call_status['exception']
            print(call_status)
            exception_args = call_status['exception_args']
            if exception_args[0] == "WRONGVERSION":
                if throw_except:
                    raise Exception("Pywren version mismatch: remote " + \
                        "expected version {}, local library is version {}".format(
                            exception_args[2], exception_args[3]))
                return None
            elif exception_args[0] == "OUTATIME":
                if throw_except:
                    raise Exception("process ran out of time")
                return None
            else:
                if throw_except:
                    if 'exception_traceback' in call_status:
                        logger.error(call_status['exception_traceback'])
                    raise Exception(exception_str, *exception_args)
                return None

        call_output_time = time.time()
        call_invoker_result = pickle.loads(
            storage_handler.get_call_output(self.callset_id, self.call_id))

        call_output_time_done = time.time()
        self._invoke_metadata[
            'download_output_time'] = call_output_time_done - call_output_time

        self._invoke_metadata[
            'download_output_timestamp'] = call_output_time_done
        call_success = call_invoker_result['success']
        logger.info("ResponseFuture.result() {} {} call_success {}".format(
            self.callset_id, self.call_id, call_success))

        self._call_invoker_result = call_invoker_result

        if call_success:

            self._return_val = call_invoker_result['result']
            self._state = JobState.success
            return self._return_val

        elif throw_except:

            self._exception = call_invoker_result['result']
            self._traceback = (call_invoker_result['exc_type'],
                               call_invoker_result['exc_value'],
                               call_invoker_result['exc_traceback'])

            self._state = JobState.error
            if call_invoker_result.get('pickle_fail', False):
                logging.warning(
                    "there was an error pickling. The original exception: " + \
                        "{}\nThe pickling exception: {}".format(
                            call_invoker_result['exc_value'],
                            str(call_invoker_result['pickle_exception'])))

                reraise(Exception, call_invoker_result['exc_value'],
                        call_invoker_result['exc_traceback'])
            else:
                # reraise the exception
                reraise(*self._traceback)
        else:
            return None  # nothing, don't raise, no value
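
A minimal usage sketch of result(), assuming the classic pywren executor API (pwex and my_func are illustrative names, not from the source):

import pywren

def my_func(x):
    return x * x

pwex = pywren.default_executor()
futures = pwex.map(my_func, range(4))
# result() polls storage until each call finishes, then unpickles and
# returns the stored return value (or re-raises the remote exception).
print([f.result() for f in futures])
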
コード例 #55
ファイル: test_pickle.py プロジェクト: geib7592/matplotlib
def test_polar():
    ax = plt.subplot(111, polar=True)
    fig = plt.gcf()
    pf = pickle.dumps(fig)
    pickle.loads(pf)
    plt.draw()
コード例 #56
def _fix_recid(recid, logger):
    """Fix a given recid."""
    # logger.info("Upgrading record %s:" % recid)
    # 1) moving docname and type to the relation with bibrec
    bibrec_docs = run_sql(
        """select id_bibdoc, type from bibrec_bibdoc
                             where id_bibrec=%s""", (recid, ))
    are_equal = True

    for docid_str in bibrec_docs:
        docid = str(docid_str[0])
        doctype = str(docid_str[1])

        # logger.info("Upgrading document %s:" % (docid, ))
        res2 = run_sql("select docname, more_info from bibdoc where id=%s",
                       (docid, ))
        if not res2:
            logger.error(
                ("Error when migrating document %s attached to the "
                 "record %s: can not retrieve from the bibdoc table ") %
                (docid, recid))
        else:
            docname = str(res2[0][0])
            run_sql(
                """update bibrec_bibdoc set docname=%%s where id_bibrec=%s
                       and id_bibdoc=%s""" % (str(recid), docid), (docname, ))
            run_sql("update bibdoc set doctype=%%s where id=%s" % (docid, ),
                    (doctype, ))

        # 2) moving moreinfo to the new moreinfo structures (default namespace)
        if res2 and res2[0][1]:
            minfo = cPickle.loads(res2[0][1])
            # 2a migrating descriptions->version->format
            new_value = cPickle.dumps(minfo.get('descriptions', {}))
            run_sql(
                """INSERT INTO bibdocmoreinfo
                       (id_bibdoc, namespace, data_key, data_value)
                       VALUES (%s, %s, %s, %s)""",
                (str(docid), "", "descriptions", new_value))
            # 2b migrating comments->version->format
            new_value = cPickle.dumps(minfo.get('comments', {}))
            run_sql(
                """INSERT INTO bibdocmoreinfo
                       (id_bibdoc, namespace, data_key, data_value)
                       VALUES (%s, %s, %s, %s)""",
                (str(docid), "", "comments", new_value))
            # 2c migrating flags->flagname->version->format
            new_value = cPickle.dumps(minfo.get('flags', {}))
            run_sql(
                """INSERT INTO bibdocmoreinfo
                       (id_bibdoc, namespace, data_key, data_value)
                       VALUES (%s, %s, %s, %s)""",
                (str(docid), "", "flags", new_value))

            # 3) Verify the correctness of moreinfo transformations
            try:
                descriptions = cPickle.loads(
                    run_sql(
                        """SELECT data_value FROM bibdocmoreinfo
                       WHERE id_bibdoc=%s AND namespace=%s AND data_key=%s""",
                        (str(docid), '', 'descriptions'))[0][0])
                for version in minfo.get('descriptions', {}):
                    for docformat in minfo['descriptions'][version]:
                        v1 = descriptions[version][docformat]
                        v2 = minfo['descriptions'][version][docformat]
                        if v1 != v2:
                            are_equal = False
                            logger.info(("ERROR: Document %s: Expected "
                                         "description %s and got %s") %
                                        (str(docid), str(v2), str(v1)))
            except Exception as e:
                logger.info(
                    ("ERROR: Document %s: Problem with retrieving "
                     "descriptions: %s  MoreInfo: %s Descriptions: %s") %
                    (str(docid), str(e), str(minfo), str(descriptions)))

            try:
                comments = cPickle.loads(
                    run_sql(
                        """SELECT data_value FROM bibdocmoreinfo
                       WHERE id_bibdoc=%s AND namespace=%s AND data_key=%s""",
                        (str(docid), '', 'comments'))[0][0])
                for version in minfo.get('comments', {}):
                    for docformat in minfo['comments'][version]:

                        v1 = comments[version][docformat]
                        v2 = minfo['comments'][version][docformat]
                        if v1 != v2:
                            are_equal = False
                            logger.info(("ERROR: Document %s: Expected "
                                         "comment %s and got %s") %
                                        (str(docid), str(v2), str(v1)))
            except Exception as e:
                logger.info(("ERROR: Document %s: Problem with retrieving "
                             "comments: %s MoreInfo: %s  Comments: %s") %
                            (str(docid), str(e), str(minfo), str(comments)))

            try:
                flags = cPickle.loads(
                    run_sql(
                        """SELECT data_value FROM bibdocmoreinfo
                       WHERE id_bibdoc=%s AND namespace=%s AND data_key=%s""",
                        (str(docid), '', 'flags'))[0][0])
                for flagname in minfo.get('flags', {}):
                    for version in minfo['flags'][flagname]:
                        for docformat in minfo['flags'][flagname][version]:
                            if minfo['flags'][flagname][version][docformat]:
                                are_equal = are_equal and \
                                    (docformat in flags[flagname][version])
                                if not (docformat in flags[flagname][version]):
                                    logger.info(("ERROR: Document %s: "
                                                 "Expected  %s") %
                                                (str(docid), str(minfo)))
            except Exception as e:
                logger.info(("ERROR: Document %s: Problem with retrieving "
                             "flags. %s MoreInfo: %s  flags: %s") %
                            (str(docid), str(e), str(minfo), str(flags)))

            if not are_equal:
                logger.info(
                    ("Failed to move MoreInfo structures from old "
                     "database to the new one docid: %s") % (str(docid), ))

    return are_equal
コード例 #57
 def get_extra_data(self):
     """Get extra data saved to the object."""
     return cPickle.loads(base64.b64decode(self._extra_data))
コード例 #58
 def get_data(self):
     """Get data saved in the object."""
     return cPickle.loads(base64.b64decode(self._data))
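
The setters that produce _extra_data and _data are not shown; a minimal sketch, assuming the stored attributes simply hold the base64 text of a pickle, would be:

 def set_data(self, value):
     """Hypothetical mirror of get_data(): pickle, then base64-encode."""
     self._data = base64.b64encode(cPickle.dumps(value))

 def set_extra_data(self, value):
     """Hypothetical mirror of get_extra_data()."""
     self._extra_data = base64.b64encode(cPickle.dumps(value))
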
コード例 #59
ファイル: make_db.py プロジェクト: lablup/TAC-GAN-eCommerce
    def save_caption_vectors_products(self, train_ratio=0.8):
        target, one_hot_targets, n_target = get_one_hot_targets(self.category_path)

        train_list = []
        with open(self.parse_data_path) as cap_f:
            for i, line in enumerate(cap_f):
                row = line.strip().split('\t')
                asin = row[0]
                categories = row[1]
                title = row[2]
                imgid = asin+'.jpg'
                onehot_cate = one_hot_encode_str_lbl(categories, target, one_hot_targets)
                train_list.append((imgid, onehot_cate, title))

                if i % self.n_log_print == 0:
                    print(i, train_list[-1])

        chunk_size = self.chunk_size
        datasize = len(train_list)
        self.logger.info('data size: %d' % datasize)
        n_chunk = datasize // chunk_size + 1

        chunk_list = []
        for index in range(n_chunk):
            start_index = index * chunk_size
            end_index = start_index + chunk_size
            chunk = train_list[start_index:end_index]
            chunk_list.append((index, chunk))

        for chunk in chunk_list:
            index = chunk[0]
            chunk = chunk[1]
            st = time.time()
            temp_list = []
            for i, (key, cate_vec, title) in enumerate(chunk):

                title_vec = self.parser.text2vec(title)
                temp_list.append((key, cate_vec, title_vec))

            chunk_path = (os.path.join(self.temp_dir_path, 'products_tv_{}.pkl'.format(index)))
            f_out = open(chunk_path, 'wb')
            p = cPickle.Pickler(f_out)
            p.dump(temp_list)
            p.clear_memo()
            f_out.close()

            self.logger.info("%s done: %d" % (chunk_path, time.time() - st))

        if not os.path.isdir(self.train_dir_path):
            os.makedirs(self.train_dir_path)

        all_train = train_ratio >= 1.0
        all_dev = train_ratio == 0.0

        train_indices, train_size = self.get_train_indices(datasize, train_ratio)

        dev_size = datasize - train_size
        if all_dev:
            train_size = 1
            dev_size = datasize
        if all_train:
            dev_size = 1
            train_size = datasize

        data_fout = h5py.File(os.path.join(self.train_dir_path, 'data.h5py'), 'w')
        train = data_fout.create_group('train')
        dev = data_fout.create_group('dev')
        self.create_dataset(train, train_size, n_target)
        self.create_dataset(dev, dev_size, n_target)

        sample_idx = 0
        dataset = {'train': train, 'dev': dev}
        num_samples = {'train': 0, 'dev': 0}
        chunk = {'train': self.init_chunk(chunk_size, n_target),
                 'dev': self.init_chunk(chunk_size, n_target)}

        # make h5file
        for input_chunk_idx in range(n_chunk):
            path = os.path.join(self.temp_dir_path, 'products_tv_{}.pkl'.format(input_chunk_idx))
            print('processing %s ...' % path)
            data = cPickle.loads(open(path, 'rb').read())
            for data_idx, (img_idx, cate_vec, title_vec) in enumerate(data):

                is_train = train_indices[sample_idx + data_idx]
                if all_dev:
                    is_train = False
                if all_train:
                    is_train = True

                c = chunk['train'] if is_train else chunk['dev']
                idx = c['num']
                c['docvec'][idx] = title_vec
                c['cate'][idx] = cate_vec
                c['asin'].append(np.string_(img_idx))
                c['num'] += 1

                for t in ['train', 'dev']:
                    if chunk[t]['num'] >= chunk_size:
                        self.copy_chunk(dataset[t], chunk[t], num_samples[t])
                        num_samples[t] += chunk[t]['num']
                        chunk[t] = self.init_chunk(chunk_size, n_target)

            sample_idx += len(data)

        for t in ['train', 'dev']:
            if chunk[t]['num'] > 0:
                self.copy_chunk(dataset[t], chunk[t], num_samples[t])
                num_samples[t] += chunk[t]['num']

        for div in ['train', 'dev']:
            ds = dataset[div]
            size = num_samples[div]
            ds['cate'].resize((size, n_target))
            ds['docvec'].resize((size, self.doc_vec_size))

        data_fout.close()
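
A rough sketch of reading the resulting HDF5 file back (the path under train_dir_path is abbreviated here, and the 'cate', 'docvec' and 'asin' dataset names are taken from the chunks built above; the exact layout produced by create_dataset() is an assumption):

import h5py

with h5py.File('data.h5py', 'r') as f:
    train = f['train']
    print(train['cate'].shape, train['docvec'].shape, len(train['asin']))
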
コード例 #60
def track_event(category=None, action=None, label=None):
    if not settings.analytics.getboolean('enabled'):
        return

    anonymousConfig = Config()
    anonymousConfig.anonimize_ip_address = True

    tracker = Tracker('UA-138214134-3', 'none', conf=anonymousConfig)

    try:
        if settings.analytics.visitor:
            visitor = pickle.loads(
                codecs.decode(settings.analytics.visitor.encode(), "base64"))
            if visitor.user_agent is None:
                visitor.user_agent = os.environ.get("SZ_USER_AGENT")
            if visitor.unique_id > int(0x7fffffff):
                visitor.unique_id = random.randint(0, 0x7fffffff)
        else:
            # No visitor stored yet: start tracking with a fresh one.
            visitor = Visitor()
            visitor.unique_id = random.randint(0, 0x7fffffff)
    except Exception:
        visitor = Visitor()
        visitor.unique_id = random.randint(0, 0x7fffffff)

    session = Session()
    event = Event(category=category, action=action, label=label, value=1)

    tracker.add_custom_variable(
        CustomVariable(index=1,
                       name='BazarrVersion',
                       value=os.environ["BAZARR_VERSION"],
                       scope=1))
    tracker.add_custom_variable(
        CustomVariable(index=2,
                       name='PythonVersion',
                       value=platform.python_version(),
                       scope=1))
    if settings.general.getboolean('use_sonarr'):
        tracker.add_custom_variable(
            CustomVariable(index=3,
                           name='SonarrVersion',
                           value=sonarr_version,
                           scope=1))
    else:
        tracker.add_custom_variable(
            CustomVariable(index=3,
                           name='SonarrVersion',
                           value='unused',
                           scope=1))
    if settings.general.getboolean('use_radarr'):
        tracker.add_custom_variable(
            CustomVariable(index=4,
                           name='RadarrVersion',
                           value=radarr_version,
                           scope=1))
    else:
        tracker.add_custom_variable(
            CustomVariable(index=4,
                           name='RadarrVersion',
                           value='unused',
                           scope=1))
    tracker.add_custom_variable(
        CustomVariable(index=5,
                       name='OSVersion',
                       value=platform.platform(),
                       scope=1))

    try:
        tracker.track_event(event, session, visitor)
    except Exception:
        logging.debug("BAZARR unable to track event.")
    else:
        settings.analytics.visitor = codecs.encode(pickle.dumps(visitor),
                                                   "base64").decode()
        with open(os.path.join(args.config_dir, 'config', 'config.ini'),
                  'w+') as handle:
            settings.write(handle)
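
The visitor persistence above is a pickle + base64 round trip through the ini-style settings file; a minimal standalone sketch, assuming the pyga Visitor class used above:

import codecs
import pickle

from pyga.requests import Visitor

visitor = Visitor()
visitor.unique_id = 12345
# Encode for storage as text...
encoded = codecs.encode(pickle.dumps(visitor), "base64").decode()
# ...and restore it the same way track_event() does on the next run.
restored = pickle.loads(codecs.decode(encoded.encode(), "base64"))
assert restored.unique_id == visitor.unique_id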