def _decrypt_config(self, config_class, encrypted_config, extra=None):
     if not encrypted_config:
         if extra:
             return config_class(None, *extra)
         else:
             return config_class()
     encrypted_config = base64.b64decode(encrypted_config)
     iv = encrypted_config[:16]
     cipher, iv = self._get_cipher(iv)
     pickled = cipher.decryptor().update(encrypted_config[16:])
     try:
         unpickled = pickle.loads(pickled, encoding="bytes")
     except TypeError:  # Python 2
         unpickled = pickle.loads(pickled)
         # Convert text created in Python 3
         config_dict = {}
         for k, v in unpickled.items():
             if isinstance(k, unicode):
                 k = k.encode("utf-8")
             if isinstance(v, unicode):
                 v = v.encode("utf-8")
             config_dict[k] = v
     else:  # Python 3
         # Convert bytes created in Python 2
         config_dict = {}
         for k, v in unpickled.items():
             if isinstance(k, bytes):
                 k = k.decode("utf-8")
             if isinstance(v, bytes):
                 v = v.decode("utf-8")
             config_dict[k] = v
     args = [config_dict]
     if extra:
         args += extra
     return self._construct_config(config_class, args)
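A minimal sketch of the matching encrypt path, assuming _get_cipher() wraps a stream-mode cipher (e.g. AES in CFB/CTR mode from the cryptography package) and that os, base64 and pickle are already imported by the module; the method name _encrypt_config is hypothetical:

def _encrypt_config(self, config_dict):
    # Prepend a fresh 16-byte IV, mirroring the slicing in _decrypt_config.
    iv = os.urandom(16)
    cipher, iv = self._get_cipher(iv)
    # Protocol 2 pickles load on both Python 2 and Python 3.
    pickled = pickle.dumps(config_dict, protocol=2)
    return base64.b64encode(iv + cipher.encryptor().update(pickled))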
Example #2
def load_data():
    """Loads movie_data, cust_data, and answers from pickles.

    Returns:
        The tuple (movie_data, cust_data, answers) with the objects loaded from
        their pickles.
    """
    # load movie data cache
    if isfile(CACHE_LOC + MOVIE_PICKLE):
        with open(CACHE_LOC + MOVIE_PICKLE, 'rb') as movie_file:
            movie_data = load(movie_file)
    else:
        movie_data = loads(urlopen(CACHE_URL + MOVIE_PICKLE).read())
    # load customer data cache
    if isfile(CACHE_LOC + CUSTOMER_PICKLE):
        with open(CACHE_LOC + CUSTOMER_PICKLE, 'rb') as cust_file:
            cust_data = load(cust_file)
    else:
        cust_data = loads(urlopen(CACHE_URL + CUSTOMER_PICKLE).read())
    # load answers
    if isfile(CACHE_LOC + ANSWER_PICKLE):
        with open(CACHE_LOC + ANSWER_PICKLE, 'rb') as answer_file:
            answers = load(answer_file)
    else:
        answers = loads(urlopen(CACHE_URL + ANSWER_PICKLE).read())
    return (movie_data, cust_data, answers)
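The three branches above repeat one cache-or-download pattern; a hedged refactor sketch (the helper name _load_cached is hypothetical, and it assumes the same isfile/load/loads/urlopen names the module already imports):

def _load_cached(pickle_name):
    """Return the unpickled object from the local cache, else fetch it."""
    path = CACHE_LOC + pickle_name
    if isfile(path):
        with open(path, 'rb') as cached_file:
            return load(cached_file)
    return loads(urlopen(CACHE_URL + pickle_name).read())

# load_data() then reduces to:
#     return (_load_cached(MOVIE_PICKLE), _load_cached(CUSTOMER_PICKLE),
#             _load_cached(ANSWER_PICKLE))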
Example #3
def test_pickling_and_unpickling_encoded_file():
    # See https://bitbucket.org/pytest-dev/pytest/pull-request/194
    # pickle.loads() raises infinite recursion if
    # EncodedFile.__getattr__ is not implemented properly
    ef = capture.EncodedFile(None, None)
    ef_as_str = pickle.dumps(ef)
    pickle.loads(ef_as_str)
Example #4
def test_pickle():
    # Check picklability.
    import pickle

    # Adaboost classifier
    for alg in ['SAMME', 'SAMME.R']:
        obj = AdaBoostClassifier(algorithm=alg)
        obj.fit(iris.data, iris.target)
        score = obj.score(iris.data, iris.target)
        s = pickle.dumps(obj)

        obj2 = pickle.loads(s)
        assert_equal(type(obj2), obj.__class__)
        score2 = obj2.score(iris.data, iris.target)
        assert_equal(score, score2)

    # Adaboost regressor
    obj = AdaBoostRegressor(random_state=0)
    obj.fit(boston.data, boston.target)
    score = obj.score(boston.data, boston.target)
    s = pickle.dumps(obj)

    obj2 = pickle.loads(s)
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(boston.data, boston.target)
    assert_equal(score, score2)
Example #5
def test_pickle():
    import pickle

    # classification
    obj = tree.DecisionTreeClassifier()
    obj.fit(iris.data, iris.target)
    score = obj.score(iris.data, iris.target)
    s = pickle.dumps(obj)

    obj2 = pickle.loads(s)
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(iris.data, iris.target)
    assert score == score2, \
        "Failed to generate same score after pickling (classification)"

    # regression
    obj = tree.DecisionTreeRegressor()
    obj.fit(boston.data, boston.target)
    score = obj.score(boston.data, boston.target)
    s = pickle.dumps(obj)

    obj2 = pickle.loads(s)
    assert_equal(type(obj2), obj.__class__)
    score2 = obj2.score(boston.data, boston.target)
    assert score == score2, \
        "Failed to generate same score after pickling (regression)"
Example #6
    def test_copy_pickle(self):

        d = Deque('abc')

        e = d.__copy__()
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        e = Deque(d)
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(d, proto)
            e = pickle.loads(s)
            self.assertNotEqual(id(d), id(e))
            self.assertEqual(type(d), type(e))
            self.assertEqual(list(d), list(e))

        d = Deque('abcde', maxlen=4)

        e = d.__copy__()
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        e = Deque(d)
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            s = pickle.dumps(d, proto)
            e = pickle.loads(s)
            self.assertNotEqual(id(d), id(e))
            self.assertEqual(type(d), type(e))
            self.assertEqual(list(d), list(e))
Example #7
def install(portal, reinstall=False):
    acl_users = getToolByName(portal, 'acl_users')

    # Put an apachepas multiplugin in the acl_users folder, if there isn't one:
    pluginId = _firstIdOfClass(acl_users, ApacheAuthPluginHandler)
    if not pluginId:
        addautousermakerplugin(acl_users)

    # Activate it:
    plugins = acl_users.plugins
    for interface in [IAuthenticationPlugin, IExtractionPlugin, IChallengePlugin]:
        try:
            plugins.activatePlugin(interface, pluginId)  # plugins is a PluginRegistry
        except KeyError:
            continue
    while plugins.listPluginIds(IChallengePlugin)[0] != pluginId:
        plugins.movePluginsUp(IChallengePlugin, (pluginId,))

    if reinstall:
        import pickle
        plugin = getattr(plugins, pluginId)
        #logger.info("plugin = %s" % repr(plugin))
        # Get the configuration out of the property, and delete the property.
        try:
            prop = "\n".join(acl_users.getProperty('aum_config'))
            #logger.info("aum_config = %s" % repr(prop))
            config = pickle.loads(prop)
        except Exception, err:
            logger.info("error getting config: %s of %r" % (str(err), repr(err)))
        try:
            prop = "\n".join(acl_users.getProperty('aum_mappings'))
            #logger.info("aum_mappings = %s" % repr(prop))
            mappings = pickle.loads(prop)
        except Exception, err:
            logger.info("error getting mappings: %s of %r" % (str(err), repr(err)))
Example #8
    def test_make_proxy_disc(self):
        abc = DiscreteVariable("abc", values="abc", ordered=True)
        abc1 = abc.make_proxy()
        abc2 = abc1.make_proxy()
        self.assertIs(abc.master, abc)
        self.assertIs(abc1.master, abc)
        self.assertIs(abc2.master, abc)
        self.assertEqual(abc, abc1)
        self.assertEqual(abc, abc2)
        self.assertEqual(abc1, abc2)
        self.assertEqual(hash(abc), hash(abc1))
        self.assertEqual(hash(abc1), hash(abc2))

        abcx = DiscreteVariable("abc", values="abc", ordered=True)
        self.assertNotEqual(abc, abcx)

        abc1p = pickle.loads(pickle.dumps(abc1))
        self.assertIs(abc1p.master, abc)
        self.assertEqual(abc1p, abc)

        abcp, abc1p, abc2p = pickle.loads(pickle.dumps((abc, abc1, abc2)))
        self.assertIs(abcp.master, abcp)
        self.assertIs(abc1p.master, abcp.master)
        self.assertIs(abc2p.master, abcp.master)
        self.assertEqual(abcp, abc1p)
        self.assertEqual(abcp, abc2p)
        self.assertEqual(abc1p, abc2p)
Example #9
def server():
	global connection
	global board
	global newTiles
	global WAIT
	# Create a TCP/IP socket
	server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
	# Bind the socket to the port
	if int(sys.argv[1]) == 1:
		server_address = ('localhost', PORT_NUMBER_2)
	else:
		server_address = ('localhost', PORT_NUMBER_1)
	print >>sys.stderr, 'starting up on %s port %s' % server_address
	server_sock.bind(server_address)
	# Listen for incoming connections
	server_sock.listen(1)

	while True:
	    # Wait for a connection
	    print >>sys.stderr, 'waiting for a connection'
	    connection, client_address = server_sock.accept()
	    try:
	        print >>sys.stderr, 'connection from', client_address
	        state = "NORMAL"
	        # Receive the data in small chunks and retransmit it
	        while True:
	            data = connection.recv(1000)
	            # print >>sys.stderr, 'received "%s"' % data
	            if data:
	            	if data == "BOARD" or data == "INITIAL" or data == "NEW_TILES":
	            		state = data
	            		print "STATE: %s"%state
	            	else:
		            	if state == "BOARD":
		            		board = pickle.loads(data)
		            		print colored("The other player has made a move",'red')
		            		printBoard()
		            		print "received board"
		            		WAIT = False
		            	elif state == "INITIAL":
		            		board = pickle.loads(data)
		            		print "received initial"
		            		printBoard()
		            	elif state == "NEW_TILES":
		            		newTiles = pickle.loads(data)
		            		print newTiles
		            		print "received new tiles"
		            	state = "NORMAL"
		            	print "STATE: NORMAL"
	            else:
	                break
	    except KeyboardInterrupt:
	    	print "Closed connection on server"
	    	connection.close()
	    	break     
	    finally:
	        # Clean up the connection
	    	print "Closed connection on server"
	        connection.close()
	        break
Example #10
def initiate(main_port, tracker_addr, uname, pwd):
	sock = create_sock(main_port)
	sock.connect((tracker_addr["ip"], tracker_addr["port"]))
	sock.send(pickle.dumps({"cat" : "get", "item" : "peerlist"}))
	r = sock.recv(1024)
	peerlist = pickle.loads(r)
	sock.close()
	print(peerlist)
	modpath = input("Enter path of module : ")
	if os.path.isfile(modpath):
		command = "python3 " + modpath + " initiate"
		threading.Thread(target = caller, args = (command, ), daemon = True).start()
	else:
		initiate(main_port, tracker_addr, uname, pwd)
	time.sleep(3)
	sock = create_sock(main_port)
	sock.connect(("127.0.0.1", 54321))
	sock.send(pickle.dumps("data"))
	data = pickle.loads(transfer.receiver(sock))
	sock.close()
	print("Data received : ")
	print(data)

	results = initiate2(main_port, tracker_addr, uname, pwd, modpath, data)
	sock = create_sock()
	sock.connect(("127.0.0.1", 54321))
	transfer.sender(sock, pickle.dumps(results))
	sock.close()
Example #11
 def test_pickle(self):
     vlist = _create_variable_list(3)
     ctuple = matrix_constraint(
         numpy.array([[0,0,0],[0,0,0]]),
         x=vlist)
     self.assertTrue((ctuple.lb == -numpy.inf).all())
     self.assertTrue((ctuple.ub == numpy.inf).all())
     self.assertTrue((ctuple.equality == False).all())
     self.assertEqual(ctuple.parent, None)
     ctuple_up = pickle.loads(
         pickle.dumps(ctuple))
     self.assertTrue((ctuple_up.lb == -numpy.inf).all())
     self.assertTrue((ctuple_up.ub == numpy.inf).all())
     self.assertTrue((ctuple_up.equality == False).all())
     self.assertEqual(ctuple_up.parent, None)
     b = block()
     b.ctuple = ctuple
     self.assertIs(ctuple.parent, b)
     bup = pickle.loads(
         pickle.dumps(b))
     ctuple_up = bup.ctuple
     self.assertTrue((ctuple_up.lb == -numpy.inf).all())
     self.assertTrue((ctuple_up.ub == numpy.inf).all())
     self.assertTrue((ctuple_up.equality == False).all())
     self.assertIs(ctuple_up.parent, bup)
Example #12
    def test_copy_pickle(self):

        d = Deque('abc')

        e = d.__copy__()
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        e = Deque(d)
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        s = pickle.dumps(d)
        e = pickle.loads(s)
        self.assertNotEqual(id(d), id(e))
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        d = Deque('abcde', maxlen=4)

        e = d.__copy__()
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        e = Deque(d)
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))

        s = pickle.dumps(d)
        e = pickle.loads(s)
        self.assertNotEqual(id(d), id(e))
        self.assertEqual(type(d), type(e))
        self.assertEqual(list(d), list(e))
Example #13
    def test_pickle_parameters(self):
        # test that we can pickle a Parameters object
        p = Parameters()
        p.add('a', 10, True, 0, 100)
        p.add('b', 10, True, 0, 100, 'a * sin(1)')
        p.update_constraints()
        p._asteval.symtable['abc'] = '2 * 3.142'

        pkl = pickle.dumps(p, -1)
        q = pickle.loads(pkl)

        q.update_constraints()
        assert_(p == q)
        assert_(p is not q)

        # now test if the asteval machinery survived
        assert_(q._asteval.symtable['abc'] == '2 * 3.142')

        # check that unpickling of Parameters is not affected by exprs that
        # refer to Parameters added later on. In the following example,
        # var_0.expr refers to var_1, which appears later in the Parameters
        # OrderedDict.
        p = Parameters()
        p.add('var_0', value=1)
        p.add('var_1', value=2)
        p['var_0'].expr = 'var_1'
        pkl = pickle.dumps(p)
        q = pickle.loads(pkl)
Example #14
    def testDatabaseFixes(self):
        # Hack the pickle to make it refer to a timezone abbreviation
        # that does not match anything. The unpickler should be able
        # to repair this case
        tz = pytz.timezone('Australia/Melbourne')
        p = pickle.dumps(tz)
        tzname = tz._tzname
        hacked_p = p.replace(_byte_string(tzname), _byte_string('???'))
        self.assertNotEqual(p, hacked_p)
        unpickled_tz = pickle.loads(hacked_p)
        self.assertTrue(tz is unpickled_tz)

        # Simulate a database correction. In this case, the incorrect
        # data will continue to be used.
        p = pickle.dumps(tz)
        new_utcoffset = tz._utcoffset.seconds + 42

        # Python 3 introduced a new pickle protocol where numbers are stored in
        # hexadecimal representation. Here we extract the pickle
        # representation of the number for the current Python version.
        old_pickle_pattern = pickle.dumps(tz._utcoffset.seconds)[3:-1]
        new_pickle_pattern = pickle.dumps(new_utcoffset)[3:-1]
        hacked_p = p.replace(old_pickle_pattern, new_pickle_pattern)

        self.assertNotEqual(p, hacked_p)
        unpickled_tz = pickle.loads(hacked_p)
        self.assertEqual(unpickled_tz._utcoffset.seconds, new_utcoffset)
        self.assertTrue(tz is not unpickled_tz)
Example #15
    def test01(self):
        tabname = "test01"
        colname = 'cool numbers'
        try:
            self.tdb.Drop(tabname)
        except dbtables.TableDBError:
            pass
        self.tdb.CreateTable(tabname, [colname])
        import sys
        if sys.version_info[0] < 3 :
            self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159, 1)})
        else :
            self.tdb.Insert(tabname, {colname: pickle.dumps(3.14159,
                1).decode("iso8859-1")})  # 8 bits

        if verbose:
            self.tdb._db_print()

        values = self.tdb.Select(
            tabname, [colname], conditions={colname: None})

        import sys
        if sys.version_info[0] < 3 :
            colval = pickle.loads(values[0][colname])
        else :
            colval = pickle.loads(bytes(values[0][colname], "iso8859-1"))
        self.assert_(colval > 3.141)
        self.assert_(colval < 3.142)
Example #16
File: sessions.py Project: fay/tao
    def __getitem__(self, keyname):
        """
        Get item from session data.

        keyname: The keyname of the mapping.
        """
        # flash messages don't go in the datastore

        if self.integrate_flash and (keyname == "flash"):
            return self.flash.msg
        if keyname in self.cache:
            # UNPICKLING CACHE return pickle.loads(str(self.cache[keyname]))
            return self.cache[keyname]
        if keyname in self.cookie_vals:
            return self.cookie_vals[keyname]
        if hasattr(self, "session"):
            mc = memcache.get("sid-" + str(self.session.key()))
            if mc is not None:
                if keyname in mc:
                    return mc[keyname]
            data = self._get(keyname)
            if data:
                # UNPICKLING CACHE self.cache[keyname] = data.content
                self.cache[keyname] = pickle.loads(data.content)
                self._set_memcache()
                return pickle.loads(data.content)
            else:
                raise KeyError(str(keyname))
        raise KeyError(str(keyname))
Example #17
File: runner.py Project: Flimm/django
 def _confirm_picklable(self, obj):
     """
     Confirm that obj can be pickled and unpickled as multiprocessing will
     need to pickle the exception in the child process and unpickle it in
     the parent process. Let the exception rise, if not.
     """
     pickle.loads(pickle.dumps(obj))
Example #18
 def test_copying(self):
     # Check that counters are copyable, deepcopyable, picklable, and
     # have a repr/eval round-trip
     words = Counter('which witch had which witches wrist watch'.split())
     update_test = Counter()
     update_test.update(words)
     for i, dup in enumerate([
                 words.copy(),
                 copy.copy(words),
                 copy.deepcopy(words),
                 pickle.loads(pickle.dumps(words, 0)),
                 pickle.loads(pickle.dumps(words, 1)),
                 pickle.loads(pickle.dumps(words, 2)),
                 pickle.loads(pickle.dumps(words, -1)),
                 cPickle.loads(cPickle.dumps(words, 0)),
                 cPickle.loads(cPickle.dumps(words, 1)),
                 cPickle.loads(cPickle.dumps(words, 2)),
                 cPickle.loads(cPickle.dumps(words, -1)),
                 eval(repr(words)),
                 update_test,
                 Counter(words),
                 ]):
         msg = (i, dup, words)
         self.assertTrue(dup is not words)
         self.assertEquals(dup, words)
         self.assertEquals(len(dup), len(words))
         self.assertEquals(type(dup), type(words))
Example #19
    def test_discover_with_init_modules_that_fail_to_import(self):
        vfs = {abspath('/foo'): ['my_package'],
               abspath('/foo/my_package'): ['__init__.py', 'test_module.py']}
        self.setup_import_issue_package_tests(vfs)
        import_calls = []
        def _get_module_from_name(name):
            import_calls.append(name)
            raise ImportError("Cannot import Name")
        loader = unittest.TestLoader()
        loader._get_module_from_name = _get_module_from_name
        suite = loader.discover(abspath('/foo'))

        self.assertIn(abspath('/foo'), sys.path)
        self.assertEqual(suite.countTestCases(), 1)
        # Errors loading the suite are also captured for introspection.
        self.assertNotEqual([], loader.errors)
        self.assertEqual(1, len(loader.errors))
        error = loader.errors[0]
        self.assertTrue(
            'Failed to import test module: my_package' in error,
            'missing error string in %r' % error)
        test = list(list(suite)[0])[0] # extract test from suite
        with self.assertRaises(ImportError):
            test.my_package()
        self.assertEqual(import_calls, ['my_package'])

        # Check picklability
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            pickle.loads(pickle.dumps(test, proto))
Example #20
def test_max_pooling_old_pickle():
    brick = MaxPooling((3, 4))
    brick.allocate()
    # Simulate old pickle, before #899.
    del brick.ignore_border
    del brick.mode
    del brick.padding
    # Pickle in this broken state and re-load.
    broken_pickled = pickle.dumps(brick)
    loaded = pickle.loads(broken_pickled)
    # Same shape, same step.
    assert brick.pooling_size == loaded.pooling_size
    assert brick.step == loaded.step
    # Check that the new attributes were indeed added.
    assert hasattr(loaded, "padding") and loaded.padding == (0, 0)
    assert hasattr(loaded, "mode") and loaded.mode == "max"
    assert hasattr(loaded, "ignore_border") and not loaded.ignore_border
    try:
        loaded.apply(tensor.tensor4())
    except Exception:
        raise AssertionError("failed to apply on unpickled MaxPooling")
    # Make sure we're not overriding these attributes wrongly.
    new_brick = MaxPooling((4, 3), padding=(2, 1))
    new_brick_unpickled = pickle.loads(pickle.dumps(new_brick))
    assert new_brick_unpickled.padding == (2, 1)
    assert new_brick_unpickled.ignore_border
Example #21
File: serialize.py Project: 2t7/ipython
def unpack_apply_message(bufs, g=None, copy=True):
    """unpack f,args,kwargs from buffers packed by pack_apply_message()
    Returns: original f,args,kwargs"""
    bufs = list(bufs) # allow us to pop
    assert len(bufs) >= 2, "not enough buffers!"
    if not copy:
        for i in range(2):
            bufs[i] = bufs[i].bytes
    f = uncan(pickle.loads(bufs.pop(0)), g)
    info = pickle.loads(bufs.pop(0))
    arg_bufs, kwarg_bufs = bufs[:info['narg_bufs']], bufs[info['narg_bufs']:]
    
    args = []
    for i in range(info['nargs']):
        arg, arg_bufs = deserialize_object(arg_bufs, g)
        args.append(arg)
    args = tuple(args)
    assert not arg_bufs, "Shouldn't be any arg bufs left over"
    
    kwargs = {}
    for key in info['kw_keys']:
        kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g)
        kwargs[key] = kwarg
    assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over"
    
    return f,args,kwargs
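For reference, the buffer layout unpack_apply_message() assumes, read directly off the code above:

# bufs[0]                  -> canned function f, pickled
# bufs[1]                  -> info dict with 'nargs', 'narg_bufs', 'kw_keys', pickled
# bufs[2 : 2 + narg_bufs]  -> serialized positional arguments
# bufs[2 + narg_bufs :]    -> serialized keyword arguments, in kw_keys order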
Example #22
 def getnewaddress(self, accountname = None):
     if not accountname:
         accountname = "account"
     walletdb = self.open(writable = True)
     # if wallet is not initialized
     if 'accounts' not in walletdb:
         print "Wallet not initialized ... quitting!"
         return None
     # if wallet is initialized
     subaccount = self.getnewsubaccount()
     accountnames = loads(walletdb['accounts'])
     print "account names: ", accountnames
     if accountname in accountnames:
         account = loads(walletdb[accountname])
         account[subaccount['address']] = subaccount
     else:
         print "account: ", accountname, " not in accounts"
         print "creating new account" 
         account = {subaccount['address']: subaccount}
         # add the new account name to account names; list.append() mutates
         # in place and returns None, so append first and pickle the list
         accountnames.append(accountname)
         walletdb['accounts'] = dumps(accountnames)
     walletdb[accountname] = dumps(account)
     walletdb.sync()
     walletdb.close()
     print subaccount
     return subaccount['public_key'], subaccount['address']
Example #23
 def _call_function(self, task_id, function_name=None, args=None, kwargs=None):
     try:
         if not function_name:
             try:
                 task = Task.objects.get(pk=task_id)
                 function_name = task.function_name
                 args = pickle.loads(str(task.args))
                 kwargs = pickle.loads(str(task.kwargs))
             except Exception, e:
                 self.logger.info('Could not get task with id %s:\n%s' % (task_id, e))
                 return
             
         self.logger.info('Calling %s' % function_name)
         #self.logger.info('Task ID: %s' % task_id)
         try:
             function = self.func_cache[function_name]
         except KeyError:
             parts = function_name.split('.')
             module_name = '.'.join(parts[:-1])
             member_name = parts[-1]
             if not module_name in sys.modules:
                 __import__(module_name)
             function = getattr(sys.modules[module_name], member_name)
             self.func_cache[function_name] = function
         function(*args, **kwargs)
         self.logger.info('Called %s successfully' % function_name)
         Task.objects.get(pk=task_id).delete()
Example #24
def pickle_loads(s, pickle_type=None):
    if pickle_type is None:
        pickle_type = _pickle_type

    if pickle_type=='json_dict':
        import jsonpickle.unpickler
        j = jsonpickle.unpickler.Unpickler()
        return j.restore(s)
    elif pickle_type=='json':
        import jsonpickle
        return jsonpickle.decode(s)
    elif pickle_type=='yaml':
        import yaml
        return yaml.load(s)
    elif pickle_type=='pickle_hp':
        import pickle
        # loads() takes no protocol argument; the protocol is detected from
        # the stream, and HIGHEST_PROTOCOL only applies to dumps()
        return pickle.loads(s)
    elif pickle_type=='pickle':
        import pickle
        return pickle.loads(s)
    elif pickle_type=='cPickle':
        import cPickle
        return cPickle.loads(s)
    else:
        raise Exception("unkown pickle type")
Example #25
    def test_pickling_compat(self):
        event = self.create_event(
            data={'tags': [
                ('logger', 'foobar'),
                ('site', 'foo'),
                ('server_name', 'bar'),
            ]}
        )

        # Ensure we load and memoize the interfaces as well.
        assert len(event.interfaces) > 0

        # When we pickle an event we need to make sure our canonical code
        # does not appear here or it breaks old workers.
        data = pickle.dumps(event, protocol=2)
        assert 'canonical' not in data

        # For testing we remove the backwards compat support in the
        # `NodeData` as well.
        nodedata_getstate = NodeData.__getstate__
        del NodeData.__getstate__

        # Old worker loading
        try:
            event2 = pickle.loads(data)
            assert event2.data == event.data
        finally:
            NodeData.__getstate__ = nodedata_getstate

        # New worker loading
        event2 = pickle.loads(data)
        assert event2.data == event.data
Example #26
def network_synchronize(conn):
	# PUSH
	for obj in objectPushQueue:
		if(obj['pushed'] == False):
			send_command(s, obj['operation'] + " " + obj['object'])
			result = retrieve_command(s)
			if(obj['operation'] == 'addObject'):
				for drawobj in canvasDrawObject:
					if drawobj.local_id == pickle.loads(eval(obj['object'])).local_id:
						drawobj.id = result[0]
						break
			obj['pushed'] = True

	# PULL
	send_command(s, 'pull')
	result = retrieve_command(s)[0]
	pulled_object = pickle.loads(eval(result))
	for obj in pulled_object:
		if(obj['command'] == 'addObject'):
			new_object = pickle.loads(eval(obj['params']))
			new_object.local_id = ''.join(random.choice(string.ascii_uppercase + string.digits + string.ascii_lowercase) for x in range(20))
			canvasDrawObject.append(new_object) 
		elif(obj['command'] == 'modifyObject'):
			new_object = pickle.loads(eval(obj['params']))
			for drawobj in canvasDrawObject:
				if(drawobj.id == new_object.id):
					canvasDrawObject[canvasDrawObject.index(drawobj)] = new_object

		elif(obj['command'] == 'removeObject'):
			new_object = pickle.loads(eval(obj['params']))
			for drawobj in canvasDrawObject:
				if(drawobj.id == new_object.id):
					canvasDrawObject.remove(drawobj)
			pass
Example #27
	def run(self):
		self.log("def(MythBoxeeMainUIUpdater.Run): Started")
		while not self._stopEvent.isSet():
			self.log("def(MythBoxeeMainUIUpdater.Run): Run")
			
			if self.config.GetValue("cache.titles"):
				self.titles = pickle.loads(self.config.GetValue("cache.titles"))
			if self.config.GetValue("cache.banners"):
				self.banners = pickle.loads(self.config.GetValue("cache.banners"))
			if self.config.GetValue("cache.series"):
				self.series = pickle.loads(self.config.GetValue("cache.series"))
			if self.config.GetValue("cache.shows"):
				self.shows = pickle.loads(self.config.GetValue("cache.shows"))

			if (len(mc.GetWindow(14001).GetList(1030).GetItems())) == 0 or self.config.GetValue("cache.changed") == "true":
				self.log("def(MythBoxeeMainUIUpdater.Run): Change!")
				self.config.SetValue("loadingmain", "true")
				self.SetShows()
				self.config.SetValue("cache.changed", "false")
			else:
				self.log("def(MythBoxeeMainUIUpdater.Run): No Change")
				
				## Put focus on last selected item
				if self.config.GetValue("CurrentShowItemID"):
					itemId = int(self.config.GetValue("CurrentShowItemID"))
					if itemId and itemId != 0:
						mc.GetWindow(14001).GetList(1030).SetFocusedItem(itemId)

				self.config.Reset("loadingmain")
				self._sleepPeriod = 10

			## Sleep
			self._stopEvent.wait(self._sleepPeriod)
Example #28
File: cache.py Project: 424f/sc2sng
    def get(self, key):
        """
        Used to return the cache value associated with the key passed.

        Args:
            key: The key of the value to retrieve.

        Returns the value of the cache item.
        """
        mc = memcache.get('cache-%s' % (key))
        if mc:
            if 'AEU_Events' in sys.modules['__main__'].__dict__:
                sys.modules['__main__'].AEU_Events.fire_event('cacheReadFromMemcache')
            if 'AEU_Events' in sys.modules['__main__'].__dict__:
                sys.modules['__main__'].AEU_Events.fire_event('cacheRead')
            return mc
        result = self._read(key)
        if result:
            timeout = result.timeout - datetime.datetime.now()
            memcache.set('cache-%s' % (key), pickle.loads(result.value),
               int(timeout.seconds))
            if 'AEU_Events' in sys.modules['__main__'].__dict__:
                sys.modules['__main__'].AEU_Events.fire_event('cacheRead')
            return pickle.loads(result.value)
        else:
            raise KeyError
Example #29
File: algo.py Project: mdavezac/LaDa
def test():
  from pickle import loads, dumps
  from pylada.vasp.incar._params import Algo
  import pylada
  pylada.is_vasp_4 = True

  # default.
  a = Algo()
  assert a.value == "Fast"
  # wrong argument.
  try: a.value = 0
  except: pass
  else: raise RuntimeError()
  try: a.value = "WTF"
  except: pass
  else: raise RuntimeError()

  # possible inputs and some.
  d = {
      'Very_Fast': ['very fast', 'VERY-fAst', 'very_FAST', 'v'],
      'VeryFast': ['very fast', 'VERY-fAst', 'very_FAST', 'v'],
      'Fast': ['fast', 'f'],
      'Normal': ['normal', 'n'],
      'Damped': ['damped', 'd'],
      'Diag': ['diag'],
      'All': ['all', 'a'],
      'Nothing': ['nothing'],
      'chi': ['chi'],
      'GW': ['gw'],
      'GW0': ['gw0'],
      'scGW': ['scgw'],
      'scGW0': ['scgw0'],
      'Conjugate': ['conjugate', 'c'],
      'Subrot': ['subrot', 's'],
      'Eigenval': ['eigenval', 'e']
  }
  vasp5 = 'Subrot', 'chi', 'GW', 'GW0', 'scGW', 'scGW0', 'Conjugate', 'Eigenval', 'Exact', 'Nothing'
  for isvasp4 in [True, False]:
    pylada.is_vasp_4 = isvasp4
    for key, items in d.iteritems():
      for value in items:
        if key in vasp5 and isvasp4:
          try: a.value = value
          except: pass 
          else: raise RuntimeError((value, key))
        elif key == 'VeryFast' and isvasp4:
          a.value = value
          assert a.value == 'Very_Fast'
          assert loads(dumps(a)).incar_string() == "ALGO = {0}".format('Very_Fast')
          assert repr(a) == "Algo('Very_Fast')"
        elif key == 'Very_Fast' and not isvasp4:
          a.value = value
          assert a.value == 'VeryFast'
          assert loads(dumps(a)).incar_string() == "ALGO = {0}".format('VeryFast')
          assert repr(a) == "Algo('VeryFast')"
        else:
          a.value = value
          assert a.value == key
          assert loads(dumps(a)).incar_string() == "ALGO = {0}".format(key)
          assert repr(a) == "Algo({0!r})".format(key)
Example #30
 def getbalance(self, accountname):
     if not accountname:
         accountname = "account"
     walletdb = self.open()
     # if wallet is not initialized, return
     if 'accounts' not in walletdb:
         print "Wallet not initialized ... quitting!"
         return None
     # if wallet is initialized
     accountnames = loads(walletdb['accounts'])
     if accountname not in accountnames:
         print "Error: Account not found"
         return
     # if account is in wallet
     account = loads(walletdb['account']) # FIXME: account = loads(walletdb[accountname])
     walletdb.close()
     for address, subaccount in account.iteritems():
         transactions = self.chaindb.listreceivedbyaddress(subaccount['address'])
         subaccount['balance'] = 0
         print transactions
         for transaction in transactions.itervalues():
             print transaction
             subaccount['balance'] = subaccount['balance'] + transaction['value']
         subaccount['received'] = transactions
     # sanitize the return values ... convert from bin to hex
     for address, subaccount in account.iteritems():
         subaccount['public_key'] = 1234
         subaccount['private_key'] = 5678
     return account
Example #31
def test_modelfuture_pickle_smoke(mock_client):
    mf = _model.ModelFuture(job_id=7, run_id=13, client=setup_client_mock())
    mf.result()
    mf_pickle = pickle.dumps(mf)
    pickle.loads(mf_pickle)
Example #32
    def list():
        processes = Process.query.filter_by(user_id=current_user.id)
        changed = False

        res = []
        for p in processes:
            status, updated = BatchProcess.update_process_info(p)
            if not status:
                continue

            if not changed:
                changed = updated

            if p.start_time is None or (p.acknowledge is not None
                                        and p.end_time is None):
                continue

            execution_time = None

            stime = parser.parse(p.start_time)
            etime = parser.parse(p.end_time or get_current_time())

            execution_time = BatchProcess.total_seconds(etime - stime)
            desc = ""
            try:
                desc = loads(p.desc.encode('latin-1')) if \
                    IS_PY2 and hasattr(p.desc, 'encode') else loads(p.desc)
            except UnicodeDecodeError:
                desc = loads(p.desc.encode('utf-8')) if \
                    IS_PY2 and hasattr(p.desc, 'encode') else loads(p.desc)
            except Exception:
                desc = loads(p.desc.encode('utf-8', 'ignore')) if \
                    IS_PY2 and hasattr(p.desc, 'encode') else loads(p.desc)

            details = desc

            if isinstance(desc, IProcessDesc):
                args = []
                args_csv = StringIO(
                    p.arguments.encode('utf-8') if hasattr(
                        p.arguments, 'decode') else p.arguments)
                args_reader = csv.reader(args_csv, delimiter=str(','))
                for arg in args_reader:
                    args = args + arg
                details = desc.details(p.command, args)
                desc = desc.message

            res.append({
                'id': p.pid,
                'desc': desc,
                'details': details,
                'stime': stime,
                'etime': p.end_time,
                'exit_code': p.exit_code,
                'acknowledge': p.acknowledge,
                'execution_time': execution_time,
                'process_state': p.process_state
            })

        if changed:
            db.session.commit()

        return res
Example #33
 def __new__(cls, *args, **kwargs):
     return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs)))
Example #34
    def loads(self, data: bytes, ignore_config: bool = False):
        """
        Deserialize the serialized report

        Args:
            data: The bytes of a serialize ProfileReport object.
            ignore_config: If set to True, the ProfileReport config will be overwritten with the current global Config.
                           If set to False, the function checks if the configs match

        Raises:
            ValueError: if ignore_config is set to False and the configs do not match.

        Returns:
            self
        """
        import pickle

        try:
            (
                df_hash,
                loaded_config,
                loaded_description_set,
                loaded_report,
                loaded_title,
            ) = pickle.loads(data)
        except Exception as e:
            raise ValueError(f"Failed to load data: {e}")

        if not all(
            (
                df_hash is None or isinstance(df_hash, str),
                loaded_title is None or isinstance(loaded_title, str),
                isinstance(loaded_config, Config),
                loaded_description_set is None
                or isinstance(loaded_description_set, dict),
                loaded_report is None or isinstance(loaded_report, Root),
            )
        ):
            raise ValueError(
                f"Failed to load data: file may be damaged or from an incompatible version"
            )
        if (df_hash == self.df_hash) and (
            ignore_config
            or config == loaded_config
            or (config.is_default and self.df is None)  # load to an empty ProfileReport
        ):
            # Set description_set and report if they are None, or issue a warning.
            if self._description_set is None:
                self._description_set = loaded_description_set
            else:
                warnings.warn(
                    f"The description set of current ProfileReport is not None. It won't be loaded."
                )
            if self._report is None:
                self._report = loaded_report
            else:
                warnings.warn(
                    f"The report of current ProfileReport is not None. It won't be loaded."
                )

            # overwrite config if ignore_config set to True
            if ignore_config:
                config.update(loaded_config)

            # warn if version not equal
            if (
                loaded_description_set is not None
                and loaded_description_set["package"]["pandas_profiling_version"]
                != __version__
            ):
                warnings.warn(
                    f"The package version specified in the loaded data is not equal to the version installed. "
                    f"Currently running on pandas-profiling {__version__} , while loaded data is generated by pandas_profiling, {loaded_description_set['package']['pandas_profiling_version']}."
                )

            # set df_hash and title
            self._df_hash = df_hash
            self._title = loaded_title

        else:
            raise ValueError(
                "DataFrame or Config do not match with the current ProfileReport. "
                'If you want to overwrite the current configuration, use "ignore_config=True"'
            )
        return self
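A hedged round-trip sketch, assuming a dumps() counterpart on ProfileReport (recent pandas-profiling releases expose both):

# report = ProfileReport(df)
# data = report.dumps()                      # bytes
# restored = ProfileReport(df).loads(data)   # validates hash/config, returns self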
Example #35
 def restore(self, objs: bytes) -> None:
     objs = pickle.loads(objs)
     self.sync_filters(objs["filters"])
     for pid, state in objs["state"].items():
         self.policy_map[pid].set_state(state)
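A hedged sketch of the matching save side implied by restore(); only the "filters"/"state" keys come from the code above, the accessor names below are assumptions:

# def save(self) -> bytes:
#     return pickle.dumps({
#         "filters": self.get_filters(),   # hypothetical accessor
#         "state": {pid: policy.get_state()
#                   for pid, policy in self.policy_map.items()},
#     })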
Example #36
import pickle

with open('emails.txt', 'rb') as handle:
    salesmanemails = pickle.loads(handle.read())
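For completeness, a sketch of the write side that would produce emails.txt (assumes salesmanemails is any picklable object):

import pickle

with open('emails.txt', 'wb') as handle:
    handle.write(pickle.dumps(salesmanemails))
    # equivalently: pickle.dump(salesmanemails, handle)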
Example #37
         sorted="New `L` sorted by `key`. If key is str then use `attrgetter`. If key is int then use `itemgetter`",
         unique="Unique items, in stable order",
         val2idx="Dict from value to index",
         filter="Create new `L` filtered by predicate `f`, passing `args` and `kwargs` to `f`",
         argwhere="Like `filter`, but return indices for matching items",
         map="Create new `L` with `f` applied to all `items`, passing `args` and `kwargs` to `f`",
         map_filter="Same as `map` with `f` followed by `filter` with `g`",
         map_first="First element of `map_filter`",
         map_dict="Like `map`, but creates a dict from `items` to function results",
         starmap="Like `map`, but use `itertools.starmap`",
         itemgot="Create new `L` with item `idx` of all `items`",
         attrgot="Create new `L` with attr `k` (or value `k` for dicts) of all `items`.",
         cycle="Same as `itertools.cycle`",
         enumerate="Same as `enumerate`",
         zip="Create new `L` with `zip(*items)`",
         zipwith="Create new `L` with `self` zip with each of `*rest`",
         map_zip="Combine `zip` and `starmap`",
         map_zipwith="Combine `zipwith` and `starmap`",
         concat="Concatenate all elements of list",
         shuffle="Same as `random.shuffle`, but not inplace",
         reduce="Wrapper for `functools.reduce`",
         sum="Sum of the items",
         product="Product of the items",
         **_docs)

# Cell
#hide
L.__signature__ = pickle.loads(b'\x80\x03cinspect\nSignature\nq\x00(cinspect\nParameter\nq\x01X\x05\x00\x00\x00itemsq\x02cinspect\n_ParameterKind\nq\x03K\x01\x85q\x04Rq\x05\x86q\x06Rq\x07}q\x08(X\x08\x00\x00\x00_defaultq\tNX\x0b\x00\x00\x00_annotationq\ncinspect\n_empty\nq\x0bubh\x01X\x04\x00\x00\x00restq\x0ch\x03K\x02\x85q\rRq\x0e\x86q\x0fRq\x10}q\x11(h\th\x0bh\nh\x0bubh\x01X\x08\x00\x00\x00use_listq\x12h\x03K\x03\x85q\x13Rq\x14\x86q\x15Rq\x16}q\x17(h\t\x89h\nh\x0bubh\x01X\x05\x00\x00\x00matchq\x18h\x14\x86q\x19Rq\x1a}q\x1b(h\tNh\nh\x0bubtq\x1c\x85q\x1dRq\x1e}q\x1fX\x12\x00\x00\x00_return_annotationq h\x0bsb.')

# Cell
Sequence.register(L);
Example #38
    def cases(self):
        cached = self.cache.get(self.cache_key)
        if cached:
            return pickle.loads(cached)

        return self.reload_from_google_spreadsheet()
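A hedged sketch of the cache-fill side implied above; reload_from_google_spreadsheet is named in the code, but its body and the _fetch_rows helper are assumptions:

#     def reload_from_google_spreadsheet(self):
#         cases = self._fetch_rows()  # hypothetical fetch from the spreadsheet
#         self.cache.set(self.cache_key, pickle.dumps(cases))
#         return cases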
Example #39
    def connectionCycle(self):
        self.updateCurrentDutyLog("Starting Connection Cycle",
                                  "connectionCycle")

        if self.connection is None or self.address is None:
            self.active = False
            self.updateCurrentDutyLog("Invalid Connection Or Address",
                                      "connectionCycle")
            # ||=======================||
            logMessage = "Invalid Connection Or Address" + " connectionCycle"
            self.debugLogger.log("Error", logMessage)
            # print("h1")
            # ||=======================||
            return 0
        # print("h2")
        # str(self.address[0]) + ', ' + str(self.address[1])
        while self.active:
            # print("h3")
            self.updateCurrentDuty("Server Listening @: " +
                                   str(self.address[0]) + ', ' +
                                   str(self.address[1]))
            dataRecv = None
            try:
                dataRecv = self.connection.recv(1024).decode()
                # print(dataRecv)
                try:
                    currentData = literal_eval(dataRecv)
                except Exception as e:
                    dataRecv = None
                # print(currentData)
            except socket.timeout as e:
                continue
            except Exception as e:
                print(e)
                self.active = False
                self.updateCurrentDutyLog(
                    "Successfully Closing Thread -> readSocketCycle",
                    "connectionCycle")
                return 0

            if (dataRecv != None):

                CODE = currentData[0]
                INFO = currentData[1]
                # print(currentData)

                # ||=======================||
                # Return Connection Success
                if (CODE == "#00000"):
                    message = "Echo - Connection Successful"
                    self.connection.send((message).encode())
                    DeviceData.ConnectionController.pushInteractionLog(
                        self.createInteractionLog(currentData))
                    print(message)

                # ||=======================||
                # Test Server Connection
                elif (CODE == "#00001"):
                    message = "Echo Connection Time Test"
                    self.connection.send((message).encode())
                    DeviceData.ConnectionController.pushInteractionLog(
                        self.createInteractionLog(currentData))
                    print(message)

                # ||=======================||
                # Write Data To DeviceData
                elif (CODE == "#00002"):
                    message = "Success"
                    self.connection.send((message).encode())
                    # tmpData = dataRecv[7:]
                    DeviceData.ConnectionController.pushInteractionLog(
                        self.createInteractionLog(currentData))
                    print("Device Data:", INFO)

                # ||=======================||
                # Set Gps Live Data
                elif (CODE == "#40001"):
                    message = "Recieved Gps Controller Live Data"
                    gpsControllerData = pickle.loads(currentData[1])
                    EngineData.GpsController.setLiveData(gpsControllerData)
                    DeviceData.ConnectionController.pushInteractionLog(
                        self.createInteractionLog((currentData[0], message)))
                    print(message)

                # ||=======================||
                # Push Gps Internal Log
                elif (CODE == "#40002"):
                    continue

                # ||=======================||
                # Set Mechanical Live Data
                elif (CODE == "#40003"):
                    message = "Recieved Mechanical Controller Live Data"
                    mechanicalControllerData = pickle.loads(currentData[1])
                    EngineData.MechanicalController.setLiveData(
                        mechanicalControllerData)
                    DeviceData.ConnectionController.pushInteractionLog(
                        self.createInteractionLog((currentData[0], message)))
                    print(message)

                # ||=======================||
                # Push Mechanical Internal Log
                elif (CODE == "#40004"):
                    continue

                # ||=======================||
                # Set Thermo Live Data
                elif (CODE == "#40005"):
                    message = "Recieved Thermo Controller Live Data"
                    thermoControllerData = pickle.loads(currentData[1])
                    EngineData.ThermoController.setLiveData(
                        thermoControllerData)
                    DeviceData.ConnectionController.pushInteractionLog(
                        self.createInteractionLog((currentData[0], message)))
                    print(message)

                # ||=======================||
                # Push Thermo Internal Log
                elif (CODE == "#40006"):
                    continue

                # ||=======================||
                # Set Energy Live Data
                elif (CODE == "#40007"):
                    message = "Recieved Energy Controller Live Data"
                    energyControllerData = pickle.loads(currentData[1])
                    EngineData.EnergyController.setLiveData(
                        energyControllerData)
                    DeviceData.ConnectionController.pushInteractionLog(
                        self.createInteractionLog((currentData[0], message)))
                    print(message)

                # ||=======================||
                # Push Energy Internal Log
                elif (CODE == "#40008"):
                    continue

                # ||=======================||
                # Close Client Connection
                elif (CODE == "#99999"):
                    self.connection.send(("Success").encode())
                    return

                dataRecv = None

        return 0


# |===============================================================|
Example #40
 def read_pickle(self, s3_filename):
     '''Same as self.load(), uses pickle.load() to read the s3 file.'''
     body = self._get_body(s3_filename)
     obj = pickle.loads(body.read())
     return obj
Example #41
 def receive_action(self):
     response = requests.get(f'{self.base_url}/gym-to-halite', timeout=1)
     assert response.status_code == requests.codes.ok
     return pickle.loads(response.content)
Example #42
 def unPickle(self, data):
     return pickle.loads(data)
Example #43
def importConditions(fileName, returnFieldNames=False, selection=""):
    """Imports a list of conditions from an .xlsx, .csv, or .pkl file

    The output is suitable as an input to :class:`TrialHandler`
    `trialList` or to :class:`MultiStairHandler` as a `conditions` list.

    If `fileName` ends with:

        - .csv:  import as a comma-separated-value file
            (header + row x col)
        - .xlsx: import as Excel 2007 (xlsx) files.
            No support for older (.xls) is planned.
        - .pkl:  import from a pickle file as list of lists
            (header + row x col)

    The file should contain one row per type of trial needed and one column
    for each parameter that defines the trial type. The first row should give
    parameter names, which should:

        - be unique
        - begin with a letter (upper or lower case)
        - contain no spaces or other punctuation (underscores are permitted)


    `selection` is used to select a subset of condition indices to be used.
    It can be a list/array of indices, a python `slice` object or a string to
    be parsed as either option.
    e.g.:

        - "1,2,4" or [1,2,4] or (1,2,4) are the same
        - "2:5"       # 2, 3, 4 (doesn't include last whole value)
        - "-10:2:"    # tenth from last to the last in steps of 2
        - slice(-10, 2, None)  # the same as above
        - random(5) * 8  # five random vals 0-7

    """

    def _attemptImport(fileName, sep=',', dec='.'):
        """Attempts to import file with specified settings and raises
        ConditionsImportError if fails due to invalid format

        :param filename: str
        :param sep: str indicating the separator for cells (',', ';' etc)
        :param dec: str indicating the decimal point ('.', ',')
        :return: trialList, fieldNames
        """
        if fileName.endswith(('.csv', '.tsv')):
            trialsArr = pd.read_csv(fileName, encoding='utf-8-sig',
                                    sep=sep, decimal=dec)
            for col in trialsArr.columns:
                for row, cell in enumerate(trialsArr[col]):
                    if isinstance(cell, str):
                        tryVal = cell.replace(",", ".")
                        try:
                            trialsArr[col][row] = float(tryVal)
                        except ValueError:
                            pass
            logging.debug(u"Read csv file with pandas: {}".format(fileName))
        elif fileName.endswith(('.xlsx', '.xlsm')):
            trialsArr = pd.read_excel(fileName, engine='openpyxl')
            logging.debug(u"Read Excel file with pandas: {}".format(fileName))
        elif fileName.endswith('.xls'):
            trialsArr = pd.read_excel(fileName, engine='xlrd')
            logging.debug(u"Read Excel file with pandas: {}".format(fileName))
        # then try to convert array to trialList and fieldnames
        unnamed = trialsArr.columns.to_series().str.contains('^Unnamed: ')
        trialsArr = trialsArr.loc[:, ~unnamed]  # clear unnamed cols
        logging.debug(u"Clearing unnamed columns from {}".format(fileName))
        trialList, fieldNames = pandasToDictList(trialsArr)

        return trialList, fieldNames

    def _assertValidVarNames(fieldNames, fileName):
        """screens a list of names as candidate variable names. if all
        names are OK, return silently; else raise  with msg
        """
        fileName = pathToString(fileName)
        if not all(fieldNames):
            msg = ('Conditions file %s: Missing parameter name(s); '
                   'empty cell(s) in the first row?')
            raise exceptions.ConditionsImportError(msg % fileName)
        for name in fieldNames:
            OK, msg = isValidVariableName(name)
            if not OK:
                # tailor message to importConditions
                msg = msg.replace('Variables', 'Parameters (column headers)')
                raise exceptions.ConditionsImportError(
                    'Conditions file %s: %s%s"%s"' %
                    (fileName, msg, os.linesep * 2, name))

    if fileName in ['None', 'none', None]:
        if returnFieldNames:
            return [], []
        return []
    if not os.path.isfile(fileName):
        msg = 'Conditions file not found: %s'
        raise ValueError(msg % os.path.abspath(fileName))

    def pandasToDictList(dataframe):
        """Convert a pandas dataframe to a list of dicts.
        This helper function is used by csv or excel imports via pandas
        """
        # convert the resulting dataframe to a numpy recarray
        trialsArr = dataframe.to_records(index=False)
        # Check for new line characters in strings, and replace escaped characters
        for record in trialsArr:
            for idx, element in enumerate(record):
                if isinstance(element, str):
                    record[idx] = element.replace('\\n', '\n')
        if trialsArr.shape == ():
            # convert 0-D to 1-D with one element:
            trialsArr = trialsArr[np.newaxis]
        fieldNames = list(trialsArr.dtype.names)
        _assertValidVarNames(fieldNames, fileName)

        # convert the record array into a list of dicts
        trialList = []
        for trialN, trialType in enumerate(trialsArr):
            thisTrial = OrderedDict()
            for fieldN, fieldName in enumerate(fieldNames):
                val = trialsArr[trialN][fieldN]

                if isinstance(val, basestring):
                    if val.startswith('[') and val.endswith(']'):
                        # val = eval('%s' %unicode(val.decode('utf8')))
                        val = eval(val)
                elif type(val) == np.string_:
                    val = str(val.decode('utf-8-sig'))
                    # if it looks like a list, convert it:
                    if val.startswith('[') and val.endswith(']'):
                        # val = eval('%s' %unicode(val.decode('utf8')))
                        val = eval(val)
                elif np.isnan(val):
                    val = None
                thisTrial[fieldName] = val
            trialList.append(thisTrial)
        return trialList, fieldNames

    if (fileName.endswith(('.csv', '.tsv'))
            or (fileName.endswith(('.xlsx', '.xls', '.xlsm')) and haveXlrd)):
        if fileName.endswith(('.csv', '.tsv', '.dlm')):  # delimited text file
            for sep, dec in [ (',', '.'), (';', ','),  # most common in US, EU
                              ('\t', '.'), ('\t', ','), (';', '.')]:
                try:
                    trialList, fieldNames = _attemptImport(fileName=fileName,
                                                           sep=sep, dec=dec)
                    break  # seems to have worked
                except exceptions.ConditionsImportError as e:
                    continue  # try a different format
        else:
            trialList, fieldNames = _attemptImport(fileName=fileName)

    elif fileName.endswith(('.xlsx','.xlsm')):  # no xlsread so use openpyxl
        if not haveOpenpyxl:
            raise ImportError('openpyxl or xlrd is required for loading excel '
                              'files, but neither was found.')

        # data_only was added in 1.8
        if parse_version(openpyxl.__version__) < parse_version('1.8'):
            wb = load_workbook(filename=fileName)
        else:
            wb = load_workbook(filename=fileName, data_only=True)
        ws = wb.worksheets[0]

        logging.debug(u"Read excel file with openpyxl: {}".format(fileName))
        try:
            # in new openpyxl (2.3.4+) get_highest_xx is deprecated
            nCols = ws.max_column
            nRows = ws.max_row
        except Exception:
            # version openpyxl 1.5.8 (in Standalone 1.80) needs this
            nCols = ws.get_highest_column()
            nRows = ws.get_highest_row()

        # get parameter names from the first row header
        fieldNames = []
        rangeCols = list(range(nCols))
        for colN in range(nCols):
            if parse_version(openpyxl.__version__) < parse_version('2.0'):
                fieldName = ws.cell(_getExcelCellName(col=colN, row=0)).value
            else:
                # From 2.0, cells are referenced with 1-indexing: A1 == cell(row=1, column=1)
                fieldName = ws.cell(row=1, column=colN + 1).value
            if fieldName:
                # If column is named, add its name to fieldNames
                fieldNames.append(fieldName)
            else:
                # Otherwise, ignore the column
                rangeCols.remove(colN)
        _assertValidVarNames(fieldNames, fileName)

        # loop trialTypes
        trialList = []
        for rowN in range(1, nRows):  # skip header first row
            thisTrial = {}
            for fieldName, colN in zip(fieldNames, rangeCols):
                if parse_version(openpyxl.__version__) < parse_version('2.0'):
                    val = ws.cell(_getExcelCellName(col=colN, row=rowN)).value
                else:
                    # From 2.0, cells are referenced with 1-indexing: A1 == cell(row=1, column=1)
                    val = ws.cell(row=rowN + 1, column=colN + 1).value
                # if it looks like a list or tuple, convert it
                if (isinstance(val, basestring) and
                        (val.startswith('[') and val.endswith(']') or
                                 val.startswith('(') and val.endswith(')'))):
                    val = eval(val)
                # Convert EU-style decimals: replace ',' with '.' and try float()
                if isinstance(val, basestring):
                    tryVal = val.replace(",", ".")
                    try:
                        val = float(tryVal)
                    except ValueError:
                        pass
                thisTrial[fieldName] = val
            trialList.append(thisTrial)

    elif fileName.endswith('.pkl'):
        # Normalise newline characters before unpickling.
        with open(fileName, 'rb') as f:
            if PY3:
                # b'' literals are necessary in Python 3 because read()
                # returns bytes when the file is opened in binary mode.
                buffer = f.read().replace(b'\r\n', b'\n').replace(b'\r', b'\n')
            else:
                buffer = f.read().replace('\r\n', '\n').replace('\r', '\n')
        try:
            trialsArr = pickle.loads(buffer)
        except Exception:
            raise IOError('Could not open %s as conditions' % fileName)
        trialList = []
        if PY3:
            # In Python 3 the unpickled items may be str subclasses
            # (e.g. numpy's), so normalise them to plain str.
            trialsArr = [[str(item) if isinstance(item, str) else item
                          for item in row] for row in trialsArr]
        fieldNames = trialsArr[0]  # header line first
        _assertValidVarNames(fieldNames, fileName)
        for row in trialsArr[1:]:
            thisTrial = {}
            for fieldN, fieldName in enumerate(fieldNames):
                # types were preserved by the pickle, so no conversion needed
                thisTrial[fieldName] = row[fieldN]
            trialList.append(thisTrial)
    else:
        raise IOError('Your conditions file should be an '
                      'xlsx, csv, dlm, tsv or pkl file')

    # if we have a selection then try to parse it
    if isinstance(selection, basestring) and len(selection) > 0:
        selection = indicesFromString(selection)
        if not isinstance(selection, slice):
            for n in selection:
                if n != int(n):
                    raise TypeError("importConditions() was given some "
                                    "`indices` but could not parse them")

    # the selection might now be a slice or a series of indices
    if isinstance(selection, slice):
        trialList = trialList[selection]
    elif len(selection) > 0:
        allConds = trialList
        trialList = []
        for ii in selection:
            trialList.append(allConds[int(ii)])

    logging.exp('Imported %s as conditions, %d conditions, %d params' %
                (fileName, len(trialList), len(fieldNames)))
    if returnFieldNames:
        return (trialList, fieldNames)
    else:
        return trialList
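
A minimal usage sketch for the loader above (the file name is hypothetical, and the selection string follows whatever `indicesFromString` accepts):

trialList = importConditions('conditions.csv')
trialList, fieldNames = importConditions('conditions.csv',
                                         returnFieldNames=True)
firstTen = importConditions('conditions.csv', selection='0:10')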
Example #44
def receive(self) -> Message:
    pickled = self._sock.recv(MSG_SIZE)
    return pickle.loads(pickled)
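
Unpickling bytes straight off a socket executes arbitrary code if the peer is malicious. A common mitigation, sketched here after the pattern in the `pickle` documentation (not part of the snippet above), restricts which globals may be resolved:

import builtins
import io
import pickle

class RestrictedUnpickler(pickle.Unpickler):
    """Refuse to resolve any global except a small builtin whitelist."""
    def find_class(self, module, name):
        if module == "builtins" and name in {"list", "dict", "set", "tuple"}:
            return getattr(builtins, name)
        raise pickle.UnpicklingError("global '%s.%s' is forbidden" % (module, name))

def restricted_loads(data):
    return RestrictedUnpickler(io.BytesIO(data)).load()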
Example #45
    rc.set('configuration', 'configparser', 0)
    rc.set('configuration', 'regconfig', 'it works')
    rc.set('settings', 'users', ['jack', 'john', 'who else?'])
    
    # Can you store a pickle? Yes... however:
    """ Quote - Value lengths are limited by available memory. Long values
        (more than 2048 bytes) should be stored as files with the filenames
        stored in the configuration registry. This helps the registry perform
        efficiently."""
    import pickle
    x = {'hi': 'im going to be pickled...' }
    pick = pickle.dumps(x, pickle.HIGHEST_PROTOCOL)
    rc.set('pickle', 'pickleobject', (pick, _winreg.REG_BINARY))
    rc.write()
    
    #get sections and items
    for section in rc.sections():
        print section
        for item in rc.items(section):
            print '\t', item
            
    # Call this to write to a different registry path; use it to configure different users.
    rc.write(r"HKEY_LOCAL_MACHINE\SOFTWARE\mysoftwareagain")
    
    # let's try reading the data only
    rc = RegConfig(r"HKEY_LOCAL_MACHINE\SOFTWARE\mysoftware")
    
    # let's unpickle the pickle
    pick = rc.get('pickle', 'pickleobject')
    print pickle.loads(pick)
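
Following the quoted guidance (values over ~2048 bytes as files, with only the filename in the registry), a sketch of that indirection using the snippet's `rc` API; the path below is hypothetical:

PICKLE_PATH = r'C:\mysoftware\pickleobject.pkl'  # hypothetical location
with open(PICKLE_PATH, 'wb') as fh:
    fh.write(pick)                       # the long value lives in the file
rc.set('pickle', 'pickleobject_path', PICKLE_PATH)  # registry stores only the path
rc.write()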
Example #46
def test_pickling(self):
    assert hasattr(pickle.loads(pickle.dumps(cbook.CallbackRegistry())),
                   "callbacks")
Example #47
import imutils
import pickle
import cv2
import os

print("[INFO] loading face detector...")
protoPath = os.path.sep.join(['face_detection_model', 'deploy.prototxt'])
modelPath = os.path.sep.join(
    ['face_detection_model', 'res10_300x300_ssd_iter_140000.caffemodel'])
detector = cv2.dnn.readNetFromCaffe(protoPath, modelPath)

print("[INFO] loading face embedder...")
embedder = cv2.dnn.readNetFromTorch('openface_nn4.small2.v1.t7')

print("[INFO] loading face recognizer...")
recognizer = pickle.loads(open('output/recognizer.pickle', 'rb').read())

print("[INFO] loading label encoder...")
le = pickle.loads(open('output/le.pickle', 'rb').read())
# This function processes the 'imageFile' received to find the faces in it,
# draws bounds on the faces, and returns the tagged image.


def recognize_person(imageFile):
    # load the image, resize it, and then grab the image dimensions
    image = cv2.imread(imageFile)
    image = imutils.resize(image, width=600)
    (h, w) = image.shape[:2]

    # construct a blob from the image and localize faces
    # the mean/scale arguments below are the ones conventionally used with
    # this res10 SSD caffemodel (an assumption; the source was cut mid-call)
    imageBlob = cv2.dnn.blobFromImage(cv2.resize(image, (300, 300)), 1.0,
                                      (300, 300), (104.0, 177.0, 123.0),
                                      swapRB=False, crop=False)
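    # A plausible continuation (an assumption, not the original source)
    # would run the SSD detector on the blob:
    detector.setInput(imageBlob)
    detections = detector.forward()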
Example #48
def load_progress(progress_file, crawled_queue, cleaned_queue,
                  complete_single_queue, spider, tfidf):
    try:
        with open(progress_file, 'r') as load_session_file:
            progress_json = json.load(load_session_file)

        redis_conn = _Redis(
            host=progress_json['redis_host'],
            port=progress_json['redis_port'],
            db=progress_json['redis_db'],
            password=progress_json['redis_pwd']).getRedisConn()

        for redis_list, current_queue in [
                ('crawled_queue_list', crawled_queue),
                ('cleaned_queue_list', cleaned_queue),
                ('complete_single_queue_list', complete_single_queue)]:
            for item in redis_conn.lrange(redis_list, 0, -1):
                # data returned by redis is bytes and needs decoding
                current_queue.put(item.decode())

        spider_attr_dict = dict()
        for attr in redis_conn.lrange('spider_attr', 0, -1):
            attr = attr.decode()
            if attr in [
                    '__not_access_queue__', '__accessed_set__',
                    '__cannot_access_set__'
            ]:
                spider_attr_dict[attr] = redis_conn.lrange(attr, 0, -1)
            elif attr == '__config__':
                spider_attr_dict[attr] = pickle.loads(
                    redis_conn.get('spider_configs'))
            else:
                spider_attr_dict[attr] = redis_conn.get(
                    attr)  # decoding of spider attributes is left to spider.__load_attr__
        spider.__load_attr__(spider_attr_dict)

        redis_conf = [
            conf.decode() for conf in redis_conn.lrange('redis_conf', 0, -1)
        ]
        old_redis = _Redis(redis_conf[0], int(redis_conf[1]), int(
            redis_conf[2]), None if redis_conf[3] == 'None' else redis_conf[3])

        mysql_conf = [
            conf.decode() for conf in redis_conn.lrange('mysql_conf', 0, -1)
        ]
        old_mysql = _MySQL(mysql_conf[0], int(mysql_conf[1]), mysql_conf[2], mysql_conf[3], mysql_conf[4])
        
        spider.set_redis_conn(old_redis.getRedisConn())
        tfidf.set_redis_conn(old_redis.getRedisConn())
        # set_mysql_conn is an assumed method name for wiring the MySQL
        # connection; a Redis setter cannot sensibly take a MySQL connection
        tfidf.set_mysql_conn(old_mysql.getMySQLConn())
        return old_redis, old_mysql

    except redis.exceptions.ConnectionError:
        print('Could not connect to the progress database; failed to load progress')
    except Exception as e:
        print('An unhandled error occurred; could not load the program progress', e)
        print(traceback.format_exc())
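
The loader above implies a writer that stored the config under 'spider_configs'; a minimal sketch of that counterpart (the function name and the config's shape are assumptions):

import pickle

def save_spider_config(redis_conn, config):
    # mirror of pickle.loads(redis_conn.get('spider_configs')) in the loader
    redis_conn.set('spider_configs', pickle.dumps(config, pickle.HIGHEST_PROTOCOL))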
Example #49
            cur.execute("select * from nonexist")
        except psycopg2.Error, exc:
            e = exc

        self.assertEqual(e.pgcode, '42P01')
        self.assert_(e.pgerror)
        self.assert_(e.cursor is cur)

    @skip_before_python(2, 5)
    def test_pickle(self):
        import pickle
        cur = self.conn.cursor()
        try:
            cur.execute("select * from nonexist")
        except psycopg2.Error, exc:
            e = exc

        e1 = pickle.loads(pickle.dumps(e))

        self.assertEqual(e.pgerror, e1.pgerror)
        self.assertEqual(e.pgcode, e1.pgcode)
        self.assert_(e1.cursor is None)


def test_suite():
    return unittest.TestLoader().loadTestsFromName(__name__)


if __name__ == "__main__":
    unittest.main()
Example #50
def pex_accumulator(total, result):
    # `pkl` and `lz4f` are presumably pickle and lz4.frame, imported elsewhere
    blob, nevents, dataset = result
    total[1][dataset] += nevents
    total[0].add(pkl.loads(lz4f.decompress(blob)))
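
A round-trip sketch of the blob format the accumulator expects, assuming `pkl` and `lz4f` are `pickle` and `lz4.frame`:

import pickle as pkl
import lz4.frame as lz4f

obj = {"sumw": 1.0}                    # toy payload
blob = lz4f.compress(pkl.dumps(obj))   # what a worker would ship back
assert pkl.loads(lz4f.decompress(blob)) == obj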
Example #51
# produces a new array that contains all but the last entry of digits.data
clf.fit(digits.data[:-1], digits.target[:-1])

# Input = [[data1], [data2]]
print clf.predict(digits.data[-1:])  # 8
print clf.predict(digits.data[0:3])  # 0 1 2
'''
Model Persistence
'''
import pickle
from sklearn.externals import joblib

# pickle: serialise to an in-memory byte string
s = pickle.dumps(clf)
clf2 = pickle.loads(s)
print clf2.predict(digits.data[0:3])

# joblib: persist to a file
joblib.dump(clf, 'svm.pkl')
clf3 = joblib.load('svm.pkl')
print clf3.predict(digits.data[0:3])
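
Note that `sklearn.externals.joblib` has since been deprecated and removed from scikit-learn; on current versions the equivalent uses the standalone package (Python 3 syntax):

import joblib  # pip install joblib

joblib.dump(clf, 'svm.pkl')
clf3 = joblib.load('svm.pkl')
print(clf3.predict(digits.data[0:3]))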
'''
Conventions
'''
import numpy as np
from sklearn import random_projection

# Type casting
rng = np.random.RandomState(0)
X = rng.rand(10, 2000)
Example #52
    def test_high_level_trace(self):
        """Checks that python side high level events are recorded.
        """
        class RepeatedDataset(torch.utils.data.Dataset):
            def __init__(self, N, D_in, D_out):
                self.N = N
                self.x = torch.randn(N, D_in)
                self.y = torch.randn(N, D_out)

            def __len__(self):
                return self.N

            def __getitem__(self, idx):
                return self.x, self.y

        class TwoLayerNet(torch.nn.Module):
            def __init__(self, D_in, H, D_out):
                super(TwoLayerNet, self).__init__()
                self.linear1 = torch.nn.Linear(D_in, H)
                self.linear2 = torch.nn.Linear(H, D_out)

            def forward(self, x):
                h_relu = self.linear1(x).clamp(min=0)
                y_pred = self.linear2(h_relu)
                return y_pred

        class CustomSGD(torch.optim.SGD):
            def __init__(self, *args, **kwargs):
                super(CustomSGD, self).__init__(*args, **kwargs)

        def train():
            for _, data in enumerate(dataloader):
                x, y = data[0], data[1]
                y_pred = model(x)
                loss = criterion(y_pred, y)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

        N, D_in, H, D_out = 8, 10, 5, 2
        model = TwoLayerNet(D_in, H, D_out)
        criterion = torch.nn.MSELoss(reduction='sum')
        optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
        ds = RepeatedDataset(N, D_in, D_out)
        dataloader = torch.utils.data.DataLoader(ds, batch_size=1)

        try:
            train()
        except Exception:
            self.fail("Expected no exception without profiling.")

        # Create multiple instances; expect each function to be hooked only once.
        # Nested wrappers (repeated patching) would make the following test fail.
        optimizer_duplicate = torch.optim.SGD(model.parameters(), lr=1e-4)
        dataloader_duplicate = torch.utils.data.DataLoader(ds, batch_size=1)

        def judge(expected_event_count, prof):
            actual_event_count = {}
            for e in prof.function_events:
                if "#" in e.name:
                    key = e.name
                    if key in expected_event_count.keys():
                        actual_event_count[key] = actual_event_count.setdefault(key, 0) + 1
            for key, count in expected_event_count.items():
                self.assertTrue((key in actual_event_count.keys()) and (count == actual_event_count[key]))

        with profile() as prof:
            train()
        expected_event_count = {
            # "+1" because the final iteration will enter __next__ but skip the loop body.
            "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__": (N + 1),
            "Optimizer.step#SGD.step": N,
            "Optimizer.zero_grad#SGD.zero_grad": N
        }
        judge(expected_event_count, prof)

        # Test on pickle/unpickle. Expect to work in multi-processing.
        optimizer = pickle.loads(pickle.dumps(optimizer))
        with profile() as prof:
            train()
        judge(expected_event_count, prof)

        # Test on customized optimizer.
        optimizer = CustomSGD(model.parameters(), lr=1e-4)
        with profile() as prof:
            train()
        expected_event_count = {
            "enumerate(DataLoader)#_SingleProcessDataLoaderIter.__next__": (N + 1),
            "Optimizer.step#CustomSGD.step": N,
            "Optimizer.zero_grad#CustomSGD.zero_grad": N
        }
        judge(expected_event_count, prof)
Example #53
def test_pickle_fields_set():
    m = Model(a=24)
    assert m.dict(skip_defaults=True) == {'a': 24}
    m2 = pickle.loads(pickle.dumps(m))
    assert m2.dict(skip_defaults=True) == {'a': 24}
Example #54
TIMEOUT = 60  # server response timeout [sec]

BASE_SPEED = 100


def SetSpeed(leftSpeed, rightSpeed):
    print(leftSpeed, rightSpeed)


server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # create a UDP server
server.bind((IP, PORT))  # start the UDP server
print("Listening on port %d..." % PORT)  # report that the server has started
server.settimeout(TIMEOUT)  # tell the server how long to wait

while True:  # loop forever
    try:
        data = server.recvfrom(1024)  # try to receive up to 1024 bytes
    except socket.timeout:
        print("Timed out waiting for data...")
        break
    parseData = pickle.loads(data[0])

    leftSpeed = parseData[1] * BASE_SPEED + parseData[0] * BASE_SPEED // 2
    rightSpeed = parseData[1] * BASE_SPEED - parseData[0] * BASE_SPEED // 2

    SetSpeed(leftSpeed, rightSpeed)

    #msg = 'Ok'
    #server.sendto(msg.encode('utf-8'), adrs)
server.close()
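
Judging by how `parseData` is indexed, the server expects a pickled (turn, forward) pair; a matching client sketch under that assumption (the address is hypothetical, since the server's IP/PORT constants were elided from the snippet):

import pickle
import socket

SERVER_ADDR = ("127.0.0.1", 9999)  # hypothetical address
client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# index 0 = turn factor, index 1 = forward factor, as consumed by the loop above
client.sendto(pickle.dumps((0.5, 1.0)), SERVER_ADDR)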
Example #55
def test_isPickleable(self):
    """Must be pickleable to use syncMpiState."""
    stream = pickle.dumps(flags.Flags.BOND | flags.Flags.A)
    flag = pickle.loads(stream)
    self.assertEqual(flag, flags.Flags.BOND | flags.Flags.A)
Example #56
import urllib2
import pickle

url = "http://www.pythonchallenge.com/pc/def/banner.p"
req = urllib2.urlopen(url).read()
data = pickle.loads(req)

print data
#for line in data: 
#    print ''.join(elem[0]*elem[1] for elem in line)
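
A Python 3 equivalent of the same fetch; the banner unpickles to a list of (char, run_length) pairs per line, as the commented-out loop suggests:

import pickle
from urllib.request import urlopen

data = pickle.loads(urlopen("http://www.pythonchallenge.com/pc/def/banner.p").read())
for line in data:
    print(''.join(ch * n for ch, n in line))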
Example #57
def deserialize(self, pickled: bytes):
    state = pickle.loads(pickled)
    self._object_to_save.restore(state)
Example #58
def test_entry_point_pickleable(self):
    revived = pickle.loads(pickle.dumps(self.ep))
    assert revived == self.ep
Example #59
class ViDiac:

    class FeatureGenerator:
        def __init__(self, tokens):
            self.tokens = tokens

        def gen_inner_windows(self, index):
            mention_text = self.tokens[index]
            yield ('current_char_', mention_text.lower())
            yield ('is_digit_', (mention_text.isdigit()))
            yield ('is_punc_', (mention_text in string.punctuation))
            left_index = index
            while left_index > 0 and self.tokens[left_index] != ' ':
                left_index -= 1
                yield '{}_char_'.format(left_index - index), self.tokens[left_index]
                yield '{}_isdigit_'.format(left_index - index), self.tokens[left_index].isdigit()
                yield '{}_ispunct_'.format(left_index - index), self.tokens[left_index] in string.punctuation

            right_index = index
            while right_index < len(self.tokens) - 1 and self.tokens[right_index] != ' ':
                right_index += 1
                yield '{}_char_'.format(right_index - index), self.tokens[right_index]
                yield '{}_isdigit_'.format(right_index - index), self.tokens[right_index].isdigit()
                yield '{}_ispunct_'.format(right_index - index), self.tokens[right_index] in string.punctuation

            if left_index != right_index:
                yield ('inner_word_', ''.join(self.tokens[left_index:right_index]))

        def gen_left_windows(self, index, MAX_SPACE):
            num_words = 0
            last_space_index = index
            left_index = index
            while num_words < MAX_SPACE and left_index > 0:
                left_index -= 1
                if self.tokens[left_index] == ' ' or left_index == 0:
                    num_words += 1
                    if num_words == 1:
                        last_space_index = left_index
                    else:
                        yield '{}_word_'.format(1 - num_words), ''.join(self.tokens[left_index + 1:last_space_index])
                        last_space_index = left_index
                if self.tokens[left_index] in '.!?':
                    break

                    # yield '{}_char_'.format(left_index - index), self.tokens[left_index]
                    # yield '{}_isdigit_'.format(left_index - index), self.tokens[left_index].isdigit()
                    # yield '{}_ispunct_'.format(left_index - index), self.tokens[left_index] in string.punctuation

        def gen_right_windows(self, index, MAX_SPACE):
            num_words = 0
            last_space_index = index
            right_index = index
            while num_words < MAX_SPACE and right_index < len(self.tokens) - 1:
                right_index += 1

                if self.tokens[right_index] == ' ' or right_index == len(self.tokens):
                    num_words += 1
                    if num_words == 1:
                        last_space_index = right_index
                    else:
                        yield '{}_word_'.format(num_words - 1), ''.join(self.tokens[last_space_index + 1: right_index])
                        last_space_index = right_index
                if self.tokens[right_index] in '.!?':
                    break

                    # yield '{}_char_'.format(right_index - index), self.tokens[right_index]
                    # yield '{}_isdigit_'.format(right_index - index), self.tokens[right_index].isdigit()
                    # yield '{}_ispunct_'.format(right_index - index), self.tokens[right_index] in string.punctuation

    maAciiTexlex = [7845, 7847, 7849, 7851, 7853, 226, 225, 224, 7843, 227, 7841, 7855, 7857, 7859, \
                    7861, 7863, 259, 250, 249, 7911, 361, 7909, 7913, 7915, 7917, 7919, 7921, 432, \
                    7871, 7873, 7875, 7877, 7879, 234, 233, 232, 7867, 7869, 7865, 7889, 7891, 7893, \
                    7895, 7897, 244, 243, 242, 7887, 245, 7885, 7899, 7901, 7903, 7905, 7907, 417, \
                    237, 236, 7881, 297, 7883, 253, 7923, 7927, 7929, 7925, 273, 7844, 7846, 7848, \
                    7850, 7852, 194, 193, 192, 7842, 195, 7840, 7854, 7856, 7858, 7860, 7862, 258, \
                    218, 217, 7910, 360, 7908, 7912, 7914, 7916, 7918, 7920, 431, 7870, 7872, 7874, \
                    7876, 7878, 202, 201, 200, 7866, 7868, 7864, 7888, 7890, 7892, 7894, 7896, 212, \
                    211, 210, 7886, 213, 7884, 7898, 7900, 7902, 7904, 7906, 416, 205, 204, 7880, 296, \
                    7882, 221, 7922, 7926, 7928, 7924, 272]
    telex = ["aas", "aaf", "aar", "aax", "aaj", "aa", "as", "af", "ar", "ax", "aj", "aws", "awf", \
             "awr", "awx", "awj", "aw", "us", "uf", "ur", "ux", "uj", "uws", "uwf", "uwr", "uwx", \
             "uwj", "uw", "ees", "eef", "eer", "eex", "eej", "ee", "es", "ef", "er", "ex", "ej", \
             "oos", "oof", "oor", "oox", "ooj", "oo", "os", "of", "or", "ox", "oj", "ows", "owf", \
             "owr", "owx", "owj", "ow", "is", "if", "ir", "ix", "ij", "ys", "yf", "yr", "yx", "yj", \
             "dd", "AAS", "AAF", "AAR", "AAX", "AAJ", "AA", "AS", "AF", "AR", "AX", \
             "AJ", "AWS", "AWF", "AWR", "AWX", "AWJ", "AW", "US", "UF", "UR", "UX", \
             "UJ", "UWS", "UWF", "UWR", "UWX", "UWJ", "UW", "EES", "EEF", "EER", \
             "EEX", "EEJ", "EE", "ES", "EF", "ER", "EX", "EJ", "OOS", "OOF", "OOR", \
             "OOX", "OOJ", "OO", "OS", "OF", "OR", "OX", "OJ", "OWS", "OWF", "OWR", \
             "OWX", "OWJ", "OW", "IS", "IF", "IR", "IX", "IJ", "YS", "YF", "YR", "YX", \
             "YJ", "DD"]

    mapping = {}
    reversed_mapping = {}

    if sys.version_info[0] == 3:
        for i in range(len(telex)):
            mapping[chr(maAciiTexlex[i])] = telex[i]
            reversed_mapping[telex[i]] = chr(maAciiTexlex[i])
    else:
        for i in range(len(telex)):
            mapping[unichr(maAciiTexlex[i])] = telex[i]
            reversed_mapping[telex[i]] = unichr(maAciiTexlex[i])

    crf = CRF(model_filename='models/vidiac.crfsuite')
    data = pickle.dumps(crf, protocol=pickle.HIGHEST_PROTOCOL)
    model = pickle.loads(data)

    @staticmethod
    def prepare_data(str_line):
        tokens = []
        labels = []
        for ch in str_line:
            label = ''
            if ch.isupper():
                label += 'U'
            else:
                label += 'L'

            if ch not in ViDiac.mapping:
                # yield "{}\t{}".format(ch.lower(), label)
                # yield (ch.lower(), label)
                tokens.append(ch.lower())
                labels.append(label)
            else:
                ch = ch.lower()
                chmap = ViDiac.mapping[ch]
                if chmap[0] == chmap[1]:
                    label += 'm'
                elif chmap[1] == 'w':
                    label += 'w'
                if chmap[-1] in 'sfrxj':
                    label += chmap[-1]
                # yield "{}\t{}".format(chmap[0], label)
                # yield (chmap[0], label)
                tokens.append(chmap[0])
                labels.append(label)
        return tokens, labels

    @staticmethod
    def word2features(i, feature_generator):

        features = {
            'bias': 1.0,
        }

        for (key, value) in feature_generator.gen_inner_windows(i):
            features[key] = value
        for (key, value) in feature_generator.gen_left_windows(i, 2):
            features[key] = value
        for (key, value) in feature_generator.gen_right_windows(i, 2):
            features[key] = value
        return features

    @staticmethod
    def sent2features(tokens):
        feature_generator = ViDiac.FeatureGenerator(tokens)
        return [ViDiac.word2features(i, feature_generator) for i in range(len(tokens))]

    @staticmethod
    def doit(str_sentence):
        list_char = list(str_sentence.lower())
        labels = ViDiac.model.predict([ViDiac.sent2features(list_char)])
        # output = tmp[0]
        # print labels[0]
        output = u''
        for i in range(len(list_char)):
            # print list_char[i], labels[0][i]
            if labels[0][i] == 'L':
                output += list_char[i]
            elif labels[0][i] == 'U':
                output += list_char[i].upper()
            else:
                # print "label_{}".format(labels[0][i])
                upcase = False
                unichar = list_char[i]
                for label in labels[0][i]:
                    if label == 'U':
                        upcase = True
                    elif label == 'L':
                        continue
                    elif label == 'm':
                        unichar += unichar
                    else:
                        unichar += label
                if upcase:
                    unichar = unichar.upper()
                # print unichar
                output += ViDiac.reversed_mapping[unichar]
        return output
Example #60
def load_data(train_filename):
    with open(train_filename, "rb") as f:
        data = pickle.load(f)      # the file stores a pickled bytes blob,
        data = pickle.loads(data)  # which is itself a pickle, so unpickle twice
    dataset = mx.gluon.data.dataset.ArrayDataset(data[0], data[1])
    return dataset
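
Since `load_data` unpickles twice, the matching writer must have double-pickled; a sketch inferred from the loader (the file name and toy arrays are assumptions):

import pickle

X, y = [[0.0, 1.0], [1.0, 0.0]], [0, 1]
with open("train.pkl", "wb") as f:
    # outer pickle.dump() of an inner pickle.dumps() blob, mirroring load_data()
    pickle.dump(pickle.dumps((X, y)), f)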