Example #1
File: mora.py Project: DeaDSouL/mora
 def apiRequest(self, movie='', useYear=True):
     if type(movie) != dict: return False
     if sys.version_info[0] == 3: title, year, gotError = urllib_quote(movie['title'].encode('utf8')), urllib_quote(movie['year'].encode('utf8')) if useYear else '', False # py3
     else: title, year, gotError = urllib_quote(movie['title']).encode('utf8'), urllib_quote(movie['year']).encode('utf8') if useYear else '', False #py2
     for api_name, api in _iteritems(self.api_list):
         try:
             if sys.version_info[0] == 3: data = json_loads( urllib2_urlopen(api['url'] % (title, year)).read().decode('utf-8') ) # py3
             else: data = json_loads( urllib2_urlopen(api['url'] % (title, year)).read() ) # py2
             if api['response_status_key'] in data and data[api['response_status_key']] == api['status_success_value']:
                 dataRated = data[api['response_rated_key']].upper() if api['response_rated_key'] in data else 'UNKNOWN'
                 if dataRated not in self.rated_aliases:
                     self.log('Unrecognized Rated: "%s" - API: "%s" - Movie: "%s" - Year: "%s"' % (dataRated, api_name, title, year))
                     self.rated_aliases[dataRated] = 'UNKNOWN'
                 movie['rated'] = dataRated
         except (urllib2_HTTPError, urllib2_URLError) as e:
             gotError = True
             #print('\r%s' % self.clear_line),
             self.rePrint()
             #print('\r%s' % self.clear_line, end='')
             self.log('\rError occurred while checking the rated for "%s"' % movie['basename'])
         if self.rated_aliases[movie['rated']] != 'UNKNOWN':
             break # Found the Rated, so let's exit the loop
     if not self.args.quiet and not gotError:
         self.rePrint('Live :%s%s ---> %s' % ((10-len(movie['rated']))*' ', movie['rated'], movie['basename']), False)
     self.rated_results[self.rated_aliases[movie['rated']]].append(movie)
     return True
Example #2
File: test_create.py Project: ESSS/conda
    def test_rpy_search(self):
        with make_temp_env("python=3.5") as prefix:
            run_command(Commands.CONFIG, prefix, "--add channels https://repo.continuum.io/pkgs/free")
            run_command(Commands.CONFIG, prefix, "--remove channels defaults")
            stdout, stderr = run_command(Commands.CONFIG, prefix, "--show", "--json")
            json_obj = json_loads(stdout)
            assert 'defaults' not in json_obj['channels']

            assert_package_is_installed(prefix, 'python')
            assert 'r' not in context.channels

            # assert conda search cannot find rpy2
            stdout, stderr = run_command(Commands.SEARCH, prefix, "rpy2", "--json")
            json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())

            assert bool(json_obj) is False

            # add r channel
            run_command(Commands.CONFIG, prefix, "--add channels r")
            stdout, stderr = run_command(Commands.CONFIG, prefix, "--show", "--json")
            json_obj = json_loads(stdout)
            assert 'r' in json_obj['channels']

            # assert conda search can now find rpy2
            stdout, stderr = run_command(Commands.SEARCH, prefix, "rpy2", "--json")
            json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
            assert len(json_obj['rpy2']) > 1
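A recurring pattern in these conda tests is stripping the human-readable "Fetching package metadata ..." banner from stdout before parsing it. A minimal, self-contained sketch of that cleanup (the captured stdout below is illustrative, not real conda output):

from json import loads as json_loads

# Hypothetical captured stdout: a progress banner followed by the JSON body.
stdout = 'Fetching package metadata ...\n{"rpy2": [{"version": "2.8.5"}]}\n'

json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
assert json_obj["rpy2"][0]["version"] == "2.8.5"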
Example #3
 def post(self, request):
     print 'post'
     # arr = [p for p in product.__class__.__dict__ if p[0:2] != '__']
     # attributes = [a for a in Products.objects.first() if a[0:2] != '__']
     # print attributes
     # name = request.POST.get('name', 'null')
     # print 'test name', name
     # name = request.POST['name']
     # print 'test name', name
     # print 'test', request.body['name']
     # data = request.body
     # print request.body
     print json_loads(request.body)
     data = json_loads(request.body)
     if not data:
         return invalid('not data')
     name = ''
     description = ''
     if data['name']:
         name = data['name']
         print 'name', name
     if data['description']:
         description = data['description']
         print 'desc', description
     print 'create'
     category = Categories.objects.create(
         name=name,
         description=description,
     )
     if category:
         print 'created'
         return self.PostCodes.ok(category)
     else:
         return invalid('not create')
Example #4
def parse_lasoo_store_js(lasoo_store_js):
    # The store-list JSON used for the Google map looks like this:
    # [
    #     {id:13524191847234,latitude:-43.55240631,longitude:172.6368103,
    #       displayName:"All Power -- Cyclone Cycles & Mowers Ltd'"}
    #     ,
    #     {id:13524191847738,latitude:-43.51478577,longitude:172.64381409,
    #       displayName:"All Power -- Edgeware Mowers & Chainsaws Ltd'"}
    #     ,
    #     ...
    # ]
    lasoo_store_js = lasoo_store_js.replace("\t", "").replace("\n", "").replace("\r", "").replace('\'"', '"')
    if lasoo_store_js.find("\"id\"") > 0:
        store_list = json_loads(lasoo_store_js)
    else:
        store_json = lasoo_store_js \
            .replace('{id:', '{"id":') \
            .replace(',latitude:', ',"latitude":') \
            .replace(',longitude:', ',"longitude":') \
            .replace(',displayName:', ',"displayName":')
        store_list = json_loads(store_json)
    results = []
    for store in store_list:
        item = {}
        item['lasoo_id'] = str(store['id'])
        # Normalise field name and value
        item['display_name'] = normalize_lasoo_store_display_name(store['displayName'])
        item['name'] = item['display_name'].lower()
        item['latitude'] = store['latitude']
        item['longitude'] = store['longitude']
        results.append(item)
    return results
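The comment at the top of parse_lasoo_store_js describes a payload that is a JavaScript object literal (unquoted keys) rather than strict JSON, which is why the function quotes the known keys by hand before calling json_loads. A minimal sketch of that repair on an input shaped like the sample in the comment (the string below is illustrative):

from json import loads as json_loads

# Illustrative Lasoo-style payload with unquoted keys.
lasoo_store_js = '[{id:13524191847234,latitude:-43.55240631,longitude:172.6368103,displayName:"All Power -- Cyclone Cycles & Mowers Ltd"}]'

store_json = (lasoo_store_js
              .replace('{id:', '{"id":')
              .replace(',latitude:', ',"latitude":')
              .replace(',longitude:', ',"longitude":')
              .replace(',displayName:', ',"displayName":'))
stores = json_loads(store_json)
assert stores[0]['id'] == 13524191847234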
Example #5
    def test_clone_offline_multichannel_with_untracked(self):
        with make_temp_env("python") as prefix:
            assert_package_is_installed(prefix, 'python')
            assert 'r' not in context.channels

            # assert conda search cannot find rpy2
            stdout, stderr = run_command(Commands.SEARCH, prefix, "rpy2", "--json")
            json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
            assert bool(json_obj) is False

            # add r channel
            run_command(Commands.CONFIG, prefix, "--add channels r")
            stdout, stderr = run_command(Commands.CONFIG, prefix, "--get", "--json")
            json_obj = json_loads(stdout)
            assert json_obj['rc_path'] == join(prefix, 'condarc')
            assert json_obj['get']['channels']

            # assert conda search can now find rpy2
            stdout, stderr = run_command(Commands.SEARCH, prefix, "rpy2", "--json")
            json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
            assert len(json_obj['rpy2']) > 1

            run_command(Commands.INSTALL, prefix, "rpy2")
            assert_package_is_installed(prefix, 'rpy2')
            run_command(Commands.LIST, prefix)

            with enforce_offline():
                with make_temp_env("--clone", prefix, "--offline") as clone_prefix:
                    assert_package_is_installed(clone_prefix, 'python')
                    assert_package_is_installed(clone_prefix, 'rpy2')
                    assert isfile(join(clone_prefix, 'condarc'))  # untracked file
Example #6
    def set_licensing(self, netid, add=None, remove=None):
        """ Return True if license was set/unset
        """
        if add and len(add) or remove and len(remove):
            try:
                self.license_api.set_licenses_for_netid(
                    netid, add=add, remove=remove)
                return True
            except DataFailureException as ex:
                err_msg = str(ex)
                try:
                    if ex.status == 404:
                        odata = json_loads(ex.msg)['odata.error']
                        err_msg = odata['message']['value']
                        raise MSCAProvisionerNetidNotFound(
                            'License 404: %s: %s' % (netid, err_msg))
                    elif ex.status == 400:
                        odata = json_loads(ex.msg)['odata.error']
                        code = odata['code']
                        msg = odata['message']['value']
                        if (code == 'Request_BadRequest' and
                            re.match(r'.* invalid usage location\.$', msg)):
                            self.user_api.set_location_for_netid(netid, 'US')
                            self.log.info(
                                'Assigned location "US" to netid %s' % netid)
                            return self.set_licensing(
                                netid, add=add, remove=remove)
                except:
                    pass

                raise MSCAProvisionerException(
                    'License Fail: netid %s: add %s: remove %s: %s' % (
                        netid, add, remove, err_msg))

        return False
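On failure, the handler above assumes DataFailureException.msg carries an OData error document and pulls the error code and the human-readable message out of it with json_loads. A small sketch of that extraction against an illustrative payload (the message text is an assumption, not a captured Microsoft response):

from json import loads as json_loads

# Illustrative odata.error body; real responses may differ.
msg = ('{"odata.error": {"code": "Request_BadRequest",'
       ' "message": {"lang": "en", "value": "... invalid usage location."}}}')

odata = json_loads(msg)['odata.error']
code = odata['code']                 # e.g. "Request_BadRequest"
err_msg = odata['message']['value']  # human-readable detail
assert code == 'Request_BadRequest' and err_msg.endswith('invalid usage location.')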
Example #7
    def put(self, request, id):
        print 'put'
        print 'id', id
        print json_loads(request.body)
        data = json_loads(request.body)
        if not data:
            return invalid('not data')

        # if data['id']:
        #     id = data['id']
        #     print 'id', id
        category = Categories.objects.get(id=id)
        if data['name']:
            category.name = data['name']
            print 'name', category.name
        if data['description']:
            category.description = data['description']
            print 'desc', category.description
        print 'update'
        category.save()
        if category:
            print 'updated'
            return self.PutCodes.ok(category)
        else:
            print 'did not update'
            return invalid('not update')
Example #8
def uri_solve_1st():
    '''
    >>> data_str 
    [[0,0], [1,2], [3,4] ...]
    '''
    try:
        plist = json_loads(request.form.get('data'))
        lbpos = json_loads(request.form.get('lb_pos'))
        return json_dumps(unicursal_from_lb(plist, lbpos))
    except: #FIXME: fix type of except
        return erro_screen_points()
Example #9
def parse_file(filename):
	"""Parses a single file"""
	
	with open(directory+filename) as f:
		data = f.readlines()
		category = json_loads(data[0])
		similar = json_loads(data[1])
		
		if 'SimilarSites' not in similar:
			similar = False
		if 'Category' not in category:
			category = False
	
	return [category, similar]
Example #10
File: nerd.py Project: AKSW/CSV2RDF-WIKI
    def extract(self, text, service, timeout):
        """Extract named entities from document with 'service'.        
        'service' can be any of the constants defined in this module.
        """

        """ submit document """
        self.http.request("POST", "/api/document",
                          urlencode({"text": text, 
                                     "key": self.api_key}),
                          self._headers
                          )
        response = self.http.getresponse()
        if response.status / 100 != 2:
            raise Exception("%s %s" % (response.status, response.reason))
        json = response.read()
        _debug(response, json)
        data = json_loads(json)
        id_document = data["idDocument"]


        """ annotate document """
        self.http.request("POST", "/api/annotation",
                          urlencode({"extractor": service,
                                     "idDocument": id_document,
                                     "timeout": timeout,
                                     "key": self.api_key}),
                          self._headers
                          )
        
        response = self.http.getresponse()
        if response.status / 100 != 2:
            raise Exception("%s %s" % (response.status, response.reason))
        json = response.read()
        _debug(response, json)
        data = json_loads(json)
        id_annotation = data["idAnnotation"]


        """ get extraction from the annotation """
        self.http.request("GET", "/api/entity" + "?key=%s&idAnnotation=%s" % (self.api_key,id_annotation),
                          headers = self._headers
                          )
        response = self.http.getresponse()
        if response.status / 100 != 2:
            raise Exception("%s %s" % (response.status, response.reason))
        json = response.read()
        _debug(response, json)
        data = json_loads(json)
        return data
Example #11
    def test_shortcut_absent_when_condarc_set(self):
        from menuinst.win32 import dirs as win_locations
        user_mode = 'user' if exists(join(sys.prefix, u'.nonadmin')) else 'system'
        shortcut_dir = win_locations[user_mode]["start"]
        shortcut_dir = join(shortcut_dir, "Anaconda{0} ({1}-bit)"
                                          "".format(sys.version_info.major, context.bits))

        prefix = make_temp_prefix(str(uuid4())[:7])
        shortcut_file = join(shortcut_dir, "Anaconda Prompt ({0}).lnk".format(basename(prefix)))
        assert not isfile(shortcut_file)

        # set condarc shortcuts: False
        run_command(Commands.CONFIG, prefix, "--set shortcuts false")
        stdout, stderr = run_command(Commands.CONFIG, prefix, "--get", "--json")
        json_obj = json_loads(stdout)
        # assert json_obj['rc_path'] == join(prefix, 'condarc')
        assert json_obj['get']['shortcuts'] is False

        try:
            with make_temp_env("console_shortcut", prefix=prefix):
                # including shortcuts: False from condarc should not get shortcuts installed
                assert package_is_installed(prefix, 'console_shortcut')
                assert not isfile(shortcut_file)

                # make sure that cleanup without specifying --shortcuts still removes shortcuts
                run_command(Commands.REMOVE, prefix, 'console_shortcut')
                assert not package_is_installed(prefix, 'console_shortcut')
                assert not isfile(shortcut_file)
        finally:
            rmtree(prefix, ignore_errors=True)
            if isfile(shortcut_file):
                os.remove(shortcut_file)
Example #12
 def behavior(self):
     while True:
         # if not connected --> try to connect
         if self.sock is None:
             self._connect()
         # if not connected --> sleep for a bit
         if self.sock is None:
             yield self.forDuration(0.1)
             continue
         # receive an update / skip
         try:
             msg = self.sock.recv(1024)
         except SocketError, se:
             # If there was no data on the socket --> not a real error, else
             if se.errno != 35:
                 progress("Connection failed: " + str(se))
                 self.sock.close()
                 self.sock = None
             yield
             continue
         ts = self.app.now
         dic = json_loads(msg)
         assert type(dic) is dict
         dic = dic.items()
         dic.sort()
         progress("Message received at: " + str(ts))
         for k, v in dic:
             progress("   %s : %s" % (k, repr(v)))
         yield self.forDuration(0.3)
Example #13
  def behavior( self ):
    """
    Plan main loop    
    """
    while True:
      # If no socket set up --> activate the ensureConnection sub-behavior to fix
      if self.sock is None:
        yield self.ensureConnection()
      msg = self._nextMessage()
      # If no message --> sleep a little and try again
      if len(msg) == 0:
          yield self.forDuration(0.3)
          continue
      # Parse the message
      dic = json_loads(msg)
      ts = self.app.now
      self.lastSensor = (ts, dic['f'], dic['b'])
      print 'JSON:', str(dic)
      # Check for field tags
      #if dic.has_key(""):
	  #TODO: check for world points
      if dic.has_key("w"):
        self.lastWaypoints = (ts,dic['w'])
      # Make sure to allow others to get the CPU
      yield
Example #14
    def handle_trent_request(self):
        target = request.headers.get("X-Amz-Target")
        data = request.get_data()
        verifier = AWSSigV4Verifier(
            request_method=request.method, uri_path=request.path,
            query_string=request.query_string, headers=request.headers,
            body=data, region="us-west-2", service="kms",
            key_mapping=self.keymap)
        try:
            verifier.verify()
        except InvalidSignatureError as e:
            # Uncomment if debugging signature issues.
            # print_exc()
            return make_response(json_dumps(
                {"__type": "AuthFailure",
                 "message": "Invalid signature"}), UNAUTHORIZED)

        try:
            params = json_loads(data)
        except ValueError as e:
            return make_response(json_dumps(
                {"__type": "MalformedQueryString",
                 "message": "Could not decode JSON data"}), BAD_REQUEST)

        if target == "TrentService.GenerateDataKey":
            return self.handle_trent_generate_data_key_request(params)
        elif target == "TrentService.Decrypt":
            return self.handle_trent_decrypt_request(params)

        print("Unknown action %s: params=%r" % (target, params), file=stderr)
        
        return make_response(json_dumps(
            {"__type": "InvalidAction",
             "message": "Unknown action %s" % target}), BAD_REQUEST)
Example #15
    def handle_trent_decrypt_request(self, params):
        try:
            ciphertext_blob = params["CiphertextBlob"]
            encryption_context = params.get("EncryptionContext", {})
        except KeyError as e:
            return make_response(json_dumps(
                {"__type": "MissingParameter",
                 "message": "Missing parameter: %s" % e.args[0]}), BAD_REQUEST)

        try:
            encrypt_params = json_loads(b64decode(ciphertext_blob))
        except ValueError as e:
            return make_response(json_dumps(
                {"__type": "InvalidParameterValue",
                 "message": "Invalid ciphertext blob"}), BAD_REQUEST)

        try:
            key_id = encrypt_params["KeyId"]
            encryption_context = encrypt_params["EncryptionContext"]
            plaintext = encrypt_params["Plaintext"]
        except KeyError as e:
            return make_response(json_dumps(
                {"__type": "MissingParameter",
                 "message": "Missing parameter: %s" % e.args[0]}), BAD_REQUEST)

        # Plaintext is already base64 encoded.
        return make_response(json_dumps(
            {"KeyId": key_id, "Plaintext": plaintext}), OK)
Example #16
 def behavior( self ):
   while True:
     # if not connected --> try to connect
     if self.sock is None:
       self._connect()
     # if not connected --> sleep for a bit
     if self.sock is None:
       yield self.forDuration(0.1)
       continue
     # receive an update / skip
     try:
       msg = self.sock.recv(1024)
     except SocketError, se:
       # If there was no data on the socket --> not a real error, else
       if se.errno != 35:
         progress("Connection failed: "+str(se))
         self.sock.close()
         self.sock = None
       yield
       continue
     ts = self.app.now
     try:
       dic = json_loads(msg)
     except ValueError, ve:
         progress("Parsing Error %s" % repr(msg) + str(ve))
Example #17
def get_group_info_from_address(netloc, usessl, address, token):
    '''Get the group information from an email address for the list.

:param str netloc: The name of the GroupServer host to check with.
:param bool usessl: ``True`` if TLS should be used to communicate with the
                    server.
:param str address: The email address of the list (group) to be checked.
:param str token: The authentication token to pass to GroupServer.
:raises NotOk: If the server (``hostname``) responds with something
                   other than ``200``.
:return: Information about the group.
:rtype: ``dict``

Get information about a group, by sending the parameters to
:const:`GROUP_EXISTS_URI`. The server **must** respond with a JSON object,
and this is converted to a Python object before being returned
(see :func:`json.loads`).
'''
    fields = {'form.email': address, 'form.token': token,
              'form.actions.check': 'Check'}
    status, reason, data = post_multipart(netloc, GROUP_EXISTS_URI,
                                          fields, usessl=usessl)
    if status != HTTP_OK:
        raise NotOk('%s (%d <%s>)' % (reason, status, netloc))
    retval = json_loads(data)
    return retval
Example #18
 def put(self, request, id=0):
     print 'put'
     if not id:
         return invalid('error: update customer: no id')
     data = json_loads(request.body)
     if not data:
         return invalid('not data')
     print data
     try:
         print 'updating'
         customer = Customers.objects.get(id=id)
         user = User.objects.get(id=customer.user.id)
         user.email = data['email']
         user.is_active = data['active']
         user.username = data['username']
         user.last_name = data['lastname']
         user.first_name = data['firstname']
         if data['password']:
             user.set_password(data['password'])
         user.save()
         customer.user = user
         customer.phone = data['phone']
         customer.address = data['address']
         customer.save()
         return self.Response.ok(customer)
     except Exception:
         return invalid('error: update customer')
Example #19
def get_mst():

    points = map(tuple, json_loads(request.form.get('data')))

    return json_dumps(
        mst.mst(
            mst.fullmap_of_pointslist(points)))
Example #20
File: passmgr.py Project: zepto/lessinfo
def crypt_to_dict_sha256(crypt_data: str, password: str = '',
                  skip_invalid: bool = True) -> dict:
    """ Decrypts crypt_data and returns the json.loads dictionary.
    If skip_invalid is True then skip decryption of data if the password is
    invalid.

    """

    while True:
        # Get the password to decrypt the data.
        if not password:
            password = getpass.getpass('Enter the password for decryption: ')

        # Convert the data to a bytes object and decrypt it.
        json_data = decrypt_sha256(password, str_to_bytes_sha256(crypt_data))

        # Load the decrypted data with json and return the resulting
        # dictionary.
        try:
            return json_loads(json_data)
        except:
            # Don't loop forever unless asked to.
            if skip_invalid:
                print('Skipping, because of invalid password.')
                return {}
            else:
                print('Invalid password.  Please try again.')
                password = ''
                continue
Example #21
    def post(self, request, project_id, subset_id):
        """
        Updates the subset filter based on the data entered by the user

        Parameters
        ----------
        request : django.http.HttpRequest
            Object representing the request.
        project_id : int
            Identifies the project in the database.
        subset_id : int
            Identifies the subset in the database.

        Returns
        -------
        django.http.HttpResponse
            Rendered template
        """
        context = self.get_context_data(project_id, subset_id)
        subset = context.get('subset')

        if subset:
            data = request.POST
            if data['filters'] != '-1':
                subset.filters = json_loads(data['filters'])
                subset.save()

            messages.success(self.request, "The subset has been updated.")

        return self.render_to_response(context)
Example #22
def getdescendants(request, code):
    params = {}
    results = {}
    
    language = request.LANGUAGE_CODE.lower()
    if language == 'pt-br':
        language = 'pt'

    for lang in DECS_LANGS:
        params[lang] = urllib.urlencode({
            'tree_id': code or '',
            'lang': lang,
            })

        resource = urllib.urlopen(settings.DECS_SERVICE, params[lang])

        tree = ElementTree()
        tree.parse(resource)

        descendants = tree.findall('decsws_response/tree/descendants/term_list[@lang="%s"]/term' % lang)
        for d in descendants:
            if d.attrib['tree_id'] in results:
                results[ d.attrib['tree_id'] ] += ',"%s":"%s"' % (lang,d.text.capitalize())
            else:
                results[ d.attrib['tree_id'] ] = '"%s":"%s"' % (lang,d.text.capitalize())

    json = '[%s]' % ','.join((JSON_MULTILINGUAL_TERM % (id,desc) for desc,id in results.items()))
    json_response = json_loads(json)
    json_response.sort(key=lambda x: x['fields']['description'][language])
        
    return HttpResponse(json_dumps(json_response), mimetype='application/json')
Example #23
def get_data():
    d = engine.execute(
            "select payload from " +\
            CONTROLLER_ID +\
            "_heater_settings_schedule order by id desc limit 1").scalar()
    try:
        d = d.replace('\\','').replace('\'','"')
    except AttributeError:
        return {'week':[0,0,0,0,0,0,0],
                'work':[{'from':[0,0],'to':[0,0],'temp':0}],
                'free':[{'from':[0,0],'to':[0,0],'temp':0}],
                'other':0}
    if d[0] == '"':
        d = d[1:-1]
    template = json_loads(d)
    #print(template)
    #template = {'week'     : [0,0,0,0,0,1,1],
    #            'work' :
    #                [{'from':[ 6,30], 'to':[ 8, 0],'temp': 21.0},
    #                 {'from':[14,30], 'to':[17, 0],'temp': 20.5},
    #                 {'from':[19,30], 'to':[21,39],'temp': 20.5}],
    #            'free' :
    #                [{'from':[ 9, 0], 'to':[12, 0],'temp': 21.0},
    #                 {'from':[14, 0], 'to':[21, 0],'temp': 22.0}],
    #            'other' : 17.0,
    #            'override' : {
    #                'temp'     : 25.0,
    #                'duration' : 30,
    #                'start'    : [ 9,54]}}

    return template
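The cleanup at the top of get_data handles a schedule payload stored as an escaped, quoted string: backslashes are stripped, single quotes become double quotes, and a wrapping pair of quotes is removed before json_loads. A standalone sketch of the same cleanup on an illustrative stored value:

from json import loads as json_loads

# Illustrative stored payload: escaped JSON wrapped in an extra pair of quotes.
d = '"{\\"week\\": [0, 0, 0, 0, 0, 1, 1], \\"other\\": 17.0}"'

d = d.replace('\\', '').replace('\'', '"')
if d[0] == '"':
    d = d[1:-1]
template = json_loads(d)
assert template['week'] == [0, 0, 0, 0, 0, 1, 1]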
Example #24
 def test_search_gawk_on_win(self):
     with make_temp_env() as prefix:
         stdout, stderr = run_command(Commands.SEARCH, prefix, "gawk", "--json")
         json_obj = json_loads(stdout.replace("Fetching package metadata ...", "").strip())
         assert "gawk" in json_obj.keys()
         assert "m2-gawk" in json_obj.keys()
         assert len(json_obj.keys()) == 2
Example #25
File: s3.py Project: dacut/rolemaker
    def decrypt_data_key_kms(self, s3_key_name, encrypted_data_key,
                             material_desc):
        if material_desc is None:
            raise EncryptionError("%s: Missing %s metadata entry" %
                                  (s3_key_name, _x_amz_matdesc))

        try:
            material_desc = json_loads(material_desc)
        except:
            raise EncryptionError(
                "%s: Metadata entry %s is not valid JSON" %
                (s3_key_name, _x_amz_matdesc))
        
        cmk_id = material_desc.get(_kms_cmk_id)
        if cmk_id is None:
            raise EncryptionError(
                "%s: Metadata entry %s does not have a %s key" %
                (s3_key_name, _x_amz_matdesc, _kms_cmk_id))

        try:
            encryption_context = {_kms_cmk_id: cmk_id}
            decrypt_response = self.kms.decrypt(
                encrypted_data_key, encryption_context=encryption_context)
            return decrypt_response['Plaintext']
        except BotoServerError as e:
            raise EncryptionError(
                "%s: KMS decryption failure: %s" % (s3_key_name, e))
Example #26
def shooter_downloader(file_path):
    """ see https://docs.google.com/document/d/1ufdzy6jbornkXxsD-OGl3kgWa4P9WO5NZb6_QYZiGI0/preview
    """
    resp = request(SHOOTER_URL,
                   data={'filehash': shooter_hash(file_path), 'pathinfo': basename(file_path), 'format': 'json'})
    try:
        r_json = json_loads(resp)
    except:
        print '射手网没有找到字幕'.encode(getfilesystemencoding())  # "Shooter found no subtitles"
        return False
    else:
        f_name, file_extension = splitext(file_path)
        result = []
        for info in r_json:
            for f_info in info['Files']:
                # skip subtitles in idx/sub format
                if f_info['Ext'] not in ('sub', 'idx'):
                    result.append((f_info['Link'], f_info['Ext']))
        if len(result) < 1:
            print '射手网没有找到字幕'.encode(getfilesystemencoding())  # "Shooter found no subtitles"
            return False
        elif len(result) == 1:
            urlretrieve(result[0][0], filename='{}.{}'.format(f_name, result[0][1]))
            print '字幕下载完成'.encode(getfilesystemencoding())  # "subtitle download finished"
        else:
            for idx, value in enumerate(result):
                urlretrieve(value[0], filename='{}_{}.{}'.format(f_name, idx + 1, value[1]))
                print '第{}个字幕下载完成'.format(idx + 1).encode(getfilesystemencoding())  # "subtitle #{} downloaded"
        return True
Example #27
 def _make_image(self, image, info, remote=False):
     img_obj = Image(image, backend=self, remote=remote)
     if remote:
         return img_obj
     name = info['Id']
     img_obj.input_name = image
     img_obj.name = image
     img_obj.config = info
     img_obj.backend = self
     img_obj.id = name
     img_obj.registry, img_obj.repo, img_obj.image, img_obj.tag, _ = Decompose(image).all
     img_obj.repotags = info['RepoTags']
     img_obj.created = info['Created']
     img_obj.size = info.get('VirtualSize', None)
     img_obj.virtual_size = info.get('VirtualSize', None)
     img_obj.original_structure = info
     img_obj.deep = True
     img_obj.labels = info.get('Labels', None)
     img_obj.version = img_obj.get_label("Version")
     img_obj.release = img_obj.get_label("Release")
     ostree_manifest = self.syscontainers.get_manifest(image)
     if ostree_manifest:
         ostree_manifest = json_loads(ostree_manifest)
     img_obj.digest = None if ostree_manifest is None else ostree_manifest.get('Digest') or ostree_manifest.get('digest')
     img_obj.os = img_obj.get_label("Os")
     img_obj.arch = img_obj.get_label("Arch")
     img_obj.graph_driver = None
     return img_obj
Example #28
 def put(self, request, id):
     print "put"
     data = json_loads(request.body)
     print data
     if not data:
         return invalid("not data")
     try:
         print "updating"
         order = Orders.objects.get(id=id)
         print "1"
         Orders_Products.objects.filter(order=order).delete()
         print "2"
         for prd in data["products"]:
             print "===ID===", prd["id"]
             product = Products.objects.get(id=prd["id"])
             # product = Products.objects.get(id=1)
             print "3"
             Orders_Products.objects.create(
                 order=order, product=product, qty=prd["qty"], price=prd["price"], total_price=prd["total_price"]
             )
             print "4"
         print "return"
         return JsonResponse({"code": 200, "message": "OK", "data": {}})
     except Exception:
         return invalid("error: update products in order")
Example #29
File: base.py Project: gqueiroz/scigws
    def __init__(self, key_to_list_mapping):
        if isinstance(key_to_list_mapping, QueryDict):
            to_iterate, key_to_list_mapping = key_to_list_mapping, {}
            for key, value in to_iterate.iteritems():
                key_to_list_mapping[key.lower()] = [i.lower() for i in to_iterate.getlist(key)]
        if isinstance(key_to_list_mapping, basestring):
            # TODO: XML Request
            from xmltodict import parse as xml_to_dict
            from json import dumps as json_dumps, loads as json_loads
            data = xml_to_dict(key_to_list_mapping)
            dct = json_loads(json_dumps(data))

            def iterate_dict(nested):
                for key, value in nested.iteritems():
                    if isinstance(value, dict):
                        for inner_key, inner_value in iterate_dict(value):
                            yield inner_key, inner_value
                    else:
                        yield key, value

            list_elements = list(iterate_dict(dct))
            k = dct.keys()[0].lower()
            request = k if ':' not in k else k.split(':')[1]
            key_to_list_mapping = {'request': [request]}
            for element in list_elements:
                if element[0].lower() == "@service" or element[0].lower() == "@version":
                    key_to_list_mapping[element[0][1:].lower()] = [element[1].lower()]
                elif not element[0].startswith('@'):
                    e = element[0].lower()
                    if ':' in e:
                        e = e.split(':')[1].lower()
                    key_to_list_mapping[e] = [element[1].lower()]
        super(OWSDict, self).__init__(key_to_list_mapping)
        self._is_valid_ows_request()
        self.coverage_id_formatter()
Example #30
File: passmgr.py Project: zepto/lessinfo
    def _read_file(self, filename: str, password: str = '') -> dict:
        """ Reads the data from filename and returns the account dictionary,
        the encrypted master key, and the decrypted master key.

        """

        # Read from the file if it exists.
        with pathlib_path(filename) as pass_file:
            lzma_data = pass_file.read_bytes() if pass_file.is_file() else b''

        # Get the json data out of the file data, or an empty json dict if
        # the file was empty.
        if lzma_data:
            json_data = lzma_decompress(lzma_data).decode()
        else:
            json_data = '{}'

        accounts_dict = json_loads(json_data)

        # Pop the master key out of the accounts dictionary so it won't be
        # operated on or listed.  Also if no master key is found, create
        # one.
        encrypted_key = bytes.fromhex(accounts_dict.pop(self.MASTER_KEY_DIGEST, ''))

        if not encrypted_key:
            if not password:
                # Get the password to encrypt the master key.
                password = self._ask_pass('password')
        else:
            # Get the password to decrypt the key.
            password = self._ask_pass('password', verify=False)

        return CryptData(password, encrypted_key), accounts_dict
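The read path in _read_file is lzma-compressed JSON: the file bytes are decompressed, decoded, and handed to json_loads, with '{}' as the fallback for a missing or empty file. A minimal sketch of the matching write/read round trip (the file name and payload are illustrative):

from json import dumps as json_dumps, loads as json_loads
from lzma import compress as lzma_compress, decompress as lzma_decompress
from pathlib import Path

accounts = {"example.com": {"username": "alice"}}  # illustrative payload
path = Path("accounts.lzma")                       # illustrative file name

# Write: JSON text -> bytes -> lzma container.
path.write_bytes(lzma_compress(json_dumps(accounts).encode()))

# Read it back the same way _read_file does.
lzma_data = path.read_bytes() if path.is_file() else b''
json_data = lzma_decompress(lzma_data).decode() if lzma_data else '{}'
assert json_loads(json_data) == accounts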
Example #31
numfig = True

numfig_format = {
    'figure': 'Figure %s: ',
    'table': 'Table %s: ',
    'code-block': 'Listing %s',
    'section': 'Section %s'
}

# -- Options for HTML output -------------------------------------------------

html_context = {}
ctx = ROOT / "context.json"
if ctx.is_file():
    html_context.update(json_loads(ctx.open("r").read()))

if (ROOT / "_theme").is_dir():
    html_theme_path = ["."]
    html_theme = "_theme"
    html_theme_options = {
        "logo_only": True,
        "home_breadcrumbs": True,
        "vcs_pageview_mode": "blob",
    }
    html_css_files = [
        "theme_overrides.css",
    ]
else:
    html_theme = "alabaster"
Example #32
def main(args):

    if len(args) != 2:
        print(" ")
        print(" Usage : {} <configuration_yaml>".format(args[0]))
        return

    print('job started')

    #
    # Get the YAML config loaded:
    #

    with open(args[1], mode='r') as yaml_file:
        params, steps = load_config(yaml_file.read())

    if params is None:
        print("Bad YAML load")
        return

    target_project = params['TARGET_PROJECT']
    dataset_id = params['DATASET']
    data_file_path = params['DATA_FILE_PATH']
    dataset_metadata_file = params['DATASET_METADATA_FILE']
    install_list = params['INSTALL_LIST']

    target_client = bigquery.Client(project=target_project)

    #
    # Step 0: Delete dataset if it exists (for updates until versioning is in place)
    #

    if 'delete_existing_dataset' in steps:
        if bq_dataset_exists(target_client, dataset_id):
            success = delete_all_views(target_client, target_project, dataset_id)
            if not success:
                print("delete dataset step 1 failed")
                return
            success = delete_dataset(target_client, dataset_id)
            if not success:
                print("delete dataset step 2 failed")
                return

    #
    # Step 1: Do we need to create the dataset in the target project?
    #

    if 'create_dataset' in steps:
        # Where is the dataset description file:
        dataset_metadata_file_full_path = "{}/{}".format(data_file_path, dataset_metadata_file)
        with open(dataset_metadata_file_full_path, mode='r') as dataset_metadata:
            ds_meta_dict = json_loads(dataset_metadata.read())

        success = create_dataset(target_client, target_project, dataset_id, ds_meta_dict)
        if not success:
            print("create_dataset failed")
            return

    if 'delete_all_views' in steps:
        if bq_dataset_exists(target_client, dataset_id):
            success = delete_all_views(target_client, target_project, dataset_id)
            if not success:
                print("deleting all views failed")
                return

    if 'install_views' in steps:
        for mydict in install_list:
            for view_name, view_dict in mydict.items():
                print("creating view: {}".format(view_name))
                sql_format_file = view_dict["sql"]
                metadata_file = view_dict["metadata"]
                table_list = view_dict["table_list"]
                metadata_file_full_path = "{}/{}".format(data_file_path, metadata_file)
                sql_format_file_full_path = "{}/{}".format(data_file_path, sql_format_file)
                with open(sql_format_file_full_path, mode='r') as sql_format_file:
                    sql_format = sql_format_file.read()
                # use list as argument to format:
                print(table_list)
                view_sql = sql_format.format(*table_list)
                with open(metadata_file_full_path, mode='r') as view_metadata_file:
                    view_schema = json_loads(view_metadata_file.read())
                success = create_view(target_client, target_project, dataset_id, view_name, view_schema, view_sql)
                if not success:
                    print("shadow_datasets failed")
                    return

    print('job completed')
Example #33
def listener_bitshares(selection=None):
    """
    primary listener event loop
    :param int(selection) or None: user choice for demonstration of listener
    :run forever:
    """
    # get node list from github repo for bitshares ui staging; write to file
    nodes = bitshares_nodes()
    options = raw_operations()
    json_ipc(doc="nodes.txt", text=json_dumps(nodes))
    # create a subfolder for the database; write to file
    create_database()
    # initialize block number
    last_block_num = curr_block_num = 0
    # bypass user input... gateway transfer ops
    act = print_op
    if selection is None:
        selection = 0
        act = withdraw
    # spawn subprocesses for gathering streaming consensus irreversible block number
    spawn_block_num_processes()
    # continually listen for last block["transaction"]["operations"]
    print(it("red", "\nINITIALIZING WITHDRAWAL LISTENER\n\n"))
    while True:
        try:
            # get the irreversible block number reported by each maven subprocess
            block_numbers = []
            for maven_id in range(BLOCK_NUM_MAVENS):
                block_num = json_ipc(doc=f"block_num_maven_{maven_id}.txt")[0]
                block_numbers.append(block_num)
            # the current block number is the statistical mode of the mavens
            # NOTE: may throw StatisticsError when no mode
            curr_block_num = mode(block_numbers)
            # print(curr_block_num)
            json_ipc(doc=f"block_number.txt",
                     text=json_dumps([
                         curr_block_num,
                     ]))
            # if the irreversible block number has advanced
            if curr_block_num > last_block_num:
                print(
                    "\033[F",  # go back one line
                    it("blue", "BitShares Irreversible Block"),
                    it("yellow", curr_block_num),
                    time.ctime()[11:19],
                    it("blue", int(time.time())),
                )
                if last_block_num > 0:  # not on first iter
                    # spawn some new mavens to get prospective block data
                    start = last_block_num + 1
                    stop = curr_block_num + 1
                    spawn_block_processes(start, stop)
                    # initialize blocks as a dict of empty transaction lists
                    blocks = {}
                    for block_num in range(start, stop):
                        blocks[block_num] = []
                    # get block transactions from each maven subprocesses
                    for maven_id in range(BLOCK_MAVENS):
                        # print(maven_id)
                        maven_blocks = json_ipc(
                            doc=f"block_maven_{maven_id}.txt")
                        # for each block that has past since last update
                        for block_num in range(start, stop):
                            # print(block_num)
                            # get the maven's version of that block from the dictionary
                            # NOTE: may throw KeyError, TODO: find out why?
                            maven_block = maven_blocks[str(block_num)]
                            # append that version to the list
                            # of maven opinions for that block number
                            blocks[block_num].append(json_dumps(maven_block))
                    # get the mode of the mavens for each block in the blocks dict
                    # NOTE: may throw StatisticsError when no mode
                    # for example half the nodes are on the next block number
                    blocks = {
                        k: json_loads(mode(v))
                        for k, v in blocks.items()
                    }
                    # triple nested:
                    # for each operation, in each transaction, on each block
                    for block_num, transactions in blocks.items():
                        for item, trx in enumerate(transactions):
                            for op in trx["operations"]:
                                # add the block and transaction numbers to the operation
                                op[1]["block"] = block_num
                                op[1]["trx"] = item + 1
                                op[1]["operation"] = (op[0], options[op[0]])
                                # spin off withdrawal act so listener can continue
                                process = Process(target=act, args=(op, ))
                                process.start()
                last_block_num = curr_block_num
            time.sleep(6)
        # statistics and key errors can be safely ignored, restart loop
        except (StatisticsError, KeyError):
            continue
        # in all other cases provide stack trace
        except Exception as error:
            print("\n\n", it("yellow", error), "\n\n")
            print(traceback.format_exc(), "\n")
            continue
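The consensus step above uses json_dumps/json_loads to take a statistical mode over dictionaries: each maven's block is serialized to a string (hashable and comparable), statistics.mode picks the most common string, and json_loads turns the winner back into a dict. A minimal sketch of that trick in isolation:

from json import dumps as json_dumps, loads as json_loads
from statistics import mode

# Three maven opinions about the same block; two of them agree (values are illustrative).
opinions = [{"transactions": []}, {"transactions": []}, {"transactions": [{"operations": []}]}]

serialized = [json_dumps(block) for block in opinions]  # serialize so the values are hashable
consensus = json_loads(mode(serialized))                # mode over strings, then parse the winner
assert consensus == {"transactions": []}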
Example #34
def test_dumps_twitter_ujson(benchmark):
    benchmark.group = "twitter.json serialization"
    benchmark.extra_info["lib"] = "ujson"
    data = read_fixture_obj("twitter.json.xz")
    benchmark.extra_info["correct"] = json_loads(ujson_dumps(data)) == data
    benchmark(ujson_dumps, data)
Example #35
 def abrir_json(self, arquivo: str) -> dict:
     # Open a UTF-8 JSON file ("abrir_json" = "open_json") and return the parsed object.
     with open(arquivo, 'r', encoding='utf8') as a:
         txt = a.read()
     print(txt)
     return json_loads(txt)
Example #36
    def get_all_pachi_moves(self):
        buff = []

        sleep(.01)
        while not self.stderr_queue.empty():
            while not self.stderr_queue.empty():
                buff.append(self.stderr_queue.get().strip())
            sleep(.01)

        position_evaluation = Position()
        found = False
        influence = [[0 for i in range(self.size)] for j in range(self.size)]
        number_coordinate = self.size

        for err_line in buff:
            if "fbook match" in err_line:
                position_evaluation["book move"] = True
            if " |" in err_line:

                try:
                    line = err_line.split(" |")[3].strip()
                    line = line.split(" ")
                    letters = "ABCDEFGHJKLMNOPQRST"[:self.size]
                    for value, letter in zip(line, letters):
                        i, j = gtp2ij(letter + str(number_coordinate))
                        if value in ("X", "x"):
                            influence[i][j] = 1
                        elif value in ("O", "o"):
                            influence[i][j] = 2

                    number_coordinate -= 1
                except:
                    pass
            if "Score Est: " in err_line:
                position_evaluation["estimated score"] = err_line.split(
                    "Score Est: ")[1]
            if '{"frame": ' in err_line:
                json = json_loads(err_line)
                move = json["frame"]["can"][0]
                position_evaluation["best move"] = move[0].keys()[0]
                if type(move[0].values()[0]) == type(0.5):
                    winrate = move[0].values()[0]
                    position_evaluation["win rate"] = str(
                        100 * float(winrate)) + "%"
                elif type(move[0].values()[0]) == type([0, 1]):
                    winrate, playouts = move[0].values()[0]
                    position_evaluation["win rate"] = str(
                        100 * float(winrate)) + "%"

            if '{"move": ' in err_line:
                found = True
                # this line is the JSON report line
                # example: {"move": {"playouts": 5064, "extrakomi": 0.0, "choice": "H8", "can": [[{"H8":0.792},{"F2":0.778},{"G6":0.831},{"G7":0.815}], [{"K14":0.603},{"L13":0.593},{"M13":0.627},{"K13":0.593}], [{"M15":0.603},{"L13":0.724},{"M13":0.778},{"K13":0.700}], [{"M14":0.627},{"M15":0.647},{"N15":0.596}]]}}
                json = json_loads(err_line)
                position_evaluation["playouts"] = json["move"]["playouts"]
                for move in json["move"]["can"]:
                    if not move:
                        continue
                    variation = Variation()
                    first_move = move[0].keys()[0]
                    if type(move[0].values()[0]) == type(0.5):
                        winrate = move[0].values()[0]
                        variation["first move"] = first_move
                        variation["win rate"] = str(100 * float(winrate)) + "%"
                    elif type(move[0].values()[0]) == type([0, 1]):
                        winrate, playouts = move[0].values()[0]
                        variation["first move"] = first_move
                        variation["win rate"] = str(100 * float(winrate)) + "%"
                        variation["playouts"] = str(playouts)
                    sequence = ""
                    for follow_up in move:
                        sequence += follow_up.keys()[0] + " "
                    variation["sequence"] = sequence.strip()
                    position_evaluation['variations'].append(variation)
        if not found and ("book move" not in position_evaluation):
            log("\a")
            log("\n")
            log("===========================================")
            log("Could not find any data in Pachi log")
            log("Please double check that Pachi command line")
            log("includes parameter: reporting=json")
            log("===========================================")
            log("\n")
        #print "len(influence)",len(influence),number_coordinate
        position_evaluation["influence"] = influence

        return position_evaluation
Example #37
    def get_score_table(self):
        json = json_loads(self.json)
        if 'hands' in json and 'deltas' not in json['hands'][-1]:
            del json['hands'][-1]

        return json
Example #38
def test_dumps_canada_json(benchmark):
    benchmark.group = "canada.json serialization"
    benchmark.extra_info["lib"] = "json"
    data = read_fixture_obj("canada.json.xz")
    benchmark.extra_info["correct"] = json_loads(json_dumps(data)) == data
    benchmark(json_dumps, data)
Example #39
def imagescore(json, userid, accesstoken):
    # Fetch the user info
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        'Authorization': 'Bearer ' + accesstoken
    }
    user_url_to_get = 'https://osu.ppy.sh/api/v2/users/' + str(userid) + '/osu'
    user_url_get_result = get(url=user_url_to_get, headers=headers)
    user_get_result = user_url_get_result.text
    userjs = json_loads(user_get_result)
    # print(user_get_result)

    # Composite the background with the score template
    im = Image.open('bg.jpg')
    im = im.filter(ImageFilter.GaussianBlur(radius=3))
    im = im.point(lambda p: p * 0.5)
    im = im.resize((1366, 768))
    score_model = Image.open('image/image_model/score.png')
    im.paste(score_model, (0, 0), score_model)

    # New drawing context
    draw = ImageDraw.Draw(im)

    # Regular text fields
    js = [
        (json[0]['beatmapset']['title'], """fonts/comic sans ms.ttf""", 27,
         927, 85),
        (json[0]['beatmapset']['artist'], """fonts/comic sans ms.ttf""", 21,
         927, 120),
        (json[0]['beatmap']['cs'], """fonts/comic sans ms.ttf""", 24, 1249,
         393),
        (json[0]['beatmap']['ar'], """fonts/comic sans ms.ttf""", 24, 1249,
         452),
        (json[0]['beatmap']['accuracy'], """fonts/comic sans ms.ttf""", 24,
         1249, 513),
        (json[0]['beatmap']['drain'], """fonts/comic sans ms.ttf""", 24, 1249,
         570),
        (json[0]['beatmap']['bpm'], """fonts/comic sans ms.ttf""", 24, 1032,
         453),
        (json[0]['beatmap']['id'], """fonts/comic sans ms.ttf""", 20, 1035,
         513),
        (json[0]['beatmap']['beatmapset_id'], """fonts/comic sans ms.ttf""",
         20, 1035, 571),
        (json[0]['statistics']['count_300'], """fonts/comic sans ms.ttf""", 20,
         615, 515),
        (json[0]['statistics']['count_100'], """fonts/comic sans ms.ttf""", 20,
         780, 515),
        (json[0]['statistics']['count_50'], """fonts/comic sans ms.ttf""", 20,
         615, 600),
        (json[0]['statistics']['count_miss'], """fonts/comic sans ms.ttf""",
         20, 780, 600),
        (json[0]['beatmap']['difficulty_rating'],
         """fonts/comic sans ms.ttf""", 23, 561, 210),
        (json[0]['pp'], """fonts/comic sans ms.ttf""", 32, 1198, 677),
        (json[0]['user']['username'], """fonts/comic sans ms.ttf""", 31, 337,
         606),
    ]
    for (js, font, size, x, y) in js:
        a = str(js)
        # a = '{:,}'.format(a)
        width, height = ImageFont.truetype(font, size).getsize(a)
        # print(width, height)
        draw.text((x - (width / 2), y - (height / 2)),
                  a,
                  fill=(255, 255, 255),
                  font=ImageFont.truetype(font, size))

    # Score
    a = json[0]['score']
    a = '{:,}'.format(a)
    width, height = ImageFont.truetype("fonts/lucidahandwriting.ttf",
                                       49).getsize(a)
    # print(width, height)
    draw.text((1160 - (width / 2), 235 - (height / 2)),
              a,
              fill=(255, 255, 255),
              font=ImageFont.truetype("fonts/lucidahandwriting.ttf", 49))

    # Map length
    a = json[0]['beatmap']['total_length']
    minute = int(a / 60)
    second = a % 60
    a = str(minute) + ":" + str(second)
    width, height = ImageFont.truetype("fonts/comic sans ms.ttf",
                                       24).getsize(a)
    # print(width, height)
    draw.text((1033 - (width / 2), 393 - (height / 2)),
              a,
              fill=(255, 255, 255),
              font=ImageFont.truetype("fonts/comic sans ms.ttf", 24))

    # Mods
    mods = json[0]['mods']
    x = 525
    y = 220
    for i in mods:
        img = 'image/mods/{}.png'.format(i)
        img = Image.open(img)
        im.paste(img, (x, y), img)
        x = x + 75
    # print(mods)

    # Player pp and global rank
    # userjs = json_load(open('user.json'))
    pp = int(userjs['statistics']['pp'])
    rank = userjs['statistics']['global_rank']
    a = 'pp:{}  #{}'.format(pp, rank)
    width, height = ImageFont.truetype("fonts/comic sans ms.ttf",
                                       30).getsize(a)
    # print(width, height)
    draw.text((342 - (width / 2), 680 - (height / 2)),
              a,
              fill=(255, 255, 255),
              font=ImageFont.truetype("fonts/comic sans ms.ttf", 30))

    # Player avatar
    avatarurl = userjs['avatar_url']
    avatar = get(avatarurl)
    with open('image/avatar.png', 'wb') as f:
        f.write(avatar.content)
    avatar = Image.open('image/avatar.png')
    avatar = avatar.resize((140, 140))

    border = Image.open('image/image_model/avatar-a.png').convert('L')
    invert = ImageOps.invert(border)
    invert.save('image/image_model/avatar-b.png')
    avatar.putalpha(invert)
    # avatar.save('image/avatar.png', 'PNG', qulity=100)

    im.paste(avatar, (41, 578), avatar)
    remove('image/avatar.png')

    # Rank icon
    rank = str(json[0]['rank'])
    if rank == 'XH':
        rank = Image.open('image/ranking/[email protected]')
    elif rank == 'X':
        rank = Image.open('image/ranking/[email protected]')
    elif rank == 'SH':
        rank = Image.open('image/ranking/[email protected]')
    elif rank == 'S':
        rank = Image.open('image/ranking/[email protected]')
    elif rank == 'A':
        rank = Image.open('image/ranking/[email protected]')
    elif rank == 'B':
        rank = Image.open('image/ranking/[email protected]')
    elif rank == 'C':
        rank = Image.open('image/ranking/[email protected]')
    elif rank == 'D':
        rank = Image.open('image/ranking/[email protected]')
    else:
        pass
    rank = rank.resize((450, 450))
    im.paste(rank, (55, 50), rank)

    # Show the image
    im.show()
Example #40
def test_dumps_citm_catalog_rapidjson(benchmark):
    benchmark.group = "citm_catalog.json serialization"
    benchmark.extra_info["lib"] = "rapidjson"
    data = read_fixture_obj("citm_catalog.json.xz")
    benchmark.extra_info["correct"] = json_loads(rapidjson_dumps(data)) == data
    benchmark(rapidjson_dumps, data)
Example #41
def test_can_overlap():
	r0 = Restriction(json_loads('''
		{
			"type": 1,
			"contents": ["PD-101", "FA-102", "FA-105", "LT-103", "LT-203", "LT-206"]
		}
	'''))
	
	r1 = Restriction(json_loads('''
		{
			"type": 2,
			"contents": [
				[
					"MS-301", "MS-302", "MS-303", "MS-304"
				],
				[
					"CH-103", "CH-104", "MA-103", "MA-104", "PH-201", "PH-202", "ER-*"
				]
			]
		}
	'''))
	
	r2 = Restriction(json_loads('''
		{
			"type": 2,
			"contents": [
				[
					"MS-301", "MS-302", "MS-303", "MS-304"
				],
				[
					"CH-103", "CH-104", "MA-103", "MA-104", "PH-201", "PH-202", "ER-*"
				],
				[
					"SO-101"
				]
			]
		}
	'''))
	
	r3 = Restriction(json_loads('''
		{
			"type": 1,
			"contents": ["BU-150", "MSL-*"]
		}
	'''))
	
	restriction_list = [r0, r1, r2, r3]
	
	assert Restriction.can_overlap(restriction_list, "PD-101", "MS-301")
	assert not Restriction.can_overlap(restriction_list, "PD-101", "LT-206")
	assert Restriction.can_overlap(restriction_list, "PD-101", "LT-205")
	assert Restriction.can_overlap(restriction_list, "BU-150", "SO-101")
	assert not Restriction.can_overlap(restriction_list, "BU-150", "MSL-250")
	assert not Restriction.can_overlap(restriction_list, "MS-304", "PH-202")
	assert Restriction.can_overlap([r0, r1], "MS-301", "SO-101")
	assert not Restriction.can_overlap(restriction_list, "MS-301", "SO-101")
	assert not Restriction.can_overlap(restriction_list, "SO-101", "MS-301")
	assert not Restriction.can_overlap(restriction_list, "MS-302", "ER-301")
	assert not Restriction.can_overlap(restriction_list, "ER-499", "MS-302")
	assert Restriction.can_overlap(restriction_list, "ER-499", "ER-301")
	assert Restriction.can_overlap(restriction_list, "FA-102", "FA-102")
Example #42
def test_dumps_github_orjson(benchmark):
    benchmark.group = "github.json serialization"
    benchmark.extra_info["lib"] = "orjson"
    data = read_fixture_obj("github.json.xz")
    benchmark.extra_info["correct"] = json_loads(orjson_dumps(data)) == data
    benchmark(orjson_dumps, data)
Example #43
 def from_json(cls, json_str):
     return cls(**json_loads(json_str))
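This one-liner assumes the keys of the JSON object line up with the class's __init__ parameters, since the dict returned by json_loads is splatted straight into the constructor. A small sketch of the pattern on a throwaway class:

from json import loads as json_loads

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

    @classmethod
    def from_json(cls, json_str):
        # JSON keys must match the __init__ parameter names
        return cls(**json_loads(json_str))

p = Point.from_json('{"x": 1, "y": 2}')
assert (p.x, p.y) == (1, 2)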