def generate_from_count(self, count):
    """
    Generate the cloud image from a dictionary of words and counts.

    :param count: A dictionary in the form {word: count}
    :return: None; the word-cloud and histogram images are stored on the instance.
    """
    stopwords = set(i.lower() for i in self.stopwords.get_stopwords())

    # Remove stopwords, case-insensitively.
    count = {x: count[x] for x in count if x.lower() not in stopwords}

    # wordcloud expects the frequencies in a MultiDict.
    full_terms_dict = multidict.MultiDict()
    for key in count:
        full_terms_dict.add(key, count[key])

    self.wordcloud.generate_from_frequencies(full_terms_dict).to_file('wordcloud.png')
    self.wordcloud_image = cv2.imread('wordcloud.png')

    # Keep only the most frequent words for the histogram.
    sorted_x = sorted(count.items(), key=lambda cnt: cnt[1], reverse=True)
    plot_words = [x[0] for x in sorted_x[:self.freq_size]]
    plot_counts = [x[1] for x in sorted_x[:self.freq_size]]

    # Plot the histogram using matplotlib bar().
    indexes = np.arange(len(plot_words))
    plt.bar(indexes, plot_counts)
    plt.xticks(indexes, plot_words, rotation=90)
    plt.savefig('histogram.png')
    plt.close()

    self.freq_image = cv2.imread('histogram.png')
    self.__notify_image_observers()
    self.__notify_frequency_observers()
def test_http_1_0_no_host(self):
    headers = multidict.MultiDict({})
    self.message = protocol.RawRequestMessage(
        'GET', '/', (1, 0), headers, [], True, 'deflate')
    environ = self._make_one()
    self.assertEqual(environ['SERVER_NAME'], '2.3.4.5')
    self.assertEqual(environ['SERVER_PORT'], '80')
async def pcap(request, response):
    project = await Controller.instance().get_loaded_project(request.match_info["project_id"])
    ssl_context = Controller.instance().ssl_context()
    link = project.get_link(request.match_info["link_id"])
    if not link.capturing:
        raise aiohttp.web.HTTPConflict(text="This link has no active packet capture")

    compute = link.compute
    pcap_streaming_url = link.pcap_streaming_url()
    headers = multidict.MultiDict(request.headers)
    headers['Host'] = compute.host
    headers['Router-Host'] = request.host
    body = await request.read()

    connector = aiohttp.TCPConnector(limit=None, force_close=True, ssl_context=ssl_context)
    async with aiohttp.ClientSession(connector=connector, headers=headers) as session:
        # `upstream` is the compute node's response; it is named distinctly so
        # it does not shadow the `response` parameter.
        async with session.request(request.method, pcap_streaming_url, timeout=None, data=body) as upstream:
            proxied_response = aiohttp.web.Response(headers=upstream.headers, status=upstream.status)
            if upstream.headers.get('Transfer-Encoding', '').lower() == 'chunked':
                proxied_response.enable_chunked_encoding()

            await proxied_response.prepare(request)
            async for data in upstream.content.iter_any():
                if not data:
                    break
                await proxied_response.write(data)
def add_field(self, name, value, *, content_type=None, filename=None,
              content_transfer_encoding=None):
    if isinstance(value, io.IOBase):
        self._is_multipart = True
    elif isinstance(value, (bytes, bytearray, memoryview)):
        if filename is None and content_transfer_encoding is None:
            filename = name

    type_options = multidict.MultiDict({'name': name})
    if filename is not None and not isinstance(filename, str):
        raise TypeError('filename must be an instance of str. '
                        'Got: %s' % filename)
    if filename is None and isinstance(value, io.IOBase):
        filename = guess_filename(value, name)
    if filename is not None:
        type_options['filename'] = filename
        self._is_multipart = True

    headers = {}
    if content_type is not None:
        if not isinstance(content_type, str):
            raise TypeError('content_type must be an instance of str. '
                            'Got: %s' % content_type)
        headers[hdrs.CONTENT_TYPE] = content_type
        self._is_multipart = True
    if content_transfer_encoding is not None:
        if not isinstance(content_transfer_encoding, str):
            raise TypeError('content_transfer_encoding must be an instance'
                            ' of str. Got: %s' % content_transfer_encoding)
        headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding
        self._is_multipart = True

    self._fields.append((type_options, headers, value))
async def download(self):
    file = self.field.get(self.context)
    if file is None:
        raise AttributeError('No field value')

    resp = StreamResponse(headers=multidict.MultiDict({
        'CONTENT-DISPOSITION': 'attachment; filename="%s"' % file.filename
    }))
    resp.content_type = file.contentType
    if file.size:
        resp.content_length = file.size

    buf = BytesIO()
    downloader = await file.download(buf)
    await resp.prepare(self.request)

    done = False
    while done is False:
        status, done = downloader.next_chunk()
        print("Download {}%.".format(int(status.progress() * 100)))
        buf.seek(0)
        data = buf.read()
        resp.write(data)
        await resp.drain()
        buf.seek(0)
        buf.truncate()
    return resp
def get_freq_dict_for_cluster(cluster):
    # Returns a dictionary representing the frequency of words in the
    # documents of the given cluster.
    full_terms_dict = multidict.MultiDict()  # MultiDict to store word frequencies
    tmp_dict = {}  # Temporary dictionary for counting

    for d in range(len(cluster)):
        words = cluster[d].split()
        # Remove duplicate words in a document, preserving order.
        reduced_text = " ".join(sorted(set(words), key=words.index))
        for text in reduced_text.split(" "):
            if text not in vectorizer.vocabulary_:
                # Discard all words that are not in the tf-idf matrix.
                continue
            # fullmatch so only these exact words are skipped, not every word
            # that merely starts with one of them (e.g. "users", "ways").
            if re.fullmatch("anyone|many|may|used|see|use|way|believe", text):
                continue
            # Increase the document frequency of the selected word by 1.
            val = tmp_dict.get(text.lower(), 0)
            tmp_dict[text.lower()] = val + 1

    for key, value in tmp_dict.items():
        # Keys are words; values are the number of documents containing that word.
        full_terms_dict.add(key, value)
    return full_terms_dict
def modifyUser():
    uid = session.get("user_id")
    db = pymysql.connect(host=current_app.config['HOST'],
                         user=current_app.config['USER'],
                         password=current_app.config['PASSWORD'],
                         port=current_app.config['PORT'],
                         database=current_app.config['DATABASE'],
                         charset=current_app.config['CHARSET'])
    cursor = db.cursor()
    try:
        user = get_user(uid)
        gender = request.form.get("gender", default=None)
        nickname = request.form.get("nickname", default=None)
        motto = request.form.get('motto', default=None)
        info = request.form.get('info', default=None)
        birthday = request.form.get('birthday', default=None)

        if gender is not None:
            form = multidict.MultiDict([('gender', gender)])
            GForm = genderForm(form)
            if GForm.validate() is not True:
                # "Please select a valid gender"
                return jsonify({"code": -1, "errMsg": "请选择正确的用户性别"})
            user.gender = gender

        if birthday is not None:
            form = multidict.MultiDict([('birthday', birthday)])
            birthForm = birthdayForm(form)
            if birthForm.validate() is not True:
                # "Please select a valid birthday"
                return jsonify({"code": -1, "errMsg": "请选择正确的用户生日"})
            user.birthday = birthday

        if nickname is not None:
            user.userNickName = nickname
        if motto is not None:
            user.motto = motto
        if info is not None:
            user.info = info

        # Parameterized query rather than string interpolation, to avoid
        # SQL injection and quoting errors.
        sql = ("UPDATE vuser SET user_nickname = %s, gender = %s, "
               "birthday = %s, motto = %s, info = %s WHERE user_id = %s")
        cursor.execute(sql, (user.userNickName, user.gender, user.birthday,
                             user.motto, user.info, uid))
        db.commit()
        return jsonify({"code": 200})
    except Exception:
        db.rollback()
        # "Database operation failed"
        return jsonify({"code": -1, "errMsg": "数据库操作失败"})
    finally:
        db.close()
def made_rand_n_list(n: int):
    skip_list = skiplist.SkipList()
    multiset = multidict.MultiDict()
    for _ in range(n):
        rand_num = random.randint(0, n * 2)
        # MultiDict keeps repeated keys, so it can act as a multiset here.
        multiset.add(str(rand_num), "")
        skip_list.update(True, rand_num)
    return [skip_list, multiset]
async def tus_options(self):
    resp = Response(headers=multidict.MultiDict({
        'Tus-Resumable': '1.0.0',
        'Tus-Version': '1.0.0',
        'Tus-Max-Size': '1073741824',
        'Tus-Extension': 'creation,expiration'
    }))
    return resp
async def test_optcalc_solve_for_vol_with_zero_premium():
    data = multidict.MultiDict(optutil.solve_for_vol_with_zero_premium())
    form = forms.OptionForm(data)
    validation_result = form.validateSolveFor()
    assert validation_result is False
    assert len(form.form_errors) == 1
    assert form.form_errors[0] == 'Solve for Impl Vol must have a valid Premium'
def __init__(self, token, name, phoneNumber):
    super().__init__()
    self.token = token  # ID, e.g. an IC or trace-target token
    self.name = name  # Name of the person
    self.phoneNumber = phoneNumber  # Phone number with 8 digits
    self.persontags = personTag.nothing.value  # Status

    # Data structures
    self.location = multidict.MultiDict()  # Allows multiple entries per key
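# A minimal sketch (separate from the class above) of why MultiDict suits the
# location field: add() keeps every entry, so one key can map to many values.
import multidict

locations = multidict.MultiDict()
locations.add("2021-01-01", "mall")
locations.add("2021-01-01", "clinic")  # same key; the second value is kept
assert locations.getall("2021-01-01") == ["mall", "clinic"]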
def handle_post_query(self, url):
    charset = self.request.charset or 'utf-8'
    posts = multidict.MultiDict()
    o = urllib.parse.urlparse(url)
    posts.extend(
        urllib.parse.parse_qsl(qs=o.query, keep_blank_values=True,
                               encoding=charset))
    return posts
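# A quick sketch of what handle_post_query produces for a repeated parameter
# (the URL is illustrative): parse_qsl keeps duplicates and blank values, and
# MultiDict.extend preserves them all.
import urllib.parse
import multidict

query = urllib.parse.urlparse("http://example.com/?tag=a&tag=b&empty=").query
posts = multidict.MultiDict()
posts.extend(urllib.parse.parse_qsl(qs=query, keep_blank_values=True))
assert posts.getall("tag") == ["a", "b"]
assert posts["empty"] == ""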
def getFrequencyDictForText(texts):
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}
    for text in texts:
        # Count under the lowercased word so "Word" and "word" share one entry.
        val = tmpDict.get(text.lower(), 0)
        tmpDict[text.lower()] = val + 1
    for key in tmpDict:
        fullTermsDict.add(key, tmpDict[key])
    return fullTermsDict
def freq_dict(sentence):
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}  # Dict for counting frequencies
    for text in sentence.split(" "):
        val = tmpDict.get(text, 0)
        tmpDict[text] = val + 1
    for key in tmpDict:
        fullTermsDict.add(key, tmpDict[key])
    return fullTermsDict
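# A short usage sketch for freq_dict above (sample text and plotting are
# illustrative): the returned MultiDict plugs straight into wordcloud's
# generate_from_frequencies(), which only needs an .items() view of
# word -> count pairs.
from wordcloud import WordCloud
import matplotlib.pyplot as plt

frequencies = freq_dict("the quick brown fox jumps over the lazy dog")
wc = WordCloud(background_color="white").generate_from_frequencies(frequencies)
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()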
async def modify_headers(self, site: URL, request: web.Request) -> multidict.MultiDict:
    """Modify headers.

    Args:
        site (URL): URL of the next host request.
        request (web.Request): Proxy directed request. This will need to be
            changed for the actual host request.

    Returns:
        multidict.MultiDict: Headers after modifications.
    """
    result = await super().modify_headers(site, request)
    method = request.method
    if (str(site.path) == "/oauth2/v3/authorize/mfa/verify"
            and method == "POST"
            and not await request.post()):
        # Allow posting JSON to autogenerate headers.
        # https://github.com/timdorr/tesla-api/discussions/316
        return multidict.MultiDict({})
    return multidict.MultiDict(result)
def test_classes_not_abstract() -> None:
    d1 = multidict.MultiDict({'a': 'b'})  # type: multidict.MultiDict[str]
    d2 = multidict.CIMultiDict({'a': 'b'})  # type: multidict.CIMultiDict[str]
    d3 = multidict.MultiDictProxy(d1)
    d4 = multidict.CIMultiDictProxy(d2)
    d1.getone('a')
    d2.getall('a')
    d3.getone('a')
    d4.getall('a')
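# A quick sketch of what distinguishes the classes exercised above:
# CIMultiDict compares keys case-insensitively (hence its use for HTTP
# headers), and the proxy variants are read-only views over a backing dict.
import multidict

headers = multidict.CIMultiDict({'Content-Type': 'text/plain'})
assert headers.getone('content-type') == 'text/plain'  # case-insensitive lookup

view = multidict.CIMultiDictProxy(headers)
headers['X-Extra'] = 'yes'       # mutations on the backing dict...
assert view['x-extra'] == 'yes'  # ...show through the read-only proxy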
def get_frequency_dict_for_text(self, table):
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}  # Dict for counting frequencies
    for index, row in table.iterrows():
        tmpDict[index] = int(row)
    for key in tmpDict:
        fullTermsDict.add(key, tmpDict[key])
    return fullTermsDict
def generate_cloud(most_common_words):
    fullTermsDict = multidict.MultiDict()
    for word in most_common_words:
        fullTermsDict.add(word[0], word[1])

    wc = WordCloud(background_color="white", max_words=1000)
    # Generate the word cloud from the (word, count) pairs.
    wc.generate_from_frequencies(fullTermsDict)

    # Show the result.
    plt.imshow(wc, interpolation="bilinear")
    plt.axis("off")
    plt.show()
def getFrequencyDict(tblAnalyQuery):
    fullTermsDict = multidict.MultiDict()
    keyword_query = tblAnalyQuery(data_ty, dataClkwTtle, dataClorTtle,
                                  dataClprEndDe, dataClprBeginDe)
    maxSize = len(keyword_query)
    print(maxSize)
    for i in range(maxSize):
        fullTermsDict.add(keyword_query[i][0], float(keyword_query[i][1]))
    return fullTermsDict
async def tus_create(self):
    # This only happens in tus-java-client; redirect this POST to a PATCH.
    if self.request.headers.get('X-HTTP-Method-Override') == 'PATCH':
        return await self.tus_patch()

    file = self.field.get(self.context)
    if file is None:
        file = S3File(contentType=self.request.content_type)
        self.field.set(self.context, file)

    if 'CONTENT-LENGTH' in self.request.headers:
        file._current_upload = int(self.request.headers['CONTENT-LENGTH'])
    else:
        file._current_upload = 0

    if 'UPLOAD-LENGTH' in self.request.headers:
        file._size = int(self.request.headers['UPLOAD-LENGTH'])
    else:
        raise AttributeError('Missing Upload-Length header')

    if 'UPLOAD-MD5' in self.request.headers:
        file._md5hash = self.request.headers['UPLOAD-MD5']
    if 'UPLOAD-EXTENSION' in self.request.headers:
        file._extension = self.request.headers['UPLOAD-EXTENSION']
    if 'TUS-RESUMABLE' not in self.request.headers:
        raise AttributeError('A TUS upload needs a Tus-Resumable header')

    if 'UPLOAD-METADATA' not in self.request.headers:
        file.filename = uuid.uuid4().hex
    else:
        filename = self.request.headers['UPLOAD-METADATA']
        file.filename = base64.b64decode(filename.split()[1]).decode('utf-8')

    await file.initUpload(self.context)
    if file.size < MIN_UPLOAD_SIZE:
        file._one_tus_shoot = True
    else:
        file._one_tus_shoot = False

    # Location will need to be adapted on aiohttp 1.1.x.
    resp = Response(
        headers=multidict.MultiDict({
            'Location': IAbsoluteURL(self.context, self.request)() + '/@tusupload/' + self.field.__name__,  # noqa
            'Tus-Resumable': '1.0.0',
            'Access-Control-Expose-Headers': 'Location,Tus-Resumable'
        }),
        status=201)
    return resp
def getFrequencyDictForText(sentence):
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}
    # fullmatch so that only whole stopwords are dropped, not every word
    # that merely starts with one (see the sketch after this function).
    stop_pattern = re.compile(r"a|an|the|to|in|for|of|or|by|with|is|on|that|be")
    for text in sentence.split(" "):
        if stop_pattern.fullmatch(text):
            continue
        # Count under the lowercased word so "Word" and "word" share one entry.
        val = tmpDict.get(text.lower(), 0)
        tmpDict[text.lower()] = val + 1
    for key in tmpDict:
        fullTermsDict.add(key, tmpDict[key])
    return dict(fullTermsDict)
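# Why fullmatch above: re.match anchors only at the start of the string, so a
# bare alternation like "a|an|the|..." would also discard every word that
# merely begins with a stopword.
import re

pattern = re.compile(r"a|an|the|to|in|for|of|or|by|with|is|on|that|be")
assert pattern.match("and")               # "a" matches as a prefix of "and"
assert pattern.fullmatch("and") is None   # fullmatch keeps "and"
assert pattern.fullmatch("the")           # whole stopwords are still dropped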
def words_in_base(base, name):
    fullTermsDict = multidict.MultiDict()
    words = {}
    for file in base:
        for word in base[file].split():
            word = re.sub(r'[^\w\s]', '', word).lower()
            if word not in stop_words and word.isalpha():
                val = words.get(word, 0)
                words[word] = val + 1
    for key in words:
        fullTermsDict.add(key, words[key])
    tree_map(fullTermsDict, name)
def transformTxtToDict(text):
    # The input alternates word/count tokens, e.g. "apple 3, banana 5";
    # split it and pair even-indexed tokens with the odd-indexed ones.
    fullTermsDict = multidict.MultiDict()
    splitString = text.replace(",", " ").split()
    entries = dict(zip(splitString[::2], splitString[1::2]))
    for key in entries:
        fullTermsDict.add(key, int(entries[key]))
    return fullTermsDict
def digest(self) -> str:
    """Return the MD5 digest for this query."""
    if Backend.salt is None:
        raise model.SecretNotFound("wolfram-salt not specified in secrets.json.")
    items = sorted(self.parameters.items())
    encoded = multidict.MultiDict((k, urllib.parse.quote_plus(v)) for k, v in items)
    values = ''.join(f"{k}{v}" for k, v in encoded.items())
    data = f"{Backend.salt}{values}"
    signature = hashlib.md5(data.encode(encoding="utf-8"))
    return signature.hexdigest().upper()
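# A worked sketch of the signing scheme in digest() above, with a made-up
# salt and parameters (both hypothetical; the real salt comes from
# secrets.json): sort the parameters, percent-encode the values, concatenate
# the key/value pairs after the salt, and take the uppercase MD5 hex digest.
import hashlib
import urllib.parse

salt = "EXAMPLE_SALT"
params = sorted({"appid": "DEMO", "input": "2+2"}.items())
values = ''.join(f"{k}{urllib.parse.quote_plus(v)}" for k, v in params)
print(hashlib.md5(f"{salt}{values}".encode("utf-8")).hexdigest().upper())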
async def tus_head(self):
    file = self.field.get(self.context)
    if file is None:
        raise KeyError('No file on this context')
    head_response = {
        'Upload-Offset': str(file.actualSize()),
        'Tus-Resumable': '1.0.0',
        'Access-Control-Expose-Headers': 'Upload-Offset,Upload-Length,Tus-Resumable'
    }
    if file.size:
        head_response['Upload-Length'] = str(file._size)
    resp = Response(headers=multidict.MultiDict(head_response))
    return resp
def getFrequencyDictForText(sentence, stopwords_flag):
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}  # Dict for counting frequencies
    # fullmatch so that only whole stopwords are dropped, not every word
    # that merely starts with one.
    stop_pattern = re.compile(r"a|an|the|to|in|for|of|or|by|with|is|on|that|be")
    for text in sentence.split(" "):
        if stopwords_flag and stop_pattern.fullmatch(text):
            continue
        # Count under the lowercased word so "Word" and "word" share one entry.
        val = tmpDict.get(text.lower(), 0)
        tmpDict[text.lower()] = val + 1
    for key in tmpDict:
        fullTermsDict.add(key, tmpDict[key])
    return fullTermsDict
def get_frequency_dict(sentence):
    """Converts raw text into a multidict for wordcloud usage."""
    full_terms_dict = multidict.MultiDict()
    tmp_dict = {}  # Dict for counting frequencies
    for text in sentence.split(" "):
        word = text.strip()
        if word.lower() in STOPWORDS:
            continue
        # Look up and store under the same stripped word so counts agree.
        val = tmp_dict.get(word, 0)
        tmp_dict[word] = val + 1
    for key in tmp_dict:
        full_terms_dict.add(key, tmp_dict[key])
    return full_terms_dict
def processInput(searchInput):
    # searchInput holds paired fields selectSearchWithinN / textSearchWithinN;
    # fold each pair into one MultiDict entry (category -> search text).
    size = ceil(len(searchInput) / 2) + 1
    key = "selectSearchWithin"
    val = "textSearchWithin"
    newSearchInput = multidict.MultiDict()
    for i in range(1, size):
        try:
            k = key + str(i)
            v = val + str(i)
            newSearchInput.add(searchInput[k], searchInput[v])
        except KeyError:
            break
    print(newSearchInput)
    return newSearchInput
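# A usage sketch for processInput with a hypothetical form payload: the same
# category may be selected twice, and the MultiDict keeps both entries.
search_input = {
    "selectSearchWithin1": "title", "textSearchWithin1": "whale",
    "selectSearchWithin2": "title", "textSearchWithin2": "ship",
}
result = processInput(search_input)
assert result.getall("title") == ["whale", "ship"]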
def get_frequency_dictionary(sentence):
    """Creates a multidict with the words and their frequency."""
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}  # Dict for counting frequencies
    for text in sentence.split(" "):
        if text in stopwords:
            continue
        # Count under the lowercased word so "Word" and "word" share one entry.
        val = tmpDict.get(text.lower(), 0)
        tmpDict[text.lower()] = val + 1
    for key in tmpDict:
        fullTermsDict.add(key, tmpDict[key])
    return fullTermsDict
def getFrequencyDictForText(sents, w):
    fullTermsDict = multidict.MultiDict()
    tmpDict = {}
    mx = 0  # Highest count seen, used below to normalize the weights
    for word in sents:
        if (word not in STOPS and word != w
                and not have_same_root(w, word) and not word.isdigit()):
            val = tmpDict.get(word, 0)
            if val + 1 > mx:
                mx = val + 1
            tmpDict[word] = val + 1
    for key in tmpDict:
        # Normalize counts into (0, 1/3] so no single word dominates.
        fullTermsDict.add(key, tmpDict[key] / (mx * 3))
    return fullTermsDict