def text(self):
    """Content of the response, in unicode.

    If Response.encoding is None, encoding will be guessed using
    ``charade``.
    """
    # Try charset from content-type
    content = None
    encoding = self.encoding

    if not self.content:
        return str('')

    # Fallback to auto-detected encoding.
    if self.encoding is None:
        encoding = self.apparent_encoding

    # Decode unicode from given encoding.
    try:
        content = str(self.content, encoding, errors='replace')
    except (LookupError, TypeError):
        # A LookupError is raised if the encoding was not found which could
        # indicate a misspelling or similar mistake.
        #
        # A TypeError can be raised if encoding is None
        #
        # So we try blindly encoding.
        content = str(self.content, errors='replace')

    return content
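# Illustrative sketch (not part of the original code): the try/except above
# boils down to ``str(bytes, encoding, errors='replace')`` with a blind
# fallback. A bogus charset name triggers the LookupError branch.
raw = 'café'.encode('utf-8')
try:
    decoded = str(raw, 'no-such-charset', errors='replace')
except (LookupError, TypeError):
    decoded = str(raw, errors='replace')
print(decoded)  # -> 'café' (str() defaults to UTF-8 in Python 3)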
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:
    1. charset from content-type
    2. fall back and replace all unicode characters
    """
    tried_encodings = []

    # Try charset from content-type
    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            tried_encodings.append(encoding)

    # Fall back:
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
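# Usage sketch (``FakeResponse`` is a hypothetical stand-in; assumes
# requests' get_encoding_from_headers is in scope, as it is in
# requests.utils where this helper lives).
class FakeResponse:
    headers = {'content-type': 'text/plain; charset=utf-8'}
    content = 'héllo wörld'.encode('utf-8')

print(get_unicode_from_response(FakeResponse()))  # -> 'héllo wörld'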
def plotPieChart(self, positive, wpositive, spositive, negative, wnegative,
                 snegative, neutral, searchTerm, noOfSearchTerms):
    labels = ['Positive [' + str(positive) + '%]',
              'Weakly Positive [' + str(wpositive) + '%]',
              'Strongly Positive [' + str(spositive) + '%]',
              'Neutral [' + str(neutral) + '%]',
              'Negative [' + str(negative) + '%]',
              'Weakly Negative [' + str(wnegative) + '%]',
              'Strongly Negative [' + str(snegative) + '%]']
    sizes = [positive, wpositive, spositive, neutral,
             negative, wnegative, snegative]
    colors = ['yellowgreen', 'lightgreen', 'darkgreen', 'gold', 'red',
              'lightsalmon', 'darkred']
    patches, texts = plt.pie(sizes, colors=colors, startangle=91)
    plt.legend(patches, labels, loc="best")
    plt.title('How people are reacting on ' + searchTerm +
              ' by analyzing ' + str(noOfSearchTerms) + ' Tweets.')
    plt.axis('equal')
    plt.tight_layout()
    plt.show()
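# Usage sketch with made-up percentages; ``SentimentAnalysis`` is an assumed
# name for the class this method belongs to, and matplotlib.pyplot must be
# imported as plt.
analyzer = SentimentAnalysis()
analyzer.plotPieChart(40.0, 15.0, 5.0, 10.0, 5.0, 2.0, 23.0, 'python', 100)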
def _encode_files(files, data):
    """Build the body for a multipart/form-data request.

    Will successfully encode files when passed as a dict or a list of
    2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
    if parameters are supplied as a dict.
    """
    if (not files):
        raise ValueError("Files must be provided.")
    elif isinstance(data, basestring):
        raise ValueError("Data must not be a string.")

    new_fields = []
    fields = to_key_val_list(data or {})
    files = to_key_val_list(files or {})

    for field, val in fields:
        if isinstance(val, basestring) or not hasattr(val, '__iter__'):
            val = [val]
        for v in val:
            if v is not None:
                # Don't call str() on bytestrings: in Py3 it all goes wrong.
                if not isinstance(v, bytes):
                    v = str(v)

                new_fields.append(
                    (field.decode('utf-8') if isinstance(field, bytes) else field,
                     v.encode('utf-8') if isinstance(v, str) else v))

    for (k, v) in files:
        # support for explicit filename
        ft = None
        fh = None
        if isinstance(v, (tuple, list)):
            if len(v) == 2:
                fn, fp = v
            elif len(v) == 3:
                fn, fp, ft = v
            else:
                fn, fp, ft, fh = v
        else:
            fn = guess_filename(v) or k
            fp = v
        if isinstance(fp, str):
            fp = StringIO(fp)
        if isinstance(fp, bytes):
            fp = BytesIO(fp)

        rf = RequestField(name=k, data=fp.read(), filename=fn, headers=fh)
        rf.make_multipart(content_type=ft)
        new_fields.append(rf)

    body, content_type = encode_multipart_formdata(new_fields)

    return body, content_type
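# Usage sketch (hypothetical payload; assumes the helpers used above --
# to_key_val_list, RequestField, encode_multipart_formdata, StringIO,
# BytesIO -- are in scope, as they are inside requests.models).
files = {'report': ('report.txt', b'hello world', 'text/plain')}
data = [('author', 'alice'), ('tags', ['a', 'b'])]
body, content_type = _encode_files(files, data)
print(content_type)  # multipart/form-data; boundary=...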
def prepare_url(self, url, params):
    """Prepares the given HTTP URL."""
    #: Accept objects that have string representations.
    try:
        url = unicode(url)
    except NameError:
        # We're on Python 3.
        url = str(url)
    except UnicodeDecodeError:
        pass

    # Don't do any URL preparation for oddball schemes
    if ':' in url and not url.lower().startswith('http'):
        self.url = url
        return

    # Support for unicode domain names and paths.
    scheme, auth, host, port, path, query, fragment = parse_url(url)

    if not scheme:
        raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
                            "Perhaps you meant http://{0}?".format(url))

    if not host:
        raise InvalidURL("Invalid URL %r: No host supplied" % url)

    # Only want to apply IDNA to the hostname
    try:
        host = host.encode('idna').decode('utf-8')
    except UnicodeError:
        raise InvalidURL('URL has an invalid label.')

    # Carefully reconstruct the network location
    netloc = auth or ''
    if netloc:
        netloc += '@'
    netloc += host
    if port:
        netloc += ':' + str(port)

    # Bare domains aren't valid URLs.
    if not path:
        path = '/'

    if is_py2:
        if isinstance(scheme, str):
            scheme = scheme.encode('utf-8')
        if isinstance(netloc, str):
            netloc = netloc.encode('utf-8')
        if isinstance(path, str):
            path = path.encode('utf-8')
        if isinstance(query, str):
            query = query.encode('utf-8')
        if isinstance(fragment, str):
            fragment = fragment.encode('utf-8')

    enc_params = self._encode_params(params)
    if enc_params:
        if query:
            query = '%s&%s' % (query, enc_params)
        else:
            query = enc_params

    url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
    self.url = url
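# Usage sketch (assumes requests' PreparedRequest, where this method lives
# upstream; the URL and params are illustrative).
p = PreparedRequest()
p.prepare_url('http://example.com', {'q': 'a b'})
print(p.url)  # http://example.com/?q=a+b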
def __unicode__(self):
    return str(self.id)
def teacher(request, teacher_id):
    date_string = request.session['date_string']
    date_object = datetime.strptime(date_string, '%Y-%m-%d')

    if request.method == "GET":
        change_day = request.GET.get('change_day', None)
        if change_day == 'prev':
            date_object += timedelta(days=-1)
        elif change_day == 'next':
            date_object += timedelta(days=1)

    year_int = date_object.year
    day_int = date_object.day
    month_string = date_object.strftime('%B')
    day_string = date_object.strftime('%A').lower()
    date_string = date_object.strftime('%Y-%m-%d')
    request.session['date_string'] = date_string

    students = Student.objects.all().filter(
        teacher=teacher_id, day=day_string, active=True).order_by('time')
    current_teacher = Teacher.objects.filter(id=teacher_id)

    if request.method == "POST":
        for student in students:
            request_id = str(student.id) + "_attendance"
            student_attendance = request.POST[request_id]
            if student_attendance == 'true':
                data = 'present'
            else:
                data = "absent"
            if student_attendance == 'excused':
                data = "makeup"
                Student.objects.filter(id=student.id).update(
                    make_up=F('make_up') + 1)

            if Lesson.objects.all().filter(student=student.id,
                                           teacher=teacher_id,
                                           month=month_string,
                                           year=year_int).count() == 0:
                lesson_attrs = {
                    "student_id": student.id,
                    "teacher_id": student.teacher.id,
                    "month": month_string,
                    "year": year_int,
                    "lessons": {
                        day_int: data
                    }
                }
                new_lesson = Lesson.objects.create(**lesson_attrs)
                serializer = LessonSerializer(data=new_lesson)
                if serializer.is_valid():
                    serializer.save()
            else:
                lesson = Lesson.objects.get(student=student.id,
                                            teacher=teacher_id,
                                            month=month_string,
                                            year=year_int)
                lesson.lessons[day_int] = data
                lesson.save()
        return redirect('home')
    else:
        return render(
            request, 'teacher.html', {
                'current_teacher': current_teacher,
                'students': students,
                'today': day_string,
                'month': month_string,
                'day': day_int
            })
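# Routing sketch (hypothetical urls.py entry; the pattern and name are
# assumptions, not taken from the original project).
from django.urls import path

urlpatterns = [
    path('teacher/<int:teacher_id>/', teacher, name='teacher'),
]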
def build_digest_header(self, method, url):

    realm = self.chal['realm']
    nonce = self.chal['nonce']
    qop = self.chal.get('qop')
    algorithm = self.chal.get('algorithm')
    opaque = self.chal.get('opaque')

    if algorithm is None:
        _algorithm = 'MD5'
    else:
        _algorithm = algorithm.upper()

    # lambdas assume digest modules are imported at the top level
    hash_utf8 = None  # stays None for unsupported algorithms
    if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
        def md5_utf8(x):
            if isinstance(x, str):
                x = x.encode('utf-8')
            return hashlib.md5(x).hexdigest()
        hash_utf8 = md5_utf8
    elif _algorithm == 'SHA':
        def sha_utf8(x):
            if isinstance(x, str):
                x = x.encode('utf-8')
            return hashlib.sha1(x).hexdigest()
        hash_utf8 = sha_utf8

    KD = lambda s, d: hash_utf8("%s:%s" % (s, d))

    if hash_utf8 is None:
        return None

    # XXX not implemented yet
    entdig = None
    p_parsed = urlparse(url)
    path = p_parsed.path
    if p_parsed.query:
        path += '?' + p_parsed.query

    A1 = '%s:%s:%s' % (self.username, realm, self.password)
    A2 = '%s:%s' % (method, path)

    HA1 = hash_utf8(A1)
    HA2 = hash_utf8(A2)

    if nonce == self.last_nonce:
        self.nonce_count += 1
    else:
        self.nonce_count = 1
    ncvalue = '%08x' % self.nonce_count
    s = str(self.nonce_count).encode('utf-8')
    s += nonce.encode('utf-8')
    s += time.ctime().encode('utf-8')
    s += os.urandom(8)

    cnonce = (hashlib.sha1(s).hexdigest()[:16])
    noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2)
    if _algorithm == 'MD5-SESS':
        HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))

    if qop is None:
        respdig = KD(HA1, "%s:%s" % (nonce, HA2))
    elif qop == 'auth' or 'auth' in qop.split(','):
        respdig = KD(HA1, noncebit)
    else:
        # XXX handle auth-int.
        return None

    self.last_nonce = nonce

    # XXX should the partial digests be encoded too?
    base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
           'response="%s"' % (self.username, realm, nonce, path, respdig)
    if opaque:
        base += ', opaque="%s"' % opaque
    if algorithm:
        base += ', algorithm="%s"' % algorithm
    if entdig:
        base += ', digest="%s"' % entdig
    if qop:
        base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)

    return 'Digest %s' % (base)
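# Worked example of the qop="auth" branch above, standalone, using the
# sample challenge values from RFC 2617 section 3.5 (illustrative only).
import hashlib

user, realm, password = 'Mufasa', 'testrealm@host.com', 'Circle Of Life'
method, uri = 'GET', '/dir/index.html'
nonce = 'dcd98b7102dd2f0e8b11d0f600bfb0c093'
cnonce, nc, qop = '0a4f113b', '00000001', 'auth'

HA1 = hashlib.md5(('%s:%s:%s' % (user, realm, password)).encode('utf-8')).hexdigest()
HA2 = hashlib.md5(('%s:%s' % (method, uri)).encode('utf-8')).hexdigest()
respdig = hashlib.md5(('%s:%s:%s:%s:%s:%s' % (HA1, nonce, nc, cnonce, qop, HA2)).encode('utf-8')).hexdigest()
print(respdig)  # 6629fae49393a05397450978507c4ef1 (matches RFC 2617's example)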
def DownloadData(self):
    # authenticating
    consumerKey = 'ROLv76ehzBwOj7OJzHQNoJlvR'
    consumerSecret = '6eLMOCoYmHmPNaW42IDXv52V6d3CxrLjl6ZJ1x6xkXjKke3M0C'
    accessToken = '153783306-Wn3MnHCaeFha9SKCfSnLrHv7HijIId3X5qsvYhQh'
    accessTokenSecret = 'swVUGR8EBw6goNIlxQi5IgOt8QJxPla9gydfXVHpgzU8j'
    auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
    auth.set_access_token(accessToken, accessTokenSecret)
    api = tweepy.API(auth)

    # input for term to be searched and how many tweets to search
    searchTerm = input("Enter Keyword/Tag to search about: ")
    NoOfTerms = int(input("Enter how many tweets to search: "))

    # searching for tweets
    self.tweets = tweepy.Cursor(api.search, q=searchTerm, lang="en").items(NoOfTerms)

    # Open/create a file to append data to
    csvFile = open('result.csv', 'a')

    # Use csv writer
    csvWriter = csv.writer(csvFile)

    # creating some variables to store info
    polarity = 0
    positive = 0
    wpositive = 0
    spositive = 0
    negative = 0
    wnegative = 0
    snegative = 0
    neutral = 0

    # iterating through tweets fetched
    for tweet in self.tweets:
        # Append to temp so that we can store in csv later. I use encode UTF-8
        self.tweetText.append(self.cleanTweet(tweet.text).encode('utf-8'))
        # print(tweet.text.translate(non_bmp_map))  # print tweet's text
        analysis = TextBlob(tweet.text)
        # print(analysis.sentiment)  # print tweet's polarity
        polarity += analysis.sentiment.polarity  # adding up polarities to find the average later

        if (analysis.sentiment.polarity == 0):  # adding reaction of how people are reacting to find average later
            neutral += 1
        elif (analysis.sentiment.polarity > 0 and analysis.sentiment.polarity <= 0.3):
            wpositive += 1
        elif (analysis.sentiment.polarity > 0.3 and analysis.sentiment.polarity <= 0.6):
            positive += 1
        elif (analysis.sentiment.polarity > 0.6 and analysis.sentiment.polarity <= 1):
            spositive += 1
        elif (analysis.sentiment.polarity > -0.3 and analysis.sentiment.polarity <= 0):
            wnegative += 1
        elif (analysis.sentiment.polarity > -0.6 and analysis.sentiment.polarity <= -0.3):
            negative += 1
        elif (analysis.sentiment.polarity > -1 and analysis.sentiment.polarity <= -0.6):
            snegative += 1

    # Write to csv and close csv file
    csvWriter.writerow(self.tweetText)
    csvFile.close()

    # finding average of how people are reacting
    positive = self.percentage(positive, NoOfTerms)
    wpositive = self.percentage(wpositive, NoOfTerms)
    spositive = self.percentage(spositive, NoOfTerms)
    negative = self.percentage(negative, NoOfTerms)
    wnegative = self.percentage(wnegative, NoOfTerms)
    snegative = self.percentage(snegative, NoOfTerms)
    neutral = self.percentage(neutral, NoOfTerms)

    # finding average reaction
    polarity = polarity / NoOfTerms

    # printing out data
    print("How people are reacting on " + searchTerm + " by analyzing " + str(NoOfTerms) + " tweets.")
    print()
    print("General Report: ")

    if (polarity == 0):
        print("Neutral")
    elif (polarity > 0 and polarity <= 0.3):
        print("Weakly Positive")
    elif (polarity > 0.3 and polarity <= 0.6):
        print("Positive")
    elif (polarity > 0.6 and polarity <= 1):
        print("Strongly Positive")
    elif (polarity > -0.3 and polarity <= 0):
        print("Weakly Negative")
    elif (polarity > -0.6 and polarity <= -0.3):
        print("Negative")
    elif (polarity > -1 and polarity <= -0.6):
        print("Strongly Negative")

    print()
    print("Detailed Report: ")
    print(str(positive) + "% people thought it was positive")
    print(str(wpositive) + "% people thought it was weakly positive")
    print(str(spositive) + "% people thought it was strongly positive")
    print(str(negative) + "% people thought it was negative")
    print(str(wnegative) + "% people thought it was weakly negative")
    print(str(snegative) + "% people thought it was strongly negative")
    print(str(neutral) + "% people thought it was neutral")

    self.plotPieChart(positive, wpositive, spositive, negative, wnegative,
                      snegative, neutral, searchTerm, NoOfTerms)
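# Minimal driver sketch (``SentimentAnalysis`` is an assumed class name for
# the methods above; tweetText must exist before DownloadData appends to it).
if __name__ == '__main__':
    sa = SentimentAnalysis()
    sa.tweetText = []
    sa.DownloadData()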