def moderate(self, comment, content_object, request):
    """Need to pass Akismet test"""
    if not AKISMET_COMMENT:
        return False
    try:
        from akismet import Akismet
        from akismet import APIKeyError
    except ImportError:
        return False
    akismet = Akismet(key=AKISMET_API_KEY,
                      blog_url='%s://%s/' % (
                          PROTOCOL, Site.objects.get_current().domain))
    if akismet.verify_key():
        akismet_data = {
            'user_ip': request.META.get('REMOTE_ADDR', ''),
            'user_agent': request.META.get('HTTP_USER_AGENT', ''),
            'referrer': request.META.get('HTTP_REFERER', 'unknown'),
            'permalink': content_object.get_absolute_url(),
            'comment_type': 'comment',
            'comment_author': smart_str(comment.userinfo.get('name', '')),
            'comment_author_email': smart_str(comment.userinfo.get('email', '')),
            'comment_author_url': smart_str(comment.userinfo.get('url', '')),
        }
        is_spam = akismet.comment_check(smart_str(comment.comment),
                                        data=akismet_data, build_data=True)
        if is_spam:
            comment.save()
            user = comment.content_object.authors.all()[0]
            comment.flags.create(user=user, flag='spam')
        return is_spam
    raise APIKeyError("Your Akismet API key is invalid.")
def get_hexdigest(algorithm, salt, raw_password):
    raw_password, salt = smart_str(raw_password), smart_str(salt)
    if algorithm == 'md5':
        return md5_constructor(salt + raw_password).hexdigest()
    elif algorithm == 'sha1':
        return sha_constructor(salt + raw_password).hexdigest()
    raise ValueError('Got unknown password algorithm type in password')
def _upload_file(request):
    """
    Upload file to the server.
    """
    from django.core.files.move import file_move_safe
    if request.method == 'POST':
        folder = request.POST.get('folder')
        fb_uploadurl_re = re.compile(r'^.*(%s)' % reverse("fb_upload"))
        folder = fb_uploadurl_re.sub('', folder)
        abs_path = _check_access(request, folder)
        if request.FILES:
            filedata = request.FILES['Filedata']
            filedata.name = convert_filename(filedata.name)
            _check_access(request, abs_path, filedata.name)
            # PRE UPLOAD SIGNAL
            filebrowser_pre_upload.send(sender=request,
                                        path=request.POST.get('folder'),
                                        file=filedata)
            # HANDLE UPLOAD
            uploadedfile = handle_file_upload(abs_path, filedata)
            # MOVE UPLOADED FILE
            # if file already exists
            if os.path.isfile(smart_str(os.path.join(
                    fb_settings.MEDIA_ROOT, fb_settings.DIRECTORY,
                    folder, filedata.name))):
                old_file = smart_str(os.path.join(abs_path, filedata.name))
                new_file = smart_str(os.path.join(abs_path, uploadedfile))
                file_move_safe(new_file, old_file)
            # POST UPLOAD SIGNAL
            filebrowser_post_upload.send(
                sender=request, path=request.POST.get('folder'),
                file=FileObject(smart_str(os.path.join(
                    fb_settings.DIRECTORY, folder, filedata.name))))
        return HttpResponse('True')
def get_option_value(optionset_admin, db_option, current_only):
    """
    Given an Option object, return its value for the current language.
    """
    name = smart_str(db_option.name)
    if name not in optionset_admin.options:
        return None
    field = optionset_admin.options[name]
    if not db_option.lang_dependant:
        return field.to_python(db_option.value) if db_option.value else ''
    value_dict = {}
    for key, value in json.loads(db_option.value).items():
        value_dict[smart_str(key)] = value
    if current_only:
        curr_lang = get_language()
        if curr_lang in value_dict:
            return field.to_python(value_dict[curr_lang]) \
                if value_dict[curr_lang] else ''
    else:
        for key in value_dict:
            value_dict[key] = field.to_python(value_dict[key])
        return value_dict
def urlencode(self, safe=None):
    """
    Returns an encoded string of all query string arguments.

    :arg safe: Used to specify characters which do not require quoting, for
        example::

        >>> q = QueryDict('', mutable=True)
        >>> q['next'] = '/a&b/'
        >>> q.urlencode()
        'next=%2Fa%26b%2F'
        >>> q.urlencode(safe='/')
        'next=/a%26b/'
    """
    output = []
    if safe:
        encode = lambda k, v: '%s=%s' % (quote(k, safe), quote(v, safe))
    else:
        encode = lambda k, v: urlencode({k: v})
    for k, list_ in self.lists():
        k = smart_str(k, self.encoding)
        output.extend([encode(k, smart_str(v, self.encoding))
                       for v in list_])
    return '&'.join(output)
def get_revisions(title, csv_writer, lang, textcleaner,
                  startid=None, prev_text=""):
    api_base = 'http://%s.wikipedia.org/w/api.php' % lang
    options = {}
    options.update({
        'action': 'query',
        'prop': 'revisions',
        'rvlimit': 500,
        'titles': title,
        'rvprop': 'ids|timestamp|content',
        'rvdir': 'newer',
        'format': 'json'
    })
    if startid is not None:
        options.update({'rvstartid': startid})
    url = api_base + '?' + urllib.urlencode(options)
    logging.info(url)
    result = simplejson.load(urllib.urlopen(url))
    pages = result["query"]["pages"]
    for page in pages:
        revs = pages[page]["revisions"]
        for r in revs:
            text_cleaned = textcleaner.clean_all(r["*"])
            text = smart_str(_diff_text(prev_text, text_cleaned)[0])
            csv_writer.writerow([r["timestamp"], lang, smart_str(title),
                                 "", text])
            prev_text = text_cleaned
    try:
        cont = result['query-continue']['revisions']['rvstartid']
        logging.info("Continue to %d", cont)
        # pass textcleaner through; the original call omitted it, which
        # bound `cont` to the `textcleaner` parameter on recursion
        get_revisions(title, csv_writer, lang, textcleaner, cont, prev_text)
    except KeyError:
        logging.info("Finished!")
def pullThread(self, msgl):
    """ gets all messages for a thread """
    data = self.get(msgl)
    msgs = data.findAll("span", {"class": "username-inbox"})
    this_id = ''
    ids, texts = [], []
    for msg in msgs:
        this_id = msg.find("a")['href'].split("=")[1]
        txt = msg.parent.parent.findAll("div")[1].text
        if "has expressed interest in you" not in txt:
            texts.append(smart_str(txt))
            ids.append(smart_str(this_id))
    for link in data.findAll("a"):
        if "viewallmessages" in link['href'] and not data.find(
                "span", {"class": "text-warning"}):
            newlink = "http://www.pof.com/" + link['href']
            ids2, texts2 = self.pullThread(newlink)
            ids = ids2 + ids
            texts = texts2 + texts
            break
    if not msgs:
        return [], []
    else:
        return ids, texts
def putRow(self, cluster, tableName, row, data):
    client = self.connectCluster(cluster)
    mutations = []
    Mutation = get_thrift_type('Mutation')
    for column in data.keys():
        mutations.append(Mutation(column=smart_str(column),
                                  value=smart_str(data[column])))
    # must use str for API, does thrift coerce by itself?
    return client.mutateRow(tableName, smart_str(row), mutations, None)
def convert_query(self, query, num_params):
    # kinterbasdb tries to convert the passed SQL to string.
    # But if the connection charset is NONE, ASCII or OCTETS it will fail.
    # So we convert it to string first.
    if num_params == 0:
        return smart_str(query, self.encoding)
    return smart_str(query % tuple("?" * num_params), self.encoding)
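# Usage sketch for convert_query (assumption: the incoming SQL uses
# Django-style '%s' placeholders and the backend expects qmark '?'
# parameters, as kinterbasdb does; the call below is illustrative only):
#
#   self.convert_query("SELECT * FROM t WHERE a = %s AND b = %s", 2)
#   # -> "SELECT * FROM t WHERE a = ? AND b = ?"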
def serialize_model(self, instance):
    """
    Given a model instance or dict, serialize it to a dict.
    """
    data = {}
    fields = self.get_fields(instance)
    # serialize each required field
    for fname in fields:
        try:
            if hasattr(self, smart_str(fname)):
                # check first for a method 'fname' on self
                meth = getattr(self, fname)
                if inspect.ismethod(meth) and \
                        len(inspect.getargspec(meth)[0]) == 2:
                    obj = meth(instance)
            elif hasattr(instance, '__contains__') and fname in instance:
                # check for a key 'fname' on the instance
                obj = instance[fname]
            elif hasattr(instance, smart_str(fname)):
                # finally check for an attribute 'fname' on the instance
                obj = getattr(instance, fname)
            else:
                continue
            key = self.serialize_key(fname)
            val = self.serialize_val(fname, obj)
            data[key] = val
        except _SkipField:
            pass
    return data
def changeBusinessMan(request):
    """Change the businessMan of a given VM instance."""
    if request.method == "POST":
        if request.POST.get('change', '') == u"businessMan":
            thisUser = request.user
            if not checkPerm(thisUser, 'businessMan'):
                return HttpResponse('failed')
            thisVm = smart_str(request.POST.get('host'))
            oldValue = smart_str(request.POST.get('oldvalue', ''))
            newValue = smart_str(request.POST.get('newvalue', ''))
            vm = Vm(thisVm)
            try:
                vm.update(businessMan=newValue)
                log = LogRequest(request.user)
                logContent = '(modify businessMan) %s --> %s' % \
                    (oldValue, newValue)
                log.save(logContent)
                return HttpResponse('successful')
            except:
                return HttpResponse('failed')
        else:
            raise Http404
    else:
        raise Http404
def export_csv_vol(modeladmin, request, queryset):
    # instead of text/html, we render the response as a text/csv file
    response = HttpResponse(mimetype='text/csv')
    response['Content-Disposition'] = 'attachment; filename="volunteer.csv"'
    writer = csv.writer(response, csv.excel)
    # this will ensure that the encoding is utf8 so that excel can properly
    # open the file
    response.write(u'\ufeff'.encode('utf8'))
    # these are the five fields that will be exported (header row)
    writer.writerow([
        smart_str(u"Email"),
        smart_str(u"First Name"),
        smart_str(u"Last name"),
        smart_str(u"Start Date"),
        smart_str(u"Active")
    ])
    # now write one row per object in the queryset
    for obj in queryset:
        s = time.strptime(
            str(obj.start_date.month) + ' ' + str(obj.start_date.day) +
            ' ' + str(obj.start_date.year), "%m %d %Y")
        s = time.strftime("%m/%d/%Y", s)
        writer.writerow([
            smart_str(obj.email),
            smart_str(obj.first_name),
            smart_str(obj.last_name),
            smart_str(s),
            smart_str(obj.is_active)
        ])
    return response
def rec_list(obj):
    obj.title = smart_str("— " * obj.step) + smart_str(obj.title)
    mass_object.append(obj)
    children = Category.objects.filter(parent=obj)
    for child in children:
        rec_list(child)
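# Illustration of the output (assumption: Category exposes `step` as its
# depth, and `mass_object` is a module-level accumulator, as the snippet
# above implies). After rec_list(root), the collected titles read as an
# indented tree, e.g.:
#
#   Books           (step 0)
#   — Fiction       (step 1)
#   — — Fantasy     (step 2)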
def RenderContents(self, as_rows=True):
    from django.db import connection, transaction
    from django.utils.encoding import smart_str
    cursor = connection.cursor()
    cursor.execute(self.report_query)
    query_set = cursor.fetchall()
    self.results = query_set
    formatted_row = []
    formatted_set = []
    row_count = 0
    for row in query_set:
        row_count += 1
        for_index = 0
        for col in row:
            try:
                formatter = self.spec[for_index][2]
                if as_rows:
                    formatted_row.append(smart_str(
                        getattr(self, formatter)(col, self.organization)))
                else:
                    formatted_set.append(smart_str(
                        getattr(self, formatter)(col, self.organization)))
                for_index += 1
            except:
                pass
        if as_rows:
            formatted_set.append(formatted_row)
            formatted_row = []
    if as_rows:
        return formatted_set
    else:
        return formatted_set, row_count
def json(request, *args, **kwargs):
    """
    The oembed endpoint, or the url to which requests for metadata are
    passed.  Third parties will want to access this view with URLs for
    your site's content and be returned OEmbed metadata.
    """
    # coerce to dictionary
    params = dict(request.GET.items())
    callback = params.pop('callback', None)
    url = params.pop('url', None)
    if not url:
        return HttpResponseBadRequest('Required parameter missing: URL')
    try:
        provider = oembed.site.provider_for_url(url)
        if not provider.provides:
            raise OEmbedMissingEndpoint()
    except OEmbedMissingEndpoint:
        raise Http404('No provider found for %s' % url)
    query = dict([(smart_str(k), smart_str(v)) for k, v in params.items()
                  if v])
    try:
        resource = oembed.site.embed(url, **query)
    except OEmbedException, e:
        raise Http404('Error embedding %s: %s' % (url, str(e)))
def render(self, context):
    link = '#'
    try:
        path = context['request'].META['PATH_INFO']
        view, args, kwargs = resolve(path)
        filter_value = self.filter_value.resolve(context, True)
        if filter_value:
            filter_name = smart_str(self.filter_name)
            filter_value = smart_str(filter_value)
            kwargs[filter_name] = filter_value
            # These two don't make sense together
            if filter_name == 'server' and 'hostname' in kwargs:
                del kwargs['hostname']
            elif filter_name == 'hostname' and 'server' in kwargs:
                del kwargs['server']
            try:
                link = reverse(view, args=args, kwargs=kwargs)
            except NoReverseMatch:
                link = reverse(self.fallback_view, args=None,
                               kwargs={filter_name: filter_value})
            qs = context['request'].GET.urlencode()
            if qs:
                link += "?" + qs
    except NoReverseMatch:
        rm = sys.exc_info()[1]
        raise rm
    except (Resolver404, ValueError):
        pass
    return link
def print_entity_list():
    with open(ENTITYJSONFILE) as f:
        j = json.load(f)
        for i, k in enumerate(j["targets"]):
            for alias in k["alias"]:
                row = u"%s|%s|%s|%s" % (k["target_id"], k["group"],
                                        k["entity_type"], alias)
                print smart_str(row)
def get_hexdigest(algorithm, salt, raw_password):
    """
    Returns a string of the hexdigest of the given plaintext password and
    salt using the given algorithm ('md5', 'sha1' or 'crypt').
    """
    raw_password, salt = smart_str(raw_password), smart_str(salt)
    if algorithm == 'crypt':
        try:
            import crypt
        except ImportError:
            raise ValueError('"crypt" password algorithm not supported in '
                             'this environment')
        return crypt.crypt(raw_password, salt)
    # The rest of the supported algorithms are supported by hashlib, but
    # hashlib is only available in Python 2.5.
    try:
        import hashlib
    except ImportError:
        if algorithm == 'md5':
            import md5
            return md5.new(salt + raw_password).hexdigest()
        elif algorithm == 'sha1':
            import sha
            return sha.new(salt + raw_password).hexdigest()
    else:
        if algorithm == 'md5':
            return hashlib.md5(salt + raw_password).hexdigest()
        elif algorithm == 'sha1':
            return hashlib.sha1(salt + raw_password).hexdigest()
    raise ValueError("Got unknown password algorithm type in password.")
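# A small usage sketch: derive and compare salted digests. The exact hex
# value depends on the inputs; nothing here is specific to the snippet above
# beyond the (algorithm, salt, raw_password) calling convention.
digest = get_hexdigest('sha1', 's4lt', 'secret')
assert len(digest) == 40  # sha1 hexdigests are 40 hex characters
assert digest == get_hexdigest('sha1', 's4lt', 'secret')  # deterministic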
def render(self, context):
    try:
        source = self.src.resolve(context)
    except VariableDoesNotExist:
        return None
    if self.version_prefix:
        version_prefix = self.version_prefix
    else:
        try:
            version_prefix = self.version_prefix_var.resolve(context)
        except VariableDoesNotExist:
            return None
    try:
        source = force_unicode(source)
        version_path = get_version_path(url_to_path(source), version_prefix)
        if not os.path.isfile(smart_str(os.path.join(MEDIA_ROOT,
                                                     version_path))):
            # create version
            version_path = version_generator(url_to_path(source),
                                             version_prefix)
        elif os.path.getmtime(smart_str(os.path.join(
                MEDIA_ROOT, url_to_path(source)))) > os.path.getmtime(
                smart_str(os.path.join(MEDIA_ROOT, version_path))):
            # recreate version if original image was updated
            version_path = version_generator(url_to_path(source),
                                             version_prefix, force=True)
        context[self.var_name] = FileObject(version_path)
    except:
        context[self.var_name] = ""
    return ''
def fpost():
    post = MySQLdb.escape_string(smart_str(request.form['post_text']))
    user = MySQLdb.escape_string(smart_str(request.form['user']))
    lv = MySQLdb.escape_string(smart_str(request.form['lvalue']))
    la = MySQLdb.escape_string(smart_str(request.form['ladr']))
    pic = request.files['file']
    if pic:
        ur = secure_filename(pic.filename)
        if '.' not in ur:
            ur = "." + ur
        if len(get_post_all(user)) > 0:
            ur = str(get_post_all(user)[-1][0] + 1) + ur
        else:
            ur = "1" + ur
        pic.save(os.path.join(app.config['UPLOAD_FOLDER'], ur))
        ur = "pics/" + ur
    else:
        ur = "__empty__"
    if posting(user, MySQLdb.escape_string(post), ur):
        if la:
            if la[:7] != "http://":
                la = "http://" + la
            pi = int(get_post_all(user)[-1][0])
            if lv:
                put_link(pi, la, lv)
            else:
                put_link(pi, la)
    session['user'] = user
    return redirect(url_for("hom"))
def build_message(self, contact):
    """
    Build the email as a multipart message containing a multipart
    alternative for text (plain, HTML) plus all the attached files.
    """
    content_html = self.build_email_content(contact)
    content_text = html2text(content_html)

    message = MIMEMultipart()
    message['Subject'] = self.build_title_content(contact)
    message['From'] = smart_str(self.newsletter.header_sender)
    message['Reply-to'] = smart_str(self.newsletter.header_reply)
    message['To'] = contact.mail_format()

    message_alt = MIMEMultipart('alternative')
    message_alt.attach(MIMEText(smart_str(content_text), 'plain', 'UTF-8'))
    message_alt.attach(MIMEText(smart_str(content_html), 'html', 'UTF-8'))
    message.attach(message_alt)

    for attachment in self.attachments:
        message.attach(attachment)

    for header, value in self.newsletter.server.custom_headers.items():
        message[header] = value

    return message
def signin():
    user = MySQLdb.escape_string(smart_str(request.form['Username']))
    password = MySQLdb.escape_string(smart_str(request.form['Password']))
    if sign_in(user, password):
        posts = fget_post_all(user)
        return render_template("home.html", posts=posts, Username=user)
    return render_template("main-page.html")
def put_object_headers(response, meta, restricted=False, token=None):
    response['ETag'] = meta['checksum']
    response['Content-Length'] = meta['bytes']
    response.override_serialization = True
    response['Content-Type'] = meta.get('type', 'application/octet-stream')
    response['Last-Modified'] = http_date(int(meta['modified']))
    if not restricted:
        response['X-Object-Hash'] = meta['hash']
        response['X-Object-UUID'] = meta['uuid']
        if TRANSLATE_UUIDS:
            meta['modified_by'] = \
                retrieve_displayname(token, meta['modified_by'])
        response['X-Object-Modified-By'] = smart_str(
            meta['modified_by'], strings_only=True)
        response['X-Object-Version'] = meta['version']
        response['X-Object-Version-Timestamp'] = http_date(
            int(meta['version_timestamp']))
        for k in [x for x in meta.keys() if x.startswith('X-Object-Meta-')]:
            response[smart_str(k, strings_only=True)] = \
                smart_str(meta[k], strings_only=True)
        for k in (
                'Content-Encoding', 'Content-Disposition',
                'X-Object-Manifest', 'X-Object-Sharing',
                'X-Object-Shared-By', 'X-Object-Allowed-To',
                'X-Object-Public'):
            if k in meta:
                response[k] = smart_str(meta[k], strings_only=True)
    else:
        for k in ('Content-Encoding', 'Content-Disposition'):
            if k in meta:
                response[k] = smart_str(meta[k], strings_only=True)
def get_profile_fields(obj):
    reg = obj.user
    affiliate = reg.profile.get_registration().wsgc_affiliate
    if not affiliate:
        affiliate = reg.profile.get_registration().wsgc_affiliate_other
    race = [r.name for r in reg.profile.race.all()]
    fields = [
        reg.profile.salutation,
        smart_str(
            reg.first_name,
            encoding='utf-8', strings_only=False, errors='strict'
        ),
        smart_str(
            reg.profile.second_name,
            encoding='utf-8', strings_only=False, errors='strict'
        ),
        smart_str(
            reg.last_name,
            encoding='utf-8', strings_only=False, errors='strict'
        ),
        reg.email, reg.profile.email_auxiliary(),
        reg.profile.phone_primary, reg.profile.phone_mobile,
        reg.profile.address1, reg.profile.address2, reg.profile.city,
        reg.profile.state, reg.profile.postal_code,
        reg.profile.address1_current, reg.profile.address2_current,
        reg.profile.city_current, reg.profile.state_current,
        reg.profile.postal_code_current,
        reg.profile.date_of_birth, reg.profile.gender,
        ' '.join(race), reg.profile.tribe,
        reg.profile.disability, reg.profile.disability_specify,
        reg.profile.employment, reg.profile.military,
        reg.profile.us_citizen, reg.profile.registration_type,
        affiliate
    ]
    return fields
def profile_sync(request):
    profile = Profile.objects.get(user=request.user)
    data_encoded = urllib.urlencode([
        ('first_name', smart_str(profile.first_name)),
        ('last_name', smart_str(profile.last_name)),
        ('phone', smart_str(profile.phone)),
        ('mobile', smart_str(profile.mobile)),
        ('email_1', smart_str(profile.email)),
    ])
    if profile.atsid:
        url = '/'.join([ATS_URI, 'candidate/%s/update/'])
        r = urllib2.Request(url % (profile.atsid), data=data_encoded)
        y = urllib2.urlopen(r)
        #response = y.read()
        #y.close()
        #result = simplejson.loads(response)
    else:
        url = '/'.join([ATS_URI, 'candidate/new/'])
        r = urllib2.Request(url, data=data_encoded)
        y = urllib2.urlopen(r)
        response = y.read()
        y.close()
        result = simplejson.loads(response)
        profile.atsid = result['candidate']
        profile.save()
    return HttpResponseRedirect('/jobs/')
def download_file(file_name):
    path_to_file = EX_DIR + '\\candidateDetails\\' + file_name
    my_file = open(path_to_file, 'rb').read()
    response = HttpResponse(my_file, content_type="text/html")
    response['Content-Disposition'] = \
        'attachment; filename=%s' % smart_str(file_name)
    response['X-Sendfile'] = smart_str(path_to_file)
    return response
def export_xls_zip_all_classes(StudentAdmin, request, queryset):
    import openpyxl
    from django.utils.encoding import smart_str
    from openpyxl.utils import get_column_letter
    response = HttpResponse(
        content_type='application/vnd.openxmlformats-officedocument'
                     '.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=journal.xlsx'
    wb = openpyxl.Workbook()
    wb.remove(wb.active)
    pos = 1
    columns = [
        (u"ID", 15),
        (u"ФИО", 20),
        (u"Класс", 20),
        (u"Оценка", 10),
        (u"Комментарий", 40),
    ]
    dv = DataValidation(type="decimal", operator="between", formula1=0,
                        formula2=10, allow_blank=True)
    for parallel in Parallel.objects.all():
        for group in parallel.class2_set.all():
            isinstance(group, Class2)  # type hint for the IDE; no effect
            ws = wb.create_sheet(str(parallel) + str(group.name), pos)
            ws.protection.sheet = True
            ws.protection.password = '******'
            pos += 1
            row_num = 0
            for col_num in xrange(len(columns)):
                c = ws.cell(row=row_num + 1, column=col_num + 1)
                c.value = columns[col_num][0]
                #c.style.font.bold = True
                # set column width
                ws.column_dimensions[get_column_letter(col_num + 1)].width \
                    = columns[col_num][1]
            for student in group.student_set.all():
                row_num += 1
                row = [
                    student.pk,
                    smart_str(student.fname + " " + student.lname + ' ' +
                              student.fathers_name),
                    smart_str(str(parallel.name) + str(group.name)),
                    smart_str(''),
                    smart_str(''),
                ]
                for col_num in xrange(len(row)):
                    c = ws.cell(row=row_num + 1, column=col_num + 1)
                    c.alignment = Alignment(wrap_text=True)
                    lock = False
                    if col_num + 1 <= 3:
                        lock = True
                    c.protection = Protection(locked=lock)
                    c.value = row[col_num]
    response.write(save_virtual_workbook(wb))
    return response
def _sync_ldap_members(connection, group, ldap_info, count=0, max_count=1):
    if count >= max_count:
        return None

    # Find all users and groups of group.
    users_info = connection.find_users_of_group(ldap_info['dn'])
    groups_info = connection.find_groups_of_group(ldap_info['dn'])
    posix_members = ldap_info['posix_members']

    for user_info in users_info:
        LOG.debug("Synchronizing user %s with group %s" %
                  (smart_str(user_info['dn']), smart_str(group.name)))
        try:
            user = ldap_access.get_ldap_user(username=user_info['username'])
            group.user_set.add(user)
        except User.DoesNotExist:
            LOG.debug("Synchronizing user %s with group %s failed. User "
                      "does not exist." % (smart_str(user_info['dn']),
                                           smart_str(group.name)))

    for group_info in groups_info:
        LOG.debug("Synchronizing group %s" % smart_str(group_info['dn']))
        try:
            group = Group.objects.get(name=group_info['name'])
            _sync_ldap_members(connection, group, group_info, count + 1,
                               max_count)
        except Group.DoesNotExist:
            LOG.debug("Synchronizing group %s failed. Group does not "
                      "exist." % smart_str(group.name))

    for posix_member in posix_members:
        LOG.debug("Synchronizing posix user %s with group %s" %
                  (smart_str(posix_member), smart_str(group.name)))
        users_info = connection.find_users(
            posix_member,
            search_attr='uid',
            user_name_attr=desktop.conf.LDAP.USERS.USER_NAME_ATTR.get(),
            find_by_dn=False)
        for user_info in users_info:
            try:
                user = ldap_access.get_ldap_user(
                    username=user_info['username'])
                group.user_set.add(user)
            except User.DoesNotExist:
                LOG.debug("Synchronizing posix user %s with group %s "
                          "failed. User does not exist." %
                          (smart_str(posix_member), smart_str(group.name)))
def get_version_path(value, version_prefix):
    """
    Construct the PATH to an Image version.
    Value has to be a server path, relative to MEDIA_ROOT.

    version_filename = filename + version_prefix + ext
    Returns a path relative to MEDIA_ROOT.
    """
    if os.path.isfile(smart_str(os.path.join(MEDIA_ROOT, value))):
        path, filename = os.path.split(value)
        filename, ext = os.path.splitext(filename)

        # check if this file is a version of another file
        # to return filename_<version>.ext instead of
        # filename_<version>_<version>.ext
        tmp = filename.split("_")
        if tmp[len(tmp) - 1] in ADMIN_VERSIONS:
            # it seems like the "original" is actually a version of another
            # original, so we strip the suffix (aka. version_prefix)
            new_filename = filename.replace("_" + tmp[len(tmp) - 1], "")
            # check if the version exists when we use the new_filename
            if os.path.isfile(smart_str(os.path.join(
                    MEDIA_ROOT, path,
                    new_filename + "_" + version_prefix + ext))):
                # our "original" filename seems to be a filename_<version>
                # construct, so we replace it with the new_filename
                filename = new_filename
        # if a VERSIONS_BASEDIR is set we need to strip it from the path
        # or we get a <VERSIONS_BASEDIR>/<VERSIONS_BASEDIR>/... construct
        if VERSIONS_BASEDIR != "":
            path = path.replace(VERSIONS_BASEDIR + "/", "")
        version_filename = filename + "_" + version_prefix + ext
        return os.path.join(VERSIONS_BASEDIR, path, version_filename)
    else:
        return None
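# Worked example of the naming convention (hypothetical settings: the
# original file exists under MEDIA_ROOT, VERSIONS_BASEDIR == "_versions",
# and "thumbnail" is in ADMIN_VERSIONS):
#
#   get_version_path("uploads/photo.jpg", "thumbnail")
#   # -> "_versions/uploads/photo_thumbnail.jpg"
#
#   get_version_path("_versions/uploads/photo_thumbnail.jpg", "big")
#   # -> "_versions/uploads/photo_big.jpg", provided that version file
#   #    already exists on disk; the "_thumbnail" suffix is stripped first
#   #    so version suffixes never stack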
def fun():
    user = MySQLdb.escape_string(smart_str(request.form['user']))
    unf = MySQLdb.escape_string(smart_str(request.form['unf']))
    if unfollow(user, unf):
        posts = fget_post_all(user)
        return render_template("home.html", posts=posts, Username=user)
    return render_template("main-page.html")
def export_csv_customer_intercom(modeladmin, request, queryset):
    import csv
    from django.utils.encoding import smart_str
    from django.http import HttpResponse
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = \
        'attachment; filename=for_intercom.csv'
    if not request.user.has_perm('customers.export_customer'):
        return response
    writer = csv.writer(response, csv.excel)
    response.write(u'\ufeff'.encode('utf8'))
    writer.writerow([
        smart_str(u'user_id'),
        smart_str(u'email'),
        smart_str(u'name'),
        smart_str(u'signed_up_at'),
        smart_str(u'last_login_at'),
        smart_str(u'last_order_at'),
        smart_str(u'paused'),
        smart_str(u'unsubscribed'),
        smart_str(u'has_active_order'),
        smart_str(u'address'),
        smart_str(u'postcode'),
        smart_str(u'phone'),
        smart_str(u'vouchers'),
        smart_str(u'total_spend'),
        smart_str(u'shipped_orders'),
        smart_str(u'facebook_id'),
        smart_str(u'stripe_id'),
    ])
    for obj in queryset:
        writer.writerow([
            smart_str(obj.id),
            smart_str(obj.user.email),
            smart_str(obj.get_full_name()),
            smart_str(obj.get_signed_up()),
            smart_str(obj.get_last_login()),
            smart_str(obj.get_last_order_date()),
            smart_str(obj.subscription_is_paused()),
            smart_str(obj.subscription_is_canceled()),
            smart_str(obj.has_active_order()),
            smart_str(obj.get_full_address()),
            smart_str(obj.postcode),
            smart_str(obj.phone),
            smart_str(obj.get_all_voucher_names()),
            smart_str(obj.get_total_spend()),
            smart_str(obj.get_count_orders()),
            smart_str(obj.get_facebook_id()),
            smart_str(obj.stripe_id),
        ])
    return response
def _filter_ni(reportrow, field, data):
    args = smart_str(data).strip().split(",")
    return ('"%s" not in (%s)' % (field, ",".join(["%s"] * len(args))),
            args)
def _filter_nc(reportrow, field, data):
    return (
        'not upper("%s") like upper(%%s)' % field,
        ["%%%s%%" % smart_str(data).strip()],
    )
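# A quick illustration of the (sql_fragment, params) pairs these two
# helpers emit; the reportrow argument is unused by both, so None is
# passed here.
assert _filter_ni(None, "status", "new,open") == \
    ('"status" not in (%s,%s)', ['new', 'open'])
assert _filter_nc(None, "name", "smith") == \
    ('not upper("name") like upper(%s)', ['%smith%'])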
def cache_key(self, name):
    return u'staticfiles:%s' % hashlib.md5(smart_str(name)).hexdigest()
def post_process(self, paths, dry_run=False, **options):
    """
    Post process the given list of files (called from collectstatic).

    Processing is actually two separate operations:

    1. renaming files to include a hash of their content for cache-busting,
       and copying those files to the target storage.
    2. adjusting files which contain references to other files so they
       refer to the cache-busting filenames.

    If either of these are performed on a file, then that file is
    considered post-processed.
    """
    # don't even dare to process the files if we're in dry run mode
    if dry_run:
        return

    # delete cache of all handled paths
    self.cache.delete_many([self.cache_key(path) for path in paths])

    # build a list of adjustable files
    matches = lambda path: matches_patterns(path, self._patterns.keys())
    adjustable_paths = [path for path in paths if matches(path)]

    # then sort the files by the directory level
    path_level = lambda name: len(name.split(os.sep))
    for name in sorted(paths.keys(), key=path_level, reverse=True):

        # use the original, local file, not the copied-but-unprocessed
        # file, which might be somewhere far away, like S3
        storage, path = paths[name]
        with storage.open(path) as original_file:

            # generate the hash with the original content, even for
            # adjustable files.
            hashed_name = self.hashed_name(name, original_file)

            # then get the original's file content..
            if hasattr(original_file, 'seek'):
                original_file.seek(0)

            hashed_file_exists = self.exists(hashed_name)
            processed = False

            # ..to apply each replacement pattern to the content
            if name in adjustable_paths:
                content = original_file.read()
                converter = self.url_converter(name)
                for patterns in self._patterns.values():
                    for pattern in patterns:
                        content = pattern.sub(converter, content)
                if hashed_file_exists:
                    self.delete(hashed_name)
                # then save the processed result
                content_file = ContentFile(smart_str(content))
                saved_name = self._save(hashed_name, content_file)
                hashed_name = force_unicode(saved_name.replace('\\', '/'))
                processed = True
            else:
                # or handle the case in which neither processing nor
                # a change to the original file happened
                if not hashed_file_exists:
                    processed = True
                    saved_name = self._save(hashed_name, original_file)
                    hashed_name = force_unicode(
                        saved_name.replace('\\', '/'))

            # and then set the cache accordingly
            self.cache.set(self.cache_key(name), hashed_name)
            yield name, hashed_name, processed
def test_sendfile(self):
    response = real_sendfile(HttpRequest(), self._get_readme())
    self.assertTrue(response is not None)
    self.assertEqual('text/plain', response['Content-Type'])
    self.assertEqual(self._get_readme(), smart_str(response.content))
def test_xsendfile_header_containing_unicode(self):
    filepath = self.ensure_file(u'péter_là_gueule.txt')
    response = real_sendfile(HttpRequest(), filepath)
    self.assertTrue(response is not None)
    self.assertEqual(smart_str(filepath), response['X-Sendfile'])
def export_csv_customer(modeladmin, request, queryset):
    import csv
    from django.utils.encoding import smart_str
    from django.http import HttpResponse
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=mymodel.csv'
    if not request.user.has_perm('customers.export_customer'):
        return response
    writer = csv.writer(response, csv.excel)
    response.write(u'\ufeff'.encode('utf8'))
    writer.writerow([
        smart_str(u"id"),
        smart_str(u"user"),
        smart_str(u"customer"),
        smart_str(u"address"),
        smart_str(u"postcode"),
        smart_str(u"phone"),
        smart_str(u"amount"),
        smart_str(u"vouchers"),
        smart_str(u"orders"),
        smart_str(u"stripe_id"),
        smart_str(u"card_details"),
    ])
    for obj in queryset:
        writer.writerow([
            smart_str(obj.id),
            smart_str(obj.user),
            smart_str(obj.get_full_name()),
            smart_str(obj.get_full_address()),
            smart_str(obj.postcode),
            smart_str(obj.phone),
            smart_str(obj.amount),
            smart_str(obj.get_all_vouchers()),
            smart_str(obj.get_count_orders()),
            smart_str(obj.stripe_id),
            smart_str(obj.card_details),
        ])
    return response
def export_csv_order(modeladmin, request, queryset):
    import csv
    from django.utils.encoding import smart_str
    from django.http import HttpResponse
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=mymodel.csv'
    if not request.user.has_perm('customers.export_order'):
        return response
    writer = csv.writer(response, csv.excel)
    # BOM (optional...Excel needs it to open UTF-8 file properly)
    response.write(u'\ufeff'.encode('utf8'))
    writer.writerow([
        smart_str(u"id"),
        smart_str(u"status"),
        smart_str(u"creation_date"),
        smart_str(u"shipping_date"),
        smart_str(u"customer"),
        smart_str(u"coffee"),
        smart_str(u"amount"),
        smart_str(u"brew"),
        smart_str(u"package"),
        smart_str(u"different"),
        smart_str(u"recurrent"),
        smart_str(u"interval"),
        smart_str(u"voucher"),
        smart_str(u"feedback"),
    ])
    for obj in queryset:
        writer.writerow([
            smart_str(obj.id),
            smart_str(obj.status),
            smart_str(obj.date),
            smart_str(obj.shipping_date),
            smart_str(obj.customer),
            smart_str(obj.coffee),
            smart_str(obj.amount),
            smart_str(obj.brew),
            smart_str(obj.get_package_display()),
            smart_str(obj.different),
            smart_str(obj.recurrent),
            smart_str(obj.interval),
            smart_str(obj.voucher),
            smart_str(obj.get_feedback_display()),
        ])
    return response
def export_csv_coffee_reviews(modeladmin, request, queryset):
    import csv
    from django.utils.encoding import smart_str
    from django.http import HttpResponse
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = \
        'attachment; filename=coffee_reviews.csv'
    if not request.user.has_perm('customers.export_coffeereview'):
        return response
    writer = csv.writer(response, csv.excel)
    response.write(u'\ufeff'.encode('utf8'))
    writer.writerow([
        smart_str(u"id"),
        smart_str(u"created_at"),
        smart_str(u"rating"),
        smart_str(u"comment"),
        smart_str(u"coffee"),
        smart_str(u"order"),
        smart_str(u"amount"),
        smart_str(u"customer"),
        smart_str(u"stripe_id"),
    ])
    for obj in queryset:
        writer.writerow([
            smart_str(obj.id),
            smart_str(obj.created_at),
            smart_str(obj.rating),
            smart_str(obj.comment),
            smart_str(obj.order.coffee),
            smart_str(obj.order),
            smart_str(obj.order.amount),
            smart_str(obj.order.customer),
            smart_str(obj.order.customer.stripe_id),
        ])
    return response
        unsubscribes += temp_list
        unsubscribes_set = set(unsubscribes)
    except mailchimp.Error, e:
        print 'MailChimp error occurred: %s - %s' % (e.__class__, e)

    customers = Customer.objects.all()
    get_started = GetStartedResponse.objects.all()
    customers_set = set([x.user.email.lower() for x in customers])
    get_started_set = set([x.email.lower() for x in get_started])
    subscribes_set = customers_set.union(get_started_set) - unsubscribes_set

    writer.writerow([
        smart_str(u"email"),
        # smart_str(u"mailchimp"),
    ])
    for email in subscribes_set:
        writer.writerow([
            smart_str(email),
            # smart_str('1'),
        ])
    # for email in unsubscribes_set:
    #     writer.writerow([
    #         smart_str(email),
    #         smart_str('0'),
    #     ])
    return response
def export_csv_gearorders(modeladmin, request, queryset):
    import csv
    from django.utils.encoding import smart_str
    from django.http import HttpResponse
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = \
        'attachment; filename=gear_orders.csv'
    if not request.user.has_perm('customers.export_gearorder'):
        return response
    writer = csv.writer(response, csv.excel)
    # BOM (optional...Excel needs it to open UTF-8 file properly)
    response.write(u'\ufeff'.encode('utf8'))
    writer.writerow([
        smart_str(u'id'),
        smart_str(u'status'),
        smart_str(u'creation_date'),
        smart_str(u'shipping_date'),
        smart_str(u'customer'),
        smart_str(u'gear'),
        smart_str(u'quantity'),
        smart_str(u'price'),
        smart_str(u'details'),
        smart_str(u'tracking_number'),
    ])
    for obj in queryset:
        writer.writerow([
            smart_str(obj.id),
            smart_str(obj.status),
            smart_str(obj.date),
            smart_str(obj.shipping_date),
            smart_str(obj.customer),
            smart_str(obj.gear),
            smart_str(obj.details.get('Quantity', 1)),
            smart_str(obj.price),
            smart_str(obj.details),
            smart_str(obj.tracking_number),
        ])
    return response
def handle(self, *args, **options):
    outputfile = options.get('file')

    output_text = []
    output_text.append('@@@@@'.join([
        smart_str(u"ID"),
        smart_str(u"User"),
        smart_str(u"Date"),
        smart_str(u"Date Updated"),
        smart_str(u"Name (as stated on disclaimer)"),
        smart_str(u"DOB"),
        smart_str(u"Address"),
        smart_str(u"Postcode"),
        smart_str(u"Home Phone"),
        smart_str(u"Mobile Phone"),
        smart_str(u"Emergency Contact 1: Name"),
        smart_str(u"Emergency Contact 1: Relationship"),
        smart_str(u"Emergency Contact 1: Phone"),
        smart_str(u"Emergency Contact 2: Name"),
        smart_str(u"Emergency Contact 2: Relationship"),
        smart_str(u"Emergency Contact 2: Phone"),
        smart_str(u"Medical Conditions"),
        smart_str(u"Medical Conditions Details"),
        smart_str(u"Joint Problems"),
        smart_str(u"Joint Problems Details"),
        smart_str(u"Allergies"),
        smart_str(u"Allergies Details"),
        smart_str(u"Medical Treatment Terms"),
        smart_str(u"Medical Treatment Accepted"),
        smart_str(u"Disclaimer Terms"),
        smart_str(u"Disclaimer Terms Accepted"),
        smart_str(u"Over 18 Statement"),
        smart_str(u"Over 18 Confirmed")
    ]))

    for obj in OnlineDisclaimer.objects.all():
        output_text.append('@@@@@'.join([
            smart_str(obj.pk),
            smart_str(obj.user),
            smart_str(obj.date.strftime('%Y-%m-%d %H:%M:%S:%f %z')),
            smart_str(
                obj.date_updated.strftime('%Y-%m-%d %H:%M:%S:%f %z')
                if obj.date_updated else ''),
            smart_str(obj.name),
            smart_str(obj.dob.strftime('%Y-%m-%d')),
            smart_str(obj.address),
            smart_str(obj.postcode),
            smart_str(obj.home_phone),
            smart_str(obj.mobile_phone),
            smart_str(obj.emergency_contact1_name),
            smart_str(obj.emergency_contact1_relationship),
            smart_str(obj.emergency_contact1_phone),
            smart_str(obj.emergency_contact2_name),
            smart_str(obj.emergency_contact2_relationship),
            smart_str(obj.emergency_contact2_phone),
            smart_str('Yes' if obj.medical_conditions else 'No'),
            smart_str(obj.medical_conditions_details),
            smart_str('Yes' if obj.joint_problems else 'No'),
            smart_str(obj.joint_problems_details),
            smart_str('Yes' if obj.allergies else 'No'),
            smart_str(obj.allergies_details),
            smart_str(obj.medical_treatment_terms),
            smart_str('Yes' if obj.medical_treatment_permission else 'No'),
            smart_str(obj.disclaimer_terms),
            smart_str('Yes' if obj.terms_accepted else 'No'),
            smart_str(obj.over_18_statement),
            smart_str('Yes' if obj.age_over_18_confirmed else 'No'),
        ]))

    output_str = '&&&&&'.join(output_text)

    with open(outputfile, 'wb') as out:
        out.write(encrypt(PASSWORD, output_str))

    with open(outputfile, 'rb') as file:
        filename = os.path.split(outputfile)[1]
        try:
            msg = EmailMessage(
                '{} disclaimer backup'.format(
                    settings.ACCOUNT_EMAIL_SUBJECT_PREFIX),
                'Encrypted disclaimer back up file attached. '
                '{} records.'.format(OnlineDisclaimer.objects.count()),
                settings.DEFAULT_FROM_EMAIL,
                to=[settings.SUPPORT_EMAIL],
                attachments=[(filename, file.read(), 'bytes/bytes')])
            msg.send(fail_silently=False)
        except:
            pass

    self.stdout.write(
        '{} disclaimer records encrypted and written to {}'.format(
            OnlineDisclaimer.objects.count(), outputfile))
    logger.info('{} disclaimer records encrypted and backed up'.format(
        OnlineDisclaimer.objects.count()))
    ActivityLog.objects.create(
        log='{} disclaimer records encrypted and backed up'.format(
            OnlineDisclaimer.objects.count()))
def export_csv_preferences(modeladmin, request, queryset):
    import csv
    from django.utils.encoding import smart_str
    from django.http import HttpResponse
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename=mymodel.csv'
    if not request.user.has_perm('customers.export_preferences'):
        return response
    writer = csv.writer(response, csv.excel)
    response.write(u'\ufeff'.encode('utf8'))
    writer.writerow([
        smart_str(u"id"),
        smart_str(u"customer"),
        smart_str(u"coffee"),
        smart_str(u"flavor"),
        smart_str(u"brew"),
        smart_str(u"package"),
        smart_str(u"different"),
        smart_str(u"intense"),
        smart_str(u"interval"),
    ])
    for obj in queryset:
        flavors = ' | '.join([str(v) for v in obj.flavor.all()])
        writer.writerow([
            smart_str(obj.id),
            smart_str(obj.customer),
            smart_str(obj.coffee),
            smart_str(flavors),
            smart_str(obj.brew),
            smart_str(obj.package),
            smart_str(obj.different),
            smart_str(obj.intense),
            smart_str(obj.interval),
        ])
    return response
def get_menu_id(self, path):
    return hashlib.sha1(" | ".join(smart_str(p) for p in path)).hexdigest()
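# Contract sketch (hypothetical path values): the id is a stable sha1 over
# the joined path parts, so equal paths give equal ids and the result is
# order-sensitive. Any iterable of strings (or lazy translations) works,
# since smart_str coerces each part.
#
#   self.get_menu_id(("Admin", "Users", "Add"))
#   # == hashlib.sha1("Admin | Users | Add").hexdigest()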
def foreignkey_autocomplete(self, request):
    """
    Search in the fields of the given related model and return the result
    as a simple string to be used by the jQuery Autocomplete plugin.
    """
    query = request.GET.get('q', None)
    app_label = request.GET.get('app_label', None)
    model_name = request.GET.get('model_name', None)
    search_fields = request.GET.get('search_fields', None)
    object_pk = request.GET.get('object_pk', None)

    try:
        to_string_function = self.related_string_functions[model_name]
    except KeyError:
        to_string_function = lambda x: x.__str__()

    if search_fields and app_label and model_name and (query or object_pk):

        def construct_search(field_name):
            # use different lookup methods depending on the notation
            if field_name.startswith('^'):
                return "%s__istartswith" % field_name[1:]
            elif field_name.startswith('='):
                return "%s__iexact" % field_name[1:]
            elif field_name.startswith('@'):
                return "%s__search" % field_name[1:]
            else:
                return "%s__icontains" % field_name

        model = apps.get_model(app_label, model_name)
        queryset = model._default_manager.all()
        data = ''
        if query:
            for bit in query.split():
                or_queries = [
                    models.Q(**{
                        construct_search(smart_str(field_name)):
                            smart_str(bit)
                    })
                    for field_name in search_fields.split(',')
                ]
                other_qs = QuerySet(model)
                other_qs.query.select_related = \
                    queryset.query.select_related
                other_qs = other_qs.filter(reduce(operator.or_, or_queries))
                queryset = queryset & other_qs

            additional_filter = self.get_related_filter(model, request)
            if additional_filter:
                queryset = queryset.filter(additional_filter)

            if self.autocomplete_limit:
                queryset = queryset[:self.autocomplete_limit]

            data = ''.join(['%s|%s\n' % (to_string_function(f), f.pk)
                            for f in queryset])
        elif object_pk:
            try:
                obj = queryset.get(pk=object_pk)
            except Exception:  # FIXME: use stricter exception checking
                pass
            else:
                data = to_string_function(obj)
        return HttpResponse(data, content_type='text/plain')
    return HttpResponseNotFound()
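# The search_fields prefixes mirror Django's ModelAdmin notation. A request
# such as (hypothetical values)
#
#   ?app_label=auth&model_name=user&search_fields=^username,=email&q=bob
#
# expands, via construct_search, to the filter
#
#   Q(username__istartswith='bob') | Q(email__iexact='bob')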
def __str__(self):
    return smart_str(self.cost_item) + " - " + smart_str(self.total)
def write_page(self, site, page, filename):
    self.out('Writing sitemap %s.' % filename, 2)
    old_page_md5 = None
    urls = []

    if conf.MOCK_SITE:
        if conf.MOCK_SITE_NAME is None:
            raise ImproperlyConfigured(
                "STATICSITEMAPS_MOCK_SITE_NAME must not be None. "
                "Try setting to www.yoursite.com")
        from django.contrib.sites.requests import RequestSite
        from django.test.client import RequestFactory
        rs = RequestSite(RequestFactory().get(
            '/', SERVER_NAME=conf.MOCK_SITE_NAME))
    try:
        if callable(site):
            if conf.MOCK_SITE:
                urls.extend(site().get_urls(
                    page, rs, protocol=conf.MOCK_SITE_PROTOCOL))
            else:
                urls.extend(site().get_urls(
                    page, protocol=conf.FORCE_PROTOCOL))
        else:
            if conf.MOCK_SITE:
                urls.extend(site.get_urls(
                    page, rs, protocol=conf.MOCK_SITE_PROTOCOL))
            else:
                urls.extend(site.get_urls(
                    page, protocol=conf.FORCE_PROTOCOL))
    except EmptyPage:
        self.out("Page %s empty" % page)
    except PageNotAnInteger:
        self.out("No page '%s'" % page)

    lastmods = [lastmod for lastmod in [u.get('lastmod') for u in urls]
                if lastmod is not None]
    file_lastmod = max(lastmods) if len(lastmods) > 0 else None
    path = os.path.join(self.root_dir, filename)
    template = getattr(site, 'sitemap_template', 'sitemap.xml')

    if self.storage.exists(path):
        old_page_md5 = self.read_hash(path)
        self.storage.delete(path)

    output = smart_str(loader.render_to_string(template, {'urlset': urls}))
    self._write(path, output)

    with self.storage.open(path) as sitemap_page:
        if old_page_md5 != self.get_hash(sitemap_page.read()):
            self.has_changes = True

    if conf.USE_GZIP:
        if conf.GZIP_METHOD not in ['python', 'system']:
            raise ImproperlyConfigured(
                "STATICSITEMAPS_GZIP_METHOD must be in "
                "['python', 'system']")
        if conf.GZIP_METHOD == 'system' and not os.path.exists(
                conf.SYSTEM_GZIP_PATH):
            raise ImproperlyConfigured(
                'STATICSITEMAPS_SYSTEM_GZIP_PATH does not exist')
        if conf.GZIP_METHOD == 'system' and not isinstance(
                self.storage, FileSystemStorage):
            raise ImproperlyConfigured(
                'system gzip method can only be used with '
                'FileSystemStorage')

        if conf.GZIP_METHOD == 'system':
            # GZIP with system gzip binary
            subprocess.call([conf.SYSTEM_GZIP_PATH, '-f', path])
        else:
            # GZIP with python gzip lib
            try:
                gzipped_path = '%s.gz' % path
                if self.storage.exists(gzipped_path):
                    self.storage.delete(gzipped_path)
                self.out('Compressing...', 2)
                buf = BytesIO()
                with gzip.GzipFile(fileobj=buf, mode="w") as f:
                    f.write(output.encode('utf-8'))
                self.storage.save(gzipped_path,
                                  ContentFile(buf.getvalue()))
            except OSError:
                self.out("Compress %s file error" % path)
    return file_lastmod
def __str__(self):
    return smart_str(self.name)
def __str__(self):
    return smart_str(self.category) + " " + smart_str(self.name)
def get_size_name(self):
    if self.size:
        return smart_str(self.size.name)
    else:
        return smart_str("único")
def __str__(self):
    return smart_str(self.name) + " (" + smart_str(self.unit) + ")"
def __repr__(self):
    return smart_str(u'<%s: %s>' % (self.__class__.__name__,
                                    unicode(self)))
def __str__(self):
    return smart_str(self.key)
def handle(self, *args, **options):
    print("=/= " * 20, "\nSitemap creation started @ %s" %
          (datetime.datetime.now()))
    count = -1
    if args:
        count = int(args[0]) if args[0].isdigit() else -1
    current_site = ROOT_BASE_URL
    root_url_path = 'http://%s/' % current_site
    sites = []
    today_date = datetime.date.today()
    try:
        global sitemaps
        for section, site in sitemaps.items():
            parent_xml_file = parent_xml_path + 'sitemap_' + section + \
                '.xml'
            fd_parent = open(parent_xml_file, 'w')
            fd_parent.write('<?xml version="1.0" encoding="UTF-8"?>\n')
            fd_parent.write(
                '<sitemapindex xmlns='
                '"http://www.sitemaps.org/schemas/sitemap/0.9">\n')
            fd_parent.write('<sitemap>\n')
            fd_parent.write('<loc>' + root_url_path + 'sitemap_' +
                            section + '-1.xml.gz</loc>\n')
            fd_parent.write('<lastmod>' +
                            today_date.strftime("%Y-%m-%d") +
                            '</lastmod>\n')
            fd_parent.write('</sitemap>\n')
            if callable(site):
                pages = site().paginator.num_pages
            else:
                pages = site.paginator.num_pages
            sitemap_url = reverse('section_sitemap',
                                  kwargs={'section': section})
            sites.append('%s%s' % (current_site, sitemap_url))
            if pages > 1:
                for page in range(2, pages + 1):
                    sites.append('%s%s?p=%s' % (current_site, sitemap_url,
                                                page))
                    fd_parent.write('<sitemap>\n')
                    fd_parent.write('<loc>' + root_url_path + 'sitemap_' +
                                    section + '-' + str(page) +
                                    '.xml.gz</loc>\n')
                    fd_parent.write('<lastmod>' +
                                    today_date.strftime("%Y-%m-%d") +
                                    '</lastmod>\n')
                    fd_parent.write('</sitemap>\n')
            fd_parent.write('</sitemapindex>\n')
            fd_parent.close()

        success_count = 0
        SuperSitemap.write_file = open(
            BASE_DIR + '/uploads/sitemap_log.txt', 'w')
        for each_site in sites:
            url_name = each_site.split('/')[1]
            if url_name.find('?') > 0:
                section = url_name[url_name.find('-') + 1:
                                   url_name.find('.')]
                page = int(url_name[url_name.find('=') + 1:])
            else:
                section = url_name[url_name.find('-') + 1:
                                   url_name.find('.')]
                page = 1
            file_name = 'sitemap_' + section + '-' + str(page) + '.xml'
            xml_file_path = parent_xml_path + file_name
            maps, urls = [], []
            maps.append(sitemaps[section])
            for site in maps:
                if callable(site):
                    urls, success_count = site().get_urls(page, count)
                else:
                    urls, success_count = site.get_urls(page, count)
            response = smart_str(loader.render_to_string(
                'custom_sitemap.xml', {'urlset': urls}))
            fd = open(xml_file_path, 'w')
            fd.write(response)
            fd.close()
            os.system(str('gzip -f ' + xml_file_path))
            if SuperSitemap.write_file:
                SuperSitemap.write_file.write(
                    'XML File : %s , Success Count : %s\n' %
                    (os.path.basename(xml_file_path), success_count))
        if not SuperSitemap.write_file.closed:
            SuperSitemap.write_file.close()
    except Exception as e:
        error_stream = traceback.format_exc()
        sys.stdout.write(error_stream)
        sys.stdout.write('\nExiting with error\n' + '*' * 30 + '\n')
        sys.exit()
    sys.stdout.write('Successfully created the xml files\n\n')
def full_name(self):
    if self.flavor:
        return smart_str(self.category) + " " + smart_str(self.flavor.name)
    else:
        return smart_str(self.category)
def create_questionnaire_explanation_fields(
        self, questionnaire_id, language, questionnaire_lime_survey,
        fields, entrance_questionnaire):
    """
    :param questionnaire_id:
    :param language:
    :param questionnaire_lime_survey:
    :param fields: fields from questionnaire that are to be exported
    :param entrance_questionnaire: boolean
    :return: header, formatted according to fields
             data_rows, formatted according to fields
             if error, both data are []
    """
    # clear fields
    fields_cleared = [field.split("[")[0] for field in fields]

    questionnaire_explanation_fields_list = [HEADER_EXPLANATION_FIELDS]
    fields_from_questions = []

    # for each field, verify the question description
    questionnaire_title = questionnaire_lime_survey.get_survey_title(
        questionnaire_id, language)
    questionnaire_code = \
        self.get_questionnaire_code_from_id(questionnaire_id)

    # get fields description
    questionnaire_questions = \
        questionnaire_lime_survey.list_questions_ids(questionnaire_id, 0)

    for question in questionnaire_questions:
        properties = questionnaire_lime_survey.get_question_properties(
            question, language)
        question_code = \
            properties['title'] if 'title' in properties else None
        if question_code and question_code in fields_cleared:
            fields_from_questions.append(question_code)

            # clean the question description that came from limesurvey
            question_description = \
                re.sub(
                    '{.*?}', '',
                    re.sub('<.*?>', '', properties['question'])
                ).replace(' ', '').strip()
            question_type = smart_str(properties['type'])
            question_type_description = question_types[question_type] \
                if question_type in question_types else ''
            question_group = \
                self.get_group_properties(
                    questionnaire_lime_survey, questionnaire_id,
                    properties['gid'], language
                )
            question_order = properties['question_order']

            questionnaire_list = \
                [smart_str(questionnaire_code),
                 smart_str(questionnaire_title)]
            question_type_list = \
                [smart_str(question_order), question_type,
                 question_type_description]
            question_list = \
                [smart_str(question_code),
                 smart_str(question_description)]

            scales = [""]
            # "1": "Array Dual Scale"
            if question_type == "1":
                if isinstance(properties['attributes_lang'], dict):
                    scales = [
                        properties['attributes_lang']['dualscale_headerA']
                        if 'dualscale_headerA' in
                        properties['attributes_lang'] else "",
                        properties['attributes_lang']['dualscale_headerB']
                        if 'dualscale_headerB' in
                        properties['attributes_lang'] else ""
                    ]
                else:
                    scales = ["", ""]

            # answers
            options_list = []
            if isinstance(properties['answeroptions'], dict):
                options = collections.OrderedDict(
                    sorted(properties['answeroptions'].items()))
                for option_key, option_values in options.items():
                    options_list.append([
                        smart_str(option_key),
                        smart_str(option_values['answer'])
                    ])
            else:
                # include blank line
                options_list = [[""] * 2]

            # sub-questions
            if isinstance(properties['subquestions'], dict):
                sub_questions_list = [[
                    smart_str(value['title']),
                    smart_str(value['question'])
                ] for value in properties['subquestions'].values()]
                sub_questions_list = sorted(sub_questions_list,
                                            key=itemgetter(0))
            else:
                # include blank line
                sub_questions_list = [[""] * 2]

            for scale_index, scale_label in enumerate(scales):
                scale = [scale_index + 1, scale_label] \
                    if scale_label else [""] * 2
                for sub_question in sub_questions_list:
                    for option in options_list:
                        question_index = question_code
                        if sub_question[0]:
                            question_index += \
                                '[' + sub_question[0] + ']'
                        if scale_label:
                            question_index += \
                                '[' + str(scale_index + 1) + ']'
                        questionnaire_explanation_fields_list.append(
                            questionnaire_list +
                            [question_group['group_name']] +
                            question_type_list + [question_index] +
                            question_list + sub_question + scale + option)

    if len(fields_cleared) != len(fields_from_questions):
        for field in fields_cleared:
            if field not in fields_from_questions:
                description = self.get_header_description(
                    questionnaire_id, field, entrance_questionnaire)
                question_list = [
                    smart_str(questionnaire_code),
                    smart_str(questionnaire_title),
                    '', '',
                    smart_str(field),
                    smart_str(field),
                    smart_str(description)
                ]
                questionnaire_explanation_fields_list.append(question_list)

    return questionnaire_explanation_fields_list
def post(self, request, *args, **kwargs):
    routecard = RouteCard.objects.get(pk=self.kwargs.get('pk'))
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = \
        'attachment; filename={0}-{1}.csv'.format(
            routecard.jobspec.number, routecard.number)
    writer = csv.writer(response, csv.excel)
    # BOM (optional...Excel needs it to open UTF-8 file properly)
    response.write(u'\ufeff'.encode('utf8'))
    writer.writerow([
        smart_str(u"Process"),
        smart_str(u"Machine"),
        smart_str(u"Planned Start Time"),
        smart_str(u"Planned End Time"),
        smart_str(u"Completed"),
        smart_str(u"Total Accepted"),
        smart_str(u"Delay"),
    ])
    for plan in routecard.plan_set.all():
        writer.writerow([
            smart_str(plan.process),
            smart_str(plan.machine),
            smart_str(plan.planned_on),
            smart_str(plan.end_time()),
            smart_str(plan.is_complete),
            smart_str(plan.total_accepted_quantity()),
            smart_str(plan.delay()),
        ])
    return response
def post(self, request):
    if request.is_ajax():
        try:
            if isinstance(request.body, bytes):
                data = json.loads(request.body.decode())
            else:
                data = json.loads(request.body)
            if data['action'] == 'create':
                if not request.user.has_perm(
                        'common.can_add_commandssequence'):
                    raise PermissionDenied(_('403 Forbidden'))
                obj = CommandsSequence.objects.create(
                    name=data['name'], commands=data['commands'])
                for group in data['group']:
                    obj.group.add(ServerGroup.objects.get(name=group))
                obj.save()
                return JsonResponse({
                    'status': True,
                    'message': '%s create success!' %
                               (smart_str(data.get('name', None)))
                })
            elif data['action'] == 'update':
                if not request.user.has_perm(
                        'common.can_change_commandssequence'):
                    raise PermissionDenied(_('403 Forbidden'))
                try:
                    obj = CommandsSequence.objects.get(
                        id=data.get('id', None))
                    obj.commands = data['commands']
                    [obj.group.remove(group) for group in obj.group.all()]
                    for group in data['group']:
                        obj.group.add(ServerGroup.objects.get(name=group))
                    data.pop('group')
                    obj.__dict__.update(data)
                    obj.save()
                    return JsonResponse({
                        'status': True,
                        'message': '%s update success!' %
                                   (smart_str(data.get('name', None)))
                    })
                except ObjectDoesNotExist:
                    return JsonResponse({
                        'status': False,
                        'message': 'Request object not exist!'
                    })
            elif data['action'] == 'delete':
                if not request.user.has_perm(
                        'common.can_delete_commandssequence'):
                    raise PermissionDenied(_('403 Forbidden'))
                try:
                    obj = CommandsSequence.objects.get(
                        id=data.get('id', None))
                    taskname = obj.name
                    obj.delete()
                    return JsonResponse({
                        'status': True,
                        'message': 'Delete task %s success!' % (taskname)
                    })
                except ObjectDoesNotExist:
                    return JsonResponse({
                        'status': False,
                        'message': 'Request object not exist!'
                    })
            else:
                return JsonResponse({
                    'status': False,
                    'message': 'Illegal action.'
                })
        except ObjectDoesNotExist:
            return JsonResponse({
                'status': False,
                'message': 'Please input a valid group name!'
            })
        except IntegrityError:
            return JsonResponse({
                'status': False,
                'message': 'Task name %s already exists, please use '
                           'another name instead!' % (data['name'])
            })
        except KeyError:
            return JsonResponse({
                'status': False,
                'message': 'Invalid parameter, please report it to the '
                           'administrator!'
            })
        except Exception as e:
            print(traceback.print_exc())
            return JsonResponse({
                'status': False,
                'message': 'Some error happened! Please report it to the '
                           'administrator! Error info: %s' % (smart_str(e))
            })
    else:
        pass
def _get_content(self):
    if self.has_header('Content-Encoding'):
        return ''.join(self._container)
    return smart_str(''.join(self._container), self._charset)