def upload_pricelist(request):
    # Staff-only view: replace the single "purchase_doc" file under
    # MEDIA_ROOT/uploads/ with the newly uploaded one, keeping the new
    # file's extension. Redirects to /admin/ on success.
    # NOTE(review): a staff GET request falls through and returns None
    # (Django will raise) -- presumably only POSTed to; confirm callers.
    if request.user.is_staff:
        if request.method == 'POST':
            f = request.FILES['file']
            filename = request.FILES['file'].name
            # Transliterate and de-space the name only to derive the extension.
            name, ext = os.path.splitext(translify(filename).replace(' ', '_'))
            newname = '/uploads/' + 'purchase_doc' + ext
            oldfile = 'purchase_doc'
            # Find the previously stored purchase_doc (any extension) so it can be removed.
            for root, dirs, files in os.walk(settings.MEDIA_ROOT+'/uploads/',):
                for filename in files:
                    name, ext = os.path.splitext(translify(u'%s' % filename).replace(' ', '_'))
                    if name=='purchase_doc':
                        oldfile = '/uploads/' + filename
            try:
                os.remove(settings.MEDIA_ROOT + oldfile)
            except OSError:
                # Nothing to delete (first upload) -- flag and continue.
                oldfile = False
            path_name = settings.MEDIA_ROOT + newname
            # NOTE(review): file handle is not closed if a chunk write raises;
            # a `with` block would be safer.
            destination = open(path_name, 'wb+')
            for chunk in f.chunks():
                destination.write(chunk)
            destination.close()
            return http.HttpResponseRedirect('/admin/')
    else:
        return http.HttpResponse('403 Forbidden. Authentication Required!')
def create_excel_stat(selected_survey, date_start, date_end, query_type):
    """Create a downloadable Excel file of dialing statistics for the user.

    (Original docstring in Russian: "Создать excel-файл статистики прозвона
    для скачивания пользователем".)
    """
    sample_data, format_data = get_sample_data(selected_survey, query_type)
    target_dates = get_dates(date_start, date_end, sample_data)
    excel_rows = get_excel_rows(format_data, target_dates)
    wb = xl.Workbook()
    ws = wb.active
    # Row 1: survey name in A1 followed by one column per date (or string label).
    ws.cell(row=1, column=1).value = selected_survey
    for col_num, col_label in enumerate(target_dates):
        if type(col_label) == str:
            ws.cell(row=1, column=col_num + 2).value = col_label
        else:
            # Non-string labels are assumed to be date/datetime objects.
            ws.cell(row=1, column=col_num + 2).value = dt.strftime(col_label, '%d.%m.%Y')
    # Data rows: column A is the response label, then one count per date column.
    for row_num, resp_counts in enumerate(excel_rows):
        for label, counts in resp_counts.items():
            ws.cell(row=row_num + 2, column=1).value = label
            for col_num, count in enumerate(counts):
                ws.cell(row=row_num + 2, column=col_num + 2).value = count
    auto_fit_excel_columns(ws)
    response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    # NOTE(review): for any query_type other than 1 or 2 `filename` is never
    # assigned and the next line raises NameError -- confirm callers only
    # pass 1 or 2.
    if query_type == 1:
        filename = 'Sample_count_{survey}.xlsx'.format(survey=translify(selected_survey).replace('.', '_'))
    elif query_type == 2:
        filename = 'Dialing_count_{survey}.xlsx'.format(survey=translify(selected_survey).replace('.', '_'))
    response['Content-Disposition'] = 'attachment; filename={}'.format(filename)
    wb.save(response)
    return response
def form_valid(self, form):
    """Persist the uploaded video source/poster and create ru/en subtitle rows."""
    source = self.request.FILES.get('source')
    poster = self.request.FILES.get('poster')
    # Transliterate the original filenames so storage paths are ASCII-safe.
    form.instance.source = default_storage.save(translify(source.name), ContentFile(source.read()))
    form.instance.poster = default_storage.save(translify(poster.name), ContentFile(poster.read()))
    form.instance.user = self.request.user
    self.object = form.save()
    subtitle = Subtitle()
    subtitle.lang = 'ru'
    subtitle.video = self.object
    subtitle.source = default_storage.save(translify(self.ru_sub.name), ContentFile(self.ru_sub.read()))
    subtitle.save()
    # Clearing pk makes the following save() INSERT a second row that reuses
    # the remaining field values (video stays the same, lang/source change).
    subtitle.pk = None
    subtitle.source = default_storage.save(translify(self.en_sub.name), ContentFile(self.en_sub.read()))
    subtitle.lang = 'en'
    subtitle.save()
    return HttpResponseRedirect(self.get_success_url())
def set_slug(self):
    """Build and persist a slug of the form "<id>-<first>-<last>" (transliterated)."""
    from pytils.translit import translify
    name = ''
    if self.first_name:
        name += translify(self.first_name)
    if self.last_name:
        name += '-' + translify(self.last_name)
    self.slug = str(self.id) + '-' + name
    self.save()
def file_upload_path(instance, filename):
    """Generates a date-based upload path.

    Keyword arguments:
    instance -- FileField
    filename -- String

    Returns: "YYYY/MM/DD/<slugified-name>.<slugified-ext>" (the extension
    part is omitted when the filename has no dot).
    """
    parts = filename.rsplit('.', 1)
    if len(parts) == 2:
        # name.ext -- slugify both halves separately so the dot survives.
        name = "%s.%s" % (slugify(translify(parts[0])), slugify(translify(parts[1])))
    else:
        # BUG FIX: a filename without an extension used to raise IndexError
        # on parts[1]; fall back to slugifying the whole name.
        name = slugify(translify(parts[0]))
    return "%s/%s/%s/%s" % (strftime('%Y'), strftime('%m'), strftime('%d'), name)
def to_sms (self):
    # Build a transliterated SMS summary of the order, packing optional
    # parts (name, comment) only while the 160-char SMS limit allows.
    addr_part = remove_short_numbers (translit.translify(u"Addr:" + self.address()))
    tel_part = remove_short_numbers (translit.translify(u"Tel:" + self.phoneNo))
    order_part = remove_short_numbers (translit.translify(u'Zakaz:' + self.orderItems()))
    # NOTE: `+` binds tighter than the conditional, so this evaluates as
    # (u"Imya:" + self.name) if self.name else u'' -- i.e. empty when no name.
    name_part = remove_short_numbers (translit.translify(u"Imya:" + self.name if self.name else u''))
    comment = remove_short_numbers (translit.translify (u"Komment:" + self.comment if self.comment else u''))
    zakaz = " ".join ([addr_part, tel_part, order_part])
    # Append optional parts only if they still fit in a single SMS.
    if (name_part) and (len(zakaz) + len(" ")+ len(name_part)) <= 160:
        zakaz += " " + name_part
    if (comment) and (len(zakaz) + len(" ")+ len(comment)) <= 160:
        zakaz += " " + comment
    if len(zakaz) <= 160:
        return zakaz
    else:
        # Mandatory parts alone exceed one SMS: send a "too long" notice instead.
        return u"Shlishkom dlinnyi zakaz! Smotrite na sayte ili na pochte!"
def file_upload_path(instance, filename):
    """Generates a date-based upload path.

    Keyword arguments:
    instance -- FileField
    filename -- String

    Returns: "YYYY/MM/DD/<slugified-name>.<slugified-ext>" (the extension
    part is omitted when the filename has no dot).
    """
    parts = filename.rsplit('.', 1)
    if len(parts) == 2:
        # name.ext -- slugify both halves separately so the dot survives.
        name = "%s.%s" % (slugify(translify(parts[0])), slugify(translify(parts[1])))
    else:
        # BUG FIX: a filename without an extension used to raise IndexError
        # on parts[1]; fall back to slugifying the whole name.
        name = slugify(translify(parts[0]))
    return "%s/%s/%s/%s" % (strftime('%Y'), strftime('%m'), strftime('%d'), name)
def rename_file(self, filename):
    """Transliterate *filename* to ASCII, then make it unique via indexed naming."""
    from pytils import translit
    safe_name = translit.translify(filename)
    return self.get_indexed_name(safe_name)
def elfinder_connector(request):
    # Bridge a Django request to the elFinder file-manager connector:
    # copy the allowed GET/POST parameters into `req`, attach uploads,
    # run the connector, and return its response as JSON.
    elf = elFinder.connector(settings.ELFINDER_OPTIONS)
    req = {}
    if request.method == 'GET':
        form = request.GET
    else:
        form = request.POST
    for field in elf.httpAllowedParameters:
        if field in form:
            req[field] = form.get(field)
            # Hack by Kidwind
            # Multi-valued targets[] need getlist, not the last value only.
            if field == 'targets[]' and hasattr(form, 'getlist'):
                req[field] = form.getlist(field)
    if request.FILES and request.FILES.getlist('upload[]'):
        up_files = {}
        for up in request.FILES.getlist('upload[]'):
            if up.name:
                file_name = slugify(translify(up.name))
                up_files[file_name] = up.file
        # NOTE(review): `field` here is the *last* value left over from the
        # loop above -- this relies on 'upload[]' being the final allowed
        # parameter. Fragile; should be req['upload[]'] explicitly.
        req[field] = up_files
    status, header, response = elf.run(req)
    if not response is None and status == 200:
        # NOTE: `file` is the Python 2 builtin file type.
        if 'file' in response and isinstance(response['file'], file):
            response['file'].close()
    return HttpResponse(json.dumps(response), content_type='application/json')
def get_dict_from_lxml(lxml_obj, translit=False, force_list=False, remove_namespaces=True, list_tags=list()):
    # Recursively convert an lxml element tree into nested dicts/lists:
    # repeated sibling tags (or force_list) -> list, mixed children -> dict,
    # leaf -> element text. `translit` transliterates tag names via pytils;
    # `list_tags` forces listed tag names to become lists one level down.
    # NOTE(review): mutable default `list_tags=list()` -- harmless here since
    # it is only read, never mutated.
    children = lxml_obj.getchildren()
    if (len(children) > 1 and children[0].tag == children[1].tag) or force_list:
        # Two identical leading tags are taken as evidence of a repeated group.
        value = []
        for obj in children:
            value.append(
                get_dict_from_lxml(obj, translit, remove_namespaces=remove_namespaces))
    elif len(children) > 0:
        value = {}
        if translit:
            from pytils.translit import translify
        for obj in children:
            tag = obj.tag
            if remove_namespaces:
                tag = remove_xml_namespace(tag)
            if translit:
                tag = translify(tag)
            value[tag] = get_dict_from_lxml(
                obj, translit, force_list=tag in list_tags, remove_namespaces=remove_namespaces)
    else:
        # Leaf element: return its text content (may be None).
        value = lxml_obj.text
    return value
def post(self, request):
    # Demo/test endpoint: validates input with RequestSchema, probes
    # wkhtmltopdf via errand-boy, and renders assorted pytils helpers
    # (slugify/translify/detranslify/get_plural) into test.html.
    data, errors = RequestSchema().load(request.data)
    errand_boy_transport = UNIXSocketTransport()
    # Run "wkhtmltopdf -V" in the errand-boy sandbox just to capture its version output.
    out, err, returncode = errand_boy_transport.run_cmd("wkhtmltopdf -V")
    translified_text = translify(data["text_for_translite"])
    result = {
        "type": str(data["type"]),
        "type_value": str(data["type"].value),
        "payment_method": str(data["payment_method"]),
        "payment_method_value": str(data["payment_method"].value),
        "wkhtml": out,
        "slug": slugify(data["text_for_slug"]),
        "translite": translified_text,
        # Round-trip the transliteration back to Cyrillic.
        "detranslite": detranslify(translified_text),
        "plural_first": get_plural(data["int_for_plural"], ("клиенту", "клиентам", "клиентам")),
        "plural_second": get_plural(data["int_for_plural"], "секунду,секунды,секунд"),
    }
    return render(request, "test.html", result)
def create_user(self, email, fullname, password):
    """Create a user in Rocket.Chat.

    :param email: e-mail address
    :param fullname: last and first name ("Фамилия и Имя")
    :param password: password
    :return: the chat user's id, or False on failure
    """
    # Username = transliterated full name with spaces replaced by underscores.
    username = translit.translify("_".join(fullname.split(' ')))
    # NOTE(review): the regex dot is unescaped and the [0] access raises
    # IndexError when the email has no "@domain.tld" part -- assumes
    # pre-validated email input; confirm upstream validation.
    result = re.findall(r'@\w+.\w+', email)
    # Append the email's local part to make the username unique.
    username = username + '_' + email.replace(result[0], '')
    data = {
        'email': email,
        'name': fullname,
        'username': username,
        'password': password,
    }
    resp = requests.post(settings.ROCKETCHAT_URL + '/api/v1/users.create', headers=self.headers, json=data)
    if resp.status_code != 200:
        logger.error('Could not create user: %s' % resp.text, exc_info=True)
        return False
    logger.info('create_user.resp - %s' % resp, exc_info=True)
    data = resp.json()['user']
    return data['_id']
def slugify(value, limit=None, default='', lower=True, dot_allowed=False):
    """Build an ASCII slug: pre-clean, transliterate, post-clean, dash-collapse.

    `dot_allowed` keeps dots in the slug (useful for filenames); `limit`
    truncates the result; `default` is returned when the slug comes out empty.
    """
    text = smart_unicode(value)
    # Pre-clean: strip everything pytils can't transliterate, so it won't crash.
    pre_pattern = RE_NOT_ENRUCHAR_DOT if dot_allowed else RE_NOT_ENRUCHAR
    text = pre_pattern.sub('-', text)
    text = translify(text)
    # Post-clean: anything translify produced outside the safe set becomes '-'.
    post_pattern = RE_NOT_ENCHAR_DOT if dot_allowed else RE_NOT_ENCHAR
    text = post_pattern.sub('-', text)
    text = text.strip('-')
    if lower:
        text = text.lower()
    # Collapse runs of dashes into a single one.
    text = RE_DASH.sub('-', text)
    if limit is not None:
        text = text[:limit]
    return text if text != "" else default
def translify(text): """Translify russian text""" try: res = translit.translify(smart_unicode(text, encoding)) except Exception, err: # because filter must die silently res = default_value % {'error': err, 'value': text}
def ajax_upload_file(request):
    """Handle an AJAX e-mail-attachment upload from an expert-A user.

    Returns a JSONResponse with either {'error': ...} or the saved and
    original filenames; raises PermissionDenied for anyone else.
    """
    max_upload_size = settings.EMAIL_ATTACHMENT_MAX_UPLOAD_SIZE
    if request.method == "POST" and request.is_ajax(
    ) and request.user.is_expertA:
        upload_file = request.FILES.get('upload_file')
        error = None
        if upload_file:
            # BUG #2369 restriction of file content type does not work. (Removed from code now)
            if upload_file._size > max_upload_size:
                # BUG FIX: str.format() was being called with a positional
                # dict, so the named {max_size}/{size} placeholders were
                # never filled (KeyError at render time). Pass keyword
                # arguments instead.
                error = _(
                    'Please keep file size under {max_size}. Current file size {size}.'
                ).format(
                    max_size=filesizeformat(max_upload_size),
                    size=filesizeformat(upload_file._size)
                )
        else:
            error = _('No file.')
        context = {'error': error}
        if not error:
            upload_path = settings.EMAIL_ATTACHMENT_UPLOAD_PATH
            # Transliterate the filename and let the storage pick a free name.
            upload_filename = default_storage.get_available_name(
                join(upload_path, translify(upload_file.name)))
            saved_path = default_storage.save(upload_filename, upload_file)
            context = {
                'saved_filename': basename(saved_path),
                'original_filename': upload_file.name
            }
        return JSONResponse(context)
    raise PermissionDenied
def slugify(value, limit=None, default='', lower=True):
    """Build an ASCII slug from a Russian/English string, '-' as separator."""
    text = smart_unicode(value)
    # Pre-clean non ru/en characters so pytils' translify cannot crash,
    # then transliterate.
    text = translify(RE_NOT_ENRUCHAR.sub('-', text))
    # Post-clean leftovers and trim boundary dashes.
    text = RE_NOT_ENCHAR.sub('-', text).strip('-')
    if lower:
        text = text.lower()
    # Collapse dash runs.
    text = RE_DASH.sub('-', text)
    if limit is not None:
        text = text[:limit]
    return text if text != "" else default
def upload(request):
    """
    Uploads a file and send back its URL to CKEditor.
    TODO: Validate uploads
    """
    # Get the uploaded file from request.
    upload = request.FILES['upload']
    # Transliterate the name and derive the destination/URL.
    upload_filename = generate_filename(translify(upload.name))
    url = get_media_url(upload_filename)
    # BUG FIX: use a context manager so the file handle is closed even if a
    # chunk write raises (the original leaked the handle on error).
    with open(upload_filename, 'wb+') as out:
        # Iterate through chunks and write to destination.
        for chunk in upload.chunks():
            out.write(chunk)
    # Respond with Javascript sending ckeditor upload url.
    return HttpResponse("""
    <script type='text/javascript'>
        window.parent.CKEDITOR.tools.callFunction(%s, '%s');
    </script>""" % (request.GET['CKEditorFuncNum'], url))
def get_results(self):
    """
    Fetches the results via the form.

    Returns an empty list if there's no query to search with.
    """
    query = self.query
    if not (self.form.is_valid() and query):
        return self.form.no_query_found()
    #Replace letter ё --> е
    query = replace_special(query)
    # save the query to statistic
    # (only on the first results page, so paging doesn't inflate counters)
    if 'page' not in self.request.GET and query:
        rows = SearchLogger.objects.filter(text=query).update(counter=F('counter')+1)
        if not rows:
            SearchLogger.objects.create(text=query)
    # Search for the raw query plus both transliteration directions, so a
    # Latin or Cyrillic spelling of the same term matches either way.
    translited_query = force_unicode(translify(query))
    detranslited_query = force_unicode(detranslify(query))
    sqs = self.searchqueryset().filter_and(SQ(content=detranslited_query) | SQ(content=translited_query) | SQ(content=query))
    #sqs = self.searchqueryset().auto_query(query)
    if self.load_all:
        sqs = sqs.load_all()
    return sqs
def slugify_func(self, content):
    """Slugify *content*, transliterating Cyrillic first when pytils is available."""
    try:
        from pytils.translit import translify
        content = translify(content)
    except (ImportError, ValueError):
        # BUG FIX: narrowed the bare `except:` -- it silently swallowed
        # *every* exception (including KeyboardInterrupt). We only expect
        # pytils to be missing (ImportError) or the text to be
        # untranslatable (ValueError); anything else should surface.
        pass
    return slugify(content)
def save(self, *args, **kwargs):
    """Derive the slug from the transliterated title before saving.

    When the slug collides with another ClientsPhoto row, a timestamp
    suffix keeps it unique.
    """
    candidate = slugify(translit.translify(self.title), allow_unicode=True)
    collides = ClientsPhoto.objects.filter(slug=candidate).exclude(id=self.id).exists()
    if collides:
        candidate = "-".join([candidate, str(now.strftime("%d-%m-%Y-%H-%M-%S"))])
    self.slug = candidate
    super(ClientsPhoto, self).save(*args, **kwargs)
def make_upload_path(directory, instance, filename):
    """Build "upload/guide/<directory>/<slug><ext>" from the instance name.

    Falls back to the raw name when pytils cannot transliterate it, and to
    'bad_filename' when slugify produces nothing.
    """
    _, extension = os.path.splitext(filename)
    try:
        base = translify(instance.name)
    except ValueError:
        # Untranslatable characters: keep the original and let slugify cope.
        base = instance.name
    slug = slugify(base) or 'bad_filename'
    return u'upload/guide/%s/%s%s' % (directory, slug, extension)
def post_save(sender, **kwargs):
    # Signal handler: once the Unit has an id, regenerate its slug as a
    # transliterated "<name>_<id>".
    my_unit = kwargs['instance']
    my_unit.slug = slugify(
        translit.translify(u'{slug}_{id}'.format(slug=my_unit.name,
                                                 id=my_unit.id)))
    # Temporarily disconnect this handler before saving again, otherwise the
    # inner save() would retrigger it and recurse forever.
    post_save.disconnect(Unit.post_save, sender=Unit)
    my_unit.save()
    post_save.connect(Unit.post_save, sender=Unit)
def post_save(sender, **kwargs):
    # Signal handler: once the Category has an id, regenerate its slug as a
    # transliterated "<name>_<id>".
    my_category = kwargs['instance']
    my_category.slug = slugify(
        translit.translify(u'{slug}_{id}'.format(slug=my_category.name,
                                                 id=my_category.id)))
    # Temporarily disconnect this handler before saving again, otherwise the
    # inner save() would retrigger it and recurse forever.
    post_save.disconnect(Category.post_save, sender=Category)
    my_category.save()
    post_save.connect(Category.post_save, sender=Category)
def translit_cal(string):
    """Print transliterated formatted calendar.

    Replaces special calendar glyphs with ASCII markers, then transliterates.
    NOTE: the `.decode('utf8')` call means this expects a Python 2 byte
    string; undecodable input yields a fixed error message instead of raising.
    """
    try:
        out = string.replace('ⵛ', '-:)').replace('☩', '+').replace('⊕', '(+)')
        return translit.translify(out.decode('utf8'))
    except UnicodeDecodeError:
        return 'Transliteration is not working!'
def translify(stext): """Translify russian text""" try: utext = pseudo_unicode(stext, encoding, default_value) res = translit.translify(utext) except Exception, err: # because filter must die silently res = default_value % {"error": err, "value": stext}
def translify(stext): """Translify russian text""" try: utext = pseudo_unicode(stext, encoding, default_value) res = translit.translify(utext) except Exception, err: # because filter must die silently res = default_value % {'error': err, 'value': stext}
def _file_view(request, transformation_name, file_id=None, download=False):
    # Serve a derived ("modified") version of a stored File, optionally as a
    # download attachment; fall back to converting/notfound placeholder
    # redirects, else 404.
    not_found_path = '%snotfound/%s'
    converting_path = '%sconverting/%s'
    if file_id:
        try:
            file = File.objects.get(id=file_id)
        except File.DoesNotExist:
            file = None
            raise Http404()
        else:
            try:
                modification = file.modifications[transformation_name]
            except File.DerivativeNotFound:
                modification = None
    else:
        file = None
        modification = None
    if modification:
        # Force octet-stream for downloads so browsers save rather than render.
        response = HttpResponse(modification.file.read(),
            content_type='application/octet-stream' if download else modification.file.content_type
        )
        response['Last-Modified'] = modification.file.upload_date
        if download:
            user_agent = request.META.get('HTTP_USER_AGENT', '').lower()
            file_name = (file.name or str(file.id)) + ('.%s' % file.extension if file.extension else '')
            file_name = translify(file_name)
            if 0: #TODO
                # NOTE(review): dead code (`if 0:`). Also, even if enabled,
                # `file_name.replace(...)` discards its result -- str.replace
                # returns a new string; it would need reassignment.
                file_name = iri_to_uri(file_name)
                if user_agent.find('opera') == -1:
                    pass
                if user_agent.find('msie') != -1:
                    file_name.replace('+', '%20')
            response['Content-Disposition'] = 'attachment; filename="%s";' % file_name
        return response
    # No derivative yet: redirect to a "converting" placeholder if present.
    if file and os.path.exists(converting_path % (settings.MEDIA_ROOT, transformation_name)):
        return redirect(converting_path % (settings.MEDIA_URL, transformation_name))
    if os.path.exists(not_found_path % (settings.MEDIA_ROOT, transformation_name)):
        return redirect(not_found_path % (settings.MEDIA_URL, transformation_name))
    raise Http404()
def translit_cal(string):
    """
    :return transliterated formatted calendar.
    """
    # Swap each special calendar glyph for its ASCII marker, then transliterate.
    replaced = string.replace(DICT_FORMAT['gl'], u'-:)')
    replaced = replaced.replace(DICT_FORMAT['pl'], u'+')
    replaced = replaced.replace(DICT_FORMAT['tw'], u'(+)')
    return translit.translify(replaced)
def translit(self, name):
    """Transliterate *name* character by character.

    Characters pytils cannot transliterate are replaced with '1'.
    """
    result = []
    for ch in name:
        try:
            result.append(translify(ch))
        except ValueError:
            # Untranslatable character -- substitute the placeholder.
            result.append('1')
    return ''.join(result)
def get_google_tts_audio(modeladmin, request, queryset):
    # Admin action: synthesize TTS audio for each card's back text and store
    # it on card.back_audio as "<slug>.mp3".
    for card in queryset:
        with tempfile.NamedTemporaryFile() as temp:
            text = striptags(card.back)
            # `audio_extract` presumably returns a file-like TTS response -- TODO confirm.
            response = audio_extract(text)
            temp.write(response.read())
            # Flush so the FieldFile save reads the fully written temp file.
            temp.flush()
            translified = translify(text)
            # Cap the filename at 50 chars of the slugified text.
            file_name = slugify(translified)[:50]
            card.back_audio.save('%s.mp3' % file_name, File(temp))
def community_upload(instance, filename):
    """Build the upload path for a community file.

    The stored name is the md5 hex digest of the transliterated filename,
    keeping the original extension:
    "uploads/communities/<cid>/p_<md5><ext>".
    """
    filename = translit.translify(filename)
    new_name = md5(filename)
    path = "uploads/communities/%(cid)d/p_%(filename)s%(ext)s" % {
        'cid': instance.id or 0,  # unsaved instances go under community 0
        'filename': new_name.hexdigest(),
        'ext': os.path.splitext(filename)[-1]
    }
    # BUG FIX: removed a leftover `print path` debug statement that spammed
    # stdout on every upload.
    return path
def handle_uploaded_file(f, filename, folder):
    """Save an uploaded file under MEDIA_ROOT/uploads/<folder> with an md5 name.

    The stored name is md5(transliterated-name + timestamp) plus the original
    extension. Returns the public '/media/uploads/...' URL of the stored file.
    """
    name, ext = os.path.splitext(translify(filename).replace(' ', '_'))
    hashed_name = md5.md5(name + datetime.datetime.now().strftime("%Y%m%d%H%M%S")).hexdigest()
    path_name = settings.MEDIA_ROOT + '/uploads/' + folder + hashed_name + ext
    # BUG FIX: use a context manager so the handle is closed even if a chunk
    # write raises (the original leaked the handle on error).
    with open(path_name, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
    return '/media/uploads/' + folder + hashed_name + ext
def save(self, *args, **kwargs):
    """Regenerate the slug from the label (underscore-separated) and save."""
    try:
        from pytils.translit import translify
    except ImportError:
        # pytils unavailable: slugify the raw label.
        base = slugify(self.label)
    else:
        base = slugify(translify(self.label))
    # Fields use underscores rather than dashes in their slugs.
    self.slug = base.replace('-', '_')
    return super(AbstractField, self).save(*args, **kwargs)
def convert(s):
    """Transliterate *s* to ASCII.

    Project-specific replacements from TRANSLIFY_PATCHES are applied first;
    characters pytils cannot handle fall back to unidecode.
    """
    for (src, dst) in TRANSLIFY_PATCHES.iteritems():
        s = s.replace(src, dst)
    out = []
    for ch in s:
        try:
            out.append(translify(ch))
        except ValueError:
            # Not in pytils' table -- let unidecode approximate it.
            out.append(unidecode(ch))
    return ''.join(out)
def translify(stext): """Translify russian text""" try: res = translit.translify( utils.provide_unicode( stext, encoding, default=default_value )) except Exception, err: # because filter must die silently res = default_value % {'error': err, 'value': stext}
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
    """Derive a unique slug from the transliterated title before saving.

    On collision with another Reviews row a timestamp suffix is appended.
    """
    slug = slugify(translit.translify(self.title), allow_unicode=True)
    if Reviews.objects.filter(slug=slug).exclude(id=self.id).exists():
        # NOTE(review): `now` is a module-level value used as a datetime
        # instance here -- confirm it is not the timezone.now *function*.
        slug = "-".join([slug, str(now.strftime("%d-%m-%Y-%H-%M-%S"))])
    self.slug = slug
    # BUG FIX: the original called super().save() with no arguments,
    # silently discarding force_insert/force_update/using/update_fields
    # passed by callers despite declaring them in the signature.
    super(Reviews, self).save(force_insert=force_insert,
                              force_update=force_update,
                              using=using,
                              update_fields=update_fields)
def clean_translify(form):
    """Iterates over form fields and traslifies values."""
    # NOTE: replacing dict *values* during iteritems() is safe in Python 2;
    # only adding/removing keys would break iteration.
    for field, value in form.cleaned_data.iteritems():
        if not value or not isinstance(value, basestring):
            # Not a string field or empty? not interested ...
            continue
        # Skip fields that already failed validation.
        if not field in form.errors:
            try:
                form.cleaned_data[field] = translify(force_unicode(value))
            except ValueError:
                # We'll better pass than leave user with a stupid error
                pass
def user_upload_to(instance, filename):
    """Build a per-user, per-day upload path.

    The stored name is the md5 of the transliterated filename with the
    original extension: "uploads/users/<uid>/<Y>/<m>/<d>/a_<md5><ext>".
    """
    today = datetime.today()
    safe_name = translit.translify(filename)
    digest = md5(safe_name).hexdigest()
    extension = os.path.splitext(safe_name)[-1]
    return "uploads/users/%(uid)d/%(year)d/%(month)d/%(day)d/a_%(filename)s%(ext)s" % {
        "uid": instance.user.id,
        "year": today.year,
        "month": today.month,
        "day": today.day,
        "filename": digest,
        "ext": extension,
    }
def index():
    # Flask upload view: store a POSTed file under static/uploaded_files/,
    # refusing to overwrite an existing file. GET renders the upload form.
    if request.method == 'POST':
        file = request.files['file']
        translated_fname = translify(file.filename)  # Transliterate first so
        # Cyrillic letters are not stripped away by secure_filename.
        fname = secure_filename(translated_fname)
        fullpath = 'static/uploaded_files/' + fname
        # Check whether a file with this name already exists.
        if os.path.exists(fullpath):
            return (f"Файл по имени \"{fname}\" уже существует! " +
                    "Мы не станем его переписывать!")
        file.save(fullpath)
        return f"Файл по имени \"{fname}\" успешно загрузился!"
    return render_template("Uploadform.html")
def validate_post_data(post_data): permalink = translit.translify(post_data['title'].strip()) # TODO #templink = self.collection.find_one({'_id': user_data['_id']}) #exp = re.compile('\W') #whitespace = re.compile('\s') #temp_title = whitespace.sub("_", post_data['title']) #permalink = exp.sub('', temp_title) post_data['title'] = cgi.escape(post_data['title']) post_data['preview'] = cgi.escape(post_data['preview'], quote=True) post_data['body'] = cgi.escape(post_data['body'], quote=True) post_data['date'] = datetime.datetime.utcnow() post_data['permalink'] = permalink return post_data
def create_excel_stratification(strat_rows, selected_survey):
    # Build an xlsx quota-stratification report and return it as an HTTP
    # attachment named Stratification_<survey>.xlsx.
    wb = xl.Workbook()
    ws = wb.active
    # Header row (Russian labels: quota description / total successful /
    # quota size / completion percent).
    for enum, cell in enumerate(('Описание квоты', 'Всего успешных', 'Размер квоты', 'Процент выполнения')):
        ws.cell(row=1, column=(enum + 1)).value = cell
    for enum, row in enumerate(strat_rows):
        # Row layout assumed: [.., .., size, description, successes, percent] -- TODO confirm.
        ws.cell(row=enum + 2, column=1).value = row[3]
        ws.cell(row=enum + 2, column=2).value = row[4]
        ws.cell(row=enum + 2, column=3).value = int(row[2])
        ws.cell(row=enum + 2, column=4).value = '{val}%'.format(val=str(row[5]))
    auto_fit_excel_columns(ws)
    response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    # Dots in the survey name would confuse filename parsing -- replace them.
    response['Content-Disposition'] = 'attachment; filename=Stratification_{survey}.xlsx'.format(
        survey=translify(selected_survey).replace('.', '_')
    )
    wb.save(response)
    return response
def try_get_scopus_ids(self, fullname):
    """
    Get list of possible SCOPUS ids.
    :param fullname: origin full name in Russian.
    :return: matched article authors and their SCOPUS ids.
    """
    fullname = fullname.lower()
    fullname_translit = translit.translify(fullname)
    authors = set()
    scopus_ids = set()
    # Fast path: an exact previously-seen match short-circuits the fuzzy scan.
    coincidence = self.__get_last_coincidence(fullname_translit)
    if coincidence is not None:
        authors.add(coincidence)
        ids = self.__author_ids[coincidence]
        scopus_ids.update(ids)
        return authors, scopus_ids
    # Fuzzy scan: compare the transliterated name against every known author.
    for author, ids in self.__author_ids.items():
        coincidence_count = self.__get_initials_coincidence_count(
            author, fullname_translit)
        if coincidence_count == 0:
            # No initials in common -- cannot be the same person.
            continue
        # Reformat the query name with the same number of initials as the
        # candidate so the string metrics compare like with like.
        initial_count = author.count('.')
        translit_article_format = self.__get_name_with_initials(
            fullname_translit, initial_count)
        name1 = self.__get_letters(translit_article_format)
        name2 = self.__get_letters(author)
        d1 = self.__hamming_distance(name1, name2)
        sim1 = self.__get_cooccurrence_with_order(name1, name2)
        sim2 = self.__get_symbol_intersection(name1, name2)
        # Harmonic mean of the two similarity scores (0 when both are 0).
        f1 = 2 * (sim1 * sim2) / (sim1 + sim2) if (sim1 + sim2 > 0) else 0
        len_diff = self.get_len_diff(name1, name2)
        # if (d1 < 0.5) or (sim1 >= 0.6 and sim2 > 0.8):
        # Accept if either metric clears its threshold and the lengths are close.
        if ((d1 < self.d1) or (f1 >= self.f1)) and (len_diff <= 1.5):
            authors.add(author)
            scopus_ids.update(ids)
    return authors, scopus_ids
def slugify(s, entities=True, decimal=True, hexadecimal=True, instance=None, slug_field='slug', filter_dict=None):
    # Transliterate and slugify *s*, optionally decoding HTML entities
    # (named, decimal and hex numeric references) first. Python 2 code
    # (unicode/unichr).
    # NOTE(review): instance, slug_field and filter_dict are accepted but
    # never used in this body -- presumably kept for interface compatibility
    # with a uniqueness-checking variant; confirm callers.
    s = smart_unicode(translit.translify(unicode(s)))
    if entities:
        # &amp; / &copy; style named entities -> their unicode characters.
        s = re.sub('&(%s);' % '|'.join(name2codepoint),
                   lambda m: unichr(name2codepoint[m.group(1)]), s)
    if decimal:
        try:
            # &#NNN; decimal references.
            s = re.sub('&#(\d+);', lambda m: unichr(int(m.group(1))), s)
        except:
            pass
    if hexadecimal:
        try:
            # &#xHH; hexadecimal references.
            s = re.sub('&#x([\da-fA-F]+);', lambda m: unichr(int(m.group(1), 16)), s)
        except:
            pass
    # Decompose accents and drop anything that cannot map to ASCII.
    s = unicodedata.normalize('NFKD', s).encode('ascii', 'ignore')
    s = re.sub(r'[^-a-z0-9_]+', '-', s.lower())
    # Collapse dash runs and trim boundary dashes.
    return re.sub('-{2,}', '-', s).strip('-')
def get_available_name(name):
    """
    Returns a filename that's free on the target storage system, and
    available for new content to be written to.
    """
    dir_name, file_name = os.path.split(name)
    root, extension = os.path.splitext(file_name)
    # Normalize the basename: transliterate, then slugify.
    root = slugify(translify(root))
    candidate = os.path.join(dir_name, root + extension)
    # Keep appending underscores (before the extension) until the path is free.
    while os.path.exists(candidate):
        root += '_'
        candidate = os.path.join(dir_name, root + extension)
    return candidate
def normalize_words(words):
    """
    Merge word and his translit analog. If translit and normal word in text - we detect fraud
    Transform words in normal form
    :param words: list of words
    :return: (<list of normalize words>, <flag that detect or not fraud>)
    """
    # PERF FIX: the stemmer was previously constructed inside the loop, once
    # per word; snowball stemmers are stateless across .stem() calls, so one
    # instance suffices.
    stemmer = RussianStemmer(ignore_stopwords=True)
    tokens = []
    is_fraud = False
    set_words = set(words)
    for word in words:
        translit_word = translit.translify(word)
        # A word whose transliteration also appears verbatim in the text is
        # the fraud signal: same token spelled in both alphabets.
        if translit_word != word and translit_word in set_words:
            word = translit_word
            is_fraud = True
        tokens.append(stemmer.stem(word))
    return tokens, is_fraud
def send(self, id, homework):
    # Send one VK message per day of homework: a formatted text body plus
    # any attached files re-uploaded as VK documents.
    # NOTE(review): parameter `id` shadows the builtin.
    day_temp = '{title} ({date})\n\n'
    subj_temp = '{num}. {name}\n{tasks}\n\n'
    for day in sorted(homework.keys(), key=lambda d: len(d)):
        # Day keys look like YYYYMMDD; reformat to DD.MM.YYYY.
        date = '.'.join([day[6:], day[4:6], day[:4]])
        response = day_temp.format(title=homework[day]['title'].upper(),
                                   date=date)
        subjects = homework[day].get('items', {})
        list_of_attachs = list()
        num = 1
        for subj in sorted(subjects.keys(), key=lambda s: len(s)):
            name = subjects[subj].get('name', '')
            tasks = str()
            hw = subjects[subj]
            for t in hw['homework'].keys():
                tasks += hw['homework'][t]['value'] + '\n'
            for f in hw.get('files', []):
                # Download the attachment, buffer it on disk (transliterated
                # name), push it to VK's doc upload server, then clean up.
                r = requests.get(url=f['link'],
                                 headers={'User-Agent': 'Mozilla/5.0'})
                filename = translit.translify(f['filename'])
                with open('ElJurAPI/DocsBuff/' + filename, 'wb') as doc:
                    doc.write(r.content)
                url = vk.docs.getMessagesUploadServer(
                    peer_id=id)['upload_url']
                # NOTE(review): this open() handle is never explicitly
                # closed; a `with` block would be safer.
                r = requests.post(
                    url=url,
                    files={
                        'file': open('ElJurAPI/DocsBuff/' + filename, 'rb')
                    }).json()
                attach = vk.docs.save(file=r['file'])[0]
                list_of_attachs.append('doc' + str(attach['owner_id']) + '_' +
                                       str(attach['id']))
                os.remove('ElJurAPI/DocsBuff/' + filename)
            response += subj_temp.format(num=num, name=name, tasks=tasks)
            num += 1
        vk.messages.send(user_id=id,
                         message=response,
                         attachment=list_of_attachs,
                         keyboard=default_keyboard)
def create_fw_excel_stat(selected_survey, agregate_date):
    # Build an xlsx fieldwork-monitor report (one row per date) and return
    # it as an HTTP attachment named Interviewers_productive_<survey>.xlsx.
    wb = xl.Workbook()
    ws = wb.active
    agregate_date = calculate_fw_monitor(agregate_date)
    # Header: survey name first, then the fixed monitor column labels.
    headers = [selected_survey, *FW_MONITOR_HEADERS]
    for col_num, col_label in enumerate(headers):
        ws.cell(row=1, column=(col_num + 1)).value = col_label
    # `agregate_date.values` suggests a pandas-like frame; rows are
    # [date, v1, v2, v3, v4] -- TODO confirm shape.
    for enum, row in enumerate(agregate_date.values):
        ws.cell(row=enum + 2, column=1).value = dt.strftime(row[0], '%d.%m.%Y')
        ws.cell(row=enum + 2, column=2).value = row[1]
        ws.cell(row=enum + 2, column=3).value = row[2]
        ws.cell(row=enum + 2, column=4).value = row[3]
        ws.cell(row=enum + 2, column=5).value = row[4]
    auto_fit_excel_columns(ws)
    response = HttpResponse(content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    # Dots in the survey name would confuse filename parsing -- replace them.
    response['Content-Disposition'] = 'attachment; filename=Interviewers_productive_{survey}.xlsx'.format(
        survey=translify(selected_survey).replace('.', '_')
    )
    wb.save(response)
    return response
def get_context_data(self, **kwargs):
    # Extend the purchases list context with the current "purchase_doc"
    # file info (URL, extension, human-readable size) and lazy-load paging data.
    context = super(ShowPurchasesListView, self).get_context_data(**kwargs)
    purch_doc_file = False
    curr_ext = False
    size = False
    # Locate the stored purchase_doc file (any extension) under uploads/.
    for root, dirs, files in os.walk(settings.MEDIA_ROOT + '/uploads/', ):
        for filename in files:
            name, ext = os.path.splitext(translify(u'%s' % filename).replace(' ', '_'))
            if name == 'purchase_doc':
                purch_doc_file = '/media/uploads/' + filename
                curr_ext = ext
                size = os.path.getsize(settings.MEDIA_ROOT + '/uploads/' + filename)
    context['purch_doc_file'] = purch_doc_file
    if curr_ext:
        # Drop the leading dot of the extension for display.
        context['ext'] = curr_ext[1:]
    if size:
        size = sizeof_fmt(size)
        context['size'] = size
    context['type'] = self.kwargs.get('type', None)
    # NOTE(review): bare excepts below swallow all errors; they should at
    # least be narrowed to (Settings.DoesNotExist, ValueError) / ValueError.
    try:
        loaded_count = int(Settings.objects.get(name='loaded_count').value)
    except:
        loaded_count = 6
    queryset = context['purchases']
    # GetLoadIds returns "remaining!next_id" -- split it apart.
    result = GetLoadIds(queryset, loaded_count)
    splited_result = result.split('!')
    try:
        remaining_count = int(splited_result[0])
    except:
        # NOTE(review): remaining_count is computed but never used below.
        remaining_count = False
    next_id_loaded_items = splited_result[1]
    context['purchases'] = context['purchases'][:loaded_count]
    context['next_id_loaded_items'] = next_id_loaded_items
    context['loaded_count'] = loaded_count
    return context
def ajax_upload_file(request):
    """Handle an AJAX e-mail-attachment upload from an expert-A user.

    Returns a JSONResponse with either {'error': ...} or the saved and
    original filenames; raises PermissionDenied for anyone else.
    """
    max_upload_size = settings.EMAIL_ATTACHMENT_MAX_UPLOAD_SIZE
    if request.method == "POST" and request.is_ajax() and request.user.is_expertA:
        upload_file = request.FILES.get('upload_file')
        error = None
        if upload_file:
            # BUG #2369 restriction of file content type does not work. (Removed from code now)
            if upload_file._size > max_upload_size:
                # BUG FIX: str.format() was being called with a positional
                # dict, so the named {max_size}/{size} placeholders were
                # never filled (KeyError at render time). Pass keyword
                # arguments instead.
                error = _('Please keep file size under {max_size}. Current file size {size}.').format(
                    max_size=filesizeformat(max_upload_size),
                    size=filesizeformat(upload_file._size))
        else:
            error = _('No file.')
        context = {'error': error}
        if not error:
            upload_path = settings.EMAIL_ATTACHMENT_UPLOAD_PATH
            # Transliterate the filename and let the storage pick a free name.
            upload_filename = default_storage.get_available_name(join(upload_path, translify(upload_file.name)))
            saved_path = default_storage.save(upload_filename, upload_file)
            context = {'saved_filename': basename(saved_path), 'original_filename': upload_file.name}
        return JSONResponse(context)
    raise PermissionDenied
def create_groups(self, name='', readOnly=False, members=None):
    """Create a private Rocket.Chat group.

    :param name: group name (spaces become underscores, then transliterated)
    :param readOnly: true/false - make the group read-only
    :param members: usernames to add to the group
    :return: (success flag, group id, group name), or False on HTTP failure
    """
    name = translit.translify("_".join(name.split(' ')))
    # Avoid the mutable-default pitfall: fresh list per call.
    members = [] if members is None else members
    data = {'name': name, 'members': members, 'readOnly': readOnly}
    resp = requests.post(settings.ROCKETCHAT_URL + '/api/v1/groups.create', headers=self.headers, json=data)
    if resp.status_code != 200:
        logger.error('Fail create_groups: %s' % resp.text, exc_info=True)
        return False
    logger.info('create_groups.resp - %s' % resp, exc_info=True)
    data = resp.json()
    return data['success'], data['group']['_id'], data['group']['name']
def slugify(value, limit=None, default=''):
    """Build a lowercase ASCII slug from a Russian/English string."""
    text = smart_unicode(value)
    # Pre-clean non ru/en characters so pytils' translify cannot crash,
    # then transliterate.
    text = translify(RE_NOT_ENRUCHAR.sub('-', text))
    # Post-clean leftovers, trim boundary dashes and lowercase.
    text = RE_NOT_ENCHAR.sub('-', text).strip('-').lower()
    # Collapse dash runs.
    text = RE_DASH.sub('-', text)
    if limit is not None:
        text = text[:limit]
    return text if text != "" else default
def save(self, *args, **kwargs):
    """
    Create a unique slug from title - append an index and increment if it
    already exists.
    """
    if not self.slug:
        try:
            from pytils.translit import translify
        except ImportError:
            # pytils unavailable: slugify the raw title.
            self.slug = slugify(self.title)
        else:
            self.slug = slugify(translify(self.title))
        i = 0
        while True:
            if i > 0:
                if i > 1:
                    # Strip the previously appended "-<i-1>" suffix before
                    # appending the new index (i == 1 appends to the bare slug).
                    self.slug = self.slug.rsplit("-", 1)[0]
                self.slug = "%s-%s" % (self.slug, i)
            # Stop at the first slug no other row uses.
            if not self.__class__.objects.filter(slug=self.slug):
                break
            i += 1
    super(AbstractForm, self).save(*args, **kwargs)