def validate(self):
    # File doctype validation hook: run the standard checks, then move the
    # file on disk when its privacy flag was toggled since the stored copy.
    if self.is_new():
        self.validate_duplicate_entry()
    self.validate_folder()
    if not self.flags.ignore_file_validate:
        self.validate_file()
        self.generate_content_hash()
    self.set_folder_size()
    if frappe.db.exists('File', {'name': self.name, 'is_folder': 0}):
        # existing non-folder record: compare against the stored is_private
        if not self.is_folder and (self.is_private != self.db_get('is_private')):
            old_file_url = self.file_url
            private_files = frappe.get_site_path('private', 'files')
            public_files = frappe.get_site_path('public', 'files')
            if not self.is_private:
                # became public: move out of private/files
                shutil.move(os.path.join(private_files, self.file_name),
                    os.path.join(public_files, self.file_name))
                self.file_url = "/files/{0}".format(self.file_name)
            else:
                # became private: move into private/files
                shutil.move(os.path.join(public_files, self.file_name),
                    os.path.join(private_files, self.file_name))
                self.file_url = "/private/files/{0}".format(self.file_name)

            # update documents image url with new file url
            if self.attached_to_doctype and self.attached_to_name and \
                frappe.db.get_value(self.attached_to_doctype, self.attached_to_name, "image") == old_file_url:
                frappe.db.set_value(self.attached_to_doctype, self.attached_to_name, "image", self.file_url)
def get_filecontent_from_path(path):
    """Return the raw bytes of the file at *path*, or None when it is missing.

    Relative paths are resolved against the common assets folder, the
    site's public files folder or its private files folder depending on
    their prefix; anything else is used verbatim.
    """
    if not path:
        return

    # drop a single leading slash so the prefix checks work uniformly
    if path[:1] == '/':
        path = path[1:]

    if path.startswith('assets/'):
        # served from the shared assets folder under sites/
        resolved = os.path.abspath(path)
    elif path.startswith('files/'):
        # public file
        resolved = frappe.get_site_path('public', path)
    elif path.startswith('private/files/'):
        # private file
        resolved = frappe.get_site_path(path)
    else:
        resolved = path

    if not os.path.exists(resolved):
        return None

    with open(resolved, 'rb') as handle:
        return handle.read()
def validate(self):
    # File validation hook: run the standard checks, then physically move
    # the file between the public and private folders when its privacy
    # flag changed since the stored record.
    if self.is_new():
        self.validate_duplicate_entry()
    self.validate_folder()
    if not self.flags.ignore_file_validate:
        self.validate_file()
        self.generate_content_hash()
    self.set_folder_size()
    if frappe.db.exists("File", {"name": self.name, "is_folder": 0}):
        if not self.is_folder and (self.is_private != self.db_get("is_private")):
            private_files = frappe.get_site_path("private", "files")
            public_files = frappe.get_site_path("public", "files")
            if not self.is_private:
                # became public: move out of private/files
                shutil.move(os.path.join(private_files, self.file_name),
                    os.path.join(public_files, self.file_name))
                self.file_url = "/files/{0}".format(self.file_name)
            else:
                # became private: move into private/files
                shutil.move(os.path.join(public_files, self.file_name),
                    os.path.join(private_files, self.file_name))
                self.file_url = "/private/files/{0}".format(self.file_name)
def validate(self):
    # File validation hook (single-quote variant of the block above in the
    # original sources): standard checks, then move the file on disk when
    # its privacy flag changed.
    if self.is_new():
        self.validate_duplicate_entry()
    self.validate_folder()
    if not self.flags.ignore_file_validate:
        self.validate_file()
        self.generate_content_hash()
    self.set_folder_size()
    if frappe.db.exists('File', {'name': self.name, 'is_folder': 0}):
        if not self.is_folder and (self.is_private != self.db_get('is_private')):
            private_files = frappe.get_site_path('private', 'files')
            public_files = frappe.get_site_path('public', 'files')
            if not self.is_private:
                # became public: move out of private/files
                shutil.move(os.path.join(private_files, self.file_name),
                    os.path.join(public_files, self.file_name))
                self.file_url = "/files/{0}".format(self.file_name)
            else:
                # became private: move into private/files
                shutil.move(os.path.join(public_files, self.file_name),
                    os.path.join(private_files, self.file_name))
                self.file_url = "/private/files/{0}".format(self.file_name)
def ugs_save_file_on_filesystem_hook(*args, **kwargs):
    """Wrap ``save_file_on_filesystem`` and post-process saved images.

    Delegates to the stock handler first, then reconstructs the on-disk
    path of the saved file (mirroring the handler's private/public
    layout) and resizes/auto-orients JPEG uploads in place.  For every
    other file type the hook is a transparent pass-through.

    NOTE: the path reconstruction mirrors save_file_on_filesystem's own
    logic and could break with future Frappe changes.
    """
    ret = frappe.utils.file_manager.save_file_on_filesystem(*args, **kwargs)
    file_name = ret['file_name']
    # ret['file_url'] is not a consistent file-system identifier, so the
    # absolute path is rebuilt from file_name instead.

    subfolder = 'private' if kwargs.get('is_private') else 'public'
    file_path = os.path.abspath(
        frappe.get_site_path(subfolder, 'files', file_name))

    if os.path.splitext(file_name)[1].lower() in ('.jpg', '.jpeg'):
        # Resize and auto-orient this image
        resize_image(file_path)

    return ret
def get_test_record_log():
    '''Return the list of doctypes for which test records have been created.

    The list is cached on ``frappe.flags`` and lazily loaded from the
    site's ``.test_log`` file on first access.
    '''
    if 'test_record_log' not in frappe.flags:
        # compute the path once instead of twice
        test_log_path = frappe.get_site_path('.test_log')
        if os.path.exists(test_log_path):
            with open(test_log_path, 'r') as f:
                frappe.flags.test_record_log = f.read().splitlines()
        else:
            frappe.flags.test_record_log = []

    return frappe.flags.test_record_log
def make_thumbnail(self):
    # Build a 300x300 "<name>_small.<ext>" thumbnail for this File and
    # record its URL in `thumbnail_url`; returns the thumbnail URL.
    # NOTE(review): Python 2 syntax (`except ..., e`, StringIO) — this
    # block predates the Python 3 port.
    if self.file_url:
        if self.file_url.startswith("/files"):
            # local public file: open straight from disk
            try:
                image = Image.open(frappe.get_site_path("public", self.file_url.lstrip("/")))
                filename, extn = self.file_url.rsplit(".", 1)
            except IOError:
                frappe.msgprint("Unable to read file format for {0}".format(self.file_url))
        else:
            # download the remote image
            file_url = frappe.utils.get_url(self.file_url)
            r = requests.get(file_url, stream=True)
            try:
                r.raise_for_status()
            except requests.exceptions.HTTPError, e:
                if "404" in e.args[0]:
                    frappe.throw(_("File '{0}' not found").format(self.file_url))
                else:
                    raise
            image = Image.open(StringIO.StringIO(r.content))
            filename, extn = self.file_url.rsplit("/", 1)[1].rsplit(".", 1)
            mimetype = mimetypes.guess_type(filename + "." + extn)[0]
            if mimetype is None or not mimetype.startswith("image/"):
                # detect file extension by reading image header properties
                extn = imghdr.what(filename + "." + extn, h=r.content)
            filename = "/files/" + strip(urllib.unquote(filename))

        thumbnail = ImageOps.fit(
            image,
            (300, 300),
            Image.ANTIALIAS
        )
        thumbnail_url = filename + "_small." + extn
        path = os.path.abspath(frappe.get_site_path("public", thumbnail_url.lstrip("/")))
        try:
            thumbnail.save(path)
            self.db_set("thumbnail_url", thumbnail_url)
        except IOError:
            frappe.msgprint("Unable to write file format for {0}".format(path))
        return thumbnail_url
def download_zip(files, output_filename):
    """Bundle the given private files into a zip archive and stream the
    archive back as a download response."""
    from zipfile import ZipFile

    archive_path = frappe.get_site_path('private', 'files', output_filename)

    with ZipFile(archive_path, 'w') as archive:
        for filename in files:
            source_path = frappe.get_site_path('private', 'files', filename)
            archive.write(source_path, arcname=os.path.basename(source_path))

    with open(archive_path, 'rb') as fileobj:
        payload = fileobj.read()

    frappe.local.response.filename = output_filename
    frappe.local.response.filecontent = payload
    frappe.local.response.type = "download"
def unzip(self):
    """Unzip current file and replace it by its children.

    Each extracted entry gets (or updates) its own File document
    inheriting folder/privacy/attachment info from this one; the zip's
    File record is deleted afterwards.
    """
    # keep original substring semantics (not endswith) for compatibility
    if ".zip" not in self.file_name:
        frappe.msgprint(_("Not a zip file"))
        return

    zip_path = frappe.get_site_path(self.file_url.strip("/"))
    base_url = os.path.dirname(self.file_url)

    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(os.path.dirname(zip_path))
        for info in zf.infolist():
            # skip macOS resource-fork entries
            if info.filename.startswith("__MACOSX"):
                continue

            # fixed redundant `file_url = file_url = ...` double assignment
            file_url = base_url + "/" + info.filename
            file_name = frappe.db.get_value("File", dict(file_url=file_url))
            if file_name:
                file_doc = frappe.get_doc("File", file_name)
            else:
                file_doc = frappe.new_doc("File")
            file_doc.file_name = info.filename
            file_doc.file_size = info.file_size
            file_doc.folder = self.folder
            file_doc.is_private = self.is_private
            file_doc.file_url = file_url
            file_doc.attached_to_doctype = self.attached_to_doctype
            file_doc.attached_to_name = self.attached_to_name
            file_doc.save()

    frappe.delete_doc("File", self.name)
def make_thumbnail(self):
    """Create a 300x300 "<name>_small.<ext>" thumbnail next to the file
    and store its URL in ``thumbnail_url``.

    Silently gives up when the source image cannot be read; reports via
    msgprint when the thumbnail cannot be written.  Returns the
    thumbnail URL on success.
    """
    if not self.file_url:
        return

    if self.file_url.startswith("/files"):
        # local public file
        try:
            image, filename, extn = get_local_image(self.file_url)
        except IOError:
            return
    else:
        # remote image: fetched over HTTP
        try:
            image, filename, extn = get_web_image(self.file_url)
        except (requests.exceptions.HTTPError, requests.exceptions.SSLError, IOError):
            return

    thumbnail = ImageOps.fit(image, (300, 300), Image.ANTIALIAS)
    thumbnail_url = "{0}_small.{1}".format(filename, extn)
    path = os.path.abspath(frappe.get_site_path("public", thumbnail_url.lstrip("/")))

    try:
        thumbnail.save(path)
        self.db_set("thumbnail_url", thumbnail_url)
    except IOError:
        frappe.msgprint("Unable to write file format for {0}".format(path))
        return

    return thumbnail_url
def make_thumbnail(self, file_url, doc, dn):
    # Create a 300x300 fitted copy of the uploaded content (self.content)
    # and record its URL on `doc.thumbnail_url`.
    # NOTE(review): thumbnail_url is rebuilt as filename + "." + extn,
    # i.e. the SAME path as the source image, so saving overwrites the
    # original with the 300x300 version — sibling implementations use a
    # "_small" suffix; confirm this is intended before changing.
    try:
        image = Image.open(StringIO.StringIO(self.content))
        filename, extn = file_url.rsplit(".", 1)
    except IOError:
        frappe.msgprint("Unable to read file format for {0}".format(os.path.realpath(self.file_path)))
        return
    thumbnail = ImageOps.fit(
        image,
        (300, 300),
        Image.ANTIALIAS
    )
    thumbnail_url = filename + "." + extn
    path = os.path.abspath(frappe.get_site_path("public", thumbnail_url.lstrip("/")))
    # ensure the destination folder exists before saving
    frappe.create_folder(os.path.dirname(path))
    try:
        thumbnail.save(path)
        doc.db_set("thumbnail_url", thumbnail_url)
    except IOError:
        frappe.msgprint("Unable to write file format for {0}".format(path))
def migrate(verbose=True, rebuild_website=False):
    '''Migrate all apps to the latest version, will:
    - run before migrate hooks
    - run patches
    - sync doctypes (schema)
    - sync fixtures
    - sync desktop icons
    - sync web pages (from /www)
    - run after migrate hooks
    '''
    # Patches record which tables they touch on frappe.flags; the list is
    # dumped to touched_tables.json at the end (even on failure, via finally).
    touched_tables_file = frappe.get_site_path('touched_tables.json')
    if os.path.exists(touched_tables_file):
        os.remove(touched_tables_file)

    try:
        frappe.flags.touched_tables = set()
        frappe.flags.in_migrate = True
        clear_global_cache()

        # run before_migrate hooks
        for app in frappe.get_installed_apps():
            for fn in frappe.get_hooks('before_migrate', app_name=app):
                frappe.get_attr(fn)()

        # run patches
        frappe.modules.patch_handler.run_all()

        # sync schema and derived assets
        frappe.model.sync.sync_all(verbose=verbose)
        frappe.translate.clear_cache()
        sync_fixtures()
        sync_customizations()
        sync_desktop_icons()
        sync_languages()

        frappe.get_doc('Portal Settings', 'Portal Settings').sync_menu()

        # syncs statics
        render.clear_cache()

        # add static pages to global search
        router.sync_global_search()

        # run after_migrate hooks
        for app in frappe.get_installed_apps():
            for fn in frappe.get_hooks('after_migrate', app_name=app):
                frappe.get_attr(fn)()

        frappe.db.commit()

        clear_notifications()

        frappe.publish_realtime("version-update")
        frappe.flags.in_migrate = False
    finally:
        # always persist the touched-tables log
        with open(touched_tables_file, 'w') as f:
            json.dump(list(frappe.flags.touched_tables), f, sort_keys=True, indent=4)
        frappe.flags.touched_tables.clear()
def add_to_test_record_log(doctype):
    '''Add `doctype` to site/.test_log

    `.test_log` is a cache of all doctypes for which test records are created'''
    test_record_log = get_test_record_log()
    # idiomatic membership test (was `if not doctype in ...`)
    if doctype not in test_record_log:
        frappe.flags.test_record_log.append(doctype)
        with open(frappe.get_site_path('.test_log'), 'w') as f:
            # drop empty entries before persisting
            f.write('\n'.join(filter(None, frappe.flags.test_record_log)))
def download_e_invoice_file(file_name):
    """Stream a previously generated e-invoice file from the private
    files folder back as a download response."""
    invoice_path = frappe.get_site_path('private', 'files', file_name)
    with open(invoice_path, "r") as f:
        content = f.read()

    frappe.local.response.filename = file_name
    frappe.local.response.filecontent = content
    frappe.local.response.type = "download"
def zip_files(self):
    """Tar the site's public and private files folders into their
    respective backup paths."""
    for folder in ("public", "private"):
        files_path = frappe.get_site_path(folder, "files")
        backup_path = self.backup_path_files if folder == "public" else self.backup_path_private_files
        # NOTE(review): paths are interpolated into a shell command — they
        # come from site configuration, not user input, but keep that true.
        cmd_string = """tar -cf %s %s""" % (backup_path, files_path)
        err, out = frappe.utils.execute_in_shell(cmd_string)
        # single-argument print() works identically on Python 2 and 3;
        # the original bare `print a, b` statement is a py3 syntax error.
        print('Backed up files %s' % os.path.abspath(backup_path))
def update_space_usage():
    """Recompute the site's disk usage (files, backups, database),
    persist it via update_limits and return the usage dict."""
    # public and private files
    total_files = get_folder_size(frappe.get_site_path("public", "files"))
    total_files += get_folder_size(frappe.get_site_path("private", "files"))
    backups = get_folder_size(frappe.get_site_path("private", "backups"))
    db_size = get_database_size()

    usage = {
        "files_size": flt(total_files, 2),
        "backup_size": flt(backups, 2),
        "database_size": flt(db_size, 2),
        "total": flt(flt(total_files) + flt(backups) + flt(db_size), 2),
    }

    update_limits({"space_usage": usage})
    return usage
def delete_photo_from_propshikari(thumbnail_url):
    """Remove both the thumbnail and the regular-size copy of a project
    image from the public files folder.

    Raises DoesNotExistError as soon as either copy is missing.
    """
    relative_url = thumbnail_url[thumbnail_url.find("files"):]
    thumbnail_path = frappe.get_site_path() + '/public/' + relative_url

    # the regular-size copy lives in a sibling "regular" folder
    for image_path in (thumbnail_path, thumbnail_path.replace("thumbnail", "regular")):
        if not os.path.exists(image_path):
            raise DoesNotExistError("Project Image not found")
        os.remove(image_path)
def delete_jrxml_child_file(path, jasper_all_sites):
    """Delete a jrxml child file both from the site's public files folder
    and from the jasper folder.

    The original split `path` on its last dot and immediately rejoined
    it — a no-op that also crashed on dot-less names; `path` is now used
    directly.
    """
    public_path = os.path.abspath(
        frappe.get_site_path("public", "files", path.lstrip("/")))
    if os.path.exists(public_path):
        os.remove(public_path)

    jasper_file_path = os.path.join(get_jasper_path(jasper_all_sites), path)
    if os.path.exists(jasper_file_path):
        os.remove(jasper_file_path)
def view_slideshow_py(slideshow):
    """Return the ordered list of image URLs for a Website Slideshow.

    The slideshow name is bound as a query parameter — the original
    interpolated it with str.format, which was SQL-injectable.  The
    unused `images_path` local has been dropped.
    """
    rows = frappe.db.sql(
        "select image from `tabWebsite Slideshow Item` "
        "where parent=%s order by idx",
        (slideshow,), as_dict=False)
    return [row[0] for row in rows]
def validate(self):
    # File validation hook: standard checks, then move the file on disk
    # when privacy changed, and repoint whatever field on the attached
    # document referenced the old URL.
    if self.is_new():
        self.validate_duplicate_entry()
    self.validate_folder()
    if not self.flags.ignore_file_validate:
        self.validate_file()
        self.generate_content_hash()
    self.set_folder_size()
    if frappe.db.exists('File', {'name': self.name, 'is_folder': 0}):
        # remember the URL before any move so it can be matched below
        old_file_url = self.file_url
        if not self.is_folder and (self.is_private != self.db_get('is_private')):
            private_files = frappe.get_site_path('private', 'files')
            public_files = frappe.get_site_path('public', 'files')
            if not self.is_private:
                # became public: move out of private/files
                shutil.move(os.path.join(private_files, self.file_name),
                    os.path.join(public_files, self.file_name))
                self.file_url = "/files/{0}".format(self.file_name)
            else:
                # became private: move into private/files
                shutil.move(os.path.join(public_files, self.file_name),
                    os.path.join(private_files, self.file_name))
                self.file_url = "/private/files/{0}".format(self.file_name)

        # update documents image url with new file url
        if self.attached_to_doctype and self.attached_to_name:
            if not self.attached_to_field:
                # discover which field held the old URL by scanning the
                # attached document's values
                field_name = None
                reference_dict = frappe.get_doc(self.attached_to_doctype, self.attached_to_name).as_dict()
                for key, value in reference_dict.items():
                    if value == old_file_url:
                        field_name = key
                        break
                self.attached_to_field = field_name
            if self.attached_to_field:
                frappe.db.set_value(self.attached_to_doctype, self.attached_to_name,
                    self.attached_to_field, self.file_url)
def make_site_dirs():
    """Create the on-disk folder skeleton for a site: private/backups,
    public/files and the locks directory."""
    public_path = os.path.join(frappe.local.site_path, 'public')
    private_path = os.path.join(frappe.local.site_path, 'private')

    required_dirs = (
        os.path.join(private_path, 'backups'),
        os.path.join(public_path, 'files'),
    )
    for required_dir in required_dirs:
        if not os.path.exists(required_dir):
            os.makedirs(required_dir)

    locks_dir = frappe.get_site_path('locks')
    if not os.path.exists(locks_dir):
        os.makedirs(locks_dir)
def store_image_to_propshikari(request_data):
    """Store a user's base64-encoded profile photo under
    public/files/<user_id>/ and point User.user_image at it.

    Raises MandatoryError when the image extension is missing and
    ImageUploadError when decoding or writing fails.
    """
    request_data = json.loads(request_data)
    putil.validate_property_data(request_data, ["profile_photo"])
    if not request_data.get("profile_photo").get("file_ext"):
        raise MandatoryError("Image Extension not found")
    user_email = putil.validate_for_user_id_exists(request_data.get("user_id"))

    user_folder = frappe.get_site_path("public", "files", request_data.get("user_id"))
    if not os.path.exists(user_folder):
        os.mkdir(user_folder)

    try:
        base64_data = request_data.get("profile_photo").get("file_data").encode("utf8")
        # strip the data-URI prefix ("data:image/...;base64,")
        # NOTE(review): under Python 3, bytes.split(',') raises TypeError —
        # split before encoding if porting fully.
        base64_data = base64_data.split(',')[1]
        imgdata = base64.b64decode(base64_data)
        file_name = "PSUI-" + cstr(time.time()) + '.' + request_data.get("profile_photo").get("file_ext")
        with open(os.path.join(user_folder, file_name), "wb+") as fi_nm:
            fi_nm.write(imgdata)
        file_name = "files/" + request_data.get("user_id") + '/' + file_name
        frappe.db.set_value(dt="User", dn=user_email, field="user_image", val=file_name)
        return {
            "operation": "Update",
            "message": "Profile Image updated Successfully",
            "profile_image_url": frappe.request.host_url + file_name,
            "user_id": request_data.get("user_id"),
        }
    except Exception:
        # py3-compatible except clause (was the py2-only `except Exception,e`)
        raise ImageUploadError("Profile Image Updation Failed")
def generate_single_invoice(docname):
    """Build the e-invoice for one Sales Invoice and return it as a
    download response."""
    sales_invoice = frappe.get_doc("Sales Invoice", docname)
    e_invoice = prepare_and_attach_invoice(sales_invoice, True)

    invoice_path = frappe.get_site_path('private', 'files', e_invoice.file_name)
    with open(invoice_path, "r") as f:
        content = f.read()

    frappe.local.response.filename = e_invoice.file_name
    frappe.local.response.filecontent = content
    frappe.local.response.type = "download"
def create_directory(self):
    """Ensure public/files/mycfo/{edited_file,published_file}/<document_type>
    directories all exist.

    The original only created the inner folders when the outer ones were
    missing, so a partially created tree left some directories absent and
    made later writes fail; every directory is now checked independently.
    """
    for subfolder in ("edited_file", "published_file"):
        type_dir = frappe.get_site_path(
            "public", "files", "mycfo", subfolder, self.document_type)
        if not os.path.exists(type_dir):
            # makedirs creates the intermediate mycfo/<subfolder> levels too
            os.makedirs(type_dir)
def store_property_photos_in_propshikari(request_data, custom_id):
    # Decode a list of base64-encoded property photos, store each image
    # under public/files/<custom_id>/regular/ plus a 400x400 thumbnail
    # under public/files/<custom_id>/thumbnail/, and collect the full
    # URLs in property_url_dict.
    # NOTE(review): Python 2 syntax (`except Exception,e`); the function
    # as shown never returns property_url_dict — confirm against the
    # full original.
    property_url_dict = {"full_size": [], "thumbnails": []}
    size = 400, 400  # thumbnail bounding box
    if request_data:
        putil.validate_for_property_photo_fields(request_data)
        # create regular/ and thumbnail/ folders on first upload
        if not os.path.exists(frappe.get_site_path("public", "files", custom_id)):
            os.makedirs(frappe.get_site_path("public", "files", custom_id, "regular"))
            os.mkdir(frappe.get_site_path("public", "files", custom_id, "thumbnail"))
        for property_photo in request_data:
            file_ext = property_photo.get("file_ext")
            try:
                base64_data = property_photo.get("file_data").encode("utf8")
                # strip the data-URI prefix before decoding
                base64_data = base64_data.split(',')[1]
                imgdata = base64.b64decode(base64_data)
                old_file_name = "PSPI-" + cstr(time.time()) + random_string(5) + "." + file_ext
                with open(frappe.get_site_path("public", "files", custom_id, "regular", old_file_name), "wb+") as fi_nm:
                    fi_nm.write(imgdata)
                file_name = "files/" + custom_id + "/regular/" + old_file_name
                regular_image_url = frappe.request.host_url + file_name
                property_url_dict.get("full_size").append(regular_image_url)
                # build the 400x400 thumbnail from the stored regular copy
                thumbnail_file_name = frappe.get_site_path("public", "files", custom_id, "thumbnail", old_file_name)
                im = Image.open(frappe.get_site_path("public", "files", custom_id, "regular", old_file_name))
                im.thumbnail(size, Image.ANTIALIAS)
                im.save(thumbnail_file_name)
                thumbnail_file_url = "files/" + custom_id + "/thumbnail/" + old_file_name
                property_url_dict.get("thumbnails").append(frappe.request.host_url + thumbnail_file_url)
            except Exception,e:
                raise ImageUploadError("Image Upload Error")
def qrcode_as_png(user, totp_uri):
    '''Save a temporary QR code PNG for the given user and return its URL.'''
    from frappe.utils.file_manager import save_file
    folder = create_barcode_folder()
    png_file_name = '{}.png'.format(frappe.generate_hash(length=20))
    file_obj = save_file(png_file_name, png_file_name, 'User', user, folder=folder)
    frappe.db.commit()
    file_url = get_url(file_obj.file_url)
    file_path = os.path.join(frappe.get_site_path('public', 'files'), file_obj.file_name)
    url = qrcreate(totp_uri)
    # PNG data is binary; text mode ('w') corrupts the output on Python 3,
    # so the file must be opened in binary mode.
    with open(file_path, 'wb') as png_file:
        url.png(png_file, scale=8, module_color=[0, 0, 0, 180], background=[0xff, 0xff, 0xcc])
    return file_url
def make_thumbnail(self):
    # Build a 300x300 "<name>_small.<ext>" thumbnail for this File and
    # record its URL in `thumbnail_url`; returns the thumbnail URL.
    # NOTE(review): Python 2 code (StringIO, urllib.unquote).
    from PIL import Image, ImageOps
    import os

    if self.file_url:
        if self.file_url.startswith("/files"):
            # local public file: read straight from disk
            try:
                image = Image.open(frappe.get_site_path("public", self.file_url.lstrip("/")))
                filename, extn = self.file_url.rsplit(".", 1)
            except IOError:
                frappe.msgprint("Unable to read file format for {0}".format(self.file_url))
        else:
            # download the remote image
            import requests, StringIO
            file_url = frappe.utils.get_url(self.file_url)
            r = requests.get(file_url, stream=True)
            r.raise_for_status()
            image = Image.open(StringIO.StringIO(r.content))
            filename, extn = self.file_url.rsplit("/", 1)[1].rsplit(".", 1)
            filename = "/files/" + strip(urllib.unquote(filename))

        thumbnail = ImageOps.fit(
            image,
            (300, 300),
            Image.ANTIALIAS
        )
        thumbnail_url = filename + "_small." + extn
        path = os.path.abspath(frappe.get_site_path("public", thumbnail_url.lstrip("/")))
        try:
            thumbnail.save(path)
            self.db_set("thumbnail_url", thumbnail_url)
        except IOError:
            frappe.msgprint("Unable to write file format for {0}".format(path))
        return thumbnail_url
def store_document(self):
    """Decode the base64 payload in self.file_data and store it under
    public/files/mycfo/edited_file/<document_type>/, recording the
    relative path on self.new_file_path.

    Archive / Upgrade Validity requests carry no file and are skipped.
    Any failure is surfaced as a generic "File Upload Error".
    """
    self.create_directory()
    try:
        if self.file_data and self.request_type not in ["Archive", "Upgrade Validity"]:
            base64_data = self.file_data.get("file_data").encode("utf8")
            # strip the data-URI prefix before decoding
            base64_data = base64_data.split(',')[1]
            base64_data = base64.b64decode(base64_data)
            extension = "." + self.file_extension if self.file_extension else ""
            file_path = frappe.get_site_path("public", "files", "mycfo", "edited_file",
                self.document_type, self.file_name + extension)
            with open(file_path, "wb+") as fi_nm:
                fi_nm.write(base64_data)
            self.new_file_path = '/'.join(["files", "mycfo", "edited_file",
                self.document_type, self.file_name + extension])
    except Exception:
        # py3-compatible except clause (was the py2-only `except Exception,e`)
        frappe.throw("File Upload Error")
def jupyter(context):
    """Start a Jupyter notebook server wired up to the given bench site.

    Installs jupyter on the fly if missing, creates the site's
    jupyter_notebooks folder, prints connection boilerplate and then
    replaces this process with the notebook server via execv.
    """
    try:
        from pip import main
    except ImportError:
        from pip._internal import main

    reqs = subprocess.check_output([sys.executable, '-m', 'pip', 'freeze'])
    installed_packages = [r.decode().split('==')[0] for r in reqs.split()]
    if 'jupyter' not in installed_packages:
        main(['install', 'jupyter'])

    site = get_site(context)
    frappe.init(site=site)

    jupyter_notebooks_path = os.path.abspath(frappe.get_site_path('jupyter_notebooks'))
    sites_path = os.path.abspath(frappe.get_site_path('..'))

    try:
        os.stat(jupyter_notebooks_path)
    except OSError:
        print('Creating folder to keep jupyter notebooks at {}'.format(jupyter_notebooks_path))
        os.mkdir(jupyter_notebooks_path)

    bin_path = os.path.abspath('../env/bin')
    # fixed "Stating" -> "Starting" typo in the user-facing banner
    print('''
Starting Jupyter notebook
Run the following in your first cell to connect notebook to frappe
```
import frappe
frappe.init(site='{site}', sites_path='{sites_path}')
frappe.connect()
frappe.local.lang = frappe.db.get_default('lang')
frappe.db.connect()
```
'''.format(site=site, sites_path=sites_path))
    os.execv('{0}/jupyter'.format(bin_path), [
        '{0}/jupyter'.format(bin_path),
        'notebook',
        jupyter_notebooks_path,
    ])
def load_messages(language):
    """Load translation messages for the given language from all
    `setup_wizard_requires` javascript files, send them to the client
    and return the active language."""
    frappe.clear_cache()
    set_default_language(get_language_code(language))
    messages = get_dict("page", "setup-wizard")

    for hook_path in frappe.get_hooks("setup_wizard_requires"):
        # hooks reference assets served from sites/, one level above the site
        js_file_path = os.path.abspath(
            frappe.get_site_path("..", *hook_path.strip("/").split("/")))
        messages.update(get_dict("jsfile", js_file_path))

    messages.update(get_dict("boot"))
    send_translations(messages)
    return frappe.local.lang
def get_file_path(path):
    """Resolve a file URL to a site path: private URLs are used as-is,
    everything else is looked up under the '/public' prefix."""
    prefix = "" if "/private/" in path else "/public"
    return frappe.get_site_path((prefix + path).strip("/"))
def make_jasper_hooks_path():
    """Create (if needed) and return the per-site jasper hooks folder,
    named after the site with dots replaced by underscores."""
    site_slug = frappe.local.site.replace(".", "_")
    jasper_hooks_path = frappe.get_site_path("jasper_hooks_" + site_slug)
    frappe.create_folder(jasper_hooks_path, with_init=True)
    return jasper_hooks_path
def process_new_images(item_code, rte_id, tag):
    """Read images from 'uploads' folder, sort and rename them, resize and
    auto-orient them, copy them to the site public images folder and finally
    create a website slideshow.

    Server-side part of auto-slideshow, called from a button on item page.
    Returns a dict with a 'success' flag.
    """
    # Whitelisted function; check permissions
    if not frappe.has_permission('Item', 'write'):
        frappe.throw('Need write permissions on Item!', frappe.PermissionError)

    public_site_files_path = os.path.abspath(
        frappe.get_site_path('public', 'files'))
    ret_val = {'success': False}

    # Get user — uploads live in a per-user subfolder of uploads_path
    current_user = frappe.db.get_value("User", frappe.session.user, ["username"])
    upload_images_directory = os.path.join(uploads_path, current_user)
    slideshow_code = 'SS-' + item_code

    # Check that no slideshow exists for this item, and that we don't have
    # an existing slideshow with a matching name ('SS-ITEM-?????')
    if frappe.db.get_value("Item", item_code, "slideshow", slideshow_code):
        frappe.msgprint("A website slideshow is already set for this item.")
        return ret_val
    if frappe.db.exists("Website Slideshow", slideshow_code):
        frappe.msgprint("A website slideshow with the name " + slideshow_code +
            " already exists.")
        return ret_val

    # Images should already be uploaded onto local uploads directory
    # Sort these images into a 'natural' order: split each name into
    # alternating text/number runs so "img2" sorts before "img10"
    file_list = list_files(upload_images_directory)
    n_files = len(file_list)
    file_dict = {}
    for file in file_list:
        file_dict[file] = tuple(
            int(x) if x.isdigit() else x
            for x in re_digitsearch.split(file) if x)
    file_list.sort(key=lambda x: file_dict[x])

    if (n_files == 0):
        frappe.msgprint("There are no images to process. " +
            "Please upload images first.")
        return ret_val

    # Update the number of images to process (client-side progress UI)
    msg = {'command': 'set_image_number', 'n_images': n_files}
    realtime_eval(rte_id, tag, 'update_slideshow', msg)

    new_file_list = []
    file_sizes = []

    # Rename the files to ITEM-XXXXX-Y and move all the files to
    # public_site_files_path
    w = len(str(n_files))
    for i, fname in enumerate(file_list, 1):
        new_fname = item_code + '-{num:0{width}}.jpg'.format(num=i, width=w)
        new_file_list.append(new_fname)
        upload_fpath = os.path.join(upload_images_directory, fname)
        site_fpath = os.path.join(public_site_files_path, new_fname)
        shutil.move(upload_fpath, site_fpath)
        # Now auto resize the image
        resize_image(site_fpath)
        # Url (relative to hostname) of file
        file_url = os.path.join('files', new_fname)
        # File size
        file_sizes.append(os.path.getsize(site_fpath))
        # Now update the slideshow progress on the client
        msg = {
            'command': 'new_image',
            'img_id': i,
            'n_images': n_files,
            'file_url': file_url
        }
        realtime_eval(rte_id, tag, 'update_slideshow', msg)

    if create_slideshow(slideshow_code):
        create_slideshow_items(slideshow_code, new_file_list, file_sizes)
    else:
        frappe.msgprint("There was a problem creating the slideshow. " +
            "You will need to do this manually")
        return ret_val

    # For now, assume the first image is the primary image
    # Note this is the idx which is one-indexed, not zero-indexed.
    idx_main_image = 1

    # Update the website slideshow
    frappe.db.set_value('Item', item_code, 'slideshow', slideshow_code)

    # Update the item image
    file_name = new_file_list[idx_main_image - 1]
    image_url = os.path.join('files', new_file_list[idx_main_image - 1])
    frappe.db.set_value('Item', item_code, 'image', image_url)

    # Set up website image
    web_url, thumb_url = create_website_image(file_name, item_code)

    # Set the website image and thumbnail
    frappe.db.set_value('Item', item_code, 'website_image', web_url)
    frappe.db.set_value('Item', item_code, 'thumbnail', thumb_url)

    # Add a comment to the Item
    frappe.get_doc("Item", item_code).add_comment(
        "Attachment",
        "Auto Create Slideshow: Website slideshow {}".format(slideshow_code))

    # Allow the slideshow to close and update to show completion
    msg = {'command': 'done'}
    realtime_eval(rte_id, tag, 'update_slideshow', msg)

    ret_val['success'] = True
    return ret_val
# NOTE(review): fragment — `user_list` and `active_users` are defined
# earlier, outside this excerpt.
for user in user_list:
    roles = frappe.get_list("Has Role", filters = {
        'parent': user.name
    }, fields = ['role'])
    for row in roles:
        # a user counts as active once any of their roles grants desk access
        if frappe.get_value("Role", row.role, "desk_access") == 1:
            active_users += 1
            break

# hard-coded trial limits written to the site's quota.json
data = {
    'users': 5,
    'active_users': active_users,
    'space': 5120,
    'db_space': 100,
    'company': 2,
    'used_company': 1,
    'count_website_users': 0,
    'count_administrator_user': 0,
    'valid_till': add_days(today(), 14)
}

with open(frappe.get_site_path('quota.json'), 'w') as outfile:
    json.dump(data, outfile, indent= 2)

# reconstruct the absolute path purely for the informational message below
file_path = frappe.utils.get_bench_path() + '/' + \
    frappe.utils.get_site_name(frappe.local.site) + \
    '/quota.json'

print('\nfile quota.json created at ', file_path, 'with the following settings:')
for key in data:
    print("\t{}: {}".format(key, data[key]))
print('\nChange the values in quota.json to change limits\n')
def get_error_snapshot_path():
    """Return the path of the site's error-snapshots folder."""
    snapshots_folder = 'error-snapshots'
    return frappe.get_site_path(snapshots_folder)
def migrate(verbose=True, skip_failing=False, skip_search_index=False):
    """Migrate all apps to the current version, will:
    - run before migrate hooks
    - run patches
    - sync doctypes (schema)
    - sync dashboards
    - sync fixtures
    - sync desktop icons
    - sync web pages (from /www)
    - run after migrate hooks
    """
    service_status = check_connection(redis_services=["redis_cache"])
    if False in service_status.values():
        # refuse to migrate while any required service is down
        for service in service_status:
            if not service_status.get(service, True):
                print("{} service is not running.".format(service))
        print("""Cannot run bench migrate without the services running.
If you are running bench in development mode, make sure that bench is running:

$ bench start

Otherwise, check the server logs and ensure that all the required services are running.""")
        sys.exit(1)

    # Patches record which tables they touch; the list is dumped to
    # touched_tables.json at the end (even on failure, via finally).
    touched_tables_file = frappe.get_site_path("touched_tables.json")
    if os.path.exists(touched_tables_file):
        os.remove(touched_tables_file)

    try:
        add_column(doctype="DocType", column_name="migration_hash", fieldtype="Data")
        frappe.flags.touched_tables = set()
        frappe.flags.in_migrate = True
        clear_global_cache()

        # run before_migrate hooks
        for app in frappe.get_installed_apps():
            for fn in frappe.get_hooks("before_migrate", app_name=app):
                frappe.get_attr(fn)()

        # run patches
        frappe.modules.patch_handler.run_all(skip_failing)

        # sync
        frappe.model.sync.sync_all(verbose=verbose)
        frappe.translate.clear_cache()
        sync_jobs()
        sync_fixtures()
        sync_dashboards()
        sync_customizations()
        sync_languages()

        frappe.get_doc("Portal Settings", "Portal Settings").sync_menu()

        # syncs statics
        render.clear_cache()

        # updating installed applications data
        frappe.get_single("Installed Applications").update_versions()

        # run after_migrate hooks
        for app in frappe.get_installed_apps():
            for fn in frappe.get_hooks("after_migrate", app_name=app):
                frappe.get_attr(fn)()

        # build web_routes index
        if not skip_search_index:
            # Run this last as it updates the current session
            print("Building search index for {}".format(frappe.local.site))
            build_index_for_all_routes()

        frappe.db.commit()

        clear_notifications()

        frappe.publish_realtime("version-update")
        frappe.flags.in_migrate = False
    finally:
        # always persist the touched-tables log
        with open(touched_tables_file, "w") as f:
            json.dump(list(frappe.flags.touched_tables), f, sort_keys=True, indent=4)
        frappe.flags.touched_tables.clear()
def validate(self):
    # Raise error anyways to demonstrate validate func
    # NOTE(review): demo/experimental code — runs a blocking webcam
    # face-recognition loop inside a DocType validate hook; it will hang
    # any save until 'q' is pressed in the OpenCV window.
    path = (frappe.get_site_path('public', 'files', 'ahmad.jpg'))
    path2 = (frappe.get_site_path('public', 'files', 'omar.jpg'))

    # picture_of_me = face_recognition.load_image_file(str(path))
    # my_face_encoding = face_recognition.face_encodings(picture_of_me)[0]
    # unknown_picture = face_recognition.load_image_file(str(path2))
    # unknown_face_encoding = face_recognition.face_encodings(unknown_picture)[0]
    # results = face_recognition.compare_faces([my_face_encoding], unknown_face_encoding)
    # if results[0] == True:
    #     frappe.throw("It's a picture of me!")
    # else:
    #     frappe.throw("It's not a picture of me!")

    # This is a super simple (but slow) example of running face recognition on live video from your webcam.
    # There's a second example that's a little more complicated but runs faster.
    # PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read from your webcam.
    # OpenCV is *not* required to use the face_recognition library. It's only required if you want to run this
    # specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.

    # Get a reference to webcam #0 (the default one)
    # START FROM HERE 11111
    video_capture = cv2.VideoCapture(0)

    # Load a sample picture and learn how to recognize it.
    obama_image = face_recognition.load_image_file(str(path))
    obama_face_encoding = face_recognition.face_encodings(obama_image)[0]

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True

    while True:
        # Grab a single frame of video
        ret, frame = video_capture.read()

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(small_frame)
            face_encodings = face_recognition.face_encodings(small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                match = face_recognition.compare_faces([obama_face_encoding], face_encoding)
                name = "Unknown"
                if match[0]:
                    name = "Barack"
                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Display the resulting image
        cv2.imshow('Video', frame)

        # Hit 'q' on the keyboard to quit!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release handle to the webcam
    video_capture.release()
    cv2.destroyAllWindows()
def get_index_path(index_name):
    """Return the site-relative path to *index_name* under the site's
    ``indexes`` directory."""
    indexes_dir = "indexes"
    return frappe.get_site_path(indexes_dir, index_name)
def validate(self):
    """Demo validate hook: runs a live eye-blink counter (EAR method) on the
    default webcam until 'q' is pressed.

    NOTE(review): blocking infinite loop in a ``validate`` hook — presumably
    experimental; TOTAL/COUNTER results are displayed but never persisted.
    """
    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 3

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    path = (frappe.get_site_path('public', "shape_predictor_68_face_landmarks.dat"))
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(str(path))

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start()
    fileStream = False  # NOTE(review): never read afterwards
    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        # if this is a file video stream, then we need to check if
        # there any more frames left in the buffer to process

        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale channels)
        frame = vs.read()
        frame = imutils.resize(frame, width=450)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # detect faces in the grayscale frame
        rects = detector(gray, 0)

        # loop over the face detections
        for rect in rects:
            # determine the facial landmarks for the face region, then
            # convert the facial landmark (x, y)-coordinates to a NumPy
            # array
            shape = predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            # extract the left and right eye coordinates, then use the
            # coordinates to compute the eye aspect ratio for both eyes
            leftEye = shape[lStart:lEnd]
            rightEye = shape[rStart:rEnd]
            leftEAR = self.eye_aspect_ratio(leftEye)
            rightEAR = self.eye_aspect_ratio(rightEye)

            # average the eye aspect ratio together for both eyes
            ear = (leftEAR + rightEAR) / 2.0

            # compute the convex hull for the left and right eye, then
            # visualize each of the eyes
            leftEyeHull = cv2.convexHull(leftEye)
            rightEyeHull = cv2.convexHull(rightEye)
            cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
            cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

            # check to see if the eye aspect ratio is below the blink
            # threshold, and if so, increment the blink frame counter
            if ear < EYE_AR_THRESH:
                COUNTER += 1
            # otherwise, the eye aspect ratio is not below the blink
            # threshold
            else:
                # if the eyes were closed for a sufficient number of
                # then increment the total number of blinks
                if COUNTER >= EYE_AR_CONSEC_FRAMES:
                    TOTAL += 1
                # reset the eye frame counter
                COUNTER = 0

            # draw the total number of blinks on the frame along with
            # the computed eye aspect ratio for the frame
            cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
            cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
def get_watermarks_og_folder_path():
    """Return the site-relative path of the original (pre-watermark) image
    folder, ``wm_image_files``."""
    folder_name = "wm_image_files"
    return frappe.get_site_path(folder_name)
from jinja2 import Environment, PackageLoader import jinja2 import pymysql from ugscommon import get_unsubmitted_prec_qty import ugssettings from .ebay_active_listings import update_ebay_data NO_IMAGES = True USE_SERVER_IMAGES = True #Save to public directory so one can download garage_xml_path = (os.path.join(os.sep, frappe.utils.get_bench_path(), 'garagesale')) site_files_path = (os.path.join(os.sep, frappe.utils.get_bench_path(), 'sites', frappe.get_site_path(), 'public', 'files')) images_url = 'https://shop.unigreenscheme.co.uk' site_url = 'https://shop.unigreenscheme.co.uk' footer = """<br><br>The price includes VAT and we can provide VAT invoices.\ <br><br>Universities and colleges - purchase orders accepted - please contact us.""" def is_scotland(item_code): """Determine if an item code is a Scottish item by reference to stock locations (specifically, if the item is contained in the 'Scotland' stock location). TODO - this is broken for several obvious reasons """ sl = frappe.db.sql(
def migrate(verbose=True, rebuild_website=False, skip_failing=False):
    '''Migrate all apps to the latest version, will:
    - run before migrate hooks
    - run patches
    - sync doctypes (schema)
    - sync fixtures
    - sync desktop icons
    - sync web pages (from /www)
    - run after migrate hooks

    NOTE(review): ``rebuild_website`` is accepted but never read in this body.
    '''
    # fresh run: drop any touched-tables record left over from a previous migrate
    touched_tables_file = frappe.get_site_path('touched_tables.json')
    if os.path.exists(touched_tables_file):
        os.remove(touched_tables_file)

    try:
        frappe.flags.touched_tables = set()
        frappe.flags.in_migrate = True
        clear_global_cache()

        #run before_migrate hooks
        for app in frappe.get_installed_apps():
            for fn in frappe.get_hooks('before_migrate', app_name=app):
                frappe.get_attr(fn)()

        # run patches
        frappe.modules.patch_handler.run_all(skip_failing)
        # sync
        frappe.model.sync.sync_all(verbose=verbose)
        frappe.translate.clear_cache()
        sync_fixtures()
        sync_customizations()
        sync_languages()

        frappe.get_doc('Portal Settings', 'Portal Settings').sync_menu()

        # syncs statics
        render.clear_cache()

        # add static pages to global search
        global_search.update_global_search_for_all_web_pages()

        # updating installed applications data
        frappe.get_single('Installed Applications').update_versions()

        #run after_migrate hooks
        for app in frappe.get_installed_apps():
            for fn in frappe.get_hooks('after_migrate', app_name=app):
                frappe.get_attr(fn)()

        frappe.db.commit()

        clear_notifications()

        frappe.publish_realtime("version-update")
        frappe.flags.in_migrate = False
    finally:
        # always persist the set of touched tables, even if migration failed part-way
        with open(touched_tables_file, 'w') as f:
            json.dump(list(frappe.flags.touched_tables), f, sort_keys=True, indent=4)
        frappe.flags.touched_tables.clear()
def before_submit(self):
    """Dispatch this message to its recipients before the document is submitted.

    Two modes, selected by ``self.message_type``:
    - "E-mail": builds an HTML body (attachments appended as links) and sends
      via ``frappe.sendmail`` to the e-mails in ``recipient_item``.
    - "IoT": builds a plain-text body from the JSON in ``message_text``,
      resolves target devices from places/roles/participants tables, then
      fans out over MQTT, Telegram and SMS.

    NOTE(review): attachment links and the "Con archivos anexos" labels are
    Spanish user-facing strings; left untouched (runtime text).
    """
    ## Prepare recipients list
    sms_list = []
    telegram_list = []
    mqtt_list = []
    str_attach = ''
    recipients = []
    str_message = ""
    ## Send E-mails
    if self.message_type == "E-mail":
        ## Read message body
        str_message = self.email_body
        ## Read Recipients Table
        recipient_list = self.recipient_item
        if len(recipient_list) > 0:
            for item in recipient_list:
                recipients.append(item.participant_email_id)
        ## Read and prepare message with Attachments
        if len(self.message_item) > 0:
            for idx, row in enumerate(self.message_item):
                # absolute URLs pass through; site files get the site URL prepended
                if "http" in row.attachment:
                    str_attach = str_attach + '<a href="' + row.attachment + '">Anexo ' + str(
                        idx + 1) + ': ' + row.description + '</a><br>'
                else:
                    str_attach = str_attach + '<a href="' + frappe.utils.get_url(
                    ) + urllib.parse.quote(
                        row.attachment) + '">Anexo ' + str(
                            idx + 1) + ': ' + row.description + '</a><br>'
            str_message = str_message + "<p>Con archivos anexos:</p><p>" + str_attach + "</p>"
        ## Finally Send message by Email
        email_args = {
            "sender": self.from_email_account,
            "recipients": recipients,
            "message": str_message,
            "subject": self.subject,
            "reference_doctype": self.doctype,
            "reference_name": self.name
        }
        frappe.sendmail(**email_args)
    ## Send IoT messages
    if self.message_type == "IoT":
        ## Read main message
        dict_message = json.loads(self.message_text)
        if "message" in dict_message:
            str_message = dict_message["message"]["text"]
        ## Read and prepare message with attachments
        if len(self.message_item) > 0 and str_message != '':
            for idx, row in enumerate(self.message_item):
                if "http" in row.attachment:
                    str_attach = str_attach + 'Anexo ' + str(
                        idx + 1) + ': ' + row.description + ' @ ' + row.attachment + '\n'
                else:
                    str_attach = str_attach + 'Anexo ' + str(
                        idx + 1) + ': ' + row.description + ' @ ' + frappe.utils.get_url(
                        ) + urllib.parse.quote(row.attachment) + '\n'
            str_message = str_message + "\nCon archivos anexos:\n" + str_attach
            dict_message["message"]["text"] = str_message
        ## Prepare location recipients
        if len(self.location_table) > 0 and not self.all_places:
            for loc in self.location_table:
                """ Get from database devices assigned to locations in session """
                locdev = frappe.db.sql(
                    """SELECT device FROM `tabPlace Item`
                    WHERE parent=%s AND place=%s and docstatus < 2""",
                    (self.course, loc.place), True)
                if len(locdev) > 0:
                    for plc in locdev:
                        sms_list, mqtt_list, telegram_list = append_recipients(
                            plc.device, sms_list, mqtt_list, telegram_list)
        ## Prepare device recipients even in case all places selectect
        if len(self.device_table) > 0 and not self.all_places:
            for dev in self.device_table:
                sms_list, mqtt_list, telegram_list = append_recipients(
                    dev.device, sms_list, mqtt_list, telegram_list)
        ## Prepare all devices
        if self.all_places:
            """ Get from database devices in session """
            locdev = frappe.db.sql(
                """SELECT device FROM `tabPlace Item`
                WHERE parent=%s and docstatus < 2""", (self.course), True)
            if len(locdev) > 0:
                for plc in locdev:
                    sms_list, mqtt_list, telegram_list = append_recipients(
                        plc.device, sms_list, mqtt_list, telegram_list)
            """ Get from database devices in session in roles table """
            roldev = frappe.db.sql(
                """SELECT device FROM `tabSession Role Item`
                WHERE parent=%s and docstatus < 2""", (self.course), True)
            if len(roldev) > 0:
                for itm in roldev:
                    sms_list, mqtt_list, telegram_list = append_recipients(
                        itm.device, sms_list, mqtt_list, telegram_list)
        ## Prepare role recipients
        if len(self.recipient_table) > 0 and not self.all_roles:
            for rol in self.recipient_table:
                frappe.msgprint(rol.participant_role)
                """ Get from database devices ported in session """
                roldev = frappe.db.sql(
                    """SELECT device FROM `tabSession Role Item`
                    WHERE parent=%s AND participant_role=%s and docstatus < 2""",
                    (self.course, rol.participant_role), True)
                if len(roldev) > 0:
                    for itm in roldev:
                        sms_list, mqtt_list, telegram_list = append_recipients(
                            itm.device, sms_list, mqtt_list, telegram_list)
        ## Prepare participants
        if len(self.participant_table) > 0 and not self.all_roles:
            for per in self.participant_table:
                frappe.msgprint(per.participant)
                """ Get from database devices ported in session """
                perdev = frappe.db.sql(
                    """SELECT device FROM `tabSession Role Item`
                    WHERE parent=%s AND participant=%s and docstatus < 2""",
                    (self.course, per.participant), True)
                if len(perdev) > 0:
                    # NOTE(review): loop variable `per` shadows the outer `per`
                    for per in perdev:
                        sms_list, mqtt_list, telegram_list = append_recipients(
                            per.device, sms_list, mqtt_list, telegram_list)
        ## Prepare all roles
        if self.all_roles:
            """ Get from database devices in session in roles table """
            roldev = frappe.db.sql(
                """SELECT device FROM `tabSession Role Item`
                WHERE parent=%s and docstatus < 2""", (self.course), True)
            if len(roldev) > 0:
                for itm in roldev:
                    sms_list, mqtt_list, telegram_list = append_recipients(
                        itm.device, sms_list, mqtt_list, telegram_list)
        ## Send message by MQTT
        if len(mqtt_list) > 0:
            path = frappe.utils.get_bench_path()
            # derive the bare site host name (strip scheme and port)
            site_name = frappe.utils.get_url().replace("http://", "").replace(
                "https://", "")
            if ":" in site_name:
                pos = site_name.find(":")
                site_name = site_name[:pos]
            client = frappe.get_doc('MQTT Settings', 'MQTT Settings')
            server = client.broker_gateway
            port = client.port
            user = client.user
            client.secret = get_decrypted_password('MQTT Settings',
                                                   'MQTT Settings', 'secret',
                                                   False)
            secret = client.secret
            do_ssl = client.is_ssl
            # connect to MQTT Broker to Publish Message
            pid = os.getpid()
            client_id = '{}:{}'.format('client', str(pid))
            try:
                backend = mqtt.Client(client_id=client_id, clean_session=True)
                backend.username_pw_set(user, password=secret)
                if do_ssl == True:
                    # TLS material lives under the site's private/files;
                    # get_site_path(...)[1:] strips the leading "./"
                    ca = os.path.join(
                        path, "sites", site_name,
                        frappe.get_site_path('private', 'files', client.ca)[1:])
                    client_crt = os.path.join(
                        path, "sites", site_name,
                        frappe.get_site_path('private', 'files',
                                             client.client_crt)[1:])
                    client_key = os.path.join(
                        path, "sites", site_name,
                        frappe.get_site_path('private', 'files',
                                             client.client_key)[1:])
                    port_ssl = client.ssl_port
                    ## Prepare mqtt
                    backend.tls_set(ca_certs=ca,
                                    certfile=client_crt,
                                    keyfile=client_key,
                                    cert_reqs=ssl.CERT_REQUIRED,
                                    ciphers=None)
                    backend.tls_insecure_set(False)
                    time.sleep(.5)
                    backend.connect(server, port_ssl)
                else:
                    backend.connect(server, port)
                payload = frappe.safe_decode(
                    json.dumps(dict_message)).encode('utf-8')
                for dev in mqtt_list:
                    mqtt_topic = str(dev) + "/display/text"
                    backend.publish(mqtt_topic, cstr(payload))
                backend.disconnect()
            except:
                # NOTE(review): bare except hides the real failure, and _()
                # is called with two arguments — the second is silently
                # ignored by gettext, so the device list never shows.
                frappe.msgprint(
                    _("Error in MQTT Broker sending to ", str(mqtt_list)))
                pass
        ## Send message by Telegram
        if len(telegram_list) > 0:
            try:
                send_telegram(telegram_list, cstr(str_message))
            except:
                # NOTE(review): bare except, failure is silently dropped
                pass
        ## Send message by SMS
        if len(sms_list) > 0 and self.message_type == "IoT":
            try:
                send_sms(sms_list, cstr(str_message))
            except:
                # NOTE(review): bare except, failure is silently dropped
                pass
    ## Final Message
    frappe.msgprint(_("Actions Completed and Messages Sent"))
def copy_site_config(self):
    """Write a copy of the site's ``site_config.json`` to the configured
    backup location (``self.backup_path_conf``)."""
    config_path = os.path.join(frappe.get_site_path(), "site_config.json")
    with open(config_path) as source:
        contents = source.read()
    with open(self.backup_path_conf, "w") as destination:
        destination.write(contents)
def zip_files(self):
    """Archive the site's public files directory into
    ``self.backup_path_files`` using tar via the shell.

    Fix: both paths are now shell-quoted — previously a path containing
    spaces or shell metacharacters would break the command (or allow
    injection into the shell string).
    """
    import shlex  # local import: only needed here for safe shell quoting
    files_path = frappe.get_site_path("public", "files")
    cmd_string = "tar -cf {0} {1}".format(
        shlex.quote(self.backup_path_files), shlex.quote(files_path))
    err, out = frappe.utils.execute_in_shell(cmd_string)
def blink(self):
    """Liveness check: recognize a reference face ('Ahmad') on the webcam,
    then count eye blinks (EAR method); one blink sets SUCCESS and exits.

    Side effects: sets ``self.x`` (base64 face encoding), ``self.y``
    (decoded array repr) and ``self.z`` (round-trip check result).
    NOTE(review): SUCCESS is set but never returned or stored — presumably
    the caller was meant to read it; confirm.
    """
    # define two constants, one for the eye aspect ratio to indicate
    # blink and then a second constant for the number of consecutive
    # frames the eye must be below the threshold
    EYE_AR_THRESH = 0.3
    EYE_AR_CONSEC_FRAMES = 3

    # initialize the frame counters and the total number of blinks
    COUNTER = 0
    TOTAL = 0
    SUCCESS = False

    # initialize dlib's face detector (HOG-based) and then create
    # the facial landmark predictor
    path2 = (frappe.get_site_path('public', 'files', 'ahmad.jpg'))

    # Load a sample picture and learn how to recognize it.
    obama_image = face_recognition.load_image_file(str(path2))
    obama_face_encoding = face_recognition.face_encodings(obama_image)[0]
    # self.x=type(obama_face_encoding).__name__
    # load numpy array to t var
    t = obama_face_encoding
    # encoding the numpy array
    s = base64.b64encode(t)
    self.x = s
    # decoding the numpy array
    # NOTE(review): base64.decodestring is deprecated and removed in
    # Python 3.9+ — use base64.decodebytes instead.
    r = base64.decodestring(s)
    q = np.frombuffer(r, dtype=np.float64)
    self.y = str(q)
    # compare between q and t return ture or false
    self.z = str(np.allclose(q, t))

    # Initialize some variables
    face_locations = []
    face_encodings = []
    face_names = []
    process_this_frame = True
    name = 'uni'
    path = (frappe.get_site_path('public', "shape_predictor_68_face_landmarks.dat"))
    print("[INFO] loading facial landmark predictor...")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor(str(path))

    # grab the indexes of the facial landmarks for the left and
    # right eye, respectively
    (lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"]
    (rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"]

    # start the video stream thread
    print("[INFO] starting video stream thread...")
    vs = VideoStream(src=0).start()
    # vs = VideoStream(usePiCamera=True).start()
    fileStream = False  # NOTE(review): never read afterwards
    time.sleep(1.0)

    # loop over frames from the video stream
    while True:
        # grab the frame from the threaded video file stream, resize
        # it, and convert it to grayscale channels)
        frame = vs.read()
        frame = imutils.resize(frame, width=450)

        # Resize frame of video to 1/4 size for faster face recognition processing
        small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Only process every other frame of video to save time
        if process_this_frame:
            # Find all the faces and face encodings in the current frame of video
            face_locations = face_recognition.face_locations(small_frame)
            face_encodings = face_recognition.face_encodings(small_frame, face_locations)

            face_names = []
            for face_encoding in face_encodings:
                # See if the face is a match for the known face(s)
                match = face_recognition.compare_faces([obama_face_encoding], face_encoding)
                name = "Unknown"
                if match[0]:
                    name = "Ahmad"
                face_names.append(name)

        process_this_frame = not process_this_frame

        # Display the results
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # detect faces in the grayscale frame
        rects = detector(gray, 0)
        for (top, right, bottom, left), name in zip(face_locations, face_names):
            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
            top *= 4
            right *= 4
            bottom *= 4
            left *= 4

            # Draw a box around the face
            cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)

            # Draw a label with a name below the face
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
            font = cv2.FONT_HERSHEY_DUPLEX
            cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)

        # Display the resulting image
        # only run blink detection once the reference face was recognized
        if name == 'Ahmad':
            for rect in rects:
                # determine the facial landmarks for the face region, then
                # convert the facial landmark (x, y)-coordinates to a NumPy
                # array
                shape = predictor(gray, rect)
                shape = face_utils.shape_to_np(shape)

                # extract the left and right eye coordinates, then use the
                # coordinates to compute the eye aspect ratio for both eyes
                leftEye = shape[lStart:lEnd]
                rightEye = shape[rStart:rEnd]
                leftEAR = self.eye_aspect_ratio(leftEye)
                rightEAR = self.eye_aspect_ratio(rightEye)

                # average the eye aspect ratio together for both eyes
                ear = (leftEAR + rightEAR) / 2.0

                # compute the convex hull for the left and right eye, then
                # visualize each of the eyes
                leftEyeHull = cv2.convexHull(leftEye)
                rightEyeHull = cv2.convexHull(rightEye)
                cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)
                cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)

                # check to see if the eye aspect ratio is below the blink
                # threshold, and if so, increment the blink frame counter
                if ear < EYE_AR_THRESH:
                    COUNTER += 1
                # otherwise, the eye aspect ratio is not below the blink
                # threshold
                else:
                    # if the eyes were closed for a sufficient number of
                    # then increment the total number of blinks
                    if COUNTER >= EYE_AR_CONSEC_FRAMES:
                        TOTAL += 1
                    # reset the eye frame counter
                    COUNTER = 0

                # draw the total number of blinks on the frame along with
                # the computed eye aspect ratio for the frame
                cv2.putText(frame, "Blinks: {}".format(TOTAL), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
                cv2.putText(frame, "EAR: {:.2f}".format(ear), (300, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)

        # show the frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break
        # a single blink counts as a successful liveness check
        if TOTAL == 1:
            SUCCESS = True
            break

    # do a bit of cleanup
    cv2.destroyAllWindows()
    vs.stop()
def _new_site(
    db_name,
    site,
    mariadb_root_username=None,
    mariadb_root_password=None,
    admin_password=None,
    verbose=False,
    install_apps=None,
    source_sql=None,
    force=False,
    no_mariadb_socket=False,
    reinstall=False,
    db_password=None,
    db_type=None,
    db_host=None,
    db_port=None,
    new_site=False,
):
    """Install a new Frappe site.

    Validates the target and flag combination, derives a database name when
    none is given, creates the site directories, installs the database and
    all requested apps, then restores the scheduler state.

    Fix: ``install_apps`` defaults to None, but the original computed
    ``list(install_apps) or []`` which raises TypeError for None; the guard
    now happens before the conversion (``list(install_apps or [])``).

    NOTE(review): ``new_site`` is accepted for call-site compatibility but
    unused in this body.
    """
    from frappe.commands.scheduler import _is_scheduler_enabled
    from frappe.utils import get_site_path, scheduler, touch_file

    if not force and os.path.exists(site):
        print("Site {0} already exists".format(site))
        sys.exit(1)

    if no_mariadb_socket and not db_type == "mariadb":
        print("--no-mariadb-socket requires db_type to be set to mariadb.")
        sys.exit(1)

    frappe.init(site=site)

    if not db_name:
        import hashlib
        # derive a stable, site-unique db name from the site's absolute path
        db_name = "_" + hashlib.sha1(
            os.path.realpath(frappe.get_site_path()).encode()).hexdigest()[:16]

    try:
        # enable scheduler post install?
        enable_scheduler = _is_scheduler_enabled()
    except Exception:
        enable_scheduler = False

    make_site_dirs()

    # lock file signalling an in-progress installation
    installing = touch_file(get_site_path("locks", "installing.lock"))

    install_db(
        root_login=mariadb_root_username,
        root_password=mariadb_root_password,
        db_name=db_name,
        admin_password=admin_password,
        verbose=verbose,
        source_sql=source_sql,
        force=force,
        reinstall=reinstall,
        db_password=db_password,
        db_type=db_type,
        db_host=db_host,
        db_port=db_port,
        no_mariadb_socket=no_mariadb_socket,
    )

    # frappe first, then site-config apps, then explicitly requested apps
    apps_to_install = (["frappe"] + (frappe.conf.get("install_apps") or []) +
                       list(install_apps or []))
    for app in apps_to_install:
        install_app(app, verbose=verbose, set_as_patched=not source_sql)

    os.remove(installing)

    scheduler.toggle_scheduler(enable_scheduler)
    frappe.db.commit()

    scheduler_status = ("disabled"
                        if frappe.utils.scheduler.is_scheduler_disabled() else
                        "enabled")
    print("*** Scheduler is", scheduler_status, "***")
except (requests.exceptions.HTTPError, requests.exceptions.SSLError, IOError): return size = width, height <<<<<<< HEAD image.thumbnail(size) ======= if crop: image = ImageOps.fit(image, size, Image.ANTIALIAS) else: image.thumbnail(size, Image.ANTIALIAS) >>>>>>> 176d241496ede1357a309fa44a037b757a252581 thumbnail_url = filename + "_" + suffix + "." + extn path = os.path.abspath(frappe.get_site_path("public", thumbnail_url.lstrip("/"))) try: image.save(path) if set_as_thumbnail: self.db_set("thumbnail_url", thumbnail_url) self.db_set("thumbnail_url", thumbnail_url) except IOError: frappe.msgprint(_("Unable to write file format for {0}").format(path)) return return thumbnail_url def after_delete(self):
import math import sys import os.path import frappe from frappe import msgprint from erpnext_ebay.ebay_requests import get_trading_api, revise_inventory_status from ebaysdk.exception import ConnectionError from ebaysdk.trading import Connection as Trading from .garage_sale import jtemplate, lookup_condition import ugssettings PATH_TO_YAML = os.path.join(os.sep, frappe.utils.get_bench_path(), 'sites', frappe.get_site_path(), 'ebay.yaml') def revise_generic_items(item_code): """Generic Revise eBay listings""" #get the ebay id given the item_code ebay_id = frappe.get_value('Item', item_code, 'ebay_id') if ebay_id and item_code: frappe.msgprint( 'This Item is on eBay. Please wait while the listing is revised...' ) (item_name, description, function_grade, grade_details, condition, tech_details, delivery_type, accessories_extras, power_cable_included, power_supply_included, remote_control_included, case_included,
def add_cancelled_watermark(dt, dn):
    """Stamp every page of the document's private PDF with the
    'abgebrochen' (cancelled) watermark, attach the result as a new private
    File on (dt, dn), and remove the original PDF and its File record.

    Fix: on FileNotFoundError the original swallowed the error and still
    created a File record (size 0) pointing at a PDF that was never
    written; we now bail out early instead.
    """
    fname = "{0}.pdf".format(dn)
    _fname = "{0}_cancelled.pdf".format(dn)
    input_file_fpath = str(frappe.get_site_path('private', 'files', fname))
    output_file_fpath = str(frappe.get_site_path('private', 'files', _fname))
    watermark = frappe.get_app_path('senstech', 'public', 'pdf', 'abgebrochen.pdf')

    try:
        with open(input_file_fpath, "rb") as input_file, open(watermark, "rb") as watermark_file:
            input_pdf = PdfFileReader(input_file)
            watermark_pdf = PdfFileReader(watermark_file)
            watermark_page = watermark_pdf.getPage(0)
            output = PdfFileWriter()
            # merge the watermark onto every page of the source document
            for i in range(input_pdf.getNumPages()):
                pdf_page = input_pdf.getPage(i)
                pdf_page.mergePage(watermark_page)
                output.addPage(pdf_page)
            with open(output_file_fpath, "wb") as merged_file:
                output.write(merged_file)
    except FileNotFoundError:
        # source PDF or watermark missing: nothing was produced, so do not
        # register a File record for a nonexistent output
        return

    # register the watermarked PDF as a private attachment on the document
    f = frappe.get_doc({
        "doctype": "File",
        "file_url": '/private/files/{0}'.format(_fname),
        "file_name": _fname,
        "attached_to_doctype": dt,
        "attached_to_name": dn,
        "folder": 'Home/Attachments',
        "file_size": 0,
        "is_private": 1
    })
    f.flags.ignore_permissions = True
    f.insert()
    frappe.db.commit()

    # drop the original (un-watermarked) attachment record and file
    files = frappe.get_all('File',
                           filters={
                               'attached_to_doctype': dt,
                               'attached_to_name': dn
                           },
                           fields=['name', 'file_url'])
    for file in files:
        if file.file_url == '/private/files/{0}'.format(fname):
            f_to_remove = frappe.get_doc('File', file.name)
            f_to_remove.delete()
    if os.path.exists(input_file_fpath):
        os.remove(input_file_fpath)
    return
def update_site_usage():
    """Snapshot the current site info into ``<site>/site_data.json``.

    Fix: removed the redundant ``outfile.close()`` that followed the
    ``with`` block — the context manager already closes the file.
    """
    data = get_site_info()
    with open(os.path.join(frappe.get_site_path(), 'site_data.json'), 'w') as outfile:
        json.dump(data, outfile)
def read(ocr_receipt):
    """Parse the OCR XML stored on an 'OCR Receipt' document into a list of
    ``{"name": ..., "expanded": True, "children": [...]}`` dicts suitable
    for a treeview widget, and return it.

    NOTE(review): this body uses Python 2 ``print`` statements — the module
    is not Python 3 compatible as written. The bare ``except:`` clauses
    deliberately blank out any field the OCR XML is missing.
    """
    import xml.etree.cElementTree as ET
    # source = '/home/jvfiel/frappe-bl3ndlabs/apps/erpnext_ocr/erpnext_ocr/erpnext_ocr/test.xml'
    source = frappe.db.sql(
        """SELECT xml FROM `tabOCR Receipt` WHERE name=%s""",
        (ocr_receipt))[0][0]
    if source == "" or not source:
        frappe.throw(
            "No XML. Please upload file in OCR again and save to generate.")
    # `source` is a site-relative file path stored on the receipt
    tree = ET.ElementTree(file=frappe.get_site_path() + source)
    root = tree.getroot()
    # recover the namespace prefix "{...}" from the root tag so it can be
    # interpolated into every findall() path below
    xmlname = root.tag.split("}")
    # print xmlname
    xmlname = xmlname[0]
    xmlname += "}"
    # print xmlname
    parent_ocr = []
    #Vendor Name
    try:
        vendor_name = tree.findall(
            './/{0}vendor//{0}name//{0}recognizedValue//{0}text'.format(
                xmlname))[0].text
        # print elem.tag, elem.attrib, elem.text
    except:
        vendor_name = ""
    parent_ocr.append({
        "name": "Vendor Name",
        "expanded": True,
        "children": [{
            "name": vendor_name
        }]
    })
    #Vendor Address
    try:
        vendor_address = tree.findall(
            './/{0}vendor//{0}address//{0}text'.format(xmlname))[0].text
    except:
        vendor_address = ""
    # print elem.tag, elem.attrib, elem.text
    parent_ocr.append({
        "name": "Vendor Address",
        "expanded": True,
        "children": [{
            "name": vendor_address
        }]
    })
    #Vendor Phone
    try:
        vendor_phone = tree.findall(
            './/{0}vendor//{0}phone//{0}recognizedValue//{0}text'.format(
                xmlname))[0].text
    except:
        vendor_phone = ""
    # print elem.tag, elem.attrib, elem.text
    parent_ocr.append({
        "name": "Vendor Phone",
        "expanded": True,
        "children": [{
            "name": vendor_phone
        }]
    })
    # Vendor Fax
    # for elem in tree.findall('.//{0}vendor//{0}fax//{0}recognizedValue//{0}text'.format(xmlname)):
    #     print elem.tag, elem.attrib, elem.text
    # Vendor PurchaseType
    try:
        vendor_purchasetype = tree.findall(
            './/{0}vendor//{0}purchaseType'.format(xmlname))[0].text
    except:
        vendor_purchasetype = ""
    # print elem.tag, elem.attrib, elem.text
    parent_ocr.append({
        "name": "Purchase Type",
        "expanded": True,
        "children": [{
            "name": vendor_purchasetype
        }]
    })
    # Vendor Date
    try:
        invoice_date = tree.findall(
            './/{0}date//{0}text'.format(xmlname))[0].text
    except:
        invoice_date = ""
    # print elem.tag, elem.attrib, elem.text
    parent_ocr.append({
        "name": "Invoice Date",
        "expanded": True,
        "children": [{
            "name": invoice_date
        }]
    })
    # # Vendor Time
    # for elem in tree.findall('.//{0}time//{0}recognizedValue//{0}text'.format(xmlname)):
    #     print elem.tag, elem.attrib, elem.text
    # Vendor subTotal
    try:
        vendor_subTotal = tree.findall(
            './/{0}subTotal//{0}text'.format(xmlname))[0].text
    except:
        vendor_subTotal = ""
    # print elem.tag, elem.attrib, elem.text
    parent_ocr.append({
        "name": "SubTotal",
        "expanded": True,
        "children": [{
            "name": vendor_subTotal
        }]
    })
    # # Vendor Total
    # for elem in tree.findall('.//{0}total//{0}text'.format(xmlname)):
    #     print elem.tag, elem.attrib, elem.text
    # Vendor Payment Card Number
    # payment_cardno = tree.findall('.//{0}payment//{0}cardNumber'.format(xmlname))[0].text
    # print elem.tag, elem.attrib, elem.text
    # Vendor Payment Value
    # NOTE(review): payment_val is extracted but never appended to
    # parent_ocr — it only appears in the commented-out dict return below.
    try:
        payment_val = tree.findall(
            './/{0}payment//{0}value//{0}recognizedValue//{0}text'.format(
                xmlname))[0].text
    except:
        payment_val = ""
    # print elem.tag, elem.attrib, elem.text
    items = []
    # Vendor Items
    parent_item = {"name": 'Recognized Items', "children": []}
    item_children = []
    for elem in tree.findall('.//{0}recognizedItems//{0}item'.format(xmlname)):
        children = []
        parent_item = {"name": 'Item 1', "children": []}
        # child = {"name": 'Sub Item 1', "children": []}
        # print elem.tag, elem.attrib, elem.text, "*", elem.attrib['index']
        i = elem.attrib['index']
        # for elem in tree.findall('.//{0}recognizedItems//{0}item[@index="{1}"]//{0}name//{0}text'.format(xmlname,i)):
        #     print elem.tag, elem.attrib, elem.text
        #NAME
        try:
            elem = tree.findall(
                './/{0}recognizedItems//{0}item[@index="{1}"]//{0}name//{0}text'
                .format(xmlname, i))[0]
            # print elem.tag, elem.attrib, elem.text
            name = elem.text
        except:
            name = ""
        # name = {"name": elem.text, "children": []}
        parent_item.update({"name": name})
        # children.append(name)
        #TOTAL
        try:
            elem = tree.findall(
                './/{0}recognizedItems//{0}item[@index="{1}"]//{0}total//{0}recognizedValue//{0}text'
                .format(xmlname, i))[0]
            # print elem.tag, elem.attrib, elem.text
            total = elem.text
        except:
            total = ""
        # rebind `total` from the raw text to its treeview node
        total = {
            "name": "Amount",
            "expanded": True,
            "children": [{
                "name": total,
                "children": []
            }]
        }
        print total
        children.append(total)
        #COUNT
        try:
            elem = tree.findall(
                './/{0}recognizedItems//{0}item[@index="{1}"]//{0}count//{0}normalizedValue'
                .format(xmlname, i))[0]
            # print elem.tag, elem.attrib, elem.text
            count = elem.text
        except:
            count = ""
        # rebind `count` from the raw text to its treeview node
        count = {
            "name": "QTY",
            "expanded": True,
            "children": [{
                "name": count,
                "children": []
            }]
        }
        print count
        children.append(count)
        parent_item.update({"children": children})
        # parent_ocr.update(parent_item)
        item_children.append(parent_item)
    # print items
    parent_ocr.append({"name": "Recognized Items", "children": item_children})
    print parent_ocr
    """
    {name: 'Item 1', children: []},
    {name: 'Item 2', expanded: true, children: [
        {name: 'Sub Item 1', children: []},
        {name: 'Sub Item 2', children: []}
    ]}
    ], 'tree'
    """
    # return {
    #     "source": source,
    #     "vendor_name": vendor_name,
    #     "vendor_address": vendor_address,
    #     "vendor_phone": vendor_phone,
    #     "invoice_date": invoice_date,
    #     "subTotal": vendor_subTotal,
    #     # "payment_cardno": payment_cardno,
    #     "payment_val": payment_val,
    #     "items": items
    # }
    return parent_ocr
# print read()