def merge_in_pdf(fa, fb):
    """Merge two uploaded files into a single PDF.

    Any image upload (per ``_is_img``) is first converted to a one-page
    PDF; the two PDFs are then concatenated.

    :param fa: file-like object with ``.name`` and ``.file`` attributes
    :param fb: second such file-like object
    :return: a :class:`File` named ``'id_card.pdf'`` wrapping the merged
        PDF, positioned at offset 0
    """
    for f in [fa, fb]:
        if _is_img(f.name):
            img = Image.open(f.file)
            # PDF export needs plain RGB. convert() drops an alpha channel
            # and also handles palette/greyscale/CMYK inputs — the previous
            # try/except around img.split() crashed on non-RGB(A) modes and
            # swallowed errors with a broad `except Exception`.
            if img.mode != 'RGB':
                img = img.convert('RGB')
            temp_file = TemporaryFile()
            img.save(temp_file, "PDF", resolution=100, transparency=0)
            temp_file.seek(0)
            f.file = temp_file
    merger = PdfFileMerger()
    for f in [fa, fb]:
        merger.append(PdfFileReader(f.file))
    temp_file = TemporaryFile()
    merger.write(temp_file)
    temp_file.seek(0)
    pdf_file = File(temp_file)
    pdf_file.name = 'id_card.pdf'
    return pdf_file
def testSaveFileToUploadDirectory(self):
    """save_file_to_upload_dir() writes the file into UPLOAD_DIR, and
    saving the same file again overwrites it cleanly."""
    from django.conf import settings
    upload_dir = settings.UPLOAD_DIR
    # make sure upload dir can be written to
    self.assertTrue(os.access(upload_dir, os.W_OK))
    # create a temporary file; "wr" was an invalid mode string (ValueError
    # on Python 3) — "w+" creates the file read/write
    tempFilePath = os.getcwd() + "/assignments/tempFile.txt"
    with open(tempFilePath, "w+") as tmpFile:
        file = File(tmpFile)
        file.name = "tempFile.txt"
        self.assertEqual(upload_dir + file.__unicode__(),
                         save_file_to_upload_dir(file))
        # file was written to upload directory
        self.assertTrue(os.path.exists(upload_dir + file.__unicode__()))
        # overwrite
        self.assertEqual(upload_dir + file.__unicode__(),
                         save_file_to_upload_dir(file))
        self.assertTrue(os.path.exists(upload_dir + file.__unicode__()))
        os.remove(upload_dir + file.__unicode__())
        self.assertFalse(os.path.exists(upload_dir + file.__unicode__()))
    os.remove(tempFilePath)
    self.assertFalse(os.path.exists(tempFilePath))
def student_register_page(request):
    """Handle the student sign-up form.

    GET renders the blank registration form. A valid POST creates the
    User, a UserProfile with a default student avatar (the bundled
    ``images/student.gif`` renamed per user), and a LessonReference,
    then renders the success page. An invalid POST re-renders the form.
    """
    if request.method == "POST":
        form = RegistrationForm(request.POST)
        if form.is_valid():
            user = User.objects.create_user(
                username=form.cleaned_data["username"],
                password=form.cleaned_data["password1"],
                email=form.cleaned_data["email"],
            )
            # with-statement guarantees the avatar template file is closed
            # even if profile creation raises (the original only closed it
            # on the happy path)
            with open("%s/%s" % (STATIC_ROOT, "images/student.gif"), "rb") as fileinit:
                filea = File(fileinit)
                # keep the original extension of the template image
                ext = filea.name.split(".")[-1]
                filea.name = user.username + "_avatar_student" + "." + ext
                # objects.create() already saves — the old extra save() call
                # was redundant and has been dropped
                user_profile = UserProfile.objects.create(
                    user=user,
                    fullname=form.cleaned_data["fullname"],
                    typeUser="******",
                    avatar=filea
                )
            LessonReference.objects.create(user=user_profile)
            # NOTE(review): success template is the *teacher* signup one —
            # looks copy-pasted; confirm whether a student variant exists.
            return render_to_response("registration/teacher_signup_success.html",
                                      RequestContext(request))
    else:
        form = RegistrationForm()
    variables = RequestContext(request, {"form": form})
    return render_to_response("registration/student_signup.html", variables)
def testSaveFileToUploadDirectory(self):
    """save_file_to_upload_dir() writes the file into UPLOAD_DIR, and
    saving the same file again overwrites it cleanly."""
    from django.conf import settings
    upload_dir = settings.UPLOAD_DIR
    # make sure upload dir can be written to
    self.assertTrue(os.access(upload_dir, os.W_OK))
    # create a temporary file; "wr" was an invalid mode string (ValueError
    # on Python 3) — "w+" creates the file read/write
    tempFilePath = os.getcwd() + "/assignments/tempFile.txt"
    with open(tempFilePath, "w+") as tmpFile:
        file = File(tmpFile)
        file.name = "tempFile.txt"
        self.assertEqual(upload_dir + file.__unicode__(),
                         save_file_to_upload_dir(file))
        # file was written to upload directory
        self.assertTrue(os.path.exists(upload_dir + file.__unicode__()))
        # overwrite
        self.assertEqual(upload_dir + file.__unicode__(),
                         save_file_to_upload_dir(file))
        self.assertTrue(os.path.exists(upload_dir + file.__unicode__()))
        os.remove(upload_dir + file.__unicode__())
        self.assertFalse(os.path.exists(upload_dir + file.__unicode__()))
    os.remove(tempFilePath)
    self.assertFalse(os.path.exists(tempFilePath))
def processFileUploads(filelist):
    '''
    Processes a file upload list, expanding every zip archive into its
    member files. Directory entries and Mac OS X artifacts
    (``__MACOSX``, ``.DS_Store``) inside archives are skipped.

    :param filelist: iterable of uploaded file objects
    :return: dict mapping position index -> processed file object
    '''
    newlist = []
    for upload in filelist:
        if zipfile.is_zipfile(upload):
            # unzip and append each member to the list; the context manager
            # closes the archive (the original leaked the handle), and
            # `archive` avoids shadowing the builtin `zip`
            with zipfile.ZipFile(upload, "r") as archive:
                for member in archive.namelist():
                    logger.debug("Extracting ZipFile: %s" % member)
                    if member.endswith('/'):
                        logger.debug("Skipping directory entry: %s" % member)
                        continue
                    if "__MACOSX" in member or ".DS_Store" in member:
                        logger.debug(
                            "Skipping MAC OS X resource file artifact: %s" % member)
                        continue
                    data = archive.open(member).read()
                    newfile = File(io.BytesIO(data))
                    newfile.name = member
                    newlist.append(newfile)
        else:
            newlist.append(upload)
    return dict(enumerate(newlist))
def processFileUploads(filelist):
    '''
    Processes a file upload list, expanding every zip archive into its
    member files. Directory entries and Mac OS X artifacts
    (``__MACOSX``, ``.DS_Store``) inside archives are skipped.

    :param filelist: iterable of uploaded file objects
    :return: new list with unzipped files (non-archives passed through)
    '''
    newlist = []
    for upload in filelist:
        if zipfile.is_zipfile(upload):
            # unzip and append each member to the list; the context manager
            # closes the archive (the original leaked the handle), and
            # `archive` avoids shadowing the builtin `zip`
            with zipfile.ZipFile(upload, "r") as archive:
                for member in archive.namelist():
                    logger.debug("Extracting ZipFile: %s" % member)
                    if member.endswith('/'):
                        logger.debug("Skipping directory entry: %s" % member)
                        continue
                    if "__MACOSX" in member or ".DS_Store" in member:
                        logger.debug("Skipping MAC OS X resource file artifact: %s" % member)
                        continue
                    data = archive.open(member).read()
                    newfile = File(io.BytesIO(data))
                    newfile.name = member
                    newlist.append(newfile)
        else:
            newlist.append(upload)
    return newlist
def get_steps(*names):
    """Open each named file under DATA_PATH and return the handles wrapped
    as File objects, each renamed to its bare name.

    :param names: file names relative to ``DATA_PATH``
    :return: list of :class:`File` objects, one per name, in order
    """
    steps = []
    for step_name in names:
        handle = open(os.path.join(DATA_PATH, step_name))
        wrapped = File(handle)
        wrapped.name = step_name
        steps.append(wrapped)
    return steps
def data_upload(request):
    """Bulk-import teachers from an uploaded CSV plus a zip of profile
    pictures.

    CSV columns: first name, last name, image name, email, phone, room,
    comma-separated subjects. Rows without an email (column 3) are
    skipped; each teacher is linked to at most 5 subjects.
    """
    if request.method == 'GET':
        # NOTE(review): GET renders 'bulk_upload.html' while every POST
        # path renders 'teacher/bulk_upload.html' — confirm which is right.
        return render(request, 'bulk_upload.html', {})
    data_file = request.FILES['file']
    images_zip = request.FILES['images']
    # Reject the upload if EITHER file has the wrong extension. The
    # original used `and`, so the check only fired when *both* files were
    # wrong and a bad zip paired with a good csv crashed later in ZipFile.
    if not data_file.name.endswith('.csv') or not images_zip.name.endswith('.zip'):
        messages.error(request, 'This is not a csv file')
        return render(request, 'teacher/bulk_upload.html', {})
    data_set = data_file.read().decode('UTF-8')
    io_string = io.StringIO(data_set)
    next(io_string)  # skip the header row
    zipped_files = ZipFile(images_zip)
    image_names = zipped_files.namelist()
    for column in csv.reader(io_string, delimiter=',', quotechar='"'):
        if column[3] == '':
            continue  # no email address: skip this row
        image_name = column[2]
        teacher, created = Teacher.objects.update_or_create(
            first_name=column[0],
            last_name=column[1],
            email_address=column[3],
            phone_number=column[4],
            room_number=column[5])
        if image_name != '' and image_name in image_names:
            # wrap the zipped image bytes as a Django File so it can be
            # assigned to the picture field
            zip_img = zipped_files.read(image_name)
            tmp_file = io.BytesIO(zip_img)
            dummy_file = File(tmp_file)
            dummy_file.name = image_name
            dummy_file.size = len(zip_img)
            dummy_file.file = tmp_file
            teacher.profile_picture = dummy_file
            teacher.save()
        subjects = column[6].split(',')
        # cap at 5 subjects per teacher, counting existing links
        subjects_taught_count = TeacherSubject.objects.filter(
            teacher=teacher).count()
        for subject in subjects:
            if subjects_taught_count > 5:
                break
            subject = subject.strip().lower()
            subject_object, created = Subject.objects.update_or_create(
                title=subject)
            TeacherSubject.objects.update_or_create(teacher=teacher,
                                                    subject=subject_object)
            subjects_taught_count += 1
    messages.success(request, 'Data has been uploaded')
    return render(request, 'teacher/bulk_upload.html', {})
def create_model(self, algorithm, model_file, train_acc_file=None, train_loss_file=None, preset_dataset=False): from django.core.files.base import File # normal django_file = File(model_file) raise NotImplementedError django_file.name = folder_name + "model.joblib" print(django_file.name) model = Model( algorithm=algorithm, person=algorithm.selected_person, # can be null dataset=algorithm.selected_dataset, file=django_file) model.save()
def _create_django_file(self, file_path, folder_name, file_name):
    """Open *file_path* in binary mode and return it wrapped as a Django
    File named ``<folder_name>/<file_name>``."""
    from django.core.files.base import File
    handle = open(file_path, "rb")
    wrapped = File(handle)
    wrapped.name = folder_name + "/" + file_name
    return wrapped
def train_algorithm_on_hass_instance(self, request):
    """Train the selected algorithm on a Home Assistant data instance for
    the person chosen in the request, then persist the artifacts.

    Workflow: select person/dataset, train via a controller, dump model
    and loss artifacts into a tmp media folder, wrap them as Django files
    renamed into the final folder, save a Model record, remove tmp copies.

    :param request: HTTP request; POST must contain "person_select"
    """
    # get web model of dataset
    person_name = request.POST.get("person_select", "")
    algo = self.get_sel_algorithm()
    # todo change this by adding an option in front end to choose data instance
    datainstance = DataInstance.objects.filter(id=2)[0]
    dataset = Dataset.objects.filter(name="homeassistant")[0]
    person = Person.objects.filter(name=person_name)[0]
    algo.selected_person = person
    algo.selected_dataset = dataset
    algo.save()
    model_name = self._create_model_name(algo, person, datainstance)
    # build tmp paths for every artifact the training run produces
    folder_name = self._generate_folder_name(algorithm=algo, preset_dataset=False)
    tmp_folder_name = 'tmp/' + folder_name
    tmp_model_file_path = self.get_media_file_path(tmp_folder_name, MODEL_FILE_NAME)
    self._create_media_model_folder_if_not_exists(tmp_folder_name)
    tmp_model_image_file_path = self.get_media_file_path(
        tmp_folder_name, MODEL_IMG_NAME)
    tmp_loss_file_path = self.get_media_file_path(tmp_folder_name, TRAIN_LOSS_FILE_NAME)
    tmp_loss_image_file_path = self.get_media_file_path(
        tmp_folder_name, TRAIN_LOSS_IMG_NAME)
    tmp_acc_file_path = self.get_media_file_path(tmp_folder_name, TRAIN_ACC_FILE_NAME)
    ctrl = self._create_ctrl_for_hass_instance(algo, person, datainstance, model_name)
    ctrl.init_model_on_dataset(model_name)
    ctrl.register_benchmark(model_name)
    ctrl.register_loss_file_path(tmp_loss_file_path, model_name)
    ctrl.train_model(model_name)
    # workaround, saving the file beforehand and loading it again
    # because joblib doesn't support buffer stuff
    #self._clean_tmp_folder()
    ctrl.save_model(tmp_model_file_path, model_name)
    ctrl.save_plot_trainloss(model_name, tmp_loss_image_file_path)
    # todo make this work
    #ctrl.save_visualization_to_file(tmp_model_image_file_path, model_name)
    from django.core.files.base import File
    from django.core.files.images import ImageFile
    # wrap the tmp artifacts as Django files, renamed into the final
    # (non-tmp) media folder
    django_model_file = File(open(tmp_model_file_path, "rb"))
    django_model_file.name = folder_name + "/" + MODEL_FILE_NAME
    #django_model_img = ImageFile(open(tmp_model_image_file_path, "rb"))
    #django_model_img.name = folder_name + "/" + MODEL_IMG_NAME
    django_loss_file = File(open(tmp_loss_file_path, "rb"))
    django_loss_file.name = folder_name + "/" + TRAIN_LOSS_FILE_NAME
    django_loss_image = ImageFile(open(tmp_loss_image_file_path, "rb"))
    django_loss_image.name = folder_name + "/" + TRAIN_LOSS_IMG_NAME
    print(django_model_file.name)
    model = Model(
        algorithm=algo,
        person=algo.selected_person,  # can be null
        dataset=algo.selected_dataset,
        datainstance=datainstance,
        file=django_model_file,
        #visualization=django_model_img,
        visualization=None,
        train_loss=django_loss_file,
        train_loss_graph=django_loss_image
        #train_acc=django_acc_file,
    )
    model.save()
    # delete the tmp copies once the Model record has been saved
    os.remove(tmp_model_file_path)
    os.remove(tmp_loss_file_path)
def train_algorithm_on_preset_dataset(self, request):
    """Train the selected algorithm on a preset (bundled) dataset, then
    persist the artifacts.

    Workflow: select the dataset named in the request, train via a
    controller, dump model/loss/visualization artifacts into a tmp media
    folder, wrap them as Django files renamed into the final folder,
    save a Model record, remove tmp copies.

    :param request: HTTP request; POST must contain "dataset_select"
    """
    dataset_name = request.POST.get("dataset_select", "")
    algo = self.get_sel_algorithm()
    dataset = Dataset.objects.filter(name=dataset_name)[0]
    algo.selected_dataset = dataset
    algo.save()
    self._train_algorithm_on_preset_dataset_forehand_cleanup(algo=algo)
    # build tmp paths for every artifact the training run produces
    folder_name = self._generate_folder_name(algorithm=algo, preset_dataset=True)
    tmp_folder_name = 'tmp/' + folder_name
    tmp_model_file_path = self.get_media_file_path(tmp_folder_name, MODEL_FILE_NAME)
    self._create_media_model_folder_if_not_exists(tmp_folder_name)
    tmp_model_image_file_path = self.get_media_file_path(
        tmp_folder_name, MODEL_IMG_NAME)
    tmp_loss_file_path = self.get_media_file_path(tmp_folder_name, TRAIN_LOSS_FILE_NAME)
    tmp_loss_image_file_path = self.get_media_file_path(
        tmp_folder_name, TRAIN_LOSS_IMG_NAME)
    tmp_acc_file_path = self.get_media_file_path(tmp_folder_name, TRAIN_ACC_FILE_NAME)
    ctrl = self._create_ctrl_for_normal_dataset(algo, dataset)
    ctrl.init_model_on_dataset()
    ctrl.register_benchmark()
    #ctrl.register_loss_file_path('/home/cmeier/code/tmp/kasteren/train_loss.log')
    ctrl.register_loss_file_path(tmp_loss_file_path)
    print('~' * 100)
    print('~' * 100)
    print('loss_fn: ', tmp_loss_file_path)
    # todo set file path to acc logs in ctrl
    # todo set file path to train logs in ctrl
    #ctrl.register_acc_file_path(tmp_loss_file_path)
    #ctrl.register_acc_file_path(tmp_acc_file_path)
    #print('acc_fn: ', tmp_acc_file_path)
    #print('~'*100)
    #print('~'*100)
    ctrl.train_model([False])
    # workaround, saving the file beforehand and loading it again
    # because joblib doesn't support buffer stuff
    #self._clean_tmp_folder()
    ctrl.save_model(tmp_model_file_path)
    ctrl.save_loss_plot_to_file(tmp_loss_image_file_path)
    ctrl.save_visualization_to_file(tmp_model_image_file_path)
    from django.core.files.base import File
    from django.core.files.images import ImageFile
    # wrap the tmp artifacts as Django files, renamed into the final
    # (non-tmp) media folder
    django_model_file = File(open(tmp_model_file_path, "rb"))
    django_model_file.name = folder_name + "/" + MODEL_FILE_NAME
    django_model_img = ImageFile(open(tmp_model_image_file_path, "rb"))
    django_model_img.name = folder_name + "/" + MODEL_IMG_NAME
    #django_acc_file = File(open(tmp_acc_file_path, "rb"))
    #django_acc_file.name = folder_name + TRAIN_ACC_FILE_NAME
    django_loss_file = File(open(tmp_loss_file_path, "rb"))
    django_loss_file.name = folder_name + "/" + TRAIN_LOSS_FILE_NAME
    django_loss_image = ImageFile(open(tmp_loss_image_file_path, "rb"))
    django_loss_image.name = folder_name + "/" + TRAIN_LOSS_IMG_NAME
    print(django_model_file.name)
    model = Model(
        algorithm=algo,
        person=algo.selected_person,  # can be null
        dataset=algo.selected_dataset,
        file=django_model_file,
        visualization=django_model_img,
        train_loss=django_loss_file,
        train_loss_graph=django_loss_image
        #train_acc=django_acc_file,
    )
    model.save()
    # delete the tmp copies once the Model record has been saved
    os.remove(tmp_model_file_path)
    os.remove(tmp_loss_file_path)
def generate_part_doc_links(request, product, parent_ctrl, instances, doc3D, inbulk_cache):
    """
    Recursively decompose *product* into Parts and Document3Ds.

    :param product: :class:`.Product` that represents the arborescense
    :param parent_ctrl: :class:`.Part` from which we want to realize the decomposition
    :param instances: Use to trace the items to update

    Parses forms and generates:

    - The bom-child of Parts (in relation to the **product**)

    - For every :class:`.ParentChildLink` generated in the previous condition we
      attach all the :class:`.Location_link` relatives

    - To every generated :class:`.Part` a :class:`.Document3D` has been attached
      and the Document3D has been set as the attribute PartDecompose of the Part

    - The attribute doc_id of every node of the arborescense (**product**) is now
      the relative id of the :class:`.DocumentFile` generated in the previous condition

    - To every generated :class:`.Document3D` a new empty (locked)
      :class:`.DocumentFile` STP has been added ( :meth:`.generateGhostDocumentFile` )

    - The attribute doc_path of every node of the arborescense (**product**) is now
      the path of the :class:`.DocumentFile` STP generated in the previous condition
    """
    to_delete = []
    user = parent_ctrl._user
    company = pmodels.User.objects.get(username=settings.COMPANY)
    # non-STEP files of the source document: candidates to be moved onto
    # the generated Document3Ds
    other_files = list(doc3D.files.exclude(models.is_stp))
    for link in product.links:
        try:
            # quantity/order/unit for the BOM link, posted per visited node
            oq = forms.Order_Quantity_Form(request.POST, prefix=link.visited)
            oq.is_valid()
            options = oq.cleaned_data
            order = options["order"]
            quantity = options["quantity"]
            unit = options["unit"]
            if not link.product.part_to_decompose:
                # first visit of this sub-product: create its Part + Document3D
                part_ctype = forms.Doc_Part_type_Form(request.POST, prefix=link.product.visited)
                part_ctype.is_valid()
                options = part_ctype.cleaned_data
                cls = get_all_plmobjects()[options["type_part"]]
                part_form = pforms.get_creation_form(user, cls, request.POST,
                        inbulk_cache=inbulk_cache, prefix=str(link.product.visited)+"-part")
                part_ctrl = parent_ctrl.create_from_form(part_form, user, True, True)
                instances.append((part_ctrl.object._meta.app_label,
                        part_ctrl.object._meta.module_name, part_ctrl.object._get_pk_val()))
                c_link = parent_ctrl.add_child(part_ctrl.object, quantity, order, unit)
                models.generate_extra_location_links(link, c_link)
                doc_form = pforms.get_creation_form(user, models.Document3D, request.POST,
                        inbulk_cache=inbulk_cache, prefix=str(link.product.visited)+"-document")
                doc_ctrl = models.Document3DController.create_from_form(doc_form,
                        user, True, True)
                link.product.part_to_decompose = part_ctrl.object
                # the ghost STP DocumentFile is locked; remember it so the
                # except-branch can hand it back for rollback
                to_delete.append(generateGhostDocumentFile(link.product, doc_ctrl.object, company))
                instances.append((doc_ctrl.object._meta.app_label,
                        doc_ctrl.object._meta.module_name, doc_ctrl.object._get_pk_val()))
                part_ctrl.attach_to_document(doc_ctrl.object)
                new_Doc3D = doc_ctrl.object
                new_Doc3D.PartDecompose = part_ctrl.object
                new_Doc3D.no_index = True
                new_Doc3D.save()
                for doc_file in other_files:
                    filename, ext = os.path.splitext(doc_file.filename)
                    # add files with the same name (for example a .sldXXX
                    # or .CATXXX file) and deprecate the originals
                    if filename == link.product.name:
                        f = File(doc_file.file)
                        f.name = doc_file.filename
                        f.size = doc_file.size
                        df = doc_ctrl.add_file(f, False, False)
                        if doc_file.thumbnail:
                            doc_ctrl.add_thumbnail(df, File(doc_file.thumbnail))
                        instances.append((df._meta.app_label, df._meta.module_name, df.pk))
                        instances.append((doc_file._meta.app_label,
                                doc_file._meta.module_name, doc_file.pk))
                        doc_file.no_index = True
                        doc_file.deprecated = True
                        doc_file.save()
                # recurse into the freshly decomposed sub-product
                generate_part_doc_links(request, link.product, part_ctrl, instances,
                        doc3D, inbulk_cache)
            else:
                # already decomposed on an earlier visit: just add the BOM link
                c_link = parent_ctrl.add_child(link.product.part_to_decompose,
                        quantity, order, unit)
                models.generate_extra_location_links(link, c_link)
        except Exception:
            # abort the whole decomposition; to_delete lets the caller undo
            # the ghost files created so far
            raise models.Document_Generate_Bom_Error(to_delete, link.product.name)