def create_monochrome_logo(request, company):
    """Create a 1-bit monochrome version of a company's color logo.

    Looks up the company by url_name, resizes a copy of its color logo to
    the configured monochrome dimensions, reduces it to 1-bit depth, saves
    it next to the original with a '_monochrome' suffix, attaches it to the
    monochrome_logo field and returns a JSON payload with the new URL.
    """
    c = Company.objects.get(url_name=company)

    if not c.color_logo:  # nothing to convert from
        return JsonError(_("Color logo does not exist"))

    # get company's color logo
    color_logo = Image.open(c.color_logo.path)

    # resize it to monochrome_logo dimension
    black_logo = resize_image(color_logo.copy(),
                              g.IMAGE_DIMENSIONS['monochrome_logo'], 'fit')

    # reduce color depth to 1 bit per pixel
    black_logo = black_logo.convert(mode='1')

    # create a new path for the monochrome logo
    new_path = os.path.splitext(c.color_logo.path)[0]
    new_path = new_path + '_monochrome.' + g.MISC['image_format']

    # save to the new path
    black_logo.save(new_path, g.MISC['image_format'], bits=1)

    # hand the result to the django FileField; open in *binary* mode (it
    # is image data -- the original used text mode) and make sure the
    # handle is closed even if save() raises
    with open(new_path, 'rb') as fh:
        c.monochrome_logo.save('new', File(fh))

    # return an url to the new logo
    return JsonResponse({'status': 'ok', 'logo_url': c.monochrome_logo.url})
def some_view(request):
    """Generate a one-page 'Hello world.' PDF and return it as a download.

    Also writes a small marker file to /tmp and hands it to upload()
    (side effect preserved from the original implementation).
    """
    # Create the HttpResponse object with the appropriate PDF headers.
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="somefilename.pdf"'

    # Build the PDF in memory; try/finally guarantees the buffer is
    # released even if ReportLab raises (original leaked it on error).
    buffer = BytesIO()
    try:
        # Create the PDF object, using the BytesIO object as its "file."
        p = canvas.Canvas(buffer)

        # Draw things on the PDF. Here's where the PDF generation happens.
        # See the ReportLab documentation for the full list of functionality.
        p.drawString(100, 100, "Hello world.")

        # Close the PDF object cleanly.
        p.showPage()
        p.save()

        # Get the value of the BytesIO buffer and write it to the response.
        response.write(buffer.getvalue())
    finally:
        buffer.close()

    # Side effect: drop a marker file and pass it to upload(); the
    # with-block keeps the handle open for the duration of the upload.
    with open('/tmp/hello.world', 'w') as f:
        myfile = File(f)
        myfile.write('Hello World')
        upload(myfile, "test")

    return response
def _unserialize(self, value, as_type):
    """Convert a stored (string) setting *value* back into *as_type*.

    Branch order matters: already-converted values and None pass through
    first, then scalars, JSON containers, booleans, files, date/time
    types, and finally model lookups.  Unknown types fall through and
    return the raw value unchanged.
    """
    if as_type is not None and isinstance(value, as_type):
        # already the requested type -- nothing to do
        return value
    elif value is None:
        return None
    elif as_type == int or as_type == float or as_type == decimal.Decimal:
        return as_type(value)
    elif as_type == dict or as_type == list:
        # containers are persisted as JSON text
        return json.loads(value)
    elif as_type == bool or value in ('True', 'False'):
        # NOTE(review): the second condition converts 'True'/'False'
        # strings to bool even when a *different* as_type was requested;
        # presumably intentional -- confirm before relying on it.
        return value == 'True'
    elif as_type == File:
        try:
            # value[7:] strips a fixed 7-char prefix (looks like
            # 'file://' -- TODO confirm against the serializer side)
            f = open(os.path.join(settings.MEDIA_ROOT, value[7:]), 'r')
            fi = File(f)
            fi.url = urljoin(settings.MEDIA_URL, value[7:])
            return fi
        except OSError:
            # missing file is reported as False rather than raising
            return False
    elif as_type == datetime:
        return dateutil.parser.parse(value)
    elif as_type == date:
        return dateutil.parser.parse(value).date()
    elif as_type == time:
        return dateutil.parser.parse(value).time()
    elif as_type is not None and issubclass(as_type, Versionable):
        # versioned models are referenced by identity, not pk
        return as_type.objects.current.get(identity=value)
    elif as_type is not None and issubclass(as_type, Model):
        return as_type.objects.get(pk=value)
    return value
def get_data(request, type):
    """Serve a raw IPv6 UDP or TCP socket based on *type*.

    NOTE(review): Python 2 code (print statements).  Both the UDP and
    TCP branches loop forever and never return an HttpResponse, so this
    view blocks the worker indefinitely -- confirm this is meant to run
    as a long-lived listener rather than a normal request handler.
    """
    if type=="udp":
        udpT6Server = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
        print "UDP TCP IPv6 Mode Start....."
        udpT6Server.bind(('bbbb::212:4b00:205:f000', 5678))
        print "UDP Server Start"
        while True:
            udpT4Data, udpT6ServerInfo = udpT6Server.recvfrom(1024)
            data = udpT4Data
            source_ip = udpT6ServerInfo[0]
            source_port = udpT6ServerInfo[1]
            rlist = [data, source_ip, source_port]
            print data, source_ip, source_port
            # NOTE(review): 'w' mode rewrites data.json on every
            # datagram, so only the most recent packet is kept --
            # confirm that is intended.
            with open('data.json', 'w') as f:
                myfile = File(f)
                myfile.write(json.dumps({"data": rlist[0], "ip": rlist[1], "port":rlist[2]}))
            print "save"
    elif type=="tcp":
        tcpT6Server = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        print "Server Socket Created......."
        tcpT6Server.bind(('bbbb::212:4b00:205:f000', 5678))
        print "Wating for connecting......."
        tcpT6Server.listen(5)
        while True:
            clientSock, clientaddr = tcpT6Server.accept()
            print "Connected from: ", clientSock.getpeername()
            # greeting is sent; NOTE(review): the client socket is never
            # closed, leaking one descriptor per connection
            clientSock.send('Congratulations......')
    else:
        return HttpResponse("Please choose a protocol")
def scale_img(f, name, max_px, dim):
    """Scale an image down so its constrained dimension(s) do not exceed
    max_px, returning the result as a PNG wrapped in a django File.

    dim == 'h'    : constrain height only
    dim == 'both' : constrain width and height, keeping aspect ratio

    Images already small enough (and unrecognized dim values falling out
    of the 'h' branch) are returned unchanged.

    Raises:
        ValueError: if the file is not a valid image.
    """
    try:
        img = Image.open(f, 'r')
    except IOError:
        raise ValueError('invalid image file')
    (w, h) = img.size
    if dim == 'h':
        if h > max_px:
            # int(): under true division the scaled size is a float,
            # which PIL's resize() rejects
            w = int(max_px * w / h)
            h = max_px
        else:
            return f
    elif dim == 'both':
        if w > max_px or h > max_px:
            if w > h:
                h = int(max_px * h / w)
                w = max_px
            else:
                w = int(max_px * w / h)
                h = max_px
        else:
            return f
    scaled_img = img.resize((w, h), Image.ANTIALIAS)
    scaled_buffer = StringIO()
    scaled_img.save(scaled_buffer, 'PNG')
    scaled_f = File(scaled_buffer, name = name + '.png')
    # File.size normally comes from storage; for an in-memory buffer it
    # must be set explicitly (private API of django File)
    scaled_f._set_size(len(scaled_buffer.getvalue()))
    return scaled_f
def get_directorios(self):
    """Walk self._raiz importing every 'DAI*' directory containing files
    as one enc004previo row with a det004previo per file.

    Returns a summary string with the number of directories imported.
    """
    mask_previos_ = re.compile('^DAI*')
    cont_ = 0
    for (dirpath, dirnames, filenames) in os.walk(self._raiz):
        for dirname in dirnames:
            if not mask_previos_.match(dirname):
                continue
            archivos_ = self.get_archivos(os.path.join(dirpath, dirname))
            if len(archivos_) == 0:
                continue
            cont_ += 1
            previo_ = enc004previo()
            previo_.referencia = dirname
            previo_.save()
            for archivo_ in archivos_:
                try:
                    fh = open(archivo_)
                    try:
                        det_ = det004previo()
                        det_.previo = previo_
                        f_ = File(fh)
                        # base name without extension ('\\'-separated paths)
                        f_.name = os.path.splitext(archivo_)[0].split('\\')[-1]
                        det_.archivo = f_
                        det_.nombre_original = os.path.splitext(archivo_)[0].split('\\')[-1]
                        det_.tipo = os.path.splitext(archivo_)[1]
                        det_.nuevo_nombre = nombre_aleatorio()
                        det_.save()
                    finally:
                        # the original leaked one descriptor per file
                        fh.close()
                except IOError:
                    # unreadable files are skipped, as before
                    pass
    return 'Directorios Importados: %s ' % cont_
def inbound_email(request):
    """Handle a Postmark inbound-email webhook: attach the first
    attachment to the blog post identified by the subject's hashkey."""
    raw_data = request.raw_post_data
    # parse once up front (also validates the payload is JSON)
    data = json.loads(raw_data)
    inbound = PostmarkInbound(json=raw_data)

    if not inbound.has_attachments():
        message = "ERROR! No attachments"
        logging.debug(message)
        return http.HttpResponse(message)

    try:
        hashkey, subject = inbound.subject().split(":", 1)
    except ValueError:
        message = "ERROR! No hashkey defined in subject line"
        logging.debug(message)
        return http.HttpResponse(message)

    try:
        post = BlogItem.get_by_inbound_hashkey(hashkey)
    except BlogItem.DoesNotExist:
        message = "ERROR! Unrecognized hashkey"
        logging.debug(message)
        return http.HttpResponse(message)

    # only the first attachment is stored
    attachment = inbound.attachments()[0]
    blogfile = BlogFile(blogitem=post, title=subject.strip())
    wrapped = File(StringIO(attachment.read()), name=attachment.name())
    wrapped.size = attachment.content_length()
    blogfile.file.save(attachment.name(), wrapped, save=True)
    blogfile.save()
    return http.HttpResponse("OK\n")
class MessageTest(ModelTest):
    """Model-level tests for creating seed and response Messages."""

    def setUp(self):
        super(MessageTest, self).setUp()
        self.chain = mommy.make(Chain)
        # audio fixture backing the models.FileField
        wav_path = Path(settings.APP_DIR, 'grunt/tests/media/test-audio.wav')
        self.audio = File(open(wav_path, 'rb'))

    def tearDown(self):
        super(MessageTest, self).tearDown()
        self.audio.close()

    def test_make_a_seed_message(self):
        """Make a seed message: a message without a parent."""
        seed_message = Message(chain=self.chain, audio=self.audio)
        seed_message.full_clean()
        seed_message.save()

    def test_make_a_response_message(self):
        """Make a message."""
        seed = mommy.make_recipe('grunt.seed')
        response_message = Message(parent=seed, audio=self.audio)
        response_message.full_clean()
        response_message.save()
        self.assertEquals(response_message.parent, seed)
def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
    """A chunk size of 4 puts the read boundary immediately after the
    '\\r' in b'one\\r'; iteration must still split on the \\r newlines."""
    mac_file = File(BytesIO(b'one\rtwo\rthree'))
    mac_file.DEFAULT_CHUNK_SIZE = 4
    self.assertEqual(list(mac_file), [b'one\r', b'two\r', b'three'])
def extractFile(fileObject, request):
    """If fileObject wraps a .zip archive, extract it under MEDIA_ROOT's
    parent directory, register each extracted file as a FilebabyFile for
    the requesting user, then delete the archive.

    Returns True when an archive was processed, False otherwise.
    """
    if fileObject.f.name.endswith(".zip"):
        # Convert file and dir into absolute paths
        fullpath = os.path.join(settings.MEDIA_ROOT, fileObject.f.name)
        dirname = os.path.dirname(settings.MEDIA_ROOT)
        # 'rb': a zip archive is binary data (text mode corrupts it on
        # Windows and fails under Python 3); with-block closes it.
        with open(fullpath, 'rb') as fullpathhandle:
            # Unzip the file, creating subdirectories as needed
            zfobj = zipfile.ZipFile(fullpathhandle)
            for name in zfobj.namelist():
                if name.endswith('/'):
                    try:
                        # Don't try to create a directory if it exists
                        os.mkdir(os.path.join(dirname, name))
                    except OSError:
                        pass
                else:
                    data = zfobj.read(name)
                    outfile = File(open(os.path.join(dirname, name), 'wb+'))
                    outfile.write(data)
                    # BUG FIX: the original hashed outfile.read() right
                    # after writing -- the file position was at EOF, so
                    # every record stored the md5 of the empty string.
                    file_o = FilebabyFile(f=name,
                                          username=request.user.username,
                                          md5=hashlib.md5(data).hexdigest())
                    file_o.save()
                    outfile.close()
        deleteFromOS(fullpath)
        fileObject.delete()
        return True
    return False
def konto_action_handler(request):
    """Dispatch the kontoplaan (chart of accounts) form actions:
    import from an uploaded CSV or export to kontoplaan.csv.
    Python 2 code (dict.has_key)."""
    template = 'kontoplaan.html'
    go_home = False
    context = {}
    if request.method == 'POST':
        if request.POST.has_key('action'):
            if request.POST['action'] == u'Loe kontoplaan':
                request.encoding = 'utf-8'
                if request.FILES.has_key('cvs'):
                    wf = request.FILES['cvs']
                    f = File(wf)
                    kl = KontoLoader()
                    kl.import_kontoplaan(f)
                    go_home = True
            elif request.POST['action'] == u'Kirjuta kontoplaan faili':
                f = codecs.open('kontoplaan.csv', mode="w", encoding="utf8")
                try:
                    kl = KontoLoader()
                    if kl.export_kontoplaan(f):
                        go_home = True
                finally:
                    # the original leaked the handle whenever
                    # export_kontoplaan returned falsy or raised
                    f.close()
            else:
                go_home = True
    if go_home:
        result = redirect(kontoplaan)
    else:
        result = render_to_response(template, context,
                                    context_instance=RequestContext(request))
    return result
def create_instance(model_class, image_name):
    """Create and save a model_class instance whose original_image field
    holds the shared test image, stored under image_name."""
    obj = model_class()
    image_file = File(get_image_file())
    obj.original_image.save(image_name, image_file)
    obj.save()
    image_file.close()
    return obj
def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
    """A chunk size of 4 splits b'one\\r\\n' between the '\\r' and the
    '\\n'; iteration must keep each \\r\\n pair with its line."""
    win_file = File(BytesIO(b'one\r\ntwo\r\nthree'))
    win_file.DEFAULT_CHUNK_SIZE = 4
    self.assertEqual(list(win_file), [b'one\r\n', b'two\r\n', b'three'])
def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
    """A chunk size of 4 puts the read boundary right after the '\\n'
    in b"one\\n"; iteration must still split on the \\n newlines."""
    unix_file = File(BytesIO(b"one\ntwo\nthree"))
    unix_file.DEFAULT_CHUNK_SIZE = 4
    self.assertEqual(list(unix_file), [b"one\n", b"two\n", b"three"])
def mail_from_postmark(request):
    """Postmark inbound webhook: persist the mail as an Inboundmail row,
    writing any attachments under MEDIA_ROOT/attachments/."""
    if request.method == 'POST':
        json_data = request.body
        inbound = PostmarkInbound(json=json_data)
        if inbound.has_attachments():
            names = []
            for attachment in inbound.attachments():
                name = attachment.name()
                name1 = settings.MEDIA_URL + 'attachments/' + name
                name2 = settings.MEDIA_ROOT + '/attachments/' + name
                names.append(name1)
                # 'wb': attachment content is raw bytes; the original's
                # text mode would corrupt binary attachments
                with open(name2, 'wb') as f:
                    File(f).write(attachment.read())
            mail = Inboundmail(html_body=inbound.text_body(),
                               send_date=inbound.send_date(),
                               subject=inbound.subject(),
                               reply_to=inbound.reply_to(),
                               sender=inbound.sender(),
                               attachment=','.join(names))
        else:
            mail = Inboundmail(html_body=inbound.text_body(),
                               send_date=inbound.send_date(),
                               subject=inbound.subject(),
                               reply_to=inbound.reply_to(),
                               sender=inbound.sender())
        mail.save()
        return HttpResponse('OK')
    else:
        return HttpResponse('not OK')
def writeAssertionFile(self):
    """Serialize this assertion to JSON and write it to its assertion
    path, closing the file when done.

    BUG FIX: the original ended with `localFile.closed` / `f.closed`,
    which merely *read* the closed attribute instead of calling
    close() -- the handle was never closed.  The with-block closes it.
    """
    data = json.dumps(self.serialize())
    with open(self.getAssertionPath(), 'w') as f:
        File(f).write(data)
def writeIssuerFile(self):
    """Serialize this issuer to JSON and write it to its issuer path,
    closing the file when done.

    BUG FIX: the original ended with `localFile.closed` / `f.closed`,
    which merely *read* the closed attribute instead of calling
    close() -- the handle was never closed.  The with-block closes it.
    """
    data = json.dumps(self.serialize())
    with open(self.getIssuerPath(), 'w') as f:
        File(f).write(data)
def UploadFile(request):
    """Store an uploaded file under ../Server/Files/<user>/<path>,
    replacing any previous DB record for the same name, and record its
    md5 hash and size."""
    # manipulating the header to get vars
    uFile = File(request.FILES['file'])
    uname = request.user.username
    path = request.DATA['path']

    # logging
    logDict = {}
    logDict['User'] = request.user.username
    logDict['Action'] = 'UploadFile'
    logDict['HTTP'] = 'POST'
    logDict['File'] = request.DATA['path'] + uFile.name
    logger.info(json.dumps(logDict))

    # deleting stale obj in db for the same file name
    query = ODFile.objects.filter(name=request.user,
                                  fileName=path + uFile.name).delete()

    # writing file and creating md5 hash
    md5 = hashlib.md5()
    if not os.path.exists('../Server/Files/' + uname + '/' + path):
        os.makedirs('../Server/Files/' + uname + '/' + path)
    # 'wb+': upload chunks are bytes -- the original's text mode 'w+'
    # corrupts binary uploads on Windows and fails under Python 3
    with open('../Server/Files/' + uname + '/' + path + uFile.name, 'wb+') as destination:
        for chunk in uFile.chunks():
            md5.update(chunk)
            destination.write(chunk)
    f = ODFile(fileName=path + uFile.name.decode("utf-8"),
               name=request.user,
               fileHash=md5.hexdigest().decode("utf-8"),
               fileSize=uFile.size)
    f.save()
    return HttpResponse(constants.h_uploadFile_success)
def set_contents(self, data):
    """Replace the file's contents with *data*, closing any previously
    opened handle first.

    The with-block guarantees the new handle is closed even when
    write() raises (the original leaked it on error).
    """
    if self._file is not None:
        self._file.close()
        self._file = None
    with open(self.path, "w") as fh:
        File(fh).write(data)
class CumulusTests(TestCase):
    """Exercise the Cloud Files storage backend through the File API."""

    def setUp(self):
        "Set up tiny files to test with."
        here = os.path.dirname(__file__)
        self.image = ImageFile(open(os.path.join(here, "image_300x200.gif"), "rb"))
        self.document = File(open(os.path.join(here, "text_file.txt"), "r"))

    def test_file_api(self):
        """
        Make sure we can perform the following using the Django File API:
            - Upload the test files
            - Access common file attributes
            - Delete the test files
        """
        self.thing = Thing.objects.create(image=self.image,
                                          document=self.document)

        # image attributes round-trip
        self.assertEqual(self.thing.image.width, 300)
        self.assertEqual(self.thing.image.height, 200)
        self.assertEqual(self.thing.image.size, 976)
        self.assert_(
            "cdn.cloudfiles.rackspacecloud.com" in self.thing.image.url,
            "URL is not a valid Cloud Files CDN URL."
        )

        # document attributes round-trip
        self.assertEqual(self.thing.document.size, 31)
        self.assert_(
            "cdn.cloudfiles.rackspacecloud.com" in self.thing.document.url,
            "URL is not a valid Cloud Files CDN URL."
        )

        self.thing.delete()

    def tearDown(self):
        self.document.close()
        self.image.close()
def report(request):
    """Persist a user-submitted error report as an XML file under
    reports/ and re-render the report page."""
    form = ReportErrorForm(request.POST)
    if not form.is_valid():
        return navbar_render(request, 'report.html')

    messages = [{'tags': 'success',
                 'text': u"Takk for at du hjelper til med å gjøre denne siden bedre!"}]

    # pick a uuid-based filename that does not collide with an existing one
    file_path = 'reports/' + str(uuid.uuid4()) + '.xml'
    while os.path.isfile(file_path):
        file_path = 'reports/' + str(uuid.uuid4()) + '.xml'

    xml_file = File(open(file_path, 'w+'))
    lines = (
        u"<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
        u"<!DOCTYPE bank SYSTEM \"report.dtd\">",
        u"<report xmlns=\"http://www.w3schools.com\"",
        u"\txmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"",
        u"\txsi:schemaLocation=\"report.xsd\">",
        u"\t<course>",
        u"\t\t" + form.cleaned_data['course_code'],
        u"\t</course>",
        u"\t<semester>",
        u"\t\t" + form.cleaned_data['semester_code'],
        u"\t</semester>",
        u"\t<description>",
        u"\t\t" + form.cleaned_data['description'],
        u"\t</description>",
        u"</report>"
    )
    xml_file.write(u'\n'.join(lines).encode('utf8'))
    xml_file.close()
    return navbar_render(request, 'report.html', {'messages': messages})
def _open(self, path, mode="rb"): """ Return a memory only file which will not be saved to disk If an image is requested, fake image content using PIL """ if not self.exists(path): raise OSError("Mockstorage: '%s' No such file or directory." % path) if path in self.saved_files.keys(): return self.saved_files[path] if os.path.splitext(path)[1].lower() in [".jpg", ".png", ".gif", ".bmp"]: # 1px test image binary_image_data = "\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\tpHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\x00\x00\x07tIME\x07\xdb\x0c\x17\x020;\xd1\xda\xcf\xd2\x00\x00\x00\x0cIDAT\x08\xd7c\xf8\xff\xff?\x00\x05\xfe\x02\xfe\xdc\xccY\xe7\x00\x00\x00\x00IEND\xaeB`\x82" img = BytesIO(binary_image_data) mockfile = File(img) mockfile.name = "MOCKED_IMAGE_" + path else: content = "mock content" # If a predefined fake file is asked for, return predefined content filename = os.path.split(path)[1] for content_name in self.FAKE_FILES: mockfilename = content_name["filename"] mockcontent = content_name["content"] if filename == mockfilename: content = mockcontent mockfile = File(StringIO.StringIO(content)) mockfile.name = "MOCKED_FILE_" + path return mockfile
def create_document_thumbnail(object_id):
    """
    Runs the create_thumbnail logic on a document: saves a thumb via the
    model helper and writes a preview JPEG under MEDIA_ROOT/thumbs/.
    Silently returns when the document does not exist.
    """
    try:
        document = Document.objects.get(id=object_id)
    except Document.DoesNotExist:
        return

    image = document._render_thumbnail('thumb')
    filename = 'doc-%s-thumb.jpg' % document.id
    document.save_thumbnail(filename, image)

    thumb_folder = 'thumbs'
    preview = document._render_thumbnail('preview')
    filenamePreview = 'doc-%s-preview.jpg' % document.id
    upload_path = os.path.join(settings.MEDIA_ROOT, thumb_folder)
    if not os.path.exists(upload_path):
        os.makedirs(upload_path)
    # 'wb': the rendered preview is JPEG bytes; the original's text mode
    # would corrupt it on Windows and fail under Python 3
    with open(os.path.join(upload_path, filenamePreview), 'wb') as f:
        thumbnail = File(f)
        thumbnail.write(preview)
def gen_online(self):
    """ This function returns the HTTP response with the STL file """
    self.gen_offline()
    if self.points.shape[0] == 0:
        # nothing to render
        return None

    # Print to dat file
    self._seek_all()
    self.write_points_to_file(self.data_file)

    # Generate STL file by shelling out to openscad from the data dir
    # (cwd is restored afterwards)
    current_dir = os.getcwd()
    os.chdir(os.path.dirname(self.data_file.name))
    self.write_scad_to_file(self.scad_file, self.data_file.name)
    cmd = 'openscad -o ' + os.path.basename(self.stl_file.name) + ' ' + os.path.basename(self.scad_file.name)
    self._seek_all()
    # NOTE(review): the return code is ignored -- a failed openscad run
    # silently produces an empty/stale STL download
    ret = call(cmd, shell=True)
    os.chdir(current_dir)

    # Copy STL file to HttpResponse
    response = HttpResponse(content_type='application/txt')
    response['Content-Disposition'] = 'attachment; filename="%s.stl"' \
        % os.path.splitext(os.path.basename(self.output_file_name))[0]

    # Copy file over to the download file: the File wrapper around the
    # response streams STL lines straight into the body
    self._seek_all()
    f = FileDj(response)
    for line in self.stl_file:
        f.write(line)
    return response
def add_article_full_text_from_file(abs_path, pmid, html_table_list, overwrite_existing = False):
    """Attach the full-text file at abs_path to the article identified by
    pmid, import its HTML tables and text-mine article-level metadata.
    Python 2 code (print statements, `except Exception, e`)."""
    a = add_single_article_full(int(pmid), overwrite_existing)
    if a is None:
        return None
    # does article already have full text assoc with it?
    if m.ArticleFullText.objects.filter(article__pmid = pmid).count() > 0:
        aft = m.ArticleFullText.objects.get(article = a)
        if len(aft.get_content()) > 0:
            print "Article %s full text already in db, skipping..." % pmid
            return None
    try:
        print 'adding article %s' % (pmid)
        f = open(unicode(abs_path), 'r')
        file_ob = File(f)
        os.chdir(settings.PROJECT_BASE_DIRECTORY)
        aft = m.ArticleFullText.objects.get_or_create(article = a)[0]
        aft.full_text_file.save(pmid, file_ob)
        file_ob.close()
        for table in html_table_list:
            add_table_ob_to_article(table, a, text_mine = True)
        # text mine article level metadata
        apply_article_metadata(a)
    except Exception, e:
        # NOTE(review): failures are only printed, never logged or
        # re-raised -- callers cannot tell the import failed
        print e
        print pmid
def index(request):
    """Simple in-browser C++ editor: compile the submitted source with
    g++ or execute the previously built binary, echoing tool output.
    Python 2 code (pipe output concatenated to a str)."""
    context = RequestContext(request)
    terminal_output = '$ '
    if request.method == 'POST':
        form = EditorForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            text = cd.get('text')
            if 'compile' in request.POST:
                # write the submitted source; the with-block closes the
                # handle (the original's trailing `myfile.closed` /
                # `fd.closed` lines read an attribute, they never closed
                # anything -- removed as misleading no-ops)
                with open('media/myprogram.cpp', 'w+') as fd:
                    File(fd).write(text)
                # SECURITY NOTE: this compiles and (below) executes
                # untrusted user-supplied code on the server
                proc = subprocess.Popen(
                    ['g++', 'media/myprogram.cpp', '-o', 'media/myprogram'],
                    stderr=subprocess.PIPE)
                terminal_output += "g++ myprogram.cpp\n"
                terminal_output += proc.stderr.read()
            elif 'execute' in request.POST:
                proc = subprocess.Popen('./media/myprogram',
                                        stdout=subprocess.PIPE)
                terminal_output += "./media/myprogram\n"
                terminal_output += proc.stdout.read()
    else:
        form = EditorForm()
    context_dict = {'form': form, 'terminal_output': terminal_output}
    return render_to_response('c_compiler/index.html', context_dict, context)
def save_thumbnail(self, filenameo, image):
    """Write *image* bytes as this resource's thumbnail under
    MEDIA_ROOT/thumbs/<filenameo>, register a Thumbnail link and update
    thumbnail_url."""
    thumb_folder = 'thumbs'
    upload_path = os.path.join(settings.MEDIA_ROOT, thumb_folder)
    if not os.path.exists(upload_path):
        os.makedirs(upload_path)

    # 'wb': the thumbnail payload is raw image bytes -- the original's
    # text mode corrupts it on Windows and fails under Python 3
    with open(os.path.join(upload_path, filenameo), 'wb') as f:
        thumbnail = File(f)
        thumbnail.write(image)

    url_path = os.path.join(settings.MEDIA_URL, thumb_folder, filenameo)
    url = urljoin(settings.SITEURL, url_path)

    Link.objects.get_or_create(resource=self,
                               url=url,
                               defaults=dict(
                                   name='Thumbnail',
                                   extension='png',
                                   mime='image/png',
                                   link_type='image',
                               ))

    ResourceBase.objects.filter(id=self.id).update(
        thumbnail_url=url
    )
def create_by_user(self, user, uploaded_file, period, saved_hours):
    """Lock *uploaded_file* with a random numeric password for *period*
    days and persist the locker record for *user*.

    saved_hours is clamped to 24 hours per locked day.
    Returns the created model instance.
    """
    locked_at = django_now()
    unlockable_at = locked_at + datetime.timedelta(days=period)

    # never credit more hours than the lock period can cover
    max_saved_hours = period * 24
    if saved_hours > max_saved_hours:
        saved_hours = max_saved_hours

    password = get_random_string(8, allowed_chars='0123456789')

    fl = FileLocker()
    fl.lock(uploaded_file, password)

    # 'rb': the locked file is binary data -- the original's text mode
    # corrupts it on Windows and fails under Python 3
    locked_file = File(
        open(fl.get_locked_file_path(), 'rb'),
        name=fl.get_locked_file_name()
    )
    try:
        obj = self.create(
            user_profile=user.get_profile(),
            locked_file=locked_file,
            locked_file_name=fl.get_locked_file_name(),
            original_file_name=uploaded_file.name,
            original_file_size=uploaded_file.size,
            password=password,
            locked_at=locked_at,
            unlockable_at=unlockable_at,
            saved_hours=saved_hours
        )
    finally:
        # close even if create() raises (original leaked on error)
        locked_file.close()
    fl.clean()
    return obj
def save_file(request):
    """Save user-submitted C code as user_codes/<title>_<user>.c.

    If a Files_saveds row with this title already exists for the user,
    the stored file is read back instead of being overwritten.
    Python 2 code (`except Exception, e`).  NOTE(review): no return is
    visible in this view; the function may continue past this excerpt.
    """
    if request.method == 'POST':
        title = request.POST.get('title')
        code_text = request.POST.get('code_text')
        filename = 'user_codes/'+title+'_'+request.user.username+'.c'
        verify_duplicate=[]
        if title != '' and code_text != '':
            try:
                verify_duplicate = Files_saveds.objects.get(author__exact=request.user.id,title__exact = title)
            except Exception, e:
                # no duplicate found -- keep the (falsy) empty list
                pass
            if verify_duplicate:
                # duplicate: read the stored file back to the client
                with open(filename, 'r') as files:
                    code_file = File(files)
                    file_code=code_file.read()
                    # NOTE(review): `files.closed` only *reads* the
                    # attribute; the with-block is what closes the file
                    files.closed
                state = {'1':'3', '2' : file_code }
            else:
                try:
                    with open(filename, 'w') as files:
                        code_file = File(files)
                        code_file.write(code_text)
                        files.closed
                    p = Files_saveds(title=title, author=request.user)
                    p.save()
                    state = {'1' : '0', '2' :'Successful'}
                except Exception, e:
                    state = {'1' : '1','2' : 'Couldnot save'}
def save_thumbnail(self, filename, image):
    """Write *image* bytes to MEDIA_ROOT/thumbs/<filename>, ensure a
    Thumbnail link exists for this resource, and refresh thumbnail_url."""
    folder = 'thumbs'
    target_dir = os.path.join(settings.MEDIA_ROOT, folder)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    with open(os.path.join(target_dir, filename), 'wb') as fh:
        File(fh).write(image)

    # forward slashes so the URL is valid regardless of platform
    rel_url = os.path.join(settings.MEDIA_URL, folder, filename).replace('\\', '/')
    url = urljoin(settings.SITEURL, rel_url)

    Link.objects.get_or_create(
        resource=self,
        url=url,
        defaults=dict(
            name='Thumbnail',
            extension='png',
            mime='image/png',
            link_type='image',
        ))

    ResourceBase.objects.filter(id=self.id).update(thumbnail_url=url)
def index(request):
    """Upload a document, run the Airtel AV dashboard processor on it,
    store the resulting '<name> modified.xlsx' as a New_Document, and
    list all processed documents (newest first)."""
    # Handle file upload
    if request.method == 'POST':
        form = DocumentForm(request.POST, request.FILES)
        if form.is_valid():
            newdoc = Document(docfile=request.FILES['docfile'])
            newdoc.save()

            # Process and create a new file
            try:
                # BUG FIX: the original re-fetched Document.objects.all()[0],
                # which depends on default queryset ordering and can pick
                # the wrong row -- use the document we just saved.
                filepath = newdoc.docfile.file.name
                filename = basename(newdoc.docfile.name)
                Airtel_AV_Dashboard("Test").process(filepath, filename)
                # everything before the first dot (same result as the
                # original character-by-character loop)
                name = filename.split(".", 1)[0]
                with open(f"myapp/Final Output/{name} modified.xlsx", "rb") as stored_file:
                    # Store the new file in the database
                    new_doc = New_Document(docfile=File(
                        stored_file, name=f"{name} modified.xlsx"))
                    new_doc.save()
            except Exception as e:
                print(f"Exception {e}")

            # Redirect to the document list after POST
            return HttpResponseRedirect(reverse('index'))
    else:
        form = DocumentForm()  # A empty, unbound form

    # Load documents for the list page, newest first (same order the
    # original produced by appending from the end of the queryset)
    documents = list(reversed(New_Document.objects.all()))

    # Render the page with the documents and the form
    return render(request, 'file_processing_index.html', {
        'documents': documents,
        'name': "Download File",
        'form': form
    })
def setUp(self):
    """Create a Screenshot fixture with its image file attached."""
    super(ScreenshotAPITest, self).setUp()
    shot = Screenshot.objects.create(name='Obrazek',
                                     component=self.subproject)
    # with-block closes the handle the original setUp leaked
    with open(TEST_SCREENSHOT, 'rb') as handle:
        shot.image.save('screenshot.png', File(handle))
def parse_product(self,response):
    """Scrapy callback: parse a WooCommerce product page, upsert the Shop,
    resolve the category from breadcrumb tags, save a Product row and
    download its gallery images.

    NOTE(review): the whole product-saving branch only runs when a
    category was matched; unmatched pages are dropped with a print.
    """
    # upsert the shop by URL
    shop_id = Shop.objects.filter(url=response.meta['shop']).first()
    if shop_id is not None:
        print('existe')
    else:
        shop_id = Shop()
        shop_id.name = response.meta['shop_name']
        shop_id.url = response.meta['shop']
        shop_id.save()
        print('no existe')
    try:
        name_category = response.meta['name_category_safe'].lower()
    except:
        name_category = ''
    # breadcrumb texts drive the category-tag lookup
    categ = response.xpath('.//nav[@class="woocommerce-breadcrumb"]/a/text()').extract()
    print("####################################")
    print("####################################")
    print("####################################")
    print(categ)
    category = None
    for a in categ:
        # last matching tag wins
        category_tags = CategoryTags.objects.filter(tag__icontains=a.lower()).first()
        print(category_tags)
        if category_tags:
            category = category_tags.category
    if category is not None:
        name_category = response.meta['name_category_safe']
        product = response.css("div.center_column")
        name = response.xpath('.//div[@class="summary entry-summary"]/h1[@class="product_title entry-title"]/text()').re_first('\w.*')
        url = response.meta['url_product_safe']
        reference = response.xpath('.//p[@id="product_reference"]/span/text()').re_first('\w.*')
        try:
            brand = response.css('img.brand-image').attrib['title']
        except:
            brand = None
        try:
            description = ""
            description1 = response.xpath('.//div[@id="tab-description"]/p/text()').extract()
            for des in description1:
                description = description + str(des) + str("<br>")
        except:
            print("error")
        category = category
        category_temp = name_category
        # tax =
        try:
            # strip the thousands separator ('.') and parse as int
            t = response.xpath('.//span[@class="woocommerce-Price-amount amount"]/text()').re_first('\w.*')
            ti = t.split('.')
            total = ""
            for tii in ti:
                total = total + str(tii)
            total = int(total)
        except:
            total = None
        Product_object = Product()
        if name:
            Product_object.name = name
        if shop_id:
            Product_object.shop = shop_id
        if reference:
            Product_object.reference = reference
        if brand:
            Product_object.brand = brand
        if url:
            Product_object.url = url
        if category_temp:
            # NOTE(review): assigns the breadcrumb *list* `categ`, not
            # the computed `category_temp` string -- looks like a bug;
            # confirm against the Product.category_temp field type.
            Product_object.category_temp = categ
        if description:
            Product_object.description = description
        if category:
            Product_object.category = category
        if total:
            Product_object.total = total
        else:
            Product_object.total = 0
        Product_object.price = 0
        Product_object.tax = 0
        try:
            Product_object.save()
            product_error = False
        except:
            product_error = True
            print("No se pudo guardar el producto")
        if Product_object.id:
            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'}
            list_img = response.css('figure.woocommerce-product-gallery__wrapper a img')
            contador = 0
            for imm in list_img:
                img_url = imm.xpath('@src').re_first('\w.*')
                # NOTE(review): contador is never incremented, so every
                # image is saved as '<id>_0.jpg' -- confirm intended.
                name = str(Product_object.id) +'_' + str(contador) + '.jpg'
                producto_image = ProductImage()
                producto_image.product = Product_object
                req = Request(url=img_url, headers=headers)
                # NOTE(review): rebinding `response` shadows the scrapy
                # response for the rest of the method
                response = urlopen(req)
                io = BytesIO(response.read())
                producto_image.image.save(name, File(io))
                producto_image.save()
        else:
            print("no guardo")
    else:
        # print("#############################################################################################################")
        print("No existe la categoria")
def get_upload_file(filename='repo'):
    """Return the fixture tarball <filename>.tar.gz wrapped in a
    SimpleUploadedFile for upload tests."""
    # with-block closes the fixture handle the original leaked
    with open('./tests/fixtures_static/{}.tar.gz'.format(filename), 'rb') as fh:
        payload = File(fh).read()
    return SimpleUploadedFile(filename, payload,
                              content_type='multipart/form-data')
def submit_script(**kwargs):
    """Execute a queued Djangui job: run its command in the job's output
    directory, archive the generated files (tar.gz + zip), optionally
    mirror them to remote storage, and record stdout/stderr/status.

    Returns the (stdout, stderr) pair from the subprocess.
    """
    job_id = kwargs.pop('djangui_job')
    resubmit = kwargs.pop('djangui_resubmit', False)
    rerun = kwargs.pop('rerun', False)
    from .backend import utils
    from .models import DjanguiJob, DjanguiFile
    job = DjanguiJob.objects.get(pk=job_id)

    command = utils.get_job_commands(job=job)
    if resubmit:
        # clone ourselves, setting pk=None seems hackish but it works
        job.pk = None

    # This is where the script works from -- it is what is after the media_root
    # since that may change between setups/where our user uploads are stored.
    cwd = job.get_output_path()
    abscwd = os.path.abspath(os.path.join(settings.MEDIA_ROOT, cwd))
    job.command = ' '.join(command)
    job.save_path = cwd

    if rerun:
        # cleanup the old files, we need to be somewhat aggressive here.
        local_storage = utils.get_storage(local=True)
        remote_storage = utils.get_storage(local=False)
        to_delete = []
        with atomic():
            for dj_file in DjanguiFile.objects.filter(job=job):
                # only outputs (or parameter-less files) are purged
                if dj_file.parameter is None or dj_file.parameter.parameter.is_output:
                    to_delete.append(dj_file)
                    path = local_storage.path(dj_file.filepath.name)
                    dj_file.filepath.delete(False)
                    if local_storage.exists(path):
                        local_storage.delete(path)
                    # TODO: This needs to be tested to make sure it's being nuked
                    if remote_storage.exists(path):
                        remote_storage.delete(path)
            [i.delete() for i in to_delete]

    utils.mkdirs(abscwd)
    # make sure we have the script, otherwise download it. This can happen if
    # we have an ephemeral file system or are executing jobs on a worker node.
    script_path = job.script.script_path
    if not utils.get_storage(local=True).exists(script_path.path):
        utils.get_storage(local=True).save(script_path.path, script_path.file)

    job.status = DjanguiJob.RUNNING
    job.save()

    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, cwd=abscwd)
    stdout, stderr = proc.communicate()

    # tar/zip up the generated content for bulk downloads
    def get_valid_file(cwd, name, ext):
        # first free '<name>[_<i>].<ext>' filename inside cwd
        out = os.path.join(cwd, name)
        index = 0
        while os.path.exists(six.u('{}.{}').format(out, ext)):
            index += 1
            out = os.path.join(cwd, six.u('{}_{}').format(name, index))
        return six.u('{}.{}').format(out, ext)

    # fetch the job again in case the database connection was lost during
    # the job or something else changed.
    job = DjanguiJob.objects.get(pk=job_id)

    # if there are files generated, make zip/tar files for download
    if len(os.listdir(abscwd)):
        tar_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'tar.gz')
        tar = tarfile.open(tar_out, "w:gz")
        tar_name = os.path.splitext(
            os.path.splitext(os.path.split(tar_out)[1])[0])[0]
        tar.add(abscwd, arcname=tar_name)
        tar.close()

        zip_out = get_valid_file(abscwd, get_valid_filename(job.job_name), 'zip')
        zip = zipfile.ZipFile(zip_out, "w")
        arcname = os.path.splitext(os.path.split(zip_out)[1])[0]
        zip.write(abscwd, arcname=arcname)
        for root, folders, filenames in os.walk(os.path.split(zip_out)[0]):
            for filename in filenames:
                path = os.path.join(root, filename)
                # never pack the archives into themselves
                if path == tar_out:
                    continue
                if path == zip_out:
                    continue
                zip.write(path, arcname=os.path.join(arcname, filename))
        zip.close()

        # save all the files generated as well to our default storage for
        # ephemeral storage setups
        if djangui_settings.DJANGUI_EPHEMERAL_FILES:
            for root, folders, files in os.walk(abscwd):
                for filename in files:
                    filepath = os.path.join(root, filename)
                    s3path = os.path.join(root[root.find(cwd):], filename)
                    remote = utils.get_storage(local=False)
                    exists = remote.exists(s3path)
                    # NOTE(review): size() is queried even when the remote
                    # object does not exist -- some storage backends raise
                    # here; confirm before relying on this path
                    filesize = remote.size(s3path)
                    if not exists or (exists and filesize == 0):
                        if exists:
                            remote.delete(s3path)
                        remote.save(s3path, File(open(filepath, 'rb')))

    utils.create_job_fileinfo(job)

    job.stdout = stdout
    job.stderr = stderr
    job.status = DjanguiJob.COMPLETED
    job.save()

    return (stdout, stderr)
def test_model_methods(self):
    """Test model style manager CRUD methods.

    Exercises set_current_style / rename_style / delete_style / add_style on
    the layer wrapper and, after each mutation, re-reads the written QGIS
    project from disk to verify the change was persisted.
    """
    # Initial state: two styles, 'style2' current.
    self.assertEqual(self.qdjango_layer.styles, [
        {
            'name': 'style1',
            'current': False
        },
        {
            'name': 'style2',
            'current': True
        },
    ])
    self.assertFalse(self.qdjango_project.is_dirty)
    # Switching to a non-existent style must fail and leave the project clean.
    self.assertFalse(
        self.qdjango_layer.set_current_style('style1234567890'))
    self.assertTrue(self.qdjango_layer.set_current_style('style1'))
    # A successful change marks the project dirty.
    self.assertTrue(self.qdjango_project.is_dirty)
    self.assertEqual(self.qdjango_layer.styles, [
        {
            'name': 'style1',
            'current': True
        },
        {
            'name': 'style2',
            'current': False
        },
    ])
    # Verify the project was written
    p = QgsProject()
    p.read(self.qdjango_project.qgis_project.fileName())
    l = p.mapLayer(self.qdjango_layer.qgis_layer.id())
    self.assertTrue(l.isValid())
    sm = l.styleManager()
    self.assertEqual(sm.currentStyle(), 'style1')
    # Release the QGIS objects so the project file is not held open.
    del(sm)
    del(p)
    # Test rename: unknown source name and name collisions must fail.
    self.assertFalse(self.qdjango_layer.rename_style(
        'style1234567890', 'new_name'))
    self.assertFalse(self.qdjango_layer.rename_style('style2', 'style1'))
    self.assertTrue(self.qdjango_layer.rename_style('style2', 'new_name'))
    self.assertTrue(self.qdjango_layer.rename_style('style1', 'new_name1'))
    self.assertTrue(self.qdjango_layer.rename_style('new_name1', 'style1'))
    # Verify the project was written
    p = QgsProject()
    p.read(self.qdjango_project.qgis_project.fileName())
    l = p.mapLayer(self.qdjango_layer.qgis_layer.id())
    self.assertTrue(l.isValid())
    sm = l.styleManager()
    self.assertEqual(sm.styles(), ['new_name', 'style1'])
    del(sm)
    del(p)
    # Test remove/delete: unknown names fail; the remaining checks restore
    # a single 'style1' style for the add_style tests below.
    self.assertFalse(self.qdjango_layer.delete_style('style1234567890'))
    self.assertTrue(self.qdjango_layer.delete_style('style1'))
    self.assertFalse(self.qdjango_layer.delete_style('new_name'))
    assert self.qdjango_layer.rename_style('new_name', 'style1')
    # Verify the project was written
    p = QgsProject()
    p.read(self.qdjango_project.qgis_project.fileName())
    l = p.mapLayer(self.qdjango_layer.qgis_layer.id())
    self.assertTrue(l.isValid())
    sm = l.styleManager()
    self.assertEqual(sm.styles(), ['style1'])
    del(sm)
    del(p)
    # Test add new style from a QML fixture file.
    with open(os.path.join(
            CURRENT_PATH + TEST_BASE_PATH,
            'multiple_styles_manager_test.qml'), 'r') as f:
        qml = f.read()
    # Adding under an existing name must fail; a fresh (unicode) name works.
    self.assertFalse(self.qdjango_layer.add_style('style1', qml))
    self.assertTrue(self.qdjango_layer.add_style(
        'My new fancy èé style', qml))
    # Verify the project was written
    p = QgsProject()
    p.read(self.qdjango_project.qgis_project.fileName())
    l = p.mapLayer(self.qdjango_layer.qgis_layer.id())
    self.assertTrue(l.isValid())
    sm = l.styleManager()
    self.assertEqual(sm.styles(), ['My new fancy èé style', 'style1'])
    del(sm)
    del(p)
    # Test invalid QML is rejected.
    self.assertFalse(self.qdjango_layer.add_style(
        'My invalid style', '<xxxx>this is not a valid style</xxxx>'))
    # Restore the project and check the dirty flag is reset by a full save.
    project_file = File(open(self.project_path, 'r'))
    project = QgisProject(project_file)
    project.instance = self.qdjango_project
    project.title = 'Test qdjango postgres multiple styles manager project'
    project.group = self.project_group
    project.save()
    self.assertFalse(self.qdjango_project.is_dirty)
def form_valid(self, form):
    """Extract every image from an uploaded ZIP archive into the gallery.

    Each archive member is written to a per-request temp directory, rejected
    if oversized or not a PIL-readable image, otherwise saved as a new
    ``Image`` row attached to the gallery. The temp directory is removed at
    the end and the user is redirected to the gallery page.
    """
    context = self.get_context_data()
    gallery = context['gallery']
    archive = self.request.FILES['file']
    # Unique scratch directory keyed on the current timestamp.
    temp = os.path.join(tempfile.gettempdir(), str(time.time()))
    if not os.path.exists(temp):
        os.makedirs(temp)
    # NOTE(review): the archive is only read below, yet it is opened in
    # append ('a') mode — 'r' looks like the intended mode; confirm.
    zfile = zipfile.ZipFile(archive, 'a')
    for i in zfile.namelist():
        filename = os.path.split(i)[1]
        ph_temp = os.path.abspath(os.path.join(temp, os.path.basename(i)))
        if not filename.strip():  # don't deal with directory
            continue
        # create file for image: dump the member's bytes to the temp dir
        f_im = open(ph_temp, 'wb')
        f_im.write(zfile.read(i))
        f_im.close()
        (title, ext) = os.path.splitext(os.path.basename(i))
        # if size is too large, don't save
        if os.stat(ph_temp).st_size > settings.ZDS_APP['gallery']['image_max_size']:
            messages.error(
                self.request,
                _('Votre image "{}" est beaucoup trop lourde, réduisez sa taille à moins de {:.0f}'
                  "Kio avant de l'envoyer.").format(
                    title, settings.ZDS_APP['gallery']['image_max_size'] / 1024))
            continue
        # if it's not an image, pass (PIL raises OSError on non-images)
        try:
            ImagePIL.open(ph_temp)
        except OSError:
            continue
        # create picture in database:
        f_im = File(open(ph_temp, 'rb'))
        f_im.name = title + ext
        pic = Image()
        pic.gallery = gallery
        pic.title = title
        pic.legend = ''
        pic.pubdate = datetime.now()
        pic.physical = f_im
        pic.save()
        f_im.close()
        # Per-member temp file is no longer needed once saved.
        if os.path.exists(ph_temp):
            os.remove(ph_temp)
    zfile.close()
    if os.path.exists(temp):
        shutil.rmtree(temp)
    return redirect(gallery.get_absolute_url())
def _open(self, name, mode='rb'):
    """Open the stored file called ``name`` and wrap it in a Django ``File``."""
    absolute_path = self.path(name)
    handle = open(absolute_path, mode)
    return File(handle)
def build(self, build):
    """Build a wheel for ``build.original_url`` inside a Docker container.

    Composes the user's setup commands plus a ``pip wheel`` invocation into a
    single ``sh -c`` command, runs it in a fresh container with a temp
    wheelhouse bind-mounted, captures the combined log, then stores the log,
    timings, metadata, digest and the wheel file on the ``build`` model.
    Raises RuntimeError when no wheel was produced.
    """
    build_command = ' '.join([
        'pip', 'wheel',
        '--no-deps', '--no-clean', '--no-index',
        '--wheel-dir', '/wheelhouse',
        shlex_quote(build.original_url),
    ])
    # Drop blank lines from the user-supplied setup script.
    setup_commands = [
        line.strip()
        for line in build.setup_commands.splitlines()
        if line.strip()
    ]
    commands = ' && '.join(setup_commands + [build_command])
    cmd = 'sh -c {}'.format(shlex_quote(commands))
    # The executed command is recorded as the first log line.
    build_log = io.StringIO()
    build_log.write(cmd)
    build_log.write('\n')
    with tempdir(dir=settings.TEMP_BUILD_ROOT) as wheelhouse:
        image, tag = split_image_name(self.image)
        consume_output(
            # TODO: Add support for custom registries and auth_config
            self.client.pull(image, tag, stream=True),
            build_log,
        )
        container = self.client.create_container(
            self.image, cmd, working_dir='/',
            volumes=['/wheelhouse'],
            host_config=self.client.create_host_config(binds={
                wheelhouse: {
                    'bind': '/wheelhouse',
                    'ro': False,
                },
            }),
        )
        build_start = timezone.now()
        self.client.start(container=container['Id'])
        # Stream container stdout/stderr into the log until it exits.
        consume_output(
            self.client.attach(container=container['Id'], stdout=True,
                               stderr=True, stream=True),
            build_log,
        )
        build_end = timezone.now()
        self.client.remove_container(container=container['Id'], v=True)
        build.build_log = build_log.getvalue()
        build.build_duration = (build_end - build_start).total_seconds()
        build.build_timestamp = timezone.now()
        build.save()
        filenames = os.listdir(wheelhouse)
        if filenames:
            # --no-deps guarantees at most one wheel in the wheelhouse.
            assert len(filenames) == 1
            filename = filenames[0]
            with open(os.path.join(wheelhouse, filename), 'rb') as fh:
                build.metadata = extract_wheel_meta(fh)
                fh.seek(0)
                build.md5_digest = file_digest(hashlib.md5, fh)
                fh.seek(0)
                build.build.save(filename, File(fh))
                fh.seek(0)
                build.filesize = build.build.size
            build.save()
        else:
            raise RuntimeError('Build failed')
def do_work():
    """
    The invoice worker creates Invoice objects for shop orders and for
    custom orders. It also generates PDF files for Invoice objects that
    have no PDF. It also emails invoices for shop orders. It also generates
    proforma invoices for all closed orders.
    """
    # check if we need to generate any proforma invoices for shop orders
    # (closed orders: open__isnull=True, with no PDF yet)
    for order in Order.objects.filter(
        Q(pdf="") | Q(pdf__isnull=True),
        open__isnull=True,
    ):
        # generate proforma invoice for this Order
        pdffile = generate_pdf_letter(
            filename=order.filename,
            template="pdf/proforma_invoice.html",
            formatdict={
                "hostname": settings.ALLOWED_HOSTS[0],
                "order": order,
                "bank": settings.BANKACCOUNT_BANK,
                "bank_iban": settings.BANKACCOUNT_IBAN,
                "bank_bic": settings.BANKACCOUNT_SWIFTBIC,
                "bank_dk_reg": settings.BANKACCOUNT_REG,
                "bank_dk_accno": settings.BANKACCOUNT_ACCOUNT,
            },
        )
        # update order object with the file
        order.pdf.save(str(order.filename), File(pdffile))
        order.save()
        logger.info(f"Generated proforma invoice PDF for order {order}")

    # check if we need to generate any invoices for shop orders
    for order in Order.objects.filter(paid=True, invoice__isnull=True):
        # generate invoice for this Order
        Invoice.objects.create(order=order)
        logger.info(f"Generated Invoice object for {order}")

    # check if we need to generate any invoices for custom orders
    for customorder in CustomOrder.objects.filter(invoice__isnull=True):
        # generate invoice for this CustomOrder
        Invoice.objects.create(customorder=customorder)
        logger.info(f"Generated Invoice object for {customorder}")

    # check if we need to generate any creditnotes for refunds
    for refund in Refund.objects.filter(paid=True, creditnote__isnull=True):
        # generate CreditNote for this Refund
        CreditNote.objects.create(
            refund=refund,
            invoice=refund.order.invoice,
            amount=refund.amount,
            text=f"Refund for order #{refund.order.id}",
            user=refund.order.user,
        )
        logger.info(f"Generated CreditNote object for {refund}")

    # check if we need to generate any pdf invoices
    for invoice in Invoice.objects.filter(Q(pdf="") | Q(pdf__isnull=True)):
        # generate the pdf; a failure for one invoice must not abort the
        # worker, so errors are logged and the loop continues
        try:
            if invoice.customorder:
                template = "pdf/custominvoice.html"
            else:
                template = "pdf/invoice.html"
            pdffile = generate_pdf_letter(
                filename=invoice.filename,
                template=template,
                formatdict={
                    "invoice": invoice,
                    "bank": settings.BANKACCOUNT_BANK,
                    "bank_iban": settings.BANKACCOUNT_IBAN,
                    "bank_bic": settings.BANKACCOUNT_SWIFTBIC,
                    "bank_dk_reg": settings.BANKACCOUNT_REG,
                    "bank_dk_accno": settings.BANKACCOUNT_ACCOUNT,
                },
            )
            logger.info("Generated pdf for invoice %s" % invoice)
        except Exception as E:
            logger.exception(
                "Unable to generate PDF file for invoice #%s. Error: %s"
                % (invoice.pk, E),
            )
            continue
        # update invoice object with the file
        invoice.pdf.save(str(invoice.filename), File(pdffile))
        invoice.save()

    # check if we need to send out any invoices (only for shop orders, and
    # only where pdf has been generated)
    for invoice in Invoice.objects.filter(
        order__isnull=False,
        sent_to_customer=False,
    ).exclude(pdf=""):
        logger.info("found unmailed Invoice object: %s" % invoice)
        # add email to the outgoing email queue; only mark sent on success
        if add_invoice_email(invoice=invoice):
            invoice.sent_to_customer = True
            invoice.save()
            logger.info(
                "OK: Invoice email to {} added to queue.".format(
                    invoice.order.user.email,
                ),
            )
        else:
            logger.error(
                "Unable to add email for invoice {} to {}".format(
                    invoice.pk,
                    invoice.order.user.email,
                ),
            )

    # check if we need to generate any pdf creditnotes?
    for creditnote in CreditNote.objects.filter(
            Q(pdf="") | Q(pdf__isnull=True)):
        # generate the pdf
        try:
            pdffile = generate_pdf_letter(
                filename=creditnote.filename,
                template="pdf/creditnote.html",
                formatdict={"creditnote": creditnote},
            )
            logger.info("Generated pdf for creditnote %s" % creditnote)
        except Exception as E:
            logger.exception(
                "Unable to generate PDF file for creditnote #%s. Error: %s"
                % (creditnote.pk, E),
            )
            continue
        # update creditnote object with the file
        creditnote.pdf.save(creditnote.filename, File(pdffile))
        creditnote.save()

    # check if we need to send out any creditnotes (only where pdf has been
    # generated and only for creditnotes linked to a user)
    for creditnote in (CreditNote.objects.filter(
            sent_to_customer=False).exclude(pdf="").exclude(user=None)):
        # send the email
        if add_creditnote_email(creditnote=creditnote):
            logger.info("OK: Creditnote email to %s added"
                        % creditnote.user.email)
            creditnote.sent_to_customer = True
            creditnote.save()
        else:
            logger.error(
                "Unable to add creditnote email for creditnote %s to %s"
                % (creditnote.pk, creditnote.user.email),
            )
def cp(field_file, filename):
    """Copy the local file at *filename* into *field_file*, keeping its basename."""
    target_name = os.path.basename(filename)
    with open(filename, 'rb') as source:
        field_file.save(target_name, File(source))
def upload_complete(self, request, filename, *args, **kwargs):
    """Finalize an ajax upload and attach the file to its destination.

    Three modes, chosen from GET parameters:
    * ``model_name``/``object_id`` — move the uploaded file onto the given
      model instance's media field under an SEO-friendly (slugified) name,
      optionally mirroring it to the ikwen platform media root;
    * ``is_tiny_mce`` — move it into the TinyMCE upload directory;
    * otherwise — leave it in the upload dir and return its URL.

    Returns a dict with ``path``/``preview`` on success or ``error`` info.
    """
    path = self.UPLOAD_DIR + "/" + filename
    self._dest.close()
    media_root = getattr(settings, 'MEDIA_ROOT')
    media_url = getattr(settings, 'MEDIA_URL')
    model_name = request.GET.get('model_name')
    object_id = request.GET.get('object_id')
    required_width = request.GET.get('required_width')
    required_height = request.GET.get('required_height')
    # Cache-busting token appended to the preview URL.
    rand = ''.join([random.SystemRandom().choice(string.ascii_letters) for i in range(6)])
    full_path = media_root + path
    # Optional strict dimension check before anything is moved.
    if required_width and required_height:
        img = Image.open(full_path)
        if img.size != (int(required_width), int(required_height)):
            return {'error': _('Expected size is %(width)s x %(height)s px.'
                               % {'width': required_width, 'height': required_height}),
                    'wrong_size': True}
    if model_name and object_id:
        s = get_service_instance()
        media_field = request.GET.get('media_field')
        if not media_field:
            media_field = request.GET.get('image_field', 'image')
        label_field = request.GET.get('label_field', 'name')
        tokens = model_name.split('.')
        model = get_model(tokens[0], tokens[1])
        obj = model._default_manager.get(pk=object_id)
        media = obj.__getattribute__(media_field)
        try:
            # NOTE(review): opened in text mode 'r' though the content may be
            # binary (images) — looks like it should be 'rb'; confirm.
            with open(media_root + path, 'r') as f:
                content = File(f)
                current_media_path = media.path if media.name else None
                upload_to = media.field.upload_to
                if callable(upload_to):
                    upload_to = upload_to(obj, filename)
                dir = media_root + upload_to
                unique_filename = False
                filename_suffix = 0
                filename_no_extension, extension = os.path.splitext(filename)
                # Derive an SEO filename from the object's label field,
                # falling back to the lowercased class name.
                try:
                    label = obj.__getattribute__(label_field)
                    if label:
                        seo_filename_no_extension = slugify(label)
                    else:
                        seo_filename_no_extension = obj.__class__.__name__.lower()
                except:
                    seo_filename_no_extension = obj.__class__.__name__.lower()
                seo_filename = seo_filename_no_extension + extension
                # Probe for a free name by attempting opens until IOError.
                if os.path.isfile(os.path.join(dir, seo_filename)):
                    while not unique_filename:
                        try:
                            if filename_suffix == 0:
                                open(os.path.join(dir, seo_filename))
                            else:
                                open(os.path.join(dir, seo_filename_no_extension + str(filename_suffix) + extension))
                            filename_suffix += 1
                        except IOError:
                            unique_filename = True
                    if filename_suffix > 0:
                        seo_filename = seo_filename_no_extension + str(filename_suffix) + extension
                # Image fields get the project slug prefix; other files are
                # just capitalized.
                if isinstance(media, DjangoImageFieldFile) or isinstance(media, ImageFieldFile):
                    seo_filename = s.project_name_slug + '_' + seo_filename
                else:
                    seo_filename = seo_filename.capitalize()
                destination = os.path.join(dir, seo_filename)
                if not os.path.exists(dir):
                    os.makedirs(dir)
                media.save(destination, content)
                if request.GET.get('upload_to_ikwen') == 'yes':
                    # Upload to ikwen media folder for access platform wide.
                    destination2_folder = ikwen_settings.MEDIA_ROOT + upload_to
                    if not os.path.exists(destination2_folder):
                        os.makedirs(destination2_folder)
                    destination2 = destination.replace(media_root, ikwen_settings.MEDIA_ROOT)
                    os.rename(destination, destination2)
                    if isinstance(media, MultiImageFieldFile):
                        destination2_small = ikwen_settings.MEDIA_ROOT + media.small_name
                        destination2_thumb = ikwen_settings.MEDIA_ROOT + media.thumb_name
                        os.rename(media.small_path, destination2_small)
                        os.rename(media.thumb_path, destination2_thumb)
                    media_url = ikwen_settings.MEDIA_URL
                # Build the public URL and a preview URL per field type.
                if isinstance(media, MultiImageFieldFile):
                    url = media_url + media.small_name
                    preview_url = url
                elif isinstance(media, DjangoImageFieldFile) or isinstance(media, ImageFieldFile):
                    url = media_url + media.name
                    preview_url = url
                else:
                    url = media_url + media.name
                    preview_url = get_preview_from_extension(media.name)
                try:
                    if media and os.path.exists(media_root + path):
                        os.unlink(media_root + path)  # Remove file from upload tmp folder
                except Exception as e:
                    if getattr(settings, 'DEBUG', False):
                        raise e
                # Delete the previously attached file unless it is the same
                # path we just wrote.
                if current_media_path:
                    try:
                        if destination != current_media_path and os.path.exists(current_media_path):
                            os.unlink(current_media_path)
                    except OSError as e:
                        if getattr(settings, 'DEBUG', False):
                            raise e
                return {
                    'path': url,
                    'preview': preview_url + '?rand=' + rand
                }
        except IOError as e:
            logger.error("File failed to upload. May be invalid or corrupted image file", exc_info=True)
            if settings.DEBUG:
                raise e
            return {'error': 'File failed to upload. May be invalid or corrupted image file'}
    elif request.GET.get('is_tiny_mce'):
        tiny_mce_upload_dir = getattr(settings, 'TINY_MCE_UPLOAD_DIR', 'tiny_mce')
        tiny_mce_root = media_root + tiny_mce_upload_dir
        if not os.path.exists(tiny_mce_root):
            os.makedirs(tiny_mce_root)
        src = media_root + self.UPLOAD_DIR + "/" + filename
        dst = tiny_mce_root + '/' + filename
        os.rename(src, dst)
        return {
            'path': media_url + tiny_mce_upload_dir + '/' + filename + '?rand=' + rand
        }
    else:
        # Plain upload: keep the file where it is and report its URL.
        path = settings.MEDIA_URL + self.UPLOAD_DIR + "/" + filename
        self._dest.close()
        raw_filename, extension = os.path.splitext(filename)
        resp = {"path": path}
        # Non-image extensions get a generic preview icon.
        if extension.lower() not in ['.gif', '.jpeg', '.jpg', '.png', '.svg']:
            resp["preview"] = get_preview_from_extension(filename)
        return resp
def upload_product_complete(request):
    """Bulk-import products from an uploaded Excel file.

    Reads the POSTed spreadsheet with pandas, maps its columns onto Product
    fields, creates products via ``Product.objects.get_or_new`` and attaches
    local image files (falling back to link fields when a cell holds a URL
    instead of a readable path). Renders a summary page listing rows that
    were skipped and why.
    """
    if request.method == 'POST':
        try:
            file = request.FILES['database_file']
        except:
            # NOTE(review): this is an error condition ("no file selected")
            # reported through messages.success — probably meant to be
            # messages.error; confirm before changing.
            messages.success(request, '파일이 선택되지 않았습니다.')
            return redirect('staff:upload_product_by_file')
        try:
            df = pd.read_excel(file)
            # Normalize pandas NaN cells to None for the ORM.
            df = df.where(pd.notnull(df), None)
            print('성공적으로 파일 가져옴')
        except:
            raise ObjectDoesNotExist
        print(df)
        # Fixed column layout expected from the spreadsheet.
        df.columns = ["number", "title", "brand", 'category', 'list_price',  # required fields
                      'info_made_country', 'info_product_number', 'info_delivery', 'combined_delivery', 'main_image',  # required fields
                      'image1', 'image2', 'image3', 'image4', 'image5', 'image6', 'image7', 'image8', 'image9',
                      'info_product_kind', 'info_material', 'info_feature', 'info_product_person', 'info_alert',  # optional
                      'info_quality_standard', 'info_as', 'description', 'video_link'  # optional
                      ]
        # Maps product number -> reason it was skipped.
        no_updated_dict = {}
        engine = create_engine(DATABASE_URL, echo=False)
        now1 = timezone.now()
        for index, row in df.iterrows():
            data = dict(row)
            number = data['number']
            brand = data['brand']
            # obj = Test.objects.create(full_name=row['full_name'], phone_number=row['phone_number'])
            # get_or_new returns (obj, created) where created may also be an
            # error string describing a missing Brand/Category.
            obj, created = Product.objects.get_or_new(request, number=number, data=data)
            if not created:
                no_updated_dict[data['number']] = 'Already Exist'
            elif created == 'Brand DoesNotExist':
                no_updated_dict[data['number']] = created
            elif created == 'Category DoesNotExist':
                no_updated_dict[data['number']] = created
            image_cols = ['main_image', 'image1', 'image2', 'image3', 'image4',
                          'image5', 'image6', 'image7', 'image8', 'image9']
            if created == True:
                obj_images = [obj.main_image, obj.image1, obj.image2, obj.image3, obj.image4,
                              obj.image5, obj.image6, obj.image7, obj.image8, obj.image9]
                obj_images_link = [obj.main_image_link, obj.image1_link, obj.image2_link]
                image_data_link = False
                for i, col in enumerate(image_cols):
                    if data[col] is not None:
                        # Treat the cell as a local file path; if it cannot be
                        # opened, fall back to storing it as a link below.
                        try:
                            with open(data[col], 'rb') as f:
                                filename = upload_main_image_path(obj, os.path.basename(f.name))
                                obj_images[i] = File(f, name=filename)
                                # obj.save()
                                # self.license_file.save(upload_main_image_path, File(f))
                        except OSError:
                            image_data_link = True
                            # if data[col][0:4] == 'http':
                            # obj_images_link[i] = data[col]
                            # print("obj_images_link[{}] : {}".format(i, data[col]))
                            # obj.save()
                            # response = urllib.request.urlretrieve(data[col])
                            # with open(response[0], 'rb') as f:
                            # file_url = urlparse(data[col])
                            # filename = os.path.basename(file_url.path)
                            # obj_images[i].save(filename, f)
                            # print(i, "image check", obj_images[i])
                            # obj.save()
                if image_data_link:
                    obj.main_image_link = data['main_image']
                    obj.image1_link = data['image1']
                    obj.image2_link = data['image2']
                obj.save()
        product_qs = Product.objects.all()
        now2 = timezone.now()
        print(now2-now1)
        context = {
            'qs' : product_qs,
            'no_updated_dict': no_updated_dict
        }
        return render(request, 'staff/upload_product_complete.html', context)
def download_image(self, url):
    """Fetch *url* and store its body on ``self.image`` under the URL's last path segment."""
    payload = requests.get(url).content
    with tempfile.TemporaryFile() as scratch:
        scratch.write(payload)
        filename = url.split('/')[-1]
        self.image.save(filename, File(scratch))
def update_profile(request):
    """Update the posting user's own ProfileInfo from POST data.

    Only applies the change when the authenticated user's profile matches the
    posted ``user_id``. Reads about/website/email from one of two field-name
    variants (selected by ``size``), optionally replaces the profile picture,
    and returns ``{'success': bool}`` as JSON.
    """
    current_profile_info = request.user
    # NOTE(review): is_anonymous is called as a method — this matches older
    # Django versions; on modern Django it is a property.
    if (not current_profile_info.is_anonymous()):
        current_profile_info = ProfileInfo.objects.get(
            user=current_profile_info)
        #print(current_profile_info)
    else:
        current_profile_info = None
    try:
        user_id = request.POST['user_id']
        user = ProfileInfo.objects.get(id=user_id)
        # Users may only edit their own profile.
        if (current_profile_info == user):
            about = ""
            website = ""
            email = ""
            # 'size' selects which suffixed form-field set was submitted.
            size = int(request.POST['size'])
            if (size == 1):
                try:
                    about = request.POST['about1']
                except:
                    pass
                try:
                    website = request.POST['website1']
                except:
                    pass
                try:
                    email = request.POST['email1']
                except:
                    pass
            elif (size == 2):
                try:
                    about = request.POST['about2']
                except:
                    pass
                try:
                    website = request.POST['website2']
                except:
                    pass
                try:
                    email = request.POST['email2']
                except:
                    pass
            user.about = about
            user.website = website
            user.user.email = email
            # Optional picture upload: spooled to MEDIA_ROOT, then re-read
            # and saved onto the ImageField.
            try:
                picture = request.FILES['picture']
                destination = open(settings.MEDIA_ROOT + picture.name, 'wb+')
                for chunk in picture.chunks():
                    destination.write(chunk)
                destination.close()
                user.profile_image.save(
                    picture.name,
                    File(open(settings.MEDIA_ROOT + picture.name, "rb")))
            except:
                pass
            user.save()
            user.user.save()
            #print("Done!")
        return JsonResponse({'success': True})
    except Exception as inst:
        #print(inst)
        #print("Update didn't work")
        return JsonResponse({'success': False})
<p>Fusce scelerisque vehicula elit, imperdiet maximus ligula dignissim non. Curabitur molestie erat ornare ex vulputate varius. Duis suscipit enim libero, non ultricies nisi porttitor eget. Proin a lacus sit amet lacus tincidunt viverra ut non ipsum. Duis semper iaculis consectetur. Praesent vel massa lacus. Praesent in malesuada ligula, in ornare tortor. Nam nec neque ipsum. Quisque vel quam nulla. Nulla viverra posuere lacus, ut faucibus sapien ultricies ut. Nullam convallis ullamcorper turpis, vitae condimentum dui ultricies et. Quisque eu venenatis turpis, tempus accumsan enim. Cras aliquet justo ante, id tempor nisl aliquet vestibulum.</p> <p>Praesent vestibulum metus eu egestas scelerisque. Nullam tempor lectus quis nunc cursus, nec porttitor augue luctus. Quisque placerat dolor eget nulla tempus dignissim. Suspendisse mollis iaculis sem a fringilla. Proin sapien erat, gravida sed est at, bibendum mollis leo. Aenean ullamcorper nisl nisi, in sollicitudin ipsum tempus sit amet. Phasellus convallis tellus pellentesque ex malesuada, sed pulvinar orci imperdiet. Pellentesque sit amet lorem nibh. Proin arcu odio, luctus at diam at, mattis eleifend eros. Nunc vulputate enim quis semper auctor. Duis urna lacus, molestie consectetur lacus non, vulputate mattis nisl. Class aptent taciti sociosqu ad litora torquent per conubia nostra, per inceptos himenaeos. Integer nec venenatis diam, sit amet rhoncus massa. Nunc a malesuada odio. 
""" for i in range(num_movies): title = movieList[i] released_date = date(int(movieList[i][-5:-1]), randint(1, 12), randint(1, 28)) try: print('Creating movie {0}.'.format(title)) movie = Movie() movie.title = title movie.slug = slugify(title) + '-' + str(i + 1) movie.released_date = released_date movie.description = description f = File( open( os.path.join(os.path.dirname(settings.BASE_DIR), "root", "media_cdn", "500x500.png"), 'rb')) movie.photo.save('{0}.png'.format(i + 1), f) movie.save() print('Movie {0} successfully created.'.format(title)) except: print(released_date) print( 'There was a problem creating the Movie: {0}. Error: {1}.'.format( title, sys.exc_info()))
def test_read_from_storage(self):
    """A file registered as written to storage is read back verbatim."""
    stored = File(six.BytesIO(b'bar'))
    HANDLED_FILES['written_files'].append(['foo', stored])
    retrieved = self.command.read_from_storage('foo')
    self.assertEqual(retrieved.read(), b'bar')
def test_write_local_file(self):
    """write_local_file materialises the wrapped stream at the target path."""
    path = '/tmp/foo.bak'
    source = File(six.BytesIO(b"foo"))
    self.command.write_local_file(source, path)
    self.assertTrue(os.path.exists(path))
    # tearDown equivalent: remove the file we just created
    os.remove(path)
def authenticate(self, request):
    """Authenticate a user via Facebook's OAuth code flow (legacy Python 2 code:
    uses ``urllib.urlencode``/``urlopen`` and ``cgi.parse_qs``).

    Exchanges the ``code`` GET parameter for an access token, fetches the
    Graph profile, then either: returns the existing linked user; creates a
    new UserProfile + FacebookUserProfile for an anonymous visitor; or, for
    logged-in / same-email users, links or redirects to the sync flow.
    Returns a user object, ``None`` on failure, or an HttpResponseRedirect.
    """
    user = request.user or None
    access_token = None
    # assume logging in normal way
    params = {}
    params["client_id"] = CONSUMER_KEY
    params["client_secret"] = CONSUMER_SECRET
    params["redirect_uri"] = request.build_absolute_uri(reverse("facebook_login_done"))
    params["code"] = request.GET.get('code', '')
    url = ("https://graph.facebook.com/oauth/access_token?"
           + urllib.urlencode(params))
    from cgi import parse_qs
    userdata = urllib.urlopen(url).read()
    res_parse_qs = parse_qs(userdata)
    # Could be a bot query
    if not ('access_token') in res_parse_qs:
        return None
    access_token = res_parse_qs['access_token'][-1]
    url = "https://graph.facebook.com/me?access_token=" + access_token
    fb_data = json.loads(urllib.urlopen(url).read())
    uid = fb_data["id"]
    if not fb_data:
        return None
    # An account that already uses the same email address, if any.
    try:
        same_email_user = UserProfile.objects.get(email=fb_data.get('email', None))
    except:
        same_email_user = None
    if user.is_anonymous() and not same_email_user:
        try:
            # Known Facebook account: refresh the token and log in.
            fb_user = FacebookUserProfile.objects.get(facebook_uid=uid)
            fb_user.accesstoken = access_token
            fb_user.save()
            return fb_user.user
        except FacebookUserProfile.DoesNotExist:
            # First visit: create a local account from the Graph profile.
            fb_picture_url = "http://graph.facebook.com/%s/picture?type=large" % uid
            username = fb_data.get('username')
            if not username:
                username = uid
            userProfile = UserProfile.objects.create(username=username)
            userProfile.first_name = fb_data['first_name']
            userProfile.last_name = fb_data['last_name']
            if fb_data['gender'] == "male":
                userProfile.gender = 'M'
            else:
                if fb_data['gender'] == "female":
                    userProfile.gender = 'F'
            userProfile.email = fb_data.get('email', None)
            userProfile.isVerified_email = True
            userProfile.location = fb_data.get('location', fb_data).get('name', None)
            userProfile.save()
            # Download the profile picture into the ImageField.
            img = urllib.urlretrieve(fb_picture_url)
            userProfile.profile_picture.save("Facebook-profile.jpg",
                                             File(open(img[0])))
            urllib.urlcleanup()
            userProfile.facebook_link = fb_data.get('link', None)
            # Set a random password so the account cannot be logged into
            # with an empty one.
            from django.contrib.auth.hashers import make_password
            raw_pass = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(12))
            tmp_pass = make_password(raw_pass)
            userProfile.password = tmp_pass
            userProfile.save()
            fb_profile = FacebookUserProfile(facebook_uid=uid, user=userProfile,
                                             email=fb_data['email'],
                                             url=fb_data['link'],
                                             location=userProfile.location,
                                             accesstoken=access_token)
            fb_profile.save()
            return userProfile
    else:
        # Logged-in user, or the Facebook email matches an existing account.
        try:
            if same_email_user:
                user = same_email_user
            user_facebook = FacebookUserProfile.objects.get(user=user)
            if user_facebook.facebook_uid == uid:
                return user_facebook.user
            else:
                # Different Facebook account than the one linked: hand the
                # token to the sync flow.
                request.session['fb_accesstoken'] = access_token
                next = request.session['next'] or ""
                if next:
                    del request.session['next']
                    return HttpResponseRedirect(next)
                else:
                    return HttpResponseRedirect(reverse('sync_facebook'))
        except FacebookUserProfile.DoesNotExist:
            try:
                user_facebook = FacebookUserProfile.objects.get(facebook_uid=uid)
                request.session['fb_accesstoken'] = access_token
                next = request.session['next'] or ""
                if next:
                    del request.session['next']
                    return HttpResponseRedirect(next)
                else:
                    return HttpResponseRedirect(reverse('sync_facebook'))
            except FacebookUserProfile.DoesNotExist:
                # No link in either direction yet: create one for this user.
                fb_profile = FacebookUserProfile(facebook_uid=uid,
                                                 user=UserProfile.objects.get(username=user.username),
                                                 email=fb_data['email'],
                                                 url=fb_data['link'],
                                                 location=fb_data.get('location', fb_data).get('name', None),
                                                 accesstoken=access_token)
                fb_profile.save()
                return fb_profile.user
def new_html():
    """Modify the html template for data vizualization.

    For each (url, pct) pair from ``data_prep()``, injects a percentage
    bubble <div> after the matching anchor tag and appends per-bubble CSS
    rules (normal + hover) to the template's <style> element. Returns the
    rewritten HTML as a string.
    """
    global template_name
    folder_path = os.path.dirname(os.path.abspath(__file__))
    html_template = os.path.join(folder_path, "templates/blog", template_name)
    with open(html_template, "r",) as html_t:
        html_test = File(html_t)
        html_string = html_test.read()
    link_nb = 0
    colors_hex = color_span()
    for url, pct in data_prep()[0]:
        # Bubble diameter scales with the percentage.
        diameter = diameter_size(pct)
        link_nb += 1
        # Insert the percentage <div> right after the anchor for this url;
        # html_string accumulates the edits across iterations.
        new_string = re.sub(
            r'(<a href="{}".*?>.*?</a>)'.format(url),
            r'\1<div class="url{}">{}%</div>'.format(link_nb, pct),
            html_string,
            flags=re.MULTILINE | re.DOTALL,
        )
        soup = BeautifulSoup(new_string, features="html.parser")
        # Per-bubble CSS: colored circle sized to the percentage.
        soup.style.append(
            """.url{} {{ height: {}px; width: {}px; background-color: {}; border-radius: 50%; display: inline-block; line-height: {}px; text-align: center; vertical-align: middle; color: {};}} """.format(
                link_nb,
                diameter,
                diameter,
                colors_hex[pct - 1],
                diameter,
                colors_hex[pct - 1],
            )
        )
        # Hover variant: black bold label on the same circle.
        soup.style.append(
            """.url{}:hover {{ height: {}px; width: {}px; background-color: {}; border-radius: 50%; display: inline-block; line-height: {}px; text-align: center; vertical-align: middle; color: black; font-weight: bold;}} """.format(
                link_nb,
                diameter,
                diameter,
                colors_hex[pct - 1],
                diameter,
                colors_hex[pct - 1],
            )
        )
        html_string = str(soup)
    return html_string
def _open(self, name, mode='rb'):
    """Open ``name`` from media storage; serve the placeholder image when missing."""
    media_path = os.path.join(settings.MEDIA_ROOT, name)
    if not self.exists(media_path):
        fallback = os.path.join(settings.STATICFILES_DIRS[0], FILE_NOT_FOUND_IMAGE)
        return File(open(fallback, mode))
    return super()._open(name, mode)
def post(self, request, token: str):
    """Finalize bot creation: validate the saved configuration, run the
    builder pipeline, and persist the generated script/config files on a
    new ``Bot`` record (unless one already exists for this token/owner).
    Redirects back to the appropriate wizard step on validation errors.
    """
    path = open_configuration(request, token)
    with open(path, 'r', encoding='utf-8') as file:
        data = json.load(file)
    # The config must contain at least one non-empty section.
    point = False
    if 'text' in data and data['text'] != []:
        point = True
    elif 'reply_markup' in data and data['reply_markup'] != []:
        point = True
    elif 'inline_markup' in data and data['inline_markup'] != []:
        point = True
    elif 'callback' in data and data['callback'] != []:
        point = True
    if not point:
        messages.error(
            request,
            'You having empty configuration... Create some config!')
        return redirect('create_bot_second_step_text_url', token=token)
    username = str(request.user.username)
    # Builders for each config section, combined behind one facade.
    text_builder = TextBuilder(token, username)
    reply_markup_builder = ReplyMarkupBuilder(token, username)
    inline_markup_builder = InlineMarkupBuilder(token, username)
    callback_builder = CallbackBuilder(token, username)
    program = BotFacade(text_builder, reply_markup_builder,
                        inline_markup_builder, callback_builder,
                        token, username, data, request)
    # operation() returns None on success, or (message, kind) describing
    # which button set is broken.
    callback = program.operation()
    if callback is not None:
        messages.error(request, f'You have a problem buttons.'
                       + callback[0])
        if callback[1] == 'reply':
            return redirect('create_bot_second_step_reply_buttons_url', token)
        else:
            return redirect('create_bot_second_step_inline_buttons_url', token)
    some_path = open_test_bot(request=request, token=token)
    # NOTE(review): this reads the script and writes the same content back
    # unchanged — appears to be a no-op rewrite; confirm its purpose.
    with open(some_path, 'r+', encoding='utf-8') as file:
        content_code = file.read()
        file.seek(0)
        file.truncate()
        file.write(content_code)
    file_script_path = open_test_bot(request=request, token=token)
    file_config_path = open_configuration(request=request, token=token)
    current_user = Profile.objects.get(user=request.user)
    access_token = data['access_token']
    current_user_profile = Profile.objects.get(user=request.user)
    # Only create a Bot row if this owner/token pair does not exist yet.
    is_existed_bot = list(
        Bot.objects.filter(access_token=access_token,
                           owner=current_user_profile))
    if is_existed_bot == []:
        bot_object = Bot(owner=current_user,
                         access_token=access_token,
                         title=data['name'],
                         username=data['username'],
                         date_created=timezone.now())
        # ':' is not filesystem-safe, hence the replacement in file names.
        bot_object.file_script.save(
            f"{request.user.username}_{token.replace(':', '_')}"
            "_test_bot.py",
            File(open(file_script_path, encoding="utf8")))
        bot_object.file_config.save(
            f"{request.user.username}_{token.replace(':', '_')}"
            "_configuration.json",
            File(open(file_config_path, encoding="utf8")))
        bot_object.save()
    return redirect('create_bot_third_step_url', token=token)
def populate(self, entry: Entry, form: EntryForm) -> Entry:
    """Copy the cleaned form data onto ``entry``: author, categories,
    acquisitions (with prices), contributors, feeds, and the cover image
    plus a generated thumbnail. Returns the populated entry.
    """
    form.populate(entry)
    if 'author' in form.cleaned_data.keys():
        author, created = Author.objects.get_or_create(
            catalog=self._catalog,
            name=form.cleaned_data['author']['name'],
            surname=form.cleaned_data['author']['surname'])
        entry.author = author
        entry.save()
    if 'categories' in form.cleaned_data.keys():
        # Replace the full category set with the submitted records.
        entry.categories.clear()
        for record in form.cleaned_data.get('categories', []):
            category, created = Category.objects.get_or_create(
                creator=self._creator,
                catalog=self._catalog,
                term=record['term'])
            if created:
                category.label = record.get('label')
                category.scheme = record.get('scheme')
                category.save()
            entry.categories.add(category)
    if 'category_ids' in form.cleaned_data.keys():
        # NOTE(review): this branch clears ``contributors`` but then adds to
        # ``categories`` — looks like a copy-paste slip (probably should
        # clear categories); confirm against the form's contract.
        entry.contributors.clear()
        for contributor in form.cleaned_data.get('category_ids', []):
            entry.categories.add(contributor)
    for record in form.cleaned_data.get('acquisitions', []):
        acquisition = Acquisition(entry=entry,
                                  relation=record.get('relation'),
                                  mime=record['content'].content_type)
        if 'content' in record.keys():
            # Saving the FileField content also persists the acquisition.
            acquisition.content.save(
                f"{uuid.uuid4()}{mimetypes.guess_extension(acquisition.mime)}",
                record['content'])
        for price in record.get('prices', []):
            Price.objects.create(acquisition=acquisition,
                                 currency=price['currency_code'],
                                 value=price['value'])
    if 'contributors' in form.cleaned_data:
        entry.contributors.clear()
        for record in form.cleaned_data.get('contributors', []):
            contributor, is_created = Author.objects.get_or_create(
                catalog=self._catalog,
                name=record['name'],
                surname=record['surname'])
            entry.contributors.add(contributor)
    if 'contributor_ids' in form.cleaned_data.keys():
        entry.contributors.clear()
        for contributor in form.cleaned_data.get('contributor_ids', []):
            entry.contributors.add(contributor)
    if 'feeds' in form.cleaned_data:
        entry.feeds.clear()
        for feed in form.cleaned_data.get('feeds', []):
            entry.feeds.add(feed)
    if 'image' in form.cleaned_data:
        if form.cleaned_data['image'] is None:
            # Explicit null clears the cover image and its thumbnail.
            entry.image = None
            entry.image_mime = None
            entry.thumbnail = None
        else:
            entry.image_mime = form.cleaned_data['image'].content_type
            entry.image.save(
                f"cover{mimetypes.guess_extension(entry.image_mime)}",
                form.cleaned_data['image'])
            # Build an in-memory thumbnail of the uploaded image.
            buffer = BytesIO()
            thumbnail = form.cleaned_data['image'].image.copy()
            thumbnail.thumbnail(settings.OPDS['IMAGE_THUMBNAIL'])
            thumbnail.save(buffer,
                           format=form.cleaned_data['image'].image.format)
            entry.thumbnail.save(
                f"thumbnail{mimetypes.guess_extension(entry.image_mime)}",
                File(buffer))
    return entry
def train(recogntype, facesdbname, facesdbpath, size):
    """Train (or retrain) a face recognizer and persist the YAML model.

    Reads a CSV index of face images under *facesdbpath*, trains the
    recognizer named by *recogntype* ('LBPH', 'KNN', 'EF', 'FF', ...)
    on *size*-scaled faces, writes the trained model to disk, and stores
    it in a RecognizerPreTrainedData row.

    Returns the label -> person mapping used for training.
    """
    frontal_cascade = Cascade.objects.get(pk=1)
    cv_frontal_cascade = cv2.CascadeClassifier(frontal_cascade.xml_file.path)

    # CSV path e.g.: /data/media/faces/SD_Faces/faces/lala.csv
    csv_path = create_csv_file(facesdbpath)
    (face_labelsDict, npfaces, labels) = read_csv_file(recogntype, csv_path,
                                                       cv_frontal_cascade,
                                                       size)

    trained_data_path = os.path.join(settings.MEDIA_ROOT,
                                     'recognizer_train_data')
    if recogntype == 'LBPH':
        pretrained_filepath = os.path.join(trained_data_path, 'MyFaces.yml')
    elif recogntype == 'KNN':
        pretrained_filepath = os.path.join(trained_data_path,
                                           'MyKNNFaces.yml')
    else:
        pretrained_filepath = os.path.join(
            trained_data_path,
            facesdbname.replace('.zip', '') + '_' + str(size[0]) + 'x' +
            str(size[1]) + '_' + recogntype + '.yml')

    recognizer = create_recognizer(recogntype)
    try:
        if (recogntype in ('EF', 'FF')
                or (recogntype == 'LBPH'
                    and not os.path.isfile(pretrained_filepath))):
            log.debug("Creating trained file: {}".format(pretrained_filepath))
            recognizer.train(npfaces, numpy.array(labels))
        else:
            log.debug(
                "Updating the trained file: {}".format(pretrained_filepath))
            # A stale model file exists: drop it and train from scratch.
            os.remove(pretrained_filepath)
            recognizer.train(npfaces, numpy.array(labels))
        recognizer.write(pretrained_filepath)

        # Save the YAML pretrained file to db.
        prtrdata = RecognizerPreTrainedData()
        prtrdata.name = os.path.basename(pretrained_filepath)
        prtrdata.recognizer = recogntype
        with open(pretrained_filepath) as f:
            # NOTE(review): the full filesystem path is passed as the stored
            # file name; Django will flatten it — confirm this is intended.
            prtrdata.yml_file.save(pretrained_filepath, File(f))

        # Save a comma-separated list of the people this model recognizes,
        # keeping label order (label 1 -> faces[0], label 2 -> faces[1], ...).
        # BUG FIX: dict.iteritems() is Python 2 only and raises
        # AttributeError on Python 3; use .items()/.values() instead.
        prtrdata.faces = ", ".join(
            "{}".format(person) for person in face_labelsDict.values())

        # Delete previous entry of MyFaces.yml
        qs = RecognizerPreTrainedData.objects.filter(name='MyFaces.yml')
        if qs.count() > 0:
            qs.delete()
        prtrdata.save()
    except Exception as e:
        log.error(str(e))
        # Bare `raise` preserves the original traceback (the old code had
        # `raise e` followed by an unreachable `return`).
        raise

    return face_labelsDict
# One-off maintenance script: for every article whose icon returns 403 on
# the staging CDN, re-download it from the production CDN and re-save it;
# then report articles whose staging/production icon sizes differ.
articles = Article.objects.all()

for article in articles:
    if not article.icon:
        continue
    res = requests.get(article.icon.url)
    print(res)
    if res.status_code == 403:
        # Staging CDN rejects the request — fall back to the production host.
        url = article.icon.url.replace('stc.1144378.com', 'static.0a5.com')
        res = requests.get(url)
        if res.status_code == 200:
            image_file_name = os.path.basename(article.icon.url)
            img_data = res.content
            with open(image_file_name, 'wb') as handler:
                handler.write(img_data)
            # BUG FIX: File(open(...)) leaked a file handle; the context
            # manager closes it before the temp file is removed.
            with open(image_file_name, 'rb') as saved:
                article.icon.save(image_file_name, File(saved))
            os.remove(image_file_name)

for article in articles:
    if not article.icon:
        continue
    staging_file_size = requests.head(
        article.icon.url).headers['content-length']
    production_file_size = requests.head(
        article.icon.url.replace('stc.1144378.com',
                                 'static.0a5.com')).headers['content-length']
    if staging_file_size != production_file_size:
        print(article)
        url = article.icon.url.replace('stc.1144378.com', 'static.0a5.com')
        res = requests.get(url)
        if res.status_code == 200:
            uuid_prefix = str(uuid.uuid4())[:8]
            image_file_name = (
                f'{uuid_prefix}-{os.path.basename(article.icon.url)}')
            # NOTE(review): img_data is assigned but never used past this
            # point — the script appears truncated here; confirm whether a
            # write/save step is missing.
            img_data = res.content
def synchronize(self, url=None, filename=None, content_type=None,
                file_extras=None, with_user=None):
    """Synchronize this resource with the CKAN instance.

    Builds the CKAN resource payload (metadata, access restrictions and
    upload source) and publishes it, either as *with_user* (via that
    user's API key) or with the default handler.
    """
    # CKAN resource identifier.
    resource_id = str(self.ckan_id)

    # Base properties of the CKAN "package".
    data = {
        'crs': self.crs and self.crs.description or '',
        'name': self.title,
        'description': self.description,
        'data_type': self.data_type,
        'extracting_service': 'False',  # I <3 CKAN
        'format': self.format_type and self.format_type.ckan_format,
        'view_type': self.format_type and self.format_type.ckan_view,
        'id': resource_id,
        'lang': self.lang,
        'restricted_by_jurisdiction': str(self.geo_restriction),
        'url': url and url or '',
        'api': '{}'}

    # TODO: Factoriser
    # (0) No restriction
    if self.restricted_level == 'public':
        restricted = json.dumps({'level': 'public'})
    # (1) Authenticated users only
    elif self.restricted_level == 'registered':
        restricted = json.dumps({'level': 'registered'})
    # (2) Only the explicitly allowed users
    elif self.restricted_level == 'only_allowed_users':
        restricted = json.dumps({
            'allowed_users': ','.join(
                self.profiles_allowed.exists()
                and [p.user.username for p in self.profiles_allowed.all()]
                or []),
            'level': 'only_allowed_users'})
    # (3) Users of this organisation
    elif self.restricted_level == 'same_organization':
        restricted = json.dumps({
            'allowed_users': ','.join(
                get_all_users_for_organisations(
                    self.organisations_allowed.all())),
            'level': 'only_allowed_users'})
    # (4) Users of the listed organisations
    # NOTE(review): identical payload to 'same_organization' — confirm
    # whether this duplication is intentional.
    elif self.restricted_level == 'any_organization':
        restricted = json.dumps({
            'allowed_users': ','.join(
                get_all_users_for_organisations(
                    self.organisations_allowed.all())),
            'level': 'only_allowed_users'})
    else:
        # BUG FIX: previously an unknown level left `restricted` unbound
        # and raised a bare NameError below; fail with a clear error.
        raise ValueError(
            'Unknown restricted_level: {}'.format(self.restricted_level))
    data['restricted'] = restricted

    if self.referenced_url:
        data['url'] = self.referenced_url

    if self.dl_url and filename:
        # NOTE(review): the handle stays open because CKAN reads it during
        # publish; it is never explicitly closed — confirm the publish
        # layer consumes/closes it.
        downloaded_file = File(open(filename, 'rb'))
        data['upload'] = downloaded_file
        data['size'] = downloaded_file.size
        data['mimetype'] = content_type

    if self.up_file and file_extras:
        data['upload'] = self.up_file.file
        data['size'] = file_extras.get('size')
        data['mimetype'] = file_extras.get('mimetype')

    if self.ftp_file:
        if not url:
            data['upload'] = self.ftp_file.file
        data['size'] = self.ftp_file.size
        data['mimetype'] = None  # TODO

    if self.data_type == 'raw':
        if self.ftp_file or self.dl_url or self.up_file:
            data['resource_type'] = 'file.upload'
        elif self.referenced_url:
            data['resource_type'] = 'file'
    if self.data_type == 'annexe':
        data['resource_type'] = 'documentation'
    if self.data_type == 'service':
        data['resource_type'] = 'api'

    ckan_package = CkanHandler.get_package(str(self.dataset.ckan_id))

    if with_user:
        username = with_user.username
        apikey = CkanHandler.get_user(username)['apikey']
        with CkanUserHandler(apikey=apikey) as ckan:
            ckan.publish_resource(ckan_package, **data)
    else:
        return CkanHandler.publish_resource(ckan_package, **data)
def setUp(self):
    """Build the fixtures for the downloadable-product tests.

    Creates a protected file placeholder, a contact with address, an
    order, a DownloadableProduct and its DownloadLink, then walks the
    download-verification redirect so the session holds the download key.
    """
    self.site = Site.objects.get_current()
    #
    # setup the protected dir; since we're using the default storage class,
    # this will point to
    #
    # /path/to/static/protected/
    #
    # where "/path/to/static/" is your settings.MEDIA_ROOT and "protected"
    # is your PRODUCT.PROTECTED_DIR setting.
    #
    self.protected_dir = default_storage.path(
        config_value('PRODUCT', 'PROTECTED_DIR'))
    if not os.path.exists(self.protected_dir):
        os.makedirs(self.protected_dir)
    # setup a temporary file in the protected dir: this is the file that
    # django will use during this test, but we won't use it; close and
    # remove it.
    _file, _abs_path = mkstemp(dir=self.protected_dir)
    os.close(_file)
    os.remove(_abs_path)
    self.file_name = os.path.basename(_abs_path)
    # setup a temporary source dir and source file, using the same file name
    # generated eariler.
    self.dir = mkdtemp()
    # NOTE(review): opened "wb+" and never closed here — presumably closed
    # (and self.dir removed) in tearDown; confirm.
    self.file = open(os.path.join(self.dir, self.file_name), "wb+")
    # a fake SHA
    self.key = "".join(["12abf" for i in range(8)])
    # setup a contact
    c, _created = Contact.objects.get_or_create(
        first_name="Jim",
        last_name="Tester",
        email="*****@*****.**",
    )
    ad, _created = AddressBook.objects.get_or_create(
        contact=c,
        description="home",
        street1="test",
        state="OR",
        city="Portland",
        country=Country.objects.get(iso2_code__iexact='US'),
        is_default_shipping=True,
        is_default_billing=True,
    )
    # setup a order
    o, _created = Order.objects.get_or_create(
        contact=c, shipping_cost=Decimal('6.00'), site=self.site)
    # setup download
    self.product, _created = DownloadableProduct.objects.get_or_create(
        product=Product.objects.get(slug='dj-rocks'),
        file=File(self.file),
        num_allowed_downloads=3,
        expire_minutes=1,
    )
    self.product_link, _created = DownloadLink.objects.get_or_create(
        downloadable_product=self.product,
        order=o,
        key=self.key,
        num_attempts=0,
        time_stamp=timezone.now())
    if django.VERSION < (1, 10):
        # setup client
        # On Django >= 1.10 the TestCase default self.client is used instead.
        self.domain = 'satchmoserver'
        self.client = Client(SERVER_NAME=self.domain)
    # go through the verification step
    self.pd_url = reverse('satchmo_download_send',
                          kwargs={'download_key': self.key})
    pd_process_url = reverse('satchmo_download_process',
                             kwargs={'download_key': self.key})
    # first, hit the url.
    response = self.client.get(self.pd_url)
    # Pre-1.10 Django returned absolute redirect URLs; newer versions
    # return relative ones, hence the version split below.
    if django.VERSION >= (1, 10):
        self.assertEqual(response['Location'], pd_process_url)
    else:
        self.assertEqual(response['Location'],
                         'http://%s%s' % (self.domain, pd_process_url))
    # follow the redirect to "process" the key.
    response = self.client.get(response['Location'])
    self.assertEqual(self.client.session.get('download_key', None), self.key)
def uvozi_brez_siframi(imedatoteke, imeskupine, sheet_name):
    """Import products from an Excel workbook into the given product group.

    Reads ``products/<imedatoteke>.xlsx`` sheet *sheet_name* starting at
    row 5, creating the product group, tags and products (with a
    placeholder image) as needed.
    """
    print('\x1b[6;30;42m' + 'Začetek uvažanja ' + imedatoteke + '\x1b[0m')

    # Get or create the product group.
    if SkupinaIzdelkov.objects.filter(ime=imeskupine).exists():
        skupina_izdelkov = SkupinaIzdelkov.objects.get(ime=imeskupine)
    else:
        skupina_izdelkov = SkupinaIzdelkov(ime=imeskupine)
        skupina_izdelkov.save()

    wb = load_workbook('products/' + imedatoteke + '.xlsx')
    sheet = wb[sheet_name]

    # NOTE(review): range(5, sheet.max_row) skips the last (inclusive)
    # openpyxl row; with the break-on-empty guard below this may be
    # intentional — confirm against the spreadsheet layout.
    for row in range(5, sheet.max_row):
        if sheet.cell(row, 3).value is None:
            break
        artikel = sheet.cell(row, 3).value
        ean_koda = sheet.cell(row, 5).value
        opis = sheet.cell(row, 4).value
        opis_dimenzij = sheet.cell(row, 6).value
        opis = opis + ", " + opis_dimenzij
        tag = sheet.cell(row, 7).value
        tag = tag.split(", ")
        # Drop empty strings in case of stray separators in the cell.
        list_tag = list(filter(None, tag))

        # Get or create each tag.
        object_list_tag = []
        for en_tag in list_tag:
            if Tag.objects.filter(ime=en_tag).exists():
                object_list_tag.append(Tag.objects.get(ime=en_tag))
            else:
                new_tag = Tag(ime=en_tag)
                new_tag.save()
                object_list_tag.append(new_tag)

        # Create the product.
        new_izdelek = Izdelek(ime=artikel,
                              opis=opis,
                              skupina_izdelkov=skupina_izdelkov,
                              koda=ean_koda,
                              ean_koda=ean_koda)
        pot_do_slike = "products/slike/nislike.jpg"
        # BUG FIX: File(open(...)) previously leaked the image handles;
        # context managers close them after the field save.
        try:
            with open(pot_do_slike, 'rb') as slika:
                new_izdelek.slika.save("nislike.jpg", File(slika))
        except IOError:
            # Placeholder missing — fall back to the logo image.
            with open("products/slike/logo.jpg", 'rb') as logo:
                new_izdelek.slika.save("logo.jpg", File(logo))
        new_izdelek.save()
        new_izdelek.tag.add(*object_list_tag)
        new_izdelek.save()

    print('\x1b[6;30;42m' + 'Konec uvažanja izdelkov ' + imedatoteke
          + '\x1b[0m')
    return
def ImportMovie(title, typeList, length, origin, company, director, content,
                tagList, actorList, time, imdb, tmdb, language, originId,
                score, cover=None):
    """Import a movie with its types, region, tags, actors and cover.

    Skips the import when a movie with the same title, length and
    director already exists. *typeList* and *origin* are folded into
    bitmasks; *cover* (a URL) is downloaded and stored as the poster.
    """
    # BUG FIX: `temp` was only bound inside the typeList branch, causing a
    # NameError in the region branch when typeList was empty; bind it once.
    temp = 1

    # Fold the type names into a bitmask (default bit 0 when unknown).
    if typeList:
        finalType = 0
        for movie_type in typeList:
            typeNo = GetNoOfMovieType(movie_type)
            finalType = finalType | (temp << typeNo)
    else:
        finalType = 1

    # Fold the regions into a bitmask (default 16 when unknown).
    if origin:
        finalRegion = 0
        for region in origin:
            regionCode = GetReigionCode(region)
            finalRegion = finalRegion | (temp << regionCode)
    else:
        finalRegion = 16

    # Fill in defaults for missing metadata.
    if company is None:
        company = '未知'
    if not imdb:
        imdb = 0
    if not content:
        content = '无'

    # Create (or look up) the director record.
    directorInstance = CreateActorConn(director)

    # Already imported — nothing to do.
    movieQuery = Movie.objects.filter(MovName=title,
                                      MovLength=length,
                                      MovDirector=directorInstance)
    if movieQuery.exists():
        return

    movieInstance = Movie.objects.create(MovName=title,
                                         MovLength=length,
                                         MovOrigin=finalRegion,
                                         MovType=finalType,
                                         MovCompany=company,
                                         MovDirector=directorInstance,
                                         MovDescription=content,
                                         MovDate=time,
                                         MovImdbId=imdb,
                                         MovTmdbId=tmdb,
                                         MovOriginId=originId,
                                         MovLanguage=language)

    # Seed the score when one was supplied.
    if score != 0:
        movieInstance.MovScore = score
        movieInstance.MovScoreCount = 1
        movieInstance.save()

    # Download and attach the cover image.
    if cover:
        # BUG FIX: the header key was 'user - agent' (with spaces and a
        # backslash-littered value), so no valid User-Agent was sent.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/83.0.4103.116 Safari/537.36'
        }
        r = requests.get(cover, headers=headers)
        cover_buffer = BytesIO(r.content)  # renamed: `io` shadowed the stdlib module
        cover_file = File(cover_buffer)
        movieInstance.MovImg.save(
            "{0}.{1}".format(title, cover.split('.').pop()), cover_file)

    movieInstance.save()

    # Attach tags, creating missing ones and skipping existing links.
    if tagList:
        for tag in tagList:
            queryResult = MovieTag.objects.filter(MovTagCnt=tag)
            if not queryResult.exists():
                tagInstance = MovieTag.objects.create(MovTagCnt=tag)
                tagInstance.save()
            else:
                tagInstance = queryResult[0]
            tempQuery = MovTagConnection.objects.filter(
                MovId=movieInstance, MovTagId=tagInstance)
            if tempQuery.exists():
                print('already exist:', title, '$', tag)
                continue
            else:
                connInstance = MovTagConnection.objects.create(
                    MovId=movieInstance, MovTagId=tagInstance)
                connInstance.save()

    # Attach actors, skipping existing connections.
    if actorList:
        for actor in actorList:
            actorInstance = CreateActorConn(actor)
            tempQuery = ActorConnection.objects.filter(
                MovId=movieInstance, ActorId=actorInstance)
            if tempQuery.exists():
                continue
            else:
                actConn = ActorConnection.objects.create(
                    MovId=movieInstance, ActorId=actorInstance)
                actConn.save()

    print('import movie:', movieInstance.MovId, " ", title)
def handle(self, *args, **options):
    """Management-command entry point.

    Downloads Sentinel products for the requested area/time window,
    patches the band rasters, runs the configured analyses and stores
    the results on Week/Analysis records.
    """
    user = options['user']
    passw = options['password']
    # BUG FIX: the chained assignment listed required_date twice;
    # initialize each flag exactly once.
    required_date = required_week = required_year = 0
    if options['date']:
        required_date = int(options['date'])
    if options['week']:
        required_week = int(options['week'])
    if options['year']:
        required_year = int(options['year'])
    area_name = options['area']
    clouds = options['clouds']

    self.api = SentinelAPI(user, passw, 'https://scihub.copernicus.eu/dhus')

    # Resolve the area by numeric id first, then by name.
    # BUG FIX: the bare `except:` clauses are narrowed — int() raises
    # ValueError for non-numeric names, .get() raises Area.DoesNotExist.
    try:
        self.area = Area.objects.get(id=int(area_name))
    except (ValueError, Area.DoesNotExist):
        try:
            self.area = Area.objects.get(name=area_name)
        except Area.DoesNotExist:
            self.stdout.write(
                self.style.ERROR(
                    'Given area <{}> does not exist'.format(area_name)))
            sys.exit(1)

    (starting_date, end_date, week_nr) = self._get_dates(
        required_year, required_week, required_date)

    products = self.get_products(starting_date, end_date, self.area,
                                 clouds=clouds)
    if not len(products.items()):
        # TODO save empty week maybe?
        self.stdout.write(
            self.style.WARNING(
                'There is no data for given time period ' +
                '<{start}, {end}>, '.format(start=starting_date,
                                            end=end_date) +
                'maximal cloud cover <{cloud}%> and area <{area}>'.format(
                    area=area_name, cloud=clouds)))
        return

    self.tempdir = tempfile.mkdtemp()
    _TO_BE_CLEANED.append(self.tempdir)
    self.api.download_all(products, self.tempdir)

    products_data = self.get_bands(products)
    patched_bands = self._patch_rasters(products_data)
    analysed_data = self._analyse(patched_bands)

    # Reuse the existing Week row for this date/area if there is one.
    if Week.objects.filter(date=starting_date, area=self.area).count() == 0:
        week = Week(
            date=starting_date,
            area=self.area,
        )
    else:
        week = Week.objects.get(date=starting_date, area=self.area)
    week.cutline = self.cutline_geom.wkt

    for band in patched_bands:
        band_key = band.lower()
        # BUG FIX: replaced eval("week.{}".format(...)) with getattr and
        # close each raster handle via a context manager.
        with open(patched_bands[band], "rb") as band_file:
            getattr(week, band_key).save(
                os.path.basename(patched_bands[band]),
                File(band_file),
                save=True)
    week.save()

    for an in analysed_data:
        at = AnalysisType.objects.get(name=an)
        if Analysis.objects.filter(week=week, type=at).count() == 0:
            analysis = Analysis(week=week, type=at)
        else:
            analysis = Analysis.objects.get(week=week, type=at)
        # Attach whichever artifacts the analysis produced.
        if analysed_data[an]["raster"]:
            with open(analysed_data[an]["raster"], "rb") as raster_file:
                analysis.raster.save(
                    os.path.basename(analysed_data[an]["raster"]),
                    File(raster_file),
                    save=True)
        if analysed_data[an]["image"]:
            with open(analysed_data[an]["image"], "rb") as image_file:
                analysis.image.save(
                    os.path.basename(analysed_data[an]["image"]),
                    File(image_file),
                    save=True)
        if analysed_data[an]["vector"]:
            with open(analysed_data[an]["vector"], "rb") as vector_file:
                analysis.vector.save(
                    os.path.basename(analysed_data[an]["vector"]),
                    File(vector_file),
                    save=True)
        analysis.save()

    self.stdout.write(
        self.style.SUCCESS('Successfully create data for week {}'.format(
            week.week)))