def handle(self, *args, **kwargs):
    """Regenerate thumbnails for every stored Screenshot.

    For each screenshot, opens the source image from static storage,
    converts it to RGB if needed, scales it down (width-limited), and
    replaces any existing thumbnail in static storage.
    """
    for screenshot in models.Screenshot.load():
        print(screenshot.name)
        img = Image.open(staticfiles_storage.path(screenshot.path))
        # Ensure we're in RGB before thumbnailing/saving.
        if img.mode not in ('L', 'RGB'):
            img = img.convert('RGB')
        # The thumbnail is only limited by width, so use a
        # larger-than-needed height.
        img.thumbnail((200 * screenshot.screens, 1000), Image.ANTIALIAS)
        # mkstemp() returns an already-open OS descriptor; wrap it rather
        # than re-opening the path.
        fd, tmp = tempfile.mkstemp()
        try:
            # PIL format name ("png", "jpeg", ...) derived from the MIME
            # type.  Renamed from `type`/`file` to stop shadowing builtins.
            img_format = mimetypes.guess_type(screenshot.name)[0].split('/')[1]
            with os.fdopen(fd, 'w+b') as tmp_file:
                img.save(tmp_file, img_format)
            # Nuke the previous version if it exists.
            if staticfiles_storage.exists(screenshot.thumbnail):
                staticfiles_storage.delete(screenshot.thumbnail)
            # Save the thumbnail into the static storage backend.
            with open(tmp, 'rb') as fh:
                staticfiles_storage.save(screenshot.thumbnail, File(fh))
        finally:
            # Always remove the temp file, even when a save step raised
            # (the original leaked it on any error).
            os.unlink(tmp)
def read_keywords(index_choices, keyword_filename):
    """Load search keywords for the 'feedly' index from a data file.

    Populates ``models.search_keywords[index_choice]`` with one keyword per
    line and stores a comma-separated query string (multi-word keywords are
    double-quoted) on ``models.FeedlySeekerView``.

    Returns True on success, False if the keyword file cannot be read.
    """
    for index_choice in index_choices:
        if index_choice != 'feedly':
            continue
        models.search_keywords[index_choice] = []
        keyword_file = os.path.join(BASE_DIR, 'data/' + keyword_filename)
        quoted = []
        try:
            # `with` guarantees the handle is closed even on error; the
            # original leaked it when iteration raised.
            with open(keyword_file, 'r') as f:
                for line in File(f):
                    keyword = line.rstrip('\n')
                    models.search_keywords[index_choice].append(keyword)
                    # Multi-word keywords must be quoted in the query string.
                    if ' ' in keyword:
                        keyword = '"' + keyword + '"'
                    quoted.append(keyword)
        except (IOError, OSError):  # narrowed from a bare `except`
            print("read_keywords: working directory is: ", os.getcwd())
            print("read_keywords: keyword_file: ", keyword_file)
            return False
        models.FeedlySeekerView.facets_keyword[0].read_keywords = ','.join(quoted)
    return True
def post(self, request, filename, format=None):
    """Create a Map from an uploaded data file, render it, and return its URL.

    Saves the uploaded file onto a new Map record, draws an elevation image
    from it, attaches the image to the Map, then removes the intermediate
    rendered file from disk.  Returns 201 with the new pk and image URL.
    """
    data_file = request.data['file']
    file = data_file.open(mode='r+b')
    new_map = Map.objects.create()
    # Persist the raw upload under a pk-derived name.
    new_map.data.save(name='data' + str(new_map.pk) + '.txt', content=file)
    usableData = new_map.data.open(mode='r+b')
    print(usableData)
    # Draw renders the elevation map and returns the path of the PNG it wrote.
    test_map = Draw(usableData, new_map.pk)
    file_path = test_map.draw_map()
    print(file_path)
    new_map_image_file = open(Path(file_path), 'rb')
    f = File(new_map_image_file)
    new_map.image.save(name='elevation_map' + str(new_map.pk) + '.png', content=f)
    new_map.save()
    print(new_map.image.url)
    f.close()
    new_map_image_file.close()
    usableData.close()
    # The rendered PNG now lives in media storage; drop the temp copy.
    os.remove(file_path)
    r = {"newMap": {"pk": str(new_map.pk), "image": new_map.image.url}}
    return Response(r, status=status.HTTP_201_CREATED)
def form_valid(self, form):
    """Import every image contained in an uploaded ZIP archive into the gallery.

    Oversized members are skipped with an error message; non-image members
    are skipped silently.  The staging directory is removed afterwards.
    """
    context = self.get_context_data()
    gallery = context['gallery']
    archive = self.request.FILES["file"]
    # Unpack into a per-request temp directory named after the current time.
    temp = os.path.join(tempfile.gettempdir(), str(time.time()))
    if not os.path.exists(temp):
        os.makedirs(temp)
    # NOTE(review): mode "a" on an archive that is only read looks
    # accidental — confirm "r" was not intended here.
    zfile = zipfile.ZipFile(archive, "a")
    for i in zfile.namelist():
        filename = os.path.split(i)[1]
        ph_temp = os.path.abspath(os.path.join(temp, os.path.basename(i)))
        if filename.strip() == "":  # don't deal with directory
            continue
        # Extract the member to the staging directory.
        f_im = open(ph_temp, "wb")
        f_im.write(zfile.read(i))
        f_im.close()
        title = os.path.basename(i)
        # If the file is larger than the configured limit, skip it with a
        # user-facing error message.
        if os.stat(ph_temp).st_size > settings.ZDS_APP['gallery']['image_max_size']:
            messages.error(
                self.request,
                _(u'Votre image "{}" est beaucoup trop lourde, réduisez sa taille à moins de {:.0f}'
                  u'Kio avant de l\'envoyer.').format(
                    title, settings.ZDS_APP['gallery']['image_max_size'] / 1024))
            continue
        # If PIL cannot open it, it's not an image: skip it.
        try:
            ImagePIL.open(ph_temp)
        except IOError:
            continue
        # Create the picture in the database.
        f_im = File(open(ph_temp, "rb"))
        f_im.name = title
        pic = Image()
        pic.gallery = gallery
        pic.title = title
        pic.pubdate = datetime.now()
        pic.physical = f_im
        pic.save()
        f_im.close()
        # Remove the staged copy once persisted.
        if os.path.exists(ph_temp):
            os.remove(ph_temp)
    zfile.close()
    if os.path.exists(temp):
        shutil.rmtree(temp)
    return redirect(gallery.get_absolute_url())
def add_article_full_text_from_file(abs_path, pmid, html_table_list, overwrite_existing=False):
    """Attach the full text at `abs_path` to the article with the given PMID.

    Skips articles that already have non-empty full text.  Also imports the
    given HTML tables and text-mines article-level metadata.  Errors are
    logged to stdout rather than raised (best-effort batch import).
    """
    a = add_single_article_full(int(pmid), overwrite_existing)
    if a is None:
        return None
    # Does the article already have full text associated with it?
    if m.ArticleFullText.objects.filter(article__pmid=pmid).count() > 0:
        aft = m.ArticleFullText.objects.get(article=a)
        if len(aft.get_content()) > 0:
            # print() form works on both Python 2 and 3.
            print("Article %s full text already in db, skipping..." % pmid)
            return None
    try:
        print('adding article %s' % (pmid))
        # `with` closes the raw handle even if saving fails; the original
        # only closed the Django File wrapper, leaking on error paths.
        with open(unicode(abs_path), 'r') as f:
            file_ob = File(f)
            os.chdir(settings.PROJECT_BASE_DIRECTORY)
            aft = m.ArticleFullText.objects.get_or_create(article=a)[0]
            aft.full_text_file.save(pmid, file_ob)
        for table in html_table_list:
            add_table_ob_to_article(table, a, text_mine=True)
        # Text-mine article-level metadata.
        apply_article_metadata(a)
    except Exception as e:  # `as` form replaces Py2-only `except Exception, e`
        print(e)
        print(pmid)
def transpose_image(request, fileobjects, operation):
    """Apply a PIL transpose `operation` (rotate/flip) to each file object,
    overwriting the stored image in place and dropping its cached versions."""
    for fileobject in fileobjects:
        root, ext = os.path.splitext(fileobject.filename)
        f = fileobject.site.storage.open(fileobject.path)
        im = Image.open(f)
        new_image = im.transpose(operation)
        tmpfile = File(NamedTemporaryFile())
        try:
            # GIFs reject PIL's optimize pass, so skip it for them.
            new_image.save(tmpfile,
                           format=Image.EXTENSION[ext],
                           quality=VERSION_QUALITY,
                           optimize=(os.path.splitext(
                               fileobject.path)[1].lower() != '.gif'))
        except IOError:
            # Some formats reject the optimize flag entirely; retry without it.
            new_image.save(tmpfile,
                           format=Image.EXTENSION[ext],
                           quality=VERSION_QUALITY)
        try:
            saved_under = fileobject.site.storage.save(fileobject.path, tmpfile)
            # Storage may have deduplicated the name; move the result back
            # onto the original path so the object keeps its identity.
            if saved_under != fileobject.path:
                fileobject.site.storage.move(saved_under,
                                             fileobject.path,
                                             allow_overwrite=True)
            fileobject.delete_versions()
        finally:
            tmpfile.close()
            f.close()
    messages.add_message(
        request, messages.SUCCESS,
        _("Action applied successfully to '%s'" % (fileobject.filename)))
def form_valid(self, form):
    """Write the application's config.py from the submitted form values.

    Each cleaned field is serialized as a `NAME = 'value'` assignment line,
    followed by the derived PDF sub-directory constants.
    """
    pdf_root = "PDF_ROOT = '" + form.cleaned_data['pdf_root'] + "'"
    # BUG FIX: the original emitted `CARPETA_FACTURAS'value'` (missing
    # " = ") and reused the CARPETA_FACTURAS name for the pedidos entry,
    # producing an unparsable/incorrect config file.
    carpeta_facturas = "CARPETA_FACTURAS = '" + form.cleaned_data['carpeta_facturas'] + "'"
    carpeta_pedidos = "CARPETA_PEDIDOS = '" + form.cleaned_data['carpeta_pedidos'] + "'"
    carpeta_diarios = "CARPETA_DIARIOS = '" + form.cleaned_data['carpeta_diarios'] + "'"
    email_entrada = "EMAIL_ENTRADA = '" + form.cleaned_data['email_entrada'] + "'"
    email_salida = "EMAIL_SALIDA = '" + form.cleaned_data['email_salida'] + "'"
    facturas = "PDF_FACTURAS = PDF_ROOT + 'facturas/' "
    pedidos = "PDF_PEDIDOS = PDF_ROOT + 'pedidos/' "
    lines = [pdf_root, carpeta_facturas, carpeta_pedidos, carpeta_diarios,
             facturas, pedidos, email_entrada, email_salida]
    # Raw string: the old literal contained "\N", which is a SyntaxError in
    # Python 3 (and other accidental backslash escapes).
    with open(r'C:\Apache24\htdocs\Norte\FacturasNorte\config.py', 'w') as f:
        pdf_file = File(f)
        # One write per line, terminated with '\n', same layout as before.
        pdf_file.write('\n'.join(lines) + '\n')
    return render(self.request, 'FacturasNorte/admin/config_success.html')
def setUpTestData(cls):
    """Load shared fixtures and build the group plus QGIS project used by the tests."""
    fixtures = (
        'BaseLayer.json',
        'G3WMapControls.json',
        'G3WSpatialRefSys.json',
        'G3WGeneralDataSuite.json',
    )
    for fixture in fixtures:
        call_command('loaddata', fixture, '--database=default', verbosity=0)
    setup_testing_user(cls)
    cls.project_group = CoreGroup(
        name='Group1',
        title='Group3857',
        header_logo_img='',
        srid=G3WSpatialRefSys.objects.get(auth_srid=3857))
    cls.project_group.save()
    # The project file stays open for the whole QgisProject lifecycle below.
    with open(QGS_FILE, 'r', encoding='UTF8') as fh:
        qgis_project_file = File(fh)
        cls.project = QgisProject(qgis_project_file)
        cls.project.title = 'Transaction group test project'
        cls.project.group = cls.project_group
        cls.project.save()
    cls.admin01 = User.objects.get(username='******')
def getRoute(request):
    """Build a POI graph from the posted route request and return, as JSON,
    a visiting order computed by the algorithm named in the query string.

    Supported algorithms: 'onlytime' (sorted via-points), 'salesman',
    'mintime', 'deviation'; anything else falls back to Prim's algorithm.
    """
    algorithm = request.GET.get('algorithm')
    reqCoordType = request.GET.get('reqCoordType')
    g = PoiGraph(reqCoordType)
    result = None
    received_json_data = json.loads(request.body)
    # Persist the raw request under a random id so it can be replayed later;
    # the id is echoed back in the response payload.
    unique_id = get_random_string(length=32)
    tmp_dir = settings.TAYO_TMP_DIR
    with open(tmp_dir + unique_id, 'w') as f:
        jsonfile = File(f)
        json.dump(received_json_data, jsonfile)
        jsonfile.close()
    # Start and end vertices carry zero passengers.
    g.add_vertex(received_json_data['startName'], received_json_data['startX'],
                 received_json_data['startY'], 0)
    for viaPoint in received_json_data['viaPoints']:
        # Default to one passenger when the count key is absent.
        if 'viaPoints' not in viaPoint:
            numPassenger = '1'
        else:
            numPassenger = viaPoint['viaPointNumPassenger']
        g.add_vertex(viaPoint['viaPointName'], viaPoint['viaX'],
                     viaPoint['viaY'], numPassenger)
    g.add_vertex(received_json_data['endName'], received_json_data['endX'],
                 received_json_data['endY'], 0)
    g.set_everyweight()
    #return HttpResponse("XXX")
    if (algorithm == 'onlytime'):
        # Simple ordering: start, via-points sorted by cmp_viapoints, end.
        # NOTE(review): list.sort(cmp_fn) is Python 2 only — confirm runtime.
        result = list()
        result.append(received_json_data['startName'])
        viaPoints = received_json_data['viaPoints']
        viaPoints.sort(cmp_viapoints)
        for viaPoint in viaPoints:
            result.append(viaPoint['viaPointName'])
        result.append(received_json_data['endName'])
    elif (algorithm == 'salesman'):
        result = travelling_salesman(
            g, g.get_vertex(received_json_data['startName']),
            g.get_vertex(received_json_data['endName']))
    elif (algorithm == 'mintime'):
        result = mintime_passenger(
            g, g.get_vertex(received_json_data['startName']),
            g.get_vertex(received_json_data['endName']))
    elif (algorithm == 'deviation'):
        result = standard_deviation(
            g, g.get_vertex(received_json_data['startName']),
            g.get_vertex(received_json_data['endName']))
    else:
        result = prim(g, g.get_vertex(received_json_data['startName']),
                      g.get_vertex(received_json_data['endName']))
    #xml = getRouteSequential_in(result, received_json_data)
    jsonobj = g.get_json({'unique_id': unique_id}, result)
    return HttpResponse(jsonobj)
def get(self, request):
    """Dump all WordDetail rows to MEDIA_ROOT/wordsList.txt.

    Each line is "word - meaning - example".  Returns a trivial JSON body
    once the export completes.
    """
    with open(settings.MEDIA_ROOT + "/wordsList.txt", "w+") as f:
        myfile = File(f)
        # Iterate the queryset directly instead of indexing by position
        # (the old range(len(...)) loop); dead commented-out import code
        # removed.
        for detail in WordDetail.objects.all():
            myfile.write(detail.word + " - " + detail.meaning + " - " +
                         str(detail.example) + "\n")
    return Response({'wordsJson': "la"}, content_type='application/json')
def post(self, request):
    """Store a new word (word/meaning/example/date) and mirror it into the
    on-disk wordsList.txt, then return all words as JSON.

    Returns HTTP 403 when anything in the pipeline raises.
    """
    try:
        word = request.POST.get('word')
        meaning = request.POST.get('meaning')
        date = request.POST.get('date')
        example = request.POST.get('example')
        wordsRow, created = WordDetail.objects.get_or_create(
            word=word,
            meaning=meaning,
            example=example,
            dateEntered=date)
        print(created)
        print("example saved")
        if created:
            # Append the new row to the flat file as "word - meaning - example".
            with open(settings.MEDIA_ROOT + "/wordsList.txt", "a+") as f:
                myfile = File(f)
                myfile.write(word + " - " + meaning + " - " + example + "\n")
                print(myfile.closed)
                myfile.close()
                print(myfile.closed)
                f.close()
        else:
            print(created)
        wordsList = WordDetail.objects.all()[:]
        wordsJson = wordsList.values()[:]
        return Response(wordsJson, content_type='application/json')
    except Exception as e:
        traceback.print_exc()
        print(e)
        return HttpResponse(status=403)
def picnik(request, name=None):
    """Download an edited image returned by the Picnik service, store it as a
    TempImage, and render the admin picnik template.

    Only URLs matching the Picnik host are fetched; otherwise `ti` stays None.
    """
    furl = request.REQUEST.get('file', '')
    ti = None
    # BUG FIX: escape the dots — the old unescaped pattern's "." matched any
    # character, accepting hosts like "wwwXpicnik".
    if re.search(r"http://www\.picnik", furl):
        f = urllib.urlopen(furl)
        fname = datetime.datetime.now().strftime("%a%d%b%Y%H%M%S%f.jpg")
        fp = os.path.join('/tmp', fname)
        ti = pmodels.TempImage()
        # Stream the download to disk in 64 KiB chunks; `with` closes the
        # handle even if the network read fails mid-stream.
        with open(fp, 'wb') as fh:
            for data in iter(lambda: f.read(64 * 2 ** 10), ''):
                fh.write(data)
        f.close()
        # Re-open read-only and hand the bytes to the image field.
        with open(fp, 'rb') as raw:
            ti.image.save(fname, File(raw), save=True)
        ti.save()
        os.remove(fp)
    return request.render_to_response('admin/people/tempimage/picnik.html',
                                      {'ti': ti, 'name': name})
def sending_picture(picture):
    """Persist `picture` (SVG markup) to a scratch file and upload it.

    Returns the Cloudinary upload result for the written SVG.
    """
    # Random hex keeps concurrent uploads from clobbering each other.
    path = 'd:/image_tshirts/' + secrets.token_hex(6) + '.svg'
    # `with` closes the handle even if the write fails (the original leaked
    # it on error paths).
    with open(path, 'w') as f:
        File(f).write(picture)
    return cloudinary.uploader.upload_image(path)
def test_add_new_chains_with_multiple_seeds(self):
    """Posting one chain form with two seed files creates a single chain
    holding two seed messages."""
    game = mommy.make(Game)
    add_chains_url = reverse('new_chains', kwargs={'pk': game.pk})
    add_chains_url += '?num_seeds_per_chain=2'
    new_chain_name = 'new chain name'
    # `with` closes both seed files even if the POST raises (the original
    # leaked them on failure).
    with open(self.audio_path, 'rb') as fh0, open(self.audio_path, 'rb') as fh1:
        seed0 = File(fh0)
        seed1 = File(fh1)
        new_chain_formset_data = {
            'form-TOTAL_FORMS': '1',
            'form-INITIAL_FORMS': '0',
            'form-MAX_NUM_FORMS': '',
            'form-0-game': game.pk,
            'form-0-name': new_chain_name,
            'form-0-seed0': seed0,
            'form-0-seed1': seed1,
        }
        self.client.post(add_chains_url, new_chain_formset_data)
    # assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(game.chains.count(), 1)
    chain = game.chains.first()
    self.assertEqual(chain.name, new_chain_name)
    self.assertEqual(chain.messages.count(), 2)
def settings(request):
    """User settings view: update profile fields, password, and avatar.

    On POST, validates SettingsForm; a submitted avatar is resized to
    300x300 and stored under a salted-SHA1 name.  On GET (or invalid POST),
    renders the form with the current/submitted data.
    """
    if not request.user.is_authenticated():
        return HttpResponseForbidden("<h1>Forbidden</h1><p>user not authenticated</p>")
    c = {}
    c.update(csrf(request))
    if request.method == 'POST':
        settingsForm = SettingsForm(request.POST, request.FILES)
        if settingsForm.is_valid():
            avatarFile = settingsForm.cleaned_data['avatarFile']
            if avatarFile:
                userProfile = UserProfile.objects.get(user=request.user)
                if userProfile.avatar:
                    userProfile.avatar.delete(save=False)  # deleting old avatar
                # Creating the new avatar, with a salted-sha1 name.
                hashingFunc = sha1()
                hashingFunc.update(hashSalt + str(request.user.id))
                avatarFileName = "%s.%s" % (hashingFunc.hexdigest(),
                                            avatarFile.content_type.split('/')[1])
                originalImage = Image.open(avatarFile)
                resizedImage = ImageOps.fit(originalImage, (300, 300), Image.ANTIALIAS)
                tmpDest = os.path.join(MEDIA_ROOT, 'avatars', 'tmp' + avatarFileName)
                resizedImage.save(tmpDest)
                # BUG FIX: the resized avatar is binary image data; it was
                # previously re-opened in text mode, which corrupts bytes on
                # Windows and fails to decode on Python 3.  'rb' + `with`
                # also closes the handle even if saving fails.
                with open(tmpDest, 'rb') as f:
                    userProfile.avatar.save(avatarFileName, File(f))
                os.remove(tmpDest)
            ########
            user = request.user
            user.first_name = settingsForm.cleaned_data['first_name']
            user.last_name = settingsForm.cleaned_data['last_name']
            user.email = settingsForm.cleaned_data['email_address']
            user.userprofile.bio = settingsForm.cleaned_data['bio']
            # Only change the password when the old one was supplied too.
            if settingsForm.cleaned_data['old_password']:
                if settingsForm.cleaned_data['new_password']:
                    user.set_password(settingsForm.cleaned_data['new_password'])
            user.save()
            user.userprofile.save()
            return HttpResponseRedirect('/settings/')
    else:
        settingsForm = SettingsForm({
            'userId': str(request.user.id),
            'first_name': request.user.first_name,
            'last_name': request.user.last_name,
            'email_address': request.user.email,
            'bio': request.user.userprofile.bio
        })
    sectionTitle = inspect.stack()[0][3]
    c.update({'sectionTitle': sectionTitle,
              'sections': sections,
              'settingsForm': settingsForm})
    return render_to_response('%s.html' % sectionTitle, c,
                              context_instance=RequestContext(request))
async def archive_channel(context: DiscordContext, channel_id: str, finished: bool = False):
    """Archives a certain channel.

    Exports the channel contents to a file, deletes the Discord channel,
    and stores the export as an Archive record.  Optionally marks the
    playthrough as finished.

    :param context: The Discord Context.
    :param channel_id: The ID of the channel to archive.
    :param finished: When truthy, flag the channel record as finished."""
    channel_obj = await sync_to_async(
        Channel.objects.filter(id=int(channel_id)).first)()
    if channel_obj is None:
        await context.send(
            f'<#{channel_id}> is seemingly not a playthrough channel.')
        return
    await context.send(f'Archiving <#{channel_id}>...')
    try:
        exported_channel_file_path = export_channel(channel_id)
        # NOTE(review): opened in text mode — confirm the export really is
        # text; a binary export would need 'rb' here.
        exported_channel_file = File(file=open(exported_channel_file_path),
                                     name=exported_channel_file_path.name)
    except Exception as e:
        logger.error(e)
        await context.send(("Error occurred when archiving the channel, "
                            "please check logs for more information. "
                            "\nThe channel has not been deleted."))
        return
    # Only delete the live channel once the export succeeded.
    channel_in_guild = get(context.guild.channels, id=int(channel_id))
    await channel_in_guild.delete()
    await sync_to_async(Archive.objects.create)(channel=channel_obj,
                                                file=exported_channel_file)
    exported_channel_file.close()
    # The export path supports .unlink(), i.e. it is path-like; remove the
    # temporary export file from disk.
    exported_channel_file_path.unlink()
    if finished:
        channel_obj.finished = finished
        await sync_to_async(channel_obj.save)()
    await context.send('Archived the channel.')
def well_known(request): location = request.META.get("HTTP_X_LOCATION", "/didkit/") # generates the didweb handler didWeb = "did:web:" + \ request.META["HTTP_HOST"] + \ ':'.join(location[:-1].split('/')) # opens the key in order to get the public part of it with open(KEY_PATH, "r") as f: key_file = File(f) key = json.loads(key_file.readline()) key_file.close() key_id = didWeb + "#main" # adds the did.json to the context credential = { "@context": "https://www.w3.org/ns/did/v1", "id": didWeb, "verificationMethod": [{ "id": key_id, "type": "Ed25519VerificationKey2018", "controller": didWeb, "publicKeyJwk": { "kty": key["kty"], "crv": key["crv"], "x": key["x"] } }], "authentication": [key_id], "assertionMethod": [key_id], } return JsonResponse(credential)
def create_monochrome_logo(request, company):
    """Derive and store a 1-bit monochrome logo from the company's color logo.

    Returns JSON with the new logo URL, or a JSON error when the company has
    no color logo to convert.
    """
    c = Company.objects.get(url_name=company)
    if not c.color_logo:
        return JsonError(_("Color logo does not exist"))
    # Get the company's color logo.
    color_logo = Image.open(c.color_logo.path)
    # Resize it to the monochrome_logo dimension.
    black_logo = resize_image(color_logo.copy(), g.IMAGE_DIMENSIONS['monochrome_logo'], 'fit')
    # Reduce color depth to 1-bit.
    black_logo = black_logo.convert(mode='1')
    # Derive a sibling path for the monochrome version.
    new_path = os.path.splitext(c.color_logo.path)[0]
    new_path = new_path + '_monochrome.' + g.MISC['image_format']
    black_logo.save(new_path, g.MISC['image_format'], bits=1)
    # BUG FIX: the logo is binary data — re-open it in 'rb' (text mode
    # corrupts bytes on Windows and fails to decode on Python 3) and use
    # `with` so the handle is closed even if the field save raises.
    with open(new_path, 'rb') as fh:
        c.monochrome_logo.save('new', File(fh))
    # Return an URL to the new logo.
    return JsonResponse({'status': 'ok', 'logo_url': c.monochrome_logo.url})
class CumulusTests(TestCase):
    """Exercise the Django File API against Cloud Files-backed storage."""

    def setUp(self):
        "Set up tiny files to test with."
        image_path = os.path.join(os.path.dirname(__file__), "image_300x200.gif")
        document_path = os.path.join(os.path.dirname(__file__), "text_file.txt")
        self.image = ImageFile(open(image_path, "rb"))
        self.document = File(open(document_path, "r"))

    def test_file_api(self):
        """
        Make sure we can perform the following using the Django File API:
            - Upload the test files
            - Access common file attributes
            - Delete the test files
        """
        self.thing = Thing.objects.create(image=self.image, document=self.document)
        self.assertEqual(self.thing.image.width, 300)
        self.assertEqual(self.thing.image.height, 200)
        self.assertEqual(self.thing.image.size, 976)
        # assert_ is a deprecated alias of assertTrue (removed in Py 3.12).
        self.assertTrue(
            "cdn.cloudfiles.rackspacecloud.com" in self.thing.image.url,
            "URL is not a valid Cloud Files CDN URL."
        )
        self.assertEqual(self.thing.document.size, 31)
        self.assertTrue(
            "cdn.cloudfiles.rackspacecloud.com" in self.thing.document.url,
            "URL is not a valid Cloud Files CDN URL."
        )
        self.thing.delete()

    def tearDown(self):
        # Close the handles opened in setUp.
        self.document.close()
        self.image.close()
def report(request):
    """Persist a user-submitted error report as a small XML file.

    Renders the report page with a success message on valid input, or
    re-renders the bare page when the form does not validate.
    """
    form = ReportErrorForm(request.POST)
    # Guard clause: invalid form short-circuits to the plain page.
    if not form.is_valid():
        return navbar_render(request, 'report.html')
    messages = [{'tags': 'success',
                 'text': u"Takk for at du hjelper til med å gjøre denne siden bedre!"}]
    # Pick an unused random file name (UUID collisions are practically
    # impossible, but keep the original existence check anyway).
    file_path = 'reports/' + str(uuid.uuid4()) + '.xml'
    while os.path.isfile(file_path):
        file_path = 'reports/' + str(uuid.uuid4()) + '.xml'
    text = (
        u"<?xml version=\"1.0\" encoding=\"UTF-8\"?>",
        u"<!DOCTYPE bank SYSTEM \"report.dtd\">",
        u"<report xmlns=\"http://www.w3schools.com\"",
        u"\txmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"",
        u"\txsi:schemaLocation=\"report.xsd\">",
        u"\t<course>",
        u"\t\t" + form.cleaned_data['course_code'],
        u"\t</course>",
        u"\t<semester>",
        u"\t\t" + form.cleaned_data['semester_code'],
        u"\t</semester>",
        u"\t<description>",
        u"\t\t" + form.cleaned_data['description'],
        u"\t</description>",
        u"</report>"
    )
    # BUG FIX: the payload is explicitly UTF-8 *bytes*; the old 'w+' text
    # mode only worked by accident on Python 2.  'wb' + `with` also closes
    # the handle even when the write raises.
    with open(file_path, 'wb') as f:
        File(f).write(u'\n'.join(text).encode('utf8'))
    return navbar_render(request, 'report.html', {'messages': messages})
class MessageTest(ModelTest):
    """Model-level tests for creating seed and response Messages."""

    def setUp(self):
        super(MessageTest, self).setUp()
        self.chain = mommy.make(Chain)
        # Test file for models.FileField.
        fpath = Path(settings.APP_DIR, 'grunt/tests/media/test-audio.wav')
        self.audio = File(open(fpath, 'rb'))

    def tearDown(self):
        super(MessageTest, self).tearDown()
        self.audio.close()

    def test_make_a_seed_message(self):
        """Make a seed message: a message without a parent."""
        message = Message(chain=self.chain, audio=self.audio)
        message.full_clean()
        message.save()

    def test_make_a_response_message(self):
        """Make a message."""
        seed = mommy.make_recipe('grunt.seed')
        message = Message(parent=seed, audio=self.audio)
        message.full_clean()
        message.save()
        # assertEquals is a deprecated alias of assertEqual.
        self.assertEqual(message.parent, seed)
def set_contents(self, data):
    """Overwrite the file at self.path with `data`, dropping any cached handle."""
    # Invalidate the cached handle first so later reads reopen the new
    # contents instead of a stale descriptor.
    if self._file is not None:
        self._file.close()
        self._file = None
    writer = File(open(self.path, "w"))
    writer.write(data)
    writer.close()
def export_fixtures():
    """Export the important lookup tables as JSON fixtures.

    These fixtures are used for testing and for seeding new instances.
    (Docstring typos "expor"/"fixutre" corrected.)
    """
    fixtures_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../fixtures')
    models_to_export = [
        models.Location,
        models.Status,
        models.PersonRole,
        models.SpatialRepresentationType,
        models.SpatialReferenceSystem,
        models.SecurityClassification,
        models.ResourceType,
        models.Maintenance,
        models.CharacterSet,
        models.KeywordDomain,
        models.ContentType,
        shared_models.FiscalYear,
    ]
    for model in models_to_export:
        data = serializers.serialize("json", model.objects.all())
        my_label = model._meta.db_table
        # `with` guarantees the handle is closed even if the write fails
        # (the original left it open on error).
        with open(os.path.join(fixtures_dir, f'{my_label}.json'), 'w') as f:
            File(f).write(data)
def extractFile(fileObject, request):
    """If `fileObject` is a .zip upload, unpack its members next to MEDIA_ROOT.

    Each extracted member is registered as a FilebabyFile with its MD5.
    Returns True when an archive was extracted (and the original upload
    deleted), False otherwise.

    SECURITY NOTE: member names are joined into `dirname` unchecked, so a
    crafted archive with "../" entries can escape it (zip-slip).  Only feed
    trusted archives here.
    """
    if not fileObject.f.name.endswith(".zip"):
        return False
    # Convert file and dir into absolute paths.
    fullpath = os.path.join(settings.MEDIA_ROOT, fileObject.f.name)
    dirname = os.path.dirname(settings.MEDIA_ROOT)
    # Binary mode: zip archives are binary; the old text-mode 'r' handle
    # breaks ZipFile on Python 3.  `with` closes it on every path.
    with open(fullpath, 'rb') as fullpathhandle:
        zfobj = zipfile.ZipFile(fullpathhandle)
        for name in zfobj.namelist():
            if name.endswith('/'):
                # Don't fail if the directory already exists.
                try:
                    os.mkdir(os.path.join(dirname, name))
                except OSError:
                    pass
                continue
            data = zfobj.read(name)
            with open(os.path.join(dirname, name), 'wb+') as raw:
                File(raw).write(data)
            # BUG FIX: the old code hashed outfile.read() immediately after
            # writing, without seeking back, so every record got the MD5 of
            # an empty string.  Hash the actual member contents instead.
            file_o = FilebabyFile(f=name, username=request.user.username,
                                  md5=hashlib.md5(data).hexdigest())
            file_o.save()
    deleteFromOS(fullpath)
    fileObject.delete()
    return True
def detect(request):
    """Accept an uploaded sample image, stage it to disk, and run detection.

    Only extensions in `allowed_extention` are accepted; other uploads
    redirect back to the upload form with an error message.
    """
    if request.method != "POST":
        return render(request, "upload.html", {'image_form': imageForm})
    data = request.FILES['sample_image']
    name, extention = str(data).split(".")
    if extention not in allowed_extention:
        # BUG FIX: messages.ERROR is an integer level constant, not a
        # callable — calling it raised TypeError.  Use messages.error().
        messages.error(request, "Please provide Valid Image File(JPEG/JPG/PNG)")
        return redirect('detect:detector')
    # Copy the upload into the fixed staging path chunk by chunk; the
    # `with` blocks close both handles (the original closed them manually).
    with open("accounts/media/detect/empty.png", "wb") as img:
        with data as image:
            myfile = File(img)
            for i in image:
                myfile.write(i)
    # Normalize the staged image to 512x512 before detection.
    image = cv2.imread("accounts/media/detect/empty.png", 1)
    img = cv2.resize(image, (512, 512), interpolation=cv2.INTER_NEAREST)
    cv2.imwrite("accounts/media/detect/empty.png", img)
    main()
    messages.info(request, "Thank you for being Patient, ThankYou")
    return render(request, "final.html", {})
def save_case_file(self):
    """Save an edited test-case file, backing up the previous version.

    Reads the target case path and new content from the request body,
    renames the existing file to a timestamped .history backup, writes the
    new content, and returns {"result": "ok"} as JSON.
    """
    case_path = os.getenv("CASEPATH")
    case_path_obj = case_path.split('/')[0]
    # Parse the posted request body (JSON).
    json_str = self.body
    json_obj = json.loads(json_str)
    # "a.b.c:..." -> "a/b/c.py": dotted module path before the colon.
    case_path = json_obj.get('casepath').split(':')[0].replace('.', '/') + '.py'
    time_str = time.strftime("%Y-%m-%d %H_%M_%S")
    # Back up the original as "<name>_<timestamp>.history"
    # (timestamp format e.g. 2017-07-20 18_34_48).
    os.rename(
        '../arbiter-cases/' + case_path_obj + '/' + case_path,
        '../arbiter-cases/' + case_path_obj + '/' + case_path + '_' + time_str + '.history')
    # codecs.open with an explicit utf-8 encoding avoids mojibake.
    with codecs.open('../arbiter-cases/' + case_path_obj + '/' + case_path, 'w', 'utf-8') as f:
        mfile = File(f)
        mfile.write(json_obj.get('content'))
        mfile.flush()
        mfile.seek(0)
        mfile.close()
    # NOTE(review): `result` is only bound when the file reports closed;
    # were it ever not closed here, the return below would raise NameError.
    if mfile.closed:
        result = 'ok'
    return HttpResponse(json.dumps({"result": result}), content_type="application/json")
def create_instance(model_class, image_name):
    """Create, save and return a `model_class` row with the test image attached."""
    obj = model_class()
    image = File(get_image_file())
    # Attach the image under the requested name, persist, then release
    # the file handle.
    obj.original_image.save(image_name, image)
    obj.save()
    image.close()
    return obj
def konto_action_handler(request):
    """Handle chart-of-accounts (kontoplaan) import/export POST actions.

    'Loe kontoplaan' imports an uploaded CSV; 'Kirjuta kontoplaan faili'
    exports the chart to kontoplaan.csv.  Successful actions redirect home,
    anything else re-renders the template.
    """
    template = 'kontoplaan.html'
    go_home = False
    context = {}
    if request.method == 'POST':
        # dict.has_key() was removed in Python 3; use the `in` operator.
        if 'action' in request.POST:
            if request.POST['action'] == u'Loe kontoplaan':
                # Import an uploaded CSV chart of accounts.
                request.encoding = 'utf-8'
                if 'cvs' in request.FILES:
                    wf = request.FILES['cvs']
                    f = File(wf)
                    kl = KontoLoader()
                    kl.import_kontoplaan(f)
                    go_home = True
            elif request.POST['action'] == u'Kirjuta kontoplaan faili':
                # Export the chart of accounts to a UTF-8 CSV file.
                # NOTE(review): the handle is only closed when export
                # succeeds — consider a try/finally.
                f = codecs.open('kontoplaan.csv', mode="w", encoding="utf8")
                kl = KontoLoader()
                if kl.export_kontoplaan(f):
                    f.close()
                    go_home = True
            else:
                go_home = True
    if go_home:
        result = redirect(kontoplaan)
    else:
        result = render_to_response(template, context,
                                    context_instance=RequestContext(request))
    return result
def upload_docs(build, project):
    """Uploads the built docs to the appropriate storage.

    Locates the generator-specific output directory, uploads every file in
    it, invalidates cached copies, removes the local build tree, and marks
    the build successful.
    """
    project = build.project
    logger.info('Uploading docs for %s' % project)
    count = 0
    dest_base = '%s/%s' % (project.owner, project.name)
    # Ask the generator-specific helper script where the built output lives.
    if project.generator.name == 'Sphinx':
        target = subprocess.check_output(
            ['bash', 'bin/target_sphinx', build.path, project.docs_path])
    elif project.generator.name == 'Jekyll':
        target = subprocess.check_output(
            ['bash', 'bin/target_jekyll', build.path, project.docs_path])
    local_base = '%s/%s/' % (build.path, target.rstrip())
    # Walks through the built doc files and uploads them.
    for root, dirs, names in os.walk(local_base):
        for name in names:
            with open(os.path.join(root, name), 'rb') as fp:
                file = File(fp)
                dest = '/%s/%s' % (dest_base, os.path.relpath(file.name, local_base))
                logger.info('Uploading %s...' % dest)
                docs_storage.save(dest, file)
                # Invalidates any cached copy of this path.
                cache.delete(dest)
            # Deletes the file from local after uploading.
            os.remove(os.path.join(root, name))
            # BUG FIX: the original did `count += idx`, which summed
            # enumerate indices instead of counting uploaded files.
            count += 1
    shutil.rmtree(build.path)
    # Updates the project's modified date.
    project.save()
    build.status = Build.SUCCESS
    build.save()
    logger.info('Finished uploading %s files' % count)
def create_by_user(self, user, uploaded_file, period, saved_hours):
    """Lock an uploaded file for `period` days and record it for the user.

    Generates a numeric password, locks the file via FileLocker, stores the
    locked copy with its metadata, and returns the created model instance.
    `saved_hours` is capped at 24 hours per locked day.
    """
    locked_at = django_now()
    unlockable_at = locked_at + datetime.timedelta(days=period)
    # Cap the claimed saving at the theoretical maximum for the period.
    max_saved_hours = period * 24
    if saved_hours > max_saved_hours:
        saved_hours = max_saved_hours
    password = get_random_string(8, allowed_chars='0123456789')
    fl = FileLocker()
    fl.lock(uploaded_file, password)
    # NOTE(review): opened in text mode ('r'); if the locked output is
    # binary (likely for an encrypted blob), this should be 'rb' — confirm.
    locked_file = File(
        open(fl.get_locked_file_path(), 'r'),
        name=fl.get_locked_file_name()
    )
    obj = self.create(
        user_profile=user.get_profile(),
        locked_file=locked_file,
        locked_file_name=fl.get_locked_file_name(),
        original_file_name=uploaded_file.name,
        original_file_size=uploaded_file.size,
        password=password,
        locked_at=locked_at,
        unlockable_at=unlockable_at,
        saved_hours=saved_hours
    )
    locked_file.close()
    # Remove FileLocker scratch files now that the copy is stored.
    fl.clean()
    return obj
def save_image(self, image_data):
    """
    Save a TNZImage. It will also save the related TNZImageInstance.
    It will check image instance 'format' key as the unique key for the image.
    :param image_data:
    :return (TNZImage, flag): flag is self.UNCHANGED or self.IMPORTED.
    """
    flag = self.UNCHANGED
    if image_data['asset_type'] != 'image':
        return None, flag
    image_sorted_data = {
        'o_id': image_data['o_id'],
        'unique_id': image_data['unique_id'],
        'type_o_id': image_data['type_o_id'],
        'description': image_data['description'],
        'label': image_data['label'],
        'width': image_data['width'],
        'height': image_data['height'],
        'order': image_data.get('order'),
        'market': image_data['market'],
        'latitude': self.convert_latlng(image_data['latitude']),
        'longitude': self.convert_latlng(image_data['longitude']),
        'asset_type': image_data['asset_type'],
        'credit': image_data['credit'],
        'exists': image_data['exists'],
        'caption': image_data['caption'],
        'url': image_data['url'],
    }
    try:
        # BUG FIX: return the same (image, flag) tuple shape as every other
        # path; the original returned the bare image here, breaking callers
        # that unpack the documented pair.
        return TNZImage.objects.get(unique_id=image_data['unique_id']), flag
    except TNZImage.DoesNotExist:
        image = TNZImage(**image_sorted_data)
        flag = self.IMPORTED
        image.save()
    # Download the 'original' instance and attach it as the image file.
    for instance in image_data['instances']:
        if instance['format'] == 'original' and not image.file.name:
            try:
                url = instance['url']
                resource = requests.get(url, stream=True)
            except requests.HTTPError:
                raise
            else:
                if resource.status_code == 200:
                    name = urlparse(url).path.split('/')[-1]
                    temp_name = os.path.join(settings.MEDIA_ROOT, 'listings/temp_image')
                    # Stage the download, then re-open read-only for the
                    # field save; `with` closes both handles reliably.
                    with open(temp_name, 'wb') as fh:
                        for chunk in resource:
                            fh.write(chunk)
                    with open(temp_name, 'rb') as fh:
                        image.file.save(name, File(fh))
                    flag = self.IMPORTED
                    break
    return image, flag
def resave_files(self, object):
    """Re-save each configured file field so its file is pushed back through
    the current storage backend."""
    for field in self.fields:
        field_callable = getattr(object, field)
        absfilepath = os.path.join(settings.MEDIA_ROOT, field_callable.name)
        filename = os.path.basename(absfilepath)
        # BUG FIX: media files are arbitrary bytes — text mode ('r')
        # corrupts or fails to decode non-text content on Python 3.
        # `with` also closes the handle if save() raises.
        with open(absfilepath, 'rb') as fh:
            field_callable.save(filename, File(fh))
def save_vcf(filename, user_profile):
    """Wrap the on-disk file `filename` (under the base path) in a Vcf record."""
    raw = open(getBasePath() + "/" + filename, "rb")
    wrapped = File(raw)
    # Persist the record, then release both the wrapper and the raw handle.
    Vcf(pdf=wrapped, user_profile=user_profile).save()
    wrapped.close()
    raw.close()
def readFile(request, path, notefilePK):
    """Open `path` read-only in binary mode and feed its contents to readContent."""
    handle_info = openFile(path, 'rb')
    # Guard clause: nothing to do when the file could not be opened.
    if not handle_info['opened']:
        return
    # Wrap Python's file object in a Django File before handing it on.
    wrapped = File(handle_info['handler'])
    readContent(request, wrapped, notefilePK)
    wrapped.close()
def setUp(self):
    """Load the test QGS project file into a QgisProject instance."""
    qgs_path = '{}{}{}'.format(CURRENT_PATH, TEST_BASE_PATH, QGS_FILE)
    with open(qgs_path, 'r', encoding='utf-8') as fh:
        project_file = File(fh)
        # Keep only the file name (no path) to simulate an
        # UploadedFileWithId instance.
        project_file.name = project_file.name.split('/')[-1]
        self.project = QgisProject(project_file)
def resave_files(self, object):
    """Re-save each configured file field so its file is pushed back through
    the current storage backend."""
    for field in self.fields:
        field_callable = getattr(object, field)
        absfilepath = os.path.join(settings.MEDIA_ROOT, field_callable.name)
        filename = os.path.basename(absfilepath)
        # BUG FIX: media files are arbitrary bytes — text mode ("r")
        # corrupts or fails to decode non-text content on Python 3.
        # `with` also closes the handle if save() raises.
        with open(absfilepath, "rb") as fh:
            field_callable.save(filename, File(fh))
def version_generator(value, version_prefix, force=None, site=None): """ Generate Version for an Image. value has to be a serverpath relative to MEDIA_ROOT. """ # PIL's Error "Suspension not allowed here" work around: # s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html if STRICT_PIL: from PIL import ImageFile else: try: from PIL import ImageFile except ImportError: import ImageFile ImageFile.MAXBLOCK = IMAGE_MAXBLOCK # default is 64k if not site: from filebrowser.sites import site as default_site site = default_site tmpfile = File(NamedTemporaryFile()) try: f = site.storage.open(value) im = Image.open(f) version_path = get_version_path(value, version_prefix, site=site) version_dir, version_basename = os.path.split(version_path) root, ext = os.path.splitext(version_basename) version = scale_and_crop(im, VERSIONS[version_prefix]['width'], VERSIONS[version_prefix]['height'], VERSIONS[version_prefix]['opts']) if not version: version = im if 'methods' in VERSIONS[version_prefix].keys(): for m in VERSIONS[version_prefix]['methods']: if callable(m): version = m(version) try: version.save( tmpfile, format=Image.EXTENSION[ext], quality=VERSION_QUALITY, optimize=(os.path.splitext(version_path)[1].lower() != '.gif')) except IOError: version.save(tmpfile, format=Image.EXTENSION[ext], quality=VERSION_QUALITY) # Remove the old version, if there's any if version_path != site.storage.get_available_name(version_path): site.storage.delete(version_path) site.storage.save(version_path, tmpfile) return version_path except: return None finally: tmpfile.close() try: f.close() except: pass
def test_upload_results(self):
    """Posting a trained model file plus metrics to /results/<id> stores the
    model under MEDIA_ROOT and marks the TrainingResult finished."""
    # Build a minimal Keras model from source text; exec() with globals()
    # makes create_model available below.
    string_model = """def create_model():
        model = tf.keras.models.Sequential([
        keras.layers.Dense(512, activation='relu', input_shape=(784,)),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(10)
        ])
        model.compile(optimizer='adam',
            loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
            metrics=['accuracy'])
        return model
    """
    exec (string_model, None, globals())
    # Create a basic model instance and save it to disk as the upload payload.
    model = create_model()
    FILE_NAME = "test.h5"
    model.save(FILE_NAME)
    from django.core.files import File
    from django.core.files.uploadedfile import SimpleUploadedFile
    file = File(open(FILE_NAME, 'rb'))
    uploaded_file = SimpleUploadedFile('test.h5', file.read(),
                                       content_type='multipart/form-data')
    results = {
        'id': self.resultID,
        'val_metrics': "0.12, 0.01",
        'val_loss': 0.11,
        'train_loss': 0.12,
        'train_metrics': "0.12, 0.01",
    }
    data = {
        'trained_model': uploaded_file,
        'data': json.dumps(results)
    }
    resp = self.client.post("/results/" + str(self.resultID), data, format='multipart')
    self.assertEqual(resp.status_code, status.HTTP_200_OK)
    # The uploaded model must land under the trained-models media dir.
    filepath = os.path.join(settings.MEDIA_ROOT,
                            settings.TRAINED_MODELS_DIR) + str(self.resultID) + '.h5'
    self.assertTrue(os.path.exists(filepath))
    self.obj = TrainingResult.objects.get(id=self.resultID)
    self.assertEqual(self.obj.status, 'finished')
    # Float fields round-trip through the API; compare within epsilon.
    epsilon = 1 * 10 ** (-8)
    self.assertEqual(self.obj.train_metrics, results['train_metrics'])
    self.assertEqual(self.obj.val_metrics, results['val_metrics'])
    self.assertTrue(abs(float(self.obj.train_loss) - results['train_loss']) <= epsilon)
    self.assertTrue(abs(float(self.obj.val_loss) - results['val_loss']) <= epsilon)
    file.close()
    os.remove(FILE_NAME)
def save_binary(filename, user_profile):
    """
    Save a local binary file as a Document owned by *user_profile*.

    ``filename`` is resolved relative to getBasePath().  Returns the
    storage name of the saved docfile.
    """
    base_path = getBasePath()
    # FIX: context manager guarantees the handle is closed even if
    # doc.save() raises (the original leaked the handle on any error).
    with open(base_path + "/" + filename, "rb") as fh:
        doc = Document(docfile=File(fh), user_profile=user_profile)
        doc.save()
    return doc.docfile.name
def version_generator(value, version_prefix, root, force=None): """ Generate Version for an Image. value has to be a serverpath relative to MEDIA_ROOT. """ # PIL's Error "Suspension not allowed here" work around: # s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html try: from PIL import ImageFile except ImportError: import ImageFile ImageFile.MAXBLOCK = settings.IMAGE_MAXBLOCK # default is 64k if storage.exists(value): tmpfile = File(NamedTemporaryFile()) try: orig_file = storage.open(value) im = Image.open(orig_file) version_path = get_version_path(value, version_prefix, root) path, version_basename = os.path.split(version_path) root, ext = os.path.splitext(version_basename) version = scale_and_crop( im, settings.VERSIONS[version_prefix]["width"], settings.VERSIONS[version_prefix]["height"], settings.VERSIONS[version_prefix]["opts"], ) if not version: version = im if "methods" in settings.VERSIONS[version_prefix].keys(): for method in settings.VERSIONS[version_prefix]["methods"]: if callable(method): version = method(version) try: version.save( tmpfile, format=Image.EXTENSION[ext], quality=settings.VERSION_QUALITY, optimize=(os.path.splitext(version_path)[1].lower() != ".gif"), ) except IOError: version.save(tmpfile, format=Image.EXTENSION[ext], quality=settings.VERSION_QUALITY) # Remove the old version, if there's any if version_path != storage.get_available_name(version_path): storage.delete(version_path) storage.save(version_path, tmpfile) return version_path except: raise finally: tmpfile.close() try: orig_file.close() except: pass return None
def uploaddata(request):
    """
    Persist files uploaded for a given organism/datatype/user session.

    Legacy Python 2 / old-Django view (print statements, request.REQUEST,
    HttpResponse(..., mimetype=...)).  Writes each uploaded file under the
    static data tree, records it as a WorkflowCapturedData row, and returns
    a JSON map of the saved entries keyed by upload index.
    """
    print 'Save upload data'
    try:
        print request.REQUEST['organismtype']
        print request.REQUEST['datatype']
        print request.REQUEST['userid']
        userid = request.REQUEST['userid']
        organismtype = request.REQUEST['organismtype']
        # First organism whose name matches; IndexError if none exists.
        organism = Organisms.objects.filter(name = organismtype)[0]
        print 'organism id: ' + str(organism.id)
        dtype = request.REQUEST['datatype']
        datatypeobj = OrganismDataTypes.objects.filter(type = dtype)[0]
        # Hard-coded deployment path (the previous one kept commented out).
        #sessionpath = os.path.join('/local/network_portal/web_app/static/data', organismtype)
        sessionpath = os.path.join('/github/baligalab/network_portal/web_app/static/data', organismtype)
        sessionpath = os.path.join(sessionpath, dtype)
        sessionpath = os.path.join(sessionpath, userid)
        # NOTE(review): os.mkdir only creates the leaf directory; the parent
        # chain must already exist (os.makedirs would create it all).
        if not os.path.exists(sessionpath):
            os.mkdir(sessionpath)
        print 'save path: ' + sessionpath
        # URL prefix mirroring the filesystem layout above.
        savepath = '/static/data/' + organismtype + '/' + dtype + '/' + userid
        responsedata = {}
        #responsedata['organismtype'] = organismtype
        #responsedata['datatype'] = dtype
        idx = 0
        for key in request.FILES.keys():
            #each file is an UploadedFile object
            print 'FILE key: ' + key
            srcfile = request.FILES[key]
            fullfilename = srcfile.name
            print fullfilename
            # Strip any client-supplied directory component from the name.
            prefix, filename = os.path.split(fullfilename)
            print 'File name: ' + filename
            # Stream the upload to disk in chunks to bound memory use.
            with open(os.path.join(sessionpath, filename), 'wb') as f:
                destination = File(f)
                for chunk in srcfile.chunks():
                    destination.write(chunk)
                destination.close()
            dataurl = savepath + '/' + filename
            print 'File url: ' + dataurl
            # save to DB
            data = WorkflowCapturedData(owner_id = userid, type_id = datatypeobj.id, dataurl = dataurl, urltext = filename, organism_id = organism.id)
            data.save()
            pair = {'id': str(data.id), 'userid': userid, 'organism': organismtype, 'datatype': dtype, 'text' : filename, 'url': dataurl }
            responsedata[str(idx)] = pair
            idx = idx + 1
    except Exception as e:
        print str(e)
        # NOTE(review): message mentions deleting a workflow data group but
        # this view uploads data — looks like a copy/paste artifact.
        error = {'status':500, 'message': 'Failed to delete workflow data group' }
        return HttpResponse(json.dumps(error), mimetype='application/json')
    return HttpResponse(json.dumps(responsedata), mimetype='application/json')
def write(text, fname):
    """
    Very naive write helper: write the given text to a file.
    Needs to be extended for the webapplication.

    ``text`` is an iterable of strings; items are written verbatim, with
    no separators or newlines added.
    """
    # FIX: 'with' guarantees the handle is closed even if a write raises
    # (the original manual open/close leaked it on error).  The Django
    # File wrapper added nothing over the raw handle; writelines() batches
    # the writes instead of looping in Python.
    with open(fname, 'w') as outfile:
        outfile.writelines(text)
def submitted_code(self):
    """
    Return the prettified source of this attempt's submitted file.

    Assumes exactly one StudentProblemFile exists for the current
    attempt number.
    """
    # get file content (assumes only one file submission)
    record = self.studentproblemfile_set.get(attempt_num=self.attempt_num)
    wrapper = File(record.submitted_file)
    raw = wrapper.read()
    wrapper.close()
    return pretty_code.python_prettify(raw, "inline")
def downloadPD(request):
    """
    Serve the project-description .docx as an attachment download.
    """
    filename = './FengHao' + settings.MEDIA_URL + 'Projects Description 20160106.docx'
    # BUG FIX: .docx is a binary (zip) container, so it must be opened in
    # 'rb'.  Text mode corrupts the payload / raises UnicodeDecodeError.
    # NOTE(review): './' + filename yields '././FengHao...' — works, but the
    # path construction deserves a cleanup; kept to avoid behavior change.
    f = open('./' + filename, 'rb')
    file = File(f)
    response = HttpResponse(FileWrapper(file), content_type='application/vnd.openxmlformats-officedocument.wordprocessingml.document')
    # NOTE(review): filename here includes the relative path and spaces;
    # browsers may mangle it — consider basename + quoting.
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    # Safe to close before returning: HttpResponse (non-streaming) consumes
    # the FileWrapper iterator eagerly at construction.
    file.close()
    f.close()
    return response
def downloadSample(request):
    """
    Serve the sample traffic-incident data set as an attachment download.
    """
    filename = './FengHao' + settings.MEDIA_URL + 'TrafficIncidentSet_2015-05-08.txt'
    # BUG FIX: open in binary mode so FileWrapper streams raw bytes — text
    # mode yields str chunks and newline translation can break the length.
    f = open('./' + filename, 'rb')
    file = File(f)
    # NOTE(review): 'application/txt' is not a registered MIME type
    # ('text/plain' would be standard); kept to avoid changing the header.
    response = HttpResponse(FileWrapper(file), content_type='application/txt')
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    # Safe to close before returning: HttpResponse (non-streaming) consumes
    # the FileWrapper iterator eagerly at construction.
    file.close()
    f.close()
    return response
def settings(request):
    """
    Render (GET) and save (POST) the Nexmo monitor configuration page.

    Legacy Python 2 / old-Django view (iteritems(), render_to_response,
    RequestContext).  NOTE(review): several lines below contain '******'
    runs where credentials/expressions were redacted from the original
    source; those statements are no longer valid Python and must be
    reconstructed before this module can run.  Also note this function
    shadows any module-level `settings` import.
    """
    c = {}
    c.update(csrf(request))
    # Require an authenticated session; any session error also -> login.
    try:
        if 'logged_in' not in request.session:
            return redirect('/login')
    except:
        return redirect('/login')
    if request.method == 'GET':
        nexmo_conf = nexmo_config()
        return render_to_response('settings.htm', {'nxmo_conf': nexmo_conf}, context_instance=RequestContext(request))
    if request.method == 'POST':
        try:
            error_dict = {''}  # NOTE(review): a set literal, apparently unused
            nxmo_conf = nexmo_config()
            # Fill any blank config values from the POSTed form.
            # NOTE(review): `v is ''` tests identity, not equality — fragile.
            for k, v in nxmo_conf.iteritems():
                if v is '':
                    nxmo_conf[k] = request.POST[k]
            Error = False
            # Normalize the receiver number: strip whitespace, '+' and '-'.
            recv = ''.join(request.POST['NRecv'].split())
            recv = recv.replace('+', '')
            recv = recv.replace('-', '')
            from_number = request.POST['NexmoFrom']
            # Build the INI-style body written under [nexmo_monitor].
            myvar = ''
            myvar += 'api_key=' + request.POST['NKey'] + "\n"
            myvar += 'secret_key=' + request.POST['NSecret'] + "\n"
            # Redacted statements (see docstring NOTE) follow verbatim:
            myvar += 'fromuser='******'touser='******'EnableSMS' in request.POST:
                myvar += 'EnableSMS=' + request.POST['EnableSMS'] + "\n"
            else:
                myvar += 'EnableSMS=0\n'
            myvar += 'username='******'UserName'] + "\n"
            myvar += 'password='******'password'] + "\n"
            with open(FILE_NAME, 'w') as f:
                my = File(f)
                my.write('[nexmo_monitor]\n')
                my.write(myvar)
                # NOTE(review): both closes are redundant — the with block
                # closes f again on exit.
                f.close()
                my.close()
            nxmo_conf = nexmo_config()
            messages.success(request, "Configuration Settings saved successfully.")
            return render_to_response('settings.htm', {'nxmo_conf': nxmo_conf}, context_instance=RequestContext(request))
        except Exception as err:
            messages.error(request, "We have find some errors.")
            return render_to_response('settings.htm', {'nxmo_conf': nxmo_conf}, context_instance=RequestContext(request))
class FormTest(TestCase):
    """Fixture base: a seed recipe plus an open test-audio File."""

    def setUp(self):
        super(FormTest, self).setUp()
        self.seed = mommy.make_recipe("grunt.seed")
        # Open the bundled wav once per test; closed again in tearDown.
        audio_path = Path(settings.APP_DIR, "grunt/tests/media/test-audio.wav")
        self.audio = File(audio_path.open("rb"))

    def tearDown(self):
        self.audio.close()
        # Drop everything the test wrote under the media root.
        TEST_MEDIA_ROOT.rmtree()
def save_model(self, request, form, formset, change):
    """
    Persist the admin change, then render the template body to an .html
    file on disk named after the underscored, lower-cased template name.
    """
    super(TemplateAdmin, self).save_model(request, form, formset, change)
    # NOTE(review): attributes are read off `form` (not form.cleaned_data
    # or the saved object) — confirm this is the intended source.
    nameTpl = form.name.replace(" ", "_").lower()
    extend = glb.extendBlock + form.templateType.name.lower() + glb.endExtendBlock + "\n"
    title = glb.titleBlock + form.name + glb.endTitleBlock + "\n"
    content = extend + title + form.content
    # FIX: 'with' guarantees the handle is closed even if write() raises
    # (the original only closed it on the happy path).
    with open(glb.fileRoot + nameTpl + '.html', 'w') as fh:
        File(fh).write(content)
def test_open_reopens_closed_file_and_returns_context_manager(self):
    """File.open() on a closed File reopens it and acts as a context manager."""
    backing = tempfile.NamedTemporaryFile(delete=False)
    wrapped = File(backing)
    try:
        wrapped.close()
        with wrapped.open() as handle:
            self.assertFalse(handle.closed)
    finally:
        # remove temporary file
        os.unlink(wrapped.name)
def log_message(self, text, param1="", param2=""):
    """
    Append one space-separated log record — timestamp, user, text and the
    two optional params — to the file at self.file_name.

    NOTE(review): records are written without a trailing newline, so
    successive calls run together on one line — confirm this is intended.
    """
    current_time = datetime.now()
    text_message = "%s %s %s %s %s" % (
        smart_str(current_time), smart_str(self.user),
        smart_str(text), smart_str(param1), smart_str(param2))
    # FIX: pass the str path directly (the original encoded it to a bytes
    # path for no benefit) and drop the redundant inner close() — the
    # 'with' block already closes the handle on exit, so File.close()
    # closed it a second time.
    with open(self.file_name, "a", encoding='UTF-8') as f:
        File(f).write(text_message)
def setUp(self):
    """Upload file to move: create a user, upload text.txt through the
    client as that user, and pre-create the target move directory."""
    user = User.objects.create_user('temporary', '*****@*****.**', 'temporary')
    # FIX: context manager closes the upload handle even if the POST
    # raises (the original only closed it on the happy path, at the end).
    # NOTE(review): opened in text mode; file uploads usually want 'rb' —
    # kept as-is to avoid changing what the view receives.  The '******'
    # credentials below are redaction artifacts from the original source.
    with open('./Files/tests/upload_f/text.txt') as fh:
        f = File(fh)
        self.client.login(username='******', password='******')
        self.client.post('/Files/tree/', {'file': f, 'path': ''})
        self.client.logout()
    os.mkdir('./media/temporary/files/dir1')
def scrape_save(product):
    """
    Pickle the scrape list (models.scrape_li) to data/<product>_scrape.pickle.

    Returns True on success, False on any failure (deliberate best-effort:
    callers only need a success flag).
    """
    ml_file = 'data/' + product + '_scrape.pickle'
    try:
        # FIX: 'with' closes the handle even when pickling fails (the
        # original leaked the open file on error).
        with open(ml_file, 'wb') as fh:
            pickle.dump(models.scrape_li, File(fh), protocol=pickle.HIGHEST_PROTOCOL)
        return True
    except Exception:
        # FIX: narrowed from a bare except so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still best-effort for real errors.
        return False
def create_thumb(imgfield, size_x, size_y, suffix="_thumb_", force_aspect=False):
    """
    Write a JPEG thumbnail of *imgfield*'s image next to the original,
    named <stem><suffix>.jpg, scaled to at most size_x x size_y.
    """
    imgfield.file.open()
    # rescale() returns the encoded JPEG bytes for the resized image.
    thumb_data = rescale(imgfield.file.read(), size_x, size_y, force_aspect)
    image_filename = basename(imgfield.file.name)
    thumb_name = image_filename[:image_filename.rfind(".")] + suffix + ".jpg"
    # BUG FIX: JPEG data must be written in binary mode ("wb") — text mode
    # corrupts the bytes on Windows and raises TypeError on Python 3.
    # 'with' also guarantees the handle is closed if write() fails.
    with open(dirname(imgfield.path) + '/' + thumb_name, "wb") as fh:
        File(fh).write(thumb_data)
    imgfield.file.close()