def upload_photo(request):
    image = request.FILES['photo']
    s = FileSystemStorage()
    img = s.save(image.name, image)
    print(s.path(img))
    exp = Expense.from_receipt(s.path(img), request.user)
    return redirect(reverse('admin:receipts_expense_change', args=(exp.id,)))
class Command(collectstatic.Command):
    help = "Collects static files from apps and other locations into " \
           "`VERSIONED_STATIC_ROOT` with a versioned filename. (Used in " \
           "conjunction with the {% versionedstatic %} template tag.)"

    def __init__(self, *args, **kwargs):
        super(Command, self).__init__(*args, **kwargs)
        # Force storage to be a filesystem storage for VERSIONED_STATIC_ROOT.
        self.storage = FileSystemStorage(location=settings.VERSIONED_STATIC_ROOT)
        try:
            self.storage.path('')
        except NotImplementedError:
            self.local = False
        else:
            self.local = True

    def copy_file(self, path, prefixed_path, source_storage, **kwargs):
        version = get_file_version(path, source_storage)
        prefixed_path = get_versioned_path(prefixed_path, version)
        return super(Command, self).copy_file(path, prefixed_path,
                                              source_storage, **kwargs)

    def link_file(self, path, prefixed_path, source_storage, **kwargs):
        version = get_file_version(path, source_storage)
        prefixed_path = get_versioned_path(prefixed_path, version)
        return super(Command, self).link_file(path, prefixed_path,
                                              source_storage, **kwargs)
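The try/except around `self.storage.path('')` above exploits the base `Storage.path()` contract: backends that cannot expose local filesystem paths raise NotImplementedError. A minimal sketch of that probe in isolation, within a configured Django project (the helper name is illustrative, not part of the snippet above):

from django.core.files.storage import FileSystemStorage

def storage_is_local(storage):
    # FileSystemStorage implements path(); non-filesystem backends raise.
    try:
        storage.path('')
    except NotImplementedError:
        return False
    return True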
class FileStoragePermissions(unittest.TestCase):
    def setUp(self):
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        self.assertEqual(mode, 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
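For reference, with the umask of 0o027 set in setUp, the default-permission assertions above work out to 0o666 & ~0o027 == 0o640 for files and 0o777 & ~0o027 == 0o750 for directories.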
class VariationField(object):
    """Instances of this class will be used to access data of the
    generated variations."""

    def __init__(self, name):
        """
        :param name: str
        """
        self.name = name
        self.storage = FileSystemStorage()

    @property
    def path(self):
        """Return the abs. path of the image file."""
        return self.storage.path(self.name)

    @property
    def url(self):
        """Return the url of the image file."""
        return self.storage.url(self.name)

    @property
    def size(self):
        """Return the size of the image file, reported by os.stat()."""
        return self.storage.size(self.name)
class ThumbnailField:
    '''
    Instances of this class will be used to access data of the
    generated thumbnails
    '''

    def __init__(self, name):
        self.name = name
        self.storage = FileSystemStorage()

    def path(self):
        return self.storage.path(self.name)

    def url(self):
        return self.storage.url(self.name)

    def size(self):
        return self.storage.size(self.name)

    def is_landscape(self):
        return self.orientation() == 'landscape'

    def is_portrait(self):
        return self.orientation() == 'portrait'

    def orientation(self):
        w, h = get_image_dimensions(self.path())
        if w > h:
            return 'landscape'
        else:
            return 'portrait'
class ThumbnailField:
    '''
    Instances of this class will be used to access data of the
    generated thumbnails
    '''

    def __init__(self, name):
        self.name = name
        self.storage = FileSystemStorage()

    @property
    def path(self):
        return self.storage.path(self.name)

    @property
    def url(self):
        return self.storage.url(self.name)

    @property
    def size(self):
        return self.storage.size(self.name)

    @property
    def height(self):
        im = Image.open(self.path)
        return im.size[1]

    @property
    def width(self):
        im = Image.open(self.path)
        return im.size[0]
class ExhibitStorage(FileSystemStorage):
    """
    Assume that incoming paths are of the form <username>/.../...
    """

    def __init__(self, *args, **kwargs):
        self.__userdata_storage = FileSystemStorage(location=kwargs['location'],
                                                    base_url=kwargs['base_url'])

    def _open(self, name, mode='rb'):
        return self.__userdata_storage._open(chunk_path(name) + name, mode)

    def _save(self, name, content):
        chunk = chunk_path(name)
        fullname = chunk + name
        if self.__userdata_storage.exists(fullname):
            self.__userdata_storage.delete(fullname)
        result = self.__userdata_storage._save(fullname, content)
        return result.partition(chunk)[2]

    def exists(self, name):
        return self.__userdata_storage.exists(chunk_path(name) + name)

    def path(self, name):
        return self.__userdata_storage.path(chunk_path(name) + name)

    def size(self, name):
        return self.__userdata_storage.size(chunk_path(name) + name)

    def delete(self, name):
        return self.__userdata_storage.delete(chunk_path(name) + name)

    def url(self, name):
        return self.__userdata_storage.url(name)

    def get_available_name(self, name):
        return self.__userdata_storage.get_available_name(name)
def path(self, name):
    '''
    Returns the absolute path to a file, stripping out /djmodules/[module]
    from the beginning. If the system requests /djmodules/testmodule/hello.html,
    this will return /home/user/edinsights/src/modules/testmodule/static/hello.html
    '''
    rootpath = os.path.relpath(os.path.join(name), self.base_url)
    return FileSystemStorage.path(self, rootpath)
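Concretely, assuming base_url is '/djmodules/' as the docstring suggests, os.path.relpath('/djmodules/testmodule/hello.html', '/djmodules/') yields 'testmodule/hello.html', which FileSystemStorage.path() then resolves under the storage's location.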
class CdnFinder(BaseFinder):
    def __init__(self, *args, **kwargs):
        super(CdnFinder, self).__init__(*args, **kwargs)
        self.storage = FileSystemStorage(location=settings.STATICFILES_DIRS[0])

    def find(self, path, all=False):  # pylint: disable=redefined-builtin
        return []

    def list(self, ignore_patterns):
        for package, data in ASSETS.items():
            if "cache" in data:
                for src, dest in data["cache"]["paths"].items():
                    dest = self.to_cache_path(package, dest)
                    if matches_patterns(dest, ignore_patterns):
                        continue
                    dest_dir = os.path.dirname(dest)
                    if not os.path.exists(self.storage.path(dest_dir)):
                        os.makedirs(self.storage.path(dest_dir))
                    if not os.path.exists(self.storage.path(dest)):
                        with open(self.storage.path(dest), 'wb') as fp:
                            fp.write(urlopen(self.to_url(src)).read())
                    yield dest, self.storage

    def to_cache_path(self, package, dest):
        return os.path.join('CACHE', package, dest)

    def transform_and_check_path(self, package, path):
        dest_cache = ASSETS.get(package, {}).get('cache', {}).get('paths', {})
        if path not in dest_cache:
            return path
        dest = self.to_cache_path(package, dest_cache[path])
        cache_path = self.storage.path(dest)
        if os.path.exists(cache_path):
            return settings.STATIC_URL + dest
        else:
            return path

    def transform_to_cache(self, package, paths):
        return [self.transform_and_check_path(package, path) for path in paths]

    def to_url(self, url):
        if url[:2] == '//':
            url = 'http:' + url
        return url
def test_delete_file_with_modified(self):
    list(default_collector.collect())
    storage = FileSystemStorage(local_path('assets'))
    new_mtime = os.path.getmtime(storage.path('js/first.js')) - 1000
    os.utime(default_collector.storage.path('pipeline/js/first.js'),
             (new_mtime, new_mtime))
    self.assertTrue(default_collector.delete_file(
        'js/first.js', 'pipeline/js/first.js', storage))
class ResizedImageField:
    def __init__(self, name):
        self.name = name
        self.storage = FileSystemStorage()

    def path(self):
        return self.storage.path(self.name)

    def url(self):
        return self.storage.url(self.name)

    def size(self):
        return self.storage.size(self.name)
class FileStoragePermissions(unittest.TestCase):
    def setUp(self):
        self.umask = 0o027
        self.old_umask = os.umask(self.umask)
        self.storage_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.storage_dir)
        os.umask(self.old_umask)

    @override_settings(FILE_UPLOAD_PERMISSIONS=0o654)
    def test_file_upload_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o654)

    @override_settings(FILE_UPLOAD_PERMISSIONS=None)
    def test_file_upload_default_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        fname = self.storage.save("some_file", ContentFile("data"))
        mode = os.stat(self.storage.path(fname))[0] & 0o777
        self.assertEqual(mode, 0o666 & ~self.umask)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=0o765)
    def test_file_upload_directory_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o765)

    @override_settings(FILE_UPLOAD_DIRECTORY_PERMISSIONS=None)
    def test_file_upload_directory_default_permissions(self):
        self.storage = FileSystemStorage(self.storage_dir)
        name = self.storage.save("the_directory/the_file", ContentFile("data"))
        dir_mode = os.stat(os.path.dirname(self.storage.path(name)))[0] & 0o777
        self.assertEqual(dir_mode, 0o777 & ~self.umask)
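Note the difference from the earlier variant of this test case: here the FileSystemStorage is constructed inside each test method rather than in setUp(), presumably so the instance is created while the override_settings decorator is active and picks up the overridden upload-permission settings.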
def save_officers_in_csv(request, overwrite):
    """Saves the officers specified in the csv.

    Keyword Arguments
    request -- the django request object
    overwrite -- indicates whether or not to remove the officers under a term
     if that term appears in the csv

    Return
    render value -- sends user to appropriate page based on whether there was an error
    """
    year = 0
    term = 0
    output = collections.OrderedDict()
    uploaded_file = request.FILES['csv']
    fs = FileSystemStorage()
    context = create_main_context(request, TAB_STRING)
    file_name = fs.save(uploaded_file.name, uploaded_file)
    with open(fs.path(file_name)) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=",")
        for row in csv_reader:
            if re.match(fr"{'|'.join(TERM_SEASONS)} \d\d\d\d", row[YEAR_AND_TERM_COLUMN]):  # noqa: W605
                year = (row[YEAR_AND_TERM_COLUMN].strip()
                        [row[YEAR_AND_TERM_COLUMN].strip().find(" "):]).strip()
                term = row[YEAR_AND_TERM_COLUMN].strip()[
                    :row[YEAR_AND_TERM_COLUMN].strip().find(" ")].strip()
                logger.info(
                    f"[about/import_export_officer_lists save_officers_in_csv()] going through term {term} {year}"
                )
            if row[NAME_COLUMN] != "" and row[NAME_COLUMN] != "Name":
                if year not in output:
                    output[year] = {}
                if term not in output[year]:
                    output[year][term] = []
                success, member, error_message = return_member_json(row)
                if not success:
                    context[ERROR_MESSAGE_KEY] = error_message
                    return render(request, 'about/upload_list.html', context)
                output[year][term].append(member)
    output = collections.OrderedDict(reversed(list(output.items())))
    for key, value in output.items():
        output[key] = collections.OrderedDict(reversed(list(output[key].items())))
    error_message = save_yearly_document(output, overwrite)
    if error_message is not None:
        context[ERROR_MESSAGE_KEY] = error_message
        return render(request, 'about/upload_list.html', context)
    return HttpResponseRedirect(f'{settings.URL_ROOT}about/list_of_officers')
def file(request):
    if request.method == 'POST':
        head = ['Name']
        attendees = []
        p = []
        attendance = {}
        num = int(request.POST['num'])
        for i in range(1, num + 1):
            data = request.FILES[str(i)]
            fs = FileSystemStorage()
            name = fs.save(data.name, data)
            path = fs.path(name)
            f = pd.read_csv('uploads/' + name, sep='\t', encoding='UTF-16')
            names = f['Full Name'].unique()
            add = [name for name in names if name not in attendees]
            attendees = attendees + add
            if i != 1:
                # Mark newly seen attendees absent for all previous sessions.
                for j in range(1, len(add) + 1):
                    for k in range(1, len(p) + 1):
                        p[-1 * k].append('A')
            pre = []
            pre.extend(repeat('P', len(attendees)))
            for x, name in enumerate(attendees):
                if name not in names:
                    pre[x] = 'A'
            p.append(pre)
            date = f['Timestamp'][0].split(',')[0]
            head.append(date)
            fs.delete(path)
        attendance['1Name'] = attendees
        i = 0
        for date in head[1:]:
            attendance[date] = p[i]
            i = i + 1
        final = pd.DataFrame(attendance)
        final.sort_values('1Name').sort_index(axis=1).to_csv(
            'attendance/files/Attendance.csv', index=False)
        with open('attendance/files/Attendance.csv', 'r') as m:
            data = m.readlines()
        response = HttpResponse(
            content_type='text/csv',
            headers={
                'Content-Disposition': 'attachment; filename="attendance.csv"'
            },
        )
        writer = csv.writer(response)
        for line in data:
            writer.writerow(line.split(','))
        return response
def user_file_handler(message: Message):
    user_telegram_file = message.audio
    file_size_mb = user_telegram_file.file_size / 5120 ** 2
    if file_size_mb > 1:
        too_much_size_message = strings.get_string('user_files.too_much_size')
        telegram_bot.reply_to(message, too_much_size_message)
    else:
        try:
            wait_message = strings.get_string('user_files.wait')
            telegram_bot.reply_to(message, wait_message)
            telegram_file_info = telegram_bot.get_file(user_telegram_file.file_id)
            telegram_file_path = telegram_file_info.file_path
            file_caption = 'audio_' + secrets.token_hex(5)
            telegram_file = requests.get(
                'https://api.telegram.org/file/bot{0}/{1}'.format(
                    API_TOKEN, telegram_file_path))
            file_storage = FileSystemStorage()
            filename = 'users/' + file_caption
            extension = os.path.splitext(os.path.basename(telegram_file_path))[1]
            if os.path.exists(os.path.join(file_storage.location,
                                           filename + extension)):
                filename += secrets.token_hex(5)
            filename += extension
            filepath = os.path.join(file_storage.location, filename)
            with open(filepath, 'wb') as audio_file:
                audio_file.write(telegram_file.content)
            user = users.get_user_by_telegram_id(message.from_user.id)
            new_file = File.objects.create(
                name=file_caption,
                file_path=file_storage.path(filename),
                file_url=file_storage.url(filename),
                is_user_file=True,
                confirmed=False,
                caption='@send_sound_bot',
                user=user)
            type_filename_message = strings.get_string('user_files.type_file_name')
            remove_keyboard = keyboards.get_keyboard('remove')
            telegram_bot.send_message(message.chat.id, type_filename_message,
                                      reply_markup=remove_keyboard)
            telegram_bot.register_next_step_handler(message, file_name_handler,
                                                    file_id=new_file.id)
        except Exception as e:
            error_message = strings.get_string('user_files.error')
            Navigation.to_main_menu(message.chat.id, error_message)
            logging.error(e)
def post(self, request, filename, format='jpg'):
    # up_f = request.data['file']
    up_f = request.FILES['file']
    fs = FileSystemStorage()
    localFN = fs.save(filename, up_f)
    uploaded_file_url = fs.url(localFN)
    print(uploaded_file_url)
    uploaded_file_path = fs.path(localFN)
    print(uploaded_file_path)
    rotate_image_basedon_exif(uploaded_file_path)
    imgFNs = search_sim_images(
        uploaded_file_path,
        (app_settings.imgFea1Ds_list, app_settings.normImgFea1Ds_list,
         app_settings.imgBNs_list),
        app_settings.cnn_model)
    print(imgFNs)
    gids = [fn.split('_')[0] for fn in imgFNs]
    gid2props = query_by_gids(gids)
    print(gid2props.keys())
    recomd_list = []
    for fn in imgFNs:
        gid = fn.split('_')[0]
        recomd_list.append({
            'id': gid,
            'name': gid2props[gid]['name'] if gid in gid2props else '',
            'sale_price': gid2props[gid]['sale_price'] if gid in gid2props else '',
            'goods_img_url': urlparse.urljoin(app_settings.FEATURE_IMAGE_URL, fn),
            'goods_page_url':
                'https://www.momoshop.com.tw/goods/GoodsDetail.jsp?i_code={}'.format(gid)
        })
    recomd_id = str(uuid.uuid4()).split('-')[-1]
    resp = JsonResponse({
        'recomd_id': recomd_id,
        'uploaded_file_url': uploaded_file_url,
        'recomd_list': recomd_list
    })
    return resp
def generate_printable_html(self, charts):
    location = tempfile.mkdtemp(prefix=settings.APP_NAME + "-",
                                dir=settings.MEDIA_ROOT)
    base_url = settings.MEDIA_URL + location.replace(settings.MEDIA_ROOT, "")
    cache_storage = FileSystemStorage(location=location, base_url=base_url)
    js_content = io.StringIO()
    js_content.write("""var casper = require('casper').create();
""")
    on_start = True
    for chart in charts:
        if chart['slug'] == 'totals':
            chart['image'] = self.generate_chart_image(
                chart['slug'],
                'envconnect/prints/total_score.html',
                context={'total_score': chart},
                js_content=js_content,
                cache_storage=cache_storage,
                on_start=on_start,
                width=300, height=200, delay=2)
        else:
            chart['distribution'] = json.dumps(chart.get('distribution', {}))
            chart['image'] = self.generate_chart_image(
                chart['slug'],
                'envconnect/prints/benchmark_graph.html',
                context={'chart': chart},
                js_content=js_content,
                cache_storage=cache_storage,
                on_start=on_start,
                width=250, height=204)
        on_start = False
    js_content.write("""
casper.run();
""")
    js_content.seek(0)
    js_generate_images = 'generate-images.js'
    cache_storage.save(js_generate_images, js_content)
    phantomjs_script_path = cache_storage.path(js_generate_images)
    cmd = [settings.PHANTOMJS_BIN.replace('bin/phantomjs', 'bin/casperjs'),
           phantomjs_script_path]
    LOGGER.info("RUN: %s", ' '.join(cmd))
    subprocess.check_call(cmd)
def extractAudioFeatures(filename):
    fs = FileSystemStorage(location=settings.FEAT_ROOT)
    (rate, sig) = wav.read(filename)
    mfc = MFCC(nfilt=20, ncep=20, lowerf=50, upperf=15000, alpha=0.97,
               samprate=rate, frate=(1 / 0.032), wlen=0.064, nfft=512)
    mfccs = mfc.sig2s2mfc(sig).T
    stft = np.exp(mfc.sig2logspec(sig)).T
    mfcc_path = fs.path(filename.split('/')[-1]) + '.mfcc64ms'
    stft_path = fs.path(filename.split('/')[-1]) + '.stft'
    np.savetxt(mfcc_path, mfccs)
    np.savetxt(stft_path, stft)
    diff_path = makeDiffCoefficients(mfcc_path)
    return [mfcc_path, diff_path, stft_path]
def uploadpdf(request):
    try:
        print("in")
        filename = generate_ref_id()
        fs = FileSystemStorage(location=settings.STATIC_ROOT + "/media")
        filesave = fs.save(filename + '.pdf', request.FILES['filedata'])
        filepath = fs.path(filesave)
        pdf_text = extract_text_from_pdf(filepath, filename)
        pdf_data = PdfData()
        pdf_data.data = pdf_text
        pdf_data.save()
        return Response({"status": "SUCCESS",
                         "message": "File uploaded successfully",
                         "refId": filename},
                        content_type='application/json', status=200)
    except Exception:
        traceback.print_exc()
        return Response({"status": "FAILURE", "message": "Invalid Request"},
                        content_type='application/json', status=500)
class ThumbnailField:
    """Instances of this class will be used to access data of the
    generated thumbnails"""

    def __init__(self, name):
        self.name = name
        self.storage = FileSystemStorage()

    def path(self):
        return self.storage.path(self.name)

    def url(self):
        return self.storage.url(self.name)

    def size(self):
        return self.storage.size(self.name)
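A hedged usage sketch for field wrappers of this shape; it assumes a configured Django project (so MEDIA_ROOT and MEDIA_URL are set) and the file name and content are illustrative:

from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage

storage = FileSystemStorage()
name = storage.save('thumbs/example.jpg', ContentFile(b'fake image bytes'))
thumb = ThumbnailField(name)
print(thumb.path(), thumb.url(), thumb.size())  # absolute path, public URL, size in bytes
storage.delete(name)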
class FileStoragePermissions(TestCase):
    def setUp(self):
        self.old_perms = settings.FILE_UPLOAD_PERMISSIONS
        settings.FILE_UPLOAD_PERMISSIONS = 0o666
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        settings.FILE_UPLOAD_PERMISSIONS = self.old_perms
        shutil.rmtree(self.storage_dir)

    def test_file_upload_permissions(self):
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o666)
def seis(request):
    dic = {}
    midiList = []
    if request.method == 'POST':
        files = request.FILES.getlist('midi')
        numero_canciones = int(request.POST.get('n_song'))
        division_tiempo_compas = int(request.POST.get('division'))
        entrenar_por = request.POST.get('tipoentrenamiento')
        long_canciones = int(request.POST.get('l_song'))
        epocasmlp = int(request.POST.get('epoch_train'))
        for f in files:
            fs = FileSystemStorage()
            name = fs.save(f.name, f)
            midiList.append(fs.path(name))
        # CNN_custom(midiList, division_tiempo_compas, entrenar_por, path_exit_file,
        #            duracion_pista, numero_de_canciones_a_generar, numero_epocas)
        CNN_custom(midiList, division_tiempo_compas, entrenar_por,
                   settings.MEDIA_ROOT, long_canciones, numero_canciones, epocasmlp)
        for f in midiList:
            fs.delete(f)
        for i in range(numero_canciones + 1):
            dic[str(i)] = settings.MEDIA_URL + "{}.mid".format(i)
    return render(request, "core/seis.html", context={"dic": dic})
def tres(request):
    dic = {}
    midiList = []
    if request.method == 'POST':
        files = request.FILES.getlist('midi')
        numero_canciones = int(request.POST.get('n_song'))
        division_tiempo_compas = int(request.POST.get('division'))
        size_win = int(request.POST.get('sizew'))
        long_canciones = int(request.POST.get('l_song'))
        epocasmlp = int(request.POST.get('epoch_train'))
        for f in files:
            fs = FileSystemStorage()
            name = fs.save(f.name, f)
            midiList.append(fs.path(name))
        # LSTM_free_windows(midiList, division_tiempo_compas, size_win, duracion_pista,
        #                   path_exit_file, numero_de_canciones_a_generar, numero_epocas)
        LSTM_free_windows(midiList, division_tiempo_compas, size_win,
                          long_canciones, settings.MEDIA_ROOT,
                          numero_canciones, epocasmlp)
        for f in midiList:
            fs.delete(f)
        for i in range(numero_canciones + 1):
            dic[str(i)] = settings.MEDIA_URL + "{}.mid".format(i)
    return render(request, "core/tres.html", context={"dic": dic})
def form_valid(self, form):
    text = form.cleaned_data['text']
    file = form.cleaned_data['file']
    file_path = None
    if file:
        file_storage = FileSystemStorage()
        filename = file_storage.save(os.path.join('advertising', file.name), file)
        file_path = file_storage.path(filename)
    Helpers.distribute_advertising_post(text, file_path)
    if file:
        file_storage.delete(filename)
    messages.success(self.request, "The mailing was sent successfully!")
    result = super().form_valid(form)
    return result
class FileStoragePermissions(unittest.TestCase):
    def setUp(self):
        self.old_perms = settings.FILE_UPLOAD_PERMISSIONS
        settings.FILE_UPLOAD_PERMISSIONS = 0o666
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        settings.FILE_UPLOAD_PERMISSIONS = self.old_perms
        shutil.rmtree(self.storage_dir)

    def test_file_upload_permissions(self):
        name = self.storage.save("the_file", ContentFile("data"))
        actual_mode = os.stat(self.storage.path(name))[0] & 0o777
        self.assertEqual(actual_mode, 0o666)
def upload(request):
    file = False
    if request.method == 'POST' and request.FILES['file']:
        file = True
        myfile = request.FILES['file']
        fs = FileSystemStorage()
        filename = fs.save(myfile.name, myfile)
        out_file = parse(fs.path(filename))
        with open(out_file, 'rb') as fh:
            response = HttpResponse(fh)
            response["Content-Disposition"] = "attachment; filename={}".format(
                os.path.basename(out_file))
            # return render(request, 'index.html', {'OUTPUT': response, 'file': file})
            return response
    return render(request, 'index.html', {'file': file})
def bulk_add(self, request):
    """Handle a bulk upload of entries from a user-supplied file."""
    if request.method == 'POST':
        myfile = request.FILES.get('myfile')
        if myfile is None:
            messages.warning(request, 'Must select a file for upload first!')
            return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
        fs = FileSystemStorage(location=gettempdir())
        filename = fs.save(myfile.name, myfile)
        fullname = fs.path(filename)
        if not os.path.exists(fullname):
            messages.error(request, 'Internal server error accessing file.')
        uploaded_file_url = fs.url(filename)
        entries = self.parse_file(fullname)
        num_saved = self.save_results(entries)
        fs.delete(filename)
        path = Path(request.path)
        redirection = path.parents[0].as_posix()  # go up 1
        if num_saved > 0:
            if num_saved > 1:
                msg_str = 'Successfully uploaded {} {}'.format(
                    num_saved, self.model._meta.verbose_name_plural)
            else:
                msg_str = 'Successfully uploaded {} {}'.format(
                    num_saved, self.model._meta.verbose_name)
            messages.success(request, msg_str)
        elif num_saved == 0:
            messages.warning(
                request,
                'Failed to upload any {}, has this file already been '
                'uploaded?'.format(self.model._meta.verbose_name_plural))
        return HttpResponseRedirect(redirection)
    return render(request, 'simple_upload.html', context={
        'entry_name': self.entry_name,
        'additional_help': self.help_text,
    })
def upload_files(upfile, path, escape=True):
    base_dir = str(settings.BASE_DIR)
    filename = upfile.name if not escape else make_filename(upfile.name)
    upload_url = path.replace(base_dir, settings.BASE_URL)
    fs = FileSystemStorage(location=path, base_url=upload_url)
    obj = fs.save(filename, upfile)
    if fs.exists(obj):
        return {
            'path': fs.path(obj).replace(base_dir, ''),
            'size': fs.size(obj),
            'url': fs.url(obj)
        }
    return False
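A hedged example of calling the helper above from a view; the view name, form field, and upload directory are assumptions for illustration only:

from django.conf import settings
from django.http import JsonResponse
import os

def attachment_view(request):
    # Save under a directory inside BASE_DIR so the URL rewrite in the helper applies.
    result = upload_files(request.FILES['attachment'],
                          path=os.path.join(str(settings.BASE_DIR), 'uploads'))
    if result:
        return JsonResponse(result)
    return JsonResponse({'error': 'upload failed'}, status=400)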
def index(request):
    if request.method == 'POST' and request.FILES['image']:
        image = request.FILES['image']
        dir_path = os.path.join(settings.MEDIA_ROOT, 'input')
        fs = FileSystemStorage(dir_path)
        input_filename = fs.save(image.name, image)
        input_path = os.path.join(dir_path, fs.path(input_filename))
        output_path = api_main(input_path)
        context = {'output_path': output_path}
        return render(request, 'web/download.html', context)
    context = {'form': ImageUploadForm()}
    return render(request, 'web/upload.html', context)
def upload(self, request):
    # print(File.objects.all().delete())
    fs = FileSystemStorage()
    form = UploadForm(data=request.POST)
    context = {'form': form}
    user_id = ''
    if "user" in request.session:
        user_id = request.session['user']
    if user_id and not fs.exists(user_id + '/'):
        mkdir(fs.path(user_id + '/'))
    if request.method == 'POST' and request.FILES['file']:
        # File Save
        uploaded_file = request.FILES['file']
        file_full_name = '{}/{}'.format(user_id, uploaded_file)
        real_name = fs.save(file_full_name, uploaded_file)
        # DB
        file_name = real_name[len(user_id) + 1:]
        file_path = '{}/'.format(user_id)
        file_description = request.POST['description']
        file = File(user_id=user_id,
                    file_no=len(File.objects.all()),
                    file_name=file_name,
                    file_path=file_path,
                    file_description=file_description)
        file.save()
        del request.FILES['file']
    files = File.objects.filter(user_id__iexact=user_id)
    if files:
        file_list = []
        for f in files:
            file_name = '{} <{}>'.format(f.file_name[:-4], f.file_no)
            file_type = f.file_name[-3:]
            file_desc = f.file_description
            file_no = f.file_no
            tmp = [file_name, file_type, file_desc, file_no]
            file_list.append(tmp)
        context['file_list'] = file_list
    return context
def upload(request):
    # TODO: Async status updates?
    if not request.user.is_authenticated:
        return HttpResponse('log in first')
    credential = request.user.profile.google_oauth
    if not credential:
        return HttpResponse('no creds')
    manager = Musicmanager()
    # TODO: Maybe change mac address for each user?
    login_success = manager.login(
        credential,
        uploader_name="GMusicManagerOnline - {}".format(request.user.username))
    form = MusicUploadForm()
    args = {'can_login': login_success, 'form': form, 'success': False}
    if request.method == "POST":
        form = MusicUploadForm(request.POST, request.FILES)
        if form.is_valid():
            music = form.save()
            music_file = request.FILES.get('music_file')
            ext = music_file.name[music_file.name.rfind('.'):]
            fs = FileSystemStorage()
            filename = fs.save("{0}{1}".format(request.user.username, ext),
                               music_file)
            music_filepath = fs.path(filename)
            post_filepath = music_filepath + ".mp3"
            options = {i: getattr(music, i)
                       for i in ['title', 'album', 'composer', 'genre',
                                 'language', 'artist', 'album_artist']
                       if getattr(music, i) != ""}
            logger.info("Transcoding metadata: " + str(options))
            options.update({'quality': '320k'})
            _transcode(music_filepath, options, post_filepath)
            if os.path.isfile(music_filepath):
                os.remove(music_filepath)
            success, _, _ = manager.upload(  # Already transcoding.
                post_filepath, enable_matching=True, enable_transcoding=False)
            if os.path.isfile(post_filepath):
                os.remove(post_filepath)
            args.update({'success': True})
        args.update({'form': form})
    manager.logout()
    return render(request, 'core/upload.html', args)
def load_data(folder_name, file_name):
    """A helper function to load a "processed" file from the media folder.

    .. warning:: This function never returns a "file not found" error.
       Instead, it returns an empty list if the file is not found at the path.

    Parameters
    ----------
    folder_name : string
        Works as the folder name
    file_name : string
        Name of the file without the file extension

    Returns
    -------
    data_ : list of list of strings
        Returns the data read from the file. Returns an empty list in case
        the file is not found.
    """
    data_ = []
    fs = FileSystemStorage()
    file_path = fs.path(folder_name + '/' + file_name + '.tsv')
    # Check whether the file exists, otherwise do nothing
    # In the template, we display a warning for missing file
    if os.path.isfile(file_path):
        # Read the processed data from the file system
        # Each line consists of a dialect word and the predicted keywords
        with open(file_path, 'r') as f:
            for i, line in enumerate(f.readlines()):
                if i == 0:
                    continue
                # The original processed file contains confidence scores within parentheses
                # For easier reading in the template, we change the structure into
                # [dialect, keyword1, confidence1, keyword2, confidence2]
                line = line.strip().replace(' (', '|').replace(')', '')
                line = [ls for l in line.split('\t') for ls in l.split('|') if ls]
                data_.append(line)
    return data_
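To make the string surgery in that loop concrete, here is a worked example with made-up data:

line = "saba7o\tgood morning (0.93)\tmorning (0.81)"
line = line.strip().replace(' (', '|').replace(')', '')
parsed = [ls for l in line.split('\t') for ls in l.split('|') if ls]
# parsed == ['saba7o', 'good morning', '0.93', 'morning', '0.81']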
def upload_file_turma(request, pk_1):
    if request.method == 'POST':
        for myfile in request.FILES.getlist('arquivo'):
            fs = FileSystemStorage(
                location=os.path.join(os.path.dirname(__file__),
                                      '../core/corretor/dataset/submetidas/'))
            filename = fs.save(myfile.name, myfile)
            matricula = myfile._name.split('.')[0]
            alunoSessao = AlunoSessao.objects.filter(
                sessao_id=pk_1, aluno__matricula=matricula).first()
            uploaded_file_url = fs.path(filename)
            if alunoSessao:
                generate_alunoSessao(pk_1, alunoSessao.pk, uploaded_file_url)
    return redirect(reverse_lazy('sessao-details', kwargs={'pk': pk_1}))
def view_changes(request, file_name):
    fn = file_name
    fs = FileSystemStorage('loanboard/files')
    file_name = fs.path('') + '/' + file_name
    fl = pyexcel.iget_records(file_name=file_name)
    list_to_be_updated = []
    for std in fl:
        try:
            if std['Status'] != Beneficiary.objects.get(
                    form_four_index_no=std['Form 4 Index Number']).status:
                list_to_be_updated.append(
                    (Beneficiary.objects.get(reg_no=std['Registration Number']),
                     std['Status'])
                )
        except Exception:
            continue
    list_to_be_updated = paginate(list_to_be_updated, 13, request)
    return render(request, 'updates.html',
                  {'beneficiaries': list_to_be_updated, 'file_name': fn})
def update_status(request, file_name, reg_no):
    fn = file_name
    fs = FileSystemStorage('loanboard/files')
    file_name = fs.path('') + '/' + file_name
    fl = pyexcel.iget_records(file_name=file_name)
    loanbeneficiary = Beneficiary.objects.all()
    if reg_no == "all":
        for std in fl:
            bf = Beneficiary.objects.get(
                form_four_index_no=std['Form 4 Index Number'])
            if std['Status'] != bf.status:
                bf.status = std['Status']
                bf.save()
    templete_data = {
        'beneficiaries': loanbeneficiary,
    }
    return redirect('/loanboard/view-changes/' + fn)
class StdAudioField:
    '''
    Instances of this class will be used to access data of the
    converted audio files
    '''

    def __init__(self, name):
        self.name = name
        self.storage = FileSystemStorage()

    def path(self):
        return self.storage.path(self.name)

    def url(self):
        return self.storage.url(self.name)

    def size(self):
        return self.storage.size(self.name)
def simple_upload(request):
    if request.method == 'POST' and request.FILES['myfile']:
        myfile = request.FILES['myfile']
        fs = FileSystemStorage()
        filename = fs.save(myfile.name, myfile)
        uploaded_file_url = fs.url(filename)
        # using myfile.read() results in _ingest determining
        # 'content_type': 'application/octet-stream'
        # with content_length : 0
        res = Command.ingest_doc(fs.path(filename))
        return render(request, 'app/simple_upload.html', {
            'uploaded_file_url': uploaded_file_url,
            'indexed': res['created']
        })
    return render(request, 'app/simple_upload.html')
def get_sentence_from_file(uploadedFile):
    fs = FileSystemStorage()
    name = fs.save(uploadedFile.name, uploadedFile)
    with open(fs.path(name), 'rt', errors='ignore') as f:
        sentence = f.read()
    # Strip mentions and normalise punctuation before splitting.
    sentence = re.sub('@', '', sentence)
    sentence = re.sub('[uU][Ss][Ee][Rr]', '', sentence)
    for char in ['_', '-', '=', '%', '"', "'"]:
        sentence = sentence.replace(char, ' ')
    sepsent = re.findall(r'\d,+(.*)\n', sentence)
    return sepsent
def get_builtin_images(images_dir, n=None):
    fs = FileSystemStorage()
    images = []
    file_list = fs.listdir(images_dir)[1]
    # Reduce number of input images if requested
    if n:
        while len(file_list) > n:
            file_list.pop(random.randrange(0, len(file_list)))
    for filename in file_list:
        img_path = fs.path('{}/{}'.format(images_dir, filename))
        try:
            images.append(Image.open(img_path))
        except IOError:
            print("Invalid file: {}. Skipping...".format(filename))
    return images
def admin_upload(request):
    if request.method == 'POST' and request.FILES['photozip']:
        photozip = request.FILES['photozip']
        trip_id = request.POST.get('trip_id')
        fs = FileSystemStorage()
        filename = fs.save("fatalgram/temp/" + photozip.name, photozip)
        uploaded_file_url = fs.url(filename)
        trip = get_object_or_404(Trip, pk=trip_id)
        photoService = PhotoService()
        result = photoService.processZipFile(trip=trip,
                                             photozip=fs.path(filename),
                                             user=request.user)
        return render(request, 'fatalgram/admin/upload.html', {
            'uploaded_file_url': uploaded_file_url,
            "result": result
        })
    return render(request, 'fatalgram/admin/upload.html', {})
def upload(request):
    if request.method == 'POST' or request.method == 'OPTIONS':
        myfile = request.FILES['file']
        fs = FileSystemStorage()
        filename = fs.save(myfile.name, myfile)
        filepath = fs.path(filename)
        logging.info(f"Saved {filepath}")
    else:
        # default value for test
        filepath = "static/Smart_ISA_01.docx"
        logging.info(f"Use default file {filepath}")
    nlp_text = _get_text(filepath)
    # ents = _show_ents(nlp_text)
    org_ents = _get_org_ents(nlp_text)
    return JsonResponse({
        'text': [token.text for token in nlp_text],
        "org_ents": org_ents
    })
def get_file_predictions(filename):
    fs = FileSystemStorage()
    file_path = fs.path(filename)
    df = pd.read_csv(file_path)
    fs.delete(filename)
    id_df = pd.DataFrame()
    if 'id' in df.columns:
        id_df['id'] = df['id']
        df.drop('id', axis=1, inplace=True)
    payload = formatting.get_scoring_payload(df)
    scoring_payload = {client.deployments.ScoringMetaNames.INPUT_DATA: [payload]}
    result = client.deployments.score(DEPLOYMENT_UID, scoring_payload)
    id_dict = id_df.to_dict()
    ids = [id_dict['id'][x] for x in id_dict['id']]
    return {"id": ids, "predictions": result['predictions']}
class ThumbnailField(object):
    """Instances of this class will be used to access data of the
    generated thumbnails
    """

    def __init__(self, name):
        self.name = name
        self.storage = FileSystemStorage()

    def path(self):
        return self.storage.path(self.name)

    def delete(self):
        return self.storage.delete(self.name)

    def open(self):
        self.file = self.storage.open(self.name)

    def chunks(self, chunk_size=None):
        """
        Read the file and yield chunks of ``chunk_size`` bytes (defaults to
        ``UploadedFile.DEFAULT_CHUNK_SIZE``).
        """
        if not chunk_size:
            chunk_size = 64 * 2 ** 10
        if hasattr(self.file, 'seek'):
            self.file.seek(0)
        # Assume the pointer is at zero...
        counter = self.file.size
        while counter > 0:
            yield self.file.read(chunk_size)
            counter -= chunk_size

    def close(self):
        self.file.close()

    def url(self):
        return self.storage.url(self.name)

    def size(self):
        return self.storage.size(self.name)
class ThumbnailField(object):
    """Instances of this class will be used to access data of the
    generated thumbnails
    """

    def __init__(self, name):
        warn('%(class)s has been deprecated in favor of VariationsField()',
             DeprecationWarning)
        self.name = name
        self.storage = FileSystemStorage()

    def path(self):
        return self.storage.path(self.name)

    def url(self):
        return self.storage.url(self.name)

    def size(self):
        return self.storage.size(self.name)
class DummyExternalStorage(Storage):
    def __init__(self, *args, **kwargs):
        self.wrapped = FileSystemStorage(*args, **kwargs)

    def path(self, name):
        # Overridden to give it the behaviour of the base Storage class
        # This is what an external storage backend would have
        raise NotImplementedError("This backend doesn't support absolute paths.")

    def _open(self, name, mode='rb'):
        # Overridden to return a DummyExternalStorageFile instead of a normal
        # File object
        return DummyExternalStorageFile(open(self.wrapped.path(name), mode))

    # Wrap all other functions
    def _save(self, name, content):
        return self.wrapped._save(name, content)

    def delete(self, name):
        self.wrapped.delete(name)

    def exists(self, name):
        return self.wrapped.exists(name)

    def listdir(self, path):
        return self.wrapped.listdir(path)

    def size(self, name):
        return self.wrapped.size(name)

    def url(self, name):
        return self.wrapped.url(name)

    def accessed_time(self, name):
        return self.wrapped.accessed_time(name)

    def created_time(self, name):
        return self.wrapped.created_time(name)

    def modified_time(self, name):
        return self.wrapped.modified_time(name)
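This wrapper is presumably a test double: it stores files on local disk but advertises itself as an external backend by refusing path(), so it can exercise code paths that must fall back to open()/url() rather than assume direct filesystem access.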
class VariationField(object):
    """Instances of this class will be used to access data of the
    generated thumbnails
    """

    def __init__(self, name):
        self.name = name
        self.storage = FileSystemStorage()

    @property
    def path(self):
        return self.storage.path(self.name)

    @property
    def url(self):
        return self.storage.url(self.name)

    @property
    def size(self):
        return self.storage.size(self.name)
class PeruimUtils(object):
    def __init__(self):
        self.fs = FileSystemStorage(location=settings.PERUIM_FILE_PATH)
        self.samples_dir = "droidbot_samples"
        self.report_dir = "peruim_user_reports"

    def get_perm_desc(self, report_path):
        """
        get the permission description of given package
        :param report_path: report path of target app
        :return:
        """
        available_reports = self.get_available_reports()
        if report_path not in available_reports:
            return None
        perm_desc_path = "%s/perm_desc.json" % report_path
        storage = self.fs.open(perm_desc_path, 'r')
        perm_desc = json.load(storage)
        return perm_desc

    def get_app_info(self, report_path):
        app_info = {}
        app_info['Package'] = report_path
        app_info['Name'] = 'Unknown'
        app_info['Category'] = 'Unknown'
        return app_info

    def get_state_image(self, report_path, state_tag):
        image_path = "%s/device_states/snapshot_%s.png" % (report_path, state_tag)
        # print(image_path)
        if self.fs.exists(image_path):
            image = self.fs.open(image_path, "rb").read()
        else:
            image = None
        return image

    def get_component_image(self, report_path, component_id):
        image_path = "%s/components/%s.jpg" % (report_path, component_id)
        # print(image_path)
        if self.fs.exists(image_path):
            image = self.fs.open(image_path, "rb").read()
        else:
            image = None
        return image

    def save_report(self, report):
        print(report)
        tag = self.get_time_tag()
        report_path = "%s/%s_%s.json" % (self.report_dir, tag, report['user_name'])
        report_file = self.fs.open(report_path, 'w')
        json.dump(report, report_file)

    def get_time_tag(self):
        from datetime import datetime
        tag = datetime.now().strftime("%Y-%m-%d_%H%M%S")
        return tag

    def get_available_reports(self):
        available_reports = []
        base_path = self.fs.path('.')
        if self.fs.exists(self.samples_dir):
            import os
            for root, dirs, files in os.walk(base_path):
                for f in files:
                    if f == "perm_desc.json":
                        report_path = os.path.relpath(root, base_path)
                        # print(report_path)
                        available_reports.append(report_path)
        # available_packages = self.fs.listdir(self.samples_dir)
        # print(available_packages)
        # for package in available_packages[0]:
        #     perm_desc_path = "%s/%s/perm_desc.json" % (self.samples_dir, package)
        #     if self.fs.exists(perm_desc_path):
        #         available_reports.append(package)
        return available_reports
class TwisterDataUploader():
    def __init__(self):
        # The column headers from the input csv data (or file)
        self.csv_data = list()
        self.column_headers = list()
        # file system for storing files
        if os.path.isdir(settings.BASE_DIR + "/twister/templates/twister/"):
            pass
        else:
            # os.mkdir(settings.MEDIA_ROOT + "twister/")  # use this one, but the below is a quick test
            os.mkdir(settings.BASE_DIR + "/twister/templates/twister/")
        self.file_system = FileSystemStorage(
            location=settings.BASE_DIR + "/twister/templates/twister/")

    def get_csv_data_from_db(self, spreadsheet_id):
        # retrieve data associated with a given spreadsheet_id
        spreadsheet_result = list(
            Spreadsheets.objects.using('pa_io')
            .filter(spreadsheet_id=spreadsheet_id)
            .values_list('data', 'encoding_method'))
        csv_data = spreadsheet_result[0][0]
        encoding_method = spreadsheet_result[0][1]
        # encode from unicode to str
        csv_data = csv_data.encode(encoding_method)
        # next, we split the data using newlines as a delimiter
        csv_data_list = csv_data.split('\n')
        # get the data
        row_string_list = [one_row for index, one_row in enumerate(csv_data_list)
                           if index > 0]
        # for each element of data_list, split it by \t, since adjacent cells
        # are separated by tabs
        csv_data = [one_row_string.split('\t') for one_row_string in row_string_list]
        # get the header
        header_str = csv_data_list[0]
        # turn header into a list of strings
        header_list = header_str.split('\t')
        # assign header_str and csv_data to their corresponding fields
        self.column_headers = header_list
        self.csv_data = csv_data
        return csv_data

    def insert_csv_data_into_db(self, file_name, company_id, user_id,
                                spreadsheet_id=None, usage=1):
        # get the headers and data as a single string, where each row is
        # separated by a newline character
        csv_headers = self.column_headers
        csv_data = self.csv_data
        headers_formatted = '\t'.join(csv_headers)
        data_flattened = ['\t'.join(onerow) for onerow in csv_data]
        data_formatted = '\n'.join(data_flattened)
        final_data = headers_formatted + '\n' + data_formatted
        final_data = final_data.replace("\'", "\'\'")
        # detect character encoding of csv data
        encoding = chardet.detect(final_data)
        encoding_method = encoding['encoding']
        # encoding_confidence = encoding['confidence']
        spreadsheet_date = time.strftime('%Y-%m-%d %H:%M:%S')
        if spreadsheet_id is not None:
            spreadsheet_object = Spreadsheets.objects.using('pa_io').filter(
                spreadsheet_id=spreadsheet_id)[0]
            spreadsheet_object.data = final_data
            spreadsheet_object.spreadsheet_name = file_name
            spreadsheet_object.last_updated_by = user_id
            spreadsheet_object.last_updated_date = spreadsheet_date
            spreadsheet_object.save(using='pa_io')
        else:
            final_data = final_data.decode(encoding_method)
            spreadsheet_id = Spreadsheets.objects.using('pa_io').latest(
                'spreadsheet_id').spreadsheet_id + 1
            spreadsheet_object = Spreadsheets()
            spreadsheet_object.company_id = company_id
            spreadsheet_object.user_id = user_id
            spreadsheet_object.spreadsheet_name = file_name
            spreadsheet_object.usage = usage
            spreadsheet_object.data = final_data
            spreadsheet_object.share_type = 2
            spreadsheet_object.created_by = user_id
            spreadsheet_object.created_date = spreadsheet_date
            spreadsheet_object.last_updated_by = user_id
            spreadsheet_object.last_updated_date = spreadsheet_date
            spreadsheet_object.encoding_method = encoding_method
            flag_saved = False
            while not flag_saved:
                try:
                    spreadsheet_object.spreadsheet_id = spreadsheet_id
                    spreadsheet_object.save(using='pa_io')
                    flag_saved = True
                except IntegrityError:
                    try:
                        spreadsheet_id = Spreadsheets.objects.using('pa_io').latest(
                            'spreadsheet_id').spreadsheet_id + 1
                    except ObjectDoesNotExist:
                        spreadsheet_id = 1
        return spreadsheet_id

    def read_spreadsheet_data(self, data):
        # clear self.csv_data value from previous uploads before we start reading
        self.csv_data = []
        row_list = data.split('\n')
        for index, row in enumerate(row_list):
            # replace tabs with commas
            row = row.replace('\t', ',')
            row = row.replace('\r', '')
            # surround each field with single quotes
            row_fields_list = row.split(',')
            row_fields_list = [field.encode('utf-8') for field in row_fields_list]
            # if line 1 in data, it is the header
            if index == 0:
                self.column_headers = row_fields_list
            else:
                if row:
                    self.csv_data.append(row_fields_list)

    def read_csv_data_from_file(self, filename):
        # clear self.csv_data value from previous uploads before we start reading
        self.csv_data = []
        try:
            file_path = self.file_system.path(filename)
        except NotImplementedError:
            raise Exception('File not found for reading')
        with open(file_path, 'rb') as file_handle:
            file_reader = csv.reader(file_handle)
            count = 0
            for row in file_reader:
                if count == 0:
                    count = 1
                    self.column_headers = row
                else:
                    if row:
                        self.csv_data.append(row)
class RemoteFinder(BaseFinder):
    def __init__(self):
        self.always_verify = getattr(settings, "REMOTE_FINDER_ALWAYS_VERIFY", False)
        self.cache_dir = getattr(settings, "REMOTE_FINDER_CACHE_DIR", None)
        if not self.cache_dir:
            raise ImproperlyConfigured(
                "settings.REMOTE_FINDER_CACHE_DIR must point to a cache directory.")
        self.storage = FileSystemStorage(self.cache_dir)
        try:
            resources_setting = settings.REMOTE_FINDER_RESOURCES
        except AttributeError:
            logger.warning("RemoteFinder is enabled, but "
                           "settings.REMOTE_FINDER_RESOURCES is not defined.")
            resources_setting = ()
        if not isinstance(resources_setting, (list, tuple)):
            raise ImproperlyConfigured(
                "settings.REMOTE_FINDER_RESOURCES must be a list or tuple")
        resources = {}
        for resource in resources_setting:
            try:
                path, url, cksm = resource
            except ValueError:
                raise ImproperlyConfigured(
                    "Each item in settings.REMOTE_FINDER_RESOURCES must be a "
                    "tuple of three elements (path, url, cksm).")
            try:
                hash_type, expected_hexdigest = cksm.split(':')
            except ValueError:
                raise ImproperlyConfigured(
                    "RemoteFinder checksum `%s` is not in `hash_type:hexdigest` "
                    "format." % cksm)
            try:
                hash_func = hash_func_map[hash_type]
            except KeyError:
                raise ImproperlyConfigured(
                    "RemoteFinder: hash type `%s` unknown" % hash_type)
            try:
                expected_digest = bytearray.fromhex(expected_hexdigest)
            except ValueError:
                raise ImproperlyConfigured(
                    "Cannot parse hex string in settings.REMOTE_FINDER_RESOURCES: "
                    "`%s`" % expected_hexdigest)
            if len(expected_digest) != hash_func().digest_size:
                raise ImproperlyConfigured(
                    "settings.REMOTE_FINDER_RESOURCES: %s digest expected %d bytes "
                    "but %d provided: `%s`" % (hash_type, hash_func().digest_size,
                                               len(expected_digest),
                                               expected_hexdigest))
            resources[path] = _ResourceInfo(url, hash_func, expected_digest)
        self.resources = resources

    def find(self, path, all=False):
        try:
            resource_info = self.resources[path]
        except KeyError:
            return []
        self.fetch(path, resource_info)
        match = self.storage.path(path)
        if all:
            return [match]
        else:
            return match

    def fetch(self, path, resource_info):
        if self.storage.exists(path):
            # check to see if the hash has already been verified in the
            # lifetime of this process
            if resource_info.hash_verified and not self.always_verify:
                return
            # verify the hash
            f = self.storage.open(path)
            try:
                content = f.read()
            finally:
                f.close()
            digest = resource_info.hash_func(content).digest()
            if digest == resource_info.expected_digest:
                resource_info.hash_verified = True
                return
            # hash verification failed, so delete it from storage and
            # re-download the file
            logger.info("Hash verification failed, so deleting %s from storage", path)
            # The following line does /not/ raise an exception if the file is
            # already deleted, which is desirable for us as it prevents an
            # error in the case of a race condition.
            self.storage.delete(path)
        # download the file
        logger.info("Downloading %s", resource_info.url)
        f = urlopen(resource_info.url)
        try:
            content = f.read()
        finally:
            f.close()
        # check its hash
        digest = resource_info.hash_func(content).digest()
        if digest != resource_info.expected_digest:
            raise RuntimeError(
                "Digest for %s does not match expected value given in "
                "settings.REMOTE_FINDER_RESOURCES" % resource_info.url)
        # save it
        name = self.storage.save(path, ContentFile(content))
        if name == path:
            resource_info.hash_verified = True
        else:
            logger.warning("Save failed: %r != %r", name, path)

    def list(self, ignore_patterns):
        for path, resource_info in self.resources.items():
            if matches_patterns(path, ignore_patterns):
                continue
            self.fetch(path, resource_info)
            yield path, self.storage
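A hedged illustration of the settings shape this finder expects, inferred from the parsing code above; the path, URL, and digest are placeholders, and 'sha256' assumes that key exists in hash_func_map:

REMOTE_FINDER_CACHE_DIR = '/var/cache/remote_finder'
REMOTE_FINDER_RESOURCES = [
    # (path, url, 'hash_type:hexdigest')
    ('js/vendor.min.js',
     'https://example.com/static/vendor.min.js',
     'sha256:' + '0' * 64),  # placeholder digest of the correct length
]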
def path(self, name):
    if self.is_thumbor(name):
        return ThumborStorage.path(self, name)
    else:
        return FileSystemStorage.path(self, name)
class WebcamStorage:
    PICTURE_DIR = 'pics'
    PREDICTION_DIR = 'predictions'

    def __init__(self, location):
        self.fs = FileSystemStorage(location)

    def dataset_path(self, webcam_id):
        return self.fs.path(webcam_id + '.h5')

    def _image_path(self, webcam_id, img_dir, timestamp=None):
        rel_path = os.path.join(webcam_id, img_dir)
        if timestamp is not None:
            path_format = settings.PICTURE_PATH.replace('%t', str(timestamp))
            pic_date = datetime.fromtimestamp(timestamp)
            pic_path = pic_date.strftime(path_format)
            rel_path = os.path.join(rel_path, pic_path)
        return rel_path

    def picture_path(self, webcam_id, timestamp=None):
        """
        Return the path of a picture from the given webcam and taken at
        given `timestamp`. If `timestamp` is None, return the path of the
        directory.
        """
        return self._image_path(webcam_id, self.PICTURE_DIR, timestamp)

    def prediction_path(self, webcam_id, params_name, timestamp=None):
        """
        Return the path of a prediction image for the given webcam, the
        prediction parameters named `params_name` and made at `timestamp`.
        If `timestamp` is None, return the path of the directory.
        """
        img_dir = os.path.join(self.PREDICTION_DIR, params_name)
        return self._image_path(webcam_id, img_dir, timestamp)

    def get_pixels(self, img, webcam_id):
        """
        Return the pixels data of an image, as given by
        `ImageSet.pixels_from_file()`.

        Parameters
        ----------
        img : int or str
            If a number, it is assumed to be a timestamp of when the picture
            was taken by the webcam. It will be read from the images set.
            If a string, it is assumed to be a path, it will be read from
            the file at this path.
        webcam_id : str
            The id of the webcam that took the picture
        """
        with self.get_dataset(webcam_id) as dataset:
            if isinstance(img, Number):
                img_dict = dataset.imgset.get_image(img, False)
                return img_dict['pixels']
            else:
                if not os.path.isabs(img):
                    img = self.fs.path(img)
                return dataset.imgset.pixels_from_file(img)

    def add_webcam(self, webcam_id):
        """
        Create the required files and directories for a new webcam
        """
        logger.info("Creating webcam %s on file system", webcam_id)
        # create pytables files
        hdf5_path = self.dataset_path(webcam_id)
        w, h = settings.WEBCAM_SIZE
        img_shape = h, w, 3
        w, h = settings.DEFAULT_FEATURES_SIZE
        feat_shape = h, w, 3
        with ImageSet.create(hdf5_path, img_shape) as imageset:
            extractor = RawFeatures(feat_shape, img_shape)
            imageset.add_feature_set(extractor)
            DataSet.create(imageset).close()
        # create directories for pictures
        pics_path = self.fs.path(self.picture_path(webcam_id))
        os.makedirs(pics_path)

    def delete_webcam(self, webcam_id):
        """
        Delete the files and directories related to a webcam
        """
        logger.info("Deleting webcam %s from file system", webcam_id)
        hdf5_path = self.dataset_path(webcam_id)
        os.remove(hdf5_path)
        shutil.rmtree(self.fs.path(webcam_id))

    def add_feature_set(self, feature_set_model):
        feat_type = feature_set_model.extract_type
        webcam_id = feature_set_model.webcam.webcam_id
        if feat_type not in features_extractors:
            raise ValueError("No features extractor named %s" % feat_type)

        def task(webcam_id, feat_type):
            with self.open_dataset(webcam_id) as dataset:
                if feat_type == 'raw':
                    img_shape = dataset.imgset.img_shape
                    w, h = settings.DEFAULT_FEATURES_SIZE
                    feat_shape = h, w, 3
                    extractor = RawFeatures(feat_shape, img_shape)
                elif feat_type == 'pca':
                    logger.info("Starting computation of a PCA model")
                    sample = dataset.imgset.sample()
                    extractor = PCAFeatures.create(sample)
                else:  # rbm
                    logger.info("Starting computation of a RBM model")
                    sample = dataset.imgset.sample()
                    extractor = RBMFeatures.create(sample)
                if extractor.name in dataset.imgset.feature_sets:
                    raise ValueError("The feature set %s already exists"
                                     % extractor.name)
                logger.info("Adding a new set of features %s to webcam %s",
                            feat_type, webcam_id)
                dataset.imgset.add_feature_set(extractor)

        t = threading.Thread(target=task, args=[webcam_id, feat_type])
        t.start()
        return feat_type  # FIXME an event should send name to model

    def delete_feature_set(self, feature_set_model):
        logger.info("Deleting feature set %s", feature_set_model.name)
        webcam_id = feature_set_model.webcam.webcam_id
        with self.get_dataset(webcam_id) as dataset:
            dataset.imgset.remove_feature_set(feature_set_model.name)
            dataset.repack()

    def open_dataset(self, webcam_id):
        hdf5_path = self.dataset_path(webcam_id)
        return DataSet.open(hdf5_path)

    @contextmanager
    def get_dataset(self, webcam_id):
        """
        Returns the dataset of `webcam_id`, cached for the current request.
        This method should be called only in a request thread.
        Returns a dummy context manager for backward compatibility
        """
        cache = get_dataset_cache()
        yield cache[self.dataset_path(webcam_id)]

    def add_picture(self, webcam, timestamp, fp):
        """
        Add a new picture associated to `webcam`

        Parameters
        ----------
        webcam : models.Webcam instance
        timestamp : str or int
            The UNIX Epoch of when the picture was taken
        fp : str or `file-like` object
            The filename or pointer to the file of the image.
            If a file object, it must be accepted by Pillow
        """
        # read and resize the image
        img = Image.open(fp)
        if img.size != settings.WEBCAM_SIZE:
            img = img.resize(settings.WEBCAM_SIZE)
        # store the image in file
        filepath = self.picture_path(webcam.webcam_id, timestamp)
        dirname = self.fs.path(os.path.dirname(filepath))
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        with self.fs.open(filepath, mode='wb') as fp_res:
            img.save(fp_res)
        # store the image in dataset
        with self.get_dataset(webcam.webcam_id) as dataset:
            # FIXME give directly PIL reference
            abspath = self.fs.path(filepath)
            img_dict = dataset.add_image(abspath)
        return img_dict

    def add_prediction(self, prediction):
        dirname = os.path.dirname(self.fs.path(prediction.path))
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        with self.fs.open(prediction.path, 'w') as fp:
            plt.imsave(fp, prediction.sci_bytes)

    def add_examples_set(self, params):
        """
        Create the directories and pytables group for a set of examples
        """
        def task(params):
            logger.info("Creating example set %s on file system", params.name)
            cam_id = params.webcam.webcam_id
            pred_path = self.fs.path(self.prediction_path(cam_id, params.name))
            try:
                # Make sure the directory is empty if it exists
                shutil.rmtree(pred_path, ignore_errors=True)
                os.makedirs(pred_path)
                with self.open_dataset(cam_id) as dataset:
                    dataset.make_set(params.name, params.intervals,
                                     params.features.name)
            except Exception:
                logger.exception("Error while creating set %s", params.name)
            else:
                logger.info("Done creating the set %s", params.name)

        t = threading.Thread(target=task, args=[params])
        t.start()

    def delete_examples_set(self, params):
        """
        Remove the directories and pytables group for a set of examples
        """
        logger.info("Deleting example set %s from file system", params.name)
        cam_id = params.webcam.webcam_id
        pred_path = self.fs.path(self.prediction_path(cam_id, params.name))
        try:
            shutil.rmtree(pred_path)
            with self.get_dataset(cam_id) as dataset:
                dataset.delete_set(params.name)
                dataset.repack()
        except Exception:
            logger.exception("Error while deleting set %s", params.name)
        else:
            logger.info("Done deleting the set %s", params.name)
class ProcessorMixin(object):
    """
    Adds pre-processor support to a StaticFilesFinder
    """

    def __init__(self, *args, **kwargs):
        super(ProcessorMixin, self).__init__(*args, **kwargs)
        # Configure temporary storage space for processed files
        tmp_dir = getattr(settings, 'STATICFILESPLUS_TMP_DIR',
                          os.path.join(settings.STATIC_ROOT, 'staticfilesplus_tmp'))
        self.tmp_storage = FileSystemStorage(location=tmp_dir)
        # Can't set this as None through the constructor because it will
        # default to MEDIA_URL
        self.tmp_storage.base_url = None
        # Configure processors
        if not isinstance(settings.STATICFILESPLUS_PROCESSORS, (list, tuple)):
            raise ImproperlyConfigured(
                "Your STATICFILESPLUS_PROCESSORS setting is not a tuple or list; "
                "perhaps you forgot a trailing comma?")
        self.processors = []
        for processor in settings.STATICFILESPLUS_PROCESSORS:
            Processor = get_callable(processor)
            self.processors.append(Processor())

    def find(self, path, all=False):
        if all:
            raise NotImplementedError(
                "Staticfilesplus can't handle the `all` flag at the moment")
        # Walk the list of processors, seeing if any want to handle
        # this request and if there's a matching file
        tried_names = set()
        for processor in self.processors:
            orig_name = processor.get_original_name(path)
            if orig_name is None or orig_name in tried_names:
                continue
            tried_names.add(orig_name)
            match = super(ProcessorMixin, self).find(orig_name)
            if match:
                if processor.is_ignored_file(orig_name):
                    return []
                else:
                    return self.process_file(processor, match, path)
        # As a last resort we try the untransformed path
        if path not in tried_names:
            return super(ProcessorMixin, self).find(path)
        else:
            return []

    def list(self, *args, **kwargs):
        for name, storage in super(ProcessorMixin, self).list(*args, **kwargs):
            # Walk the list of processors, seeing if any want to handle
            # this type of file
            matched_processor = None
            for processor in self.processors:
                processed_name = processor.get_processed_name(name)
                if processed_name is not None:
                    matched_processor = processor
                    break
            if matched_processor is None:
                yield name, storage
            else:
                # If the processor explicitly excludes this file then pretend
                # we never found it
                if matched_processor.is_ignored_file(name):
                    continue
                path = storage.path(name)
                self.process_file(matched_processor, path, processed_name)
                yield processed_name, self.tmp_storage

    def process_file(self, processor, path, processed_name):
        # Get the full output path
        output_path = self.tmp_storage.path(processed_name)
        # Create the required directories
        try:
            os.makedirs(os.path.dirname(output_path), 0o775)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        # Process the file
        processor.process_file(path, output_path)
        return output_path
class LocallyMirroredS3BotoStorage(S3BotoStorage):
    def __init__(self, *args, **kwargs):
        super(LocallyMirroredS3BotoStorage, self).__init__(*args, **kwargs)
        self.mirror = FileSystemStorage(location=settings.S3_MIRROR_ROOT)

    def delete(self, name):
        super(LocallyMirroredS3BotoStorage, self).delete(name)
        try:
            self.mirror.delete(name)
        except OSError:
            full_path = self.mirror.path(name)
            if os.path.exists(full_path):
                os.rmdir(full_path)

    def exists(self, name):
        exists_local = self.mirror.exists(name)
        if exists_local:
            return True
        else:
            exists_remote = super(LocallyMirroredS3BotoStorage, self).exists(name)
            if exists_remote:
                self.mirror._save(name, ContentFile(""))
                return True
            return False

    def _save(self, name, content):
        cleaned_name = super(LocallyMirroredS3BotoStorage, self)._save(name, content)
        self.mirror._save(name, ContentFile(""))
        return cleaned_name
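Design note: the mirror never stores real content; _save(name, ContentFile("")) writes a zero-byte marker file so that subsequent exists() checks can be answered from the local filesystem without an S3 round-trip, which appears to be the whole point of the class.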