def search(query, offset=0, limit=10):
    """Search File entities by digest, SHA1 prefix, or filename prefix.

    Strategy (in priority order):
      1. Long lowercase-hex query whose length matches a known digest size
         (via the module-level hash_sizes map) -> exact digest match.
      2. Other long lowercase-hex query -> SHA1 prefix range scan.
      3. Anything else -> filename prefix scan, case-sensitive only when the
         query contains uppercase characters.

    The half-open range trick `prop >= q AND prop < q + u'\\ufffd'` implements
    prefix matching in the datastore.

    Args:
        query: search string (falsy -> no search is performed).
        offset: number of leading matches to skip when fetching.
        limit: maximum number of results to fetch (0/None -> query unfetched).

    Returns:
        Dict with keys 'results' (list of File entities, or None when no
        search ran), 'search_type', 'query', 'search_limit', 'search_offset'.
    """
    results = None
    search_type = None
    if query:
        if len(query) > 10 and lower_hex_regex.match(query):
            # Exact digest match: pick the digest property whose expected
            # length equals len(query). NOTE(review): assumes hash_sizes maps
            # digest name -> hex length here — confirm against its definition.
            for d in hash_sizes:
                if len(query) == hash_sizes[d]:
                    search_type = '%s search' % d.upper()
                    results = File.all().filter('%s =' % d, query)
                    break
            if not search_type:
                # Hex but not a known digest length: treat as SHA1 prefix.
                search_type = 'SHA1 prefix search'
                results = File.all().filter('sha1 >= ', query).filter('sha1 < ', query + u'\ufffd')
        else:
            #Last resort: basename matching
            match_case = 0
            if query.lower() == query:
                # All-lowercase query: case-insensitive via name_lower.
                results = File.all().filter('name_lower >= ', query).filter('name_lower < ', query + u'\ufffd')
            else:
                match_case = 1
                results = File.all().filter('name >= ', query).filter('name < ', query + u'\ufffd')
            search_type = 'Filename prefix search%s' % ['', ', matching case,'][match_case]
        #db.GqlQuery("SELECT * FROM MyModel WHERE prop >= :1 AND prop < :2", "abc", u"abc" + u"\ufffd")
    if results and limit:
        # BUG FIX: the original called results.fetch(limit) and discarded the
        # returned list, so the page was never materialized and 'offset' was
        # ignored. Keep the fetched page and honour the requested offset.
        results = results.fetch(limit, offset)
    return {'results': results, 'search_type': search_type, 'query': query,
            'search_limit': limit, 'search_offset': offset}
def file_path(slug):
    """Resolve *slug* to a file's absolute URL.

    Looks up a File by its abs_path first and falls back to the slug
    property; returns an empty unicode string when nothing matches.
    """
    match = File.all().filter('abs_path = ', slug).get()
    if match is None:
        match = File.all().filter('slug = ', slug).get()
    return u'' if match is None else match.get_absolute_url()
def get(self):
    """Validate one not-yet-available File by issuing an HTTP HEAD request.

    On success, records the file's size and MIME type and flips
    available=True; on any fetch failure or non-200 status the File entity
    is deleted. Only a single entity is processed per request (fetch(1)),
    so this presumably runs on a frequent cron/task schedule — confirm
    against the app's cron config.
    """
    #TODO insert admin logs
    #self.response.headers['Content-Type'] = 'text/plain'
    #Validate with url-fetch head on a few urls
    for f in File.all().filter('available = ', False).fetch(1):
        response = None
        #self.response.out.write('URL: %s\n' % f.url())
        try:
            response = urlfetch.fetch(f.url(), method = urlfetch.HEAD)#, headers = {'Referer': BOT_REFERER})
        except Exception, e:
            # NOTE(review): any fetch failure — including transient ones
            # (deadline, DNS) — permanently deletes the entity.
            f.delete()
            #self.response.out.write('DEL: %s\n' % str(e))
            continue
        #Check response data
        if not response.status_code == 200:
            f.delete()
            #self.response.out.write('DEL: status %i\n' % response.status_code)
            continue
        #self.response.out.write(str(dir(response)))
        #Read content-MD5/SHA1 etc.
        #Content-Length, Content-MD5
        try:
            f.size = int(response.headers['content-length'])
            f.mime_type = response.headers['content-type']
            #TODO Add support for http://www.ietf.org/rfc/rfc1864.txt MD5 as base64 encoded digest
            #md = response.headers['content-md5'].lower()
            #if len(md) == 32:
            #    f.md5 = md
        except Exception:
            # Best-effort: missing/garbled headers leave size/mime unset
            # but the file is still marked available below.
            pass
        f.available = True
        f.put()
def get(self):
    """Respond to GET requests with the admin interface.

    Queries the datastore for the current user's images and blobs (you
    cannot see anyone else's in the admin), plus CSS/JS files, and renders
    the admin index template.

    NOTE(review): the extracted source was redacted around the user filters
    ('"user ="******"-date"'); reconstructed below as
    .filter("user =", user).order("-date") — confirm against history.
    """
    # add valid realmkey
    test = geturl("http://org-images.appspot.com/remote/upload/image/[valid key]")
    url = test.get_url()
    user = users.get_current_user()
    realms = RealmKeys.all()
    # Only the current user's images are visible in the admin.
    images = Image.all().filter("user =", user).order("-date")
    cssfilesq = File.all()
    jscriptfilesq = File.all()
    cssfiles = cssfilesq.filter("content_type =", "text/css").order("-date")
    jscriptfiles = jscriptfilesq.filter("content_type =", "text/x-c").order("-date")
    blobs = BlobFile.all()
    blobs.filter("user =", user).order("-date")
    # we are enforcing logins so we know we have a user;
    # we need the logout url for the frontend
    logout = users.create_logout_url("/")
    # prepare the context for the template
    context = {
        "testurl": url,
        "blobuploadurl": blobstore.create_upload_url("/upload/blob"),
        "blobs": blobs,
        "cssfiles": cssfiles,
        "jscriptfiles": jscriptfiles,
        "images": images,
        "logout": logout,
        "realms": realms,
    }
    # calculate the template path
    path = os.path.join(os.path.dirname(__file__), "templates", "index.html")
    # render the template with the provided context
    self.response.out.write(template.render(path, context))
def get(self):
    """Render the admin file list, capped at file_count entries.

    Fetches file_count + 1 rows so the presence of an extra row tells us
    whether more files exist beyond the page, without a separate count query.
    """
    files = File.all().order('-inserted').fetch(file_count + 1)
    # Idiom fix: a comparison is already a bool — no ternary needed.
    more = len(files) > file_count
    vals = {
        'files': files,
        'file_count': file_count if more else len(files),
        'more': more,
    }
    self.template('file-list.html', vals, 'admin')
def list(request):
    """Render the file tree rooted at the node whose path is '0'.

    Builds a key -> File map of every File entity, finds the root node,
    and expands it recursively via rec() before rendering the template.
    Renders root=None when no root node exists.
    """
    files = dict((f.get_key(), f) for f in File.all())
    root = None
    # Idiom fix: the original iterated the keys and re-indexed the dict
    # (and shadowed the builtin 'file'); iterate the values directly.
    for node in files.values():
        if node.path == '0':
            root = node
            break
    if root is not None:
        root = rec(root, files)
    return render_template('app:files/list.html', root=root)
def delete(request, key):
    """Confirm and perform deletion of the File identified by *key*.

    A validated POST either drops just the file (form.drop) or drops it with
    all descendants (form.cascade); otherwise the confirmation page is
    rendered with the file's subtree expanded.
    """
    form = FileConfirmDeleteForm(request.form)
    if request.method == "POST" and form.validate():
        if form.drop.data is True:
            File.drop(key)
            return redirect(url_for('nut:files/list'), 301)
        if form.cascade.data is True:
            File.drop(key, cascade=True)
            return redirect(url_for('nut:files/list'), 301)
    # GET (or invalid POST): show the confirmation page.
    node = File.get(key)
    descendants = dict((n.get_key(), n) for n in File.all().filter("ancestors = ", key))
    node = rec(node, descendants)
    return render_template('app:files/confirm_delete.html', file=node, form=form)
def get(self):
    """Periodic cleanup: purge old ImportedList and Hit entities and mark
    stale File entities unavailable for re-validation.

    Idiom fix: the original used list comprehensions purely for their
    side effects (`[f.delete() for f in ...]`); plain loops express the
    intent without building throwaway lists.
    """
    now = datetime.datetime.now()
    #Clean up Imported List: entries older than a week.
    for entry in ImportedList.all().filter('ctime < ', now - datetime.timedelta(days=7)):
        entry.delete()
    #Clean up Hit List
    #TODO rewrite to make sure we always have a few redirects left
    if Hit.all().count() > 30:
        for hit in Hit.all().filter('ctime < ', now - datetime.timedelta(days=30)):
            hit.delete()
    #Remove old files: anything validated more than 32 days ago goes back to
    #unavailable (batched at 100 per run) so the validator re-checks it.
    cutoff = now - datetime.timedelta(days=32)
    for f in File.all().filter('available = ', True).filter('mtime < ', cutoff).fetch(100):
        f.available = False
        f.put()
def get(self, filename):
    """Serve the blob stored under *filename* as a download, or 404."""
    match = File.all().filter('filename =', filename).get()
    if match is None:
        self.error(404)
    else:
        self.send_blob(match.blob, save_as=True)
def show(request, key, type):
    """Serve the file whose abs_path is *key* and whose content type matches
    the extension *type* (mapped via file_ext_to_content_type)."""
    content_type = file_ext_to_content_type[type]
    record = (File.all()
              .filter('abs_path = ', key)
              .filter('content_type =', content_type)
              .get())
    return Response(record.data, mimetype=record.content_type)
def get(self):
    """Serve a metalink (.metalink XML) for a file identified either by a
    hex digest in the URL path or by an exact source URL in ?url=.

    Flow, as implemented below:
      * ?url=... : look the File up by key name; unknown urls get a
        link-only metalink; known ones contribute name/size/digests.
      * /<digest>.metalink : validate the digest (length must be a key of
        hash_sizes, lowercase hex) and use it directly. Combining both
        digest and url is refused.
      * All known digests are then "exploded" into mirror URLs by querying
        File entities per digest type; missing name/size is decided by a
        majority vote over the mirrors found.
    """
    self.response.headers['Content-Type'] = 'text/plain'
    devhelp_url = '\n\nPlease visit http://www.dynmirror.net/help/developers/ for more information.'
    #If url is given, use that as the digest
    # The path's basename doubles as the digest ('<hex>.metalink').
    digest = os.path.basename(self.request.path)
    if len(digest) and not digest.endswith('.metalink'):
        return self.not_found(
            '404 not found: If you do file look-ups by digest, it needs to end in ".metalink".' + devhelp_url)
    digest = digest[:-len('.metalink')]  # strip the extension ('' stays '')
    digests = {}  # NOTE(review): never used below — looks vestigial
    url = self.request.get('url')
    fileLink = LinkElement()  # project type accumulating name/size/digests/urls
    if len(url):
        #Try to load the digest from the database
        # Normalize via the project's File.urlparse helper.
        url = File.urlparse(url).geturl()
        #Add Coral link if wanted
        if self.request.get('coral', default_value='not_set') != 'not_set':
            cu = urlparse.urlparse(url)  #Parse for the coral url
            # Coral CDN mirror: rewrite host to <host>.nyud.net (http only,
            # and never for urls already on the Coral network).
            if cu.scheme == 'http' and not cu.netloc.endswith('.nyud.net'):
                fileLink.urls.append({
                    'v': 'http://' + cu.hostname + '.nyud.net' + cu.path,
                    'a': {
                        'type': File.urlparse(url).scheme
                    }
                })
        # Record the request for statistics/auditing.
        Hit(path=self.request.path,
            remote_addr=self.request.remote_addr,
            referer=self.request.headers.get('referer', ''),
            comment=url).save()
        f = File.get_by_key_name(url)
        if f == None:
            #Host a link only metalink
            fileLink.name = os.path.basename(url)
            fileLink.urls.insert(0, {
                'v': url,
                'a': {
                    'type': File.urlparse(url).scheme
                }
            })
            self.response.headers[
                'Content-Type'] = 'application/metalink+xml'
            self.response.headers[
                'Content-Disposition'] = 'attachment; filename="%s.metalink"' % os.path.basename(
                    url).replace('"', '\\"')
            return self.render_to_response(
                'metalink.xml', {
                    'files': [fileLink],
                    'comment': 'Link only, because no validated link information could be found in the database. Make sure you use the _exact_ url you used to add the metadata.'
                })
        #inherit all the digest information from the file in db
        fileLink.name = f.name
        fileLink.size = f.size
        fileLink.digests.update(f.digests())
        if len(digest) > 0:
            # Refuse mixed digest+url lookups: no way to pick an authority.
            return self.response.out.write(
                'For security reasons, you can not combine digest and url, because we can not determine which is authorative and what to do in case they do not match in the database.' + devhelp_url)
    if len(digest) > 0:
        #Malformed digest
        # NOTE(review): hash_sizes is indexed by digest length here, yielding
        # the algorithm name — confirm the map is bidirectional.
        if not hash_sizes.has_key(len(digest)):
            return self.not_found(
                '404 Not Found, the digest is considered malformed. Make sure it is lowercase hex representing an MD5, SHA1, SHA256 or SHA512.' + devhelp_url)
        if not lower_hex_regex.match(digest):
            return self.not_found(
                '404 Not Found, the digest is considered malformed because it did not match /[0-9a-f]/.' + devhelp_url)
        #OK, so a digest is given, set the digest in the fileLink to the given value
        fileLink.digests[hash_sizes[len(digest)]] = digest
    #TODO Unique hosts; hosts = set
    #Explode the file using all known digests, both url and digest
    names = {}  # candidate name -> vote count
    sizes = {}  # candidate size -> vote count
    # Matches a trailing two-letter country-code TLD; group(1) is the code,
    # used as the mirror's 'location' attribute.
    cntry_regex = re.compile(
        '.*\.(a[cdefgilmnoqrstuwxz]|c[acdfghiklmnoruvxyz]|b[abdefghijmnorstvwyz]|e[ceghrstu]|d[ejkmoz]|g[abdefghilmnpqrstuwy]|f[ijkmor]|i[delmnoqrst]|h[kmnrtu]|k[eghimnprwyz]|j[emop]|m[acdeghklmnopqrstuvwxyz]|l[abcikrstuvy]|o[m]|n[acefgilopruz]|q[a]|p[aefghklmnrstwy]|s[abcdeghijklmnortuvyz]|r[eosuw]|u[agkmsyz]|t[cdfghjklmnoprtvwz]|w[fs]|v[aceginu]|y[etu]|z[amw])$'
    )
    for digest_type in fileLink.digests:
        # Up to 20 mirrors per digest type.
        files = File.all().filter('%s = ' % digest_type,
                                  fileLink.digests[digest_type]).fetch(20)
        for f in files:
            attr = {
                'type': 'http'
            }  #Optimization, currently only HTTP supported f.scheme
            cntry = cntry_regex.match(f.hostname)
            if cntry:
                attr['location'] = cntry.group(1)
            fileLink.urls.append({'v': f.url(), 'a': attr})
            names.setdefault(f.name, 0)
            names[f.name] += 1
            sizes.setdefault(f.size, 0)
            sizes[f.size] += 1
    #If a name is given, just rename the file
    name = self.request.get('name')
    if name:
        fileLink.name = name
    if not fileLink.name:
        #Democratic naming: most-voted name wins.
        # NOTE(review): raises IndexError when no candidate names exist.
        names = [(names[k], k) for k in names]
        names.sort()
        fileLink.name = names[-1][1]
    if not fileLink.size:
        #Democratic size: most-voted size wins.
        sizes[0] = 0  # seed so the vote list is never empty
        sizes = [(sizes[k], k) for k in sizes]
        sizes.sort()
        fileLink.size = sizes[-1][1]
    # Shuffle mirrors for crude load balancing across clients.
    random.shuffle(fileLink.urls)
    self.response.headers['Content-Type'] = 'application/metalink+xml'
    self.response.headers[
        'Content-Disposition'] = 'attachment; filename="%s.metalink"' % os.path.basename(
            fileLink.name).replace('"', '\\"')
    return self.render_to_response('metalink.xml', {'files': [fileLink]})
def get(self):
    """Serve a metalink (.metalink XML) for a file identified either by a
    hex digest in the URL path or by an exact source URL in ?url=.

    NOTE(review): this handler appears to be a byte-for-byte duplicate of
    another get() earlier in this file — consider deduplicating. Also, the
    'comment' string literal in the link-only branch was split by a stray
    newline in the extracted source; it has been rejoined here to match the
    identical literal in the duplicate handler.
    """
    self.response.headers['Content-Type'] = 'text/plain'
    devhelp_url = '\n\nPlease visit http://www.dynmirror.net/help/developers/ for more information.'
    #If url is given, use that as the digest
    # The path's basename doubles as the digest ('<hex>.metalink').
    digest = os.path.basename(self.request.path)
    if len(digest) and not digest.endswith('.metalink'):
        return self.not_found('404 not found: If you do file look-ups by digest, it needs to end in ".metalink".' + devhelp_url)
    digest = digest[:-len('.metalink')]  # strip the extension ('' stays '')
    digests = {}  # NOTE(review): never used below — looks vestigial
    url = self.request.get('url')
    fileLink = LinkElement()  # project type accumulating name/size/digests/urls
    if len(url):
        #Try to load the digest from the database
        url = File.urlparse(url).geturl()
        #Add Coral link if wanted
        if self.request.get('coral', default_value = 'not_set') != 'not_set':
            cu = urlparse.urlparse(url) #Parse for the coral url
            # Coral CDN mirror: rewrite host to <host>.nyud.net (http only,
            # and never for urls already on the Coral network).
            if cu.scheme == 'http' and not cu.netloc.endswith('.nyud.net'):
                fileLink.urls.append({'v': 'http://' + cu.hostname + '.nyud.net' + cu.path, 'a':{'type': File.urlparse(url).scheme}})
        # Record the request for statistics/auditing.
        Hit(path = self.request.path, remote_addr = self.request.remote_addr, referer = self.request.headers.get('referer', ''), comment = url).save()
        f = File.get_by_key_name(url)
        if f == None:
            #Host a link only metalink
            fileLink.name = os.path.basename(url)
            fileLink.urls.insert(0, {'v': url, 'a':{'type': File.urlparse(url).scheme}})
            self.response.headers['Content-Type'] = 'application/metalink+xml'
            self.response.headers['Content-Disposition'] = 'attachment; filename="%s.metalink"' % os.path.basename(url).replace('"', '\\"')
            return self.render_to_response('metalink.xml', {'files': [fileLink], 'comment': 'Link only, because no validated link information could be found in the database. Make sure you use the _exact_ url you used to add the metadata.'})
        #inherit all the digest information from the file in db
        fileLink.name = f.name
        fileLink.size = f.size
        fileLink.digests.update(f.digests())
        if len(digest) > 0:
            # Refuse mixed digest+url lookups: no way to pick an authority.
            return self.response.out.write('For security reasons, you can not combine digest and url, because we can not determine which is authorative and what to do in case they do not match in the database.' + devhelp_url)
    if len(digest) > 0:
        #Malformed digest
        # NOTE(review): hash_sizes is indexed by digest length here, yielding
        # the algorithm name — confirm the map is bidirectional.
        if not hash_sizes.has_key(len(digest)):
            return self.not_found('404 Not Found, the digest is considered malformed. Make sure it is lowercase hex representing an MD5, SHA1, SHA256 or SHA512.' + devhelp_url)
        if not lower_hex_regex.match(digest):
            return self.not_found('404 Not Found, the digest is considered malformed because it did not match /[0-9a-f]/.' + devhelp_url)
        #OK, so a digest is given, set the digest in the fileLink to the given value
        fileLink.digests[hash_sizes[len(digest)]] = digest
    #TODO Unique hosts; hosts = set
    #Explode the file using all known digests, both url and digest
    names = {}  # candidate name -> vote count
    sizes = {}  # candidate size -> vote count
    # Matches a trailing two-letter country-code TLD; group(1) is the code,
    # used as the mirror's 'location' attribute.
    cntry_regex = re.compile('.*\.(a[cdefgilmnoqrstuwxz]|c[acdfghiklmnoruvxyz]|b[abdefghijmnorstvwyz]|e[ceghrstu]|d[ejkmoz]|g[abdefghilmnpqrstuwy]|f[ijkmor]|i[delmnoqrst]|h[kmnrtu]|k[eghimnprwyz]|j[emop]|m[acdeghklmnopqrstuvwxyz]|l[abcikrstuvy]|o[m]|n[acefgilopruz]|q[a]|p[aefghklmnrstwy]|s[abcdeghijklmnortuvyz]|r[eosuw]|u[agkmsyz]|t[cdfghjklmnoprtvwz]|w[fs]|v[aceginu]|y[etu]|z[amw])$')
    for digest_type in fileLink.digests:
        # Up to 20 mirrors per digest type.
        files = File.all().filter('%s = ' % digest_type, fileLink.digests[digest_type]).fetch(20)
        for f in files:
            attr = {'type': 'http'}#Optimization, currently only HTTP supported f.scheme
            cntry = cntry_regex.match(f.hostname)
            if cntry:
                attr['location'] = cntry.group(1)
            fileLink.urls.append({'v': f.url(), 'a': attr})
            names.setdefault(f.name, 0)
            names[f.name] += 1
            sizes.setdefault(f.size, 0)
            sizes[f.size] += 1
    #If a name is given, just rename the file
    name = self.request.get('name')
    if name:
        fileLink.name = name
    if not fileLink.name:
        #Democratic naming: most-voted name wins.
        # NOTE(review): raises IndexError when no candidate names exist.
        names = [(names[k], k) for k in names]
        names.sort()
        fileLink.name = names[-1][1]
    if not fileLink.size:
        #Democratic size: most-voted size wins.
        sizes[0] = 0  # seed so the vote list is never empty
        sizes = [(sizes[k], k) for k in sizes]
        sizes.sort()
        fileLink.size = sizes[-1][1]
    # Shuffle mirrors for crude load balancing across clients.
    random.shuffle(fileLink.urls)
    self.response.headers['Content-Type'] = 'application/metalink+xml'
    self.response.headers['Content-Disposition'] = 'attachment; filename="%s.metalink"' % os.path.basename(fileLink.name).replace('"', '\\"')
    return self.render_to_response('metalink.xml', {'files': [fileLink]})
def get(self):
    """Render the 20 most recently modified files."""
    max_results = 20
    latest = File.all().order('-mtime').fetch(max_results)
    return self.render_to_response('latest.html',
                                   {'limit': max_results, 'files': latest})