import json
import logging
import os
import urllib.parse

from flask import send_from_directory

logger = logging.getLogger(__name__)


def get_image(q, r):
    # q is the (URL-encoded) directory path, r the (URL-encoded) file name.
    q = q.replace('\\\\', '/')
    q = urllib.parse.unquote(q)
    r = urllib.parse.unquote(r)
    filename = r
    directory = q
    logger.info(filename)
    logger.info(directory)
    # Build the path portably instead of hard-coding a backslash separator.
    if os.path.isfile(os.path.join(directory, filename)):
        return send_from_directory(directory, filename, as_attachment=False)
    else:
        sReturn = {'results': 'File not found on server.'}
        return json.dumps(sReturn)
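A minimal usage sketch, assuming this handler lives in a Flask application; the app object and route pattern are assumptions, not part of the original snippet.

# Hypothetical route registration for a Flask app defined elsewhere.
@app.route('/image/<path:q>/<r>')
def image_route(q, r):
    return get_image(q, r)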
Example #2
    def get_bulk_metadata(self, directory):
        ''':returns: A dictionary mapping the path of every file object in *directory* to a dictionary with the keys\
        'modified', 'bytes' and 'is_dir' containing the corresponding metadata for the file object.

        The value for 'modified' is a date in seconds, stating when the file object was last modified.
        The value for 'bytes' is the number of bytes of the file object. It is 0 if the object is a directory.
        The value for 'is_dir' is True if the file object is a directory and False otherwise.

        :raises: NoSuchFilesytemObjectError if the directory does not exist
        '''
        if not directory.endswith('/'):
            directory += '/'
        # Issue a WebDAV PROPFIND with depth 1 to list the directory's immediate children.
        response = self._get_client().propfind(self.root + directory, depth=1)
        response_soup = BeautifulSoup(response.content)
        # Match namespaced tags such as 'd:response' regardless of prefix or case.
        multi_response = response_soup.findAll(re.compile(r'(?i)[a-z0-9]:response'))
        ret = {}
        for response in multi_response:
            path = response.find(re.compile(r'(?i)[a-z0-9]:href')).text
            path = unquote(path)
            item = {}
            if path.endswith('/'):
                path = path[:-1]
            if path == self.root + directory[:-1]:  # skip the directory itself
                continue
            if path.startswith(self.root):  # cut off root
                path = path[len(self.root):]
            if path == '/' or path == '':
                continue
            item["path"] = path
            mod_date = response.find(re.compile(r'(?i)[a-z0-9]:getlastmodified')).text
            cal = pdt.Calendar()  # pdt is assumed to be the parsedatetime module
            mod_date = int(time.mktime(cal.parse(mod_date)[0]))
            item["modified"] = mod_date
            resource_type = response.find(re.compile(r'(?i)[a-z0-9]:resourcetype'))
            if resource_type.findChild() is None:
                # GMX Mediacenter does not necessarily return a type in the resourcetype
                # tag, so we just assume it is a file.
                item["is_dir"] = False
            else:
                item["is_dir"] = resource_type.findChild().name.split(':')[-1] == 'collection'
            if not item["is_dir"]:
                item["bytes"] = int(response.find(re.compile(r'(?i)[a-z0-9]:getcontentlength')).text)
            else:
                item["bytes"] = 0
            if not ('is_dir' in item and 'bytes' in item and 'modified' in item):
                raise StoreAccessError(
                    "Error in get_metadata(%s): \n no getcontentlength or getlastmodified property in %s"
                    % (path, response))
            ret[path] = item

        return ret
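A brief usage sketch, assuming `store` is an already-configured instance of the WebDAV-backed store class this method belongs to; the variable name and directory path are assumptions.

# Hypothetical usage of the method above.
metadata = store.get_bulk_metadata('/photos')
for path, info in metadata.items():
    kind = 'dir' if info['is_dir'] else 'file'
    print(kind, path, info['bytes'], 'bytes, modified', info['modified'])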
import json
import os
import urllib.parse


def get_media_folder(q):
    dMedia = {}
    lMedia = []
    safe_files = ['.JPG', '.MP4', '.AVI', '.MOV', '.PNG', '.TIF']
    q = urllib.parse.unquote(q)
    q = q.replace('\\\\', '/')
    for fname in os.listdir(q):
        # Keep only files whose extension is in the whitelist (case-insensitive).
        if fname[-4:].upper() in safe_files:
            lMedia.append(fname)

    dMedia[q] = lMedia

    return json.dumps(dMedia)
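A short usage sketch; the percent-encoded directory path below is hypothetical.

# Hypothetical call with a percent-encoded directory path.
listing_json = get_media_folder('C%3A%2FUsers%2Fme%2FPictures')
print(listing_json)  # e.g. {"C:/Users/me/Pictures": ["holiday.jpg", "clip.mp4"]}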
Example #5
import re
import urllib.parse

from django.http import HttpResponseForbidden, JsonResponse


def scripdata(request):
    g = request.GET or request.POST

    ### sanitize g['symbol'] and return data for that symbol
    kiddies = re.findall(r'[^0-9a-zA-Z&]+', urllib.parse.unquote(g['symbol']))
    if kiddies:
        return HttpResponseForbidden()  ## FIXME : Take a closer look

    symbol = g['symbol'].upper()

    # Nsehistdata is the Django model holding the historical price rows (imported elsewhere).
    data = Nsehistdata.objects.filter(scrip=symbol)
    rows = []
    for d in data:
        rows.append([d.date * 1000, d.open, d.high, d.low, d.close, d.volume])
    sdata = {'label': symbol, 'data': rows}
    # JsonResponse serializes the dict itself; no manual json.dumps needed.
    return JsonResponse(sdata)
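A minimal sketch of how this view could be wired into a Django URLconf; the URL path and the `views` module name are assumptions.

# Hypothetical urls.py entry; `views` is assumed to be the module containing scripdata.
from django.urls import path
from . import views

urlpatterns = [
    path('scripdata/', views.scripdata),
]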
Example #6
    def get_directory_listing(self, directory):
        ''':raises: StoreAccessError if the directory cannot be listed
        :raises: NoSuchFilesytemObjectError if path does not exist'''
        response = self._get_client().propfind(self.root + directory, depth=1)
        if response.content == '':
            return []
        response_soup = BeautifulSoup(response.content)
        multi_response = response_soup.findAll(re.compile(r'(?i)[a-z0-9]:response'))
        ret = []
        for response in multi_response:
            path = response.find(re.compile(r'(?i)[a-z0-9]:href')).text
            path = unquote(path)
            if path == '/' or path == '':
                continue
            if path.endswith('/'):
                path = path[:-1]
            if path.startswith(self.root):  # cut off root
                path = path[len(self.root):]
            if path != directory:
                ret.append(path)
        return ret
Example #7
    def get_directory_listing(self, directory):
        ''':raises: StoreAccessError if the directory cannot be listed
        :raises: NoSuchFilesytemObjectError if path does not exist'''
        if not directory.endswith('/'):
            directory += '/'
        response = self._get_client().propfind(self.root + directory, depth=1)
        if response.content == '':
            return []
        response_soup = BeautifulSoup(response.content)
        multi_response = response_soup.findAll(re.compile(r'(?i)[a-z0-9]:response'))
        ret = []
        for response in multi_response:
            path = response.find(re.compile(r'(?i)[a-z0-9]:href')).text
            path = unquote(path)
            if path.endswith('/'):
                path = path[:-1]
            if path.startswith(self.root):  # cut off root
                path = path[len(self.root):]
            if path == '/' or path == '':
                continue
            if path != directory[:-1]:  # skip the directory itself
                ret.append(path)
        return ret
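A brief usage sketch that applies to either listing variant above, assuming `store` is a configured instance of the containing class; the variable name and directory path are assumptions.

# Hypothetical usage of the method above.
for entry in store.get_directory_listing('/documents'):
    print(entry)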
Example #8
from urllib.parse import unquote
from termcolor import cprint  # assumed source of cprint

def print_negative_scan(url_to_scan):
    # unquote returns str in Python 3, so no .decode('utf8') is needed
    unquoted_url = unquote(url_to_scan)
    message = "[-]No XSS %s" % unquoted_url
    cprint(message, "red")
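A tiny usage sketch; the percent-encoded URL is hypothetical.

# Hypothetical call with a percent-encoded scan target.
print_negative_scan('http://example.com/search?q=%3Cscript%3E')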