class TestMagic(unittest.TestCase):
    """Exercise the Magic wrapper against the `testfile` fixture table.

    Subclasses may set ``mime = True`` to compare against the MIME
    column instead of the textual-description column.
    """

    # When True, compare against the MIME column of `testfile`.
    mime = False

    def setUp(self):
        self.m = Magic(mime=self.mime)

    def testFileTypes(self):
        for filename, desc, mime in testfile:
            filename = path.join(path.dirname(__file__), "testdata", filename)
            # Pick the expected column matching the mode under test.
            if self.mime:
                target = mime
            else:
                target = desc
            # Close the handle explicitly; the original leaked it via a
            # bare open(...).read(1024).
            with open(filename) as f:
                self.assertEqual(target, self.m.from_buffer(f.read(1024)))
            self.assertEqual(target, self.m.from_file(filename), filename)

    def testErrors(self):
        self.assertRaises(IOError, self.m.from_file, "nonexistent")
        self.assertRaises(MagicException, Magic, magic_file="noneexistent")
        os.environ['MAGIC'] = '/nonexistetn'
        try:
            self.assertRaises(MagicException, Magic)
        finally:
            # Always restore the environment so later tests are not
            # polluted, even when the assertion above fails.
            del os.environ['MAGIC']
def scan_directory(repo, slab_repo, directory):
    """Walk `directory` and reconcile its files with the repository.

    Files whose recorded (mtime, size) still match are merely
    confirmed; new or changed files are re-processed into slab
    segments and MIME-sniffed.  Records absent from disk are marked
    deleted.
    """
    detector = Magic(True)
    active = repo.active_files()
    updated = {}
    confirmed = set()
    slab_repo.open_slab()
    try:
        for dirpath, dirnames, filenames in walk(directory):
            # '' for the top directory itself, relative path otherwise.
            rel_path = '' if directory == dirpath else relpath(dirpath, directory)
            for entry in filenames:
                full_name = join(dirpath, entry)
                key = join(rel_path, entry)
                stats = stat(full_name)
                confirmed.add(key)
                seen_ts, seen_size = active.get(key, (None, None))
                changed = (seen_ts != long(stats.st_mtime)
                           or seen_size != long(stats.st_size))
                if changed:
                    segments = slab_repo.process_file(full_name)
                    # Sniff the MIME type from the first 32 KiB.
                    with open(full_name) as f:
                        mime = detector.from_buffer(f.read(2 ** 15))
                    updated[key] = (long(stats.st_mtime),
                                    long(stats.st_size), mime, segments)
    finally:
        slab_repo.close_slab()
    repo.mark_deleted(set(active) - confirmed)
    repo.mark_seen(confirmed - set(updated))
    repo.mark_updated(updated)
def identify_platform(self, filepath):
    """Guess (format, cpu) for `filepath` from its libmagic description."""
    filemagic = Magic()
    filetype = ""
    try:
        filetype = filemagic.id_filename(filepath)
    except Exception as e:
        # certain version of libmagic throws error while parsing file;
        # the CPU information is however included in the error in some
        # cases, so keep the message for the substring checks below.
        filetype = str(e)
    # filemagic.close()
    if "ELF 32-bit" in filetype:
        if "ARM" in filetype:
            return "ELF", "arm"
        if "80386" in filetype:
            return "ELF", "x86"
        if "MIPS" in filetype:
            # MSB marks big-endian MIPS; otherwise little-endian.
            return ("ELF", "mips") if "MSB" in filetype else ("ELF", "mipsel")
        if "PowerPC" in filetype:
            return "ELF", "powerpc"
    if "ELF 64-bit" in filetype and "x86-64" in filetype:
        return "ELF", "x86-64"
    # Unrecognised: hand back the raw description and the default CPU.
    return filetype, self.default_cpu
def get_player(self, id=None):
    """Render an HTML snippet for this media item.

    Emits an <audio>/<video>/<img> element when the stored file's MIME
    main type is recognised, otherwise a plain link to the file or
    URL, or the bare title as a last resort.

    :param id: optional DOM id; defaults to self.id.
    """
    if id:
        id_attr = u" id=\"%s\"" % id
    else:
        id_attr = u" id=\"%s\"" % self.id
    if self.file:
        # Default: a download link, upgraded below on a MIME match.
        output = u"<a%s href='%s'>%s</a>" \
            % (id_attr, self.file.url, self.file.name)
        get_mimetype = Magic(mime=True)
        mimetype = get_mimetype.from_file(self.file.path)
        if mimetype:
            main_type = mimetype.split("/")[0]
            if main_type == "audio":
                output = u"<audio%s src='%s' preload='none' type='%s'></audio>" \
                    % (id_attr, self.file.url, mimetype)
            elif main_type == "video":
                output = u"<video%s src='%s' preload='none' type='%s'></video>" \
                    % (id_attr, self.file.url, mimetype)
            elif main_type == "image":
                # Fixed: the original passed a stray third argument
                # (self.file.name) to a format string with only two
                # %s placeholders, raising TypeError for every image.
                output = u"<img%s src='%s'/>" % (id_attr, self.file.url)
    elif self.url:
        output = u"<a%s href='%s'>%s</a>" \
            % (id_attr, self.url, self.title)
    else:
        output = u"%s" % self.title
    return output
def determineDataType(data):
    """Return a best-effort type name for `data`.

    Dicts report their 'type' key (or 'dict' when absent); any other
    value is MIME-sniffed with libmagic, falling back to "" when the
    sniff fails.
    """
    # If data is a dictionary and contains type key,
    # we can directly derive the data_type
    if isinstance(data, dict):
        if 'type' in data:
            data_type = data['type']
        else:
            data_type = 'dict'
    else:
        # If data is not a dictionary, we try to guess MIME type
        # by using magic library
        try:
            from magic import Magic
            mime_checker = Magic(mime=True)
            data_type = mime_checker.from_buffer(data)  # noqa
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            register_exception(
                stream="warning",
                prefix="BibWorkflowObject.determineDataType:"
                " Impossible to resolve data type."
            )
            data_type = ""
    return data_type
def do_fileprocess(self, filepath, admin_fields): magic_file_check = Magic() print "Type Validation plugin: checking %s", filepath print "and is...", magic_file_check.id_filename(filepath) return True # Accept
class TestMagicMimeEncoding(unittest.TestCase):
    """Check Magic(mime_encoding=True) against known-encoding samples."""

    def setUp(self):
        self.m = Magic(mime_encoding=True)

    def testFileEncoding(self):
        for filename, encoding in testFileEncoding:
            filename = path.join(path.dirname(__file__), "testdata", filename)
            # Use a context manager so the handle is closed promptly;
            # the original leaked it via open(...).read(1024).
            with open(filename) as f:
                self.assertEqual(encoding, self.m.from_buffer(f.read(1024)))
            self.assertEqual(encoding, self.m.from_file(filename), filename)
def get_report(self):
    """Assemble the analysis report dict: file hashes and libmagic
    type always; network indicators (from the pcap) only when the
    sample executed without error; plus static analysis and timing."""
    # Hash the whole sample once, read in binary mode.
    sample_file = open(self.sample_filepath, "rb")
    sample_data = sample_file.read()
    sample_file.close()
    self.report['md5'] = md5(sample_data).hexdigest()
    self.report['sha256'] = sha256(sample_data).hexdigest()
    self.report['sha1'] = sha1(sample_data).hexdigest()
    self.report['filesize'] = len(sample_data)
    # Deduplicated network indicator accumulators.
    network_con2 = {}
    ip = set()
    port = set()
    protocol = set()
    dns = set()
    try:
        filemagic = Magic()
        self.report['filetype'] = filemagic.id_filename(self.sample_filepath)
        # filemagic.close()
    except Exception as e:
        self.report['filetype'] = "Unknown"
    if self.error_in_exec == False and os.path.isfile(self.pcap_filepath):
        self.report['cpu'] = self.cpu_arch
        self.report['interpreter'] = self.interpreter
        pparser = PacketParser(self.pcap_filepath)
        self.report['dns_request'] = pparser.get_dns_requests()
        self.report['url'] = pparser.get_urls()
        network_con = pparser.get_network_connections()
        # Collect queried names; A-records also contribute resolved IPs.
        for dns_q in self.report['dns_request']:
            dns.add(dns_q['name'])
            if dns_q['type'] == "A":
                ip.add(dns_q['result'])
        for key in network_con.keys():
            protocol.add(key)
            network_con[key] = list(network_con[key])
            if key in ['TCP', 'UDP']:
                # TCP/UDP entries look like "ip : port" strings;
                # split them into {'ip', 'port'} dicts.
                network_con2[key] = []
                for socks in network_con[key]:
                    socks = socks.split(" : ")
                    network_con2[key].append(
                        {'ip': socks[0], 'port' : socks[1]}
                    )
                    ip.add(socks[0])
                    port.add(socks[1])
            else:
                # Other protocols carry bare IP entries.
                network_con2[key] = network_con[key]
                for t_ip in network_con[key]:
                    ip.add(t_ip)
        self.report['network'] = network_con2
        self.report['dns'] = list(dns)
        self.report['ip'] = list(ip)
        self.report['port'] = list(port)
        self.report['protocol'] = list(protocol)
    self.report['static_analysis'] = self.static_analysis()
    self.report['start_time'] = datetime.utcfromtimestamp(self.start_time).isoformat()
    self.report['end_time'] = datetime.utcfromtimestamp(self.end_time).isoformat()
    self.report['sample_filepath'] = self.sample_filepath
    self.report['pcap_filepath'] = self.pcap_filepath
    self.report['error'] = self.error_in_exec
    return self.report
def returnType(filePath):
    """Map a file's libmagic description to a supported media type.

    Returns the first SUPPORTED_TYPES key whose registered magic
    substring appears in the file's description, or None.

    NOTE(review): this is declared as a free function but references
    `self.sourcefile` when filePath is falsy — that branch raises
    NameError as written.  It looks like a method (cf. _get_type
    elsewhere in this file) that lost its `self` parameter; confirm
    against callers before relying on the fallback.
    """
    if not filePath:
        filePath = self.sourcefile
    m = Magic()
    filetype = m.from_file(filePath)
    for mediatype in SUPPORTED_TYPES.keys():
        for filestring in SUPPORTED_TYPES[mediatype]:
            if filestring in filetype:
                return mediatype
    return None
def contentType(self):
    """Return the media's MIME type, sniffing and caching it lazily.

    Raises FlogMediaError when the sniffed type is not listed in
    self.allowed_content_types.
    """
    # Idiomatic identity test; the original used `== None`.
    if self.content_type is None:
        mime = Magic(mime=True)
        content_type = mime.from_buffer(self.data.read(1024))
        if content_type in self.allowed_content_types:
            self.content_type = content_type
        else:
            raise FlogMediaError(' '.join(['content_type',
                                           str(content_type),
                                           'is not allowed']))
    return self.content_type
def get_info_about(self):
    """ return (encoding, info) tuple
    info is a plain string and has to be parsed
    """
    db = csv2rdf.database.DatabasePlainFiles(csv2rdf.config.config.resources_path)
    filename = db.get_path_to_file(self.filename)
    # Two detectors: one for the charset, one for the description.
    encoding_detector = Magic(mime=False, mime_encoding=True)
    description_detector = Magic(mime=False, mime_encoding=False)
    encoding = encoding_detector.from_file(filename)
    info = description_detector.from_file(filename)
    return (encoding, info)
def mimetype():
    """Best-effort MIME type for `path`: stdlib mimetypes first, then
    libmagic on the first 4 KiB when mimetypes draws a blank or only
    reports text/*.  Returns None for non-string contents."""
    # XXX performance penalty here getting the entire file?
    # XXX should provide a way to allow other packages to add
    # more mimetypes.
    data = contents()
    if not isinstance(data, basestring):
        return None
    guessed = mimetypes.guess_type(path)[0]
    if guessed is None or guessed.startswith('text/'):
        guessed = Magic(mime=True).from_buffer(data[:4096])
    return guessed
def determine_figure_type(buff):
    """
    Attempt to determine the figure type of an image file stored in a buffer.

    Uses the `magic` module to try to determine the MIME type of the image
    and then converts that to a Hedwig `FigureType` enum value.

    Raises a `UserError` exception if the determined MIME type is not
    recognised as a Hedwig figure type.
    """
    detected_mime = Magic(mime=True).from_buffer(buff)
    return FigureType.from_mime_type(detected_mime)
def _get_type(self, file_path=None):
    """Returns the file_type of the classes source media file or the
    type of the provided file."""
    if not file_path:
        file_path = self.sourcefile
    description = Magic().from_file(file_path)
    # First SUPPORTED_TYPES entry whose magic substring matches wins.
    for media_type, signatures in SUPPORTED_TYPES.items():
        if any(signature in description for signature in signatures):
            return media_type
    raise AudioSlaveException("File type '%s' not supported!" % description)
def __init__(self, gridfs, obj):
    """Expose each configured container as an FSContainer attribute and
    set up a GridFS handle, plus a MIME sniffer when python-magic is
    available."""
    self._gridfs = gridfs
    for container in self._gridfs.get('containers', []):
        self.__dict__[container] = FSContainer(container, obj)
    self._obj = obj
    self._fs = GridFS(self._obj.db)
    # Magic is falsy when python-magic could not be imported.
    if Magic:
        self._magic = Magic(mime=True)
class FSContainer(object):
    """A named GridFS "container" scoped to a single document.

    Provides dict-style access to files whose GridFS metadata records
    the owning document id, the container name and the file name.
    """

    def __init__(self, container_name, obj):
        self._container_name = container_name
        self._obj = obj
        self._fs = GridFS(self._obj.db)
        # Magic is falsy when python-magic is unavailable; writes then
        # skip content-type sniffing.
        if Magic:
            self._magic = Magic(mime=True)

    def __getitem__(self, key):
        # Read the whole stored content for `key`.
        f = self.open(key)
        content = f.read()
        f.close()
        return content

    def __setitem__(self, key, value):
        # Sniff a MIME content type when possible, then (over)write.
        content_type = None
        if value and Magic:
            content_type = self._magic.from_buffer(value)
        f = self.open(key, 'w')
        try:
            f.content_type = content_type
            f.write(value)
        except TypeError:
            # Re-raise with a clearer message about the expected type.
            raise TypeError("GridFS value mus be string not %s" % type(value))
        finally:
            f.close()

    def __delitem__(self, key):
        # Remove by full metadata spec so only this doc's file goes.
        spec = {'metadata.doc_id': self._obj['_id'],
                'metadata.container': self._container_name,
                'metadata.name': key}
        self._fs.remove(spec, collection=self._obj.collection.name)

    def open(self, name, mode='r'):
        """Open a contained file for reading or writing.

        Reads raise IOError when the name is absent.  Writes reuse the
        existing GridFile's _id when present (overwrite in place),
        otherwise create a new file with full container metadata.
        """
        search_spec = {'metadata.name': name,
                       'metadata.container': self._container_name,
                       'metadata.doc_id': self._obj['_id']}
        if mode == 'r':
            try:
                return GridFile(search_spec, self._obj.db, 'r',
                                self._obj.collection.name)
            except IOError:
                raise IOError('"%s" is not found in the database' % name)
        else:
            file = self._obj.collection.files.find_one(search_spec)
            if file:
                # Existing file: open by _id to overwrite its content.
                return GridFile({'_id': ObjectId(file['_id'])}, self._obj.db,
                                'w', self._obj.collection.name)
            write_spec = {'metadata': {'name': name,
                                       'container': self._container_name,
                                       'doc_id': self._obj['_id']}}
            return GridFile(write_spec, self._obj.db, 'w',
                            self._obj.collection.name)

    def __iter__(self):
        # Yield the names of all files stored in this container.
        for metafile in self._obj.collection.files.find(
                {'metadata.container': self._container_name,
                 'metadata.doc_id': self._obj['_id']}):
            yield metafile['metadata']['name']

    def list(self):
        """Return all contained file names as a list."""
        return [i for i in self]

    def __repr__(self):
        return "<%s '%s'>" % (self.__class__.__name__, self._container_name)
def _move_moov_atoms(self, in_file):
    '''Server needs the H264 file to have its moov atoms (or boxes) in
    front of the data. The qt-faststart.c program - shipped with
    FFMpeg - rewrites H264 files so their atoms are placed in the
    required order. This is only necessary to do if it's a quicktime
    file.

    $ qt-faststart <infile.mov> <outfile.mov>

    Raises OSError when qt-faststart exits non-zero.
    '''
    moovs = (
        'video/x-ms-asf',
        'video/quicktime',
        'application/octet-stream',
    )
    magic = Magic(mime=True)
    if magic.from_file(self.video.file.path) in moovs:
        (name, ext) = os.path.splitext(in_file)
        tmp = ''.join([name, '_tmp', ext])
        shutil.copyfile(in_file, tmp)
        try:
            # qt-faststart reads the copy and rewrites the original.
            ret = subprocess.call(["qt-faststart", tmp, in_file])
            if ret != 0:
                raise OSError()
        finally:
            # Fixed: the scratch copy was never removed, littering the
            # media directory on every conversion.
            if os.path.exists(tmp):
                os.remove(tmp)
def detecttype(filepath):
    """Detect the mime type of the text file.

    Returns "mo" for gettext binaries, "text" for any text/* MIME type
    (or a known text extension when python-magic is unavailable), the
    raw MIME type otherwise, and "unknown" as the final fallback.
    """
    root, ext = os.path.splitext(filepath)
    try:
        from magic import Magic
        mime = Magic(mime=True)
        mimetype = mime.from_file(filepath)
        # Fixed: the original tested `ext in '.mo'` — a substring
        # check that wrongly matched '', '.', '.m', 'm' and 'o' too.
        if ext == '.mo':
            return "mo"
        elif "text/" in mimetype:
            return "text"
        else:
            return mimetype
    except (ImportError, TypeError):
        # No usable libmagic: fall back to extension heuristics.
        if ext == '.mo':
            return "mo"
        elif ext in listofexts:
            return "text"
        else:
            return "unknown"
def classify_file(path, verbose=False):
    """Classify `path` via its libmagic description and mutagen.

    Returns ('mp3'|'flac'|'file', mutagen-object-or-None) for audio
    files, or (None, None) for unsupported or unreadable files.
    """
    assert type(path) in [str, unicode]
    # Magic descriptions we attempt to parse as audio...
    supported = ['MPEG ADTS', 'FLAC', 'MPEG Layer 3', 'Audio', '^data$']
    # ...and ones knowingly skipped.  NOTE(review): `ignored` is never
    # referenced below — confirm whether a filter was intended.
    ignored = ['ASCII', 'JPEG', 'PNG', 'text', '^data$', 'AppleDouble']
    magic = Magic()
    try:
        # libmagic wants byte paths; encode unicode ones as UTF-8.
        if type(path) == unicode:
            m = magic.from_file(path.encode('utf-8'))
        else:
            m = magic.from_file(path)
    except Exception as e:
        print 'INTERNAL ERROR: %s: %s' % (path, str(e))
        return (None, None)
    if verbose:
        print('Magic(%s):\n%s' % (path, m))
    for s in supported:
        match = re.search(s, m)
        if match:
            try:
                audio = mutagen.File(path, easy=True)
                if type(audio) == mutagen.mp3.EasyMP3:
                    return ('mp3', audio)
                elif type(audio) == mutagen.flac.FLAC:
                    return ('flac', audio)
                else:
                    return ('file', None)
                # NOTE(review): unreachable — every branch above
                # returns; `format` here would be the builtin.
                return (format, audio)
            except AttributeError, e:
                print('Unknown file type: %s' % path)
                break
            except mutagen.mp3.HeaderNotFoundError, e:
                print('Header not found: %s' % path)
                break
            except Exception, e:
                print('INTERNAL ERROR: get_children()')
                traceback.print_exc()
    return (None, None)
class FileType:
    """Classify files via libmagic; text files are further sniffed as
    XML or CSV."""

    def __init__(self):
        self.m = Magic()

    def get_type(self, fname):
        """Return a short type token for `fname`: a TYPE_MAPPING value
        on a magic-description match, else 'xml'/'csv'/'txt' for text
        files."""
        ftype = self.m.from_file(fname)
        for k in TYPE_MAPPING.keys():
            if k in ftype:
                return TYPE_MAPPING[k]
        # solutions here from
        # http://stackoverflow.com/questions/9084228/python-to-check-if-a-gzipped-file-is-xml-or-csv
        # and http://stackoverflow.com/questions/2984888/check-if-file-has-a-csv-format-with-python
        if 'text' in ftype:
            with open(fname, 'rb') as fh:
                try:
                    xml.sax.parse(fh, xml.sax.ContentHandler())
                    return 'xml'
                except Exception:
                    # SAX's exceptions are not public, but they all
                    # subclass Exception — narrowed from a bare
                    # `except:` that also swallowed KeyboardInterrupt.
                    pass
                fh.seek(0)
                try:
                    csv.Sniffer().sniff(fh.read(1024))
                    return 'csv'
                except csv.Error:
                    pass
        return 'txt'

    def is_compression(self, fname):
        """True when the file's type token is a compression format."""
        return self.is_compression_by_type(self.get_type(fname))

    def is_compression_by_type(self, ftype):
        # Direct membership test instead of if/return True/False.
        return ftype in COMPRESSION

    def is_archived(self, fname):
        """True when the file's type token is an archive format."""
        return self.is_archived_by_type(self.get_type(fname))

    def is_archived_by_type(self, ftype):
        return ftype in ARCHIVED
def filePatch(fileName: str, fileId: str):
    """Update an existing Drive file's content and metadata in place
    and return its id."""
    global service
    fileMimetype = Magic(mime=True).from_file(fileName)
    metadata = {'name': fileName, 'mimeType': fileMimetype}
    media = MediaFileUpload(filename=fileName,
                            mimetype=fileMimetype,
                            resumable=False)
    fileOp = service.files().update(fileId=fileId,
                                    body=metadata,
                                    media_body=media).execute()
    print(
        f"Synced: [{fileOp['id']}] [{fileName}] [{os.path.getsize(fileName)} bytes]"
    )
    return fileOp['id']
def check_file_typ(self, filename):
    """Classify an uploaded file as image, PDF or unknown from its
    libmagic MIME type; None when the file cannot be read."""
    full_file_name = os.path.join(self.base_folder, self.upload_folder, filename)
    if os.name == 'nt':
        # if its windows we need to specify magic file explicitly
        # https://github.com/ahupp/python-magic#dependencies
        magic = Magic(magic_file=current_app.config['MAGIC_FILE_WIN32'], mime=True)
    else:
        magic = Magic(mime=True)
    try:
        file_type = magic.from_file(full_file_name)
    except IOError:
        app.logger.error("check_file_type is called with non existing file or I/O error while opening :%s" % full_file_name)
        return None
    # Membership test replaces the original gif/png/jpeg elif chain.
    if file_type in ('image/gif', 'image/png', 'image/jpeg'):
        return FILE_TYPE_IMAGE
    if file_type == 'application/pdf':
        return FILE_TYPE_PDF
    return FILE_TYPE_UNKNOWN
def handle_uploaded_file(f):
    """Parse an uploaded plain-text recipe file and create or update
    the matching Recipe row.

    Raises MemoryError for files >= 1 MB and ValueError for non-text
    uploads.
    """
    if not f.size < 1e6:
        raise MemoryError('File too big!')
    mime = Magic(mime=True)
    if mime.from_buffer(f.read()) != 'text/plain':
        raise ValueError('Passed file is not a text file.')
    # Rewind after the sniff read consumed the stream.
    f.file.seek(0)
    recipe_file = parse_file(TextIOWrapper(f.file))
    output = recipe_file.to_djangodb()
    # If recipe with same UUID exists, try to update it, else create a new one.
    if 'uuid' in output and output['uuid']:
        try:
            current_version = Recipe.objects.get(pk=output['uuid'])
            log.info('Replacing currently existing recipe with new one.')
            for field in current_version.__dict__:
                if field in output:
                    current_version.__dict__[field] = output[field]
            current_version.save()
            return current_version
        except Recipe.DoesNotExist:
            pass
    log.info('Creating new recipe entry for {}'.format(output['name']))
    recipe = Recipe(**output)
    recipe.save()
    return recipe
def area(config, override_meta):
    """Capture an area screenshot, create its meta record and upload
    the image; API calls are only printed in dry-run mode."""
    print("area")
    cmd, cmd_output, meta = get_config_for_option(config, override_meta, 'area')
    print(cmd)
    print(subprocess.check_output(cmd.split(' ')))
    meta.update({'meta_type': 'quicksave/screenshot'})
    if not GLOBAL.dry:
        response = API.create(meta)
        meta_hash = response['item']['meta']['meta_hash']
    else:
        print('API.create(%s)' % meta)
        meta_hash = ''
    mimetype = Magic().from_file(cmd_output)
    # Base64-encode the captured image for the upload payload.
    with open(cmd_output, 'rb') as screenshot:
        filebase = base64.b64encode(screenshot.read()).decode('ascii')
    filename = 'screenshot.png'
    if not GLOBAL.dry:
        API.upload(meta_hash, mimetype, filename, filebase)
    else:
        print('API.upload(__meta_hash__, %s, %s, __base64__)' % (mimetype, filename))
    return meta_hash
def __init__(self, mapping, reactor=None, **kwargs):
    """Load each shortname -> filename pair, optionally watching the
    containing directories for changes via `reactor`."""
    Observable.__init__(self, **kwargs)
    self._mapping = {}
    self._watchers = {}
    self._reactor = reactor
    self._magic = Magic(mime=True)
    for shortname, filename in mapping.items():
        if not isfile(filename):
            print(f"file (unknown) for {shortname} does not exist.")
            # Fixed: the original did `del self._mapping[shortname]`,
            # but self._mapping starts empty, so every missing file
            # raised KeyError.  Skipping the entry has the intended
            # effect of never registering it.
            continue
        if not self._reactor is None:
            basedir = dirname(filename)
            if not basedir in self._watchers:
                # One watcher per directory, shared across entries.
                self._watchers[basedir] = DirectoryWatcher(
                    basedir, self._update,
                    ModifyFile=True, MoveInFile=True)
        self._updateFile(shortname, filename)
    if not self._reactor is None:
        for each in self._watchers.values():
            self._reactor.addReader(each, each)
def interpret(self, path: str) -> bool:
    """Try each preferred implementation against `path`'s MIME type
    and extension, instantiating the first match.

    Returns True when any implementation recognized the file, even if
    an earlier one had already claimed this parser.
    """
    mimetype = Magic(mime=True).from_file(path)
    _, extension = os.path.splitext(path.lower())
    recognized = False
    for Impl in self.preferred_impls:
        if Impl.recognizes(mimetype, extension):
            recognized = True
            if self.impl is not None:
                # Fixed: `type(Impl).__name__` named the metaclass
                # (e.g. 'type'), not the implementation already in
                # use; name the previously-selected impl instead.
                logger.error(
                    f"cannot parse {path}: previous {type(self.impl).__name__} was: {self.impl.path}, skipping..."
                )
                continue
            self.impl = Impl(path)
            return True
    return recognized
def mutate(self, info, **kwargs):
    """Process file input.

    Validates the upload's MIME type and size (< 1 MiB, YAML/plain/
    HTML only), stores it, then refreshes caches and logs the outcome.
    Raises InvalidFileSize, InvalidFileType or ErrorUploadingFileS3.
    """
    finding_id = kwargs.get('finding_id')
    origin = kwargs.get('origin', '')
    project = finding_domain.get_project(finding_id)
    # The multipart form exposes the upload under key '1'.
    file_input = info.context.FILES['1']
    mime = Magic(mime=True)
    # Large uploads are spooled to disk (TemporaryUploadedFile);
    # small ones stay in memory — sniff the MIME type accordingly.
    if isinstance(file_input, TemporaryUploadedFile):
        mime_type = mime.from_file(file_input.temporary_file_path())
    elif isinstance(file_input, InMemoryUploadedFile):
        mime_type = mime.from_buffer(file_input.file.getvalue())
    else:
        mime_type = ''
    mib = 1048576  # bytes per MiB
    if (file_input and
            mime_type in ['text/x-yaml', 'text/plain', 'text/html']):
        if file_input.size < 1 * mib:
            success = process_file(file_input, finding_id, info, origin)
        else:
            raise InvalidFileSize()
    else:
        raise InvalidFileType()
    ret = UploadFile(success=success)
    if success:
        update_last_vuln_date(finding_id)
        util.invalidate_cache(finding_id)
        util.invalidate_cache(project)
        util.cloudwatch_log(
            info.context, 'Security: Uploaded file in {project} \
            project succesfully'.format(project=project))
    else:
        util.cloudwatch_log(
            info.context, 'Security: Attempted to delete file \
            from {project} project'.format(project=project))
        raise ErrorUploadingFileS3()
    return ret
class TestHandler(HandlerTestCase):
    """Tests for the ffmpeg-backed audio/video Handler."""

    def afterSetUp(self):
        # Read the fixture inside a context manager so the handle is
        # closed; the original leaked it via open(...).read().
        with open("./data/test.ogv") as f:
            self.data = f.read()
        self.kw = dict(env=dict(PATH=self.env_path))
        self.input = Handler(self.tmp_url, self.data, "ogv", **self.kw)
        self.file_detector = Magic(mime=True)

    def testConvertVideo(self):
        """Test coversion of video to another format"""
        output_data = self.input.convert("mpeg")
        file_format = self.file_detector.from_buffer(output_data)
        self.assertEquals(file_format, 'video/mpeg')

    def testgetMetadata(self):
        """Test if metadata is extracted from"""
        output_metadata = self.input.getMetadata()
        self.assertEquals(output_metadata, {'Encoder': 'Lavf52.64.2'})

    def testsetMetadata(self):
        """ Test if metadata are inserted correclty """
        metadata_dict = {"title": "Set Metadata Test", "creator": "cloudooo"}
        output = self.input.setMetadata(metadata_dict)
        handler = Handler(self.tmp_url, output, "ogv", **self.kw)
        metadata = handler.getMetadata()
        self.assertEquals(metadata["Title"], "Set Metadata Test")
        self.assertEquals(metadata["Creator"], "cloudooo")

    def testConvertAudio(self):
        """Test coversion of audio to another format"""
        with open("./data/test.ogg") as f:
            self.data = f.read()
        self.input = Handler(self.tmp_url, self.data, "ogg", **self.kw)
        output_data = self.input.convert("wav")
        file_format = self.file_detector.from_buffer(output_data)
        # XXX this might expect 'audio/vnd.wave' but magic only got 'audio/x-wav'
        self.assertEquals(file_format, 'audio/x-wav')
def send_file_to_s3(file_name, evidence, event_id):
    """Save evidence files in s2.

    Uploads /tmp/<evidence_id>.tmp to S3 when its sniffed MIME type is
    allowed by evidence['file_type']; always unlinks the temp file.
    Returns True only when the upload succeeded.
    """
    evidence_id = evidence['id']
    fileroute = '/tmp/:id.tmp'.replace(':id', evidence_id)
    evidence_type = evidence['file_type']
    is_file_saved = False
    # Fixed: open in binary mode — evidence may be an image/PDF and
    # upload_fileobj streams raw bytes; text mode ('r') corrupts or
    # fails on non-UTF-8 content.
    with open(fileroute, 'rb') as file_obj:
        try:
            mime = Magic(mime=True)
            mime_type = mime.from_file(fileroute)
            if evidence_type.get(mime_type):
                file_name_s3 = file_name + evidence_type.get(mime_type)
                CLIENT_S3.upload_fileobj(file_obj, BUCKET_S3, file_name_s3)
                is_file_saved = True
            else:
                util.cloudwatch_log_plain(
                    'File of event {event_id} does not have the right type'
                    .format(event_id=event_id)
                )
        except ClientError:
            rollbar.report_exc_info()
            is_file_saved = False
    os.unlink(fileroute)
    return is_file_saved
def uploadFile(localPath, remoteName, parentId):
    """ Upload a single file to google drive.

    Arguments:
        localPath {string} -- path to the file to upload
        remoteName {string} -- path to the new file on drive
        parentId {string} -- id of parent folder on drive
    """
    existingId = findRemoteFileId(remoteName, parentId)
    mime = Magic(mime=True).from_file(localPath)
    media = MediaFileUpload(localPath, mimetype=mime)
    if existingId is None:
        # No remote counterpart yet: create one.
        createRemoteFile(localPath, remoteName, parentId, mime, media)
    else:
        # Remote file exists: replace its content in place.
        updateRemoteFile(localPath, media, existingId)
def fileUpload(fileName: str):
    """Create a new Drive file under CONFIG_PARENT_ID from a local
    file and return its id."""
    global service, CONFIG_PARENT_ID
    mimetype = Magic(mime=True).from_file(fileName)
    metadata = {
        'name': fileName,
        'mimeType': mimetype,
        'parents': [CONFIG_PARENT_ID],
    }
    media = MediaFileUpload(filename=fileName,
                            mimetype=mimetype,
                            resumable=False)
    fileOp = service.files().create(body=metadata,
                                    media_body=media).execute()
    print(
        f"Uploaded: [{fileOp['id']}] [{fileName}] [{os.path.getsize(fileName)} bytes]"
    )
    return fileOp['id']
def analyze(self, data, _path, _unicode) -> bool:
    """Fill data["Encoding"]["Details"] with the file's charset, the
    forced decoding charset, and its byte-order mark.

    NOTE(review): annotated -> bool but, like the original, returns
    None implicitly; kept for interface compatibility.
    """
    data["Encoding"] = {"Details": {}, "_Details": {}}
    # Only the first 4 bytes are needed for BOM detection.  The
    # original also read the entire file into memory and discarded
    # it, and leaked both file handles.
    with open(_path, "rb") as handle:
        fbom = handle.read(4)
    if _unicode:
        encoding = "utf-16"
    else:
        encoding = "utf-8"
    data["Encoding"]["Details"] = {
        "charset": Magic(mime_encoding=True).from_file(_path),
        "ForceEncoding": encoding,
        "ByteOrderMark": self.check_bom(fbom)
    }
def get_records_from_file(
        project_name: str, finding_id: str,
        file_name: str) -> List[Dict[object, object]]:
    """Download an evidence CSV and return at most 1000 data rows as
    dicts keyed by the header row; empty list on read errors."""
    file_path = _download_evidence_file(project_name, finding_id, file_name)
    file_content = []
    # Let libmagic pick the charset so io.open decodes correctly.
    encoding = Magic(mime_encoding=True).from_file(file_path)
    try:
        with io.open(file_path, mode='r', encoding=encoding) as records_file:
            csv_reader = csv.reader(records_file)
            max_rows = 1000
            headers = next(csv_reader)
            file_content = [
                util.list_to_dict(headers, row)
                for row in itertools.islice(csv_reader, max_rows)
            ]
    except (csv.Error, LookupError, UnicodeDecodeError) as ex:
        rollbar.report_message('Error: Couldnt read records file', 'error',
                               extra_data=ex, payload_data=locals())
    return file_content
async def highlight(context):
    """ Generates syntax highlighted images. """
    # Forwarded messages are ignored entirely.
    if context.fwd_from:
        return
    reply = await context.get_reply_message()
    reply_id = None
    msg = await context.reply(lang('highlight_processing'))
    if reply:
        reply_id = reply.id
        target_file_path = await context.client.download_media(
            await context.get_reply_message())
        if target_file_path is None:
            # No attachment: highlight the reply's text.
            message = reply.text
        else:
            # Only plain-text attachments are read from disk; any
            # other attachment type falls back to the reply's text.
            if Magic(mime=True).from_file(target_file_path) != 'text/plain':
                message = reply.text
            else:
                with open(target_file_path, 'r') as file:
                    message = file.read()
            # Clean up the downloaded attachment either way.
            remove(target_file_path)
    else:
        if context.arguments:
            message = context.arguments
        else:
            await msg.edit(lang('highlight_no_file'))
            return
    # Pick a lexer from the content itself.
    lexer = guess_lexer(message)
    try:
        formatter = img.JpgImageFormatter(style="colorful")
    except img.FontNotFound:
        await msg.edit(lang('caption_error'))
        return
    except FileNotFoundError:
        await msg.edit(lang('caption_error'))
        return
    try:
        result = syntax_highlight(message, lexer, formatter, outfile=None)
    except OSError:
        await msg.edit(lang('caption_error'))
        return
    await msg.edit(lang('highlight_uploading'))
    await context.client.send_file(context.chat_id, result, reply_to=reply_id)
    await msg.delete()
def update(self):
    """Apply upgrade bonuses, then cast a Magic projectile at most
    once every 1.5 seconds when any enemy is within 250 pixels."""
    if self.upgrade >= 1:
        self.damage = 10
    if self.upgrade == 2:
        self.delays = 0.7
    enemy_names = ("monster1", "monster2", "monster3", "monster4",
                   "boss", "teemo")
    for game_object in game_world.all_objects():  # front-most monster position
        label = str(game_object)
        if any(label.find(name) != -1 for name in enemy_names):
            distance = math.sqrt((game_object.x - self.x) ** 2
                                 + (game_object.y - self.y) ** 2)
            if distance < 250:
                if get_time() >= self.time + 1.5:
                    # Cast the spell and reset the cooldown timer.
                    magic = Magic(self.x, self.y, self.damage, self.delays)
                    game_world.add_object(magic, 2)
                    self.time = get_time()
def clean(self, data, initial=None):
    """Validate an uploaded picture: size-check it against the
    resource database limit, spool it to a real file when needed,
    MIME-sniff it with libmagic, and attach the result to the cleaned
    value as `ductus_mime_type`."""
    rv = super(PictureFileField, self).clean(data, initial)
    if data is None:
        # this happens with flickr pictures, see https://code.ductus.us/ticket/187
        return rv
    # make sure the blob is small enough to fit in the ResourceDatabase
    # without raising SizeTooLargeError
    max_blob_size = get_resource_database().max_blob_size
    if data.size > max_blob_size:
        raise forms.ValidationError(self.error_messages['file_too_large']
                                    % max_blob_size)
    filename_requires_cleanup = False
    try:
        if hasattr(data, 'temporary_file_path'):
            # Large upload: Django already spooled it to disk.
            filename = data.temporary_file_path()
        else:
            # In-memory upload: write chunks to a temp file so magic
            # can sniff it from a real path.
            fd, filename = mkstemp()
            filename_requires_cleanup = True
            f = os.fdopen(fd, 'wb')
            try:
                for chunk in data.chunks():
                    f.write(chunk)
            finally:
                f.close()
        from magic import Magic
        mime_type = Magic(mime=True).from_file(filename)
        try:
            logger.debug("Mime type detected: %s", mime_type)
        except KeyError:
            # NOTE(review): a KeyError guard around a debug log looks
            # like a leftover from an earlier mime-table lookup; as
            # written this ValidationError appears unreachable —
            # confirm before relying on it.
            raise forms.ValidationError(
                self.error_messages['unrecognized_file_type'])
        #TODO: double check the file type, like we do for audio files
        rv.ductus_mime_type = mime_type
        return rv
    finally:
        # Remove the temp file we created (never Django's own).
        if filename_requires_cleanup:
            os.remove(filename)
def get_file_type(filename):
    """ Return file mime type
    Input: filename
    Output: a `file_types` token ('docx'/'doc' for Word documents), or
    None when detection fails or the MIME type is unknown.
    """
    try:
        mime_type = Magic(mime=True).from_file(filename)
    except Exception:
        # Narrowed from a bare `except:`; detection failures still
        # fall through to the implicit None return.
        pass
    else:
        file_type = file_types.get(mime_type)
        if file_type == 'word':
            # Legacy .doc and OOXML .docx can share MIME types; a
            # .docx is a zip container, a .doc is not.
            if zipfile.is_zipfile(filename):
                return 'docx'
            else:
                return 'doc'
        return file_type
def get(self, notebook_name, note_name):
    """Dispatch a note GET: delete/edit/star/unstar actions, else view
    the note as plain text, a rendered page, or a file download."""
    notebook_name = self.encode_name(notebook_name)
    note_name = self.encode_name(note_name)
    action = self.get_argument('a', 'view')
    if action == 'delete':
        self._delete(notebook_name, note_name, confirmed=False)
    elif action == 'edit':
        self._edit(notebook_name, note_name, confirmed=False)
    elif action == 'star':
        self._star(notebook_name, note_name, star='set')
    elif action == 'unstar':
        self._star(notebook_name, note_name, star='unset')
    else:
        path = join(self.settings.repo, notebook_name, note_name)
        # A leading-dot twin of the note marks "prefer plain text".
        dot_path = join(self.settings.repo, notebook_name, '.' + note_name)
        highlight = self.get_argument('hl', None)
        with Magic() as m:
            # Open the file since m.id_filename() does not accept utf8
            # paths, not even when using path.decode('utf8')
            with open(path) as f:
                mime = m.id_buffer(f.read())
            if 'text' in mime or 'empty' in mime:
                self._view_plaintext(notebook_name=notebook_name,
                                     note_name=note_name,
                                     highlight=highlight)
            elif exists(dot_path):
                # Dot-marked binary note: serve as file only when the
                # client explicitly asks for a download.
                download = self.get_argument('dl', False)
                if download:
                    self._view_file(notebook_name=notebook_name,
                                    note_name=note_name)
                else:
                    self._view_plaintext(notebook_name=notebook_name,
                                         note_name=note_name,
                                         highlight=highlight, dot=True)
            else:
                self._view_file(notebook_name=notebook_name,
                                note_name=note_name)
class TestHandler(HandlerTestCase):
    """Tests for the PDF Handler (conversion and metadata round-trips)."""

    def afterSetUp(self):
        self.kw = dict(env=dict(PATH=self.env_path))
        self.file_detector = Magic(mime=True)

    def _read(self, path):
        # Read a fixture with the handle properly closed; the original
        # leaked open(...).read() in every test method.
        with open(path) as f:
            return f.read()

    def testConvertPDFtoText(self):
        """Test conversion of pdf to txt"""
        pdf_document = self._read("data/test.pdf")
        handler = Handler(self.tmp_url, pdf_document, "pdf", **self.kw)
        txt_document = handler.convert("txt")
        self.assertTrue(txt_document.startswith("UNG Docs Architecture"))

    def testConvertPStoPDF(self):
        """Test conversion of ps to pdf"""
        ps_document = self._read("data/test.ps")
        handler = Handler(self.tmp_url, ps_document, "ps", **self.kw)
        pdf_document = handler.convert("pdf")
        mimetype = self.file_detector.from_buffer(pdf_document)
        self.assertEquals(mimetype, "application/pdf")

    def testgetMetadata(self):
        """Test if the metadata are extracted correctly"""
        pdf_document = self._read("data/test.pdf")
        handler = Handler(self.tmp_url, pdf_document, "pdf", **self.kw)
        metadata = handler.getMetadata()
        self.assertEquals(type(metadata), DictType)
        self.assertNotEquals(metadata, {})
        self.assertEquals(metadata["title"], 'Free Cloud Alliance Presentation')

    def testsetMetadata(self):
        """Test if the metadata is inserted correctly"""
        pdf_document = self._read("data/test.pdf")
        handler = Handler(self.tmp_url, pdf_document, "pdf", **self.kw)
        metadata_dict = {"title": "Set Metadata Test", "creator": "gabriel\'@"}
        new_document = handler.setMetadata(metadata_dict)
        handler = Handler(self.tmp_url, new_document, "pdf", **self.kw)
        metadata = handler.getMetadata()
        self.assertEquals(metadata["title"], 'Set Metadata Test')
        self.assertEquals(metadata['creator'], 'gabriel\'@')
def download(cls, track_name, track_url):
    """ Download track

    Checks the cache first; otherwise asks the pleer.net site API for
    the real file URL, streams the file to the cache path, and rejects
    HTML payloads (error pages).  Returns the local filename or None.
    """
    cache = MixedCache()
    # Cache key: hash of the track name, stored with an .mp3 suffix.
    filename = os.path.join(cls.cfg_data().get('cache_dir'),
                            cache.track_to_hash(track_name)) + '.mp3'
    # Try remote HTTP cache first
    result = cache.get_from_cache(filename)
    if result:
        return result
    track_id = track_url.replace('http://pleer.net/en/download/page/', '')
    url = 'http://pleer.net/site_api/files/get_url'
    # Browser-like headers used for the actual file fetch below.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'en-US,en;q=0.5',
        'X-Requested-With': 'XMLHttpRequest',
        'Referer': 'http://pleer.net/en/download/page/%s' % track_id
    }
    # Ask the site API for the direct download link...
    reply = requests.post(url, data={
        'action': 'download',
        'id': track_id
    }, timeout=10)
    result = json.loads(reply.text).get('track_link')
    # ...then stream the file to the cache location in chunks.
    r = requests.get(result, headers=headers, stream=True, timeout=10)
    with open(filename, 'wb') as fd:
        for chunk in r.iter_content(cls.chunk_size):
            fd.write(chunk)
    # An HTML payload means we got an error page, not audio.
    with Magic() as magic:
        ftype = magic.id_filename(filename)
        if ftype.startswith('HTML'):
            filename = None
    if filename:
        cache.copy_to_cache(filename)
    return filename
def _get_magic(self, xaf_file, magic_file=None):
    """Run libmagic on `xaf_file`, caching one Magic instance per
    magic database ("system" for the default); returns
    "magic_exception" when the call fails."""
    global MAGIC_OBJECTS_CACHE
    if magic_file is not None:
        key = hashlib.md5(magic_file.encode('utf8')).hexdigest()
    else:
        key = "system"
    try:
        if key not in MAGIC_OBJECTS_CACHE:
            MAGIC_OBJECTS_CACHE[key] = Magic(magic_file=magic_file)
        magic = MAGIC_OBJECTS_CACHE[key]
        tag_magic = magic.from_file(xaf_file.filepath)
    except Exception as e:
        # Log which configuration failed, then degrade gracefully.
        if magic_file is None:
            self.warning(
                "exception during magic call with system magic "
                "configuration: %s => let's return magic_exception as "
                "magic output" % str(e))
        else:
            self.warning(
                "exception during magic call with custom magic: %s "
                "configuration: %s => let's return magic_exception as "
                "magic output" % (magic_file, str(e)))
        tag_magic = "magic_exception"
    return tag_magic
def upload_resume(token, filename):
    ''' Upload and parse a resume file.

    The huntflow API docs require a multipart/form-data request, but
    setting 'Content-Type': 'multipart/form-data' in the headers makes
    the request fail with {"errors": [{"type": "server_error"}]}, so
    the content type is passed per-file inside `files` instead (the
    same request succeeds via curl but not via requests + the header).
    '''
    url = 'https://{host}/account/{id}/upload'.format(
        host=HOST,
        id=ORGANIZATION_ID,
    )
    headers = {
        'User-Agent': 'test-quest ([email protected])',
        'Authorization': token,
        # 'Content-Type': 'multipart/form-data',
        'X-File-Parse': 'true',
    }
    content_type = Magic(mime=True).from_file(filename)
    # Fixed: the original left the file handle open for the process
    # lifetime; a context manager closes it after the request.
    with open(filename, 'rb') as data:
        files = {
            'file': (filename, data, content_type),
        }
        response = requests.post(
            url,
            headers=headers,
            files=files,
        )
    if response.ok:
        logger.info('Файлы резюме успешно распознан: {}'.format(files))
    else:
        logger.error('Ошибка загрузки файлов резюме: {}'.format(files))
    return response.json()
async def highlight(context):
    """ Generates syntax highlighted images. """
    # Ignore forwarded messages.
    if context.fwd_from:
        return
    reply = await context.get_reply_message()
    reply_id = None
    await context.edit("Rendering image, please wait . . .")
    if reply:
        reply_id = reply.id
        target_file_path = await context.client.download_media(
            await context.get_reply_message()
        )
        if target_file_path is None:
            # No attachment: highlight the reply's text instead.
            message = reply.text
        else:
            # Only plain-text attachments are read from disk; any other
            # MIME type falls back to the reply's text.
            if Magic(mime=True).from_file(target_file_path) != 'text/plain':
                message = reply.text
            else:
                with open(target_file_path, 'r') as file:
                    message = file.read()
            # Clean up the downloaded media file either way.
            remove(target_file_path)
    else:
        if context.arguments:
            message = context.arguments
        else:
            await context.edit("`Unable to retrieve target message.`")
            return
    # Let pygments pick a lexer from the content, then render to JPEG.
    lexer = guess_lexer(message)
    formatter = img.JpgImageFormatter(style="colorful")
    result = syntax_highlight(message, lexer, formatter, outfile=None)
    await context.edit("Uploading image . . .")
    await context.client.send_file(
        context.chat_id,
        result,
        reply_to=reply_id
    )
    await context.delete()
def _store_file(self, filepath, target_file):
    '''
    Stores file in database with its path, hash and type.
    Extract tags in case of ASCII text
    :param filepath - type: str, path of file to be stored
    :param target_file - type: str, indicates type of file and suitable node for storage
    '''
    if not self._check_file(filepath):
        print(colored.red("[!] No Such File exists!!"))
        return
    # libmagic textual description, e.g. "ASCII text" or "PNG image data".
    file_type = Magic().from_file(filepath)
    # Append a "path :: description" line to the per-category text file.
    with open(f'{target_file}.txt', 'a') as file:
        file.write('{} :: {}\n'.format(filepath, file_type))
    # NOTE(review): 'type' stores the category name (target_file), not the
    # libmagic description — confirm that is intentional.
    self.files_db.insert({
        'file': filepath,
        'hash': self._sha_file(filepath),
        'type': target_file
    })
    # Text files additionally get their tags extracted and stored.
    if 'ASCII text' in file_type:
        tag_record = self._tag_compiler(filepath)
        self._tag_store(tag_record)
def _download(self, ssl):
    """Fetch the head of self.uri and record access and type metadata.

    :param ssl: when False, certificate and hostname verification are
        disabled (used to retry hosts with broken TLS).
    """
    ctx = create_default_context()
    if not ssl:
        # Caller asked for an unverified connection: accept any cert.
        ctx.check_hostname = False
        ctx.verify_mode = CERT_NONE
    with urlopen(self.uri, timeout=self._timeout, context=ctx) as connection:
        # 1 KiB is enough for libmagic to sniff the content type.
        head = connection.read(1024)
        self.accessibility = AccessInfo(
            status=connection.status,
            reason=connection.reason,
            accesible=True,
            ssl_error=not ssl,
        )
        # Prefer the extension from Content-Disposition, falling back to
        # the last suffix of the (possibly redirected) URL path.
        extension = None
        filename = connection.getheader("content-disposition")
        if filename:
            extension = filename.split(".")[-1]
        else:
            path = urlparse(connection.url).path
            extension = path.split(".")[-1]
        # Two libmagic bindings are supported: `filemagic` (context
        # manager API) when LIB_MAGIC is set, `python-magic` otherwise.
        if LIB_MAGIC:
            with Magic(flags=MAGIC_MIME_TYPE) as m:
                magicType = m.id_buffer(head)
        else:
            magicType = magic.from_buffer(head)
        self.type = TypeInfo(
            magic=magicType,
            http=connection.getheader("content-type"),
            extension=extension,
        )
def afterSetUp(self):
    """Prepare shared fixtures: subprocess env kwargs and a MIME detector."""
    self.file_detector = Magic(mime=True)
    self.kw = {'env': {'PATH': self.env_path}}
def get_content_type(self, file_):
    """ Detect the MIME type of an open file object via libmagic.

    Only the first 1024 bytes are read — enough for identification.
    """
    head = file_.read(1024)
    return Magic(mime=True).from_buffer(head)
def setUp(self):
    # Detector configured to report the text encoding (e.g. "utf-8")
    # rather than a file-type description.
    self.m = Magic(mime_encoding=True)
def afterSetUp(self):
    """Load the test fixture and build the Handler under test.

    Reads ./data/test.ogv as the input payload, prepares subprocess env
    kwargs and a MIME-type detector used by the assertions.
    """
    # Fix: read the fixture via a context manager so the file handle is
    # closed; the previous version leaked the open file object.
    with open("./data/test.ogv") as ogv_file:
        self.data = ogv_file.read()
    self.kw = dict(env=dict(PATH=self.env_path))
    self.input = Handler(self.tmp_url, self.data, "ogv", **self.kw)
    self.file_detector = Magic(mime=True)
def guess(path: pathlib.Path) -> str:
    """Guess a lexer type based on a path."""
    detected = Magic(mime=True).from_file(str(path))
    return mime_map.get(detected, "text")
def setUp(self):
    # `mime` is a class attribute toggled by subclasses: when True the
    # detector reports MIME types instead of textual descriptions.
    self.m = Magic(mime=self.mime)
# Make sure a file is present if len(argv) < 2: print('Please provide an image file') exit(1) input_file = argv[1] extension = '.' + input_file.split('.')[-1] if not isfile(input_file): print('File not found: %s' % input_file) exit(1) # Make sure the file is an image mime = Magic(mime=True) mediatype = mime.from_file(input_file) media_type = mediatype.split('/')[0] media_subtype = mediatype.split('/')[-1] if media_type != 'image': print('Not an image: %s' % input_file) exit(1) if media_subtype != 'jpeg' and media_subtype != 'png': print('[!] Warning! This tool is currently only tested on jpeg and png images. The type of the current image (%s) is not tested' % media_subtype) # Do the stripping image = Image.open(input_file) image_stripped = Image.new(image.mode, image.size) image_stripped.putdata(list(image.getdata()))
def _getFileType(self, output_data):
    """Return the MIME type of base64-encoded output data."""
    decoded = decodestring(output_data)
    detector = Magic(mime=True)
    return detector.from_buffer(decoded)
def get_file_mime_type(file_name):
    """Return the MIME type (e.g. 'text/plain') of the file at *file_name*."""
    return Magic(mime=True).from_file(file_name)
def render(old_path, old_blob, new_path, new_blob):
    """Render a diff box for one changed file.

    Chooses between an image diff (both sides image MIME types) and a
    text diff (two-sided, one-sided add/delete, or metadata-only), then
    delegates the actual rendering to render_box with a closure.
    A None blob means that side does not exist (created/deleted file).
    """
    # MIME detection is best-effort: python-magic may not be installed.
    old_type = None
    new_type = None
    try:
        from magic import Magic
        mag = Magic(mime=True)
        if old_blob:
            old_type = mag.from_buffer(old_blob)
        if new_blob:
            new_type = mag.from_buffer(new_blob)
    except ImportError:
        pass
    types = []
    if old_type:
        types.append(old_type)
    if new_type:
        types.append(new_type)
    if len([t for t in types if t.startswith('image/')]) > 1:
        """ Image diff. """
        icon = 'picture'

        def do_render(out):
            # Side-by-side table with both images inlined as data: URIs.
            out.extend('<table class="diff-any diff-sidebyside diff-image">')
            out.extend('<td class="left old num"></td>')
            out.extend('<td class="left old line">')
            if old_blob:
                data = "data:%s;base64,%s" % (old_type, b64encode(old_blob))
                out.extend('<img alt="" src="%s">' % smart_str(escape(data)))
            out.extend('</td>')
            out.extend('<td class="right new num"></td>')
            out.extend('<td class="right new line">')
            if new_blob:
                data = "data:%s;base64,%s" % (new_type, b64encode(new_blob))
                out.extend('<img alt="" src="%s">' % smart_str(escape(data)))
            out.extend('</td>')
            out.extend('</tr>')
            out.extend('</table>')
    else:
        """ Text diff. """
        # Token streams default to empty tuples when highlighting fails.
        old_token_lines = ()
        new_token_lines = ()
        try:
            """ Syntax highlighting for the old text. """
            if old_blob:
                old_lexer = guess_lexer_for_filename(old_path, old_blob)
                old_tokens = old_lexer.get_tokens(old_blob)
                old_token_lines = TokenLineStream(old_tokens)
        except ClassNotFound:
            pass
        try:
            """ Syntax highlighting for the new text. """
            if new_blob:
                new_lexer = guess_lexer_for_filename(new_path, new_blob)
                new_tokens = new_lexer.get_tokens(new_blob)
                new_token_lines = TokenLineStream(new_tokens)
        except ClassNotFound:
            pass
        if old_blob is not None and new_blob is not None:
            """ Normal two sided diff """
            icon = 'edit'

            def do_render(out):
                diff_line_stream = diff(old_blob, new_blob)
                render_side_diff(out, diff_line_stream, (old_token_lines, new_token_lines))
        elif old_blob is not None:
            """ One sided deletion diff. """
            icon = 'trash'

            def do_render(out):
                render_blob(out, -1, old_blob, old_token_lines)
        elif new_blob is not None:
            """ One sided creation diff. """
            icon = 'file'

            def do_render(out):
                render_blob(out, 1, new_blob, new_token_lines)
        else:
            """ No sided diff. eg: Metadata change. """
            icon = 'question-sign'

            def do_render(out):
                out.extend("No data")
    return render_box(old_path, new_path, icon, do_render)
def __init__(self, damage, knockback_multiplier=1):
    # Delegate straight to the Magic base class; this subclass adds no
    # extra state of its own here.
    Magic.__init__(self, damage, knockback_multiplier)
class GnuPG_Decryptor:
    """
    Class representing the Native application of the GnuPG_Decryptor
    browser extension. The native application is responsible for
    accessing private keys and decrypting content of a web page.

    Communication with the background script uses the native-messaging
    framing: a 4-byte little-endian length prefix followed by a UTF-8
    JSON payload, read from stdin and written to stdout.
    """

    def __init__(self):
        # keyId -> passphrase mapping for available secret keys
        self._passwords = dict()
        self._gui = None
        self._QApp = None
        # sudo password (str) or None when sudo is not used
        self._sudo = None
        # custom GnuPG home directory or None for the default
        self._homedir = None
        # maximum payload size per native message block (bytes)
        self.MAX_MESSAGE_SIZE = 750 * 1024
        self.mimeResolver = Magic(mime=True)
        # serializes writes to stdout (decrypt runs in worker threads)
        self._lock = Lock()

    def show(self):
        """
        Method displays GUI window for user
        """
        # If Gui is not defined yet, construct it
        if (self._gui is None):
            self._QApp = QApplication(sys.argv)
            initKeys = []
            for keyId, password in self._passwords.items():
                initKeys.append({'id': keyId, 'password': password})
            self._gui = GnuPG_Decryptor_GUI(self, initKeys, self._sudo, self._homedir)
        # show the window
        self._gui.show()
        return self._QApp.exec_()

    def keyList(self, settings):
        """
        Method returns list of secret keys based on sudo and homedir settings
        """
        stdin = ''
        args = []
        # use sudo
        if (settings['sudo']['use']):
            # sudo argument
            args.append('sudo')
            # do not remember password
            args.append('-Sk')
            # add password to stdin
            stdin += settings['sudo']['password'] + '\n'
        # gpg call
        args.append('gpg')
        # use homedir
        if (settings['home']['use']):
            args.append('--homedir')
            args.append(settings['home']['homedir'])
        # command to list secret keys
        args.append('--list-secret-keys')
        # call subprocess
        process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate(stdin.encode())
        retcode = process.returncode
        ids = []
        # if success
        if (retcode == 0):
            stdout = stdout.decode().splitlines()
            # uid lines carry the user id after a fixed-width prefix
            ids = [{
                'id': line[25:].strip(),
                'password': ''
            } for line in stdout if line.startswith('uid')]
        return {'returnCode': retcode, 'keys': ids}

    def setPasswords(self, config):
        """
        Method sets new keys and passwords.
        """
        # clear current keys and passwords
        self._passwords = dict()
        # set new keys and password
        for key in config['keys']:
            self._passwords[key['id']] = key['password']
        # set sudo
        if (config['sudo']['use']):
            self._sudo = config['sudo']['password']
        else:
            self._sudo = None
        # set homedir parameter
        if (config['home']['use']):
            self._homedir = config['home']['homedir']
        else:
            self._homedir = None
        # notify background script about changes
        self.updateKeys()

    def getKeyUidFromId(self, keyId):
        """
        From key id (or fingerprint if you prefer) get UID using the gpg
        application. Returns None on failure.
        """
        args = ['gpg']
        # if homedir parameter should be used
        if (not self._homedir is None):
            args.append('--homedir')
            args.append(self._homedir)
        # add gpg arguments
        args.append('--list-public-keys')
        args.append('--fingerprint')
        args.append(keyId)
        # call subprocess
        process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate()
        retcode = process.returncode
        uid = None
        # if success
        if (retcode == 0):
            stdout = stdout.decode().splitlines()
            uids = [
                line[25:].strip() for line in stdout
                if line.startswith('uid')
            ]
            if (uids):
                uid = uids[0]
        return uid

    def getKeyUidFromData(self, data):
        """
        Method finds out, which keys were used for data encryption.
        Returns a list of key UIDs.
        """
        # command line arguments
        args = ['gpg', '--list-packets', '--list-only']
        # call gpg
        process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, _ = process.communicate(data)
        retcode = process.returncode
        keys = []
        # if success
        if (retcode == 0):
            stdout = stdout.decode().splitlines()
            # we care only about lines describing public-key packets
            filtered = [line for line in stdout if line.startswith(':pubkey')]
            for line in filtered:
                # find where ID/fingerprint is
                idx1 = line.find('keyid ') + 6
                idx2 = line.find(',', idx1)
                if (idx2 == -1):
                    idx2 = len(line)
                # get uid from id/fingerprint
                uid = self.getKeyUidFromId(line[idx1:idx2])
                if (not uid is None):
                    keys.append(uid)
        return keys

    @staticmethod
    def get_message():
        """
        Reads one native-messaging frame from the background script.
        Exits the process cleanly when stdin is closed.
        """
        raw_length = sys.stdin.buffer.read(4)
        if not raw_length:
            sys.exit(0)
        message_length = unpack('=I', raw_length)[0]
        message = sys.stdin.buffer.read(message_length).decode("utf-8")
        return loads(message)

    @staticmethod
    def encode_message(message_content):
        """
        Encode a message for transmission, given its content.
        Returns the length prefix and packed JSON payload separately.
        """
        encoded_content = dumps(message_content).encode("utf-8")
        encoded_length = pack('=I', len(encoded_content))
        return {
            'length': encoded_length,
            'content': pack(str(len(encoded_content)) + "s", encoded_content)
        }

    def send_message(self, encoded_message):
        """
        Sends an encoded message to background script.
        Serialized with a lock: decrypt() writes from worker threads.
        """
        with self._lock:
            sys.stdout.buffer.write(encoded_message['length'])
            sys.stdout.buffer.write(encoded_message['content'])
            sys.stdout.buffer.flush()

    def debug(self, messageString):
        """
        Sends debug message to background script
        """
        self.send_message(
            GnuPG_Decryptor.encode_message({
                'message': messageString,
                'type': 'debug'
            }))

    def loadKeys(self):
        """
        Asks background scripts for stored keys.
        """
        self.send_message(
            GnuPG_Decryptor.encode_message({'type': 'getKeysRequest'}))

    def updateKeys(self):
        """
        Update keys in background scripts.
        Passphrases are blanked before leaving the native application.
        """
        keys = self._passwords.copy()
        for key in keys.keys():
            keys[key] = ''
        message = {'type': 'updateKeysRequest', 'keys': keys}
        if (not self._sudo is None):
            message['sudo'] = 1
        else:
            message['sudo'] = 0
        if (not self._homedir is None):
            message['homedir'] = self._homedir
        self.send_message(GnuPG_Decryptor.encode_message(message))

    def decrypt(self, rawData, keys, messageId, tabId):
        """
        Decrypts the data and sends decrypted content to the content script.

        Tries each candidate key in turn; the first successful gpg run is
        base64-encoded, split into MAX_MESSAGE_SIZE blocks and streamed
        back. On total failure an error response is sent instead.
        """
        err = b''
        retcode = 0
        for key in keys:
            args = []
            sudoPass = ''
            keyPass = self._passwords[key]
            # if sudo should be used
            if (not self._sudo is None):
                args.append('sudo')
                args.append('-Sk')
                sudoPass = self._sudo + '\n'
            # gpg argument
            args.append('gpg')
            # if homedir should be used
            if (not self._homedir is None):
                args.append('--homedir')
                args.append(self._homedir)
            # be quiet as possible
            args.append('--quiet')
            # use password if we know it
            if (keyPass):
                args.append('--no-tty')
                args.append('--pinentry-mode=loopback')
                args.append('--passphrase')
                args.append(keyPass)
            # decrypt command for gpg
            args.append('--decrypt')
            # call subprocess
            process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
            decrypted, err = process.communicate(sudoPass.encode() + rawData)
            retcode = process.returncode
            # if decryption failed, try next key
            if (retcode != 0):
                continue
            # get mimeType of data
            mimeType = self.mimeResolver.from_buffer(decrypted)
            # encode data using base64
            decrypted = b64encode(decrypted)
            # split data into blocks
            blocks = [
                decrypted[i:i + self.MAX_MESSAGE_SIZE]
                for i in range(0, len(decrypted), self.MAX_MESSAGE_SIZE)
            ]
            # get last block of data
            lastBlock = blocks.pop()
            # prepare response
            response = {
                'messageId': messageId,
                'success': 1,
                'message': '',
                'type': 'decryptResponse',
                'data': '',
                'encoding': 'base64',
                'mimeType': mimeType,
                'lastBlock': 0,
                'tabId': tabId
            }
            # send all blocks, except last one
            for block in blocks:
                response['data'] = block.decode()
                self.send_message(GnuPG_Decryptor.encode_message(response))
            # send last block
            response['data'] = lastBlock.decode()
            response['lastBlock'] = 1
            self.send_message(GnuPG_Decryptor.encode_message(response))
            break
        if (retcode != 0):
            errorMessage = 'Unable to decrypt data: ' + err.decode()
            self.send_message(
                GnuPG_Decryptor.encode_message({
                    'messageId': messageId,
                    'success': 0,
                    'message': errorMessage,
                    'type': 'decryptResponse',
                    'data': '',
                    'tabId': tabId
                }))
        elif (not keys):
            errorMessage = 'Unable to decrypt data: Required key is not present'
            self.send_message(
                GnuPG_Decryptor.encode_message({
                    'messageId': messageId,
                    'success': 0,
                    'message': errorMessage,
                    'type': 'decryptResponse',
                    'data': '',
                    'tabId': tabId
                }))

    def main(self):
        """
        Reads messages from background scripts and creates responses.
        This is the native application's event loop.
        """
        # accumulates multi-block request payloads, keyed by messageId
        largeRequests = dict()
        # load stored keys
        self.loadKeys()
        while True:
            # read message
            message = GnuPG_Decryptor.get_message()
            errorMessage = str()
            if (message['type'] == 'decryptRequest' and 'tabId' in message):
                # message contains encrypted data
                # get id of sender
                tabId = message['tabId']
                # decode data
                if (message['encoding'] == 'base64'):
                    rawData = b64decode(message['data'])
                elif (message['encoding'] == 'ascii'):
                    rawData = message['data'].encode()
                else:
                    errorMessage = 'Invalid encoding: ' + message['encoding']
                    self.send_message(
                        GnuPG_Decryptor.encode_message({
                            'messageId': message['messageId'],
                            'success': 0,
                            'message': errorMessage,
                            'type': 'decryptResponse',
                            'data': '',
                            'tabId': tabId
                        }))
                    continue
                # data are split into blocks, join those blocks
                if (message['lastBlock'] == 0):
                    largeRequests[message['messageId']] = largeRequests[
                        message['messageId']] + rawData if (
                            message['messageId'] in largeRequests) else rawData
                    continue
                elif (message['messageId'] in largeRequests):
                    rawData = largeRequests[message['messageId']] + rawData
                    del (largeRequests[message['messageId']])
                # get key, that was used for encryption
                keys = self.getKeyUidFromData(rawData)
                # use only keys that are available
                keys = [key for key in keys if key in self._passwords]
                #start_time = time.time()
                # decrypt on a worker thread so the loop keeps reading
                t1 = Thread(target=self.decrypt,
                            args=(rawData, keys, message['messageId'], tabId))
                t1.start()
                # if we have at least one valid key, decrypt data
            elif (message['type'] == 'displayWindow'):
                # User clicked on icon - display window
                self.show()
            elif (message['type'] == 'getKeysResponse'):
                # Set new keys
                self._passwords = message['keys']
                self._homedir = message[
                    'homedir'] if 'homedir' in message else None
                self._sudo = '' if 'sudo' in message and message[
                    'sudo'] else None
from importlib.util import spec_from_file_location, module_from_spec
import os
import config
from glob import glob
import pandas as pd
from magic import Magic

# Shared detector reporting the text encoding of dataset files.
magic = Magic(mime_encoding=True)

DATA_ROOT = config.storage.legacy.dataRoot


def exists(relative_path):
    """Return True if *relative_path* exists under the datasets root."""
    abs_path = os.path.join(DATA_ROOT, 'datasets', relative_path)
    return os.path.exists(abs_path)


def bytes(relative_path):
    """Return the size in bytes of the dataset file at *relative_path*.

    NOTE: intentionally shadows the ``bytes`` builtin — name kept for
    backward compatibility with existing callers.
    """
    abs_path = os.path.join(DATA_ROOT, 'datasets', relative_path)
    return os.stat(abs_path).st_size


def read_raw(relative_path):
    """Return the text content of the dataset file at *relative_path*."""
    abs_path = os.path.join(DATA_ROOT, 'datasets', relative_path)
    data = ""
    # Bug fix: previously opened `relative_path` directly, ignoring the
    # resolved abs_path, so reads only worked when the current working
    # directory happened to be the datasets root.
    with open(abs_path, 'r') as file:
        data = file.read()
    return data
parts = line.split("\t") type, ext = parts[0], parts[1:] type_maps[type] = ext except: print("Cannot read " + magic_file) exit(0) print("{:35s} \t {:10s} \t {:20s}".format('Camouflaged file', 'Claimed ext', 'Expected ext')) # --------------------------------------------------------------------------------------------- # # For each file: # + Get claimed ext # + Using magic and mime.types to find out expected exts # --------------------------------------------------------------------------------------------- # mg = Magic(mime=True, mime_encoding=False, keep_going=True, uncompress=False) for folder, subs, files in os.walk(directory): for filename in files: try: full_path = folder + "//" + filename detected_type = str(mg.from_file(full_path).decode("utf-8")) ext = os.path.splitext(filename)[1][1:] expected_exts = type_maps[detected_type] if (ext != "" and ext not in expected_exts): # Print out highly likely camouflaged files print_result(full_path, ext, "".join([b + " " for b in expected_exts])) except: pass