def file_upload(request):
    """Handle a media upload (Django REST style view).

    Stores a supported .flv/.mp4 upload under a uuid-based name,
    generates a thumbnail from the 10-second frame, records a TempFile
    row, and returns the stored names.
    """
    uploaded = request.FILES.get('file', None)
    content_type = request.DATA.get('type', None)
    if uploaded:
        # TODO: Streaming Video (FLV, F4V, MP4, 3GP)
        #       Streaming Audio (MP3, F4A, M4A, AAC)
        # Supported MIME type -> container extension.  Replaces the
        # original's two duplicated if-branches.
        extensions = {u'video/x-flv': '.flv', u'video/mp4': '.mp4'}
        file_name = ''
        thumbnail = ''
        convert = Converter()
        if content_type in extensions:
            uuid_string = str(uuid.uuid1())
            file_name = uuid_string + extensions[content_type]
            thumbnail = uuid_string + '.jpg'
        if file_name != '':
            file_path = FILE_PATH + file_name
            # ``with`` closes the file; the original also called
            # destination.close() redundantly inside the block.
            with open(file_path, 'wb+') as destination:
                for chunk in uploaded.chunks():
                    destination.write(chunk)
            # Thumbnail taken from the frame at t=10s.
            convert.thumbnail(file_path, 10, FILE_PATH + thumbnail)
            temp_file = TempFile(name=file_name, path=file_path)
            temp_file.save()
            return Response({
                'file_name': file_name,
                'thumbnail': thumbnail
            })
        else:
            # Fixed grammar of the original status message.
            return Response({
                'status': 'Currently only .mp4 and .flv are supported.'
            })
def test_(self):
    """Converter.convert should emit CSV with a union header of every
    column key seen ("1","2","3","5") and one row per outer key,
    leaving missing cells empty."""
    d = OrderedDict((
        ("A", OrderedDict((("1", 'o'), ("2", 'o'), ("3", 'x'),))),
        ("B", OrderedDict((("1", 'x'), ("3", 'o'),))),
        ("D", OrderedDict((("1", 'x'), ("2", 'x'), ("3", 'x'),))),
        ("E", OrderedDict(
            (("1", 'x'), ("2", 'x'), ("3", 'x'), ("5", "x"),))),
        ("F", OrderedDict()),))
    converter = Converter()
    csv_data = converter.convert(d)
    # Header row lists every column present in any record.
    header = csv_data.readline()
    self.assertEqual(header, ",1,2,3,5\n")
    a = csv_data.readline()
    self.assertEqual(a, "A,o,o,x,\n")
    b = csv_data.readline()
    self.assertEqual(b, "B,x,,o,\n")
    d = csv_data.readline()
    self.assertEqual(d, "D,x,x,x,\n")
    e = csv_data.readline()
    self.assertEqual(e, "E,x,x,x,x\n")
    f = csv_data.readline()
    self.assertEqual(f, "F,,,,\n")
def process_file(csv_file, thread_count, region_code, shift_radius, persistently):
    """Run the full CSV -> geocoded sqlite pipeline for one input file.

    Steps: structure check, CSV->sqlite conversion, region naming,
    address parsing, geocoding (osm.ru), point shifting, and lat/long
    column extraction.  Python 2 (print statements).
    """
    #create instances
    conv = Converter()
    checker = DataStructureChecker()
    region_helper = RegionNameHelper()
    #district_helper = DistrictNameHelper()
    addr_parser = AddressParser()
    geocoder = OsmRuGeocoder()
    extractor = GeomColumnsExtractor()
    shifter = PointShift()
    print "Process " + csv_file + ": "
    # The sqlite output lives next to the input csv.
    sqlite_path = csv_file.replace('.csv', '.sqlite')
    print "\t Check input data structure..."
    print "\t Check uik addr_v..."
    if not checker.check_addr_v(csv_file):
        # Bad structure: abort before any conversion work.
        return
    print "\t Convert to sqlite..."
    conv.processing(csv_file, sqlite_path)
    print "\t Set region name..."
    region_helper.set_region_name(sqlite_path, region_code)
    print "\t Parse address..."
    addr_parser.parse(sqlite_path)
    print "\t Geocode..."
    geocoder.process(sqlite_path, thread_count=thread_count, persistently=persistently)
    print "\t Shift points..."
    shifter.shift(sqlite_path, shift_radius)
    print "\t Extract lat long..."
    extractor.extract_columns(sqlite_path)
class StoreActions():
    """Store-side actions for adding feed entries (Python 2)."""

    def __init__(self):
        self.converter = Converter()
        self.urlFetcher = UrlFetcher()

    def addUrl(self, url, name, title, description):
        """Add a feed entry for *url* unless it duplicates the most
        recent entry of the same feed name."""
        # NOTE(review): replacing '&' with '&' is a no-op — presumably
        # this was meant to unescape '&amp;'; confirm original intent.
        formatedUrl = url.replace('&', '&')
        if name == 'somename' and not url.startswith(
                'http://en.wikipedia.org/'):
            return  # Spam protection for default feed name
        lastFeedEntries = FeedEntry.objects.filter(
            name=name).order_by('-creation_date')
        if len(lastFeedEntries) == 0 or formatedUrl != lastFeedEntries[
                0].url:  # Do not add same URL twice !
            feedentry = FeedEntry(url=formatedUrl, name=name)
            feedentry.description = self.converter.convert(description)
            if not title:
                try:
                    # Scrape the page <title> when none was supplied.
                    title = self.urlFetcher.fetch(
                        url, '(?<=<(title|TITLE)>)[^<|^\r|^\n]*')
                except Exception, err:
                    logging.exception('Error while fetching page title:')
                    feedentry.title = feedentry.url
            # Unconditional: overwrites the fallback set in the except
            # branch above with the converted title.
            feedentry.title = self.converter.convert(title)
            if not feedentry.title:
                feedentry.title = feedentry.url
            feedentry.save()
def main(argv):
    """Translate a source file to C++ via Converter and compile it.

    :param argv: sys.argv-style list; argv[1] is the input path.
    :return: g++ exit status on success, 1 on usage or I/O error.
    """
    try:
        fp = argv[1]
    except IndexError:
        print("Usage: %s <filepath>" % argv[0], file=sys.stderr)
        # BUG FIX: the original fell through with ``fp`` unbound and
        # raised NameError on the open() below.
        return 1
    try:
        with open(fp) as f:
            program = f.read()
    except IOError:
        print("File not found", file=sys.stderr)
        return 1
    converter = Converter(program)
    # Strip the extension (if any) to derive the output base name.
    dot_index = fp.rfind('.')
    name = fp if dot_index == -1 else fp[:dot_index]
    with open(name + '.cpp', 'w') as f:
        f.write(converter.get_cpp())
    #-O2 is especially necessary for tail recursion optimization
    return os.system('g++ -O2 -o %s.exe %s.cpp' % (name, name))
def __init__(self):
    """Wire converter/downloader callbacks, build the main wx form,
    bind its events, then show the window."""
    # Conversion state: the tag identifies the in-flight job.
    self._conversion_tag = None
    self._converter = Converter()
    self._converter.complete_callback = self._on_convert_complete
    self._converter.progress_callback = self._on_convert_progress_change
    # Download state, symmetrical to the conversion state above.
    self._download_tag = None
    self._downloader = Downloader()
    self._downloader.complete_callback = self._on_download_complete
    self._downloader.progress_callback = self._on_download_progress_change
    self.view = mainform.MainForm(
        parent=None,
        title=metadata.title,
        icon_path=_app_icon_path(),
        conversions=Converter.get_conversions(),
    )
    self.view.url_textbox.Bind(wx.EVT_TEXT_ENTER, self._on_url_enter_press)
    self.view.option_button.Bind(wx.EVT_BUTTON, self._on_option_click)
    self.view.download_button.Bind(wx.EVT_BUTTON, self._on_download_click)
    self.view.cancel_button.Bind(wx.EVT_BUTTON, self._on_cancel_click)
    self.view.ok_button.Bind(wx.EVT_BUTTON, self._on_ok_click)
    self.view.Bind(wx.EVT_ACTIVATE, self._on_set_focus)
    self.view.Bind(wx.EVT_CLOSE, self._on_form_close)
    # Pre-fill the URL box before showing the form.
    self._auto_fill_url()
    self.view.Show()
    _initialize_log()
def format(filename):
    """Return the codec of *filename*'s single subtitle stream.

    Probes the file via ffmpeg/ffprobe; yields ``None`` unless the
    file holds exactly one stream and that stream is a subtitle.
    """
    probe_result = Converter(ffmpeg_path='/usr/bin/ffmpeg',
                             ffprobe_path='/usr/bin/ffprobe').probe(filename)
    if probe_result and len(probe_result.streams) == 1:
        only_stream = probe_result.streams[0]
        if only_stream.type == 'subtitle':
            return only_stream.codec
    return None
def test_converter_2pass(self):
    """Two-pass conversion to ogg (theora/vorbis) should complete and
    must not mutate the caller's options dict."""
    c = Converter()
    self.video_file_path = 'xx.ogg'
    options = {
        'format': 'ogg',
        'audio': {
            'codec': 'vorbis',
            'samplerate': 11025,
            'channels': 1,
            'bitrate': 16
        },
        'video': {
            'codec': 'theora',
            'bitrate': 128,
            'width': 360,
            'height': 200,
            'fps': 15
        }
    }
    # Snapshot to detect mutation of the options by convert().
    options_repr = repr(options)
    conv = c.convert('test1.ogg', self.video_file_path, options, twopass=True)
    verify_progress(conv)
    # Convert should not change options dict
    self.assertEqual(options_repr, repr(options))
    self._assert_converted_video_file()
def fetchMetaDatas(self):
    """Probe the media file, classify it, persist metadata, and move
    the file to its recomputed path.

    :raises TypeError: when probing fails (record is deleted first).

    NOTE(review): media type is inferred purely from stream count
    (2 -> video, 1 -> audio) — confirm this matches expected inputs.
    """
    c = Converter()
    infos = c.probe(self.path)
    if infos is None:
        # Unreadable media: remove the record and signal the caller.
        self.delete()
        raise TypeError("File is not a valid media.")
    old_path = self.path
    if (len(infos.streams) == 2):
        self.media_type = "video"
    elif (len(infos.streams) == 1):
        self.media_type = "audio"
    else:
        self.media_type = "unknown"
    self.duration = infos.streams[0].duration
    file_format = infos.format.format
    if "mp4" in file_format:  # Fix for ugly MP4 file formats
        file_format = "mp4"
    self.fileType = file_format
    self.save()
    # save() presumably recomputes self.path from fileType — TODO
    # confirm; the file is then renamed to that new location.
    os.rename(old_path, self.path)
def submit_task():
    """Pick a random video and submit one downscale task for it.

    Loops until a source video strictly larger than at least one of
    the target resolutions is found, submits via submit_task.py, then
    returns.  Uses the module-level video_set/base_path/y_w/y_h/
    service_set tables.
    """
    while True:
        fname = choice(video_set)
        path = os.path.join(base_path, fname)
        c = Converter()
        info = c.probe(path)
        o_w = info.video.video_width
        o_h = info.video.video_height
        service_type = choice(service_set)
        # Smallest target index the source can be scaled down to.
        # (Dropped the original's unused t_w/t_h locals.)
        i = 0
        while i < 3:
            if y_w[i] < o_w and y_h[i] < o_h:
                break
            else:
                i = i + 1
        if i == 3:
            # Source smaller than every target: try another video.
            continue
        # Any target at or above the minimum feasible one is valid.
        i = random.randint(i, 2)
        resolution = str(y_w[i]) + 'x' + str(y_h[i])
        # NOTE(review): command built by concatenation — the path is
        # not shell-quoted; safe only for trusted local filenames.
        cmd = 'python ../submit_task.py -l ' + path + ' -s ' + resolution + ' -p ' + str(service_type) + ' >> task_id '
        os.system(cmd)
        return
def main():
    """CLI entry point: translate the mako file named by -f/--file."""
    arg_parser = ArgumentParser(description=__doc__)
    arg_parser.add_argument('-f', '--file',
                            help='target mako file to be translated')
    parsed = arg_parser.parse_args()
    Converter(parsed.file).convert()
class StoreActions():
    """Store-side actions for adding feed entries (Python 2).

    NOTE(review): near-duplicate of another StoreActions class in this
    file — consider consolidating.
    """

    def __init__(self):
        self.converter = Converter()
        self.urlFetcher = UrlFetcher()

    def addUrl(self, url, name, title, description):
        """Add a feed entry for *url* unless it duplicates the most
        recent entry of the same feed name."""
        # NOTE(review): replacing '&' with '&' is a no-op — presumably
        # meant to unescape '&amp;'; confirm original intent.
        formatedUrl = url.replace('&', '&')
        if name == 'somename' and not url.startswith('http://en.wikipedia.org/'):
            return  # Spam protection for default feed name
        lastFeedEntries = FeedEntry.objects.filter(name=name).order_by('-creation_date')
        if len(lastFeedEntries) == 0 or formatedUrl != lastFeedEntries[0].url:  # Do not add same URL twice !
            feedentry = FeedEntry(url=formatedUrl, name=name)
            feedentry.description = self.converter.convert(description)
            if not title:
                try:
                    # Scrape the page <title> when none was supplied.
                    title = self.urlFetcher.fetch(url, '(?<=<(title|TITLE)>)[^<|^\r|^\n]*')
                except Exception, err:
                    logging.exception('Error while fetching page title:')
                    feedentry.title = feedentry.url
            # Unconditional: overwrites the except-branch fallback.
            feedentry.title = self.converter.convert(title)
            if not feedentry.title:
                feedentry.title = feedentry.url
            feedentry.save()
def hamming_distance(a, b):
    """Return the Hamming distance between equal-length inputs.

    Both inputs are converted to bitstrings via Converter; the
    distance is the number of differing bit positions.

    :raises ValueError: if the inputs differ in length.
    """
    # Explicit exception instead of ``assert`` — asserts are stripped
    # under ``python -O`` and would silently allow unequal lengths.
    if len(a) != len(b):
        raise ValueError("inputs must have equal length")
    # First convert to bitstrings.
    a_bits = Converter(a).bits()
    b_bits = Converter(b).bits()
    return sum(a_bit != b_bit for a_bit, b_bit in zip(a_bits, b_bits))
def test_probe_image(self):
    """Probing still images reports the expected codec (png / mjpeg),
    with and without posters_as_video."""
    c = Converter()
    info = c.probe('test.png', posters_as_video=True)
    self.assertEqual(info.video.codec, 'png')
    info = c.probe('test.jpg', posters_as_video=False)
    self.assertEqual(info.video.codec, 'mjpeg')
def __init__(self, index):
    """Worker thread: loads the t3 estimator network and its training
    targets, building a Norm object from the targets.

    :param index: identifier for this worker thread.
    """
    threading.Thread.__init__(self)
    self.index = index
    self.c = Converter()
    target = scipy.io.loadmat('./algorithms/t3_output.mat')
    self.net = nl.load('./algorithms/t3_estimator.net')
    t = target['t3_output']
    # Normaliser built from the training targets — presumably used to
    # (de)normalise net outputs; confirm at call sites.
    self.norm_t = nl.tool.Norm(t)
def writing_callback(rp):
    """Write the converted form of *rp*'s record to its 'converted'
    record path, creating parent directories as needed (Python 2)."""
    converted = Converter(rp.webcat_rec).dest
    path = utils.getRecordPath(rp.recId, "converted")
    dir = os.path.dirname(path)
    if not os.path.exists(dir):
        os.makedirs(dir)
    converted.write(path=path)
    print "wrote %s" % rp.recId
def test_converter_load_labelMap():
    """setLabelmap() with no path loads the default single-entry map
    equal to LabelMap(id=0, type="name")."""
    converter = Converter()
    test_labelMap = LabelMap(id=0, type="name")
    converter.setLabelmap()
    assert len(converter.labelmap) == 1
    assert converter.labelmap[0] == test_labelMap
def perform_move_from_motion_file(self, motion_name, motion_file, motion_time_type):
    """Parse *motion_file* and play the resulting motion on the robot.

    Announces the move, converts the file into joint names / times /
    keyframes, assumes the kick posture, then interpolates the angles.
    """
    print("Executing " + motion_name)
    motion_converter = Converter()
    motion_converter.parse_motion_file(motion_file, motion_time_type)
    joint_names, joint_times, joint_keys = motion_converter.get_lists()
    self.get_in_posture_for_kick()
    # Blocking call (final True): returns once the motion completes.
    self.motion_proxy.angleInterpolation(joint_names, joint_keys, joint_times, True)
def main():
    """Run the converter with the algorithm chosen on the CLI."""
    args = parser.parse_args()
    # Conditional expression instead of the if/else block.
    algo = ComplexAlgorithm if args.algorithm == "complex" else SimpleAlgorithm
    Converter(algorithm=algo).run(args.input, args.output)
def __init__(self, args, parent=None):
    """Build the widget: load icons, init the UI, create the converter
    and open the video given by args.url."""
    super().__init__(parent)
    self.args = args
    self.iconLoader = IconLoader()
    self.initUI()
    self.conv = Converter()
    # Last subtitle shown; empty until the first subtitle arrives.
    self.previousSub = ""
    self.loadVideo(Path(args.url))
class Program:
    """Pygame map-editor main program: owns the window, the grid
    overlay, the map/selector/toolbar widgets and the event/draw loop."""

    def __init__(self, arg):
        pygame.init()
        self.opt = Setting()
        self.name_map = arg.name_map
        # Window is the map area plus room for the side/bottom toolbar
        # (15 tiles wide, 3 tiles tall).
        self.screen = pygame.display.set_mode((self.opt.WIDTH + self.opt.TILE_SIZE*15,self.opt.HEIGHT + self.opt.TILE_SIZE*3))
        self.set_grill_surface()
        pygame.display.set_caption(self.opt.TITLE)
        self.path = os.path.dirname(__file__)
        self.path_img = os.path.join(self.path, ".." , "src", "img")
        self.images = Spritesheet(self)
        self.path_maps = os.path.join(self.path, ".." , "src", "maps", self.name_map + self.opt.LUA_FORMAT)
        # True when no saved map exists yet, i.e. start a fresh one.
        self.create = not os.path.exists(self.path_maps)
        self.builder = Builder(self)
        self.clock = pygame.time.Clock()
        self.converter = Converter(self)
        self.map = Map(self)
        self.selector = Selector(self)
        self.toolbar = Toolbar(self)
        self.loop()
        pygame.quit()

    def set_grill_surface(self):
        """Pre-render the white grid ('grill') overlay; black colorkey
        makes everything but the lines transparent."""
        self.grill = self.screen.copy()
        for y in range(0, self.opt.HEIGHT+1, self.opt.TILE_SIZE):
            pygame.draw.line(self.grill, (255,255,255), (0, y), (self.opt.WIDTH, y))
        for x in range(0, self.opt.WIDTH+1, self.opt.TILE_SIZE):
            pygame.draw.line(self.grill, (255,255,255), (x, 0), (x, self.opt.HEIGHT))
        self.grill.set_colorkey((0,0,0))

    def draw(self):
        """Composite map, grid, toolbar and selector, then flip."""
        self.screen.fill((0,0,0))
        self.screen.blit(self.map.screen,(0,0))
        self.screen.blit(self.grill,(0,0))
        self.toolbar.draw(self.screen)
        self.selector.draw(self.screen)
        pygame.display.flip()

    def event(self):
        """Handle quit/keyboard/mouse input.

        Left button paints (selector.click), right button erases
        (selector.remove); dragging repeats the action.  ESC toggles
        pause, S saves via the builder.
        """
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.running = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    self.pause = not self.pause
                elif event.key == pygame.K_s:
                    self.builder.save()
            elif event.type == pygame.MOUSEBUTTONDOWN:
                if event.button == 1:
                    self.selector.click()
                if event.button == 3:
                    self.selector.remove()
            elif event.type == pygame.MOUSEMOTION:
                if event.buttons[0] == 1:
                    self.selector.click()
                if event.buttons[2] == 1:
                    self.selector.remove()

    def loop(self):
        """Main loop at 50 FPS until the window is closed."""
        self.running = True
        self.pause = False
        while self.running:
            self.clock.tick(50)
            self.converter.update()
            self.selector.update()
            self.event()
            self.draw()
def __init__(self,
             kinds: Union[str, List[str]] = "all",
             iou_threshold: float = 0.5,
             input_image_key: str = "image",
             input_mask_key: str = "mask",
             input_image_name_key: str = "image_name",
             output_mask_key: str = "mask",
             output_image_metrics_key: str = "image_metrics",
             output_objects_key: str = "objects",
             output_dir: str = "visualizations",
             filename_extension: str = ".jpg",
             mask_strength: float = 0.5,
             detection_only: bool = True,
             valid_only: bool = True,
             max_images: int = None):
    """
    Visualization callback: saves image/mask overlays per result kind.

    :param kinds: list of criteria for image to be saved
        all visualizations will be saved
        for the corresponding folders of each *kind*
        which this image satisfies
    :param iou_threshold: threshold value for image metrics
        to estimate precision/recall/etc
        used while checking if image satisfies some criteria or not
    :param input_image_key: batch key holding the input image
    :param input_mask_key: batch key holding the ground-truth mask
    :param input_image_name_key: batch key holding the image name
    :param output_mask_key: output key holding the predicted mask
    :param output_image_metrics_key: output key with per-image metrics
    :param output_objects_key: output key holding detected objects
    :param output_dir: directory visualizations are written to
    :param filename_extension: extension for saved visualization files
    :param mask_strength: overlay between mask and original image
        approximately visualization are computed as
        `mask * strength + image * (1-strength)`
    :param detection_only: draw only masks for detected classes
        (not classification classes)
        may be useful to save disk space
    :param valid_only: draw only on validation stage
    :param max_images: draw visualisations for at most max_images
        per loader (None = unlimited)
    """
    # NOTE: detection_only / valid_only were annotated ``str`` in the
    # original signature; corrected to ``bool`` to match the defaults.
    super().__init__(CallbackOrder.Other)
    self.kinds = ImageResultCategories(kinds)
    self.iou_threshold = iou_threshold
    self.input_image_key = input_image_key
    self.input_mask_key = input_mask_key
    self.input_image_name_key = input_image_name_key
    self.output_mask_key = output_mask_key
    self.output_image_metrics_key = output_image_metrics_key
    self.output_objects_key = output_objects_key
    self.output_dir = output_dir
    self.filename_extension = filename_extension
    self.mask_strength = mask_strength
    self.target_map_info = TargetMapInfo()
    self.converter = Converter(self.target_map_info)
    self.detection_only = detection_only
    self.valid_only = valid_only
    self.max_images = max_images
    # Per-category image counter, initialised lazily elsewhere.
    self._categories_counter = None
def __init__(self):
    """Set directory paths, make sure a Lucene JVM is running, and
    build the wiki Converter.

    :raises RuntimeError: if the JVM cannot be initialised.
    """
    self.wiki_dir = 'wiki-pages-text.zip'
    self.index_dir = './IndexFiles.index'
    self.original_data_dir = './'
    self.dataset_dir = './Data'
    # BUG FIX: the original started the JVM inside an ``assert`` — the
    # side effect disappears under ``python -O``.  Start it explicitly.
    if not (lucene.getVMEnv() or
            lucene.initVM(vmargs=['-Djava.awt.headless=true'])):
        raise RuntimeError("Unable to initialise the Lucene JVM")
    self.converter = Converter(self.wiki_dir, self.index_dir,
                               self.original_data_dir, self.dataset_dir)
def __init__(self, ffmpeg: str = FFMPEG_BIN, ffprobe: str = FFPROBE_BIN):
    """Validate the ffmpeg/ffprobe binaries and build the converter.

    :raises FileNotFoundError: when either binary is not installed.
    """
    super().__init__()
    # De Morgan restatement of the original ``not a or not b`` check.
    both_installed = (Common.is_installed(ffmpeg)
                      and Common.is_installed(ffprobe))
    if not both_installed:
        raise FileNotFoundError("Wrong path for {} or {}".format(
            FFmpeg.FFMPEG_BIN, FFmpeg.FFPROBE_BIN))
    self.__converter = Converter(ffmpeg, ffprobe)
def test_converter_load_labelMap_with_custom_path():
    """setLabelmap(path=...) loads the map from an explicit JSON file
    under tests/resource and yields the same single entry."""
    converter = Converter()
    costum_path = os.path.join(os.getcwd(), 'tests', 'resource/labelmap.json')
    test_labelMap = LabelMap(id=0, type="name")
    converter.setLabelmap(path=costum_path)
    assert len(converter.labelmap) == 1
    assert converter.labelmap[0] == test_labelMap
def get(self, number):
    """REST endpoint: return *number* spelled out ('extenso').

    Any failure (non-integer input, translation error) is reported as
    an HTTP 400 with the error text.
    """
    spelled = ''
    try:
        spelled = Converter().translate(int(number))
    except Exception as error:
        abort(400, str(error))
    return {'extenso': spelled}
def main():
    '''Processes commandline arguments and starts the converter
    service: polls the watched directory every 30 seconds, converting
    the first pending file found on each pass.  Never returns.'''
    service_args = process_converter_service_args()
    converter = Converter()
    while True:
        pending = scan_directory(service_args.to_scan)
        if pending:
            converter.run_conversion(pending[0])
        sleep(30)
def myRecordProcessor(self, rp):
    """
    for each record processed, tallies the following fields:
    institution names and institution divisions, in separate tallies.
    """
    rec = Converter(rp.webcat_rec).dest
    instNames = rec.getFieldValues('library_dc:instName')
    self.instNameTally.tally(instNames)
    instDivs = rec.getFieldValues('library_dc:instDivision')
    self.instDivTally.tally(instDivs)
def test_converter():
    """what_to_show() on one million yards should offer the raw yard
    value, the km conversion, and the Paris->Berlin comparison."""
    obj = Converter()
    exp = [('1000000 yard', 'yard'),
           ('914.40 kilometer', '<dummy>'),
           ('1138.49 yard / kilometer * the distance Paris -> Berlin', 'length')]
    res = obj.what_to_show(1000 * 1000 * ureg.yard)
    print(res)
    assert res == exp
def __init__(self, hparams):
    """Student/teacher setup: a 4-in/4-out segmentation student plus a
    MapModel teacher loaded from checkpoint."""
    super().__init__()
    self.hparams = hparams
    self.net = SegmentationModel(4, 4)
    # NOTE(review): the teacher checkpoint root is hard-coded to a
    # user's home directory — confirm before running elsewhere.
    self.teacher = MapModel.load_from_checkpoint(pathlib.Path('/home/bradyzhou/code/carla_random/') / hparams.teacher_path)
    # self.teacher.eval()
    self.converter = Converter()
def convert_to_all_units():
    """tkinter callback: read the entry value and selected unit, then
    fill every result label with the converted value (uses the
    module-level input/radio_state/UNITS/labels widgets)."""
    val = float(input.get())
    final_unit = UNITS[radio_state.get()].lower()
    c = Converter(val, final_unit)
    conversions = c.convert()
    for i, label in enumerate(labels):
        current_unit_name = UNITS[i]
        label["text"] = f"{conversions.get(current_unit_name.lower())} {current_unit_name}"
def test_convert_to_word_document(self):
    """convert() with type 'word' should produce a .doc file next to
    the chunked text output."""
    doc_type = 'word'  # renamed from the builtin-shadowing ``type``
    online_folder = 'https://testconvert2.s3-eu-west-1.amazonaws.com'
    chunks_text_path = 'output1/20191130-2034_Test1'
    output_folder = 'output1/files'
    expected_output_path = 'output1/files/20191130-2034_Test1.doc'
    Converter().convert(doc_type, online_folder, chunks_text_path, output_folder)
    self.assertTrue(os.path.exists(expected_output_path))
def __init__(self, ffmpeg: str = FFMPEG_BIN, ffprobe: str = FFPROBE_BIN):
    """Validate the ffmpeg/ffprobe installations, wire up the shared
    converter logger, and build the underlying Converter.

    :raises: whatever Validation.is_installed raises on a bad path.
    """
    super().__init__()
    Validation.is_installed(ffmpeg, f"Wrong path for {FFmpeg.FFMPEG_BIN}")
    Validation.is_installed(ffprobe, f"Wrong path for {FFmpeg.FFPROBE_BIN}")
    # Class-level logger shared by all FFmpeg instances.
    FFmpeg.__LOG = LogManager.get_instance().get(
        LogManager.Logger.CONVERTER)
    self.__converter = Converter(ffmpeg, ffprobe)
def __init__(self, filtersAndModifiers, fromKwarg, toKwarg=None, name=None):
    """Converter subclass init: forwards the kwarg mapping to the
    Converter base and stores the filter/modifier pipeline."""
    Converter.__init__(self, name=name, fromKwarg=fromKwarg, toKwarg=toKwarg)
    self._filtersAndModifiers = filtersAndModifiers
def main():
    """CLI: convert HTML tables (files / URLs / stdin) to CSV."""
    parser = argparse.ArgumentParser(
        description='Convert HTML table to CSV format.')
    parser.add_argument(
        'input',
        help='input sources (files, URLs, etc., default: standard input)',
        nargs='*',
        default=['-'],
    )
    parser.add_argument(
        '-o', '--output',
        help='output target (default: standard output)',
        nargs='?',
        type=str,
        #type=argparse.FileType('w'),
        default=sys.stdout,
    )
    parser.add_argument(
        '-e', '--engine',
        help='HTML parser engine (default: html.parser or lxml if installed)',
    )
    parser.add_argument(
        '-V', '--version',
        action='store_true',
        help='display version',
    )
    args = parser.parse_args()
    if args.version:
        print(f'{__package__} {__version__}')
        return
    converter = Converter(**vars(args))
    for input_source in args.input:
        # '-' or empty means read the document from stdin.
        if not input_source or input_source == '-':
            html_doc = sys.stdin.read()
        else:
            path = pathlib.Path(input_source)
            if path.exists():
                html_doc = path.read_text()
            else:
                # Not a local file: treat the source as a URL.
                response = requests.get(input_source)
                html_doc = response.text
        output = converter.convert(html_doc)
        i = 1
        for csv_string, _ in output:
            # BUG FIX: the original always treated args.output as a
            # path string, crashing on the sys.stdout default.
            if args.output is sys.stdout:
                sys.stdout.write(csv_string)
            else:
                path = os.path.dirname(os.path.abspath(args.output))
                # Prefix each table's file with its 1-based index.
                filename = str(i) + args.output.split("/")[-1]
                makeCsvFile(path + "/" + filename, csv_string)
            i += 1
def fill(self, sender, step):
    """Store one questionnaire answer on this player object.

    Step mapping: 1 -> overall (int), 2 -> age (int),
    3 -> position (converted), 4 -> preferred foot (converted).
    """
    converter = Converter()
    # Guard-clause style instead of a single if/elif chain.
    if step == 1:
        self.overall = int(sender)
        return
    if step == 2:
        self.age = int(sender)
        return
    if step == 3:
        self.position = converter.convertPosition(sender)
        return
    if step == 4:
        self.preferredFoot = converter.convertPreferredFoot(sender)
def real_convert_image():
    """Inner callback: convert the selected image to audio with the
    chosen method, save the result, and enable the play button."""
    self.method = self.value_to_method[self.selected.get()]
    self.converter = Converter(self.method, self.image_path, tone_length=self.tone_length)
    # NOTE(review): this assigns an attribute rather than calling a
    # setter — confirm Converter reads ``set_null_colour`` as a field.
    self.converter.set_null_colour = "white"  # white background
    #self.converter.force_null = True
    self.audio = self.converter.convert()
    self.audio_path = self.converter.save_audio()
    #self.btn1['state'] = 'normal'
    self.image_converted = True
    self.btn3["state"] = "normal"
    self.btn3.configure(bg=self.green)
def test_converter_vp8_codec(self):
    """Conversion to webm (vp8 video / vorbis audio) should run to
    completion with progress reported."""
    c = Converter()
    conv = c.convert('test1.ogg', self.video_file_path, {
        'format': 'webm',
        'video': {
            'codec': 'vp8',
            'width': 160,
            'height': 120,
            'fps': 15,
            'bitrate': 300},
        'audio': {
            'codec': 'vorbis',
            'channels': 1,
            'bitrate': 32}
    })
    self.assertTrue(verify_progress(conv))
def __init__(self, filename):
    """Parse *filename* into a dictionary via Converter.

    Records are separated by blank lines; each blank line converts the
    accumulated buffer and resets it.

    NOTE(review): the handle stays open on self.file, and a trailing
    record with no final blank line is never converted — also each
    conversion overwrites self.dictionary, so only the last record
    survives.  Confirm all three are intentional.
    """
    self.file = open(filename, 'r')
    buffer = ''
    for line in self.file.readlines():
        line = line.strip()
        if line == '':
            converter = Converter(buffer)
            self.dictionary = converter.get_dictionary()
            buffer = ''
        else:
            buffer = buffer + '\n' + line
def test_convert_with_additional_option(self):
    """Conversion to ogg (theora/vorbis) with explicit size/fps/bitrate
    options should run to completion with progress reported."""
    c = Converter()
    conv = c.convert('test1.ogg', self.video_file_path, {
        'format': 'ogg',
        'video': {
            'codec': 'theora',
            'width': 160,
            'height': 120,
            'fps': 15,
            'bitrate': 300},
        'audio': {
            'codec': 'vorbis',
            'channels': 1,
            'bitrate': 32}
    })
    self.assertTrue(verify_progress(conv))
def test_concat(self):
    """concat() should join the same source twice into one output with
    progress reported."""
    c = Converter()
    conv = c.concat(['test1.ogg', "test1.ogg"], self.video_file_path, {
        'format': 'ogg',
        'video': {
            'codec': 'theora',
            'width': 160,
            'height': 120,
            'fps': 15,
            'bitrate': 300},
        'audio': {
            'codec': 'vorbis',
            'channels': 1,
            'bitrate': 32}
    }, temp_dir=self.temp_dir)
    self.assertTrue(verify_progress(conv))
def o2sc():
    """Flask view: accept an uploaded file (or zip of files), convert
    each via Converter.convert_file using the form parameters, and
    return the converted result as an attachment (zips are unpacked,
    converted per-file and re-zipped)."""
    app = current_app._get_current_object()
    if request.method == 'POST':
        # get the form data
        file = request.files['file']
        delimiter = str(request.form['delimiter'])
        quote_char = str(request.form['quote-char'])
        doc_type = str(request.form['doc-type'])
        file_path_field = str(request.form['file-path-field'])
        unique_id = str(request.form['unique-id'])
        omit = str(request.form['omit'])
        header_mapping = str(request.form['header-mapping'])
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            c = Converter()
            if '.zip' in filename:
                # Unzip, convert every contained file, then re-zip.
                z = Zip()
                extracted_list = z.unzip(app.config['UPLOAD_FOLDER'], filename)
                for filename in extracted_list:
                    c.convert_file(app.config['UPLOAD_FOLDER'], app.config['DOWNLOAD_FOLDER'], filename, delimiter, quote_char, doc_type, file_path_field, unique_id, omit, header_mapping)
                zip_archive = z.zip(app.config['DOWNLOAD_FOLDER'], extracted_list)
                response = make_response(zip_archive)
                response.headers["Content-Disposition"] = "attachment; filename=_converted_zip_archive.zip"
                return response
            else:
                converted_file = c.convert_file(app.config['UPLOAD_FOLDER'], app.config['DOWNLOAD_FOLDER'], filename, delimiter, quote_char, doc_type, file_path_field, unique_id, omit, header_mapping)
                response = make_response(converted_file)
                response.headers["Content-Disposition"] = "attachment; filename=_converted_" + filename
                return response
    # GET (or rejected upload): show the upload form.
    return render_template('o2sc/upload.html')
def answerIsValid(self, sender):
    """Validate one questionnaire answer according to self.step.

    Step 1: overall rating in 1..99.  Step 2: age in 16..44.
    Step 3: a position recognised by Converter.checkPosition.
    Step 4: preferred foot, 'destro' or 'canhoto' (Portuguese for
    right-footed / left-footed).  Any other step is accepted.
    """
    converter = Converter()
    if self.step == 1:
        try:
            overall: int = int(sender)
            if overall > 99:
                return False
            elif overall <= 0:
                return False
            else:
                return True
        except ValueError:
            return False
    if self.step == 2:
        try:
            age = int(sender)
            if age < 16:
                return False
            elif age > 44:
                return False
            return True
        except ValueError:
            return False
    if self.step == 3:
        # NOTE(review): str()/upper() cannot raise ValueError — the
        # try/except in this and the next branch is effectively dead.
        try:
            position = str(sender).upper()
            if not converter.checkPosition(position):
                return False
            return True
        except ValueError:
            return False
    if self.step == 4:
        try:
            preferredFoot = str(sender).lower()
            if preferredFoot == "destro" or preferredFoot == "canhoto":
                return True
            return False
        except ValueError:
            return False
    else:
        # Any step other than 4 that fell through is considered valid.
        return True
def load(cls, type_name):
    """Load the JSON definition for *type_name* from the class data
    directory, complement it via Converter.complement_icpm, and return
    it as an OrderedDict.

    :raises IOError: when the JSON file does not exist or is unreadable.
    """
    file_name = cls.data_directory_path + type_name.lower() + '.json'
    # BUG FIX: the original swallowed IOError with ``pass`` and then
    # used the unbound ``fp``, raising UnboundLocalError instead of a
    # meaningful error.  (A commented-out fallback read "ゲート変換
    # データベースによる分解" — decompose via the gate-conversion
    # database — but cls.decompose() was never wired up.)
    with open(file_name, 'r') as fp:
        json_object = json.load(fp, object_pairs_hook=OrderedDict)
    Converter.complement_icpm(json_object)
    return json_object
def test_detect_ecb_mode():
    """
    Taken from cryptopals set 1 challenge 8.

    The first ciphertext contains repeated 16-byte blocks (the ECB
    giveaway); the second does not.
    """
    ecb_ciphertext_hex = "d880619740a8a19b7840a8a31c810a3d08649af70dc06f4fd5d2d69c744cd283e2dd052f6b641dbf9d11b0348542bb5708649af70dc06f4fd5d2d69c744cd2839475c9dfdbc1d46597949d9c7e82bf5a08649af70dc06f4fd5d2d69c744cd28397a93eab8d6aecd566489154789a6b0308649af70dc06f4fd5d2d69c744cd283d403180c98c8f6db1f2a3f9c4040deb0ab51b29933f2c123c58386b06fba186a"
    ecb_ciphertext = Converter(ecb_ciphertext_hex, input_type="hex").bytes()
    assert attacks.detect_ecb_mode(ecb_ciphertext, 16)
    non_ecb_ciphertext_hex = "8a10247f90d0a05538888ad6205882196f5f6d05c21ec8dca0cb0be02c3f8b09e382963f443aa514daa501257b09a36bf8c4c392d8ca1bf4395f0d5f2542148c7e5ff22237969874bf66cb85357ef99956accf13ba1af36ca7a91a50533c4d89b7353f908c5a166774293b0bf6247391df69c87dacc4125a99ec417221b58170e633381e3847c6b1c28dda2913c011e13fc4406f8fe73bbf78e803e1d995ce4d"
    non_ecb_ciphertext = Converter(non_ecb_ciphertext_hex, input_type="hex").bytes()
    assert not attacks.detect_ecb_mode(non_ecb_ciphertext, 16)
def __init__(self, file, FFMPEG_PATH="FFMPEG.exe", FFPROBE_PATH="FFPROBE.exe", delete=True, output_extension="mp4", output_dir=None): #Get path information from the input file working_dir, filename = os.path.split(file) filename, input_extension = os.path.splitext(filename) input_extension = input_extension[1:] #If no custom output directory is set, assume same directory as input file if output_dir is None: output_dir = working_dir c = Converter(FFMPEG_PATH, FFPROBE_PATH) info = c.probe(file) self.height = info.video.video_height self.width = info.video.video_width if input_extension in valid_input_extensions and output_extension in valid_output_extensions: acodec = "aac" vcodec = "h264" achannels = info.audio.audio_channels print "Video codec detected: " + info.video.codec print "Audiocodec detected: " + info.audio.codec print "Channels detected: " + str(achannels) if info.video.codec == "h264" or info.video.codec == "x264": vcodec = "copy" if info.audio.codec == "aac": acodec == "copy" options = { 'format': 'mp4', 'audio': { 'codec': acodec, 'channels': achannels, 'bitrate': 448, 'language': "eng", }, 'video': { 'codec': vcodec, }, } self.output = os.path.join(output_dir, filename + "." + output_extension) conv = c.convert(file, self.output, options) for timecode in conv: print '[{0}] {1}%'.format('#'*(timecode/10) + ' '*(10-(timecode/10)), timecode, end='\r') print "Conversion complete" if delete: try: os.remove(file) print file + " deleted" except OSError: print "Unable to delete " + file elif input_extension in valid_output_extensions: self.output = file else: print file + " - file not in the correct format" sys.exit()
def test_concat_with_option_per_file(self):
    """concat() accepts (filename, extra_ffmpeg_args) tuples: the
    first part is transposed, the second trimmed to 00:00:05-00:00:25.

    (Removed the original's dead trailing ``pass`` statement.)
    """
    c = Converter()
    conv = c.concat([('test1.ogg', ['-vf', 'transpose=2']),
                     ("test1.ogg", ["-ss", "00:00:05", "-to", "00:00:25"])],
                    self.video_file_path, {
        'format': 'ogg',
        'video': {
            'codec': 'theora',
            'width': 160,
            'height': 120,
            'fps': 15,
            'bitrate': 300},
        'audio': {
            'codec': 'vorbis',
            'channels': 1,
            'bitrate': 32}
    }, temp_dir=self.temp_dir)
    self.assertTrue(verify_progress(conv))
def dumpAudio(videoPath):
    """Extract the audio track of *videoPath* into a temporary WAV file
    (pcm_s16le) and return its path (Python 2)."""
    converter = Converter()
    f, path = tempfile.mkstemp(prefix='recognizer')
    conv = converter.convert(videoPath, path, {
        'format': 'wav',
        'audio': {
            'codec': 'pcm_s16le'
        }
    })
    # Draining the generator performs the actual conversion.
    for timecode in conv:
        print "Converting {} ...\r".format(timecode)
    print 'Audio dumped into {}'.format(path)
    return path
def get_original(self):
    """Here, we try to find inktex objects among the selected svg
    elements when the dialog was opened.

    Returns (node, decoded source) for the first selected <svg:g>
    carrying an inktex:src attribute, else (None, None).
    (Python 2: relies on str.decode('string-escape').)
    """
    src_attrib = Converter.add_ns('src', ns=u'inktex')
    g_tag = Converter.add_ns('g', ns=u'svg')
    for i in self.options.ids:
        node = self.selected[i]
        if node.tag == g_tag and src_attrib in node.attrib:
            return node, \
                node.attrib.get(src_attrib, '').decode('string-escape')
    return None, None
def test_converter_2pass(self):
    """Two-pass conversion to ogg (theora/vorbis) should complete and
    must not mutate the caller's options dict.

    NOTE(review): near-duplicate of another test_converter_2pass in
    this file (which also sets self.video_file_path).
    """
    c = Converter()
    options = {
        'format': 'ogg',
        'audio': {'codec': 'vorbis', 'samplerate': 11025, 'channels': 1, 'bitrate': 16},
        'video': {'codec': 'theora', 'bitrate': 128, 'width': 360, 'height': 200, 'fps': 15}
    }
    # Snapshot to detect mutation of the options by convert().
    options_repr = repr(options)
    conv = c.convert('test1.ogg', self.video_file_path, options, twopass=True)
    verify_progress(conv)
    # Convert should not change options dict
    self.assertEqual(options_repr, repr(options))
    self._assert_converted_video_file()
def create_sql(self, dxd_file, port, sensor, response, sql_file, begin_time=None, append=False):
    """Read raw sensor rows from a .dxd file and write INSERT
    statements to *sql_file*.

    :param response: sensor response channel passed to the converter
    :param begin_time: datetime; rows at or before it are skipped
        (prevents inserting duplicate values on re-runs)
    :param append: open the SQL file in append mode instead of
        truncating it
    :return: None when the dxd file yields no data, otherwise None
        after writing the file
    """
    raw_data = dxd.read_dxd(dxd_file, port)
    if raw_data is None:
        return None
    #begin time to prevent insertion of duplicate values
    begin_timestamp = None
    if begin_time is not None:
        begin_timestamp = calendar.timegm(begin_time.utctimetuple())
    nr = len(raw_data["dates"])
    c = Converter.create(sensor)
    # ``with`` guarantees the handle is closed even if a conversion
    # raises mid-loop (the original leaked it on error).
    with open(sql_file, 'a' if append else 'w') as f:
        for row in range(0, nr):
            raw_time = raw_data["dates"][row]
            raw_val = raw_data["vals"][row]
            utc_time = self.decagon_time_utc(raw_time)
            local_time = self.decagon_time_local(raw_time)
            utc_timestamp = time.mktime(utc_time)
            if begin_timestamp is not None and utc_timestamp <= begin_timestamp:
                continue
            val = c.convert(response, raw_val)
            sql = self.insert_values(utc_time, local_time, val,
                                     self.site_id, self.var_id,
                                     self.meth_id, self.src_id,
                                     self.qc_id)
            f.write(sql)
            f.write('\n')
def test_srsnr(self):
    """SRS-Nr sensor conversion: one raw reading across the three
    response channels, plus a second raw value on channel 3."""
    c = Converter.create("SRS-Nr")
    raw = 46495837
    self.assertAlmostEqual(c.convert(1, raw), 0.0169, 3)
    self.assertAlmostEqual(c.convert(2, raw), 0.0206, 3)
    self.assertAlmostEqual(c.convert(3, raw), 0.387, 3)
    self.assertAlmostEqual(c.convert(3, 46766311), 0.381, 3)
def test_probe_audio_poster(self):
    """An mp3 with embedded cover art: with posters_as_video the art
    appears as the video stream; without it, it is listed under
    info.posters instead."""
    c = Converter()
    info = c.probe('test.mp3', posters_as_video=True)
    self.assertNotEqual(None, info.video)
    self.assertEqual(info.video.attached_pic, 1)
    info = c.probe('test.mp3', posters_as_video=False)
    self.assertEqual(None, info.video)
    self.assertEqual(len(info.posters), 1)
    poster = info.posters[0]
    self.assertEqual(poster.type, 'video')
    self.assertEqual(poster.codec, 'png')
    self.assertEqual(poster.video_width, 32)
    self.assertEqual(poster.video_height, 32)
    self.assertEqual(poster.attached_pic, 1)
def evaluate_tonic(self, estimated, annotated, source=None): est_cent = Converter.hz_to_cent(estimated, annotated) # octave wrapping cent_diff = est_cent % self.CENT_PER_OCTAVE # check if the tonic is found correct bool_tonic = (min([cent_diff, self.CENT_PER_OCTAVE - cent_diff]) < self.tonic_tolerance) # convert the cent difference to symbolic interval (P5, m3 etc.) interval = None for i in self.INTERVAL_SYMBOLS: if i[1] <= cent_diff < i[2]: interval = i[0] break elif cent_diff == 1200: interval = 'P1' break # if they are in the same octave the the estimated and octave-wrapped # values should be the same (very close) same_octave = (est_cent - cent_diff < 0.001) return {'mbid': source, 'tonic_eval': bool_tonic, 'same_octave': same_octave, 'cent_diff': cent_diff, 'interval': interval, 'annotated_tonic': annotated, 'estimated_tonic': estimated}
class VideoConverter:
    """Extracts/converts the audio of media files via Converter
    (Python 2)."""

    def __init__(self, codec="mp3", samplerate=44100, bitrate=256, channels=2):
        self.codec = codec
        self.samplerate = samplerate
        self.bitrate = bitrate
        self.channels = channels
        self.converter = Converter()

    def convert(self, filename, conversion_dir, file_dir):
        """Convert one file into *conversion_dir*.

        Returns 0 after a fresh conversion, 1 when the target already
        exists.  NOTE(review): on a conversion error this falls
        through and returns None — confirm callers expect that.
        """
        if not os.path.isdir(conversion_dir):
            os.makedirs(conversion_dir)
        converted_filename = filename.split(".")[0] + ".%s" % self.codec
        if not glob.glob(conversion_dir + converted_filename):
            #Check to see if the file was already converted or not
            print "Converting %s" % filename
            try:
                conv = self.converter.convert(file_dir + filename, conversion_dir + converted_filename, {"format": self.codec, "audio": {
                    "codec": self.codec,
                    "bitrate": self.bitrate,
                    "samplerate": self.samplerate,
                    "channels": self.channels
                }})
                # Draining the generator performs the conversion.
                for timecode in conv:
                    pass
                return 0
            except Exception as e:
                print str(e)
        else:
            print "Already converted: %s" % converted_filename
            return 1
def sensor_upload(self, site_id, site_code, variable_id, method_id, source_id, upload_file, port, sensor, resp, logger):
    """Build a JSON payload of new sensor values and POST it to the
    HydroServer 'values' endpoint (Python 2).

    Values come either from a manual xls upload (when
    self.manual_upload_file is set) or from a .dxd logger file, with
    raw readings converted via the sensor-specific Converter.  Only
    values newer than self.old_timestamp are uploaded.
    """
    new_data = {
        "user": self.HYDROSERVER_USER,
        "password": self.HYDROSERVER_PASSWORD,
        "SiteID": site_id,
        "VariableID": variable_id,
        "MethodID": method_id,
        "SourceID": source_id,
        "values": []
    }
    #reading the new data from the dxd file
    if (self.manual_upload_file != None):
        # NOTE(review): this branch reads ``u.manual_upload_file`` while
        # the condition checks ``self.manual_upload_file`` — ``u`` looks
        # like a stale module-level alias; confirm.
        print (str(variable_id), str(site_code), str(u.manual_upload_file.name), str(port), str(self.old_timestamp), str(logger), str(self.xlsfile))
        new_data['values'] = decagon.read_xls(variable_id, site_code, u.manual_upload_file.name, port, self.old_timestamp, logger, self.xlsfile)
    else:
        raw_data = decagon.read_dxd(upload_file, port)
        #converting the data from raw data to actual values
        nr = len(raw_data["dates"])
        c = Converter.create(sensor)
        for row in range(0, nr):
            raw_time = raw_data["dates"][row]
            raw_val = raw_data["vals"][row]
            # 946684800 is presumably the device-epoch offset
            # (2000-01-01 UTC in Unix seconds) — TODO confirm.
            local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(raw_time + 946684800))
            local_time_obj = parse(local_time)
            val = c.convert(resp, raw_val)
            #only upload the values more recent than the old latest update
            if self.old_timestamp != "none":
                if local_time_obj > self.old_timestamp:
                    new_data["values"].append((local_time, val))
            else:
                print "Error: No timestamp given for latest update. Rerun with timestamp"
                sys.exit()
    #if there's no data, return
    if len(new_data["values"]) <= 0:
        if self.verbose:
            print "No data to upload: " + str(new_data)
        return
    #the data is sent in the JSON format as the body of the request
    payload = json.dumps(new_data)
    print "payload " + str(payload)
    url = self.HYDROSERVER_URL + 'values'
    req = urllib2.Request(url)
    req.add_header('Content-Type', 'application/json')
    if self.no_upload:
        print "No Upload option set, data will not be uploaded"
    else:
        #upload the data to the web and check for any error status codes
        try:
            response = urllib2.urlopen(req, payload)
            status = json.load(response)
            print status
        except urllib2.HTTPError, e:
            print e.code
            print e.msg
            print e.headers
            print e.fp.read()
def convert_file(original_file, new_file, callback=None):
    """Convert *original_file* to a 2-channel MP3 at *new_file*.

    When *callback* is given it receives progress as a fraction in
    [0, 1], ending with a final call of 1.  Sample-rate and bitrate
    are left to the converter's defaults.
    """
    print("START CONVERSION")
    mp3_options = {"format": "mp3", "audio": {"codec": "mp3", "channels": 2}}
    progress_iter = Converter().convert(original_file, new_file, mp3_options, timeout=None)
    if not callback:
        # Drain the generator so the conversion actually runs.
        for _ in progress_iter:
            pass
    else:
        for percent in progress_iter:
            callback(percent / 100)
        callback(1)
    print("FINISHED CONVERSION")
def test_add_audio(self):
    """Muxing an extra audio track: video copied from the first input,
    audio pulled from test.mp3 via extra ffmpeg params."""
    c = Converter()
    i = c.convert('test1.ogg', '/tmp/audio.ogg', options={
        'format': 'mov',
        # Map stream 0 of the first input plus all of the second.
        'params': ['-i', 'test.mp3', '-map', '0:0', '-map', '1'],
        'video': {
            'codec': 'copy',
        },
        'audio': {
            'codec': 'mp3',
            'bitrate': 128 * 1024,
            'channels': 1,
        }
    })
    # Drain the generator to run the conversion.
    list(i)