def main():
    """Interactive entry point for the Podcast Generator.

    Repeatedly asks for keywords, generates a podcast, and lets the user
    read it aloud ('r'), save it as MP3 ('m'), or save it as PDF ('p').
    """
    while True:
        try:
            generator = PodcastGenerator()
            keywords = Helper.get_keyword()
            number_of_podcast = generator.generate_podcast(keywords)
            if number_of_podcast > 0:
                while True:
                    read_or_save = Helper.ask_read_or_save()
                    if read_or_save == "r":
                        # Read the generated text aloud.
                        mp3_converter = Converter()
                        mp3_converter.parameter_settings()
                        mp3_converter.speak(generator.text)
                        print("Thank you for using the Podcast Generator. We hope to see you soon!")
                        break
                    if read_or_save == "m":
                        # Save the generated text as an MP3 file.
                        mp3_converter = Converter()
                        mp3_converter.parameter_settings()
                        mp3_file_name = Helper.ask_name_mp3()
                        mp3_converter.save_as_mp3(generator.text, f"{mp3_file_name}.mp3")
                        print("Thank you for using the Podcast Generator. We hope to see you soon!")
                        break
                    if read_or_save == "p":
                        # Save the generated text as a PDF file.
                        pdf_file_name = Helper.ask_name_pdf()
                        mp3_converter = Converter()
                        mp3_converter.save_as_pdf(generator.text, f"{pdf_file_name}.pdf")
                        print("Thank you for using the Podcast Generator. We hope to see you soon!")
                        break
                    else:
                        # This 'else' is attached to the 'p' check only, but the
                        # earlier branches already 'break', so it runs exactly
                        # when no valid option was entered.
                        print(r"Your answer may not comply, please note that you may only press 'r', 'm' or 'p'.")
                # NOTE(review): the breaks above exit only the inner loop; the
                # outer loop then prompts for a new keyword despite printing a
                # farewell message — confirm whether exiting was intended.
            else:
                print(f"No podcast with Keyword {keywords} found. Please rerun the app")
                break
        except AttributeError:
            print("Ups an error has occurred. Please try another keyword.")
def start(self):
    """Run a simple DNS proxy loop.

    Answers incoming UDP DNS queries from the local cache when possible;
    otherwise forwards the raw query to an upstream server, relays the
    response back, and caches every record from the answer.
    """
    while True:
        try:
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
                sock.bind(('192.168.1.153', 53))
                data, addr = sock.recvfrom(1024)
                parse_request = Converter(data)
                # Cache key is (query name, query type).
                is_contain, value = self.cache.try_get_item(
                    (parse_request.name, parse_request.q_type))
                if is_contain:
                    # Cache hit: build the answer locally and reply.
                    p = parse_request.make_answer(value[2], value[0])
                    sock.sendto(p, addr)
                else:
                    # Cache miss: forward the query upstream on a second
                    # socket and relay the raw response to the client.
                    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as dns:
                        dns.bind(("192.168.1.153", 55555))
                        dns.sendto(data, ("ns1.e1.ru", 53))
                        out = dns.recvfrom(1024)[0]
                        sock.sendto(out, addr)
                        parse_answer = Converter(out)
                        # Cache every record parsed from the upstream answer.
                        for info in parse_answer.info:
                            self.cache.put(*info)
            # Visual separator between handled requests.
            print("-" * 30)
        except Exception as e:
            # Log and re-raise: the loop does not attempt recovery.
            print("Was exception")
            print(e)
            raise
        else:
            # NOTE(review): this 'else' runs after EVERY successful iteration,
            # calling self.stop() each time while the loop keeps running —
            # confirm whether stop-on-success per request is intended.
            self.stop()
def getDimensions(self, dim_file, ignorecrop):
    """Return the video dimensions (and crop offsets) of *dim_file*.

    Args:
        dim_file: Path to the media file to inspect.
        ignorecrop: When True, skip auto-crop detection even if enabled.

    Returns:
        dict with keys 'y' (height), 'x' (width), 'y_offset', 'x_offset';
        all zeros when the source file is not a valid input.
    """
    if self.validSource(dim_file):
        if self.auto_crop and not ignorecrop:
            info = Converter(self.FFMPEG_PATH, self.FFPROBE_PATH).crop_detect(dim_file)
            result = {
                'y': info.height,
                'x': info.width,
                'y_offset': info.y_offset,
                'x_offset': info.x_offset
            }
            # Bug fix: 'result' is a dict, so item access is required here —
            # the original 'result.y_offset' raised AttributeError.
            self.log.debug("Y Crop Offset: %s" % result['y_offset'])
            self.log.debug("X Crop Offset: %s" % result['x_offset'])
        else:
            info = Converter(self.FFMPEG_PATH, self.FFPROBE_PATH).probe(dim_file)
            result = {
                'y': info.video.video_height,
                'x': info.video.video_width,
                'y_offset': 0,
                'x_offset': 0
            }
        self.log.debug("Height: %s" % result['y'])
        self.log.debug("Width: %s" % result['x'])
        return result
    return {'y': 0, 'x': 0, 'y_offset': 0, 'x_offset': 0}
def hamming_distance(a, b):
    """Return the number of differing bits between two equal-length inputs."""
    assert len(a) == len(b)
    # First convert both inputs to bit sequences, then count mismatches.
    bits_a = Converter(a).bits()
    bits_b = Converter(b).bits()
    distance = 0
    for bit_a, bit_b in zip(bits_a, bits_b):
        if bit_a != bit_b:
            distance += 1
    return distance
def test_detect_ecb_mode():
    """
    Taken from cryptopals set 1 challenge 8.
    """
    # A ciphertext with repeated 16-byte blocks: ECB must be detected.
    positive_hex = "d880619740a8a19b7840a8a31c810a3d08649af70dc06f4fd5d2d69c744cd283e2dd052f6b641dbf9d11b0348542bb5708649af70dc06f4fd5d2d69c744cd2839475c9dfdbc1d46597949d9c7e82bf5a08649af70dc06f4fd5d2d69c744cd28397a93eab8d6aecd566489154789a6b0308649af70dc06f4fd5d2d69c744cd283d403180c98c8f6db1f2a3f9c4040deb0ab51b29933f2c123c58386b06fba186a"
    positive_bytes = Converter(positive_hex, input_type="hex").bytes()
    assert attacks.detect_ecb_mode(positive_bytes, 16)

    # A ciphertext without repeated blocks: ECB must NOT be detected.
    negative_hex = "8a10247f90d0a05538888ad6205882196f5f6d05c21ec8dca0cb0be02c3f8b09e382963f443aa514daa501257b09a36bf8c4c392d8ca1bf4395f0d5f2542148c7e5ff22237969874bf66cb85357ef99956accf13ba1af36ca7a91a50533c4d89b7353f908c5a166774293b0bf6247391df69c87dacc4125a99ec417221b58170e633381e3847c6b1c28dda2913c011e13fc4406f8fe73bbf78e803e1d995ce4d"
    negative_bytes = Converter(negative_hex, input_type="hex").bytes()
    assert not attacks.detect_ecb_mode(negative_bytes, 16)
def test_find_single_byte_xor_key():
    """
    Taken from cryptopals set 1 challenge 3.
    """
    encrypted_hex = "1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736"
    expected_plaintext = "Cooking MC's like a pound of bacon"

    # Recover the key, decrypt, and compare against the known plaintext.
    encrypted_bytes = Converter(encrypted_hex, input_type="hex").bytes()
    key_guess = attacks.find_single_byte_xor_key(encrypted_bytes)
    decrypted_bytes = ciphers.xor_single_byte(key_guess, encrypted_bytes)
    actual_plaintext = Converter(decrypted_bytes).string()

    assert expected_plaintext == actual_plaintext
def test_xor_repeating_key_bytes():
    """
    Taken from cryptopals set 1 challenge 5.
    """
    plaintext = ("Burning 'em, if you ain't quick and nimble\n"
                 "I go crazy when I hear a cymbal")
    expected = (
        "0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a2622632427276527"
        "2a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f"
    )

    # Encrypt with the repeating key "ICE" and compare hex output.
    message_bytes = Converter(plaintext).bytes()
    encrypted_bytes = ciphers.xor_repeating_key_bytes(message_bytes, b"ICE")
    encrypted_hex = Converter(encrypted_bytes).hex()

    assert expected == encrypted_hex
def start_test_CART(train_len, test_len):
    """Train a CART decision tree on gender data and evaluate it.

    Args:
        train_len: Number of training samples to draw.
        test_len: Number of test samples (drawn from the end of the data
            via a negative index).

    Returns:
        The result of test() for the fitted model on the held-out data.
    """
    # Renamed the local from 'list' to avoid shadowing the builtin.
    train_list = make_list_gender(train_len)
    from converter import Converter
    converter = Converter()
    [X, y] = get_X_y(train_list, converter)
    print("len list", len(train_list))
    print("len converter", len(converter.mass))
    model = DecisionTreeClassifier()
    # fit() returns the estimator itself; no need to keep the result.
    model.fit(X, y)
    # Evaluate on fresh data taken from the opposite end of the dataset.
    test_list = make_list_gender(-test_len)
    print("len test_list", len(test_list))
    [test_X, test_y] = get_X_y(test_list, converter)
    return test(test_X, test_y, model)
def submit_task():
    """Pick a random source video, choose a random lower target resolution,
    and shell out to submit_task.py to enqueue one transcoding task."""
    while True:
        fname = choice(video_set)
        path = os.path.join(base_path, fname)
        c = Converter()
        info = c.probe(path)
        o_w = info.video.video_width
        o_h = info.video.video_height
        service_type = choice(service_set)
        t_w = -1
        t_h = -1
        # Scan for the first candidate resolution strictly smaller than
        # the source in both dimensions.
        i = 0
        while i < 3 and not (y_w[i] < o_w and y_h[i] < o_h):
            i += 1
        if i == 3:
            # No candidate fits this source; draw another video.
            continue
        # Pick any valid (smaller) resolution at or above the first fit.
        i = random.randint(i, 2)
        resolution = '%sx%s' % (y_w[i], y_h[i])
        cmd = ('python ../submit_task.py -l ' + path + ' -s ' + resolution +
               ' -p ' + str(service_type) + ' >> task_id ')
        os.system(cmd)
        return
def converter_thread(in_path: Optional[str], out_path: Optional[str]):
    """Probe a media file and print the probe info and elapsed time.

    Bug fix: the original ignored *in_path* entirely and always probed a
    hard-coded file; the parameter is now honoured, with the historical
    path kept as a fallback for backward compatibility.

    Args:
        in_path: Path of the media file to probe; falls back to the old
            hard-coded path when None/empty.
        out_path: Currently unused.
            NOTE(review): confirm whether conversion output was intended.
    """
    time_start = time()
    c = Converter()
    path = in_path or '/Users/deniz/Desktop/gondola/1587562550752.webm'
    info = c.probe(path)
    print(info)
    print(f'Elapsed Time: {time() - time_start}s')
def prepare_acquisition_pulse(self):
    """
    Prepare the acquisition of counts.
    It prepares the pulse pattern and set the wait time.
    """
    _debug('GUISaturation: prepare_acquisition')

    # Create the data array from counting
    # Prepare DIO1 in state 1
    self.fpga.prepare_DIOs([1], [1])
    # Get the actual DIOs, because there might be other DIOs open.
    self.dio_states = self.fpga.get_DIO_states()
    # Convert the instruction into the data array
    conver = Converter()  # Load the converter object.
    self.count_time_ms = self.treeDic_settings['Count_time']
    # Count time is in ms; tickDuration presumably in us — TODO confirm units.
    nb_ticks = self.count_time_ms * 1e3 / (conver.tickDuration)
    self.data_array = conver.convert_into_int32([(nb_ticks, self.dio_states)])

    # Upate the waiting time from the settings tree before arming the FPGA.
    self.wait_after_AOs_us = self.treeDic_settings['Wait_after_AOs']
    self.fpga.prepare_wait_time(self.wait_after_AOs_us)

    # Send the data_array to the FPGA
    self.fpga.prepare_pulse(self.data_array)
def __init__(self, dataset_dir, transform=transforms.ToTensor()):
    """Index one recorded driving episode.

    Expects subfolders 'measurements' (*.json), 'rgb', 'rgb_left',
    'rgb_right' and 'topdown' (*.png), one measurement per frame.

    Args:
        dataset_dir: Root directory of the episode.
        transform: Image transform used by the loader.
            NOTE(review): the default is evaluated once at definition time
            and shared across instances — harmless if ToTensor is stateless.
    """
    dataset_dir = Path(dataset_dir)

    measurements = list(
        sorted((dataset_dir / 'measurements').glob('*.json')))

    self.transform = transform
    self.dataset_dir = dataset_dir
    self.frames = list()
    # SECURITY NOTE(review): eval() executes arbitrary code from the
    # measurement files; prefer json.loads / ast.literal_eval if the
    # files can come from an untrusted source.
    self.measurements = pd.DataFrame(
        [eval(x.read_text()) for x in measurements])
    self.converter = Converter()

    print(dataset_dir)

    for image_path in sorted((dataset_dir / 'rgb').glob('*.png')):
        frame = str(image_path.stem)

        # Every camera view and the topdown map must exist for this frame,
        # and the frame index must have a matching measurement row.
        assert (dataset_dir / 'rgb_left' / ('%s.png' % frame)).exists()
        assert (dataset_dir / 'rgb_right' / ('%s.png' % frame)).exists()
        assert (dataset_dir / 'topdown' / ('%s.png' % frame)).exists()
        assert int(frame) < len(self.measurements)

        self.frames.append(frame)

    assert len(self.frames) > 0, '%s has 0 frames.' % dataset_dir
def __init__(self, arg):
    """Set up the map-editor window, load or create the map, and run the loop.

    Args:
        arg: Parsed CLI arguments; only arg.name_map (the map name) is read.
    """
    # setting
    self.name_map = arg.name_map
    self.opt = Setting()

    # pygame
    pygame.init()
    self.screen = pygame.display.set_mode(self.opt.WINDOW_SIZE)
    pygame.display.set_caption(self.opt.TITLE % self.name_map)
    self.clock = pygame.time.Clock()
    self.set_grill_surface()

    # data: locate the map file relative to this source file.
    self.path = os.path.dirname(__file__)
    self.path_img = os.path.join(self.path, "..", "src", "img")
    file = self.name_map + self.opt.LUA_FORMAT
    self.path_maps = os.path.join(self.path, "..", "src", "maps", file)
    # Start a fresh map only when no saved file exists yet.
    self.create = not os.path.exists(self.path_maps)

    # objects
    self.builder = Builder(self)
    self.converter = Converter(self)
    self.images = Images(self)
    self.map = Map(self)
    self.selector = Selector(self)
    self.toolbar = Toolbar(self)

    # loop
    self.saved = True
    # NOTE(review): the constructor blocks here until the editor loop ends,
    # then shuts pygame down.
    self.loop()
    pygame.quit()
def main():
    """CLI entry point: run the converter when arguments were supplied."""
    args = args_parser()
    if not args:
        # No usable arguments; report and bail out.
        print('No args exit')
        return
    converter = Converter(args.path, args.extensions)
    converter.start()
def prepare_acquisition_pulse(self):
    """
    Prepare the acquisition of counts.
    It prepares the pulse pattern and set the wait time.
    """
    _debug('GUIOptimizer: prepare_acquisition')

    # Set the fpga NOT in each tick mode
    self.fpga.set_counting_mode(False)

    # Create the data array from counting
    # Prepare DIO1 in state 1
    self.fpga.prepare_DIOs([1], [1])
    # Get the actual DIOs, because there might be other DIOs open.
    self.dio_states = self.fpga.get_DIO_states()
    # Convert the instruction into the data array
    conver = Converter()  # Load the converter object.
    self.count_time_ms = self.treeDic_settings['Usual/Count_time']
    # Count time is in ms; tickDuration presumably in us — TODO confirm units.
    nb_ticks = self.count_time_ms * 1e3 / (conver.tickDuration)
    self.data_array = conver.convert_into_int32([(nb_ticks, self.dio_states)])

    # Upate the waiting time
    # NOTE(review): unlike the GUISaturation variant, wait_after_AOs_us is
    # read here but not refreshed from the settings tree — confirm intended.
    self.fpga.prepare_wait_time(self.wait_after_AOs_us)

    # Send the data_array to the FPGA
    self.fpga.prepare_pulse(self.data_array)

    # Call the event to say "hey, stuff changed on the fpga"
    self.event_fpga_change()
def calculate(self): c = Converter() # print(c.currencies()) if self.__check_status(): if self.charge != '' and self.charge_cur != '': # print(self.charge_cur) # print(self.charge) # print(self.currency) self.penalty = c.calc(self.charge_cur, float(self.charge), self.currency) # print(self.penalty) elif self.charge != '' and self.percent: self.penalty = round(self.baseFare * self.percent / 100) # print(self.penalty) self.non_ref_tax, self.ref_tax = self.__calc_taxes() self.total = self.totalFare - self.penalty - self.non_ref_tax data = self.__get_data() return data
def convert(self, inputfile, options, reportProgress=False): input_dir, filename, input_extension = self.parseFile(inputfile) output_dir = input_dir if self.output_dir is None else self.output_dir try: outputfile = os.path.join(output_dir, filename + "." + self.output_extension) except UnicodeDecodeError: outputfile = os.path.join(output_dir, filename.decode('utf-8') + "." + self.output_extension) #If we're processing a file that's going to have the same input and output filename, resolve the potential future naming conflict if os.path.abspath(inputfile) == os.path.abspath(outputfile): newfile = os.path.join(input_dir, filename + '.tmp.' + input_extension) #Make sure there isn't any leftover temp files for whatever reason self.removeFile(newfile, 0, 0) #Attempt to rename the new input file to a temporary name try: os.rename(inputfile, newfile) inputfile = newfile except: i = 1 while os.path.isfile(outputfile): outputfile = os.path.join(output_dir, filename + "(" + str(i) + ")." + self.output_extension) i += i conv = Converter(self.FFMPEG_PATH, self.FFPROBE_PATH).convert(inputfile, outputfile, options, timeout=None) for timecode in conv: if reportProgress: sys.stdout.write('[{0}] {1}%\r'.format('#' * (timecode / 10) + ' ' * (10 - (timecode / 10)), timecode)) sys.stdout.flush() print outputfile + " created" os.chmod(outputfile, 0777) # Set permissions of newly created file return outputfile, inputfile
def file_upload(request):
    """Handle an uploaded video file (mp4 or flv).

    Stores the file under a UUID-based name, generates a thumbnail at the
    10-second mark, records a TempFile row, and returns a Response with
    the stored file name and thumbnail name. Unsupported content types
    get an explanatory Response instead.
    """
    file = request.FILES.get('file', None)
    # Renamed from 'type' to avoid shadowing the builtin.
    content_type = request.DATA.get('type', None)
    if file:
        # TODO: Streaming Video (FLV, F4V, MP4, 3GP) Streaming Audio (MP3, F4A, M4A, AAC)
        file_name = ''
        thumbnail = ''
        convert = Converter()
        if content_type == u'video/x-flv':
            uuid_string = str(uuid.uuid1())
            file_name = uuid_string + '.flv'
            thumbnail = uuid_string + '.jpg'
        elif content_type == u'video/mp4':
            uuid_string = str(uuid.uuid1())
            file_name = uuid_string + '.mp4'
            thumbnail = uuid_string + '.jpg'
        if file_name != '':
            file_path = FILE_PATH + file_name
            # bug fix: removed the redundant close() inside the 'with'
            # block — the context manager already closes the file.
            with open(file_path, 'wb+') as destination:
                for chunk in file.chunks():
                    destination.write(chunk)
            convert.thumbnail(file_path, 10, FILE_PATH + thumbnail)
            temp_file = TempFile(name=file_name, path=file_path)
            temp_file.save()
            return Response({'file_name': file_name, 'thumbnail': thumbnail})
        # bug fix: previously fell through and returned None for a file
        # with an unsupported content type; report it explicitly instead.
        return Response({'status': 'Current just support .mp4 && .flv.'})
    else:
        return Response({'status': 'Current just support .mp4 && .flv.'})
def convert_sentence(self, list_size, size):
    """
    Converts sentences to feature vectors and stores them in self.features{n}

    Args:
        list_size (int) : the length of the number of features
        size (int) : the length of the sentence.
    """
    for i in range(self.no_langs):
        # Vocabulary for language i lives in self.word_list{i}.
        word_list = getattr(self, f'word_list{i}')
        max_int = len(word_list)
        for j in range(list_size):
            sentence = []
            # Draw unique random words until the sentence reaches 'size'.
            # NOTE(review): this loops forever if word_list has fewer than
            # 'size' distinct words — confirm inputs are large enough.
            while len(sentence) != size:
                word = word_list[random.randrange(max_int)]
                if word not in sentence:
                    sentence.append(word)
            s = ' '.join(word for word in sentence)
            converter = Converter(s)
            result = converter.result
            # The language label is appended as the final feature.
            result.append(self.langnames[i])
            self.features[f'{self.langnames[i]}{j}'] \
                = tuple(result)
    return
def instantiate(self):
    """Build the converter->pruner testbench: channels, DUTs and stimulus."""
    self.name = 'tb'
    # Geometry: inputs arrive input_size values at a time; one block is
    # block_size values wide, i.e. in_sets input transfers per block.
    self.input_size = 4
    self.block_size = 12
    self.in_sets = self.block_size // self.input_size
    self.num_nonzero = 5
    self.preserve_order = True

    self.in_chn = Channel()
    self.mid_chn = Channel()
    self.out_chn = Channel()

    self.converter = Converter(self.in_chn, self.mid_chn, self.input_size,
        self.block_size)
    # Alternative pruner implementations kept commented for easy swapping:
    #self.pruner = NaivePruner(self.mid_chn,self.out_chn,self.num_nonzero, self.block_size, self.preserve_order)
    self.pruner = ClusteredPruner(self.mid_chn,self.out_chn,self.num_nonzero,
        self.block_size, self.preserve_order)
    #self.pruner = ThresholdPruner(self.mid_chn,self.out_chn,self.num_nonzero, self.block_size, self.preserve_order)

    self.iterations = 10
    self.iteration = 0
    self.curr_set = 0
    self.out_counter = 0

    # Random sparse stimulus: roughly half the entries are zero.
    self.test_data = [[randint(1,5) if randint(0,3)>1 else 0\
            for j in range(self.block_size)]\
            for i in range(self.iterations+1)]
    # send in one extra iteration to flush out last outputs

    print("Stimulus:")
    print("[")
    for i in range(len(self.test_data)-1):
        print(self.test_data[i])
    print("]")
def konwertuj(nazwa):
    """Convert the media file *nazwa* to 'tmp.mp4' with ffmpeg.

    Bug fix: the ffmpeg command previously contained the literal text
    '(unknown)' instead of the input file name; it now uses *nazwa*.

    Args:
        nazwa: Path of the input media file.
    """
    import subprocess  # local import; keeps the module's imports untouched

    conv = Converter()  # kept: construction may carry side effects — confirm
    filename = nazwa
    out_name = "tmp.mp4"
    # Pass arguments as a list (no shell) so a file name containing spaces
    # or shell metacharacters cannot be misinterpreted or injected.
    subprocess.run(['ffmpeg', '-i', filename, out_name])
def index():
    """Flask view: validate the two units, convert the submitted amount,
    and render the result (or flash validation errors).

    Returns:
        Rendered 'index.html', with conversion results when successful.
    """
    form = ConvertForm()
    if form.validate_on_submit():
        amount = form.amount.data
        arg1 = form.from_unit.data
        arg2 = form.to_unit.data
        text = "==>"
        # Validate each unit once (the original re-ran arg_check up to
        # three times per unit) and flash a message for every bad one.
        arg1_ok = arg_check(arg1)
        arg2_ok = arg_check(arg2)
        if not arg1_ok:
            flash(f"Please enter valid unit, {arg1} is not supported", "danger")
        if not arg2_ok:
            flash(f"Please enter valid unit, {arg2} is not supported", "danger")
        if arg1_ok and arg2_ok:
            input_str = f"{amount} {arg1} {arg2}"
            inp = Parser().parse(input_str)
            result = Converter().result(inp)
            if result is None:
                # Units parsed but belong to different categories.
                flash(f"Units must be from same category!", "danger")
                return render_template("index.html", form=form)
            return render_template("index.html", amount=amount, arg1=arg1, \
                text=text, arg2=arg2, result=result, form=form)
        # bug fix: the original called render_template here without
        # returning it (a dead call); fall through to the final render.
    return render_template("index.html", form=form)
def start_SVC(train_len, test_len):
    """Train a classifier on parts-of-speech data and evaluate it.

    Args:
        train_len: Number of training samples to draw.
        test_len: Number of test samples to draw.

    Returns:
        The result of check_data() for the fitted model on the test set.
    """
    # Renamed the local from 'list' to avoid shadowing the builtin.
    train_list = make_list_parts_of_speech(train_len)
    from converter import Converter
    converter = Converter()
    [X, y] = get_X_y(train_list, converter)
    print("len train list", len(train_list))
    print("len converter", len(converter.mass))
    model = Classifier()
    model.fit(X, y)
    # joblib.dump(model, 'CART.pkl')  # persist the fitted model if needed
    # Evaluate on fresh data.
    test_list = make_list_parts_of_speech(test_len, True)
    print("len test_list", len(test_list))
    [test_X, test_y] = get_X_y(test_list, converter)
    return check_data(test_X, test_y, model)
def test_converter_2pass(self):
    """Two-pass conversion to ogg (theora video + vorbis audio) should
    report progress and must not mutate the caller's options dict."""
    c = Converter()
    self.video_file_path = 'xx.ogg'
    options = {
        'format': 'ogg',
        'audio': {
            'codec': 'vorbis',
            'samplerate': 11025,
            'channels': 1,
            'bitrate': 16
        },
        'video': {
            'codec': 'theora',
            'bitrate': 128,
            'width': 360,
            'height': 200,
            'fps': 15
        }
    }
    # Snapshot the options before conversion to detect mutation later.
    options_repr = repr(options)

    conv = c.convert('test1.ogg', self.video_file_path, options, twopass=True)
    verify_progress(conv)

    # Convert should not change options dict
    self.assertEqual(options_repr, repr(options))
    self._assert_converted_video_file()
def test_export_to_xlsx_with_helper(file_path_help):
    """Exporting with a helper mapping writes the expected header and rows."""
    helper = {'key': 'key', 'value': 'value'}

    with Converter(file_path_help, 'result.xlsx') as c:
        c.convert(helper=helper)

    wb = load_workbook(filename='result.xlsx')
    ws = wb.active

    # Expected cell values for the first four rows of the sheet.
    expected_rows = {
        0: ['Test', 'Numbers!', 'Hell', 'Will this work_Hey', 'Dict help_key'],
        1: ['Hi', 1, 'Yeah!', 1, 1],
        2: [None, 2, None, 2, None],
        3: [None, 3, None, None, None],
    }
    for index, (c1, c2, c3, c4, c5) in enumerate(ws[ws.dimensions]):
        row = [c1.value, c2.value, c3.value, c4.value, c5.value]
        if index in expected_rows:
            assert row == expected_rows[index]
def _do_detect(thread_data: list):
    """Run one detection worker thread per work item and wait for all.

    :param thread_data: list of tuple of (image_files, output_video_dir)
    :return:
    """
    # One Converter instance is shared by every worker thread.
    converter_instance = Converter()

    # Build one thread per work item.
    thread_list = [
        threading.Thread(target=_detect_thread, args=[item, converter_instance])
        for item in thread_data
    ]

    # Start every thread, then block until all of them finish.
    for worker in thread_list:
        worker.start()
    for worker in thread_list:
        worker.join()

    # Record and report batch progress.
    global completed_num
    completed_num += ConverterBatch.THREAD_SIZE
    update_log(str(completed_num) + '/' + str(data_length) + ' is completed')
    return
def main():
    """Interactive currency-conversion prompt backed by Converter."""
    converter = Converter()
    print('To exit the program type "exit" in "Sum>" field')
    # The outer try catches HTTP errors from the currency service.
    try:
        print('List of available currencies:')
        for currency_code in converter.get_currencies():
            print(currency_code)
        while True:
            try:
                value = input('Sum> ')
                if value.lower() == 'exit':
                    break
                elif isFloat(value):
                    value = float(value)
                    exchange_currency = input('To currency> ').upper()
                    result = converter.convert(value, exchange_currency)
                    print('{0:.2f} {1} is {2:.2f} {3}'.format(
                        value, converter.base_currency, result,
                        exchange_currency))
            except (ValueError, TypeError, KeyError) as err:
                # Bad amount or unknown currency: report and keep prompting.
                print(err)
    except requests.exceptions.HTTPError as err:
        print(err)
def convert(settings_filename: str, version: str) -> None: """Convert the input document to the specified output formats.""" # pylint: disable=unsubscriptable-object,unsupported-assignment-operation settings = cast(Settings, read_json(settings_filename)) variables = cast(Variables, {}) for variable_file in settings["VariablesFiles"]: variables.update(cast(Variables, read_json(variable_file))) variables["VERSIE"] = settings["Version"] = version variables["DATUM"] = settings["Date"] = datetime.date.today().strftime( "%d-%m-%Y") logging.info("Converting with settings:\n%s", pprint.pformat(settings)) build_path = pathlib.Path(settings["BuildPath"]) build_path.mkdir(parents=True, exist_ok=True) xml = MarkdownConverter(variables).convert(settings) write_xml(xml, settings) converter = Converter(xml) if "docx" in settings["OutputFormats"]: convert_docx(converter, build_path, settings) if "pdf" in settings["OutputFormats"]: copy_files(settings, "pdf") convert_pdf(converter, build_path, settings, variables) if "pptx" in settings["OutputFormats"]: convert_pptx(converter, build_path, settings) if "xlsx" in settings["OutputFormats"]: convert_xlsx(converter, build_path, settings) if "html" in settings["OutputFormats"]: copy_files(settings, "html") convert_html(converter, build_path, settings)
def __init__(self, hparams, teacher_path=''): super().__init__() # addition: convert dict to namespace when necessary # hack: if isinstance(hparams, dict): import argparse args = argparse.Namespace() for k, v in hparams.items(): setattr(args, k, v) hparams = args self.hparams = hparams self.to_heatmap = ToHeatmap(hparams.heatmap_radius) if teacher_path: # modifiction: add str self.teacher = MapModel.load_from_checkpoint(str(teacher_path)) self.teacher.freeze() self.net = SegmentationModel(10, 4, hack=hparams.hack, temperature=hparams.temperature) self.converter = Converter() self.controller = RawController(4)
def test_avepool2d(
        in_size, in_chan, pool_size, pool_strides, connect, request):
    """Check that the ML GeNN conversion of a Keras AveragePooling2D+ReLU
    model reproduces the TensorFlow output (read from membrane voltages)."""
    # Don't use all GPU memory for TF!
    for gpu in tf.config.experimental.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)

    # Generate input tensor: random 0/1 values, NHWC layout.
    x = np.random.randint(0, 2, size=(1, in_size, in_size, in_chan)).astype(np.float64)

    # Create TensorFlow model
    tf_model = tf.keras.models.Sequential([
        tf.keras.layers.AveragePooling2D(
            pool_size, strides=pool_strides, padding='valid',
            input_shape=(in_size, in_size, in_chan)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.ReLU(),
    ], name=request.keywords.node.name)

    # Run TensorFlow model
    tf_y = tf_model([x]).numpy()

    # Run ML GeNN model
    mlg_model = mlg.Model.convert_tf_model(
        tf_model, converter=Converter(), connectivity_type=connect)
    # Infinite threshold: the output neurons never spike, so Vmem simply
    # accumulates the input and can be compared with the TF activations.
    mlg_model.outputs[0].neurons.set_threshold(np.float64(np.inf))
    mlg_model.set_input_batch([x])
    mlg_model.step_time(2)

    nrn = mlg_model.outputs[0].neurons.nrn
    nrn.pull_var_from_device('Vmem')
    mlg_y = nrn.vars['Vmem'].view.reshape(tf_y.shape)

    assert(np.allclose(mlg_y, tf_y, atol=0.0, rtol=1.0e-3))