def main():
    """Locate the best monitoring asteroid, then report the Nth vaporized one."""
    with open(get_file_name()) as file:
        grid = [list(row.strip()) for row in file]

    field = AsteroidField()
    field.load(grid)

    station, visible = field.get_best_asteroid()
    print("Selected_location: {}".format(station))
    print("Asteroids in sight: {}".format(len(visible)))

    remaining = NUM_OF_ASTEROIDS_TO_DESTROY
    # Vaporize whole sweeps until the target index falls inside the
    # currently visible set.
    while remaining > len(visible):
        field.destroy(visible)
        remaining -= len(visible)
        visible = field.get_asteroids_in_sight_from(station)

    tagged = [(asteroid, get_degree_info(station, asteroid))
              for asteroid in visible]
    ordered = sorted(tagged, key=destroy_order_key)
    target, angle = ordered[remaining - 1]
    answer = 100 * target[0] + target[1]
    print("{} at {:.2f}° -> {}".format(target, angle, answer))
def lambda_handler(event, context):
    """Extract the interest-rate image from each PDF record and upload a PNG."""
    for record in event['Records']:
        download_path = download_record(s3_client, record)
        file_name = get_file_name(record)

        # get images from pdf
        image_prefix = str(uuid.uuid4()).replace('-', '')
        run(['pdfimages', '-png', download_path,
             '/tmp/{}'.format(image_prefix)])

        # find right png file
        listing = run(['pdfimages', '-list', download_path],
                      stdout=PIPE, universal_newlines=True)
        rows = listing.stdout.split('\n')
        # smask means it's the interest rates
        masked_rows = [row for row in rows if 'smask' in row]
        image_number = '{0:0=3d}'.format(int(masked_rows[0].split()[1]))
        correct_file = '{}-'.format(image_prefix) + image_number + '.png'

        # upload png to s3
        png_name = file_name.split('.')[0] + '.png'
        s3_client.upload_file('/tmp/' + correct_file, S3_BUCKET,
                              'pngs/' + png_name)
def lambda_handler(event, context):
    """OCR the rate table out of each image record and upload it as a CSV."""
    for record in event['Records']:
        image_path = download_record(s3_client, record)
        file_name = get_file_name(record)

        image = cv2.imread(image_path, 0)
        image = preprocess_image(image)
        # --oem 2 means both legacy and LSTM
        # --psm 6 means to treat the whole image as a block of text
        text = pytesseract.image_to_string(
            image, lang='eng', config='--oem 2 --psm 6')

        # keep the last two lines, then remove any garbage before the
        # rates start by stripping everything before the first digit
        text = text.strip().split('\n')[-2:]
        start = 0
        for position, char in enumerate(text[1]):
            if char.isdigit():
                start = position
                break
        text[1] = text[1][start:]

        months, rates = test_and_convert(text)
        file_date = file_name.split('.')[0]
        final_csv = [[file_date, month, rate]
                     for month, rate in zip(months, rates)]

        csvname = '/tmp/{}.csv'.format(uuid.uuid4())
        with open(csvname, 'w') as f:
            for row in final_csv:
                f.write(','.join(row))
                f.write('\n')

        s3_client.upload_file(csvname, S3_BUCKET,
                              'csvs/' + file_date + '.csv')
def main():
    """Feed every input line into the cable panel and print the answer."""
    panel = CablePanel()
    with open(get_file_name()) as file:
        for cable in file:
            panel.add_cable(cable)
    print(panel.get_closest_collision_distance())
def main():
    """Count the valid passwords inside the puzzle's numeric range."""
    with open(get_file_name()) as file:
        raw_range = file.readline()
    low, high = raw_range.strip().split('-')
    passwords: set = _get_possible_passwords(low, high)
    print(len(passwords))
def main():
    """Load the intcode program from the input file and execute it."""
    computer = IntcodeComputer()
    with open(get_file_name()) as file:
        raw_memory = file.readline()
    program = [int(token) for token in raw_memory.strip().split(",")]
    computer.load_memory(program)
    computer.run()
def main():
    """Decode the 25x6 space image, print its checksum and render it."""
    image = SpaceImageFormat(width=25, height=6)
    with open(get_file_name()) as file:
        encoded = file.readline().strip()
    image.load_image(encoded)
    print("Checksum: {}".format(image.checksum()))
    image.render()
def main():
    """Parse the orbit pairs and print the total number of orbits."""
    orbit_map = OrbitMap()
    pairs = []
    with open(get_file_name()) as file:
        for entry in file:
            center, satellite = entry.strip().split(')')
            pairs.append((center, satellite))
    orbit_map.load_orbit_map(pairs)
    print(orbit_map.get_total_number_or_orbits())
def main():
    """Parse the orbit pairs and print the transfer count between YOU and SAN."""
    orbit_map = OrbitMap()
    pairs = []
    with open(get_file_name()) as file:
        for entry in file:
            center, satellite = entry.strip().split(')')
            pairs.append((center, satellite))
    orbit_map.load_orbit_map(pairs)
    print(orbit_map.transfers("YOU", "SAN"))
def main():
    """Report the ORE cost per FUEL and the FUEL yield of a trillion ORE."""
    refinery = SpaceRefinery()
    with open(get_file_name()) as file:
        reactions = file.read().splitlines()
    refinery.parse_reactions(reactions)
    print("Quantity of ORE needed for 1 FUEL: {}".format(
        refinery.get_ore_needed_for_fuel()))
    ore_budget = 1000000000000
    print("FUEL produced with {} OREs: {}".format(
        ore_budget, refinery.get_fuel_produced_with(ore_budget)))
def process_output_file():
    """Aggregate every collected output file into one JSON result file.

    Reads each file listed in the module-level ``file_arr``, parses its
    content into the shared accumulators, then writes the averaged and
    formatted result to ``<result_directory>/<common name>.json``.

    Fixes: removed the unused local ``index = 0``, and the loop variable
    no longer shadows the final output file name.
    """
    global file_name_prefix
    file_name_prefix = result_directory + "/" + common.get_file_name()
    # The first file's content doubles as the "latest" metadata that is
    # carried through into the final output.
    latest_file_info = get_output_file_content(file_arr[0])
    for source_name in file_arr:
        content = get_output_file_content(source_name)
        parse_content(content)
    topic_output = topic_average()
    final_output = format_output(topic_output, latest_file_info)
    output_name = file_name_prefix + ".json"
    common.create_output_file(output_name, final_output)
def _dump_study(self):
    """Dump the study's textual representation to a user-chosen file.

    Prompts for a destination the first time (remembered in
    ``self._dump_file_name``); I/O failures are silently ignored,
    preserving the original best-effort behaviour.
    """
    if self._asterstudy.study() is None:
        return
    if self._dump_file_name is None:
        file_name = get_file_name(0, self._asterstudy.mainWindow(),
                                  "Dump Study", "", "*.txt")
        if file_name:
            self._dump_file_name = file_name
    if self._dump_file_name is not None:
        try:
            # Context manager closes the handle even if write() raises;
            # the previous open/write/close chain leaked it on error.
            with open(self._dump_file_name, "w") as dump_file:
                dump_file.write(self._get_repr())
        except IOError:
            pass
def main():
    """Simulate the moons: total energy after 1000 steps, then the period."""
    moon_re = re.compile(r"<x=(?P<x>-?\d+), *y=(?P<y>-?\d+), *z=(?P<z>-?\d+)>")
    moon_list = []
    with open(get_file_name()) as file:
        for line in file:
            match = moon_re.match(line.strip())
            moon_list.append([int(match[axis]) for axis in AXES])

    system = MoonSystem(moon_list)
    system.iterate(times=1000)
    print("Total energy after 1000 iterations: {}".format(
        system.get_total_energy()))

    # Restart from the initial configuration for the period search.
    system = MoonSystem(moon_list)
    print("Iterations until first loop: {}".format(system.find_period()))
def format_output():
    """Serialize per-topic disk utilization percentages into a JSON file."""
    topics = []
    logging.info("Total Disk Utilized=" + str(total_disk_utilized))
    for topic_name, used in folder_info.items():
        percent_value = (used * 100.0) / (total_disk_utilized * 1.0)
        logging.info("Topic Name=" + topic_name + " ,Utilization=" +
                     str(used) + " ,Percentage=" + str(percent_value))
        topics.append(common.Topic(topic_name, percent_value, used))
    host_name, host_ip = common.get_Host_name_IP()
    inf = common.Output(host_name, host_ip, common.get_current_time(),
                        total_disk_utilized, topics, note)
    output_data = json.dumps(inf, default=lambda o: o.__dict__)
    file_name = (output_directory + "/" + common.get_file_name() +
                 "." + host_name + ".json")
    common.create_output_file(file_name, output_data)
def main():
    """Run the arcade twice: count initial blocks, then play to completion."""
    with open(get_file_name()) as file:
        raw_program = file.readline()
    program = [int(token) for token in raw_program.strip().split(",")]

    arcade = ArcadeCabinet()
    arcade.load_game(program)
    arcade.run()
    print("Number of initial blocks: {}".format(arcade.get_num_of_blocks()))

    free_arcade = ArcadeCabinet(free_play=True)
    free_arcade.load_game(program)
    free_arcade.run()
    assert free_arcade.get_num_of_blocks() == 0, "There are blocks left"
    print("Final score: {}".format(free_arcade.get_score()))
def main():
    """Paint the hull: count painted cells, then render the registration image."""
    with open(get_file_name()) as file:
        raw_memory = file.readline()
    program = [int(token) for token in raw_memory.strip().split(",")]

    robot = PaintingRobot()
    robot.load_program(program)
    robot.run()
    print(len(robot.get_painted_cells()))

    robot = PaintingRobot(initial_color=1)
    robot.load_program(program)
    robot.run()
    robot.get_board_as_image().render()
def _generate_cut_jar_for_runtime_sdk_dex(self):
    """Cut the obfuscated no-dex sdk jar, dex it, and copy it to the out dir."""
    common.LOGI(self.TAG, "Generating jar file for runtime sdk ...")
    out_jar_file_name = common.get_file_name(
        self.sdk_jar_no_dex_obfuscated_path)
    cutter_out_dir = os.path.join(self.out_dir_temp,
                                  "jar-cutter-out-for-runtime-dex")
    cutter_config = os.path.join(self.script_path,
                                 "jar-cutter-for-cocosruntime-dex.json")
    self._generate_cut_jar(self.sdk_jar_no_dex_obfuscated_path,
                           cutter_config, cutter_out_dir)
    cut_jar_path = os.path.join(cutter_out_dir, out_jar_file_name)
    common.generate_dex_jar(cut_jar_path, self.sdk_jar_dex_path)
    # Copy dex sdk file to the output directory
    shutil.copy(self.sdk_jar_dex_path,
                os.path.join(self.out_dir, self.sdk_name_for_cocosruntime))
    # Remove the dex file since it may be generated with the same name
    # in next step (I means generating cut jar for tencent)
    common.safe_remove_file(self.sdk_jar_dex_path)
def filter_midis(input_dir: str, output_dir: str, note_count: int = 4,
                 time_steps: float = 0.125) -> str:
    """Copy every midi that passes the analysis check into a 'filtered' folder.

    Returns:
        The path of the folder the accepted files were written to.
    """
    suboutput = common.get_and_create_folder_path(output_dir, "filtered")
    click.echo(
        "Filtering files from {} to {} with {} notes in {} steps...".format(
            input_dir, suboutput, note_count, time_steps))
    for file, midi in common.load_midis_with_files(input_dir):
        click.echo("\n\nAnalysing {}...".format(file))
        # Skip anything the analyser rejects; only accepted files are saved.
        if not check.analyse_file((file, midi), note_count, time_steps):
            continue
        target = os.path.join(suboutput,
                              "{}.mid".format(common.get_file_name(file)))
        click.echo("Saving {}...".format(target))
        midi.write(target)
    return suboutput
def main():
    """Brute-force noun/verb in 0-99 until the program yields EXPECTED_OUTPUT.

    Prints ``100 * noun + verb`` for the first matching pair and returns.
    Fix: the original loaded the program once before the loop, a redundant
    call that every iteration immediately overwrote with its own patched
    copy.
    """
    computer = IntcodeComputer()
    with open(get_file_name()) as file:
        memory_str = file.readline()
    program = [int(d) for d in memory_str.strip().split(",")]
    for noun in range(100):
        for verb in range(100):
            # Patch the two inputs, reload, and run from scratch.
            program[1] = noun
            program[2] = verb
            computer.load_memory(program)
            computer.run()
            if computer.get_memory_position(0) == EXPECTED_OUTPUT:
                print(100 * noun + verb)
                return
def collect_data(date):
    """Append every hourly record found for *date* to the module-level data."""
    global data
    date_kwargs = date_to_dict(date)
    file_name = get_file_name(**date_kwargs)
    if not data_file_exists(**date_kwargs):
        print("Warning. No data file at: " + file_name)
        return
    date_data = parse_file_contents(read_file(file_name))
    date_string = "{year}-{month}-{day}".format(**date_kwargs)
    for hour_data in date_data:
        # The first element of each record is the hour offset of the day.
        moment = date + timedelta(hours=hour_data[0])
        timestamp = calendar.timegm(moment.timetuple())
        data.append([timestamp, date_string] + hour_data)
def importFile(self):
    """
    Called when 'Import from file' button is clicked.

    Allows to select file and fills in a table if any file was given.

    Fixes: ``exc.message`` does not exist in Python 3 (it would raise
    AttributeError inside the handler), so the error text is taken from
    ``str(exc)``; the overly broad ``BaseException`` catch (which also
    swallowed KeyboardInterrupt/SystemExit) is narrowed to ``Exception``.
    """
    data = None
    title = translate("ParameterPanel", "Import table")
    filters = common_filters()
    filename = get_file_name(mode=1, parent=self.table, title=title,
                             url="", filters=filters,
                             dflt_filter=filters[2])  # "Text files"
    if filename:
        try:
            data = numpy.loadtxt(filename, delimiter=',')
        except Exception as exc:
            QMessageBox.critical(self.table, "AsterStudy", str(exc))
        else:
            self.setData(data)
def _browseFileTemplate(self, mode, operation=lambda *_: None):
    """
    Template for managing 'Browse file' operation.

    Arguments:
        mode (int): 0 has line edit, 1 has not.
        operation (func) : Operation to perform, typically moving a file
            from a `source` to a `dest`. Defaults to noop.

    Returns:
        The browsed file name; may be empty/None when the dialog was
        cancelled.
    """
    # Resolve the file currently selected in the combo box (if any) so it
    # can be offered as the starting URL of the browse dialog.
    combo_index = self.file_combo.currentIndex()
    index = self.file_combo.model().index(combo_index, 0)
    oldfile = index.model().data(index, Role.CustomRole) \
        if index.isValid() else ""
    filters = common_filters()
    title = translate("UnitPanel", "Select file")
    # mode = 1 for "in" or "inout", 0 for "out"
    # 0 is intended for save and has line edit,
    # 1 is intended for open and has not
    filename = get_file_name(mode, parent=self.file_combo, title=title,
                             url=oldfile, filters=filters)
    if filename:
        # `operation` returning falsy means the file was not handled by
        # the caller-supplied move/copy step.
        if not operation(oldfile, filename) \
                and self.embedded_check.isChecked():
            # remove old embedded file and uncheck Embedded checkbox,
            # when another file was browsed
            self.file_combo.model().emb2ext(oldfile, "")
            if os.path.exists(oldfile):
                os.remove(oldfile)
            self._checkEmbeddedSilently(False)
        self.setCurrentFilename(filename)
    return filename
def input_gen():
    """Yield each line of the puzzle input file as an integer."""
    with open(get_file_name()) as source:
        yield from map(int, source)
async def main():
    """Feed the intcode program to the amplifier chain and print its output."""
    with open(get_file_name()) as file:
        raw_memory = file.readline()
    program = [int(token) for token in raw_memory.strip().split(",")]
    print(await get_amps_output(program))
# Default locations and aggregation settings; several of these are
# overridden from the command line in init_input_args().
result_directory = "./result"
output_directory = "./output"
note = "GB"
offset = 0
result_days = 30
file_arr = []
topic_info = {}
host_data = {}
file_name_prefix = ""
agg_level = 0

logging.basicConfig(filename="kafka_agg_result.log", level=logging.INFO)
logging.info("----------------Script Started-----------------")
logging.info(common.get_file_name())


# fetch input argument and init value
def init_input_args():
    """Read optional CLI args: agg_level, result_days, output/result dirs.

    Bug fix: ``agg_level`` was missing from the ``global`` statement, so
    the parsed value was previously assigned to a throwaway local and the
    module-level default was never updated.
    """
    global agg_level, result_days, result_directory, output_directory
    if len(sys.argv) > 1:
        agg_level = int(sys.argv[1])
    if len(sys.argv) > 2:
        result_days = int(sys.argv[2])
    if len(sys.argv) > 3:
        output_directory = sys.argv[3]
    if len(sys.argv) > 4:
        result_directory = sys.argv[4]
#-*- encoding: utf-8 -*-
from common import Firefox
from common import create_logger
from common import get_file_name

# Sites to probe; the scheme is prepended when a bare host is given.
web_list = ['www.zhihu.com', 'www.baidu.com', 'www.qq.com']
web_head = "http://"

log = create_logger(get_file_name())
firefox = Firefox()
wb = firefox.browser()

fail_count = 0
for web in web_list:
    address = web if web.startswith(web_head) else web_head + web
    log.info("navigate url: {0}".format(address))
    wb.get(address)
    # Idiom fix: use the `in` operator instead of calling
    # wb.title.__contains__() directly; "出错" marks an error-page title.
    if "出错" in wb.title:
        log.warning("navigate url: {0} Failed!".format(address))
        fail_count += 1

log.warning("fail count: {0}".format(fail_count))
assert fail_count < 3, "web browser test failed!"
def store_day_of_data(response, **kwargs):
    """Write the response body to the data file derived from the date kwargs."""
    target = get_file_name(**kwargs)
    with open(target, "w") as sink:
        sink.write(response.text)