def convertsingle(event, context):
    """Accept a single JSON object which should be an SF record,
    convert it to HMIS format, and output.
    """
    print(context)

    # Get POST body, which should be a JSON object containing high-level Salesforce objects.
    single_record = json.loads(event['body'])

    # Convert record.
    converted_data = convert(single_record)

    # Save results to S3.
    prefix = str(datetime.datetime.now()).replace(' ', 'T')
    save_files_to_s3(bucket=S3_BUCKET_NAME, csv_files=converted_data, prefix=prefix)

    # Send success notification.
    # send_notification(n=1)

    # Return results for testing.
    # Eventually remove this and return a status message.
    response = {"statusCode": 200, "body": json.dumps(converted_data)}
    return response
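# A minimal local-invocation sketch for the handler above, assuming an AWS
# API Gateway-style event. The record fields below are hypothetical, and the
# handler still needs S3_BUCKET_NAME and save_files_to_s3 configured.
if __name__ == "__main__":
    sample_event = {"body": '{"Id": "003XX000004TMM2", "FirstName": "Pat"}'}
    sample_response = convertsingle(sample_event, context=None)
    print(sample_response["statusCode"])  # expect 200 on success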
def test_convert(self):
    # Example from the README:
    self.assertEqual(
        conversion.convert('The quick brown fox jumped over the lazy dog.'),
        'ðə kwɪk braʊn fɑks ʤəmpt ˈoʊvər ðə ˈlezi dɔg')
    # Test the same input with retrieve='ALL', which should return a list.
    # This also tests the efficacy of the get_all() algorithm.
    self.assertEqual(
        conversion.convert('The quick brown fox jumped over the lazy dog.',
                           retrieve='ALL'),
        [
            'ði kwɪk braʊn fɑks ʤəmpt ˈoʊvər ði ˈlezi dɔg',
            'ði kwɪk braʊn fɑks ʤəmpt ˈoʊvər ðə ˈlezi dɔg',
            'ðə kwɪk braʊn fɑks ʤəmpt ˈoʊvər ði ˈlezi dɔg',
            'ðə kwɪk braʊn fɑks ʤəmpt ˈoʊvər ðə ˈlezi dɔg'
        ])
def main():
    # Parse command-line arguments.
    print('Program started.')
    parser = argparse.ArgumentParser(description='Enter parameters for conversion.')
    parser.add_argument('-ini', action='store', default='', dest='file_ini',
                        help='Address or name of source file in ini.')
    parser.add_argument('-json', action='store', default='', dest='file_json',
                        help='Address or name of source file in json. (UNUSED)')
    parser.add_argument('-final', action='store', default='', dest='file_final',
                        help='Address or name of converted file.')
    parser.add_argument('-mat', action='store', default='', dest='file_material',
                        help='Address or name of material file. Type * for automatic file detection.')
    parser.add_argument('-sw', action='store_true', dest='warnings',
                        help="Show 'not found' messages during conversion.")
    parser.add_argument('-nc', action='store_false', dest='comments',
                        help='Disable saving of comments.')
    parser.add_argument('-db', action='store_true', dest='debug',
                        help='Show saved values (used for debugging).')
    parser.add_argument('-nd', action='store_false', dest='altering',
                        help='Disable alterations to json file (quotation marks, colons).')
    args = vars(parser.parse_args())
    # print(args)

    # Fill in any file names that were left empty.
    f1 = args['file_ini']
    f2 = args['file_json']
    f3 = args['file_final']
    converted = False
    if f1 == '':
        if f2 == '':
            print('Error: no file name inserted.')
        else:
            converted = True
            if f3 == '':
                f3 = f2[0:-3] + 'json'
                print('Name of final file: ' + f3)
    else:
        if f2 == '':
            f2 = f1[0:-4] + '_temp.json'
            print('Name of source file in json: ' + f2)
        if f3 == '':
            f3 = f1[0:-3] + 'json'
            print('Name of final file: ' + f3)
        converted = cnv.convert(f1, f2, args['comments'])  # converts ini file into JSON

    # Alter the json file.
    if converted:
        print('Starting final conversion.')
        complete = comp.complete(f2, f3, args['warnings'], args['comments'],
                                 args['debug'], args['altering'],
                                 args['file_material'])
    print('Program terminated.')
def main():
    args = _parse_args()
    docx_path = args.path
    xslt_path = args.xslt
    result = conversion.convert(docx_path, xslt_path)
    for line in result:
        sys.stderr.write(line)
        sys.stderr.write("\n")
def main(prototxt, caffemodel):
    test_string = ''
    test_string += printt('Accuracy of conversion. Caffe required.')
    model = None
    try:
        import caffe
        net = caffe.Net(prototxt, caffemodel, caffe.TEST)

        test_string += printt('Accuracy of conversion - caffe parsing')
        model = conversion.convert(prototxt, caffemodel, caffe_parse=True)
        l2_distance = test_similarity(model, net)
        if l2_distance < 1e-7:
            test_string += prints(
                'Accuracy of conversion - caffe parsing: Passed')
        else:
            test_string += prints(
                'Accuracy of conversion - caffe parsing: Failed')
        del model

        test_string += printt('Accuracy of conversion - protobuf parsing')
        model = conversion.convert(prototxt, caffemodel, caffe_parse=False)
        l2_distance = test_similarity(model, net)
        if l2_distance < 1e-7:
            test_string += prints(
                'Accuracy of conversion - protobuf parsing: Passed')
        else:
            test_string += prints(
                'Accuracy of conversion - protobuf parsing: Failed')
    except Exception as e:
        print(e)
        test_string += printe('Caffe was not found. Continuing...')

    test_string += printt('Serialization')
    if model is None:
        model = conversion.convert(prototxt, caffemodel)
    success = test_serialization(model)
    if success:
        test_string += prints('Serialization: Passed')
    else:
        test_string += prints('Serialization: Failed')

    print('=====================================\n' * 10)
    print('SUMMARY:')
    print(test_string)
def run(datadir):
    cfg = kbc_py.Config(datadir)

    datadir_path = Path(datadir)
    in_base_path = datadir_path / 'in/files'
    out_base_path = datadir_path / 'out/files'

    params = validate_expand_defaults(cfg.get_parameters())

    print("Datadir: " + str(list(str(d) for d in datadir_path.glob("**"))))

    output_params = params["output"]
    feature_format = feature_output_formats[output_params["featureFormat"]]
    include_additional_fields = output_params["includeAdditionalColumns"]

    input_format_params = params["input"]["format"]
    for format_name, format_params in input_format_params.items():
        in_format = input_formats[format_name]
        enabled = format_params["enabled"]
        glob_pattern = format_params["glob"]
        if not enabled:
            continue
        matching_files = list(in_base_path.glob(glob_pattern))
        print(f"Files matching {glob_pattern} in {in_base_path}: "
              f"{[str(f) for f in matching_files]}")
        for full_in_path in matching_files:
            relative_path = Path(full_in_path).relative_to(in_base_path)
            target_relative_path = relative_path.with_suffix(".csv")
            full_out_path = out_base_path / target_relative_path
            print(f"Converting {relative_path} (as {format_name}) "
                  f"to {target_relative_path}")
            full_out_path.parent.mkdir(parents=True, exist_ok=True)
            with open(str(full_out_path), mode="wt", encoding="utf-8") as out:
                convert(str(full_in_path), out, in_format, feature_format,
                        include_additional_fields)
def recipe_translate(data):
    """Convert each line of the recipe, then translate the result to French."""
    lines = data.split('\n')
    for i in range(len(lines)):
        lines[i] = convert(lines[i])
    txt = '\n'.join(lines)
    translator = Translator()
    out = translator.translate(txt, dest='fr').text
    return out
def do_kanji():
    input_text = request.forms.input_text
    if any(c not in chars for c in input_text):
        print('入力ミス')  # "input error"
        return template('kanji', text='入力ミス', in_text=input_text)
    elif input_text == '':
        print('空の文字列')  # "empty string"
        return template('kanji', text='空の文字列', in_text='空の文字列')
    else:
        kanji = convert(input_text)
        return template('kanji', text=kanji, in_text=input_text)
def button_run(self):
    # Collect all GUI field values into a single kwargs dict for the converter.
    self.gio_dict = {
        "input_file": self.file_name.get(),
        "subject_id": self.enter_subject_id.get(),
        "subject_date_of_birth": self.datetime_dob,
        "subject_description": self.enter_subject_desc.get(),
        "subject_genotype": self.genotype_var.get(),
        "subject_sex": self.sex_var.get(),
        "subject_weight": self.enter_subject_weight.get(),
        "surgery": self.enter_subject_surgery.get(),
        "subject_brain_region": self.brain_var.get(),
        "subject_species": self.species_var.get(),
        "session_id": self.enter_session_id.get(),
        "session_start_time": self.datetime_session,
        "experimenter": self.experimenter_var.get(),
        "experiment_description": self.description_var.get(),
        "institution": self.enter_session_inst.get(),
        "lab_name": self.enter_session_lab.get(),
    }
    conversion.convert(**self.gio_dict)
def main(info, event, probability, url=None):
    """Formats coordinates in the form of nested arrays."""
    days = info["day"]
    coords = info["coords"]
    probs = info["probs"]
    logging.info("Information from retrieval script unpacked.")

    un_coords = remove_cont(coords[event])
    logging.info("CONTINUE coordinates removed from forecast.")

    # Split each line into its non-empty whitespace-separated tokens.
    d = []
    for string in un_coords:
        new_string = list(filter(lambda a: a != '', string.split(' ')))
        d.append(new_string)

    indices = []
    for i in range(len(d)):
        if len(d[i]) != 0 and d[i][0] in probs[event]:
            indices.append(i)

    # Find the span of lines belonging to the requested probability.
    low = min(i for i in range(len(d))
              if len(d[i]) != 0 and d[i][0] == probability)
    high = max(i for i in range(len(d))
               if len(d[i]) != 0 and d[i][0] == probability)
    try:
        while len(d[high + 1]) != 0 and str(d[high + 1][0]) not in probs[event]:
            high += 1
    except IndexError:
        pass

    # Group coordinate tokens: a new group starts at each probability line,
    # and continuation lines are appended to the current group.
    i = low
    j = -1
    out = []
    while i <= high:
        if len(d[i]) != 0 and d[i][0] == probability:
            j += 1
            out.append([])
            new_out = out[j]
            new_out += d[i][1:]
        else:
            new_out += d[i][:]
        i += 1

    out = [[conversion.convert(coord) for coord in coord_group]
           for coord_group in out]
    return out
def test_valid_convert(self):
    """tests four valid input strings of increasing complexity"""
    simple = 't'
    sol_simple = json.dumps({'units': 'kg', 'multiplication_factor': 1000})

    medium = 't*degree/ha'
    factor_medium = co._round_sig(1000 * (math.pi / 180) / 10000)
    sol_medium = json.dumps({'units': 'kg*rad/m^2',
                             'multiplication_factor': factor_medium})

    hard = 'ha*degree/(h*min/degree)'
    factor_hard = co._round_sig(10000 * math.pi / 180 /
                                (3600 * 60 / (math.pi / 180)))
    sol_hard = json.dumps({'units': 'm^2*rad/(s*s/rad)',
                           'multiplication_factor': factor_hard})

    very_hard = 'ha/(t/(hour*min/(degree)*d)*\")'
    factor_very_hard = co._round_sig(
        10000 / (1000 / (3600 * 60 / (math.pi / 180) * 86400)
                 * math.pi / 648000))
    sol_very_hard = json.dumps({'units': 'm^2/(kg/(s*s/(rad)*s)*rad)',
                                'multiplication_factor': factor_very_hard})

    self.assertEqual(co.convert(simple), sol_simple)
    self.assertEqual(co.convert(medium), sol_medium)
    self.assertEqual(co.convert(hard), sol_hard)
    self.assertEqual(co.convert(very_hard), sol_very_hard)
def file_upload(event, context):
    """Accept a file with either a single or an array of Salesforce objects
    and convert them."""
    s = event['body']
    # Drop the first three and last two lines of the request body to isolate
    # the JSON payload.
    s_split = s.split('\n')
    payload = '\n'.join(s_split[3:-2])
    try:
        obj = json.loads(payload)
        if isinstance(obj, list):
            obj_len = len(obj)
            list_of_converted_objects = [convert(o) for o in obj]
            # Consolidate into a single set of csv files.
            consolidated_csv_files = combine_csv_files(
                csv_files=list_of_converted_objects)
            # Pass the converted objects to be combined, written to strings,
            # and saved as files in S3.
            prefix = str(datetime.datetime.now()).replace(' ', 'T')
            save_files_to_s3(bucket=S3_BUCKET_NAME,
                             csv_files=consolidated_csv_files,
                             prefix=prefix)
        else:
            obj_len = 1
            # Convert record.
            converted_data = convert(obj)
            # Save results to S3.
            prefix = str(datetime.datetime.now()).replace(' ', 'T')
            save_files_to_s3(bucket=S3_BUCKET_NAME,
                             csv_files=converted_data,
                             prefix=prefix)
        # Send success notification.
        send_notification(n=obj_len)
    except Exception as e:
        return {
            'statusCode': 400,
            'body': '400 Bad Request\n\n' + json.dumps(str(e))
        }
    return {'statusCode': 200, 'body': 'Upload successful.'}
def main(): """loops through user inputs and returns IPA notations until __quit__ is typed""" user_in = input("Input: ").lower() while user_in != [''] and user_in != ['__quit__']: ipa = conversion.convert(user_in, retrieve='TOP') if type(ipa) == list: # if retrieve=ALL if len(ipa) > 1: print("List of possible transcriptions: ") for sent_num in range(len(ipa)): print(str(sent_num + 1) + ". " + ipa[sent_num]) # print list of numbered results else: print(ipa[0]) # when ALL is used but there's only one result else: print(ipa) user_in = input("Input: ").lower().split(" ")
def test_convertWithCompr(self):
    """Compare converted data to original data and a manual conversion."""
    c.convert(allZerosMat, self.allZerosHdf5, compr=4)
    self.assertTrue(c.compare(allZerosMat, self.allZerosHdf5))
    self.assertTrue(c.compare(self.allZerosHdf5, allZerosHdf5))

    c.convert(maxIntMat, self.maxIntHdf5, compr=2)
    self.assertTrue(c.compare(maxIntMat, self.maxIntHdf5))
    self.assertTrue(c.compare(self.maxIntHdf5, maxIntHdf5))

    c.convert(randomMat, self.randomHdf5, compr=9)
    self.assertTrue(c.compare(randomMat, self.randomHdf5))
    self.assertTrue(c.compare(self.randomHdf5, randomHdf5))
def convertbulk(event, context):
    """Accept a JSON array of SF records, convert to HMIS format, and output.

    Output options are limited to saving to S3 right now.
    """
    # Get payload from request. Should load as a list of SF objects: List[SfRecord].
    list_of_sf_objects = json.loads(event['body'])

    # Convert all objects to HMIS format.
    list_of_converted_objects = [convert(obj) for obj in list_of_sf_objects]

    # Consolidate into a single set of csv files.
    consolidated_csv_files = combine_csv_files(
        csv_files=list_of_converted_objects)

    # Pass the converted objects to be combined, written to strings, and saved
    # as files in S3.
    prefix = str(datetime.datetime.now()).replace(' ', 'T')
    save_files_to_s3(bucket=S3_BUCKET_NAME,
                     csv_files=consolidated_csv_files,
                     prefix=prefix)

    # Send success notification.
    # send_notification(len(list_of_converted_objects))

    # Send back a confirmation message.
    response = {"statusCode": 200, "body": "Records converted and saved."}
    return response
def test_convert_oku(self):
    self.assertEqual(convert('一億一'), 100000001)
def test_convert_simple(self):
    self.assertEqual(convert('三百九十七'), 397)
def test_convert(self):
    number = '1234567890'
    carrier = 'Sprint'
    target = conversion.convert(number, carrier)
    assert number in target
    assert 'sprintpcs' in target
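# The convert() under test isn't shown in this excerpt. A minimal sketch
# consistent with the assertions above, assuming an SMS email-gateway lookup;
# the domain map and KeyError behavior are assumptions, not the real code.
GATEWAYS = {
    'Sprint': 'messaging.sprintpcs.com',
    # other carriers would be added here
}

def convert(number, carrier):
    """Return the email-to-SMS gateway address for `number` on `carrier`."""
    return f"{number}@{GATEWAYS[carrier]}"

# convert('1234567890', 'Sprint') -> '1234567890@messaging.sprintpcs.com'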
#!/usr/bin/env python
import sys

import conversion

if __name__ == "__main__":
    # Execute only if run as a script.
    try:
        filePath = sys.argv[1]
    except IndexError:
        raise IndexError('docxcavate filename as first argument')
    conversion.convert(filePath, None)
import sys

import c_generation
import conversion
import crossplatform_ir_generation
import desugaring
import normalization
import optimization
import parsing
import tokenization

source_path = sys.argv[1]

with open(source_path, 'r') as f:
    source = f.read()

# Compiler pipeline: source text -> tokens -> AST -> desugared -> normalized
# -> converted -> cross-platform IR -> optimized IR -> C.
tokens = tokenization.tokenize(source)
parsed = parsing.parse(tokens)
desugared = desugaring.desugar(parsed)
normalized = normalization.normalize(desugared)
converted = conversion.convert(normalized)
crossplatform_ir = crossplatform_ir_generation.generate(converted)
optimized = optimization.optimize(crossplatform_ir)
outputted = crossplatform_ir_generation.output(optimized)
print(outputted)

generated = c_generation.generate(optimized)

assert source_path.endswith('.fur')
destination_path = source_path + '.c'

with open(destination_path, 'w') as f:
    f.write(generated)
with open('data/traffic_data.csv') as csvfile:
    reader = csv.reader(csvfile)
    for row in reader:
        feature = Feature(row[4], row[5], row[6], row[7], row[8], row[9],
                          row[12], row[13], row[14], row[15], row[16],
                          row[23], row[24])
        # Road names are keys to dictionaries containing features for all segments.
        ROAD_FEATURES[row[4]].append(feature)
        # Don't add an edge for every year.
        if row[0] == '2000':
            GRAPH[row[8]].append(row[9])
            GRAPH[row[9]].append(row[8])
        coordinates = convert(int(row[6]), int(row[7]))
        coordinates[0] -= 0.0013
        features.append({
            "geometry": {"type": "Point", "coordinates": coordinates},
            "type": "Feature",
            "properties": {
                "title": row[4],
                "pedal_cycles": feature.pedal_cycles,
                "motor_cycles": feature.motor_cycles,
                "cars": feature.cars,
                "buses": feature.buses,
                "light_goods": feature.light_goods,
                "hgv": feature.hgv,
                "all_motor": feature.all_motor,
            },
        })
def convert_experiment():
    global experiment_menu
    input_file.delete(1.0, tk.END)
    output_file.delete(1.0, tk.END)

    # Menu indices 1-5 map directly to experiment_1.txt ... experiment_5.txt,
    # so the five identical branches collapse into one parameterized path.
    selection = experiment_menu.current()
    if 1 <= selection <= 5:  # check selected experiment
        filename = "experiment_%d.txt" % selection
        path = "experiments/" + filename
        if os.path.isfile(path):  # check file exists
            with open(path, 'r') as infile:
                input_file.insert(tk.END, infile.read())  # read file into text window
            conversion.convert(path)  # call conversion script
            open_csv()
            # log
            log_area.insert(tk.END, "[INFO]: \"%s\" converted to csv\n" % filename)
            with open("log/log_file.txt", 'a+') as log_file:
                log_file.write(
                    "[INFO] %s : (CONVERSION) \"%s\" converted to csv\n"
                    % (timestamp(), filename))
        else:
            # log
            log_area.insert(tk.END, "[ERROR]: \"%s\" could not be found\n" % filename)
            with open("log/log_file.txt", 'a+') as log_file:
                log_file.write(
                    "[ERROR] %s : (CONVERSION_ERROR) \"%s\" could not be found\n"
                    % (timestamp(), filename))
    else:
        # log
        with open("log/log_file.txt", 'a+') as log_file:
            log_file.write(
                "[ERROR] %s : (CONVERSION_ERROR) file not found - conversion aborted\n"
                % timestamp())
        log_area.insert(tk.END, '[ERROR]: file not found - conversion aborted\n')
        print('no conversion run')
    log_area.see(tk.END)
def test_wrong_brackets_convert(self):
    """tests whether too few or too many open brackets are correctly recognized"""
    too_many_open = '(degree*h)/(ha*(ha)'
    too_few_open = 'degree/(ha)*h)'
    self.assertEqual(co.convert(too_many_open), "Too many open brackets.")
    self.assertEqual(co.convert(too_few_open), "Too few open brackets.")
def _convert_lists_to_entrez(gene_list, id_type):
    # Only convert if the IDs aren't already Entrez.
    if id_type != 'entrez':
        return conversion.convert(gene_list, id_type, 'entrez')
    else:
        return gene_list
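# Hypothetical usage of the helper above; the 'symbol' id_type value and the
# specific gene identifiers are illustrative assumptions.
entrez_ids = _convert_lists_to_entrez(['TP53', 'BRCA1'], 'symbol')
unchanged = _convert_lists_to_entrez(['7157', '672'], 'entrez')  # passthrough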
def test_illegal_units_convert(self):
    """tests if illegal expressions are correctly recognized"""
    illegal_unit = 'blah'
    self.assertEqual(co.convert(illegal_unit), 'Invalid expression blah')
    'name': '',
    'genre': '',
    'type': '',
    'episodes': '',
    'ratings': '',
    'members': '',
    'image': 'http://placehold.it/300x200/000000/&text=Header'
}

Anime1_name = input("Select Anime: ")

# Removing the chosen anime from the list.
genres1 = []
for row in rows:
    if row[1] == Anime1_name:
        genres1 = convert.convert(row[2])
        # rows.pop(row)
        break
print(f"Anime1 is {Anime1_name}")
Anime1 = AT.Anime(Anime1_name, genres1)

# Rendering a random anime profile to the user.
for row in rows:
    name = row[1]
    genres2 = convert.convert(row[2])
    anime_type = row[3]
    episodes = row[4]
    ratings = row[5]
    members = row[6]
    print(f"Anime 2 is {name}")
    Anime2 = AT.Anime(name, genres2)
def test_convert_oku_sen(self):
    self.assertEqual(convert('一億七万五千八百五十四'), 100075854)
studentList = []
courseList = []

# Creating the Student instances.
with open('studentsByAvailability.json') as data_file:
    data = json.load(data_file)
    for key, value in data.items():
        student = Student(key)
        for items in value:
            if type(items) is str:
                student.name = items
            if type(items) is dict:
                for slot_key, slot_value in items.items():
                    student.availability.append(conversion.convert(slot_value))
        studentList.append(student)

# Creating the Course instances.
with open('classes.json') as data_file:
    data = json.load(data_file)
    for key, value in data["classes"].items():
        course = Course(key)
        course.name = value['name']
        times = value['times']
        course.time1 = conversion.convert(times['time1'])
        course.time2 = conversion.convert(times['time2'])
        courseList.append(course)
def to_csv_with_format(file, format_name):
    with io.StringIO() as f:
        convert(file, f, format_kml, feature_output_formats[format_name], True)
        return f.getvalue()
def register_task():
    form = request.form
    target = convert(form['number'], form['carrier'])
    send_message(form['message'], target)
    return redirect(url_for('main'))
def test_convert_juu(self):
    self.assertEqual(convert('四十'), 40)
def test_convert_sen(self):
    self.assertEqual(convert('四千'), 4000)
def test_convert_tan(self):
    self.assertEqual(convert('四'), 4)
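# The convert() exercised by the kanji-numeral tests above isn't included in
# this excerpt. A minimal table-driven sketch consistent with those
# assertions (an assumption, not the project's actual implementation):
DIGITS = {'一': 1, '二': 2, '三': 3, '四': 4, '五': 5,
          '六': 6, '七': 7, '八': 8, '九': 9}
SMALL_UNITS = {'十': 10, '百': 100, '千': 1000}
BIG_UNITS = {'万': 10 ** 4, '億': 10 ** 8}

def convert(text):
    """Convert a kanji numeral such as '一億七万五千八百五十四' to an int."""
    total = section = current = 0
    for ch in text:
        if ch in DIGITS:
            current = DIGITS[ch]
        elif ch in SMALL_UNITS:
            # A bare unit like '十' counts as 1 * 10.
            section += (current or 1) * SMALL_UNITS[ch]
            current = 0
        elif ch in BIG_UNITS:
            # A big unit multiplies everything accumulated in this group.
            total += ((section + current) or 1) * BIG_UNITS[ch]
            section = current = 0
    return total + section + current

# convert('三百九十七') -> 397; convert('一億一') -> 100000001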