def complete(self, word, state):
    """Return the next possible completion for ``word``.

    This is called successively with ``state == 0, 1, 2, ...`` until it
    returns ``None``. The completion should begin with ``word``.

    :param word: the word to complete
    :param state: an int, used to iterate over the choices
    """
    try:
        import rl
        # TODO: doing this manually right now, but may make sense to
        # exploit
        rl.completion.suppress_append = True
    except ImportError:
        pass
    word = transform(word, self.transforms, word=True)
    if state == 0:
        self.matches = self.get_matches(word)
    try:
        match = self.matches[state]
    except IndexError:
        return None
    else:
        return transform(match, self.transforms, word=True, inverse=True)
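# This complete() method follows the GNU readline completer protocol. A
# minimal wiring sketch; the WordCompleter class is a hypothetical
# placeholder for whatever object provides complete(word, state):
import readline

completer = WordCompleter()                 # hypothetical completer object
readline.set_completer(completer.complete)  # readline calls it with state 0, 1, 2, ...
readline.parse_and_bind('tab: complete')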
def convert2beamer_full(self):
    """Convert to LaTeX beamer.

    @return the text in LaTeX format
    """
    state = w2bstate()
    result = ['']  # start with one empty line as line 0
    codebuffer = []
    autotemplatebuffer = []
    autotemplate = []
    nowikimode = False
    codemode = False
    autotemplatemode = False

    for line in self.lines:
        (line, nowikimode) = self.get_nowikimode(line, nowikimode)
        if nowikimode:
            result.append(line)
        else:
            (line, _codemode) = self.get_codemode(line, codemode)

            if _codemode and not codemode:  # code mode was turned on
                codebuffer = []
            elif not _codemode and codemode:  # code mode was turned off
                expand_code_segment(result, codebuffer, state)
            codemode = _codemode

            if codemode:
                codebuffer.append(line)
            else:
                (line, _autotemplatemode) = self.get_autotemplatemode(line, autotemplatemode)
                if _autotemplatemode and not autotemplatemode:  # autotemplate mode was turned on
                    autotemplatebuffer = []
                elif not _autotemplatemode and autotemplatemode:  # autotemplate mode was turned off
                    self.expand_autotemplate_opening(result, autotemplatebuffer,
                                                     state, autotemplate)
                autotemplatemode = _autotemplatemode

                if autotemplatemode:
                    autotemplatebuffer.append(line)
                else:
                    state.current_line = len(result)
                    result.append(transform(line, state))

    result.append(transform("", state))  # close open environments

    if state.frame_opened:
        result.append(self.get_frame_closing(state))
    if state.autotemplate_opened:
        result.append(self.get_autotemplate_closing())

    # insert defverbs somewhere at the beginning
    expand_code_defverbs(result, state)

    return result
def run_config(config, search, bitcodefile, originalConfig, limit):
    print_config(config, "config_temp.json")
    result = transform.transform(bitcodefile, "config_temp.json")
    if result == 1:
        print("check VALID_config_" + str(search) + ".json for a valid config file")
        print_config(config, "VALID_config_" + bitcodefile + "_" + str(search) + ".json")
        print_diff(config, originalConfig, "diff_" + str(search) + ".cov")
        utilities.log_config(config, "VALID", "log.bf", search)
    elif result == 0:
        print("\tINVALID CONFIG")
        print_config(config, "INVALID_config_" + bitcodefile + "_" + str(search) + ".json")
        utilities.log_config(config, "INVALID", "log.bf", search)
    elif result == -1:
        print("\tFAIL TYPE 1")
        print_config(config, "FAIL1_config_" + bitcodefile + "_" + str(search) + ".json")
        utilities.log_config(config, "FAIL1", "log.bf", search)
    elif result == -2:
        print("\tFAIL TYPE 2")
        print_config(config, "FAIL2_config_" + bitcodefile + "_" + str(search) + ".json")
        utilities.log_config(config, "FAIL2", "log.bf", search)
    elif result == -3:
        print("\tFAIL TYPE 3")
        print_config(config, "FAIL3_config_" + bitcodefile + "_" + str(search) + ".json")
        utilities.log_config(config, "FAIL3", "log.bf", search)
    search += 1
    if search > limit and limit != -1:
        sys.exit(0)
    return search
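# The result-code dispatch above recurs in several of these harnesses. A
# table-driven sketch (a refactoring suggestion, not the original code) keeps
# the labels in one place; codes and labels are taken from the branches above.
RESULT_LABELS = {1: "VALID", 0: "INVALID", -1: "FAIL1", -2: "FAIL2", -3: "FAIL3"}

def run_config_compact(config, search, bitcodefile, originalConfig, limit):
    print_config(config, "config_temp.json")
    result = transform.transform(bitcodefile, "config_temp.json")
    label = RESULT_LABELS[result]
    print_config(config, "%s_config_%s_%d.json" % (label, bitcodefile, search))
    utilities.log_config(config, label, "log.bf", search)
    if result == 1:  # only a valid config also gets a diff
        print_diff(config, originalConfig, "diff_%d.cov" % search)
    search += 1
    if search > limit and limit != -1:
        sys.exit(0)
    return search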
def test_insert_delete_2(self):
    o1 = transform.delete_helper(0)
    o2 = transform.insert_helper(10, 'x')
    expected = transform.insert_helper(9, 'x')
    received = transform.transform(o2, o1)
    self.assertEqual(expected, received)

def test_delete_insert_2(self):
    o1 = transform.insert_helper(3, 'z')
    o2 = transform.delete_helper(3)
    expected = transform.delete_helper(4)
    received = transform.transform(o2, o1)
    self.assertEqual(expected, received)

def test_insert_insert_5(self):
    o1 = transform.insert_helper(0, 'z')
    o2 = transform.insert_helper(0, 'x')
    expected = transform.insert_helper(1, 'x')
    received = transform.transform(o2, o1)
    self.assertEqual(expected, received)

def test_insert_insert_3(self):
    o1 = transform.insert_helper(3, 'z')
    o2 = transform.insert_helper(3, 'z')
    expected = None
    received = transform.transform(o2, o1)
    self.assertEqual(expected, received)

def test_delete_delete_1(self):
    o1 = transform.delete_helper(0)
    o2 = transform.delete_helper(0)
    expected = None
    received = transform.transform(o2, o1)
    self.assertEqual(expected, received)
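# The five tests above pin down this operational transform's shifting and
# tie-breaking rules. The sketch below is inferred from the expected values
# alone (a hypothetical stand-in for the real transform module, with a tuple
# representation assumed), but it satisfies every assertion in these tests.
def insert_helper(pos, char):
    return ('insert', pos, char)

def delete_helper(pos):
    return ('delete', pos)

def transform(op, against):
    """Rebase ``op`` so it applies after ``against`` has been applied."""
    if op == against:
        return None                      # identical ops cancel out
    kind, pos = op[0], op[1]
    akind, apos = against[0], against[1]
    if akind == 'delete':
        if kind == 'delete' and pos == apos:
            return None                  # both deleted the same character
        if pos > apos:
            pos -= 1                     # an earlier delete shifts op left
    else:                                # ``against`` is an insert
        if pos >= apos:
            pos += 1                     # an earlier insert shifts op right
    return (kind, pos) if kind == 'delete' else (kind, pos, op[2])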
def main():
    """Entry point for all code"""
    print("starting up")
    df = format_data.read_data("data/biodeg.csv")
    df = transform.transform(df, "class")
    nn_model.build_nn(df, "class")
def main():
    extra_directives.setup()
    blockdiag_redmine_support.setup()
    try:
        output = transform(writer=WikiWriter(), part='html_body')
        if output:
            sys.stdout.write(output)
    except Exception as e:
        sys.stdout.write('<strong style="color:red">Error Parsing ReSt: %r</strong>' % e)
def _format(self, word):
    transliteration = self._config.get('transliteration')
    if transliteration is not None:
        transliterated = transform(word, {"rules": transliteration})
    else:
        transliterated = word
    if self._args.phonetic:
        return ('%s [%s]' % (transliterated, word)).encode('utf-8')
    return transliterated.encode('utf-8')
def main(cmd_args):
    A, C, core_vars = init(cmd_args)
    subcmd = cmd_args[0]  # subcommand
    if subcmd == 'prep':
        import prep
        prep.prepare(A, C, core_vars)
    elif subcmd == 'anal':
        import anal
        anal.analyze(A, C, core_vars)
    elif subcmd == 'transform':
        import transform
        transform.transform(A, C, core_vars)
    elif subcmd == 'plot':
        import plot
        plot.plot(A, C, core_vars)
    elif subcmd == 'plotmp':
        import plotmp
        plotmp.plotmp(A, C, core_vars)
def load(self, node):
    super(TransformList, self).load(node)
    transforms = node.find('Transforms')
    if transforms is None:
        raise MalformedMessageError("TransformList requires Transforms tag")
    for transnode in transforms.getchildren():
        trans = transform.transform(node=transnode)
        self.transforms.append(trans)
def save_html(docxfile, filename):
    result = transform(docxfile)
    result = fixups.fixup(docxfile, result)
    print("=== Unrecognized styles")
    for k, v in sorted(fixups.unrecognized_styles.items(),
                       key=lambda pair: pair[1]):
        print(k, v)
    print()
    htmodel.save_html(filename, result)
def swap_values(means, sdevs, beta, vcv, dropbin, totals):
    # Find the bins in beta and vcv
    bins = find_bins(means, sdevs, beta, vcv)
    # Transform the entire beta and vcv matrix
    T = transform.transform(len(beta), bins, dropbin, totals)
    vcv2 = transform.swap_vcv(vcv, T)
    beta2 = transform.swap_beta(beta, T)
    means2 = [beta2[ii] for ii in bins]
    sdevs2 = [np.sqrt(vcv2[ii, ii]) for ii in bins]
    return means2, sdevs2
def create_line_transform(self, name, command):
    try:
        if command == 4:
            data = self.initial_lines_data[name]
            print(data)
            self.create_polygon_line(data)
        else:
            tmp_coordinates = []
            data = self.lines_data[name]
            for i in range(0, len(data), 2):
                if command == 1:
                    tmp_coordinates.append(data[i] + self.cur_x)
                    tmp_coordinates.append(data[i + 1] + self.cur_y)
                elif command == 2:
                    tmp_coordinates.append(transform(self.center_x, self.kx, data[i]))
                    tmp_coordinates.append(transform(self.center_y, self.ky, data[i + 1]))
                elif command == 3:
                    tmp_coordinates.append(turn_x(self.center_x, data[i],
                                                  self.center_y, data[i + 1], self.beta))
                    tmp_coordinates.append(turn_y(self.center_x, data[i],
                                                  self.center_y, data[i + 1], self.beta))
            self.lines_data[name] = tmp_coordinates
            self.create_polygon_line(self.lines_data[name])
    except KeyError:
        print('Incorrect key to create line')
def run(self):
    max_surf = len(self.lens.surface_list) - 1
    for ids in range(1, max_surf):
        surf0 = self.lens.surface_list[ids - 1]
        surf1 = self.lens.surface_list[ids]
        surf2 = self.lens.surface_list[ids + 1]

        # transform if tilted or decentered surface
        if surf1.decentered or surf1.tilted:
            self.ray_list[-1] = transform(self.ray_list[-1], surf1.tilt,
                                          surf1.decenter).run()

        # == refract ==================================================
        ray_new = refract(surf0, surf1, self.ray_list[-1], self.wave).run()
        self.ray_list.append(ray_new)

        # remove transform
        if surf1.decentered or surf1.tilted:
            self.ray_list[-1] = transform(self.ray_list[-1], surf1.tilt,
                                          surf1.decenter).unrun()
        if surf2.decentered or surf2.tilted:
            self.ray_list[-1] = transform(self.ray_list[-1], surf2.tilt,
                                          surf2.decenter).run()

        # == transfer =================================================
        ray_new = transfer(surf1, surf2, self.ray_list[-1], self.wave).run()
        self.ray_list.append(ray_new)

        if surf2.decentered or surf2.tilted:
            self.ray_list[-1] = transform(self.ray_list[-1], surf2.tilt,
                                          surf2.decenter).unrun()
    return self
def test_unrotated_picture(self):
    width = 10
    height = 60
    lon_min = 20
    lon_max = 30
    lat_min = 10
    lat_max = 70
    top_left = np.array([lon_min, lat_max])
    top_right = np.array([lon_max, lat_max])
    bottom_left = np.array([lon_min, lat_min])
    bottom_right = np.array([lon_max, lat_min])

    # check that top left is sane
    res = transform.transform(np.array([0, 0]), top_right, top_left,
                              bottom_left, bottom_right,
                              np.array([width, height]))
    self.assertTrue(np.allclose(res, top_left))

    # check that top right is sane
    res = transform.transform(np.array([width, 0]), top_right, top_left,
                              bottom_left, bottom_right,
                              np.array([width, height]))
    self.assertTrue(np.allclose(res, top_right))

    # check that bottom left is sane
    res = transform.transform(np.array([0, height]), top_right, top_left,
                              bottom_left, bottom_right,
                              np.array([width, height]))
    self.assertTrue(np.allclose(res, bottom_left))

    # check that bottom right is sane
    res = transform.transform(np.array([width, height]), top_right, top_left,
                              bottom_left, bottom_right,
                              np.array([width, height]))
    self.assertTrue(np.allclose(res, bottom_right))
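# The four corner checks above pin the mapping down exactly at the image
# corners. A minimal stand-in that satisfies them is plain bilinear
# interpolation between the corner coordinates; argument order follows the
# calls above, and the real module may handle rotated pictures differently.
import numpy as np

def transform(pixel, top_right, top_left, bottom_left, bottom_right, size):
    fx, fy = pixel[0] / size[0], pixel[1] / size[1]   # fractions across the image
    top = top_left + fx * (top_right - top_left)
    bottom = bottom_left + fx * (bottom_right - bottom_left)
    return top + fy * (bottom - top)                  # y grows downward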
def mol_to_svg(self, mol, before=None, after=None):
    """before and after should be methods or functions that take one
    argument, the svg_out instance, and do whatever they want with it,
    usually adding something to the resulting DOM tree"""
    self.document = dom.Document()
    top = dom_extensions.elementUnder(
        self.document, "svg",
        attributes=(("xmlns", "http://www.w3.org/2000/svg"),
                    ("version", "1.0")))
    self.top = dom_extensions.elementUnder(
        top, "g",
        attributes=(("stroke", "#000"),
                    ("stroke-width", "1.0")))
    x1, y1, x2, y2 = None, None, None, None
    for v in mol.vertices:
        if x1 is None or x1 > v.x:
            x1 = v.x
        if x2 is None or x2 < v.x:
            x2 = v.x
        if y1 is None or y1 > v.y:
            y1 = v.y
        if y2 is None or y2 < v.y:
            y2 = v.y
    w = int(x2 - x1 + 2 * self.margin)
    h = int(y2 - y1 + 2 * self.margin)
    top.setAttribute("width", str(w))
    top.setAttribute("height", str(h))
    self.transformer = transform.transform()
    self.transformer.set_move(-x1 + self.margin, -y1 + self.margin)
    self.molecule = mol
    if before:
        before(self)
    for e in copy.copy(mol.edges):
        self._draw_edge(e)
    for v in mol.vertices:
        self._draw_vertex(v)
    if after:
        after(self)
    return self.document
def run(self):
    if self._args.words is None:
        words = [self._generate() for x in range(self._args.repeat)]
    else:
        words = self._args.words
    for word in words:
        if 'changes' in self._config:
            for change in self._config['changes']:
                word = transform(word, change)
                if self._args.verbose:
                    print('%s: %s' % (change['name'], self._format(word)))
        if 'changes' not in self._config or not self._args.verbose:
            print(self._format(word))
        else:
            print()
def run_inner_product(dataset, percent, rg_type, recall, seed=808, root="../../data"):
    # performance of random projection is bad
    # after being transformed into an inner product problem
    prefix = f"{root}/{dataset}/{dataset}"
    xb = fvecs_read(f"{prefix}_base.fvecs")
    xq = fvecs_read(f"{prefix}_query.fvecs")
    rg = _load_rg(seed, dataset, root, xb, xq, percent, rg_type=rg_type)
    gt = _load_gt(seed, dataset, root, xb, xq, rg, percent, rg_type=rg_type)
    if recall:
        scale = np.percentile(np.abs(xb), 75, axis=0, keepdims=True)
        xb /= scale
        xq /= scale
        rg /= scale
        xq = xq[:100]
        rg = rg[:100]
        gt = gt[:100]
    for p in [2, 4, 8, 16]:
        print("p = {}, ranked by p-dist".format(p))
        x, q = transform(xb, xq, rg, p=p, intervals=False)
        dist = -np.dot(q, x.T)
        test_recalls(np.argsort(dist), gt)

        print("p = {}, ranked by random projection".format(p))
        x, q = transform(xb, xq, rg, p=p, intervals=False)
        proj = np.random.normal(size=(x.shape[1], 32))
        x, q = np.dot(x, proj), np.dot(q, proj)
        dist = -np.dot(q, x.T)
        test_recalls(np.argsort(dist), gt)
def complete(self, word, state):
    """Return the next possible completion for ``word``.

    This is called successively with ``state == 0, 1, 2, ...`` until it
    returns ``None``. The completion should begin with ``word``.

    :param word: the word to complete
    :param state: an int, used to iterate over the choices
    """
    try:
        import rl
        # TODO: doing this manually right now, but may make sense to
        # exploit
        rl.completion.suppress_append = True
    except ImportError:
        pass
    word = transform(word, self.transforms, word=True)
    try:
        match = self.get_matches(word)[state]
        return transform(match, self.transforms, word=True, inverse=True)
    except IndexError:
        return None
def cal_fitness(self):
    '''Calculate fitness score.'''
    # cluster_result, score = kmeans(transform(self.chromosome, RAW_DATA), K, 1)
    cluster_result = hclust(transform(self.chromosome, RAW_DATA), K)
    ari = getScore('ARI', cluster_result, labelname=LABELNAME, label=LABEL)
    # ami = getScore('AMI', cluster_result, labelname=LABELNAME, label=LABEL)
    ami = 0
    mcc = getScore('MCC', cluster_result, labelname=LABELNAME, label=LABEL)
    gini = getScore('gini', cluster_result)
    fitness = ari
    return fitness, ari, ami, mcc, gini
def test_transform1(self):
    """
    :return: pass or fail if the transform method works with ds1 and ds2
    """
    print("test_transform1")
    ds1_headers = ["date", "col1", "col2"]
    ds1 = create_dataset("ds1", ds1_headers, "date")
    ds1.df = pandas.DataFrame(columns=ds1_headers)
    ds1.df.loc[0] = pandas.Series({"date": "2020-09-11", "col1": "test1", "col2": "test"})
    ds1.df.loc[1] = pandas.Series({"date": "2020-09-15", "col1": "test2", "col2": "test"})
    ds1.df.loc[2] = pandas.Series({"date": "2020-09-18", "col1": "test3", "col2": "test"})

    ds2_headers = ["DateString", "Name"]
    ds2 = create_dataset("ds2", ds2_headers, "DateString")
    ds2.df = pandas.DataFrame(columns=ds2_headers)
    ds2.df.loc[0] = pandas.Series({"DateString": "2020-09-12", "Name": "Test1"})
    ds2.df.loc[1] = pandas.Series({"DateString": "2020-09-18", "Name": "Test2"})

    # TODO: Is this a problem?
    result = transform.transform(ds1, ds2)

    self.assertEqual(len(result), 1)
    stat = result[0]
    self.assertIsNotNone(stat)
    self.assertIsInstance(stat, classes.CovidStat)
    self.assertEqual(stat.idx, 0)
    self.assertEqual(str(stat.date), "2020-09-18 00:00:00")
    self.assertIsNone(stat.cases)
    self.assertIsNone(stat.deaths)
    self.assertIsNone(stat.recovered)
def pipeline_img(img, img_name):
    left_lane = LeftLane()
    right_lane = RightLane()
    left_lane.debug = True
    right_lane.debug = True
    left_lane.name = get_save_name(img_name, '7_windowed_left')
    right_lane.name = get_save_name(img_name, '7_windowed_right')

    # undistort image
    img = transform.undistort(img, mtx, dist)
    save_image(img, img_name, '1_undistort')

    # save example image with mask lines
    img_lines = img.copy()
    cv2.polylines(img_lines, [src.astype(int)], True, (0, 0, 255), 3)
    save_image(img_lines, img_name, '2_undistort_lines')

    # create combined threshold
    img_thresh = transform.combined_thresh(img)
    # save binary image to output folder
    save_image(img_thresh * 255, img_name, '3_binary')

    # transform image based on src and dst defined above & save
    img_transform = transform.transform(img, M)
    save_image(img_transform, img_name, '4_transformed')

    # save example image with mask lines for transformed image
    img_lines = img_transform.copy()
    cv2.polylines(img_lines, [dst.astype(int)], True, (0, 0, 255), 3)
    save_image(img_lines, img_name, '5_transformed_lines')

    # create combined threshold
    img_transform = transform.combined_thresh(img_transform)
    # save binary image to output folder
    save_image(img_transform * 255, img_name, '6_warped_binary')

    left_lane.find_lane_for_frame(img_transform)
    right_lane.find_lane_for_frame(img_transform)

    name = get_save_name(img_name, '8_lanes_found')
    plot_polyline(img_transform, name, left_lane, right_lane)

    lane_img = create_lane_image(img, img_transform, left_lane, right_lane)
    save_image(lane_img, img_name, '9_result')
    return lane_img
async def make_audio(title: str):
    article = await repo.find_article(title)
    assert article
    text = article.full_text()
    index = 0
    empty = Path("3s.mp3").read_bytes()
    for paragraph in text.split("\n"):
        if not paragraph:
            continue
        for sentence in utils.tokenize(paragraph):
            path = Path("audio") / f"{index:03}.mp3"
            audio = empty if sentence == "---" else transform.transform(sentence)
            path.write_bytes(audio)
            print(f"Save {path}")
            index += 1
def hough_lines(img):
    processed = transform(img)
    lines = cv2.HoughLines(processed, 1, np.pi / 180, 60)
    if lines is not None:
        for line in lines:
            rho, theta = line[0]
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * (a))
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * (a))
            cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
    return img
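# cv2.HoughLines expects a single-channel edge map, so the transform() used
# above must produce one. Its definition is not part of this snippet; a
# plausible stand-in (grayscale, blur, and Canny are assumptions):
import cv2

def transform(img):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    return cv2.Canny(blurred, 50, 150)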
def run_workflow(ACCESS_TOKEN):
    URIS = []
    PLAYLIST_ID = '4syTBdEq3o8D3ClyKFAbGj'

    # retrieve all of the song files
    song_files = read_all_files('music')

    # authenticate with spotify
    auth = authenticate(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET)

    for file in song_files:
        # parse song into track object
        track_obj = transform(file)
        is_last_song = song_files[-1] == file
        skip = none_check(track_obj, is_last_song, URIS, PLAYLIST_ID, ACCESS_TOKEN)
        if skip:
            continue

        # move it to the dropbox
        move(root() + track_obj.filepath, "dropbox")

        # use request obj to search for the song in spotify
        response_items = spotify_req.search("", track_obj, auth['access_token'])
        skip = none_check(response_items, is_last_song, URIS, PLAYLIST_ID, ACCESS_TOKEN)
        if skip:
            move(dropbox() + track_obj.filepath, "quarantine")
            continue

        # take the parsed response and find the best match
        match = find_match(track_obj, response_items)
        URIS.append("spotify:track:" + match.id)

        # delete the track from the dropbox
        delete(dropbox() + track_obj.filepath)

        if len(URIS) == 99 or is_last_song:
            spotify_req.add_to_spotify(URIS, PLAYLIST_ID, ACCESS_TOKEN)
            URIS = []
def check(filename, max_tests):
    c = circ.parse(filename)
    try:
        cnf = transform.transform(c)
    except Exception as e:
        print_error("Transformation of circuit '%s' failed." % c.name)
        print(e)
        print(traceback.format_exc())
        return (False, 0, 0)

    inputs = c.getInputs()
    outputs = c.getOutputs()

    def validate(sol):
        invalues = dict()
        for i in inputs:
            try:
                invalues[i] = sol[i]
            except KeyError:
                invalues[i] = False
        result = c.simulate(invalues)
        for o in outputs:
            try:
                oval = sol[o]
            except KeyError:
                print_error("Did not find value for output signal '%s' in solution" % o)
                return False
            if oval != result[o]:
                print_error("Inconsistent output value for signal '%s'" % o)
                return False
        return True

    tests = 0
    good = 0
    solutions = allSAT(cnf, c.getInputs())
    while tests < max_tests:
        solution = next(solutions)
        if not solution:
            break
        if validate(solution):
            good += 1
        tests += 1
    return (True, good, tests)
def Run(self):
    old_filename = self.mei_file
    if (len(old_filename) < EXT_LENGTH or
            old_filename[-EXT_LENGTH:] not in EXT):
        logging.info("No file extension provided; " + EXT[0] + " used.")
        old_filename += EXT[0]
    old_MEI_doc = XmlImport.documentFromFile(old_filename)
    logging.info('running test case ' + self.name + ' Input: ' + old_filename)
    new_MEI_doc = transform(old_MEI_doc, self.transform_data)
    new_filename = (old_filename[:-EXT_LENGTH] + self.outsuffix + '_' +
                    old_filename[-EXT_LENGTH:])
    status = XmlExport.meiDocumentToFile(new_MEI_doc, new_filename)
    if status:
        logging.info("Done. Transformed file saved as " + new_filename)
    else:
        logging.error("Transformation failed")
    return new_MEI_doc
def __getitem__(self, idx):
    sample = self._samples[idx]
    img = sample.get_data()
    labels_orig = sample.get_label()
    n_orig = len(labels_orig)
    labels = list(labels_orig)
    img = transform(img, self._cfg, self._is_train, not self._prepared)
    if self._debug_imgs:
        self._debug_image(img, labels_orig, sample.id)
    channel_swap = (2, 0, 1)
    img = np.transpose(img, axes=channel_swap)
    if self._output_idx:
        return img, np.float32(labels), np.int32(idx)
    else:
        return img, np.float32(labels)
def cipher():
    """Transforms a message into the corresponding cryptogram by applying a
    sum modulo 26 of k positions in the alphabet, where k is the cipher key.
    """
    k = int(input('communicate the key: '))
    msg = input('communicate the message: ')
    # msg is a vector of 140 characters containing the message
    crt = [''] * 140
    # crt is a vector of 140 characters containing the cryptogram
    for h in range(0, 140):
        if h < len(msg):
            t = transform(k, msg[h])
            crt[h] = t
    print(''.join(crt))
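# The transform() helper used by cipher() is not shown. A minimal sketch that
# matches the docstring's "sum modulo 26 of k positions"; the treatment of
# non-letter characters is an assumption:
def transform(k, ch):
    if ch.isalpha():
        base = ord('A') if ch.isupper() else ord('a')
        return chr((ord(ch) - base + k) % 26 + base)
    return ch  # pass non-letters through unchanged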
def upload_file():
    if request.method == "POST":
        # check if the post request has the file part
        if "file" not in request.files:
            flash("No file part")
            return redirect(request.url)
        file = request.files["file"]
        # if the user does not select a file, the browser may also
        # submit an empty part without a filename
        if file.filename == "":
            flash("No selected file")
            return redirect(request.url)
        if file and allowed_file(file.filename):
            fout = transform(file_in=file, file_out=io.BytesIO())
            return send_file(fout, as_attachment=True,
                             attachment_filename="output.png")
    return render_template_string(r"""
<!doctype html>
<head>
    <title>Convert An Image</title>
</head>
<body>
    <h1>Convert An Image to Text!</h1>
    {% with messages = get_flashed_messages() %}
    {% if messages %}
    <ul class=flashes>
    {% for message in messages %}
        <li>{{ message }}</li>
    {% endfor %}
    </ul>
    {% endif %}
    {% endwith %}
    <p>Note that this may take a minute to process.</p>
    <form method=post enctype=multipart/form-data>
    <p>
        <input type=file name=file>
        <input type=submit value=Upload>
    </p>
    </form>
</body>
""")
def run_one(original_xml_path, expected_xml_path, apply_sorter=True):
    transformed_xml = transform(original_xml_path, raise_if_invalid=False)
    if apply_sorter:
        transformed_xml = sort_transform(transformed_xml)
    transformed_xml_str = ET.tostring(transformed_xml, pretty_print=True,
                                      **xml_declaration_props)
    with open(original_xml_path.replace('.xml', '_converted.xml'), 'wb') as f:
        f.write(transformed_xml_str)
    expected_xml = ET.parse(expected_xml_path)
    if apply_sorter:
        expected_xml = sort_transform(expected_xml)
    expected_xml_str = ET.tostring(expected_xml, pretty_print=True)
    text_diff_ratio = SequenceMatcher(None, expected_xml_str,
                                      transformed_xml_str).ratio()
    # diff = main.diff_trees(transformed_xml, expected_xml,
    #                        formatter=formatting.XMLFormatter())
    # print(diff)
    # print(transformed_xml_str.decode("utf-8"))
    validation_error = '' if is_valid(transformed_xml) else '!'
    print(original_xml_path, text_diff_ratio, validation_error)
def pipeline_writeup(display=True, save=True):
    ######################################################################
    ### Camera calibration (camera matrix and distortion coefficients) ###
    cal_img_dir = '../camera_cal'
    test_img_dir = '../test_images'
    nx = 9  # the number of inside corners in x
    ny = 6  # the number of inside corners in y
    mtx, dist = get_calibration(cal_img_dir, test_img_dir, nx, ny,
                                display=display, save=save)
    # dist_pickle = pickle.load(open("wide_dist_pickle.p", "rb"))
    # mtx = dist_pickle["mtx"]
    # dist = dist_pickle["dist"]

    ######################################################################
    ### Read test image ###
    # img_path = sorted(glob.glob(os.path.join(test_img_dir, '*.png')))[3]
    # img_RGB = (mpimg.imread(img_path)*255).astype(np.uint8)
    img_path = sorted(glob.glob(os.path.join(test_img_dir, '*.jpg')))[0]
    img_RGB = mpimg.imread(img_path)
    if display:
        plt.imshow(img_RGB)
        plt.show()

    ######################################################################
    ### Use different thresholds ###
    lane_binary, lane_gray = threshold(img_RGB, display=display, save=save)

    ######################################################################
    ### Undistortion and perspective transform ###
    ## corners for perspective transform chosen manually (x, y)
    img_H, img_W = img_RGB.shape[:2]
    trans_src = np.float32([[225, 697], [1078, 697], [705, 460], [576, 460]])
    offset = 360
    trans_dst = np.float32([[img_W/2-offset, img_H], [img_W/2+offset, img_H],
                            [img_W/2+offset, 0], [img_W/2-offset, 0]])
    # trans_src = np.float32([[585,460], [203,720], [1127,720], [695,460]])
    # trans_dst = np.float32([[320,0], [320,720], [960,720], [960,0]])
    warped_img, M, Minv = transform(lane_gray, trans_src, trans_dst, mtx, dist,
                                    display=display, save=save)

    ######################################################################
    ### Sliding-window lane search ###
    _, _, _, _, _, _, _ = sliding_win_search(warped_img, None, None,
                                             display=display, save=save)
def main(args):
    prediction = args.prediction
    train = args.train
    if prediction:
        test_path = args.data_dir
        test_file = args.test_file
        context = args.Context
        res_dir = args.res_dir
        test_SNPs = pd.read_csv(test_path + '/' + test_file, sep='\t')
        test_SNPs = test_SNPs[['Chromosome', 'Start', 'End']]
        data = score.get_score(test_SNPs)
        # kde: get transformed score
        trans_score = transform.transform(data, context)
        # load WEVar model and predict WEVar score
        res = model.WEVar(trans_score, context)
        test_SNPs['WEVar_{}'.format(context)] = res
        test_SNPs.to_csv('{}/{}'.format(res_dir, test_file), index=False, sep='\t')
    elif train:
        data_path = args.data_dir
        train_file = args.train_file
        train_SNPs = pd.read_csv(data_path + '/' + train_file, sep='\t')
        X = train_SNPs[['Chromosome', 'Start', 'End']]
        X = score.get_score(X)
        Y = train_SNPs['Labels'].to_numpy()
        train_model.train(X, Y, train_file)
        train_model.test(X, Y, train_file)
def comparison(comparisons, calflat, rawdatadir='', overwrite=False):
    basenames = []
    for infile in comparisons.split(','):
        # bias subtraction, overscan region removal,
        # bad pixel restoring, header correction
        ovname, stat = bias_overscan(infile, rawdatadir=rawdatadir,
                                     overwrite=overwrite)
        if not stat:
            return
        # extraction and making each channel image
        basename, stat = mkchimage(ovname, calflat, overwrite=overwrite)
        if stat:
            basenames.append(basename)
        else:
            return

    identify_dispersion(basenames, overwrite=overwrite)
    fitcoord_dispersion(basenames, overwrite=overwrite)

    for basename in basenames:
        # Transforming
        stat = transform(basename, basenames[0], calflat, overwrite=overwrite)
        if not stat:
            return '', False
        # Making data cube
        cubefile, stat = mkcube(basename, 'wc', overwrite=overwrite)
        if not stat:
            return cubefile, False
        # Getting shift between sky spectrum and object spectra
        sky_shift_data, stat = get_sky_shift(basename, overwrite=overwrite)
        if not stat:
            return sky_shift_data, False
    return
def main():
    expectedArray = getFile('./expected.txt')
    testCasesArray = getFile('./one.txt')

    table = PrettyTable()
    table.field_names = ['Test Case', 'Result', 'Expected', 'Success']

    for i in range(len(testCasesArray)):
        case = testCasesArray[i]
        expect = expectedArray[i]
        npd = case.split(' ')
        result = transform(npd[0], int(npd[1]), int(npd[2]))
        success = result == expect
        table.add_row([case, result, expect, success])

    print(table)  # display the comparison table
def run_config(config, bitcodeFile, searchConfig, originalConfig):
    global search
    utilities.print_config(config, "config_temp.json")
    result = transform.transform(bitcodeFile, "config_temp.json")
    if result == 1:
        utilities.print_config(config, "VALID_config_" + bitcodeFile + "_" + str(search) + ".json")
        utilities.log_config(config, "VALID", "log.dd", search)
        utilities.print_diff(searchConfig, originalConfig,
                             "diff_" + bitcodeFile + "_" + str(search) + ".json")
    elif result == 0:
        utilities.print_config(config, "INVALID_config_" + bitcodeFile + "_" + str(search) + ".json")
        utilities.log_config(config, "INVALID", "log.dd", search)
    elif result == -1:
        utilities.print_config(config, "FAIL1_config_" + bitcodeFile + "_" + str(search) + ".json")
        utilities.log_config(config, "FAIL1", "log.dd", search)
    elif result == -2:
        utilities.print_config(config, "FAIL2_config_" + bitcodeFile + "_" + str(search) + ".json")
        utilities.log_config(config, "FAIL2", "log.dd", search)
    elif result == -3:
        utilities.print_config(config, "FAIL3_config_" + bitcodeFile + "_" + str(search) + ".json")
        utilities.log_config(config, "FAIL3", "log.dd", search)
    search += 1
    return result
def pipeline_vid(img):
    # 1. undistort image
    img = transform.undistort(img, mtx, dist)

    # 2. transform image based on src and dst defined above
    img_transform = transform.transform(img, M)

    # 3. create combined threshold
    img_transform = transform.combined_thresh(img_transform)

    # 4. create new polynomial values for the current image
    found = left_lane.find_lane_for_frame(img_transform)
    if not found:
        left_lane.find_lane_for_frame(img_transform)
    found = right_lane.find_lane_for_frame(img_transform)
    if not found:
        right_lane.find_lane_for_frame(img_transform)

    lane_img = create_lane_image(img, img_transform, left_lane, right_lane)
    return lane_img
def test_transform2(self):
    """
    :return: pass or fail if the transform method works with ds1
    """
    print("test_transform2")
    ds1_headers = ["date", "col1", "col2"]
    ds1 = create_dataset("ds1", ds1_headers, "date")
    ds1.df = pandas.DataFrame(columns=ds1_headers)
    ds1.df.loc[0] = pandas.Series({"date": "2020-09-11", "col1": "test1", "col2": "test"})
    ds1.df.loc[1] = pandas.Series({"date": "2020-09-15", "col1": "test2", "col2": "test"})
    ds1.df.loc[2] = pandas.Series({"date": "2020-09-18", "col1": "test3", "col2": "test"})

    result = transform.transform(ds1, None)

    self.assertEqual(len(result), 3)
    idx = 0
    for stat in result:
        self.assertIsNotNone(stat)
        self.assertIsInstance(stat, classes.CovidStat)
        self.assertEqual(stat.idx, idx)
        self.assertEqual(str(stat.date), f"{ds1.df.loc[idx][0]} 00:00:00")
        self.assertIsNone(stat.cases)
        self.assertIsNone(stat.deaths)
        self.assertIsNone(stat.recovered)
        idx += 1
def main():
    """Script to perform ETL on a DVF file.

    Source: https://cadastre.data.gouv.fr/data/etalab-dvf/latest/csv/
    """
    logging.info("ETL >> Start")

    # ARGS & CHECKS
    args = get_args()
    if args["year"] is None:
        logging.error("Year not specified")
        logging.error("ETL >> Failed")
        return
    if args["db"] is None:
        logging.error("DB not specified")
        logging.error("ETL >> Failed")
        return
    if args["collection"] is None:
        logging.error("Collection not specified")
        logging.error("ETL >> Failed")
        return
    if args["verbose"]:
        print(f">> PERFORMING ETL ON DVF_{args['year']} <<")

    # >> EXTRACT
    logging.info("Extract >> Start")
    if args["verbose"]:
        print("\n>> EXTRACT <<")
    try:
        extract(args)
        logging.info("Extract >> End")
    except Exception:
        logging.error("Extract >> Failed")
        logging.error("ETL >> Failed")
        return

    # >> TRANSFORM
    logging.info("Transform >> Start")
    if args["verbose"]:
        print("\n>> TRANSFORM <<")
    try:
        transform(args)
        logging.info("Transform >> End")
    except Exception:
        logging.error("Transform >> Failed")
        logging.error("ETL >> Failed")
        return

    # >> LOAD
    logging.info("Load >> Start")
    if args["verbose"]:
        print("\n>> LOAD <<")
    try:
        load(args)
        logging.info("Load >> End")
    except Exception:
        logging.error("Load >> Failed")
        logging.error("ETL >> Failed")
        return

    logging.info("ETL >> End")
    if args["verbose"]:
        print(">> EOF <<")
def kunkel_full(protocol, params):
    growth_media = params["construct_setup"]['growth_media']
    # num_colonies = params["construct_setup"]['num_colonies']
    ssDNA = params["construct_setup"]['ssDNA']
    mutant_constructs = []

    # make mutant objects for accessibility
    construct_collect = {}
    for csv_row in params["construct_setup"]['mutant_upload']:
        if csv_row["mutant_label"] not in construct_collect:
            construct_collect[csv_row["mutant_label"]] = []
        construct_collect[csv_row["mutant_label"]].append({
            "sequence": csv_row["sequence"],
            "purification": csv_row["purification"],
            "scale": csv_row["scale"],
            "oligo_label": csv_row["oligo_label"]
        })

    oligo_collect = {}
    for row in params["construct_setup"]["mutant_upload"]:
        if (row["sequence"] not in oligo_collect and
                row["oligo_label"] in protocol.refs):
            raise RuntimeError("You cannot specify two different oligos to be "
                               "synthesized with the same name %s" % row['oligo_label'])
        elif row["sequence"] not in oligo_collect:
            oligo_collect[row["sequence"]] = {
                "sequence": row["sequence"],
                "purification": row["purification"],
                "scale": row["scale"],
                "destination": protocol.ref(row["oligo_label"], None, "micro-2.0",
                                            storage="cold_4").well(0)
            }

    for mut in construct_collect:
        mut_oligos = [o for o in construct_collect[mut]]
        mutant = Mutant(mut)
        for oligo in mut_oligos:
            mutant.add_oligos(oligo_collect[oligo["sequence"]]["destination"])
        mutant_constructs.append(mutant)

    oligos_to_synthesize = []
    for o in oligo_collect:
        scale_default(len(oligo_collect[o]["sequence"]),
                      oligo_collect[o]["scale"],
                      oligo_collect[o]["destination"].container.name)
        oligos_to_synthesize.append(oligo_collect[o])
    protocol.oligosynthesize(oligos_to_synthesize)

    assemble_params = {
        'ssDNA': ssDNA,
        'constructs': [{'mutant_name': mu.name, 'oligos': mu.oligos}
                       for mu in mutant_constructs],
        'mutant_objs': mutant_constructs
    }
    annealing_plate = assemble(protocol, assemble_params)
    protocol.unseal(annealing_plate)

    transform_params = {
        # 'num_colonies': num_colonies,
        'growth_media': growth_media,
        'constructs': [mu.anneal_well for mu in mutant_constructs],
        'mutant_objs': mutant_constructs
    }
    # get agar plates back from transform protocol
    agar_plates = transform(protocol, transform_params)
    for agar_plate in agar_plates:
        protocol.cover(agar_plate)
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]
for contour in contours:
    perimeter = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)
    if len(approx) == 4:
        document = approx
        break

cv2.drawContours(img, [document], -1, (0, 255, 0), 2)
cv2.imshow("Outline", img)
cv2.waitKey(0)
cv2.destroyAllWindows()

warped = transform(orig, document.reshape(4, 2))
warped = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)

# calculate a threshold mask
T = threshold_local(warped, 11, offset=10, method="gaussian")
warped = (warped > T).astype("uint8") * 255

cv2.imshow("Original", img)
cv2.imshow("Scanned", warped)
cv2.imwrite("output.jpg", warped)
cv2.waitKey(0)
cv2.destroyAllWindows()
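# transform() above receives the original frame and the four contour corners
# but is not defined in this snippet. A plausible stand-in is the classic
# four-point perspective warp; the corner-ordering heuristic and output
# sizing below are assumptions:
import cv2
import numpy as np

def transform(image, pts):
    s, d = pts.sum(axis=1), np.diff(pts, axis=1).ravel()
    tl, br = pts[np.argmin(s)], pts[np.argmax(s)]   # smallest / largest x + y
    tr, bl = pts[np.argmin(d)], pts[np.argmax(d)]   # smallest / largest y - x
    w = int(max(np.linalg.norm(br - bl), np.linalg.norm(tr - tl)))
    h = int(max(np.linalg.norm(tr - br), np.linalg.norm(tl - bl)))
    src = np.array([tl, tr, br, bl], dtype='float32')
    dst = np.array([[0, 0], [w - 1, 0], [w - 1, h - 1], [0, h - 1]],
                   dtype='float32')
    M = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(image, M, (w, h))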
import SETTINGS
import logging

import sqlaload as sl

from extract import extract
from entities import create_entities, update_entities
from load import load
from setup import setup, make_grano
from transform import transform
from network_entities import update_network_entities

if __name__ == '__main__':
    import sys

    logging.basicConfig(level=logging.DEBUG)

    assert len(sys.argv) == 3, \
        "Usage: %s [ir_source_file] [ap_source_file]" % sys.argv[0]
    ir_source_file = sys.argv[1]
    ap_source_file = sys.argv[2]

    engine = sl.connect(SETTINGS.ETL_URL)
    extract(engine, ir_source_file, ap_source_file)
    update_network_entities(engine, 'network_entities.csv')
    create_entities(engine)
    update_entities(engine, 'entities.csv')
    transform(engine)
    grano = make_grano()
    setup(engine, grano)
    load(engine, grano)
FP = 200.0  # Far plane
WIDTH = 1800
HEIGHT = 840
THETA = 45.0
P = vector(0.0, 0.0, 1.0)    # Vector in the up direction
E = point(80.0, 80.0, 40.0)  # Set the camera position
G = point(0.0, 0.0, 0.0)     # Set the gaze point
L = point(0.0, 0.0, 40.0)    # Set the light position
C = (1.0, 1.0, 1.0)          # Light color
I = (1.0, 1.0, 1.0)          # Light intensity

# Create and position three torii
torusT1 = transform().translate()
torusCol1 = (0, 255, 0)
torusRef1 = (0.2, 0.4, 0.4, 10.0)
torusInnerRadius1 = 20.0
torusOuterRadius1 = 2.0
torus1 = parametricTorus(torusT1, torusInnerRadius1, torusOuterRadius1,
                         torusCol1, torusRef1,
                         (0.0, 2.0 * pi), (0.0, 2.0 * pi),
                         (2.0 * pi / 256.0, 2.0 * pi / 64.0))

torusT2 = transform().translate()
torusCol2 = (255, 0, 0)
torusRef2 = (0.2, 0.4, 0.4, 10.0)
torusInnerRadius2 = 15.0
torusOuterRadius2 = 2.0
# the original snippet is truncated here; the remaining arguments are
# assumed to mirror torus1
torus2 = parametricTorus(torusT2, torusInnerRadius2, torusOuterRadius2,
                         torusCol2, torusRef2,
                         (0.0, 2.0 * pi), (0.0, 2.0 * pi),
                         (2.0 * pi / 256.0, 2.0 * pi / 64.0))
def rcca_decout(vars):
    decout_sdl = [
        "\n",
        "BEGIN :: func:%s\n" % config.decOutFunctionName,
        "input := list{%s, %s, %s}\n" % (config.partialCT,
                                         config.keygenBlindingExponent,
                                         vars['pk_value']),
        "%s := expand{T0, T1, T2}\n" % config.partialCT,
        # recover R
        "%s := T0 %s (T2^%s)\n" % (config.rccaRandomVar, vars['dec_op'],
                                   config.keygenBlindingExponent),
        # recover session key
        "%s := DeriveKey( %s )\n" % (vars['session_key'], config.rccaRandomVar),
        # use session key to recover M
        "%s := SymDec(%s, T1)\n" % (config.M, vars['session_key']),
        "%s" % vars['hashList'],
        # recover 'randomness' calculated for encrypt
        "%s" % (vars['hashListStmt']),
        "BEGIN :: if\n",
        # verify T0 and T1 are well-formed
        "if { (T0 == (%s * (%s ^ %s))) and (T2 == (%s ^ (%s / %s))) }\n"
        % (config.rccaRandomVar, vars['pk_value'], vars['s'],
           vars['pk_value'], vars['s'], config.keygenBlindingExponent),
        "output := %s\n" % config.M,
        "else\n",
        "error('invalid ciphertext')\n",
        "END :: if\n",
        "END :: func:%s\n" % config.decOutFunctionName
    ]
    return decout_sdl


if __name__ == "__main__":
    sdl_file = sys.argv[1]
    sdlVerbose = False
    if len(sys.argv) > 2 and sys.argv[2] == "-v":
        sdlVerbose = True
    parseFile(sdl_file, sdlVerbose)
    keygenVarList, var_info = transform(sdlVerbose)
    rcca(var_info)
    print("\n")
# Print the question
print('\nQuestion {k}/{n}:\n'.format(k=i + 1, n=questions.shape[0]))
print(questions.iloc[i, 0] + '\n')

# Get the user's response
response = None  # Placeholder value
while response is None or response < -2. or response > 2.:
    response = float(input(input_text))

# Increment the user's position
pos += response * questions.iloc[i, 1:].values

# Apply some scaling to the position based on how far it was possible
# to move in each dimension
pos = transform(pos, questions)[0]

print('Your position in 3D is ' + str(pos) + '.')

# Plot two of the three coordinates
plt.rc("font", size=20, family="serif", serif="Computer Sans")
plt.rc("text", usetex=True)
plt.figure(figsize=(8, 8))
plt.plot(pos[0], pos[1], 'ro', markersize=10)
plt.axis([-5., 5., -5., 5.])
plt.gca().set_xticks(np.linspace(-5., 5., 11))
plt.gca().set_yticks(np.linspace(-5., 5., 11))
plt.gca().set_xticklabels([])
plt.gca().set_yticklabels([])
plt.grid(True)
def main(server, sql_extract, sql_load):
    data = extract.extract(server, sql_extract)
    data = transform.transform(data)
    return load.load(data, sql_load)
def continent(seed, itrans, sea, niter):
    # =========================================================
    # Generate random continent
    # =========================================================
    # random.seed(seed=4);  itrans, sea = 5, 0.465   # peninsulas
    # random.seed(seed=7);  itrans, sea = 7, 0.      # mountainous
    # random.seed(seed=13); itrans, sea = 13, 0.     # good plains
    # =========================================================
    pertsize = [1.0, 2.0, 4.0, 6.0, 4.0, 2.0, 1.0, 0.5, 0.2, 0.2, 0.1, 0.05]
    assert niter <= len(pertsize)
    random.seed(seed=seed)
    h = randimage(pertsize[0:niter])

    # =========================================================
    # Generate transformation function
    # =========================================================
    from transform import transform
    h = transform(h, itrans)

    # =========================================================
    # Flatten out the sea
    # =========================================================
    if sea < h[0, 0]:  # find a suitable sea level
        sea = h[1, 1] + 0.01
    h = (h - sea) / (1 - sea)
    is_sea = (h < 0)
    h[is_sea] = 0.0

    # =========================================================
    # Plateau variation
    # =========================================================
    pertsize = [1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 64.0, 64.0, 32.0, 16.0]
    gamma = randimage(pertsize[0:niter])
    b = exp((gamma - 0.3))
    h = power(h * 0.6 + 0.2, b) - power(0.2, b)
    h = h / h.max()
    h[is_sea] = 0.0
    # import pylab; pylab.contour(b); pylab.show()

    # =========================================================
    # Add grid perturbation
    # =========================================================
    dh1, dh2 = zeros(h.shape), zeros(h.shape)
    dh1[:, 0:-1] = absolute(h[:, 0:-1] - h[:, 1:])
    dh1[:, -1] = dh1[:, -2]
    dh1[:, 1:-1] = (dh1[:, 1:-1] + dh1[:, 0:-2]) / 2
    dh2[0:-1, :] = absolute(h[0:-1, :] - h[1:, :])
    dh2[-1, :] = dh2[-2, :]
    dh2[1:-1, :] = (dh2[1:-1, :] + dh2[0:-2, :]) / 2
    dh = dh1 + dh2
    dh = (dh - dh.min()) / (dh.max() - dh.min())
    p = random.random(h.shape) * sqrt((dh * 0.9 + h * 0.1) * h)
    p[h == 0] = 0
    h += p * 2.0
    h /= h.max()
    h *= (1.0 - h.mean() * 0.7)
    return h
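# randimage() is not defined in this section. A plausible stand-in, assuming
# it sums smoothed random fields, one octave per entry in pertsize (larger
# values mean stronger perturbation at that scale); the grid sizes and
# normalization here are guesses:
import numpy as np
from scipy.ndimage import zoom

def randimage(pertsize, shape=(256, 256)):
    h = np.zeros(shape)
    for k, amp in enumerate(pertsize):
        n = 2 ** (k + 1) + 1                       # finer grid per octave
        coarse = np.random.random((n, n))
        h += amp * zoom(coarse, (shape[0] / n, shape[1] / n), order=1)
    return (h - h.min()) / (h.max() - h.min())     # normalize to [0, 1]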
def deform(image, p, q):
    '''Transform an original image using given control points p and their
    new positions q.'''
    # Preconditions:
    #   - image: numpy.array representation of the image with dimension (x, y, 3)
    #   - p: list of tuples representing the control points
    #   - q: list of tuples representing the intended final position
    #        of the control points
    # Postcondition:
    #   - ret_img: numpy.array representation of the deformed image with
    #              dimension (x, y, 3)
    x_step = 20
    y_step = 20
    x_lines = np.array([*range(0, image.shape[1] - 1, x_step), image.shape[1] - 1])
    y_lines = np.array([*range(0, image.shape[0] - 1, y_step), image.shape[0] - 1])
    vertices = np.stack(np.meshgrid(x_lines, y_lines), axis=2)
    extra_points = np.array([
        [0, 0],                                    # top left
        [0, image.shape[1] - 1],                   # top right
        [image.shape[0] - 1, 0],                   # bottom left
        [image.shape[0] - 1, image.shape[1] - 1],  # bottom right corner
    ])
    full_p = np.concatenate([p, extra_points], axis=0)
    full_q = np.concatenate([q, extra_points], axis=0)
    new_vertices = np.array([[transform(v, full_p, full_q) for v in row]
                             for row in vertices])

    # Just in case some of the transformed pixels are transformed out of
    # the dimensions of the original image.
    max_x, max_y = new_vertices.max(axis=1).max(axis=0).astype(int)
    new_image = np.tile(utils.colors['red'], (max_x + 1, max_y + 1, 1))

    for i in range(new_vertices.shape[0] - 1):
        for j in range(new_vertices.shape[1] - 1):
            # The order of the vertices is important. The rows of new_quad
            # must specify the edges of a quadrilateral, so for all rows,
            # new_quad[l - 1], new_quad[l] are an edge in the quadrilateral,
            # where l is just some index.
            new_quad = np.array([
                new_vertices[i, j],
                new_vertices[i + 1, j],
                new_vertices[i + 1, j + 1],
                new_vertices[i, j + 1],
            ])
            old_quad = np.array([
                vertices[i, j],
                vertices[i + 1, j],
                vertices[i + 1, j + 1],
                vertices[i, j + 1],
            ])
            points = utils.enumerate_points_in_polygon(new_quad)
            points_x, points_y = points[:, 0], points[:, 1]
            inside_picture = (
                (points_x >= 0) & (points_x < new_image.shape[1]) &
                (points_y >= 0) & (points_y < new_image.shape[0]))
            points = points[inside_picture]

            # It's possible for all the pixels in new_quad to lie
            # outside the image.
            if points.shape[0] == 0:
                continue

            new_x_coords, new_y_coords = new_quad[:, 0], new_quad[:, 1]
            old_x_coords, old_y_coords = old_quad[:, 0], old_quad[:, 1]
            interpx = interp2d(new_x_coords, new_y_coords, old_x_coords)
            interpy = interp2d(new_x_coords, new_y_coords, old_y_coords)
            estimated_old_points = np.array([
                np.squeeze([interpx(x, y), interpy(x, y)])
                for x, y in points
            ])
            nearest_old_pixels = np.around(estimated_old_points).astype(int)
            near_x, near_y = nearest_old_pixels[:, 0], nearest_old_pixels[:, 1]
            in_bounds = (
                (near_x < image.shape[1]) & (near_x >= 0) &
                (near_y < image.shape[0]) & (near_y >= 0))
            old_colors = np.empty((nearest_old_pixels.shape[0], 3))
            # This is the basic idea, but some of the nearest_old_pixels can
            # be outside of the old image's dimensions:
            #   old_colors = image[np.split(nearest_old_pixels, 2, axis=1)[::-1]]
            #   new_image[np.split(points, 2, axis=1)[::-1]] = old_colors
            valid_x, valid_y = [A.ravel() for A in
                                np.split(nearest_old_pixels[in_bounds], 2, axis=1)]
            old_colors[in_bounds] = image[valid_y, valid_x]
            old_colors[~in_bounds] = utils.colors['red']
            new_image[[A.ravel() for A in np.split(points, 2, axis=1)][::-1]] = old_colors
    return new_image
        ckey = 'xx' + 'xx'.join(sorted(list(all_conjs))) + 'xx' if len(all_conjs) > 0 else None
        pkey = 'xx' + 'xx'.join(sorted(list(all_person))) + 'xx' if len(all_person) > 0 else None
        morphemes = []
        # if pkey:
        #     morphemes.append(pkey)
        if ckey and not attached and not present:
            morphemes.append(ckey)
        verb = all_verbs.pop()
        if ckey and attached and not present:
            verb += ckey
        if present and '3ps' in pkey:
            verb += 'xx3psxx'
        morphemes.append(verb)
        # if clitics != []:
        #     morphemes += ['xx' + cl + 'xx' for cl in clitics]
        return ' '.join(morphemes)

    def process(self, tokens):
        return ' '.join([self.analyze(t) for t in tokens])


if __name__ == '__main__':
    # analyzer = Analyzer(sys.argv[1])
    language = sys.argv[1]
    line = sys.stdin.readline()
    while line:
        line = unicode(line, 'utf-8')
        # print analyzer.process(line.strip().split()).encode('utf-8')
        print transform(language, line.strip()).encode('utf-8')
        line = sys.stdin.readline()
def prepare_dumb_transformer(self):
    tr = transform.transform()
    tr.set_scaling(self.paper_to_canvas_coord(1))
    return tr
def generate(data_dir, batch_size=16, image_size=640, min_text_size=8,
             shrink_ratio=0.4, thresh_min=0.3, thresh_max=0.7, is_training=True):
    split = 'train' if is_training else 'test'
    with open(osp.join(data_dir, f'{split}_list.txt')) as f:
        image_fnames = f.readlines()
    image_paths = [osp.join(data_dir, f'{split}_images', image_fname.strip())
                   for image_fname in image_fnames]
    gt_paths = [osp.join(data_dir, f'{split}_gts', image_fname.strip() + '.txt')
                for image_fname in image_fnames]
    all_anns = load_all_anns(gt_paths)
    transform_aug = iaa.Sequential(
        [iaa.Fliplr(0.5), iaa.Affine(rotate=(-10, 10)), iaa.Resize((0.5, 3.0))])
    dataset_size = len(image_paths)
    indices = np.arange(dataset_size)
    if is_training:
        np.random.shuffle(indices)
    current_idx = 0
    b = 0
    while True:
        if current_idx >= dataset_size:
            if is_training:
                np.random.shuffle(indices)
            current_idx = 0
        if b == 0:
            # Init batch arrays
            batch_images = np.zeros([batch_size, image_size, image_size, 3],
                                    dtype=np.float32)
            batch_gts = np.zeros([batch_size, image_size, image_size],
                                 dtype=np.float32)
            batch_masks = np.zeros([batch_size, image_size, image_size],
                                   dtype=np.float32)
            batch_thresh_maps = np.zeros([batch_size, image_size, image_size],
                                         dtype=np.float32)
            batch_thresh_masks = np.zeros([batch_size, image_size, image_size],
                                          dtype=np.float32)
            batch_loss = np.zeros([batch_size, ], dtype=np.float32)
        i = indices[current_idx]
        image_path = image_paths[i]
        anns = all_anns[i]
        image = cv2.imread(image_path)
        # show_polys(image.copy(), anns, 'before_aug')
        if is_training:
            transform_aug = transform_aug.to_deterministic()
            image, anns = transform(transform_aug, image, anns)
            image, anns = crop(image, anns)
        image, anns = resize(image_size, image, anns)
        # show_polys(image.copy(), anns, 'after_aug')
        # cv2.waitKey(0)
        anns = [ann for ann in anns if Polygon(ann['poly']).is_valid]
        gt = np.zeros((image_size, image_size), dtype=np.float32)
        mask = np.ones((image_size, image_size), dtype=np.float32)
        thresh_map = np.zeros((image_size, image_size), dtype=np.float32)
        thresh_mask = np.zeros((image_size, image_size), dtype=np.float32)
        for ann in anns:
            poly = np.array(ann['poly'])
            height = max(poly[:, 1]) - min(poly[:, 1])
            width = max(poly[:, 0]) - min(poly[:, 0])
            polygon = Polygon(poly)
            # generate gt and mask
            if polygon.area < 1 or min(height, width) < min_text_size or ann['text'] == '###':
                cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
                continue
            else:
                distance = polygon.area * (1 - np.power(shrink_ratio, 2)) / polygon.length
                subject = [tuple(l) for l in ann['poly']]
                padding = pyclipper.PyclipperOffset()
                padding.AddPath(subject, pyclipper.JT_ROUND,
                                pyclipper.ET_CLOSEDPOLYGON)
                shrinked = padding.Execute(-distance)
                if len(shrinked) == 0:
                    cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
                    continue
                else:
                    shrinked = np.array(shrinked[0]).reshape(-1, 2)
                    if shrinked.shape[0] > 2 and Polygon(shrinked).is_valid:
                        cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1)
                    else:
                        cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
                        continue
            # generate thresh map and thresh mask
            draw_thresh_map(ann['poly'], thresh_map, thresh_mask,
                            shrink_ratio=shrink_ratio)
        thresh_map = thresh_map * (thresh_max - thresh_min) + thresh_min

        image = image.astype(np.float32)
        image[..., 0] -= mean[0]
        image[..., 1] -= mean[1]
        image[..., 2] -= mean[2]
        batch_images[b] = image
        batch_gts[b] = gt
        batch_masks[b] = mask
        batch_thresh_maps[b] = thresh_map
        batch_thresh_masks[b] = thresh_mask

        b += 1
        current_idx += 1
        if b == batch_size:
            inputs = [batch_images, batch_gts, batch_masks,
                      batch_thresh_maps, batch_thresh_masks]
            outputs = batch_loss
            yield inputs, outputs
            b = 0
#!/usr/bin/env python
import transform
import time

if __name__ == '__main__':
    t = time.time()
    val = transform.transform()
    print('Time:', (time.time() - t) * 1000)
    print('Result:', val)
def main():
    return transform(writer=Writer(), part='html_body')
sigma = sigma.union(file.readline().split())
initialState = file.readline().split()[0]
finalStates = finalStates.union(file.readline().split())

for line in file:
    values = line.split()
    current = {}
    for i in range(1, len(values), 2):
        statesReached = values[i + 1]
        if statesReached == 'empty':
            current[values[i]] = []
        else:
            current[values[i]] = statesReached[1:-1].split(',')
    delta[values[0]] = current

dfaStates, dfaSigma, dfaDelta, dfaInitialState, dfaFinalStates = t.transform(
    states, sigma, delta, initialState, finalStates)

outFile.write('{}\n'.format(' '.join(map(lambda x: 'empty' if x == '' else x, dfaStates))))
outFile.write('{}\n'.format(' '.join(dfaSigma)))
outFile.write('{}\n'.format(dfaInitialState))
outFile.write('{}\n'.format(' '.join(dfaFinalStates)))
for s in dfaDelta:
    text = [s] if s != '' else ['empty']
    for k, v in dfaDelta[s].items():
        text.append(k if k != '' else 'empty')
        text.append(v if v != '' else 'empty')
    outFile.write('{}\n'.format(' '.join(text)))
outFile.close()