def read_message(self):
    """Block until a pickled array arrives on the ZMQ socket, ACK it, return it."""
    sock = self.sock
    cprint('server before receive', bcolors.WARNING)
    payload = sock.recv_pyobj()
    cprint('server received message ' + str(payload.shape), bcolors.WARNING)
    # Acknowledge receipt so the REQ/REP peer can continue.
    sock.send('OK')
    return payload
def getMtime(self, name_list: list, year_list: list):
    """Look up each movie title on mtime.com and collect its rating data.

    ``name_list`` and ``year_list`` are parallel lists; a search hit is
    accepted only when its year falls within [year - 3, year].  Accepted
    movies are written to the excel sheet via ``self.excel.writeSheet``.
    """
    global header
    referer = 'http://search.mtime.com/search/?q={}'
    search_url = 'http://service.channel.mtime.com/Search.api?Ajax_CallBack=true&Ajax_CallBackType=Mtime.Channel.Services&Ajax_CallBackMethod=GetSearchResult&Ajax_CrossDomain=1&Ajax_RequestUrl={0}&t={1}&Ajax_CallBackArgument0={2}&Ajax_CallBackArgument1=0&Ajax_CallBackArgument2=290&Ajax_CallBackArgument3=0&Ajax_CallBackArgument4=1'
    movie_url = 'http://service.library.mtime.com/Movie.api?Ajax_CallBack=true&Ajax_CallBackType=Mtime.Library.Services&Ajax_CallBackMethod=GetMovieOverviewRating&Ajax_CrossDomain=1&Ajax_RequestUrl={0}&t={1}&Ajax_CallBackArgument0={2}'
    # FIX: annotations were list literals (name_list: []); iterate the
    # parallel lists with zip instead of range(len(...)).
    for name, year in zip(name_list, year_list):
        header['Referer'] = referer.format(parse.quote(name))
        t = time.strftime('%Y%m%d%H%M%S0000', time.localtime())
        # Retry until the JSONP payload can be extracted from the response.
        while True:
            resp = self.connect(search_url.format(header['Referer'], t, name),
                                isEtree=False, useProxy=True)
            try:
                js_data = re.search(r'= (.*?});', resp).group(1)
                break
            except Exception:
                cprint('Invalid. Retry')
        search_datas = json.loads(js_data)['value']
        if 'movieResult' in search_datas and 'moreMovies' in search_datas['movieResult']:
            search_datas = search_datas['movieResult']['moreMovies']
        else:
            continue
        movie_list = []
        for search_data in search_datas:
            if search_data['movieTitle'].startswith('{} '.format(name)):
                try:
                    # Titles end with "(yyyy)"; pull the year out.
                    y = int(search_data['movieTitle'][-5:-1])
                except ValueError:
                    continue
                if int(year) - 3 <= y <= int(year):
                    url = search_data['movieUrl']
                    movieId = url.split('/')[-2]
                    header['Referer'] = url
                    resp = self.connect(movie_url.format(url, t, movieId),
                                        isEtree=False, useProxy=True)
                    js_data = re.search(r'= (.*?});', resp).group(1)
                    movie_data = json.loads(js_data)['value']
                    movie = {
                        'CNName': movie_data['movieTitle'],
                        'mtimeMark': movie_data['movieRating']['RatingFinal'] if 'movieRating' in movie_data else '',
                        'mtimeNum': movie_data['movieRating']['Usercount'] if 'movieRating' in movie_data else '',
                        'mtimeWant': movie_data['movieRating']['AttitudeCount'] if 'movieRating' in movie_data else '',
                        'movieId': movieId
                    }
                    movie_list.append(movie)
                    cprint(movie, 'magenta')
        self.excel.writeSheet(movie_list)
def __repr__(self):
    """Render the SNP table, one line per SNP: name, genotype counts,
    frequencies, and the two derived allele frequencies."""
    s = ''
    # Iterate the structured array restricted to the three fields of interest.
    # NOTE(review): assumes self.snp is a numpy structured array with
    # 'name', 'count', 'frequency' fields -- confirm against the constructor.
    for (a, b, c) in self.snp[np.array(['name', 'count', 'frequency'])]:
        # Total allele count: two alleles per sample over the first three
        # genotype classes.
        alleles = 2.0 * sum(b[0:3])
        # NOTE(review): this util.cprint appears to RETURN a formatted string
        # (unlike the termcolor-style cprint elsewhere) -- verify in util.
        s += ('%-15s' % (a,) + ' ' + util.cprint(b, '%5d') + ' ' +
              util.cprint(c, '%7.5f') + ' [%7.5f %7.5f]' %
              ((2 * b[0] + b[1]) / alleles, (2 * b[2] + b[1]) / alleles))
        s += '\n'
    return s
def process_chapters(chapters):
    """Validate each "Name,HH:MM:SS.mmm" chapter string and split it on the comma.

    Exits the program (via exit_with_error) on the first malformed entry.
    """
    for entry in chapters:
        if not re.match(r"^\w+,\d\d:\d\d:\d\d\.\d\d\d$", entry):
            exit_with_error(entry)
    split_chapters = [entry.split(",") for entry in chapters]
    util.cprint(split_chapters, "blue")
    return split_chapters
def getFilmInfo(self, movie_list=None):
    """Enrich each movie dict in-place with douban/DB/MY data.

    Entries that turn out to be TV shows ('电视' in category) or that
    getMYInfo flags are removed from movie_list.
    """
    # FIX: mutable default argument replaced with None sentinel.
    if movie_list is None:
        movie_list = []
    # FIX: iterate over a snapshot -- the original mutated the list while
    # iterating it, and referenced an undefined name 'move_list' (NameError).
    for movie in list(movie_list):
        self.getDouban(movie)
        self.getDBInfo(movie)
        if ('category' in movie and '电视' in movie['category']) or self.getMYInfo(movie):
            movie_list.remove(movie)
        cprint(movie, 'magenta')
        print()
def handle_movie(folder, movie, compression):
    """Re-encode *movie* into a "Compressed" subfolder at the given CRF,
    copying all video/audio/subtitle streams through ffmpeg."""
    out_dir = os.path.join(folder, "Compressed")
    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)
    util.cprint(f"Found movie: '{movie}'", "green")
    com_movie = os.path.join(out_dir, movie)
    movie = os.path.join(folder, movie)
    util.run_command(
        f'ffmpeg -i "{movie}" -map 0:v -map 0:a -map 0:s? -scodec copy -crf '
        f'{compression} "{com_movie}"')
def handle_movie(folder, movie):
    """Remux *movie* into an MP4 inside a "Streamable" subfolder without
    re-encoding (stream copy)."""
    stream_dir = os.path.join(folder, "Streamable")
    if not os.path.isdir(stream_dir):
        os.mkdir(stream_dir)
    util.cprint(f"Found movie: '{movie}'", "green")
    movie_mp4 = os.path.join(
        stream_dir, re.sub(r"(\.mkv|\.mp4)$", ".mp4", movie))
    movie = os.path.join(folder, movie)
    util.run_command(f'ffmpeg -i "{movie}" -c copy "{movie_mp4}"')
def getFont(response):
    """Find the .woff font URL embedded in *response*, download and parse it.

    Returns None when the page embeds no font; otherwise the glyph mapping
    produced by parseFont().
    """
    match = re.search(r"url\('(.*\.woff)'\)", response)
    if match is None:
        return None
    font_url = 'http:' + match.group(1)
    cprint('download:\t' + font_url, 'cyan')
    writeFont(requests.get(font_url).content)
    return parseFont()
def handle_movie(folder, movie):
    """Merge *movie* with a same-named .srt or .ass subtitle file, if one
    exists next to it; warn otherwise."""
    base = os.path.splitext(movie)[0]
    util.cprint(f"Found movie: '{movie}'", "green")
    # Prefer .srt over .ass, matching the original elif ordering.
    for ext in ("srt", "ass"):
        if os.path.isfile(os.path.join(folder, f"{base}.{ext}")):
            merge_command(folder, movie, ext)
            return
    util.cprint("No matching subtitle file found!", "red")
def createSheet(self):
    """Append a fresh worksheet to the workbook and write the header row."""
    stamp = time.strftime('%a %b %d %Y %H:%M:%S', time.localtime())
    cprint('{0} -----创建新表格-----'.format(stamp), 'cyan')
    self.sheet = self.book.add_sheet('sheet' + str(self.sheet_index))
    self.sheet_index += 1
    # Row 0 holds the column titles at their configured column indices.
    for title, col in self.header.items():
        self.sheet.write(0, col, title)
def load_items(self):
    """Populate self.db_items from the configured PASCAL/SBD image sets.

    In 'single_image' mode each item carries all semantic classes in its
    label; in 'image_pair' mode items are regrouped per semantic class so
    that pairs can later be sampled from the same class.  Also resets
    seq_index so update_seq_index() reshuffles on the next call.
    """
    self.db_items = []
    if 'image_sets' in self.params:
        for image_set in self.params['image_sets']:
            print(image_set)
            if image_set.startswith('pascal') or image_set.startswith('sbd'):
                if image_set.startswith('pascal'):
                    pascal_db = util.PASCAL(self.params['pascal_path'], image_set[7:])
                elif image_set.startswith('sbd'):
                    pascal_db = util.PASCAL(self.params['sbd_path'], image_set[4:])
                # Reads a single image; all semantic classes present in the label.
                if self.params['output_type'] == 'single_image':
                    items = pascal_db.getItems(self.params['pascal_cats'],
                                               self.params['areaRng'],
                                               read_mode=util.PASCAL_READ_MODES.SEMANTIC_ALL)
                # Reads pairs of images from one semantic class, binary labels.
                elif self.params['output_type'] == 'image_pair':
                    items = pascal_db.getItems(self.params['pascal_cats'],
                                               self.params['areaRng'],
                                               read_mode=util.PASCAL_READ_MODES.SEMANTIC)
                    items = self._remove_small_objects(items)
                else:
                    raise Exception('Only single_image and image_pair mode are supported')
                self.db_items.extend(items)
            else:
                raise Exception('Unsupported image set: ' + image_set)
    cprint('Total of ' + str(len(self.db_items)) + ' db items loaded!', bcolors.OKBLUE)

    if self.params['output_type'] == 'image_pair':
        items = self.db_items
        # In image_pair mode, pairs are sampled from the same semantic class.
        clusters = util.PASCAL.cluster_items(self.db_items)
        # db_items becomes a list of (set, j) tuples: the class set the item
        # belongs to and the item's index inside that set.
        self.db_items = []
        for item in items:
            set_id = item.obj_ids[0]
            imgset = clusters[set_id]
            # FIX: the original concatenated the int imgset.length into the
            # assert message, raising TypeError instead of AssertionError.
            assert imgset.length > self.params['k_shot'], (
                'class ' + imgset.name + ' has only ' + str(imgset.length) + ' examples.')
            in_set_index = imgset.image_items.index(item)
            self.db_items.append((imgset, in_set_index))
        cprint('Total of ' + str(len(clusters)) + ' classes!', bcolors.OKBLUE)

    self.orig_db_items = copy.copy(self.db_items)
    assert len(self.db_items) > 0, 'Did not load anything from the dataset'
    # Forces update_seq_index() to shuffle db_items and set seq_index = 0.
    self.seq_index = len(self.db_items)
def forward(self, bottom, top):
    """Fill the top blobs with the next batch pulled from the prefetch queue.

    Elements of consecutively dequeued items are interleaved along the batch
    axis: element k of dequeue #itt lands at row k * batch_size + itt.
    """
    cprint(
        'Queue size ' + str(self.queue.qsize()) + ', ' + 'Batch size ' +
        str(self.batch_size), bcolors.OKGREEN)
    for itt in range(self.batch_size):
        item = self.dequeue()
        assert item is not None
        for i in range(len(self.top_names)):
            obj_list = item[self.top_names[i]]
            # Items are interleaved: stride batch_size, offset itt.
            inds = np.arange(len(obj_list)) * self.batch_size + itt
            top[i].data[inds, ...] = self.__stack(obj_list)
def run(self):
    """Producer loop: keep loading frames and pushing (item, info) pairs
    onto the queue.

    On any failure the traceback is logged, a None sentinel is enqueued so
    the consumer can shut down, and the error is re-raised.
    """
    try:
        while True:
            item = None
            # load_next_frame may legitimately return None; keep polling.
            while item is None:
                item, info = self.load_next_frame()
            self.queue.put((item, info))
    # Explicit form of the original bare except -- identical semantics, and
    # we must enqueue the sentinel even on KeyboardInterrupt/SystemExit.
    except BaseException:
        # FIX: typo 'Happended' -> 'Happened' in the log message.
        cprint('An Error Happened in run()', bcolors.FAIL)
        cprint(str("".join(traceback.format_exception(*sys.exc_info()))), bcolors.FAIL)
        # Sentinel tells the consumer thread to stop.
        self.queue.put(None)
        raise Exception("".join(traceback.format_exception(*sys.exc_info())))
def connect_test(self, url, proxy, timeout=7):
    """Check that *proxy* can fetch *url*; return True on success.

    Up to 3 attempts are made; on final failure the proxy is removed from
    the pool and False is returned.
    """
    cprint('proxy connect test {}'.format(proxy), 'cyan')
    cprint(url, 'cyan')
    retry_count = 3
    while retry_count > 0:
        try:
            resp = requests.get(
                url,
                headers=header,
                verify=sslVerify,
                timeout=timeout,
                proxies={'https': 'https://{}'.format(proxy)})
            body = etree.HTML(resp.text)
            # item = body.xpath('//div[@class="global-nav-items"]')
            if resp.text.find('异常请求') > 0:
                cprint('IP异常')
                # FIX: count this as a failed attempt -- the original never
                # decremented here and could loop forever while the site
                # kept flagging the IP.
                retry_count -= 1
            # elif item is None or item == []:
            #     cprint('IP无效')
            #     return False
            else:
                return True
        except requests.exceptions.RequestException:
            cprint('retry connect test', 'yellow')
            retry_count -= 1
    delete_proxy(proxy)
    return False
def writeSheet(self, movie_list):
    """Write one row per movie dict, rolling over to a new sheet near the
    .xls 65536-row limit."""
    stamp = time.strftime('%a %b %d %Y %H:%M:%S', time.localtime())
    cprint('{0} -----写入excel表格-----'.format(stamp), 'cyan')
    for movie in movie_list:
        if self.now_index > 65500:
            # .xls sheets cap at 65536 rows; start a fresh sheet.
            self.createSheet()
            self.now_index = 1
        for field, col in self.header.items():
            if field in movie:
                self.sheet.write(self.now_index, col, str(movie[field]).strip())
        self.now_index += 1
def setup(self, bottom, top):
    """Parse layer parameters and bind a ZMQ REQ socket on the configured port."""
    # NOTE(review): eval on param_str -- acceptable only for trusted prototxt.
    layer_params = eval(self.param_str)
    check_params(layer_params, port=None)
    self.port = layer_params['port']
    self.context = zmq.Context.instance()
    self.sock = self.context.socket(zmq.REQ)
    self.sock.bind('tcp://*:' + self.port)
    cprint('client set up', bcolors.OKBLUE)
    # Caffe requires every top blob to have a shape; give the optional top
    # a minimal one.
    if len(top) > 0:
        top[0].reshape(1)
def getEndata(self):
    """Crawl box-office listings from endata.com.cn, year by year and area
    by area, writing each area/year page set to the excel sheet."""
    global header
    header['dnt'] = '1'
    url = 'http://www.endata.com.cn/API/GetData.ashx'
    # First request: fetch the list of area ids.
    form_data = '&MethodName=BoxOffice_GetMovieData_List_Area'
    resp = self.connect(url, form_data, isGet=False, isEtree=False, useProxy=True)
    areas = json.loads(resp)['Data']['Table']
    area_list = []
    for area in areas:
        area_list.append(area['id'])
    print(area_list)
    # NOTE(review): the literal spaces around {0}/{1} look odd but are kept
    # verbatim -- the server apparently tolerates them; confirm before changing.
    form_data = 'areaId= {0} &typeId=0&year= {1} &initial=&pageIndex={2}&pageSize=10&MethodName=BoxOffice_GetMovieData_List'
    for year in range(start_year, end_year + 1):
        for area in area_list:
            page = 1
            movie_list = []
            while True:
                resp = self.connect(url, form_data.format(area, year, page),
                                    isGet=False, isEtree=False, useProxy=True)
                data = json.loads(resp)['Data']
                box_none = False
                for row in data['Table']:
                    # Stop paging once box-office figures run out.
                    if row['BoxOffice'] is None or row[
                            'BoxOffice'] == '' or row['BoxOffice'] == 0:
                        box_none = True
                        break
                    movie = {
                        'CNName': row['MovieName'],
                        'ENName': row['MovieEnName'],
                        'year': row['releaseYear'],
                        'boxOffice': row['BoxOffice']
                    }
                    movie_list.append(movie)
                total = data['Table1'][0]['TotalPage']
                cprint(
                    'curPage:{0}\ttotalPage:{1}\tyear:{2}'.format(
                        page, total, year), 'magenta')
                if page >= total or box_none:
                    break
                page += 1
            self.excel.writeSheet(movie_list)
def parseFont():
    """Map each glyph in the downloaded woff to its real digit by nearest
    match against the known base font, then delete the temp font file."""
    cprint('open:\t' + cur_path, 'cyan')
    cur_font = TTFont(cur_path)
    # First two glyphs are bookkeeping entries (.notdef etc.); skip them.
    glyph_names = cur_font.getGlyphOrder()[2:]
    cur_axis = getAxis(cur_font)
    font_dict = {}
    for i, glyph_name in enumerate(glyph_names):
        best_avg, best_uni = 99999, None
        for j, base_uni in enumerate(uni_base_list):
            avg = compare_axis(cur_axis[i], base_axis[j])
            if avg < best_avg:
                best_avg, best_uni = avg, base_uni
        # Key is the html entity form of the glyph's codepoint.
        font_dict['&#x' + glyph_name[3:].lower() + ';'] = maoyan_dict[best_uni]
    remove(cur_path)
    return font_dict
def saveExcel(self):
    """Persist the workbook to <filePath>/<fileName>.xls, replacing any
    previous copy; on failure report the error and exit."""
    savePath = path.join(filePath, fileName) + '.xls'
    if path.exists(savePath):
        remove(savePath)
    try:
        stamp = time.strftime('%a %b %d %Y %H:%M:%S', time.localtime())
        cprint('{0} -----保存excel表格-----'.format(stamp), 'cyan')
        if not path.exists(filePath):
            makedirs(filePath)
        self.book.save(savePath)
    except Exception as err:
        cprint(err)
        input('数据保存失败!\nPress any key to exit!')
        sys.exit()
def forward(self, bottom, top):
    """Fill the six fixed top blobs plus the flow blobs with the next batch
    from the background loader queue.

    A None item is the loader's failure sentinel: the worker process is
    terminated and the error propagated.
    """
    cprint('Queue size ' + str(self.queue.qsize()), bcolors.OKBLUE)
    for itt in range(self.batch_size):
        item = self.queue.get()
        if item is None:
            # Loader signalled failure; stop the worker and abort.
            self.process.terminate()
            raise Exception
        top[0].data[itt, ...] = item['current_image']   # im1
        top[1].data[itt, ...] = item['fg_image']        # im1_fg
        top[2].data[itt, ...] = item['bg_image']        # im1_bg
        top[3].data[itt, ...] = item['next_image']      # im2
        top[4].data[itt, ...] = item['current_mask']    # label
        top[5].data[itt, ...] = item['label']           # label
        # Flow blobs follow the six fixed blobs, in self.flow_names order.
        for i in range(len(self.flow_names)):
            flow_name = self.flow_names[i]
            top[i + 6].data[itt, ...] = item[flow_name]  # inverse flow
def gen_cart_stack(init_commands, clock_commands, mode, loud=False):
    """Build a one-command minecart command-block stack.

    Init commands become plain command-block minecarts; clock commands become
    offset command blocks; a trailing clone/fill/summon sequence activates
    and then cleans up the structure.  Returns the final command string.

    NOTE(review): `format` here is a project helper taking keyword
    placeholders, not the builtin -- confirm its import.
    """
    final_command_obj = None
    if clock_commands or init_commands:
        entities = []
        entities.append(sands.normal_sand("activator_rail"))
        for command in init_commands:
            if hasattr(command, "cmd"):
                if loud:
                    cprint(command.prettystr())
                entities.append(cart(command.cmd))
        offset = 1
        for command in clock_commands:
            # The first clock command drives the loop as a repeating block.
            if offset == 1:
                command.block = "repeating_command_block"
            if loud:
                cprint(command.prettystr())
            entities.append(cart_command_block(offset, command, 1, mode))
            offset += 1
        filloffset = offset + 1
        offset += 2
        # Clone the assembled column into place, then clear the scaffold.
        entities.append(cart_command_block(offset, Command(format(
            "clone ~ ~-2 ~ ~ ~-{o1} ~ ~ ~-{o2} ~ replace move",
            o1=offset - 1, o2=offset + 2))))
        offset += 1
        entities.append(cart_command_block(offset, Command(format(
            "fill ~ ~-{o1} ~ ~ ~-{o2} ~ air", o1=offset, o2=offset + 2))))
        offset += 1
        # Auto-activating command block dropped on top to kick things off.
        activatesand = sands.normal_sand("command_block")
        activatesand["TileEntityData"] = {"auto": 1}
        entities.append(cart_block(offset, "air"))
        entities.append(cart(nbt.cmd(format(
            "summon FallingSand ~ ~{o} ~ ", o=offset), activatesand, True)))
        entities.append(cart_command_block(filloffset, Command(format(
            "fill ~ ~ ~ ~ ~{o} ~ air", o=offset - filloffset))))
        # Self-destruct: remove the minecarts once everything has run.
        entities.append(cart("kill @e[r=1,type=MinecartCommandBlock]"))
        stack = ride(entities)
        final_stack = sands.ride([
            stack,
            sands.normal_sand("redstone_block"),
            sands.normal_sand("barrier")
        ], False)
        final_command_obj = nbt.cmd("summon FallingSand ~ ~1 ~ ", final_stack)
    final_command = nbt.JSON2Command(final_command_obj)
    return final_command
def getFilms(self, start=1):
    """Crawl the yugaopian movie list month by month, page by page,
    enriching and writing each page of movies to the excel sheet."""
    global header
    cprint('start to get films', 'green')
    cur = start
    url_prefix = 'https://www.yugaopian.cn/movlist/__'
    for year in range(start_year, end_year + 1):
        for month in range(start_month, end_month + 1):
            while True:
                url = url_prefix + str(year) + '-' + str(month) + '__' + str(cur)
                header['Referer'] = 'https://www.yugaopian.cn/movlist'
                body = self.connect(url)
                if body is None:
                    # FIX: the original used 'pass' here and then crashed on
                    # body.xpath below; retry the same page instead.
                    continue
                data_list = body.xpath(
                    'body/div[@class="wrapper"]//div[@class="movlist"]//a')
                move_list = []
                for data in data_list:
                    movie_name = data.xpath('span[@class="item-title"]/text()')[0]
                    # Drop the trailing two chars ("上映") before splitting the date.
                    movie_date = data.xpath(
                        'span[@class="item-pubtime"]/text()')[0][:-2].split('-')
                    movie = {
                        'CNName': movie_name,
                        'year': movie_date[0],
                        'month': movie_date[1],
                        'day': movie_date[2],
                        'pubYear': self.getPubYear(data.attrib['href'])
                    }
                    move_list.append(movie)
                self.getFilmInfo(move_list)
                self.excel.writeSheet(move_list)
                # Last pagination link is the page count unless it is a
                # "next" arrow, in which case the one before it is.
                index_list = body.xpath('//p[@class="page-nav"]/a/text()')
                if str.isdigit(index_list[-1]):
                    max_index = index_list[-1]
                else:
                    max_index = index_list[-2]
                if cur - int(max_index) == 0:
                    break
                cur = cur + 1
            cur = 1
def sub(self, string):
    """Expand every occurrence of this macro in *string* and return the result.

    The outer while re-scans so that expansions which themselves contain the
    macro get expanded too.  NOTE(review): if an expansion keeps matching
    self.regex this loops forever -- presumably prevented by construction;
    confirm.
    """
    while self.regex.search(string):
        found = self.regex.finditer(string)
        for find in found:
            # Text between the parentheses of the macro call.
            params = self.param_regex.search(find.group()).group()[1:-1]
            params = re.sub(r",\s", ",", params)
            paraml = params.split(",")
            parsedparams = []
            for i in paraml:
                if i:
                    # Unquote and unescape string-literal arguments.
                    if i[0] == '"':
                        i = i[1:-1].replace('\\"', '"').replace('\\\\', '\\')
                    parsedparams.append(i)
            try:
                output = self.function(self.replacewith, self.params, parsedparams)
            except:
                # Best-effort: a bad argument list expands to the empty string.
                cprint("{params} is not a valid argument list for ${funcname}.",
                       color=bcolors.RED, params=params, funcname=self.name)
                output = ""
            string = string.replace(find.group(), output)
    return string
def browser_connect(self, url):
    """Open *url* in a Chrome webdriver behind a tested proxy.

    Returns True unless the browser raised a non-timeout error.  The browser
    is recycled every 200 loads (browser_TTL) to limit memory growth and
    rotate the proxy.
    """
    if self.browser_TTL == 0 or self.browser is None:
        if self.browser is not None:
            self.browser.quit()
        self.browser_TTL = 200
        time.sleep(1)
        options = webdriver.ChromeOptions()
        # options.add_argument('--headless')
        options.add_argument('--disable-gpu')
        # Keep drawing proxies until one passes the connectivity test.
        while True:
            # self.proxy = get_proxy().get("proxy")
            self.proxy = proxyPool.get_proxy()
            if self.connect_test(url, self.proxy):
                options.add_argument('--proxy-server=http://{}'.format(
                    self.proxy))
                break
        # Disable image loading to speed up page loads.
        options.add_experimental_option(
            "prefs", {"profile.managed_default_content_settings.images": 2})
        self.browser = webdriver.Chrome(chrome_options=options)
        self.browser.set_page_load_timeout(20)
        self.browser.set_script_timeout(20)
        # self.browser.implicitly_wait(20)
    cprint('use browser openning {}'.format(url), 'cyan')
    isOk = True
    try:
        self.browser.get(url)
        self.browser_TTL -= 1
    except TimeoutException:
        # A timeout still counts as a usable attempt; the page may have
        # partially loaded.
        self.browser_TTL -= 1
        cprint('timeout', 'yellow')
    except Exception:
        isOk = False
        cprint('browser error')
    return isOk
def tearDownClass():
    """Print a colored benchmark summary against the recorded goals and dump
    the raw summary to benchmark-summary.json."""
    cprint.header('\nBenchmark Summary')
    print("(new/goal)")
    for key, val in summary.items():
        elapsed, number, per, repeat = val
        per *= 1000  # seconds -> milliseconds
        # Print which function call this is
        print(f'{key} [x{number}]')
        # Set color according to how well it did vs. the goal
        if key in goals:
            gtime, gnumber, gper, grepeat = goals[key]
            gper *= 1000
            err = (per - gper) / gper
            # FIX: the original strict comparisons left `color` unbound when
            # err landed exactly on a boundary (-0.1, 0.1 or 0.5) -> NameError.
            if err <= -0.1:
                color = 'green'
            elif err < 0.1:
                color = 'blue'
            elif err <= 0.50:
                color = 'warning'
            else:
                color = 'fail'
            # Color print the result
            cprint(
                color,
                f'{round(per, 3)}/{round(gper,3)} ms, {round(err*100, 1)}%')
        else:
            cprint.blue(f'(new) {round(per, 3)}ms')
    # Save output to a json file
    with open('benchmark-summary.json', 'w') as f:
        json.dump(summary, f, indent=True)
def getDBInfo(self, movie: dict) -> None:
    """Scrape douban rating, vote counts, star distribution, credits and the
    IMDb link for *movie*, storing everything back into the dict in-place."""
    url = movie['url']
    if url == '':
        cprint(movie['CNName'] + ' douban url is None', 'red')
        return
    body = self.connect(url)
    if body is None:
        return
    dbMark = body.xpath('//strong[@property="v:average"]/text()')
    dbNum = body.xpath('//span[@property="v:votes"]/text()')
    movie['dbMark'] = '' if dbMark == [] else dbMark[0]
    movie['dbNum'] = '' if dbNum == [] else dbNum[0]
    # Five-star rating distribution; the page lists 5-star first, so index
    # 4 - i maps it to keys db1m..db5m in ascending star order.
    rates = body.xpath(
        '//div[@class="ratings-on-weight"]//span[@class="rating_per"]/text()'
    )
    if rates != []:
        for i in range(5):
            movie['db%dm' % (i + 1)] = rates[4 - i]
    # The info block is plain text; strip full-width spaces and split into
    # "label: value" lines.
    info_list = body.xpath('string(//div[@id="info"])').replace(
        ' ', '').split('\n')
    for info in info_list:
        # NOTE(review): split(':')[1] truncates values that themselves
        # contain a colon -- confirm acceptable for these fields.
        if info.startswith('导演'):
            movie['directors'] = info.split(':')[1]
        elif info.startswith('编剧'):
            movie['writers'] = info.split(':')[1]
        elif info.startswith('主演'):
            movie['stars'] = info.split(':')[1]
        elif info.startswith('类型'):
            movie['category'] = info.split(':')[1]
        elif info.startswith('制片'):
            movie['country'] = info.split(':')[1]
        elif info.startswith('语言'):
            movie['language'] = info.split(':')[1]
    # The last link in the info block is assumed to be the IMDb url.
    imdbUrl = body.xpath('//div[@id="info"]/a[last()]/@href')
    if imdbUrl is not None and imdbUrl != []:
        self.getImdbInfo(movie, imdbUrl[0])
def forward(self, bottom, top):
    """Convert the bottom mask blob to a foreground-probability map and ship
    it to the peer over ZMQ, waiting for an 'OK' acknowledgment."""
    if bottom[0].data.shape[1] == 1:
        # mask is a Nx1xHxW dimensional matrix: already a probability map.
        fg_mask = bottom[0].data[:, 0, :, :]
    elif bottom[0].data.shape[1] == 2:
        # mask is a Nx2xHxW matrix of logits: numerically stable softmax
        # over the channel axis (max-subtraction before exp), keep channel 1.
        max_val = np.max(bottom[0].data, axis=1).reshape(
            (bottom[0].data.shape[0], 1) + bottom[0].data.shape[2:])
        nd = bottom[0].data - max_val
        exp = np.exp(nd)
        fg_mask = exp[:, 1, :, :] / exp.sum(1)
    else:
        raise Exception
    cprint('client sends ' + str(fg_mask.shape), bcolors.WARNING)
    self.sock.send_pyobj(fg_mask)
    cprint('client waiting for response', bcolors.WARNING)
    response = self.sock.recv()
    # NOTE(review): under Python 3, pyzmq recv() returns bytes, so comparing
    # with the str 'OK' would always mismatch -- confirm target version.
    if response != 'OK':
        cprint('FATAL ERROR: response is not OK', bcolors.FAIL)
def load_frame(self, player, first_index, second_index):
    """Assemble one training item from *player*: a set of first-frame images
    and labels, optionally one second frame, deploy-time metadata, and
    per-resolution label variants.

    Returns None when second_index collides with first_index.
    """
    cprint(
        'Loading pair = ' + player.name + ', ' + str(first_index) + ', ' +
        str(second_index), bcolors.WARNING)
    # The second frame must not be one of the first frames.
    if second_index in first_index:
        return None
    images1 = []
    labels1 = []
    image_path = []
    shape1 = self.first_shape
    for ind in first_index:
        frame1_dict = player.get_frame(ind)
        # shape1 is threaded through so all first frames share a shape.
        image1, label1, shape1 = self.__prepross(frame1_dict, shape1)
        images1.append(image1.transpose((2, 0, 1)))  # HWC -> CHW
        labels1.append(label1)
        image_path.append(frame1_dict['image_path'])
    item = dict(first_img=images1, image1_path=image_path)
    if second_index is not None:
        frame2_dict = player.get_frame(second_index)
        image2, label2, shape = self.__prepross(frame2_dict, self.second_shape)
        item['second_img'] = [image2.transpose((2, 0, 1))]
        item['image2_path'] = [
            frame2_dict['image_path'],
        ]
    if self.deploy_mode:
        # Also record the original (unprocessed) images/masks and the
        # semantic labels, for evaluation at deploy time.
        first_semantic_labels = []
        first_mask_orig = []
        first_img_orig = []
        for ind in first_index:
            a, b, c = self.__get_deploy_info(player, ind)
            first_semantic_labels.append(a)
            first_mask_orig.append(b)
            first_img_orig.append(c)
        deploy_info = dict(seq_name=player.name,
                           first_index=first_index,
                           first_img_orig=first_img_orig,
                           first_mask_orig=first_mask_orig,
                           first_semantic_labels=first_semantic_labels)
        if second_index is not None:
            second_semantic_labels, second_mask_orig, second_img_orig = self.__get_deploy_info(
                player, second_index)
            deploy_info.update(
                second_index=second_index,
                second_img_orig=second_img_orig,
                second_mask_orig=second_mask_orig,
                second_semantic_labels=second_semantic_labels)
        item['deploy_info'] = deploy_info
    # Create first_labels: one rescaled/normalized copy per configured
    # (name, down_scale, offset) triple.
    for i in range(len(self.first_label_params)):
        name, down_scale, offset = self.first_label_params[i]
        item[name] = []
        for label1 in labels1:
            nlabel1 = util.change_coordinates(label1, down_scale, offset)
            nlabel1 = (nlabel1 - self.first_label_mean) * self.first_label_scale
            assert (self.__is_integer(nlabel1))
            item[name].append(nlabel1.reshape((1, ) + nlabel1.shape))
    if second_index is not None:
        # Create second_labels (single frame, so one entry per param set).
        for i in range(len(self.second_label_params)):
            name, down_scale, offset = self.second_label_params[i]
            nlabel2 = util.change_coordinates(label2, down_scale, offset)
            assert (self.__is_integer(nlabel2))
            item[name] = [nlabel2.reshape((1, ) + nlabel2.shape)]
    if self.has_cont:
        # Sequence continuation flags: 0 for the first frame, 1 afterwards.
        item['cont'] = [0] + [1] * (len(first_index) - 1)
    return item
def exit_with_error(chapter):
    """Report a malformed chapter string in red and terminate with status 1."""
    message = (
        'Chapters should be formatted like so: "Name,00:00:00.000"'
        f' not as "{chapter}"'
    )
    util.cprint(message, "red")
    sys.exit(1)
def generate_xml_file(chapters):
    """Write the Matroska chapter XML for *chapters* to chapters.temp.xml."""
    parts = [
        '<?xml version="1.0" encoding="UTF-8"?>',
        '<!DOCTYPE Chapters SYSTEM "matroskachapters.dtd">',
        "<Chapters><EditionEntry>",
    ]
    parts.extend(generate_chapter_xml(chapter) for chapter in chapters)
    parts.append("</EditionEntry></Chapters>")
    with open("chapters.temp.xml", "w") as f:
        f.write("".join(parts))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Add a list of chapters to an mkv")
    parser.add_argument("file", help="Location of the mkv file to modify")
    parser.add_argument(
        "chapters",
        nargs="+",
        help='Add a list of chapters like this: "Opening,00:00:00.000" '
        '"Part 1,00:01:30.000"')
    args = parser.parse_args()
    if not args.file.endswith(".mkv"):
        util.cprint("Input file must be an mvk", "red")
        sys.exit(1)
    if not os.path.isfile(args.file):
        util.cprint(f"Mkv file could not be found: '{args.file}'", "red")
        sys.exit(1)
    processed_chapters = process_chapters(args.chapters)
    generate_xml_file(processed_chapters)
    util.run_command(f'mkvpropedit --chapters chapters.temp.xml "{args.file}"')
    os.remove("chapters.temp.xml")
def getSheetById(self, index=0):
    """Return worksheet *index* from the open workbook, or None when the
    index is out of range."""
    self.openCheck()
    if index >= self.workBook.nsheets:
        cprint('index out of bounds')
        # FIX: the original fell through and still called sheet_by_index,
        # raising IndexError right after warning about the bad index.
        return None
    return self.workBook.sheet_by_index(index)
def openCheck(self):
    """Warn when no workbook has been opened yet."""
    if self.workBook is not None:
        return
    cprint('Didn\'t open a xls file yet')
def parse_cmd(cindex, commands, functions, variables, func_regex):
    """Preprocess the command at commands[cindex]: expand variables and
    macros, then handle define / undefine / import / for directives.

    Returns (cindex, outcommands, functions, variables, func_regex).
    NOTE(review): the import-failure paths return a bare [] instead of the
    usual 5-tuple -- callers that unpack will raise; confirm intent.
    """
    outcommands = []
    command = commands[cindex].strip()
    # Blank lines and comments produce no output.
    if not command or comment_regex.match(command):
        return cindex, [], functions, variables, func_regex
    # Substitute all known variables, then expand macros until no macro
    # invocation remains in the line.
    for var in variables:
        command = variables[var].sub(command)
    while func_regex.search(command):
        for macro in functions:
            command = functions[macro].sub(command)
    if define_regex.match(command):
        command_split = define_tag_regex.sub("", command).split()
        if len(command_split) < 2:
            return cindex, [], functions, variables, func_regex
        name = command_split[0]
        contents = " ".join(command_split[1:])
        if macro_regex.match(name):
            # Parenthesized define: register a macro and rebuild the
            # combined macro-call regex.
            params = param_regex.search(name).group()[1:-1].split(",")
            name = word_regex.search(name).group()
            functions[name] = CmdMacro(name, params, contents)
            func_regex = re.compile(
                "\\$(" + "|".join(map(lambda x: functions[x].name, functions))
                + ")" + CmdMacro.param, re.IGNORECASE)
            return cindex, [], functions, variables, func_regex
        # Plain define: register a simple variable.
        if word_regex.match(name):
            variables[name] = CmdVariable(name, contents)
    elif undefine_regex.match(command):
        # Remove any listed variables/macros.
        command_split = undefine_regex.sub("", command).split()
        for i in command_split:
            if i in variables:
                del variables[i]
            if i in functions:
                del functions[i]
    elif import_regex.match(command):
        if context is None:
            return cindex, [], functions, variables, func_regex
        libraryname = import_regex.sub("", command).strip()
        if not libraryname:
            return cindex, [], functions, variables, func_regex
        if isinstance(context, str):
            # Single search directory: try the exact name, then name.1cc.
            if os.path.exists(os.path.join(context, libraryname)):
                importedcontext = context
                importedname = libraryname
                lib = open(os.path.join(context, libraryname))
            elif os.path.exists(os.path.join(context, libraryname + ".1cc")):
                importedcontext = context
                importedname = libraryname + ".1cc"
                lib = open(os.path.join(context, libraryname + ".1cc"))
            else:
                cprint("Failed to import {lib}. File not found.",
                       color=bcolors.RED, lib=libraryname)
                return []
        else:
            # context is a list of search directories; first hit wins.
            lib = None
            for i in context:
                if os.path.exists(os.path.join(i, libraryname)):
                    importedcontext = i
                    importedname = libraryname
                    lib = open(os.path.join(i, libraryname))
                    break
                elif os.path.exists(os.path.join(i, libraryname + ".1cc")):
                    importedcontext = i
                    importedname = libraryname + ".1cc"
                    lib = open(os.path.join(i, libraryname + ".1cc"))
                    break
            if not lib:
                cprint("Failed to import {lib}. File not found.",
                       color=bcolors.RED, lib=libraryname)
                return []
        # Recursively preprocess the imported file's lines.
        outcommands += preprocess(lib.read().split("\n"), importedcontext, importedname)
    elif for_regex.match(command):
        # Delegate loop expansion, then preprocess the expanded section.
        cindex, out, functions, variables, func_regex = parse_for(
            cindex, commands, functions, variables, func_regex)
        outcommands += parse_section(out, functions, variables, func_regex)
    else:
        outcommands.append(command)
    return cindex, outcommands, functions, variables, func_regex
def gen_stack(init_commands, clock_commands, mode, loud=False):
    """Build a one-command falling-sand command-block stack.

    Init commands run once from an auto command block; clock commands are
    stacked in reverse with the first one promoted to a repeating block.
    Returns the final command string.

    NOTE(review): `format` here is a project helper taking keyword
    placeholders, not the builtin -- confirm its import.
    """
    final_command_obj = None
    if clock_commands or init_commands:
        command_sands = []
        repeatoffsets = []
        if mode == 'i':
            # Offsets of repeating blocks that need a blockdata activation.
            if clock_commands and isinstance(clock_commands[0], Command):
                repeatoffsets.append(len(clock_commands) + 2)
            for command in clock_commands:
                if command.block == "repeating_command_block" and not command.cond and command is not clock_commands[0]:
                    repeatoffsets.append(len(clock_commands) - clock_commands.index(command) + 2 + len(repeatoffsets))
        filloffset = len(init_commands) + len(repeatoffsets)
        if filloffset:
            filloffset += 1
        if filloffset:
            # Initialization section: one auto command block per init
            # command, plus activation blockdata and a cleanup fill.
            if loud:
                cprint("minecraft:command_block:0\n - Initialization",
                       color=bcolors.DARKGRAY, allow_repeat=True)
            sand = normal_sand("command_block")
            if mode == 'i':
                sand["TileEntityData"] = {"auto": 1}
            command_sands.append(sand)
            for command in init_commands:
                if loud:
                    cprint(command.prettystr(), allow_repeat=True)
                command_sands.append(generate_sand(command, 0))
            for offset in repeatoffsets[::-1]:
                blockdata = Command(format("blockdata ~ ~-{offset} ~ {auto:1b}", offset=offset), init=True)
                if loud:
                    cprint(blockdata.prettystr(), allow_repeat=True)
                sand = generate_sand(blockdata, 0)
                command_sands.append(sand)
            if filloffset:
                # Clear the initialization column once it has run.
                fill = Command(format("fill ~ ~-1 ~ ~ ~{offset} ~ air", offset=filloffset), init=True)
                if loud:
                    cprint(fill.prettystr(), allow_repeat=True)
                    cprint("minecraft:barrier\n - Initialization",
                           color=bcolors.DARKGRAY, allow_repeat=True)
                command_sands.append(generate_sand(fill, 0))
                command_sands.append(normal_sand("barrier"))
        # Clock section, stacked bottom-up (hence reversed iteration).
        for command in clock_commands[::-1]:
            if command is clock_commands[0] and isinstance(command, Command):
                command.block = "repeating_command_block"
                command_sands.append(generate_sand(command, 1))
            else:
                sand = generate_sand(command, 1)
                if command.block == "repeating_command_block" and command.cond:
                    sand["TileEntityData"]["auto"] = 1
                command_sands.append(sand)
            if loud:
                cprint(command.prettystr(), allow_repeat=True)
        final_command_obj = nbt.cmd("summon FallingSand ~ ~1 ~ ",
                                    ride(command_sands, False))
    final_command = nbt.JSON2Command(final_command_obj)
    return final_command