def printScreen(self):
    """Render the whole curses UI: the bordered root screen with the title,
    plus an inner window showing the next brewskeeball matchup and the
    quit hint. Caller must have initialized curses already."""
    scn = self.__screen
    scn.clear()
    scn.border(0)
    # Title
    scn.addstr(0, 35, "PIZZA CLUB", curses.A_STANDOUT)
    # construct inner Window (fixed 78x22, inset one cell from the border)
    begin_x = 1 ; begin_y = 1
    height = 22 ; width = 78
    win = curses.newwin(height, width, begin_y, begin_x)
    # Menu hint, right-aligned on the bottom row of the inner window
    menu_text = "press q to exit"
    win.addstr(height - 1, width - len(menu_text) - 1, menu_text)
    # Brewskeeball: next matchup header, teams, and date/time, all centered
    win.addstr(3, 0, Util.centerString("Next Matchup", width))
    win.addstr(5, 0, Util.centerString(self.__brewskeeball.matchupTeams(), width))
    win.addstr(7, 0, Util.centerString("%s at %s" % (self.__brewskeeball.matchupDate(), self.__brewskeeball.matchupTime()), width))
    # Render screen (outer first so the inner window draws on top)
    scn.refresh()
    win.refresh()
def update():
    ''' Sets target's siteurl, blogname, and homepage '''
    Util.validate_role()
    config = env.config
    db_table = 'wp_options'
    # WordPress option rows to rewrite: option_name -> new option_value
    entries = {
        'siteurl': config['site_url'],
        'blogname': config['site_name'],
        'home': config['site_url']
    }
    # tunnel the remote MySQL port to localhost for the duration of the update
    with local_tunnel(config['db_port']):
        cnx = mysql.connector.connect(user=config['db_user'],
                                      password=config['db_pass'],
                                      host='127.0.0.1',
                                      port=config['db_port'],
                                      database=config['db_name'])
        cnx.start_transaction()
        cursor = cnx.cursor()
        # table name comes from the constant above, never user input;
        # the values are passed as parameters, not interpolated
        update_option = ("UPDATE `{db_table}` "
                        "SET `option_value`=%s "
                        "WHERE `option_name` LIKE %s".format(db_table=db_table))
        for key, value in entries.iteritems():  # Python 2 dict iteration
            cursor.execute(update_option, (value, key))
        cnx.commit()
        cnx.close()
def __init__(self):
    '''
    initializes:
        1. graph database connection
        2. datastore connection
        3. graph database indices required
        4. url and templates for interaction with the graph database REST API
    '''
    Util.__init__(self)
    if os.environ.get('NEO4J_URL'):
        # hosted deployment: credentials and endpoint come from NEO4J_URL
        graph_db_url = urlparse(os.environ.get('NEO4J_URL'))
        neo4j.authenticate(
            "{host}:{port}".format(host = graph_db_url.hostname, port = graph_db_url.port),
            graph_db_url.username,
            graph_db_url.password
        )
        self.graphdb = neo4j.GraphDatabaseService(
            'http://{host}:{port}/db/data'.format(host = graph_db_url.hostname, port = graph_db_url.port)
        )
    else:
        # local development: py2neo's default endpoint (localhost:7474)
        self.graphdb = neo4j.GraphDatabaseService()
    # node indices used for lookups and disambiguation
    self.node_index = self.graphdb.get_or_create_index(neo4j.Node, 'NODE')
    self.disambiguation_index = self.graphdb.get_or_create_index(neo4j.Node, self.DISAMBIGUATION)
    # REST helpers: node URL builder and a dijkstra path-query template
    # NOTE(review): self._url hard-codes localhost even when NEO4J_URL is set — confirm intended
    self._url = lambda present_node: 'http://localhost:7474/db/data/node/{0}'.format(present_node)
    self._template = lambda target_node: {
        "to" : self._url(target_node),
        "relationships": {
            "type": "sibling"
        },
        "cost_property": "weight",
        "algorithm" : "dijkstra"
    }
    self.DataM = RelDataStore()
def _get_uid(cls):
    """Return the locally cached user id, requesting a fresh one from the
    id service (and caching it) when none exists yet."""
    uid = Util.get_user_id()
    if uid:
        return uid
    # first run on this machine: ask the server to mint an id
    print("welcome, creating id for you.")
    uid = requests.get(cls.get_user_id_url).text
    Util.set_user_id(uid)
    return uid
def main(): worm = Worm("http://11.11.0.64:8099/", "http://11.11.0.64:4041/stages/", True, False, "Flint") # worm = Worm("http://211.69.198.208:8080/", "http://211.69.198.208:8080/history/app-20150722101951-0008/stages/", "Flint") # finish_sparks = worm.get_finish_sparks() running_spark = worm.get_finish_spark() if running_spark != None: running_stages = running_spark.get_running_stages() finished_stages = running_spark.get_finished_stages() stages = [] for finished_stage in finished_stages: stage_dict = { "stage_id": finished_stage.get_stage_id(), "stage_duration": Util.format_time(finished_stage.get_duration()), "submit_time": finished_stage.get_submit_time(), "tasks_percent": 100.0, "gc_time": round(finished_stage.get_gc_time(), 1), } stages.append(stage_dict) for running_stage in running_stages: stage_dict = { "stage_id": running_stage.get_stage_id(), "stage_duration": Util.format_time(running_stage.get_duration()), "submit_time": running_stage.get_submit_time(), "tasks_percent": Util.format_tasks_percent(running_stage.get_tasks_percent()), "gc_time": round(running_stage.get_gc_time(), 1), } stages.append(stage_dict) # print stages format_spark = {} format_spark["app_name"] = running_spark.get_app_name() format_spark["total_time"] = Util.format_time(running_spark.get_total_time()) format_spark["status"] = running_spark.get_status() format_spark["property"] = running_spark.get_property() format_spark["stages"] = stages print format_spark
def on_search_enter_key(self, entry):
    """Handle Enter in the search box: tag search ('@...') hits the index,
    anything else walks the home directory matching filename terms."""
    # e.g.: find /home/shercoder/ \( ! -regex '.*/\..*' \) | grep "soccer"
    search_terms = entry.get_text()
    is_tag_search = False
    if search_terms.startswith('@'):
        # '@tags' queries the document index instead of the filesystem
        search_terms = search_terms[1:]
        is_tag_search = True
    else:
        search_terms = search_terms.split(' ')
    if entry.get_text():
        allfiles = []  # (filename, full path) pairs to display
        if is_tag_search:
            results = self._index_manager.search_documents(search_terms)
            for hit in results:
                allfiles.append((hit['filename'], hit['filepath']))
        else:
            for root, dirs, files in os.walk(HOME):
                # skip hidden files; prune hidden dirs in place so walk skips them
                files = [f for f in files if not f[0] == '.']
                dirs[:] = [d for d in dirs if not d[0] == '.']
                for term in search_terms:
                    for filename in fnmatch.filter(files, "*{}*".format(term)):
                        allfiles.append((filename, os.path.join(root, filename)))
        self._treeview.get_model().generate_search_tree(allfiles)
    else:
        # empty query: restore the full home tree and reset the notebook
        self._treeview.get_model().generate_tree(HOME)
        Util.clear_notebook(self._notebook)
def kernel_version(min=None, max=None):
    """
    Return kernel version or test for a minimum and/or maximum version

    min -- minimal kernel version required
    max -- maximum kernel version required
    """
    # NOTE(review): `min`/`max` shadow the builtins of the same name.
    if not Firewall._kernelversion:
        # query kernel version once and cache it on the Firewall class
        kvcmd = subprocess.Popen(Firewall.kernelversioncmd, stdout=subprocess.PIPE)
        result = kvcmd.communicate()[0]
        Firewall._kernelversion = result.strip()
        # still no version number? - raise an exception
        if not Firewall._kernelversion:
            raise Error("Couldn't get kernel version!")
    # no bounds given: return the raw version string
    if not min and not max:
        return Firewall._kernelversion
    # bounds given: return True only if the version lies within them
    if min:
        if Util.compare_versions(Firewall._kernelversion, min) < 0:
            return False
    if max:
        if Util.compare_versions(Firewall._kernelversion, max) > 0:
            return False
    return True
def _dumpVariable( self ):
    """Trace-dump this object's bookkeeping fields for debugging."""
    Util.trace( "---------------------------" )
    for label, value in (
            ( "index: ",   self.index_ ),
            ( "name: ",    self.name_ ),
            ( "mtime: ",   self.mtime_ ),
            ( "bgColor: ", self.bgColor_ ) ):
        Util.trace( label + str( value ) )
def build(self, is_main=False, is_method=False, is_class=False): ''' Copies the base java file, moves the changes into it and compiles it. If an Exception occurs, the change is rolled back. ''' shutil.copyfile(START_FILE, MAIN_FILE) self.parse_file(MAIN_FILE) # Attempt to compile the new change try: Util.compile_java([MAIN_FILE] + self.full_class_names) except Exception as e: if verbose_output == True: print 'Exception: %s' % e else: print 'An Exception occurred compiling the program' # Determine who caused the exception and remove the code if is_main is True: self.main_input.pop() elif is_method is True: self.methods.pop() self.method_names.pop() elif is_class is True: self.classes.pop() self.class_names.pop() filename = self.full_class_names.pop() Util.remove_file(filename) return if is_main is True: self.run()
def post(self, answerId):
    """Create a comment on the answer `answerId` for the current user.

    Rejects the request when the answer does not exist, the message is
    empty, or the parent question is locked. Fix: leftover debug prints
    removed.
    """
    message = self.get_argument("message")
    userId = self.get_current_user_id()
    answer = Answer.queryByAnswerId(answerId)
    # the answer must exist
    if not answer:
        self.write(Util.response(None, -1, u'回复不存在'))
        return
    # a comment body is required
    if message == "":
        self.write(Util.response(None, -1, u'请输入评论内容'))
        return
    # locked questions cannot be commented on
    question = Question.queryById(answer.question_id)
    if question.lock == 1:
        self.write(Util.response(None, -1, u'不能评论锁定的问题'))
        return
    # permission check intentionally omitted here
    # insert the comment and acknowledge
    AnswerComment.addAnswerComment(answerId, userId, message)
    rsm = {'item_id': answerId, 'type_name': 'answer'}
    self.write(Util.response(rsm, 1, None))
def create_grid(self):
    """Fill this grid with the GENERAL file-information section:
    a header row followed by label/value rows for the file's name,
    size, location, and timestamps."""
    header = Util.create_label("<big>GENERAL</big>", align=Gtk.Align.START)
    header.set_use_markup(True)
    self.attach(header, 0, 0, 1, 1)
    # (caption, value widget) pairs, one grid row each, starting below the header
    rows = (
        ("Name:", Util.create_info_label(os.path.basename(self._filepath))),
        ("Filesize:", self.file_or_folder_size_widget()),
        ("Location:", Util.create_info_label(os.path.dirname(self._filepath), ellipsize=True)),
        ("Last Modified:", Util.create_info_label(time.ctime(self._filestat.st_mtime))),
        ("Last Access:", Util.create_info_label(time.ctime(self._filestat.st_atime))),
    )
    for row, (caption, value_widget) in enumerate(rows, start=1):
        self.attach(Util.create_label(caption), 0, row, 1, 1)
        self.attach(value_widget, 1, row, 1, 1)
def run(self):
    """Run the whole ETL: read the chain definitions from the configured
    section (default [etl]) and execute each chain in order."""
    log.info("START")
    timer = Util.start_timer("total ETL")
    # section holding the chain definitions; [etl] unless overridden on the cmd line
    section = self.options_dict.get('config_section')
    if section is None:
        section = 'etl'
    chains_str = self.configdict.get(section, 'chains')
    if not chains_str:
        raise ValueError('ETL chain entry not defined in section [etl]')
    # multiple comma-separated chains may be configured; run them one by one
    for chain_def in chains_str.split(','):
        chain = Chain(chain_def, self.configdict)
        chain.assemble2()
        chain.run()
    Util.end_timer(timer, "total ETL")
    log.info("ALL DONE")
def generate_zip_file(self, xplan_object):
    """Merge `xplan_object`'s items into the object XML and package the
    result as <name>_new.zip.

    Fixes: file handles are now closed via `with`, and the loop variable
    no longer shadows the builtin `object`.
    """
    self.xplan_object = xplan_object
    logging.info('Loading object file...')
    name = os.path.splitext(os.path.basename(self.filename))[0]
    object_file = name + '_obj.xml'
    with open(object_file) as handle:
        self.soup = BeautifulSoup(handle.read())
    logging.info('Updating object file...')
    for element in self.soup.find_all("conditions", attrs={"type": "List"}):
        reference = element.get('reference')
        if not reference:
            continue
        found_references = xplan_object.get_item_by_reference(reference)
        if not found_references:
            continue
        # if found, clear all conditions for that section, preparing
        # for new/updated conditions
        self.__update_conditions(element, found_references)
        for xplan_item in found_references:
            self.__append_new_data(element, xplan_item.get_item())
    logging.info('Saving to zip file...')
    output_zip = name + '_new.zip'
    xml_file = name + '.xml'
    with open(xml_file, 'w') as out:
        out.write(self.get_xml())
    try:
        # start from the template archive, then add the generated XML
        shutil.copyfile('page.zip', output_zip)
        Util.append_to_zip(output_zip, xml_file)
    except Exception as e:
        logging.error('[zip:error] ' + str(e))
def optimizeDirectory(self, path, callback, extension = "", minimize = False):
    """optimizes a directory

    Arguments:
    path -- path to directory
    callback -- function to run the file through
    extension -- extension to search for in the directory
    minimize -- whether or not we should minimize the file contents (html)

    Returns:
    void

    """
    # results go to a sibling "<path>_opt" directory
    directory = path + "_opt"
    skip = self.prepareDirectory(directory)
    if skip:  # was `skip is True`; plain truthiness is the idiom
        return
    for dir_file in Util.getFilesFromDir(path, extension):
        # recurse into subdirectories, mirroring them under the _opt tree
        if Util.isDir(dir_file):
            self.optimizeSubdirectory(dir_file, callback, directory, extension, minimize)
            continue
        new_path = directory + "/" + Util.getFileName(dir_file)
        self.optimizeFile(dir_file, callback, minimize, new_path)
def check_path_type(self, data_path):
    '''
    Check whether the data_path is a scan, section or fov.

    The decision depends on how many directory levels down the
    IMAGE_COORDINATES_FILE lives: directly here means a FOV, one
    level down a section, two levels down a scan.
    '''
    marker = settings.IMAGE_COORDINATES_FILE
    # level 0: this is a FOV
    if os.path.exists(os.path.join(data_path, marker)):
        return 'FOV'
    # level 1: this is a section
    if os.path.exists(os.path.join(data_path,
                                   Util.get_first_level_subdir(data_path),
                                   marker)):
        return 'SECTION'
    # level 2: this is a scan
    if os.path.exists(os.path.join(data_path,
                                   Util.get_second_level_subdir(data_path),
                                   marker)):
        return 'SCAN'
    return None
def optimizeSubdirectory(self, path, callback, new_path, extension = "", minimize = False):
    """optimizes a subdirectory within a directory being optimized

    Arguments:
    path -- path to directory
    callback -- function to run the file through
    new_path -- path to optimized parent directory
    extension -- extension to search for in the directory
    minimize -- whether or not we should minimize the file contents (html)

    Returns:
    void

    """
    # mirror this subdirectory under the optimized parent
    subdir_path = new_path + "/" + path.split("/").pop()
    skip = self.prepareDirectory(subdir_path)
    if skip:  # was `skip is True`; plain truthiness is the idiom
        return
    for dir_file in Util.getFilesFromDir(path, extension):
        # recurse further for nested subdirectories
        if Util.isDir(dir_file):
            self.optimizeSubdirectory(dir_file, callback, subdir_path, extension, minimize)
            continue
        new_file_path = subdir_path + "/" + Util.getFileName(dir_file)
        self.optimizeFile(dir_file, callback, minimize, new_file_path)
def optimizeJsManifest(self):
    """Rewrite the JS manifest: substitute shortened id/class names for the
    originals, optionally replace string constants with small integers, and
    write the result to an "opt"-prefixed manifest file."""
    contents = Util.fileGetContents(self.config.js_manifest)
    # single-$ constants hold id names; swap in the shortened ids
    for key, value in self.manifest_ids.items():
        if "#" + value in self.id_map:
            contents = re.sub(r'((?<!\$)\${1}[A-Z0-9_]+\s?=\s?[\'|\"])(' + value + ')([\'|\"][,|;])', r'\1' + self.id_map["#" + value].replace("#", "") + r'\3', contents)
    # double-$ constants hold class names
    for key, value in self.manifest_classes.items():
        if "." + value in self.class_map:
            contents = re.sub(r'(\${2}[A-Z0-9_]+\s?=\s?[\'|\"])(' + value + ')([\'|\"][,|;])', r'\1' + self.class_map["." + value].replace(".", "") + r'\3', contents)
    if self.config.rewrite_constants:
        # replace each remaining string-constant value with a unique integer
        constants = re.findall(r'(\s+?(var\s)?([A-Z0-9_]+)\s?=\s?[\'|\"](.*?)[\'|\"][,|;])', contents)
        new_constants = {}
        i = 0
        for constant in constants:
            # underscore variables are ignored
            if constant[2][0] == "_":
                continue
            i += 1
            new_constant = re.sub(r'=(.*)([,|;])','= ' + str(i) + r'\2', constant[0])
            contents = contents.replace(constant[0], new_constant)
    # write <dir>/opt.<manifest> rather than overwriting the source manifest
    new_manifest = Util.prependExtension("opt", self.config.js_manifest)
    Util.filePutContents(new_manifest, contents)
    if self.config.show_savings:
        SizeTracker.trackFile(self.config.js_manifest, new_manifest)
def processMaps(self): """loops through classes and ids to process to determine shorter names to use for them and creates a dictionary with these mappings Returns: void """ # reverse sort so we can figure out the biggest savings classes = self.class_counter.items() classes.sort(key = itemgetter(1), reverse=True) for class_name, savings in classes: small_class = "." + VarFactory.getNext("class") # adblock extensions may block class "ad" so we should never generate it # also if the generated class already exists as a class to be processed # we can't use it or bad things will happen while small_class == ".ad" or Util.keyInTupleList(small_class, classes): small_class = "." + VarFactory.getNext("class") self.class_map[class_name] = small_class ids = self.id_counter.items() ids.sort(key = itemgetter(1), reverse=True) for id, savings in ids: small_id = "#" + VarFactory.getNext("id") # same holds true for ids as classes while small_id == "#ad" or Util.keyInTupleList(small_id, ids): small_id = "#" + VarFactory.getNext("id") self.id_map[id] = small_id
def _get_day_link(self):
    '''Fetch today's share-page links from the VIP index page.

    First looks for entries published with today's date; if none are
    found, retries with yesterday's date but requires the title to
    mention today (posts for "tomorrow" are published a day early).
    Returns a list of page URLs, empty on failure.
    '''
    cur_day = Util.get_time()
    if cur_day == None:
        self.errors.append('Get sys-time error')
        return []
    mainpage = self._get_content(self.config.get_vip_url())
    if mainpage != None:
        #first try to find year-month-day published
        time_str = '-'.join(cur_day)
        #print time_str
        re_str = '<em>%s</em>.*</a></label> <a href="(.*\.html)"' % time_str
        #print re_str
        links = self._get_links(mainpage, re_str)
        if len(links) != 0:
            #ok, no need more
            pass
        else:
            #second try: find posts dated (day-1) whose title names today
            day_before = Util.get_day_before(cur_day)
            chars = self._gen_choices(cur_day)
            time_str = '-'.join(day_before)
            re_str = '<em>%s</em>.*</a></label> <a href="(.*\.html)" title=".*[%s].*"' % (time_str, '|'.join(chars))
            links = self._get_links(mainpage, re_str)
        return links
    return []
def parse_args(args_list):
    """Parse Stetl command-line arguments from `args_list`.

    Returns the argparse namespace; `config_args` is normalized into a
    single dict merged from every -a occurrence (either a .properties
    file path or a "k1=v1 k2=v2" string).

    Fix: the short option strings contained a trailing space ('-c ')
    and only worked through argparse prefix matching; now spelled '-c'.
    """
    log.info("Stetl version = %s" % __version__)
    argparser = argparse.ArgumentParser(description='Invoke Stetl')
    argparser.add_argument('-c', '--config', type=str,
                           help='ETL config file in .ini format',
                           dest='config_file', required=False)
    argparser.add_argument('-s', '--section', type=str,
                           help='Section in the config file to execute, default is [etl]',
                           dest='config_section', required=False)
    argparser.add_argument('-a', '--args', type=str,
                           help='Arguments or .properties files to be substituted for symbolic {argN}s in Stetl config file, as -a "arg1=foo arg2=bar" and/or -a args.properties, multiple -a options are possible',
                           dest='config_args', required=False, action='append')
    argparser.add_argument('-d', '--doc', type=str,
                           help='Get component documentation like its configuration parameters, e.g. stetl doc stetl.inputs.fileinput.FileInput',
                           dest='doc_args', required=False)
    args = argparser.parse_args(args_list)

    if args.config_args:
        # merge every -a occurrence (file or inline string) into one dict;
        # later occurrences override earlier keys
        args_total = dict()
        for arg in args.config_args:
            if os.path.isfile(arg):
                log.info('Found args file at: %s' % arg)
                args_total = Util.merge_two_dicts(args_total, Util.propsfile_to_dict(arg))
            else:
                # Convert string to dict: http://stackoverflow.com/a/1248990
                args_total = Util.merge_two_dicts(args_total, Util.string_to_dict(arg))
        args.config_args = args_total
    return args
def run(self):
    """Create, schedule, and start a macOS FSEvents stream watching
    self._path, then enter the CFRunLoop (this call blocks)."""
    path = self._path
    self._streamRef = FSEventStreamCreate(
        kCFAllocatorDefault,
        self.eventsCallback,  # invoked on filesystem changes under `path`
        path,                 # passed back to the callback as context
        [path],
        kFSEventStreamEventIdSinceNow, # sinceWhen
        1.0, # latency
        0,
    )
    # FSEventStreamCreate returns None on failure
    if self._streamRef is None:
        Util.trace("FSEventStreamCreate is failed")
        return
    # debug toggle: flip to True to dump the stream description
    if False:
        FSEventStreamShow(self._streamRef)
    FSEventStreamScheduleWithRunLoop(self._streamRef, CFRunLoopGetCurrent(), kCFRunLoopDefaultMode)
    startedOK = FSEventStreamStart(self._streamRef)
    if not startedOK:
        Util.trace("failed to start the FSEventStream")
        return
    # if True:
    #     timer = CFRunLoopTimerCreate(
    #         FSEventStreamGetSinceWhen(streamRef),
    #         CFAbsoluteTimeGetCurrent() + settings.flush_seconds,
    #         settings.flush_seconds,
    #         0, 0, timer_callback, streamRef)
    #     CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer, kCFRunLoopDefaultMode)
    # blocks until the run loop is stopped
    CFRunLoopRun()
def __del__(self):
    """Tear down the FSEvents stream when this watcher is collected."""
    Util.trace("destroy DirCheck instance")
    # stop, invalidate, then release — the FSEvents lifecycle order
    stream = self._streamRef
    FSEventStreamStop(stream)
    FSEventStreamInvalidate(stream)
    FSEventStreamRelease(stream)
def on_post_save(self, view): file = view.file_name() if not (Util.is_scala(file) or Util.is_java(file)): return env = getEnvironment(view.window()) if env and env.is_connected() and env.client.analyzer_ready: TypeCheckFilesReq([view.file_name()]).run_in(env, async=True)
def __init__(self, name, ip, iface, hostname="", loginfo=""):
    """
    Create a new host object, with the given name, IP specification and interface

    name -- Nickname for the host
    ip -- IP specification for the host or subnet (e.g. "127.0.0.1 10.0.0.0/24")
    iface -- Interface nickname this is connected to (only one!)
    hostname -- "real" hostname, used for localhost detection
    loginfo -- source location for error messages
    """
    # verify and store name
    # NOTE(review): `and` means only an *empty* name can ever fail this
    # check; if any invalid name should be rejected this is likely `or`.
    if name == "" and not Util.verify_name(name):
        # BUG FIX: the original passed three values to a two-placeholder
        # format string, so this line raised TypeError instead of the
        # intended PyromanException.
        raise PyromanException("Host '%s' lacking a valid name at %s" \
            % (name, loginfo))
    if Firewall.hosts.has_key(name):
        raise PyromanException("Duplicate host specification: '%s' at %s" % (name, loginfo))
    self.name = name
    # verify and store IPs
    if ip == "":
        raise PyromanException("Host '%s' definition lacking IP address at %s" % (name, loginfo))
    self.ip = Util.splitter.split(ip)
    for i in self.ip:
        if not Util.verify_ipnet(i):
            raise PyromanException("IP specification '%s' invalid for host '%s' at %s" \
                % (i, name, loginfo))
    # verify and store interface
    self.iface = iface
    if iface == "":
        raise PyromanException("Host definition '%s' lacking kernel interfaces at %s" \
            % (name, loginfo))
    # store "real" hostname (which may be longer than nick)
    # this is used for "localhost detection"
    self.hostname = hostname
    # store loginfo
    self.loginfo = loginfo
    # register with firewall
    Firewall.hosts[name] = self
def moveActiveNode(self, direction, create=False):
    """Move the active-node selection one cell in `direction`.

    direction -- one of the pygame arrow-key constants
    create -- when True, create a node at the target cell if none exists

    Fix: an unrecognized key used to fall through with coords (0, 0),
    which could create/activate the origin node; such keys are now
    ignored. The key dispatch is also table-driven instead of an
    if/elif chain.
    """
    # arrow key -> (dx, dy) grid offset
    offsets = {
        K_UP: (0, -1),
        K_DOWN: (0, 1),
        K_LEFT: (-1, 0),
        K_RIGHT: (1, 0),
    }
    offset = offsets.get(direction)
    if offset is None:
        return
    coords = Util.addLists(self.activeNode.coords, offset)
    newNode = self.getNode(coords)
    if newNode is None:
        if not create:
            return
        self.addNode(coords)
        newNode = self.getNode(coords)
    self.changeActiveNode(newNode)
def set_from_options(self, options):
    """Apply dotted-path camera options: flip flags go straight to the
    camera, capture options are kept for later capture calls."""
    read = u.dotRead
    self.camera.hflip = read(options, 'hflip', False)
    self.camera.vflip = read(options, 'vflip', False)
    # set supplementary options (passed through to capture calls)
    self.options = {"use_video_port": read(options, 'use_video_port', False)}
def parse_args():
    """Parse Stetl command-line arguments from sys.argv.

    Returns the argparse namespace; `config_args` is converted into a
    dict from either a .properties file path or a "k1=v1 k2=v2" string.

    Fix: the short option strings contained a trailing space ('-c ')
    and only worked through argparse prefix matching; now spelled '-c'.
    """
    log.info("Stetl version = %s" % __version__)
    argparser = argparse.ArgumentParser(description='Invoke Stetl')
    argparser.add_argument('-c', '--config', type=str,
                           help='ETL config file in .ini format',
                           dest='config_file', required=False)
    argparser.add_argument('-s', '--section', type=str,
                           help='Section in the config file to execute, default is [etl]',
                           dest='config_section', required=False)
    argparser.add_argument('-a', '--args', type=str,
                           help='Arguments or .properties file to be substituted for {argN}s in config file, as -a "arg1=foo arg2=bar" or -a args.properties',
                           dest='config_args', required=False)
    argparser.add_argument('-d', '--doc', type=str,
                           help='Get component documentation like its configuration parameters, e.g. stetl doc stetl.inputs.fileinput.FileInput',
                           dest='doc_args', required=False)
    args = argparser.parse_args()

    if args.config_args:
        if os.path.isfile(args.config_args):
            log.info('Found args file at: %s' % args.config_args)
            args.config_args = Util.propsfile_to_dict(args.config_args)
        else:
            # Convert string to dict: http://stackoverflow.com/a/1248990
            args.config_args = Util.string_to_dict(args.config_args)
    return args
def main():
    """ Example: save, retrieve, and check UnitXObject variables. """
    from simulator import Simulator
    s = Simulator()
    UnitXObject.manager = s.get_manager()
    UnitXObject.scopes = s.get_scopes()
    # Register part: put two unit-carrying objects into the current scope
    crr_scope = s.get_scopes().peek()
    crr_scope['x'] = UnitXObject(value=1.5, varname='x', is_none=False,
                                 unit=Unit(ex_numer=u'm', numer=u'cm', ex_denom=None, denom=None))
    crr_scope['y'] = UnitXObject(value=1500, varname='y', is_none=False,
                                 unit=Unit(ex_numer=u'm', numer=u'km', ex_denom=u'時', denom=u'分'))
    s.get_scopes().new_scope()
    # Find & Show part: locate 'x' from the inner scope and dump all scopes
    found_scope = s.get_scopes().peek().find_scope_of('x')
    Util.dump(s.get_scopes())
    # Checking equals(): compare against an object with a different unit spec
    tmp_obj = UnitXObject(value=1.5, varname='x', is_none=False,
                          unit=Unit(ex_numer=None, numer=u'cm', ex_denom=None, denom=None))
    print tmp_obj
    print crr_scope['x'] == tmp_obj
    # Clear part: unwind both scopes
    s.get_scopes().del_scope()
    s.get_scopes().del_scope()
    return Constants.EXIT_SUCCESS
def frames2batch(k = 12, batch_size = 1024, is_calib = False):
    """Convert positive/negative frame files into shuffled .npz batches.

    k -- square frame size the images are resized to
    batch_size -- number of (vector, label) pairs per saved batch
    is_calib -- selects the calibration-net filename suffix

    Fixes: `frame_r == None` replaced with `is None` (== is elementwise
    on arrays and its truth value is ambiguous); the final partial batch
    is now always flushed (the original only saved it when it happened to
    be exactly batch_size long); the two save sites now produce consistent
    filenames ('..._net' / '..._calib-net').
    """
    bpath = 'F:\\train_data\\batch\\'

    def _save(batch, c):
        # persist one batch as <index>_<k>_net.npz (or ..._calib-net.npz)
        suffix = '_calib-net' if is_calib else '_net'
        sp.savez(bpath + str(c) + '_' + str(k) + suffix, sp.array(batch))

    pos = shuffle(util.get_files(rootdir = 'F:\\train_data\\pos\\'))
    neg = shuffle(util.get_files(rootdir = 'F:\\train_data\\neg\\'))
    total = shuffle(pos + neg)
    batch = []
    c = 0
    for item_path in total:
        frame = fr.get_frame(item_path)
        frame_r = fr.resize_frame(frame, (k, k))
        if frame_r is None:
            continue
        vec = fr.frame_to_vect(frame_r)
        # label from the filename: files containing 'pos' are positives
        label = 1 if item_path.split('\\')[-1].find('pos') > 0 else 0
        print(item_path, label)
        batch.append((vec, label))
        if len(batch) == batch_size:
            _save(batch, c)
            batch = []
            c += 1
    # flush the trailing partial batch
    if batch:
        _save(batch, c)
def callback(self, cmd, action, target, msg):
    """Parse a Chinese time phrase from `msg`, announce the timer, wait,
    then ring the bell unless the thread was stopped.

    NOTE(review): the return shapes are inconsistent — some paths return
    (False, None) tuples while others return bare False/True. Confirm
    what the caller expects before normalizing.
    """
    # msg is the spoken time phrase; None means upstream parsing failed
    if msg is None:
        self._home.publish_msg(cmd, u"时间格式错误")  # "time format error"
        return False, None
    # convert the phrase into a wait time in seconds
    if msg.endswith(u'点') or \
        msg.endswith(u'分'):
        # absolute clock time ("X o'clock" / "X minutes past")
        t = Util.gap_for_timestring(msg)
    elif msg.endswith(u"秒"):
        # relative: seconds
        t = int(Util.cn2dig(msg[:-1]))
    elif msg.endswith(u"分钟"):
        # relative: minutes
        t = int(Util.cn2dig(msg[:-2]))*60
    elif msg.endswith(u"小时"):
        # relative: hours
        t = int(Util.cn2dig(msg[:-2]))*60*60
    else:
        self._home.publish_msg(cmd, u"时间格式错误")
        return False
    if t is None:
        self._home.publish_msg(cmd, u"时间格式错误")
        return False, None
    DEBUG("thread wait for %d sec" % (t, ))
    # confirm back to the user, then sleep on the worker thread
    self._home.publish_msg(cmd, action + target + msg)
    threading.current_thread().waitUtil(t)
    if threading.current_thread().stopped():
        return False
    # ring the bell 7 times, suspending normal resume handling meanwhile
    self._home.setResume(True)
    count = 7
    Sound.play( Res.get_res_path("sound/com_bell") , True, count)
    self._home.setResume(False)
    return True
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Dec 6 20:24:46 2019 @author: rangeetpan """ from util import Util import numpy as np from keras.datasets import fashion_mnist (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data() x_train = x_train.astype('float32') / 255.0 x_train = x_train.reshape(60000, 784) NN = Util(784, 100, 10, 0.1) count = 1 ##############Algorithm Based Initialization################# delta = 0.003 # change delta exit_flag = False input_W = 0.008 * np.random.randn(784, 100) hidden_W = 0.008 * np.random.randn(100, 10) input_B = 0.008 * np.random.randn(100) hidden_B = 0.008 * np.random.randn(10) NN.input_W = input_W NN.input_B = input_B NN.hidden_W = hidden_W NN.hidden_B = hidden_B for i in range(400): loss, node_hidden, node_output = NN.forwardPropagation(x_train, y_train) #print(loss) NN.backwardPropagation(x_train, y_train, loss, node_hidden, node_output) accuracy = NN.accuracyComputation(x_train, y_train)
class Start:
    """Launcher window: lets the user pick a skill level (Beginner, Medium,
    Expert) and opens the matching editor window, restoring any previously
    pickled session buffer."""
    # ---- Change the location of the gcc compiler accordingly ----
    gccCompilerPath = 'C:\\MinGW\\bin\\gcc.exe'
    # -------------------------------------------------------------
    # empty buffer used when no saved session exists
    empty = set([])
    util = Util()
    preferred_screen = ""

    def makeWindow(self, root, type):
        """Close the launcher and open the window for the chosen level,
        pickling this Start object so the preference is remembered.
        NOTE(review): the parameter `type` shadows the builtin."""
        root.destroy()
        if type == 'Beginner':
            self.preferred_screen = type
            # restore the saved novice buffer if one was pickled earlier
            if os.path.getsize('novice.txt') > 0:
                with open('novice.txt', 'rb') as n_s:
                    load_start = pickle.load(n_s)
                    novice = NoviceWindow(load_start.listBoxBuffer)
            else:
                novice = NoviceWindow(self.empty)
            with open('settings.txt', 'wb') as settings:
                pickle.dump(self, settings, -1)
        elif type == 'Medium':
            self.preferred_screen = type
            if os.path.getsize('typical.txt') > 0:
                with open('typical.txt', 'rb') as t_s:
                    load_start = pickle.load(t_s)
                    medium = TypicalWindow(load_start.listBoxBuffer)
            else:
                medium = TypicalWindow(self.empty)
            with open('settings.txt', 'wb') as settings:
                pickle.dump(self, settings, -1)
                # pickle.dump(medium, settings, -2)
        elif type == 'Expert':
            self.preferred_screen = type
            expert = ExpertWindow()
            with open('settings.txt', 'wb') as settings:
                pickle.dump(self, settings, -1)
                # pickle.dump(expert, settings, -2)

    def __init__(self):
        """Build and run the 300x300 launcher with one button per level."""
        root = tk.Tk()
        root.title("Smart Gcc GUI")
        width_of_window = 300
        height_of_window = 300
        canvas = tk.Canvas(root, width=width_of_window, height=height_of_window)
        canvas.pack()
        """ Setting the window to the middle of the screen """
        self.util.center_window(width_of_window, height_of_window, root)
        frame0 = tk.Frame(root, bg="#8C8C8C")
        frame0.place(relx=0, rely=0, relwidth=1, relheight=1)
        frame1 = tk.Frame(frame0, bg="#8C8C8C", bd=3)
        frame1.place(relx=0.5, rely=0.2, relwidth=0.5, relheight=0.6, anchor='n')
        lbl_user = tk.Label(frame0, text="Select your user type", font="TimesNewRoman",
                            bg="#8C8C8C", bd=5, padx=4)
        lbl_user.place(relx=0.24, rely=0.04)
        # each button reports its own label text to makeWindow
        btn_beginner = tk.Button(
            frame1, text="Beginner", width=6,
            bg="#EFEEEA", activebackground="#BCE27F", bd=1,
            command=lambda: self.makeWindow(root, btn_beginner.cget('text')))
        btn_beginner.place(relx=0.5, rely=0.01, relwidth=0.98, relheight=0.2, anchor='n')
        btn_medium = tk.Button(
            frame1, text="Medium", width=6,
            bg="#EFEEEA", activebackground="#BCE27F", bd=1,
            command=lambda: self.makeWindow(root, btn_medium.cget('text')))
        btn_medium.place(relx=0.5, rely=0.25, relwidth=0.98, relheight=0.2, anchor='n')
        btn_expert = tk.Button(
            frame1, text="Expert", width=6,
            bg="#EFEEEA", activebackground="#BCE27F", bd=1,
            command=lambda: self.makeWindow(root, btn_expert.cget('text')))
        btn_expert.place(relx=0.5, rely=0.5, relwidth=0.98, relheight=0.2, anchor='n')
        # btn_beginner.bind('<Button-1>', self.makeWindow)
        root.mainloop()
def load_model(self):
    """Load the persisted initial LightGBM model into this runner."""
    self.model = Util.load("../model/model/lgb_initial.pkl")
def save_model(self):
    """Persist the current model to the shared model directory."""
    model_path = "../model/model/lgb_initial.pkl"
    Util.dump(self.model, model_path)
# -*- coding: utf-8 -*- from util import Util from nltk.corpus import stopwords from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.metrics.pairwise import linear_kernel from stemming.porter2 import stem import nltk import re import string nltk.download('punkt') nltk.download('stopwords') default_stopwords = stopwords.words('english') util = Util() class Recommendation: def __init__(self, df): self.logger = util.set_logger('recommendation') self.map_dict = self.mapping(df) self.matrix = self.create_matrix(df) def __preproccessing(self, text): """ Preprocess to input data :param str text: text for preproccessing, description + genre :return: processed text :rtype: str """
def parseUrl(url, item_url):
    """Scrape `url` and return one JSON-ish dict per schema.org Offer
    list item found on the page."""
    soup = Util.getUrl(url)
    offers = soup.find_all('li', itemtype='http://schema.org/Offer')
    return [Util.json_object(offer, item_url) for offer in offers]
def get_community_folders(self):
    """Return the list of community folders under /data/_raw."""
    raw_root = self.get_raw_data_folder()
    return Util().getFolderList(raw_root)
import pickle import pandas as p import io import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import CountVectorizer from util import Util u = Util() from sklearn.linear_model import MultiTaskLasso, ElasticNet, Ridge from sklearn import cross_validation from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier, AdaBoostRegressor from sklearn.feature_selection import SelectKBest, chi2 from sklearn.svm import LinearSVC from sklearn import svm from scipy import sparse from sklearn.linear_model import MultiTaskLasso from sklearn.preprocessing import Normalizer from sklearn.decomposition import TruncatedSVD import time import nltk from nltk.tokenize import wordpunct_tokenize np.set_printoptions(suppress=True, precision=3) #import logging as log #install the logging module to enable logging of the results #log.basicConfig(filename='C:/results.txt', format='%(message)s', level=log.DEBUG) ''' Hot to use this script: 1. Add paths to the data files (first line in main) 2. Load data with pandas or otherwise (line two) 3. Build your array of features (vectorizers) 4a. Choose your array of classifies (clfs)
#!/usr/bin/python3
"""Read two whitespace-separated integers from stdin and print their sum."""
import sys
from util import Util

if __name__ == '__main__':
    # renamed from `input`, which shadowed the builtin
    data = sys.stdin.read()
    tokens = data.split()
    a = int(tokens[0])
    b = int(tokens[1])
    print(Util.sum(a, b))
# Cross-validated training driver. (Japanese docstring/comments below say:
# "perform training/evaluation with cross validation; also save each fold's
# model and log the scores".) Flow: log the CV method, train one model per
# fold via self.train_fold, for 'nn' models plot and save loss/accuracy
# curves per fold, save each fold's model, then stitch the out-of-fold
# predictions back into training-row order (np.argsort over the collected
# validation indices) and log the mean fold score. The trailing `if` is
# continued on the next line: when self.save_train_pred is set the ordered
# predictions are pickled, per-fold scores are written via
# logger.result_scores, and shap importances are computed when enabled.
# NOTE(review): plt.xlabel is given "val_loss"/"val_acc" where an "epoch"
# axis label was presumably intended — confirm before changing.
def run_train_cv(self) -> None: """クロスバリデーションでの学習・評価を行う 学習・評価とともに、各foldのモデルの保存、スコアのログ出力についても行う """ self.logger.info(f'{self.run_name} - start training cv') if self.cv_method == 'KFold': self.logger.info(f'{self.run_name} - cv method: {self.cv_method}') else: self.logger.info( f'{self.run_name} - cv method: {self.cv_method} - target: {self.cv_target_column}' ) scores = [] # 各foldのscoreを保存 va_idxes = [] # 各foldのvalidationデータのindexを保存 preds = [] # 各foldの推論結果を保存 # 各foldで学習を行う for i_fold in range(self.n_splits): # 学習を行う self.logger.info(f'{self.run_name} fold {i_fold} - start training') model, va_idx, va_pred, score = self.train_fold(i_fold) self.logger.info( f'{self.run_name} fold {i_fold} - end training - score {score}' ) if self.model_name == 'nn': history = model.load_history() acc = history.history["acc"] val_acc = history.history["val_acc"] loss = history.history["loss"] val_loss = history.history["val_loss"] epochs = range(len(loss)) fig = plt.figure() img_loss_path = os.path.join(self.out_dir_name, f'_loss_{i_fold}.png') plt.plot(epochs, loss, label="loss", ls="-", marker="o") plt.plot(epochs, val_loss, label="val_loss", ls="-", marker="x") plt.title('Model loss') plt.ylabel("loss") plt.xlabel("val_loss") plt.legend(loc="best") # ファイルに保存 fig.savefig(img_loss_path) fig = plt.figure() img_acc_path = os.path.join(self.out_dir_name, f'_acc_{i_fold}.png') plt.plot(epochs, acc, label="acc", ls="-", marker="o") plt.plot(epochs, val_acc, label="val_acc", ls="-", marker="x") plt.title('Model acc') plt.ylabel("acc") plt.xlabel("val_acc") plt.legend(loc="best") # ファイルに保存 fig.savefig(img_acc_path) # モデルを保存する model.save_model(self.out_dir_name) # 結果を保持する va_idxes.append(va_idx) scores.append(score) preds.append(va_pred) # 各foldの結果をまとめる va_idxes = np.concatenate(va_idxes) order = np.argsort(va_idxes) preds = np.concatenate(preds, axis=0) preds = preds[order] self.logger.info( f'{self.run_name} - end training cv - score {np.mean(scores)}') # 学習データでの予測結果の保存 if 
self.save_train_pred: Util.dump_df_pickle( pd.DataFrame(preds), self.out_dir_name + f'.{self.run_name}-train.pkl') # 評価結果の保存 self.logger.result_scores(self.run_name, scores) # shap feature importanceデータの保存 if self.calc_shap: self.shap_feature_importance()
#!/usr/bin/python3
import sys
from util import Util

if __name__ == '__main__':
    # First stdin line: element count (read but not otherwise used here);
    # second line: the whitespace-separated integers themselves.
    n = int(input())
    numbers = list(map(int, input().split()))
    print(Util.max_pairwise_product(numbers))
# Crawler hook: builds a ProductData record from one product list element.
# (Korean comments below: "product category", "product image check",
# "sold-out check", "product name/URL", "price".) Steps: extract the image,
# resolve the product name/URL (trying <strong class="name"> then
# <p class="name">), then read every <li class="price"> — entries carrying
# the 'sale' class populate crw_price_sale, others crw_price, with digits
# extracted via __UTIL__.get_only_digit from the text before any '('.
# Finally the URL hash is registered when a product URL was found.
# NOTE(review): `rtn = True` is never returned — the function returns True
# unconditionally, even after a swallowed exception (errors are only logged
# via __LOG__); confirm whether callers rely on that before changing.
def set_product_data(self , page_url, soup, product_ctx ) : # # try : product_data = ProductData() crw_post_url = '' # 상품 카테고리 # ########################### # 상품 이미지 확인 # ########################### self.set_product_image_third( product_data, product_ctx ) # 품절여부 확인 #self.set_product_soldout_first(product_data, product_ctx ) ########################### # 상품명/URL ########################### crw_post_url = self.set_product_name_url_fourth( product_data, product_ctx , 'strong', 'name') if(crw_post_url == '') : crw_post_url = self.set_product_name_url_fourth( product_data, product_ctx , 'p', 'name') ############################## # 가격 # <ul class="spec"> # <li class="displaynone custom" style="text-decoration:line-through; font-size:11px;">원</li> # <li class="price ">84,000원 <span class="displaynone"></span> # </li> # <li class="price sale displaynone"></li> # <li class="icon"></li> # <li class="cart"></li> # <li class="color displaynone"> # </li> # <li class="likeButton displaynone"><button type="button"><strong></strong></button></li> # </ul> ############################## #self.set_product_price_brand_first(product_data, product_ctx) li_ctx_list = product_ctx.find_all('li', class_='price') for li_ctx in li_ctx_list : is_sale_price=False class_name_list = li_ctx.attrs['class'] for class_name in class_name_list : if(class_name == 'sale') : is_sale_price = True split_list = li_ctx.get_text().split('(') price_value = int( __UTIL__.get_only_digit( split_list[0].strip() ) ) if(is_sale_price ) : product_data.crw_price_sale = price_value else : product_data.crw_price = price_value if( crw_post_url != '' ) : self.set_product_url_hash( product_data, crw_post_url) rtn = True except Exception as ex: __LOG__.Error('에러 : set_product_data') __LOG__.Error(ex) pass return True
# Searches the ship list page by page for a replacement ship matching the
# slot config, by ship asset or by class. Per page it spawns one matcher
# thread per candidate ship (Util.multithreader), flattens the matched
# positions, filters them on level, then checks availability; in 'ship'
# mode a 'dupe' result skips the rest of that ship's positions, in 'class'
# mode positions are tried in order. A position cache per slot avoids
# re-scanning from page 1; in sparkle mode the cache is deliberately not
# refreshed on pages with no valid ship, and exhausting the list disables
# the combat module. Returns True on a successful switch, False otherwise.
# The trailing log call's .format(...) continues on the next source line.
def _resolve_replacement_ship_by_asset(self, mode, slot_config): """Method that finds a resolves a replacement ship by class or ship asset. Args: mode (str): specifies whether the resolution is by 'ship' or 'class' slot_config (dict): dictionary containing the slot's config Returns: bool: True if a successful switch was made; False otherwise """ cache_override = True self.temp_ship_config_dict = {} self.temp_ship_position_dict = {} self._switch_shiplist_sorting('class') # start search from cached position, if available if slot_config['slot'] in self.position_cache: Util.log_msg("Jumping to cached page {}.".format( self.position_cache[slot_config['slot']])) self._navigate_to_shiplist_page( self.position_cache[slot_config['slot']]) while (not self.temp_ship_position_dict and self.current_shiplist_page < self.ship_page_count): ship_search_threads = [] for ship in slot_config['ships']: ship_search_threads.append( Thread(target=self._match_shiplist_ships_func, args=(mode, ship[mode], ship))) Util.multithreader(ship_search_threads) if not self.temp_ship_position_dict: # no matches on this page; continue loop self._navigate_to_shiplist_page(self.current_shiplist_page + 1) continue if cache_override: # update cache on first encounter self._set_position_cache(slot_config['slot']) cache_override = False ship_position_list = [ i for j in [ self.temp_ship_position_dict[x] for x in self.temp_ship_position_dict ] for i in j ] ship_position_list.sort() ship_position_list = self._filter_ships_on_level( ship_position_list) Util.log_msg( "Potential replacement ships found in page {} positions {}". 
format(self.current_shiplist_page, ", ".join([str(i) for i in ship_position_list]))) if mode == 'ship': for ship in self.temp_ship_position_dict: for position in self.temp_ship_position_dict[ship]: if position not in ship_position_list: # ship in position did not pass filtering on level continue availability = ( self._choose_and_check_availability_of_ship( position, slot_config['criteria'])) if availability is True: return True elif availability == 'dupe': break elif mode == 'class': for position in ship_position_list: if self._choose_and_check_availability_of_ship( position, slot_config['criteria']) is True: return True # no available ships on this page; reset matches and continue loop self.temp_ship_config_dict = {} self.temp_ship_position_dict = {} if 'sparkle' in slot_config['criteria']: # if in sparkle mode and we didn't see any valid ships here, # don't jump to this page on the next pass cache_override = True self._navigate_to_shiplist_page(self.current_shiplist_page + 1) if 'sparkle' in slot_config['criteria']: # if in sparkle mode and we reach this point, we've exhausted the # list of possible ships; disable the combat module self.combat.disable_module() return False
# Top-level switcher loop for Fleet 1: for each of the 6 slots that has a
# config, open the ship list when the slot's criteria say a switch is
# needed, attempt the replacement, update stats and (for sparkle slots)
# the sparkle cache. Afterwards it returns to the fleet screen and decides
# sortie-readiness from damage counts only (the TODO below notes fatigue is
# not checked); an under-threshold fleet is forced to resupply and the next
# combat time is scheduled, otherwise damage counts are printed.
def ship_switch_logic(self): """Primary logic loop which goes through the 6 ship slots and switches ships as necessary. Only avilable for Fleet 1. """ self._set_shiplist_counts() # loop through slots and switch ships as necessary for slot in range(0, 6): if slot not in self.config.ship_switcher: continue slot_config = self.config.ship_switcher[slot] if self._check_need_to_switch_ship(slot, slot_config['criteria']): Util.wait_and_click_and_wait( self.module_regions['panels'][slot], 'shiplist_button.png', self.regions['lower_right'], 'page_first.png') Util.rejigger_mouse(self.regions, 'top') if self._resolve_replacement_ship(slot_config): self.stats.increment_ships_switched() if 'sparkle' in slot_config['criteria']: # if this is a sparkle slot, update the sparkle cache self._set_sparkle_cache(slot) else: Util.check_and_click(self.regions['top_submenu'], 'fleet_1_active.png') self.module_regions['panels'][0].wait('shiplist_button.png', 2) # check new fleet status # TODO: only checks on damage and repair states only, not fatigue! Util.kc_sleep(2) fleet = self.fleets[1] damage_counts = fleet.check_damages(self.kc_region) if (fleet.get_damage_counts_at_threshold( self.config.combat['repair_limit']) == 0 and damage_counts['repair'] == 0): # all ships in fleet pass checks: continue sortie fleet.needs_resupply = True # force resupply attempt Util.log_msg( "Fleet is ready to sortie. Updating next sortie time.") self.combat.set_next_combat_time() else: fleet.print_damage_counts(repair=True) Util.log_msg("Fleet is still not ready to sortie.")
# Screen-navigation classmethod: detects the current screen (home, side
# menu, or top menu) from on-screen assets, then walks toward *destination*
# via navigate_to, optionally taking a randomized sidestep. 'refresh_home'
# is rewritten to 'home' after triggering a refresh. Returns False only in
# the already-at-home case, True otherwise. The `if sidestep:` at the end
# of this line is continued on the next source line.
# NOTE(review): if none of the three location assets is found,
# `current_location` stays None and `current_location.name` raises
# AttributeError — confirm whether detection can actually fail in practice.
def goto(cls, regions, destination, max_sidestep=1): """Method to call to detect the current location and move to the specified destination, with or without sidesteps. Args: regions (dict): dict of pre-defined kcauto-kai regions destination (str): name of the destination max_sidestep (int, optional): the max number of sidesteps to take; in the current implementation the name and type is a misnomer: if it is a non-zero number the code will sidestep once, otherwise never sidestep (should be renamed to 'sidestep' with bool type) Returns: bool: True if navigation was successful, False if no actions were taken """ sidestep = bool(randint(0, max_sidestep)) Util.rejigger_mouse(regions, 'top') Util.kc_sleep() # Figure out where we are current_location = None if regions['home_menu'].exists('home_menu_sortie.png'): Util.log_msg("At home") current_location = cls.home elif regions['side_menu'].exists('side_menu_home.png'): Util.log_msg("At side menu") current_location = cls.side_menu elif regions['lower_left'].exists('top_menu_home.png'): Util.log_msg("At top menu") current_location = cls.top_menu if current_location.name == 'home': # Starting from home screen if destination == 'home': # Already at home # Util.log_msg('Already at home.') return False elif destination == 'refresh_home': # Refresh home Util.log_msg("Refreshing home.") destination = 'home' current_location = current_location.navigate_to( regions, cls._choose_sidestep(destination)) else: # Go to and side menu sub screen Util.log_msg( "Navigating to {} screen.".format(destination)) if destination in ('combat', 'pvp', 'expedition'): current_location = current_location.navigate_to( regions, 'sortie') else: if sidestep: current_location = current_location.navigate_to( regions, cls._choose_sidestep(destination)) elif current_location.name == 'side_menu': # Starting from a main menu item screen if destination in ('home', 'refresh_home'): # Go or refresh home Util.log_msg('Going home.') destination = 'home' if sidestep: 
current_location = current_location.navigate_to( regions, cls._choose_sidestep(destination)) elif current_location.name == 'top_menu': # Starting from top menu item. Theoretically, the script should # never attempt to go anywhere but home from here if destination in ('home', 'refresh_home'): Util.log_msg('Going home.') destination = 'home' current_location.navigate_to(regions, destination) return True
def _change_shiplist_page(self, target):
    """Click the arrow/page-number navigation below the ship list.

    'first', 'prev', 'next' and 'last' targets click their respective
    arrow buttons, while an int target between 1 and 5 (inclusive) clicks
    the page number at that position at the bottom of the page (left to
    right).

    Args:
        target (str, int): specifies which navigation button to press
    """
    # Arrow buttons share one call shape; dispatch the asset by name.
    arrow_assets = {
        'first': 'page_first.png',
        'prev': 'page_prev.png',
        'next': 'page_next.png',
        'last': 'page_last.png',
    }
    if target in arrow_assets:
        Util.check_and_click(
            self.regions['lower'], arrow_assets[target],
            Globals.EXPAND['arrow_navigation'])
    elif 1 <= target <= 5:
        # Page numbers are laid out left-to-right starting at x=512,
        # 21px wide with 11px gaps; click within the number's box.
        zero_based = target - 1
        x_start = 512 + (zero_based * 21) + (zero_based * 11)
        x_stop = x_start + 11
        Util.click_coords(
            self.kc_region,
            Util.randint_gauss(x_start, x_stop),
            Util.randint_gauss(444, 452))
# Python 2 generator: walks the volume slice by slice (optionally in random
# z order), optionally fills/relabels the segmentation, then for every label
# (optionally shuffled) and each of its neighbors yields border patches
# produced by MergeError.analyze_border.
# NOTE(review): the inner `for n in neighbors:` loop variable shadows the
# `n=10` parameter; the `max_per_slice` cap is commented out so it is never
# enforced; `upper_limit`, `thumb`, `rotate` and `flip` are currently unused
# in the live code path — confirm intent before cleaning up.
def generate(image, prob, label, n=10, thumb=False, rotate=True, flip=True, randomize_slice=False, randomize_label=False, max_per_slice=-1, fill_labels=True, sample_rate=10): ''' ''' # run through all slices if randomize_slice: z_s = np.arange(image.shape[0]) np.random.shuffle(z_s) else: z_s = range(image.shape[0]) for z in z_s: if fill_labels: # fill segmentation print 'Filling segmentation' label_zeros = Util.threshold(label[z], 0) label_filled = Util.fill(label[z], label_zeros.astype(np.bool)) label_filled_relabeled = skimage.measure.label( label_filled).astype(np.uint64) else: print 'Skip filling' label_filled_relabeled = label print label_filled_relabeled.shape print 'Working on z', z slice_counter = 0 if randomize_label: labels = np.arange( len(Util.get_histogram(label_filled_relabeled))) np.random.shuffle(labels) else: labels = range( 1, len(Util.get_histogram(label_filled_relabeled)) ) # we ignore background 0 which should not exist anyways print labels for l in labels: # if slice_counter >= max_per_slice: # continue neighbors = MergeError.grab_neighbors(label_filled_relabeled, l) for n in neighbors: upper_limit = 10 # m = MergeError() print l, n patches = MergeError.analyze_border( image, prob, label_filled_relabeled, l, n, sample_rate=sample_rate) for s in patches: yield s # for s in MergeError.create(image[z], prob[z], label_filled_relabeled, l, n, thumb=thumb): # yield s # yield s slice_counter += 1
# NOTE: the leading `return None return t` is the tail of a definition that
# starts before this chunk — left untouched. The __main__ block below is an
# ad-hoc smoke test for the Data/Util classes: most experiments are kept
# commented out; the live calls print the latest and week-old records for
# one tracked player via Data.getMostRecent/getLatestBeforeDate/read.
return None return t if (__name__ == "__main__"): d = Data() #d.testwrite('test','test1.csv',[['jacky','cool'],['jacky','smart']]) #c = d.testread('Book1.csv') #c = d.read("",'ClanList') #print(c) #for i in c: #for z in i: #print(z) #print("done") #print(d.getWMostRecent()) print(d.getExpectedData()) #print(d.trackClan('MIA',[['mod','shitter'],['ddak','absolut trash'],['warlord','legend']])) #d.addToClanlist('asdf') #print(d.read('','ClanList')) #print(d.getShipID("Dresden")) #print(d.getShipStats(d.getShipID("Alaska")))' ut = Util() print(d.getMostRecent("MIA-E/Modulatus")) print(d.getLatestBeforeDate("MIA-E/Modulatus", ut.countWeekSec())) print(d.read("MIA-E/Modulatus", d.getMostRecent("MIA-E/Modulatus/"))) print( d.read("MIA-E/Modulatus", d.getLatestBeforeDate("MIA-E/Modulatus", ut.countWeekSec()))) #data = [["wr",50.532],["avgdmg",203021],['kills',2],['pr',2410]] #d.testwrite("MIA/test",str(ut.getGMTTime())+'.csv',data)
# Builds a list of fake snapshot dicts, one per timestamp from
# generateTimeListData: a template snapshot is deep-copied, its variables
# are filled with random values (machine-status/feed-override simulation
# when qualityExist is falsy, otherwise good/bad/rework piece counters via
# module-level global accumulators), then the template is deep-copied once
# per timestamp with its 'time' set via Util.timeStampToUtcTime. The
# `tempOneSnapShotJson =` at the end of this line continues on the next.
# NOTE(review): `amountInfoDic['markQuality'] is 4` compares by identity —
# it relies on CPython small-int caching and should be `== 4`. Also
# `markQuality = int` binds the builtin type, so the trailing
# `markQuality == 1/2/3` branches can never fire; presumably it was meant
# to read amountInfoDic['markQuality'] — confirm before fixing.
def generateSnapListData(startSeconds, endSeconds, oneTimeSeriesRespJson, urlHead, urlSelfPath, amountInfoDic): oneSnapShotList = oneTimeSeriesRespJson['snapshots'] oneSnapShotJson = copy.deepcopy(oneSnapShotList[0]) responseTimesPerHour = 3600 / int(properties.requestPeriod) * int( properties.qualityResponseAmount) markQuality = int timelist = generateTimeListData(startSeconds, endSeconds, oneTimeSeriesRespJson, urlHead, urlSelfPath, amountInfoDic) timeSeriesLength = len(timelist) snapList = [] count = 0 if amountInfoDic['markQuality'] is 4: oneSnapShotJson = generate3variables(oneSnapShotJson) global qualityCount global goodQualityCount, badQualityCount, reworkQualityCount global statusCount for oneVarDataJson in oneSnapShotJson['variables']: oneVarDataJson['value'] = str(round(random.uniform(0, 100), 1)) oneVarDataJson['qualityCode'] = '00000000' # randomCount = int(( # 1 if (random.randint(0, responseTimesPerHour) // int(amountInfoDic['qualityTimesPerHour']) == 0) else 0)) randomCount = random.randint(0, 1) if amountInfoDic['qualityExist'] is not True: if oneVarDataJson['name'] == "NCProgramStatus": if statusCount <= 240: oneVarDataJson['value'] = str(3) + ".0" elif statusCount <= 300: oneVarDataJson['value'] = str(4) + ".0" else: oneVarDataJson['value'] = str(4) + ".0" statusCount = 0 statusCount += 1 elif oneVarDataJson['name'] == "Feedoverride": oneVarDataJson['value'] = str(random.randint(3, 6) * 11) + ".0" else: oneVarDataJson['name'] = amountInfoDic['variableName'] if oneVarDataJson['name'] == "Good_Piece_Num": goodQualityCount += randomCount oneVarDataJson['value'] = goodQualityCount if oneVarDataJson['name'] == "Bad_Piece_Num": badQualityCount += (random.randint(1, 100) // 95) oneVarDataJson['value'] = badQualityCount if oneVarDataJson['name'] == "Rework_Piece_Num": reworkQualityCount += randomCount oneVarDataJson['value'] = reworkQualityCount qualityCount += randomCount while count < timeSeriesLength: tempOneSnapShotJson = 
copy.deepcopy(oneSnapShotJson) utcTime = Util.timeStampToUtcTime(timelist[count]) tempOneSnapShotJson['time'] = utcTime snapList.append(tempOneSnapShotJson) count += 1 if markQuality == 1: goodQualityCount = qualityCount elif markQuality == 2: badQualityCount = qualityCount elif markQuality == 3: reworkQualityCount = qualityCount return snapList
# Crawler hook for a MakeShop-style listing page. (Korean comments below:
# "product image / link / category", "product name and brand", "price /
# sold-out check".) From each <div class="thumb"> it takes the first image
# src (query string stripped) and the first href, deriving goods code and
# category from the URL; the product name comes from <li class="name">,
# with a leading "[brand]" prefix split out into crw_brand1 (the split
# starts on the next source line). Prices come from <li class="price">:
# the <span> holds the list price, the remainder the sale price, and a
# trailing "Sold Out" marks crw_is_soldout. Exceptions are logged via
# __LOG__ and swallowed; the function returns True unconditionally
# (the `rtn = True` local is never used).
def set_product_data(self, page_url, soup, product_ctx): # # try: product_data = ProductData() crw_post_url = '' #################################### # 상품 이미지 확인 # 상품 링크 정보 및 상품코드 # 카테고리 # # <div class="thumb"> # <a href="/shop/shopdetail.html?branduid=1000006164&xcode=007&mcode=006&scode=001&type=X&sort=order&cur_code=007&GfDT=aWx3UQ%3D%3D"><img class="MS_prod_img_m" src="/shopimages/sizeoo/0070060000702.jpg?1589180862" onmouseover="this.src='/shopimages/sizeoo/007006000070.jpg?1589180862'" onmouseout="this.src='/shopimages/sizeoo/0070060000702.jpg?1589180862'" alt="" title=""></a> # </div> #################################### img_div_list = product_ctx.find_all('div', class_='thumb') for img_div_ctx in img_div_list: product_link_list = img_div_ctx.find_all('a') img_list = img_div_ctx.find_all('img') for img_ctx in img_list: img_src = '' if ('src' in img_ctx.attrs): split_list = img_ctx.attrs['src'].strip().split('?') img_src = split_list[0].strip() if (img_src != ''): img_link = self.set_img_url(self.BASIC_IMAGE_URL, img_src) product_data.product_img = self.get_hangul_url_convert( img_link) break for product_link_ctx in product_link_list: if ('href' in product_link_ctx.attrs): crw_post_url = self.get_crw_post_url( product_link_ctx, 'href') if (crw_post_url != ''): self.get_crw_goods_code(product_data, crw_post_url) self.get_category_value(product_data, crw_post_url) break #################################### # 상품명 및 브랜드 # <li class="name"><span class="MK-product-icons"></span> 데이지 비치 원피스 (옐로우)</li> #################################### name_strong_list = product_ctx.find_all('li', class_='name') for name_strong_ctx in name_strong_list: product_data.crw_name = name_strong_ctx.get_text().strip() # # 이름 앞에 브랜드명이 있음. 
# [스텔라&츄이] 츄이스 치킨 디너패티 if (0 == product_data.crw_name.find('[')): brand_list = product_data.crw_name.split(']') product_data.crw_brand1 = brand_list[0][1:].strip() #################################### # 가격 / 품절 여부 확인 # # # <li class="price"> # <span><s>32,000</s>원</span> # 32,000원 # </li> # #------------품절시 ---------------- # <li class="price"> # Sold Out # </li> #################################### div_list = product_ctx.find_all('li', class_='price') for div_ctx in div_list: sell_price = div_ctx.get_text().strip() consumer_ctx = div_ctx.find('span') consumer_price = '' if (consumer_ctx != None): consumer_price = consumer_ctx.get_text().strip() product_data.crw_price = int( __UTIL__.get_only_digit(consumer_price)) crw_price_sale = sell_price.replace(consumer_price, '').strip() product_data.crw_price_sale = int( __UTIL__.get_only_digit(crw_price_sale)) # 품절시 가격없이 Sold Out 문구 나옴. if (0 < crw_price_sale.strip().find('Out')): product_data.crw_is_soldout = 1 if (crw_post_url != ''): #if( self.PRODUCT_URL_HASH.get( crw_post_url , -1) == -1) : self.set_product_data_sub(product_data, crw_post_url) self.process_product_api(product_data) rtn = True except Exception as ex: __LOG__.Error('에러 : set_product_data') __LOG__.Error(ex) pass return True
def __init__(self):
    """Initialize helpers: a Util instance, a DataExtractor, and the
    location mapping derived from it."""
    self.ut = Util()
    extractor = DataExtractor()
    self.data_extractor = extractor
    self.mapping = extractor.location_mapping()
# Crawler hook for another MakeShop-style listing layout. (Korean comments
# below: "product image / link / category", "product name and brand",
# "price / sold-out check".) Image and product URL are taken from
# <div class="thumb"> as in the sibling crawlers; the name comes from
# <li class="name"> (the `for` loop header is split across this and the
# next source line), with a leading "[brand]" prefix split into crw_brand1.
# Prices are read from <ul> blocks: span.price01 is the list price,
# span.price02 the sale price, and li.soldout marks crw_is_soldout.
# Exceptions are logged via __LOG__ and swallowed; the function returns
# True unconditionally (the `rtn = True` local is never used).
def set_product_data(self, page_url, soup, product_ctx): # # try: product_data = ProductData() crw_post_url = '' #################################### # 상품 이미지 확인 # 상품 링크 정보 및 상품코드 # 카테고리 # # <div class="thumb"> # <div class="over_view -mos01"> # <ul> # <li><a href="javascript:viewdetail('011000000054', '1', '');" aria-label="확대보기" class="hint--top"><i class="xi-search" alt="확대보기"></i></a></li> # <li><a href="javascript:viewdetail('011000000054', '1', '');" aria-label="관심상품" class="hint--top"><i class="xi-heart-o" alt="관심상품"></i></a></li> # <li><a href="/shop/shopdetail.html?branduid=2243605&xcode=009&mcode=000&scode=&type=P&sort=regdate&cur_code=009&GfDT=bm1%2BW1w%3D" aria-label="상세보기" class="hint--top"><i class="xi-bars" alt="상세보기"></i></a></li> # </ul> # </div> # <a href="/shop/shopdetail.html?branduid=2243605&xcode=009&mcode=000&scode=&type=P&sort=regdate&cur_code=009&GfDT=bm1%2BW1w%3D"><img class="MS_prod_img_s" src="/shopimages/coates1024/0110000000543.gif?1581155993"></a> # </div> #################################### img_div_list = product_ctx.find_all('div', class_='thumb') for img_div_ctx in img_div_list: product_link_list = img_div_ctx.find_all('a') img_list = img_div_ctx.find_all('img') for img_ctx in img_list: img_src = '' if ('src' in img_ctx.attrs): split_list = img_ctx.attrs['src'].strip().split('?') img_src = split_list[0].strip() if (img_src != ''): img_link = self.set_img_url(self.BASIC_IMAGE_URL, img_src) product_data.product_img = self.get_hangul_url_convert( img_link) break for product_link_ctx in product_link_list: if ('href' in product_link_ctx.attrs): crw_post_url = self.get_crw_post_url( product_link_ctx, 'href') if (crw_post_url != ''): self.get_crw_goods_code(product_data, crw_post_url) self.get_category_value(product_data, crw_post_url) break #################################### # 상품명 및 브랜드 # <li class="name">스카이 브라운 클래식 카시트</li> #################################### name_strong_list = product_ctx.find_all('li', class_='name') for 
name_strong_ctx in name_strong_list: product_data.crw_name = name_strong_ctx.get_text().strip() # # 이름 앞에 브랜드명이 있음. # [스텔라&츄이] 츄이스 치킨 디너패티 if (0 == product_data.crw_name.find('[')): brand_list = product_data.crw_name.split(']') product_data.crw_brand1 = brand_list[0][1:].strip() #################################### # 가격 / 품절 여부 확인 # # # <li> # <div class="over_sale -mos">30%</div> # <span class="price01">39,500원</span> # <span class="price02">27,600원</span> # </li> # # #################################### div_list = product_ctx.find_all('ul') for div_ctx in div_list: sell_ctx = div_ctx.find('span', class_='price02') consumer_ctx = div_ctx.find('span', class_='price01') soldout_ctx = div_ctx.find('li', class_='soldout') if (soldout_ctx != None): product_data.crw_is_soldout = 1 if (consumer_ctx != None): product_data.crw_price = int( __UTIL__.get_only_digit( consumer_ctx.get_text().strip())) if (sell_ctx != None): product_data.crw_price_sale = int( __UTIL__.get_only_digit(sell_ctx.get_text().strip())) if (crw_post_url != ''): #if( self.PRODUCT_URL_HASH.get( crw_post_url , -1) == -1) : self.set_product_data_sub(product_data, crw_post_url) self.process_product_api(product_data) rtn = True except Exception as ex: __LOG__.Error('에러 : set_product_data') __LOG__.Error(ex) pass return True
def load_model(self):
    """Load the pickled LightGBM model from disk onto ``self.model``."""
    self.model = Util.load("models/model_lgb4.pkl")
# Task1 builds an image-to-image similarity graph: every pair of images is
# scored as 1/(1 + euclidean distance) between their feature vectors, the
# full edge list is written to disk, the top-k edges per source image are
# appended to a reduced graph file, and an image-id mapping is pickled.
# create_graph then copies the first 10*k reduced edges into a
# visualisation file, writes every k-th block of neighbor ids to the task
# output file (the `task1_output_file =` assignment is split across this
# and the next source line), and renders the graph with networkx/pyplot.
# runner ties it together from user input, catching all exceptions.
# NOTE(review): several file handles (edge list, reduced graph, reduced
# reader) are opened without close(); top_k sorts by -score so it keeps
# the k LOWEST-scoring edges first — confirm that is intended.
class Task1(): def __init__(self): self.ut = Util() self.data_extractor = DataExtractor() self.mapping = self.data_extractor.location_mapping() def generate_imgximg_edgelist(self, image_list1, image_list2, image_feature_map, k): """ Method: generate_imgximg_edgelist returns image to image similarity in form of an edge list """ imgximg_edgelist_file = open(constants.VISUALIZATIONS_DIR_PATH + "entire_graph_file.txt", "w") image_id_mapping_file = open(constants.DUMPED_OBJECTS_DIR_PATH + "image_id_mapping.pickle", "wb") image_id_mapping = {} for index1 in range(0, len(image_list1)): local_img_img_sim_list = [] for index2 in range(0, len(image_list2)): image1 = image_list1[index1] image2 = image_list2[index2] features_image1 = image_feature_map[image1] features_image2 = image_feature_map[image2] score = 1 / (1 + self.calculate_similarity(features_image1, features_image2)) imgximg_edgelist_file.write(str(image1) + " " + str(image2) + " " + str(score) + "\n") local_img_img_sim_list.append((image1, image2, score)) self.top_k(local_img_img_sim_list, k) image_id_mapping[image1] = index1 pickle.dump(["Image_id mapping:", image_id_mapping], image_id_mapping_file) image_id_mapping_file.close() def calculate_similarity(self, features_image1, features_image2): """ Method: image-image similarity computation""" return self.ut.compute_euclidean_distance(np.array(features_image1), np.array(features_image2)) def top_k(self, graph_list, k): reduced_graph_file = open(constants.VISUALIZATIONS_DIR_PATH + "reduced_graph_file_" + str(k) + ".txt", "a+") top_k = sorted(graph_list, key=lambda x:(-x[2], x[1], x[0]))[0:k] for iter in top_k: reduced_graph_file.write(str(iter[0]) + " " + str(iter[1]) + " " + str(iter[2]) + "\n") def create_graph(self, k): reduced_graph_file = open(constants.VISUALIZATIONS_DIR_PATH + "reduced_graph_file_" + str(k) + ".txt", "r") visualise_graph_file = open(constants.VISUALIZATIONS_DIR_PATH + "visualisation_graph_file.txt", "w") task1_output_file = 
open(constants.TASK1_OUTPUT_FILE, "w") visualise_len = 10 * int(k) for iter in range(visualise_len): visualise_graph_file.write(reduced_graph_file.readline()) count = 0 for iter in reduced_graph_file: image_id = iter.split(" ") count += 1 if count <= k: task1_output_file.write(image_id[1] + "\n") else: count = 0 task1_output_file.write("####\n") task1_output_file.close() visualise_graph_file.close() g = nx.read_edgelist(constants.VISUALIZATIONS_DIR_PATH + "visualisation_graph_file.txt", nodetype=int, \ data=(('weight',float),), create_using=nx.DiGraph()) print("graph created") nx.draw(g, with_labels=True) plt.show() return g def runner(self): """ Method: runner implemented for all the tasks, takes user input, and prints desired results. """ try: k = int(input("Enter the value of k:\t")) image_feature_map = self.data_extractor.prepare_dataset_for_task1(self.mapping) image_list = list(image_feature_map.keys()) self.generate_imgximg_edgelist(image_list, image_list, image_feature_map, k) self.create_graph(k) except Exception as e: print(constants.GENERIC_EXCEPTION_MESSAGE + "," + str(type(e)) + "::" + str(e.args))
# Telegram support-bot message handler (telepot). After auth it branches on
# chat type: private chats get a keyboard-driven menu (Russian labels:
# "Домены"=Domains, "Хостинг"=Hosting, "Выход"=Exit); group chats accept
# slash commands (/help text, /restore, /ssh, /update, /fupdate, /version,
# /uptime, /cpreload, /exclude, /bemail, /smail, /session, ...) and
# dot-commands sent as replies to ticket messages (.restore, .move, .spam,
# .ssh, .ftp, .close, .exclude), extracting the ticket id from a
# "[Ticket|Reply][id]" marker in the replied-to text; unmatched replies are
# posted back to the helpdesk as a quick reply. Errors are logged and
# forwarded to the bot owner. Several source lines below are split inside
# triple-quoted Russian strings, so no comments are interleaved — the code
# is kept byte-identical.
# NOTE(review): `(content_type, ...) is None` on a tuple is always False,
# so that guard never fires; `msg['reply_to_message']` raises KeyError
# (caught by the broad except) rather than being None when absent —
# confirm both before changing.
def handle(self, msg): self.botLog.debug(msg) content_type, chat_type, chat_id = telepot.glance(msg) id, username = msg['from']['id'], msg['from']['username'] #combine = dict(list(activeTickets.items()) + list(activeRepTickets.items())) if (content_type, chat_type, chat_id, id, username) is None: self.botLog.critical("Сообщение не обработано.") return if (content_type != 'text'): self.send(username, chat_id, '', '[NOT IMPLEMENTED] Данный контент не поддерживается.') return message = msg['text'] if (self.checkAuth(id, username)): if (chat_type == 'private'): main_keyboard = [['Домены', 'Хостинг'], ['Другое', 'Выход']] sub_keyboard = [['Sub option #1_1', 'Sub option #1_2'], ['Sub option #1_3', 'Sub option #1_4'], ['Back to Main menu']] if message == '/start': self.sendMessage(chat_id, '.', reply_markup={ 'keyboard': main_keyboard, 'resize_keyboard': True }) elif message in [j for i in main_keyboard for j in i]: if message == 'Домены': sub_buttons = {'keyboard': sub_keyboard} self.sendMessage(chat_id, '.', reply_markup=sub_buttons) elif message == 'Выход': self.sendMessage( chat_id, ".", reply_markup={'remove_keyboard': True}) elif message in [j for i in sub_keyboard for j in i]: # an option from Sub keyboard is chosen: if message == 'Sub option #1_1': self.sendMessage(chat_id, 'Sub selected %s' % message) if message == 'Back to Main menu': self.sendMessage( chat_id, 'Main options', reply_markup={'keyboard': main_keyboard}) else: self.sendMessage( chat_id, 'Invalid Message. please select an option from keyboard' ) elif (chat_type == 'group'): # Replace double spaces and cleanup message = ' '.join(message.strip().split()) if (message[0] == '/'): if (len(message.split(' ')) > 1): checkCmd = message.split(' ')[0] else: checkCmd = message self.botLog.warning("Получена комманда: %s" % checkCmd) if (checkCmd == '/help'): self.sendMessageGroup(""" /help - Данное меню. /update - Проверка наличия обновлений. /fupdate - Принудительное обновление. 
/version - Отображает версию ядра. /uptime - Отображает время с момента запуска. /exclude - Добавляет или удаляет доменное имя в список исключений. (/exclude domain.by) /cpreload - Принудительно загружает список аккаунтов из cpanel. /bemail - Блокировка авторизации для почтового аккаунта (/blockemail) /unemail - Разблокировка авторизации для почтового аккаунта (/unblockemail) /restore - Функция для тестирования ответа сервера (/restore email) /smail - Блокировка возможности отправки исходящей почты для аккаунта (/suspendemail email) /unsmail - Разблокировка возможности отправки исходящей почты для аккаунта (/unsuspendemail email) /session - Генерирует одноразовую ссылку для авторизации в cpanel пользователя (/session domain.by) Следующие команды используются , как ответ(reply) на сообщение: .move - Перемещает заявку на менеджеров. .restore - Генерирует ссылку для сброса пароля. .spam - Перемещает заявку в спам с блокировкой отправителя в hd. .close - Перемещает заявку в закрытые. .exclude - Добавляет или удаляет доменное имя в список исключений. Пример: .exclude domain.by .ssh - Добавляет пользователю возможность подключения по ssh. .ftp - Предоставляет информацию по подключению к FTP. 
""") return if (checkCmd == '/restore'): subcommand = message.split()[1] self.sendMessageGroup( '[/restore]%s' % (self.restoreCpanelPassword(subcommand))) return if (checkCmd == '/ssh'): subcommand = message.split()[1] self.sendMessageGroup( '[/ssh]%s' % (self.grantAccessToSsh(subcommand))) return if (checkCmd == '/update'): self.sendMessageGroup( "Проводим проверку наличия обновлений...") Util.checkUpdate(self.botLog, self) return if (checkCmd == '/fupdate'): self.sendMessageGroup( "Производим принудительное обновление...") Util.checkUpdate(self.botLog, self, True) return if (checkCmd == '/version'): self.sendMessageGroup( 'Текущая версия: %s \nВерсия на сервере: %s' % (Util.getCurrentVersion(), Util.getVersionAtServer())) return if (checkCmd == '/uptime'): self.sendMessageGroup('Время работы: %s' % (Util.getUpime())) return if (checkCmd == '/cpreload'): self.sendMessageGroup( 'Принудительная загрузка хостинг аккаунтов.') loadDataFromServers(True) self.sendMessageGroup( "...Завершено.Найдено %s аккаунтов." % (len(getAccountsList()))) self.botLog.info("...Загружено %s аккаунтов." % (len(getAccountsList()))) return if (checkCmd == '/exclude'): if (len(message.split(' ')) > 1): subcommand = message.split(' ')[1] else: self.botLog.critical( "[/exclude] Имя домена не указано.") self.sendMessageGroup( "Имя домена не указано. 
Cписок исключений: %s" % (",".join(Config.getExcludeDomainList()))) return tempExcludeList = Config.getExcludeDomainList() if (subcommand in tempExcludeList): tempExcludeList.remove(subcommand) else: tempExcludeList.append(subcommand) Config.setConfigValue('exclude', 'create', ",".join(tempExcludeList)) Config.saveConfig() self.sendMessageGroup( "[.exclude] Сохранен список исключений: %s" % (",".join(tempExcludeList))) return if (checkCmd in ['/bemail', '/blockemail']): self.blockByEmail(message) return if (checkCmd in ['/unemail', '/unblockemail']): self.unBlockEmail(message) return if (checkCmd in ['/smail', '/suspendemail']): self.suspendOutgoingEmail(message) return if (checkCmd in ['/unsmail', 'unsuspendemail']): self.unSuspendOutgoingEmail(message) return if (checkCmd == '/session'): if (len(message.split(' ')) > 1): subcommand = message.split(' ')[1] else: self.botLog.critical( "[/session] Имя домена не указано.") self.sendMessageGroup("Имя домена не указано.") return try: answer = getDataFromApi( '/api/session/{0}'.format(subcommand)) _str = "" for server in answer: _str += "".join( "[{0}] \n Cсылка: {1} \n\n".format( server, answer[server]['url'])) self.sendMessageGroup(_str) except Exception as exc: self.botLog.critical( "[/session] Во время выполнения возникло исключение: %s" % repr(exc)) self.sendMessageGroup( "[/session] Во время выполнения возникло исключение: %s" % repr(exc)) return self.botLog.critical( "[command] Команда не обработана: %s" % checkCmd) self.sendMessageGroup( "[command] Команда не обработана: %s" % checkCmd) return try: #Implement accept reply to ticket message if msg['reply_to_message'] is not None: #The don`t ticket`s reply if (re.search('\[(Ticket|Reply)]\[(.+?)]', msg['reply_to_message']['text']) is None): self.botLog.error( "[handle][NOT_ERROR] Не удалось извлечь идентификатор заявки.\n" ) return ticket_id = re.search( '\[(Ticket|Reply)]\[(.+?)]', msg['reply_to_message']['text']).group(2) original_message_id = ( GroupId, 
msg['reply_to_message']['message_id']) ticket_email = Datebase().getEmailByTicketId(ticket_id) if (message[0] == '.'): command = message.split(' ')[0] self.botLog.warning("Получена комманда: %s" % command) if (command == '.restore'): try: reset_answer = self.restoreCpanelPassword( ticket_email) trueAnswer = [ 'не найдено зарегистрированных услуг', 'Сбросить пароль вы можете по ссылке' ] self.botLog.warning(reset_answer) if any(x in reset_answer for x in trueAnswer): hdapi.postQuickReply( ticket_id, reset_answer, HdTicketStatus.CLOSED, self) else: self.sendMessageGroup(reset_answer) except Exception as exc: self.botLog.critical( "[.restore] Во время выполнения возникло исключение: %s" % repr(exc)) self.sendMessageGroup( "[.restore] Во время выполнения возникло исключение: %s" % repr(exc)) return if (command == '.move'): try: dept_id = dept.DOMAIN Datebase().moveTicketTo( dept_id.value, ticket_id) self.deleteMessage(original_message_id) self.botLog.critical( "[.move] Заявка %s перемещена в отдел: %s" % (ticket_id, dept_id.name)) self.sendMessageGroup( "[.move] Заявка %s перемещена в отдел: %s" % (ticket_id, dept_id.name)) except Exception as exc: self.botLog.critical( "[.move] Во время выполнения возникло исключение: %s" % repr(exc)) return if (command == '.spam'): spam_email = ticket_email Datebase().setSpamEmail(spam_email) Datebase().setTicketSpam(ticket_id) self.deleteMessage(original_message_id) return if (command == '.ssh'): try: reset_answer = self.grantAccessToSsh( ticket_email) trueAnswer = [ 'не найдено зарегистрированных услуг', 'как в панели управления хостингом', 'отсутствует возможность доступа', ] self.botLog.warning(reset_answer) if any(x in reset_answer for x in trueAnswer): hdapi.postQuickReply( ticket_id, reset_answer, HdTicketStatus.CLOSED, self) else: self.sendMessageGroup(reset_answer) except Exception as exc: self.botLog.critical( "[.ssh] Во время выполнения возникло исключение: %s" % repr(exc)) self.sendMessageGroup( "[.ssh] Во время 
выполнения возникло исключение: %s" % repr(exc)) return if (command == '.ftp'): try: reset_answer = self.howToConnectFtp( ticket_email) trueAnswer = [ 'Подробная информация по настройке FTP', 'как и для доступа в cPanel' ] self.botLog.warning(reset_answer) if any(x in reset_answer for x in trueAnswer): hdapi.postQuickReply( ticket_id, reset_answer, HdTicketStatus.CLOSED, self) else: self.sendMessageGroup(reset_answer) except Exception as exc: self.botLog.critical( "[.ftp] Во время выполнения возникло исключение: %s" % repr(exc)) self.sendMessageGroup( "[.ftp] Во время выполнения возникло исключение: %s" % repr(exc)) return if (command == '.close'): Datebase().setTicketClose(ticket_id) self.deleteMessage(original_message_id) return if (command == '.exclude'): subcommand = message.split(' ')[1] if (subcommand is None or subcommand == ""): self.botLog.critical( "[.exclude] Имя домена не указано.") return tempExcludeList = Config.getExcludeDomainList() if (subcommand in tempExcludeList): self.deleteMessage(original_message_id) tempExcludeList.remove(subcommand) else: tempExcludeList.append(subcommand) Config.setConfigValue( 'exclude', 'create', ",".join(tempExcludeList)) Config.saveConfig() Datebase().setTicketClose(ticket_id) self.deleteMessage(original_message_id) self.sendMessageGroup( "[.exclude] Сохранен список исключений: %s" % (",".join(tempExcludeList))) return self.botLog.critical( "[command] Команда не обработана: %s" % command) self.sendMessageGroup( "[command] Команда не обработана: %s" % command) return hdapi.postQuickReply(ticket_id, msg['text'], HdTicketStatus.OPEN, self) except Exception as exc: self.botLog.debug("[Exception][handle] %s" % repr(exc)) self.sendMessageMe("[Exception][handle] %s" % repr(exc)) pass else: self.send(username, chat_id, message, 'Вы не авторизованы ¯\_(ツ)_/¯')
def save_model(self):
    """Serialize the trained model to its on-disk pickle location."""
    model_path = "models/model_lgb4.pkl"
    Util.dump(self.model, model_path)
from util import Util
from patient import Patient
from kfolds import KFolds
from statisticscalculator import StatisticsCalculator
from knnclassifier import KnnClassifier

# Number of stratified folds and of nearest neighbours used below.
K_FOLDS = 10
K_NEIGHBORS = 5

# Step 1: read the data file and normalize every attribute value.
data_dictionary = Util.read_file('diabetes.csv')
normalized_data_dictionary = Util.normalize_data_dictionary(data_dictionary)

# Step 2: split the original data into K_FOLDS stratified folds.
patients = Patient.data_dictionary_to_patient_list(normalized_data_dictionary)
k_folds_aux = KFolds.patients_to_k_folds(patients, K_FOLDS)

# Step 3: iteratively train on k-1 folds and test on the remaining fold,
# rotating the test fold each repetition. The classifier returns one
# result object per iteration (i.e. per trained model), using
# K_NEIGHBORS neighbours.
results_per_iteration = KnnClassifier.k_fold_cross_validation(
    k_folds_aux, K_NEIGHBORS)

# Step 4: compute and store accuracy and F1-measure for every test run.
final_statistics = StatisticsCalculator.get_statistics(results_per_iteration)

# Step 5: lay the values out as a table built from the Statistics object.
table = StatisticsCalculator.statistics_to_table(final_statistics)
def withHashMapLookup(self, string):
    """Return True when *string* consists of all-unique characters.

    Guards against a None/absent input via Util.has_none, then compares
    the size of the per-character hash map against the string length:
    they match only when no character repeats.
    """
    if Util.has_none(string):
        return False
    return len(self._createHashMap(string)) == len(string)
# Pull the rendered grid HTML out of the Selenium element and parse it.
table_html = table.get_attribute('innerHTML')

# PARSE HTML
table_soup = BeautifulSoup(table_html, 'lxml')

# PULL ROWS BY CERTAIN PARAMETERS — each SlickGrid "slick-row" div is one
# campaign row; cells are its children.
table_data = table_soup.find_all("div", {"class": "slick-row"})

filename = '/tmp/arena_rev_test.csv'
# newline='' is required when handing a file to csv.writer (avoids blank
# rows on Windows). The `with` block closes the file itself — the old
# explicit csvfile.close() was redundant and has been removed.
with open(filename, 'w', newline='') as csvfile:
    writer = csv.writer(csvfile, delimiter=',')
    header = ['Campaign Name', 'Impressions', 'Gross Revenue']
    writer.writerow(header)
    # Columns 0, 7 and 8 hold name / impressions / gross revenue —
    # assumed from the original index choice; TODO confirm against the
    # dashboard's column layout.
    for item in table_data:
        writer.writerow([
            item.contents[0].text,
            item.contents[7].text,
            item.contents[8].text,
        ])

# Mail the report (with the CSV attached) to the configured recipients.
message = Util().get_arena_rtb()
mail_user = arena_config.mail_user
mail_pwd = arena_config.mail_pwd
mail_recipients = [arena_config.email]
mail_subject = 'RTB Dashboard Report - ' + arena_config.yesterday_date
Util().send_mail(mail_user, mail_pwd, mail_subject, mail_recipients,
                 message, filename)
def poll_queue(self):
    """Reserve and return the oldest 'new' row of DOM_REC_QUEUE.

    Atomically tags the earliest 'new' queue row with a freshly generated
    process name and flips it to 'in_process' (only when no other row is
    currently 'in_process'), then reads that row back.

    Returns:
        dict: the reserved row keyed by column name, with NULL values
        replaced by the sentinel string "-1"; empty dict when no row
        was reserved.

    Raises:
        Re-raises any error from the DB round trip after rolling back.
    """
    component_start_time = int(time.time())
    adpred_db_connect = MySQLdb.connect(
        pb_properties.adpred_db_connect_host,
        pb_properties.adpred_db_connect_user,
        pb_properties.adpred_db_connect_password,
        pb_properties.adpred_db_connect_db)
    # Explicit transaction: the reserve + read-back must commit together.
    adpred_db_connect.autocommit(False)
    cursor = adpred_db_connect.cursor()
    util = Util()
    dom_rec_queue_row = {}
    try:
        _process_name = util.generate_process_name()
        logging.info(
            "In poll_queue, _process_name = {}".format(_process_name))

        # Reserve exactly one row: the minimum dom_rec_id among 'new'
        # rows, and only when nothing is already 'in_process'.
        reserve_row_statement = """
            UPDATE DOM_REC_QUEUE
            JOIN (SELECT MIN(dom_rec_id) as dom_rec_id
                  FROM DOM_REC_QUEUE
                  WHERE status = 'new'
                  AND NOT EXISTS
                      (SELECT 1 from DOM_REC_QUEUE
                       where status = 'in_process')
                 ) as min_dom_rec
            USING(dom_rec_id)
            SET process_name = '{}',
                status = 'in_process',
                date_in_process = now()
            """.format(_process_name)
        cursor.execute(reserve_row_statement)

        _columns = "dom_rec_id, bid_list_name, bid_list_id, strategy_name, line_id," + \
            "target_cpm, uniques, min_bid, max_bid, strategy_id, status, " \
            "process_name, date_new, date_in_process, date_processed, date_error"

        # Read back whichever row we just tagged with our process name.
        reservation_details_statement = """
            SELECT {}
            FROM DOM_REC_QUEUE
            WHERE process_name = '{}'
            """.format(_columns, _process_name)
        logging.info(
            ' In db, adpred_db_connect, executing the sql statement: ' +
            reservation_details_statement)
        cursor.execute(reservation_details_statement)

        _column_names = [
            _column_name.strip() for _column_name in _columns.split(",")
        ]
        for row in cursor:
            # NULL columns are normalised to the sentinel "-1" so callers
            # never see None. (was: `v != None` — use identity test)
            dom_rec_queue_row = {
                k: v if v is not None else "-1"
                for k, v in zip(_column_names, row)
            }
            logging.info("type={}".format(type(dom_rec_queue_row)))
            logging.info(
                'row reservation was successful. Queue row = {}'.format(
                    row))

        logging.info('')
        logging.info(' Going to commit (i.e., release the lock)')
        # Commit (i.e., release the lock) right after select-for-update.
        adpred_db_connect.commit()
    except:  # noqa: E722 — intentionally broad so rollback runs for ANY failure
        logging.info('')
        logging.info(' Going to rollback (i.e., release the lock)')
        adpred_db_connect.rollback()
        logging.error(traceback.format_exc())
        raise
    finally:
        # Always release the cursor — the original leaked it whenever the
        # except branch re-raised.
        cursor.close()
    logging.info(
        ' Afer poll_queue, total elapsed time = {}, poll_queue method execution time = {}, dom_rec_queue_row = {}'
        .format(epoch.get_time_delta(self.processing_start_time),
                epoch.get_time_delta(component_start_time),
                dom_rec_queue_row))
    return dom_rec_queue_row