def _gather_parameters(self):
    """Gather options which take a value.

    Reads the (deprecated) ``optStrings`` and the ``optParameters``
    class attributes and builds the lookup tables used by the option
    parser.

    Returns:
        (longOpt, shortOpt, docs, settings, synonyms, dispatch) --
        longOpt/shortOpt feed getopt; the dicts map option names to
        doc strings, default values, canonical names and handlers.
    """
    longOpt, shortOpt = [], ''
    docs, settings, synonyms, dispatch = {}, {}, {}, {}
    parameters = []
    reflect.accumulateClassList(self.__class__, 'optStrings', parameters)
    if parameters:
        log.msg("Warning: Options.optStrings is deprecated, please use optParameters instead.")
    reflect.accumulateClassList(self.__class__, 'optParameters', parameters)
    # (removed: redundant re-initialization of `synonyms`, which was
    # already created empty above)
    for parameter in parameters:
        # pad with Nones so that short, default and doc are optional
        long, short, default, doc = util.padTo(4, parameter)
        if not long:
            # call form instead of `raise E, msg` (valid in py2 and py3)
            raise ValueError("A parameter cannot be without a name.")
        docs[long] = doc
        settings[long] = default
        if short:
            shortOpt = shortOpt + short + ':'  # ':' = takes a value (getopt)
            synonyms[short] = long
        longOpt.append(long + '=')  # '=' = takes a value (getopt)
        synonyms[long] = long
        dispatch[long] = self._generic_parameter
    return longOpt, shortOpt, docs, settings, synonyms, dispatch
def command(self, opts, pattern): """Disable check bundles based on pattern Note: if you want to disable only some metrics for a check, use the disable_metrics command instead. Arguments: pattern -- search pattern for checks """ checks, groups = util.find_checks(self.api, pattern) if not checks: log.error("No matching checks found\n" % check_id) return print "Disabling the following check bundles: " bundle_ids = {} for c in checks: if c['bundle_id'] not in bundle_ids: print " %s" % c['name'] bundle_ids[c['bundle_id']] = c['name'] if util.confirm(): for c in bundle_ids: log.msg("Disabling %s" % bundle_ids[c]) self.api.disable_check_bundle(bundle_id=c)
def MatchSetAdditionalInfo(self, matchId, type, matchSettings, matchStart, duration):
    """Insert the extended settings row for a match into six_matches_info.

    Raw byte-valued settings fields are hex-encoded before storage.
    Errors are logged with a traceback and swallowed so that match
    flow is never interrupted.  (`type` shadows the builtin; kept for
    caller compatibility.)
    """
    try:
        sql = ('INSERT INTO six_matches_info '
               '(matchId, type, matchStart, duration, matchTime, timeLimit, numberOfPauses, '
               'conditionSetting, injuries, maxNoOfSubstitutions, '
               'matchTypeEx, matchTypePk, timeSetting, season, weather) '
               'VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
        log.debug("MatchSetAdditionalInfo: SQL: %s" % sql)
        # hex-encode the raw settings bytes so they store cleanly
        self.dbController.dbWrite(0, sql, matchId, type, matchStart, duration,
                                  binascii.b2a_hex(matchSettings.match_time),
                                  binascii.b2a_hex(matchSettings.time_limit),
                                  binascii.b2a_hex(matchSettings.number_of_pauses),
                                  binascii.b2a_hex(matchSettings.condition),
                                  binascii.b2a_hex(matchSettings.injuries),
                                  binascii.b2a_hex(matchSettings.max_no_of_substitutions),
                                  binascii.b2a_hex(matchSettings.match_type_ex),
                                  binascii.b2a_hex(matchSettings.match_type_pk),
                                  binascii.b2a_hex(matchSettings.time),
                                  binascii.b2a_hex(matchSettings.season),
                                  binascii.b2a_hex(matchSettings.weather))
    except:
        # bare except is deliberate here, matching the other DB helpers
        log.msg("Error inserting into six_matches_info: %s" % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
def __getattr__(self, name):
    """A getattr method to cause a class to be refreshed."""
    # Rebuild the instance against the freshest class definition, then
    # retry the attribute lookup on the refreshed instance.
    updateInstance(self)
    log.msg("(rebuilding stale %s instance (%s))" % (str(self.__class__), name))
    return getattr(self, name)
def start():
    """Open the serial connection to the Arduino (when enabled).

    Picks the first available serial port when `dev` is unset, checks
    the port name against the known Arduino device prefixes, and exits
    the program if no Arduino-looking port is found.
    """
    global dev
    global arduino
    if use_arduino:
        # if device is not specified, pick the first one in the list
        if dev == '':
            devlist = subprocess.check_output(['python', '-m', 'serial.tools.list_ports'])
            dev = devlist.split()[0]
        # check or guess if Arduino is connected
        has_arduino = False
        for initial in DEV_INITIALS:
            if dev.startswith(initial):
                has_arduino = True
        # didn't find Arduino, so exit the program
        if not has_arduino:
            log.fail('Didn\'t find an Arduino port.')
            # BUG FIX: os.exit() does not exist (AttributeError at
            # runtime); SystemExit(1) is the sys.exit(1) equivalent.
            raise SystemExit(1)
        log.msg('Connecting to Arduino at %s' % dev)
        arduino = serial.Serial(dev, 115200, timeout=1)
        arduino.read()
        log.ok('Arduino is connected')
def _error(error, retryDelay):
    # Errback: exponential backoff, capped at 120 seconds.
    retryDelay = min(retryDelay * 2, 120)
    log.msg(
        "Failed to determine server IP-address (ERROR: %s). "
        "Trying again in %d seconds" % (str(error), retryDelay)
    )
    # NOTE(review): `self` and `resetTime` come from the enclosing
    # scope -- this only works as a nested function inside a method.
    reactor.callLater(retryDelay, self.setIP, retryDelay, resetTime)
def gen_and_write_preds(bst, dtest, idtest):
    """Predict on dtest and dump (ID, target) rows to output-pred.csv."""
    proba = bst.predict(dtest)
    msg(' writing predictions to file')
    with open('output-pred.csv', 'w') as outfile:
        outfile.write('ID,target\n')
        # one CSV row per (row id, predicted probability) pair
        outfile.writelines('{},{}\n'.format(row_id, pred)
                           for row_id, pred in zip(idtest, proba))
def deleteUser(self, username):
    """Delete the user record for `username` (inlineCallbacks generator).

    Raises Exception when the username is unknown; returns the deleted
    user record via defer.returnValue.
    """
    rows = yield self.userData.findByUsername(username)
    if not rows:
        raise Exception("Unknown username: %s" % username)
    record = rows[0]
    yield self.userData.delete(record)
    log.msg('User "%s" has been DELETED.' % username)
    defer.returnValue(record)
def StartThreadingHttpServer():
    """Launch the local player HTTP server in a daemon thread (idempotent)."""
    if share.PLAYER_THREAD:
        return  # already running
    log.msg("Start threading player server on (127.0.0.1:%d)" % share.PORT)
    httpd = ThreadingHTTPServer(("127.0.0.1", share.PORT), HTTPHandler)
    worker = threading.Thread(target=httpd.serve_forever, args=())
    worker.setDaemon(True)  # don't keep the process alive on exit
    share.PLAYER_THREAD = worker
    worker.start()
def renderError(self, error, request, responseCode=500):
    """Write an XML error document for a failed request and finish it."""
    request.setHeader('Content-Type', 'text/xml')
    request.setResponseCode(responseCode)
    detail = str(error.value)
    log.msg('SERVER ERROR: %s' % detail)
    body = ('%s<error text="server error" href="/home">'
            '<details>%s</details>'
            '</error>' % (XML_HEADER, detail))
    request.write(body)
    request.finish()
def setRosterHash(self, userId, rosterHash):
    """Remember the latest roster hash reported for a user
    (stored in the in-memory _rosterHashes dict, keyed by userId).
    """
    try:
        self._rosterHashes[userId] = rosterHash
        log.debug("config.setRosterHash: userId=%s" % userId)
    except:
        # bare except is deliberate: bookkeeping must never break the caller
        log.msg("ERROR in config.setRosterHash: %s" % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
def account(order_id):
    """Map an order id to its bookkeeping account.

    Falls back to the generic ticket-sales account when the order is
    not found in the database (KeyError) or matches neither category.
    """
    try:
        if is_naaspeksi(order_id):
            return settings.accounts.naaspeksi
        if is_kiertue(order_id):
            return settings.accounts.kiertuetuotot
    except KeyError:
        log.msg("Tilausta %s ei löytynyt tietokannasta, oletetaan tiliksi %s"
                % (order_id, settings.accounts.lipunmyynti))
    return settings.accounts.lipunmyynti
def __getattr__(self, name):
    """A getattr method to cause a class to be refreshed."""
    if name == '__del__':
        raise AttributeError("Without this, Python segfaults.")
    # Refresh the instance against the newest class, then retry lookup.
    updateInstance(self)
    log.msg("(rebuilding stale %s instance (%s))"
            % (reflect.qual(self.__class__), name))
    return getattr(self, name)
def MatchStatusSetAwayExit(self, matchId):
    """Stamp awayExit=NOW() on the six_matches_status row for a match.

    'updated=updated' keeps the row's auto-updating timestamp column
    from being touched by this write.
    """
    try:
        sql = ('UPDATE six_matches_status SET updated=updated, awayExit=NOW() WHERE id=%s')
        self.dbController.dbWrite(0, sql, matchId)
    except:
        # log-and-swallow, matching the other DB helpers in this file
        log.msg("Error in MatchStatusSetAwayExit: %s" % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
def send_image_to_arduino(user): global arduino # write data to file pydata = distort_and_format_image(user) data = struct.pack('%sB' % len(pydata), *pydata) # tell arduino to return motor to initial position arduino.write(struct.pack('B', RETURN)) arduino.flush() # send some metadata to arduino metadata = [nthbyte(out_width, 1), nthbyte(out_width, 0), nthbyte(motor_begin, 1), nthbyte(motor_begin, 0), nthbyte(motor_end, 1), nthbyte(motor_end, 0)] arduino.write(struct.pack('%sB' % len(metadata), *metadata)) arduino.flush() # wait until motor is back time.sleep(MOTOR_RETURN_TIME) # start focusing and capturing photo cap_thread = Thread(target=subprocess.check_output, args=[['gphoto2', '--force-overwrite', '--capture-image-and-download']]) cap_thread.start() log.msg('Capturing..') # wait until the camera finish focusing time.sleep(CAMERA_FOCUS_TIME) # send light data to arduino idx = 0 size = line_length pos = motor_begin arduino.readlines() while pos < motor_end - 3: s = arduino.readline().strip() if s.isdigit() == True: pos = float(s) if pos >= motor_begin and pos <= motor_end: idx = int(plot(pos, motor_begin, motor_end, 0, out_width)) buf = data[idx*size : idx*size+size] arduino.write(struct.pack('B', LINE)) arduino.write(buf) arduino.flush() arduino.write(struct.pack('B', FLUSH)) arduino.flush() # mark end of data arduino.write(struct.pack('B', END)) arduino.flush() # stop capturing photo cap_thread.join() log.ok('Finished capturing image') save_image(user) create_image_preview(user)
def dbInsert(self, key, sqlQuery, *args):
    """Run an INSERT on a write-pool connection.

    Returns the Deferred from runInteraction, with the pool item and
    start time attached to the success/error callbacks for accounting.
    """
    started = time()
    pool_entry = self.writePool.getPoolItem()
    log.msg('dbInsert-DEBUG: sql: %s' % sqlQuery)
    log.msg('dbInsert-DEBUG: args: %s' % str(args))
    deferred = pool_entry.value.runInteraction(self._insert, sqlQuery, args)
    deferred.addCallback(self.dbWriteSuccess, pool_entry, started)
    deferred.addErrback(self.dbWriteError, pool_entry, started)
    return deferred
def description(order_id):
    """Build the ledger description text for an order.

    Falls back to the plain ticket-income description when the order is
    unknown (KeyError) or matches neither category.
    """
    try:
        if is_naaspeksi(order_id):
            return "Lipunmyyntitulo, NääsPeksi"
        if is_kiertue(order_id):
            return "Lipunmyyntitulo, kiertue, " + order_ids[order_id]["city"]
    except KeyError:
        log.msg("Tilausta %s ei löytynyt tietokannasta, oletetaan kuvaus Lipunmyyntituloksi" % order_id)
    return "Lipunmyyntitulo"
def command(self, opts, template_name, target, agent, *params):
    """Adds a check based on a template

    Arguments:
        template_name -- the name of the template file
        check_name -- the name for the check
        target -- the target of the check (can be a hostname)
        agent -- the agent to run the check from
        params -- other parameters (see below)

    Other parameters are specified as "param_name=value" and will be
    substituted in the template.  Use {param_name} in the template.
    Some predefined parameters:
        {agent} -- the agent provided on the command line
        {target} -- the target provided on the command line
        {targetip} -- the ip of the target resolved in dns (may be same
                      as target if target was provided as an IP)
    """
    template = util.Template(template_name, "check")
    targetip = util.resolve_target(target)
    template_params = {
        'agent': agent,
        'target': target,
        'targetip': targetip}
    template_params.update(template.parse_nv_params(params))
    substituted = template.sub(template_params)
    # Add required parameters
    substituted['agent_id'] = util.get_agent(self.api, agent)
    substituted['target'] = targetip
    # Allow matching metrics by regex, find available metrics, and test
    # them against each regex in the metrics_regex key in the template to
    # see if they match.
    if 'metric_regex' in substituted:
        log.msg("Fetching available metrics for regex match")
        try:
            # test_mode asks the API for the would-be metrics without
            # actually creating the bundle
            substituted['test_mode'] = 1
            rv = self.api.add_check_bundle(**substituted)
            del substituted['test_mode']
        except circonusapi.CirconusAPIError, e:
            log.error("Failed to fetch available metrics: %s" % e.error)
            sys.exit(1)
        available_metrics = rv['metrics']
        for metric_type in available_metrics:
            for m in available_metrics[metric_type]:
                for regex in substituted['metric_regex']:
                    if re.match(regex, m):
                        # NOTE(review): assumes the template already
                        # provides 'metric_name' as a list -- confirm
                        substituted['metric_name'].append(m)
                        break
        log.msg("Metrics to include in the check:")
        for m in sorted(substituted['metric_name']):
            print " %s" % m
def train_xgb_for_cv(dtrain, dtest, treecount):
    """Train an XGBoost booster for one cross-validation fold.

    Hyper-parameters come from module-level globals (max_depth, eta,
    subsample, lambdaparam, ...); AUC on `dtest` drives early stopping.
    Returns the trained booster.
    """
    param = {'max_depth':max_depth, 'eta':eta, 'silent':silent,
             'objective':objective, 'gamma':gamma,
             'min_child_weight':min_child_weight,
             'max_delta_step':max_delta_step,
             'subsample':subsample, 'colsample_bytree':colsample_bytree,
             'alpha':alpha, 'lambda':lambdaparam,
             'scale_pos_weight':scale_pos_weight,
             'seed':0, 'eval_metric':'auc'}
    msg('starting cv. maxrounds={}, params: {}'.format(treecount, param))
    # eval list: training error plus the held-out fold ('eval' is the
    # last entry, so it is the one early stopping watches)
    watchlist = [(dtrain,'train'), (dtest,'eval')]
    bst = xgb.train(param, dtrain, treecount, watchlist, early_stopping_rounds=10)
    return bst
def storeOnlineUsers(self, onlineUsers):
    """Persist the current online-user count into six_stats
    (inlineCallbacks generator; errors are logged and swallowed).
    """
    try:
        sql = ('UPDATE six_stats SET onlineUsers=%s')
        yield self.dbController.dbWrite(0, sql, onlineUsers)
        log.debug('set six_stats.onlineUsers=%s' % onlineUsers)
    except:
        # stats are non-critical: log with traceback and continue
        log.msg("Error in storeOnlineUsers: %s" % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
def start():
    """Starts up the application server."""
    log.msg('Starting server...')
    # pylint: disable=W0142
    app = tornado.web.Application(URLS, **SETTINGS)
    # pylint: enable=W0142
    # command-line option wins; otherwise configuration, then 3333
    port = options.get('port', util.get_configuration_value('port', 3333))
    log.msg('Listening on port %s.' % port)
    app.listen(port)
    tornado.ioloop.IOLoop.instance().start()
def command(self, opts, duration, pattern, notes=""):
    """Schedule maintenance for rules matching the pattern

    Arguments:
        duration -- how long should the maintenance window last?
        pattern -- pattern to match the check name with
        notes -- optional notes for the maintenance window

    Duration should be of the form <integer>[m|h|d]. Examples:
        10m == 10 minutes
        4h == 4 hours
        2d == 2 days
    """
    if duration[-1] not in 'mhd':
        log.error("Duration needs to be of the form <integer>[m|h|d]")
        sys.exit(1)
    rules = self.api.list_rules()
    checks = self.api.list_checks(active='true')
    # active checks whose name matches the pattern, keyed by check_id
    filtered_checks = {}
    for c in checks:
        if re.search(pattern, c['name'], flags=re.IGNORECASE):
            filtered_checks[c['check_id']] = c
    filtered_rules = [r for r in rules if r['check_id'] in filtered_checks]
    # Remove duplicates
    dedup_rules = {}
    for r in filtered_rules:
        dedup_rules[(r['check_id'], r['metric_name'], r['severity'])] = r
    filtered_rules = dedup_rules.values()
    log.msg("Scheduling maintenance for:")
    for r in sorted(filtered_rules):
        print " Sev %s : %s : %s (from %s)" % (
            r['severity'],
            filtered_checks[r['check_id']]['name'],
            r['metric_name'],
            filtered_checks[r['check_id']]['agent'])
    if util.confirm():
        log.msg("Setting maintenance:")
        for r in filtered_rules:
            log.msgnb("Sev %s : %s : %s..." % (
                r['severity'],
                filtered_checks[r['check_id']]['name'],
                r['metric_name']))
            try:
                self.api.add_maintenance(
                    check_id=r['check_id'],
                    start='now',
                    stop=duration,
                    metric_name=r['metric_name'],
                    severity=r['severity'],
                    notes=notes)
                log.msgnf("Success")
            except circonusapi.CirconusAPIError, e:
                # keep going; report per-rule failures individually
                log.msgnf("Failed")
                log.error(e.error)
def getStats(self, profileId):
    """Assemble a user.Stats snapshot for a profile (inlineCallbacks).

    Combines win/loss/draw counts, goal totals, historic data (best
    effort), streaks and -- when the data layer supports it -- the last
    5 teams used.  Returns the Stats object via defer.returnValue.
    """
    # wins, losses, draws
    results = yield defer.DeferredList(
        [self.matchData.getWins(profileId),
         self.matchData.getLosses(profileId),
         self.matchData.getDraws(profileId)]
    )
    # each DeferredList entry is a (success, value) pair
    (_, wins), (_, losses), (_, draws) = results
    # goals
    results = yield defer.DeferredList(
        [self.matchData.getGoalsHome(profileId),
         self.matchData.getGoalsAway(profileId)]
    )
    (_, (scored_home, allowed_home)) = results[0]
    (_, (scored_away, allowed_away)) = results[1]
    goals_scored = scored_home + scored_away
    goals_allowed = allowed_home + allowed_away
    # historic data (best effort: all-zero defaults on any failure)
    historyWins = 0
    historyLosses = 0
    historyDraws = 0
    historyDC = 0
    try:
        results = yield self.matchData.getHistoryData(profileId)
        historyWins, historyLosses, historyDraws, historyDC = results
    except:
        log.msg("ERROR in logic.getStats:")
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
    # log.msg("HistoryData: W=%s L=%s D=%s DC=%s" % (historyWins, historyLosses, historyDraws, historyDC))
    # streaks
    results = yield self.matchData.getStreaks(profileId)
    current, best = results
    # last 5 teams
    if hasattr(self.matchData, "getLastTeamsUsed"):
        teams = yield self.matchData.getLastTeamsUsed(profileId, 5)
    else:
        teams = None
    stats = user.Stats(
        profileId,
        wins,
        losses,
        draws,
        historyWins,
        historyLosses,
        historyDraws,
        historyDC,
        goals_scored,
        goals_allowed,
        current,
        best,
        teams,
    )
    defer.returnValue(stats)
def UpdateMatchPointsAndRating(self, matchId, profileId, points, pointsDiff, rating, ratingDiff):
    """Record the computed points/rating outcome for one player in a match
    (six_matches_played row identified by match_id + profile_id).
    """
    try:
        sql = ('UPDATE six_matches_played '
               'SET points=%s, pointsDiff=%s, rating=%s, ratingDiff=%s '
               'WHERE match_id=%s AND profile_id=%s')
        self.dbController.dbWrite(0, sql, points, pointsDiff, rating, ratingDiff, matchId, profileId)
    except:
        # log-and-swallow, matching the other DB helpers in this file
        log.msg("Error in data6.UpdateMatchPointsAndRating: %s" % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
def runEverything(self):
    """Run all currently-pending callbacks.

    The queue is snapshotted and emptied first, so callbacks scheduled
    while running are kept for the next invocation.  Exceptions from
    individual callbacks are logged and do not stop the remainder.
    """
    q = self.queue[:]
    self.queue = []
    for ticks, func, args in q:
        try:
            # IDIOM FIX: apply() is deprecated (removed in Python 3);
            # func(*args) is the exact equivalent.
            func(*args)
        except:
            log.msg('Exception in delayed function [all].')
            traceback.print_exc(file=log.logfile)
def get_agent(api, agent_name):
    """Resolve an agent name to its agent_id.

    On an unknown name, prints the list of valid agents and exits the
    program with status 1.
    """
    agents = {}
    for entry in api.list_agents():
        agents[entry['name']] = entry['agent_id']
    try:
        return agents[agent_name]
    except KeyError:
        log.error("Invalid/Unknown Agent: %s" % agent_name)
        log.msg("Valid Agents:")
        for name in agents:
            log.msgnf(" %s" % name)
        sys.exit(1)
def create_image_preview(user):
    """Downscale the captured image and store the preview path on `user`."""
    log.msg('Creating preview..')
    image = cv2.imread(user.imagepath)
    small = cv2.resize(image, PREVIEW_SIZE)
    name = os.path.basename(user.imagepath)
    target = main.PREVIEW_DIR + name
    cv2.imwrite(target, small)
    user.previewpath = target
    log.ok('Created preview at ' + target)
def __getattr__(self, attr):
    """I make sure that old style 'optObj.option' access still works.

    Deprecated: use the mapping-style optionObject['option'] instead.
    """
    if attr == 'data':
        raise AttributeError('Options instance has no attribute data: You probably forgot to call Options.__init__ from your subclass.')
    #XXX GET RID OF ME!
    # IDIOM FIX: dict.has_key() is deprecated (and gone in Python 3);
    # the `in` operator is the exact equivalent.
    if attr in self.opts:
        log.msg("optionObject.option is deprecated! Use new-style optionObject['option'] instead! (This is only a warning) (%s, %s)" % (attr, self.opts[attr]))
        return self.opts[attr]
    else:
        raise AttributeError("%s instance has no attribute '%s'" % (self.__class__, attr))
def main():
    """CLI entry point: expects bank-statement and payment-provider file
    paths as the two command-line arguments, then runs kirjaaja on them.
    """
    if len(sys.argv) < 3:
        log.msg("Args not valid, only %d args." % (len(sys.argv),), "ERROR")
        print
        print "Usage: %s tiliote_file verkkomaksut_file" % sys.argv[0]
        print
        sys.exit()
    tiliote_filename = sys.argv[1]
    verkkomaksut_filename = sys.argv[2]
    kirjaaja(tiliote_filename, verkkomaksut_filename)
def WriteAccessLogEntry(self, userName, ip, logType):
    """Append one row to weblm_log_access (inlineCallbacks generator).

    Failures are logged with a traceback and swallowed so access
    logging can never break the request path.
    """
    try:
        accesstime = int(time.time())
        sql = ('INSERT INTO weblm_log_access (user, ip, accesstime, logType) '
               'VALUES (%s,%s,%s,%s)')
        # (removed: unused duplicate timestamp variable `logTime`)
        params = (userName, ip, accesstime, logType)
        yield self.dbController.dbWrite(0, sql, *params)
    except:
        log.msg("Error in WriteAccessLogEntry: %s" % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
def MatchStatusUpdate(self, minutes, state, scoreHome, scoreAway, scoreHomeReg, scoreAwayReg, matchId):
    """Update the live status row of a match (clock, state and scores;
    the *Reg columns hold the regular-time scores).
    """
    try:
        sql = (
            'UPDATE six_matches_status '
            'SET minutes=%s, state=%s, scoreHome=%s, scoreAway=%s, scoreHomeReg=%s, scoreAwayReg=%s '
            'WHERE id=%s')
        log.msg("MatchStatusUpdate: [%s] [%s] [%s] [%s] [%s] [%s] [%s]" % (minutes, state, scoreHome, scoreAway, scoreHomeReg, scoreAwayReg, matchId))
        self.dbController.dbWrite(0, sql, minutes, state, scoreHome, scoreAway, scoreHomeReg, scoreAwayReg, matchId)
    except:
        # log-and-swallow, matching the other DB helpers in this file
        log.msg("Error updating six_matches_status: %s" % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
def MatchStatusSetCancel(self, matchId, profileCancelId):
    """Record which side cancelled a match.

    Issues both UPDATEs; the WHERE clauses ensure only the row where
    the cancelling profile is actually on that side gets stamped.
    'updated=updated' preserves the auto-updating timestamp column.
    """
    try:
        log.msg('MatchStatusSetCancel: matchId=%s profileCancelId=%s' % (matchId, profileCancelId))
        sql = (
            'UPDATE six_matches_status SET updated=updated, homeCancel=NOW() WHERE id=%s AND (profileHome=%s OR profileHome2=%s OR profileHome3=%s)'
        )
        self.dbController.dbWrite(0, sql, matchId, profileCancelId, profileCancelId, profileCancelId)
        sql = (
            'UPDATE six_matches_status SET updated=updated, awayCancel=NOW() WHERE id=%s AND (profileAway=%s OR profileAway2=%s OR profileAway3=%s)'
        )
        self.dbController.dbWrite(0, sql, matchId, profileCancelId, profileCancelId, profileCancelId)
    except:
        # log-and-swallow, matching the other DB helpers in this file
        log.msg("Error in MatchStatusSetCancel: %s" % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
def _storeTxn(self, transaction, match, hashHome, hashAway, lobbyName, roomName, season):
    """Persist a finished match inside one DB transaction.

    Writes the six_matches row, one six_matches_played row per
    participant and -- for 1-on-1 matches only -- updates each player's
    winning streak.  Returns the new match id.
    """
    def _writeStreak(profile_id, win):
        # Upsert the player's current/best winning streak row.
        wins, best = 0, 0
        sql = ('SELECT wins, best FROM six_streaks '
               'WHERE profile_id=%s')
        transaction.execute(sql, (profile_id, ))
        data = transaction.fetchall()
        if len(data) > 0:
            wins, best = data[0][0], data[0][1]
        if win:
            wins += 1
            best = max(wins, best)
        else:
            wins = 0  # a loss or draw resets the streak
        sql = ('INSERT INTO six_streaks (profile_id, wins, best) '
               'VALUES (%s,%s,%s) ON DUPLICATE KEY UPDATE '
               'wins=%s, best=%s')
        log.msg('data6.py: _writeStreak: profile_id=%s wins=%s, best=%s' % (profile_id, wins, best))
        transaction.execute(sql, (profile_id, wins, best, wins, best))
    # record match result
    home_players = [match.teamSelection.home_captain]
    home_players.extend(match.teamSelection.home_more_players)
    away_players = [match.teamSelection.away_captain]
    away_players.extend(match.teamSelection.away_more_players)
    numParticipants = len(home_players) + len(away_players)
    log.msg('data6.py: MatchData: _storeTxn: numParticipants=%s' % numParticipants)
    sql = (
        'INSERT INTO six_matches '
        '(score_home, score_away, score_home_reg, score_away_reg, team_id_home, team_id_away, hashHome, hashAway, lobbyName, roomName, minutes, season, numParticipants) '
        'VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)')
    transaction.execute(
        sql, (match.score_home, match.score_away,
              match.score_home_reg, match.score_away_reg,
              match.teamSelection.home_team_id,
              match.teamSelection.away_team_id,
              hashHome, hashAway, lobbyName, roomName,
              match.clock, season, numParticipants))
    # fetch the auto-increment id of the row just inserted
    transaction.execute('SELECT LAST_INSERT_ID()')
    matchId = transaction.fetchall()[0][0]
    # record players of the match
    for profile in home_players:
        sql = (
            'INSERT INTO six_matches_played (match_id, profile_id, home, pointsDiff, ratingDiff) '
            'VALUES (%s, %s, 1, %s, %s)')
        transaction.execute(sql, (matchId, profile.id, 0, 0))
    for profile in away_players:
        sql = (
            'INSERT INTO six_matches_played (match_id, profile_id, home, pointsDiff, ratingDiff) '
            'VALUES (%s, %s, 0, %s, %s)')
        transaction.execute(sql, (matchId, profile.id, 0, 0))
    # update winning streaks (1on1)
    if numParticipants == 2:
        if match.score_home > match.score_away:
            # home win
            for profile in home_players:
                _writeStreak(profile.id, True)
            for profile in away_players:
                _writeStreak(profile.id, False)
        elif match.score_home < match.score_away:
            # away win
            for profile in home_players:
                _writeStreak(profile.id, False)
            for profile in away_players:
                _writeStreak(profile.id, True)
        else:
            # draw
            for profile in home_players:
                _writeStreak(profile.id, False)
            for profile in away_players:
                _writeStreak(profile.id, False)
    return matchId
class AppPipeline(object):
    """Scrapy pipeline that logs each scraped AppItem and passes it through."""
    def process_item(self, item, spider):
        log.msg("Catch an AppItem", level=log.INFO)
        return item
def error(self, error):
    """Errback: log a DB retrieval failure, then re-raise the wrapped
    exception so downstream errbacks still see it.
    """
    log.msg('ERROR: error in DB retrieval: %s' % error.value)
    error.raiseException()
def __init__(self, serverConfig, dbConfig, userData, profileData, matchData, profileLogic):
    """Wire up the main server controller.

    Stores the injected config/data-access objects, builds the lobby
    list from configuration, loads the banned-list and schedules the
    background tasks (IP detection, rank computation, day-change).
    """
    self.serverConfig = serverConfig
    self.dbConfig = dbConfig
    self.userData = userData
    self.profileData = profileData
    self.matchData = matchData
    self.profileLogic = profileLogic
    # static key used by the wire-protocol cipher
    self.cipherKey = ('27501fd04e6b82c831024dac5c6305221974deb9388a2190'
                      '1d576cbbe2f377ef23d75486010f37819afe6c321a0146d2'
                      '1544ec365bf7289a')
    self.serverIP_lan = None
    self.serverIP_wan = None
    self.startDatetime = datetime.now()
    reactor.callLater(0, self.setIP)
    # initialize interface to listen on
    self.interface = self.serverConfig.get('ListenOn', '')
    # initialize MaxUsers, set to default if missing from config
    self.serverConfig.MaxUsers = self.serverConfig.get('MaxUsers', 1000)
    self.lobbies = []
    for i, item in enumerate(serverConfig.Lobbies):
        # Lobby entries may be plain strings or structured dicts:
        # TypeError below means "plain string", KeyError means the
        # dict was missing that attribute.
        try:
            name = item['name']
        except TypeError:
            name = str(item)
        except KeyError:
            raise errors.ConfigurationError(
                'Structured lobby definitions must '
                'include "name" attribute')
        try:
            lobbyType = item['type']
        except TypeError:
            lobbyType = 'open'
        except KeyError:
            lobbyType = 'open'
        try:
            showMatches = item['showMatches']
        except TypeError:
            showMatches = True
        except KeyError:
            showMatches = True
        try:
            checkRosterHash = bool(int(item['checkRosterHash']))
        except:
            checkRosterHash = True
        aLobby = lobby.Lobby(name, 100)
        aLobby.showMatches = showMatches
        aLobby.checkRosterHash = checkRosterHash
        aLobby.typeStr = str(lobbyType)
        if lobbyType == 'noStats':
            aLobby.typeCode = 0x20
        elif lobbyType == 'open':
            aLobby.typeCode = 0x5f
        elif isinstance(lobbyType, list):
            # restricted lobby: type code is a bitmask of allowed divisions
            divMap = {'A': 0, '3B': 1, '3A': 2, '2': 3, '1': 4}
            typeCode = 0
            for divName in lobbyType:
                try:
                    typeCode += 2**divMap[divName]
                except KeyError:
                    raise errors.ConfigurationError(
                        'Invalid lobby type definition. '
                        'Unrecognized division: "%s" ' % divName)
            aLobby.typeCode = typeCode
        else:
            aLobby.typeCode = 0x5f  # default: open
        self.lobbies.append(aLobby)
    # auto-IP detector site
    try:
        self.ipDetectUri = self.serverConfig.IpDetectUri
    except AttributeError:
        self.ipDetectUri = 'http://mapote.com/cgi-bin/ip.py'
    # rating/points calculator
    self.ratingMath = rating.RatingMath(0.44, 0.56)
    # initialize online-list
    self.onlineUsers = dict()
    # initialize latest-info dict
    self._latestUserInfo = dict()
    # read banned-list, if available
    bannedYaml = self.serverConfig.BannedList
    if not bannedYaml.startswith('/'):
        # relative path: resolve against FSROOT (default: cwd)
        fsroot = os.environ.get('FSROOT', '.')
        bannedYaml = fsroot + '/' + bannedYaml
    if os.path.exists(bannedYaml):
        self.bannedList = YamlConfig(bannedYaml)
    else:
        self.bannedList = YamlConfig(None, newYamlFile=bannedYaml)
        log.msg('NOTICE: banned-list file absent.')
    try:
        self.bannedList.Banned
    except AttributeError:
        self.bannedList.Banned = []
    # make banned-list structure for quick checks
    self.makeFastBannedList()
    # set up periodical rank-compute
    reactor.callLater(5, self.computeRanks)
    # set up periodical date updates
    now = datetime.now()
    today = datetime(now.year, now.month, now.day)
    td = today + timedelta(days=1) - now
    # NOTE(review): `td` (time until midnight) is computed but unused --
    # the call below fires immediately (delay 0).  Confirm whether the
    # intended delay was td's duration.
    reactor.callLater(0, self.systemDayChange)
def _error(error, retryDelay):
    # Errback: exponential backoff, capped at 120 seconds.
    retryDelay = min(retryDelay * 2, 120)
    log.msg('Failed to determine server IP-address (ERROR: %s). '
            'Trying again in %d seconds' % (str(error), retryDelay))
    # NOTE(review): `self` and `resetTime` come from the enclosing
    # scope -- this only works as a nested function inside a method.
    reactor.callLater(retryDelay, self.setIP, retryDelay, resetTime)
def pred_dtest(self):
    """Run the tuned model over the held-out dtest matrix and return
    the predictions."""
    log.msg('started predicting')
    predictions = self.tuner.predict(self.dtest)
    return predictions
def rebuild(module, doLog=1):
    """Reload a module and do as much as possible to replace its references.

    Scans the module for classic classes, functions and new-style
    classes defined in it, reloads the module, disables the stale
    classic classes (so stale instances trip __getattr__ and refresh),
    repoints instances of stale new-style classes, and finally patches
    references in every other loaded module.
    """
    global lastRebuild
    lastRebuild = time.time()
    if hasattr(module, 'ALLOW_TWISTED_REBUILD'):
        # Is this module allowed to be rebuilt?
        if not module.ALLOW_TWISTED_REBUILD:
            raise RuntimeError, "I am not allowed to be rebuilt."
    if doLog:
        log.msg('Rebuilding %s...' % str(module.__name__))
    ## Safely handle adapter re-registration
    from twisted.python import components
    components.ALLOW_DUPLICATES = 1
    d = module.__dict__
    _modDictIDMap[id(d)] = module
    newclasses = {}
    classes = {}
    functions = {}
    values = {}
    if doLog:
        log.msg(' (scanning %s): ' % str(module.__name__))
    for k, v in d.items():
        if type(v) == types.ClassType:
            # Failure condition -- instances of classes with buggy
            # __hash__/__cmp__ methods referenced at the module level...
            if v.__module__ == module.__name__:
                classes[v] = 1
                if doLog:
                    log.logfile.write("c")
                    log.logfile.flush()
        elif type(v) == types.FunctionType:
            if v.func_globals is module.__dict__:
                functions[v] = 1
                if doLog:
                    log.logfile.write("f")
                    log.logfile.flush()
        elif isinstance(v, type):
            # new-style class defined in this module
            if v.__module__ == module.__name__:
                newclasses[v] = 1
                if doLog:
                    log.logfile.write("o")
                    log.logfile.flush()
    values.update(classes)
    values.update(functions)
    fromOldModule = values.has_key
    newclasses = newclasses.keys()
    classes = classes.keys()
    functions = functions.keys()
    if doLog:
        log.msg('')
        log.msg(' (reload %s)' % str(module.__name__))
    # Boom.
    reload(module)
    # Make sure that my traceback printing will at least be recent...
    linecache.clearcache()
    if doLog:
        log.msg(' (cleaning %s): ' % str(module.__name__))
    for clazz in classes:
        if getattr(module, clazz.__name__) is clazz:
            log.msg("WARNING: class %s not replaced by reload!" % reflect.qual(clazz))
        else:
            if doLog:
                log.logfile.write("x")
                log.logfile.flush()
            # Hollow out the stale classic class so attribute access on
            # old instances falls through to the module-level __getattr__
            # shim, which refreshes them.
            clazz.__bases__ = ()
            clazz.__dict__.clear()
            clazz.__getattr__ = __getattr__
            clazz.__module__ = module.__name__
    if newclasses:
        import gc
    for nclass in newclasses:
        ga = getattr(module, nclass.__name__)
        if ga is nclass:
            log.msg("WARNING: new-class %s not replaced by reload!" % reflect.qual(nclass))
        else:
            # repoint live instances of the stale new-style class
            for r in gc.get_referrers(nclass):
                if getattr(r, '__class__', None) is nclass:
                    r.__class__ = ga
    if doLog:
        log.msg('')
        log.msg(' (fixing %s): ' % str(module.__name__))
    modcount = 0
    for mk, mod in sys.modules.items():
        modcount = modcount + 1
        if mod == module or mod is None:
            continue
        if not hasattr(mod, '__file__'):
            # It's a builtin module; nothing to replace here.
            continue
        changed = 0
        for k, v in mod.__dict__.items():
            try:
                hash(v)
            except TypeError:
                continue
            if fromOldModule(v):
                if type(v) == types.ClassType:
                    if doLog:
                        log.logfile.write("c")
                        log.logfile.flush()
                    nv = latestClass(v)
                else:
                    if doLog:
                        log.logfile.write("f")
                        log.logfile.flush()
                    nv = latestFunction(v)
                changed = 1
                setattr(mod, k, nv)
            else:
                # Replace bases of non-module classes just to be sure.
                if type(v) == types.ClassType:
                    for base in v.__bases__:
                        if fromOldModule(base):
                            latestClass(v)
        if doLog and not changed and ((modcount % 10) == 0):
            log.logfile.write(".")
            log.logfile.flush()
    components.ALLOW_DUPLICATES = 0
    if doLog:
        log.msg('')
        log.msg(' Rebuilt %s.' % str(module.__name__))
    return module
def find_check_bundle_pretty(api, pattern):
    """Like find_check_bundle, but announces the lookup to the user first."""
    log.msg("Retrieving matching checks")
    return find_check_bundle(api, pattern)
def _error(error):
    # Errback closure: `request` comes from the enclosing scope.
    # Sends a minimal XML error document and closes the response.
    request.setResponseCode(500)
    log.msg('SERVER ERROR: %s' % str(error.value))
    request.write('%s<error text="server error"/>' % XML_HEADER)
    request.finish()
async def run():
    """Main curses display/input loop.

    Runs until death.die_all is set; any unexpected exception tears the
    program down with exit code -5 after logging a traceback.  The
    curses screen is always finalized on the way out.
    """
    global stdscr, glblsec, do_refresh
    try:
        init()
        glblsec = Section()
        log.msg('Entering infinite disp/IO loop...')
        # infinite display loop
        while not death.die_all:
            await asyncio.sleep(0)  # yield to other tasks every iteration
            if do_refresh:
                stdscr.refresh()
                do_refresh = False
            if (await check_termsize_change()):
                # terminal resized: recompute layout and redraw everything
                glblsec.recalc_coords()
                stdscr.clear()
                await glblsec.draw()
            await check_input()
    except Exception as e:
        log.msg('Infinite disp/IO loop failed!')
        log.msg(str(e))
        log.msg(sys.exc_info()[0])
        log.msg(traceback.format_exc())
        sys.exit(-5)
    finally:
        log.msg('Finalizing curses display...')
        finalize()
def dbReadError(self, error, startTime, poolItem):
    """Log a failed DB read loudly and pass the Failure through unchanged."""
    failure_value = error.value
    log.msg('ALERT: dbReadError: %s (type: %s)'
            % (failure_value, failure_value.__class__))
    return error
from scrapy import signals
from scrapy.xlib.pydispatch import dispatcher
from scrapy import log

import sqlite3
from os import path

from android_apps_crawler import settings


class AppPipeline(object):
    """Pipeline that logs each scraped AppItem and passes it through."""
    def process_item(self, item, spider):
        log.msg("Catch an AppItem", level=log.INFO)
        return item


class SQLitePipeline(object):
    """Stores scraped app URLs in a per-market SQLite database.

    The connection is opened when the Scrapy engine starts and closed
    when it stops; duplicate URLs are rejected by the UNIQUE constraint
    on apps.url.
    """
    filename = ''
    conn = None

    def __init__(self):
        # database file name: <MARKET_NAME>.db under DATABASE_DIR
        self.filename += settings.MARKET_NAME
        self.filename += ".db"
        self.filename = path.join(settings.DATABASE_DIR, self.filename)
        print self.filename
        self.conn = None
        # open/close the DB in step with the crawler engine lifecycle
        dispatcher.connect(self.initialize, signals.engine_started)
        dispatcher.connect(self.finalize, signals.engine_stopped)

    def process_item(self, item, spider):
        # UNIQUE(url) makes re-inserting a known URL raise IntegrityError
        try:
            self.conn.execute('insert into apps(url) values(?)',
                              (item['url'],)
                              )
            self.conn.commit()
            log.msg("Inserting into database");
        except sqlite3.IntegrityError:
            print "Duplicated"
        return item

    def initialize(self):
        if path.exists(self.filename):
            self.conn = sqlite3.connect(self.filename)
        else:
            self.create_table()
        # write-ahead logging for better concurrent access
        self.conn.execute("PRAGMA journal_mode=WAL;")
        self.conn.commit()

    def finalize(self):
        if self.conn is not None:
            self.conn.commit()
            self.conn.close()
            self.conn = None

    def create_table(self):
        self.conn = sqlite3.connect(self.filename)
        self.conn.execute("create table apps( \
            id integer primary key autoincrement, \
            url varchar(100) not null unique, \
            downloaded int default 0)"
            )
        self.conn.commit()
def rebuild(module, doLog=1):
    """Reload a module and do as much as possible to replace its references.

    Python 2 only (relies on types.ClassType, func_globals, dict.has_key
    and the reload() builtin). Scans the module for classes/functions it
    defines, reloads it, invalidates the old class objects, then walks
    every module in sys.modules replacing references to the old objects
    with their reloaded counterparts (via latestClass/latestFunction,
    defined elsewhere in this module).

    doLog -- when true, prints a progress trace of single characters
             ('c' class, 'f' function, 'x' invalidated, '.' untouched).
    """
    global lastRebuild
    lastRebuild = time.time()
    if hasattr(module, 'ALLOW_TWISTED_REBUILD'):
        # Is this module allowed to be rebuilt?
        if not module.ALLOW_TWISTED_REBUILD:
            assert 0, "I am not allowed to be rebuilt."
    if doLog:
        log.msg('Rebuilding %s...' % str(module.__name__))
    d = module.__dict__
    # Remember which module this dict belongs to, for later lookups.
    _modDictIDMap[id(d)] = module
    classes = {}
    functions = {}
    values = {}
    if doLog:
        print ' (scanning %s): ' % str(module.__name__),
    # Collect the old-style classes and functions defined BY this module
    # (not merely imported into it).
    for k, v in d.items():
        if type(v) == types.ClassType:
            # Failure condition -- instances of classes with buggy
            # __hash__/__cmp__ methods referenced at the module level...
            if v.__module__ == module.__name__:
                classes[v] = 1
                if doLog:
                    sys.stdout.write("c")
                    sys.stdout.flush()
        elif type(v) == types.FunctionType:
            if v.func_globals is module.__dict__:
                functions[v] = 1
                if doLog:
                    sys.stdout.write("f")
                    sys.stdout.flush()
    values.update(classes)
    values.update(functions)
    # Bound method used as a fast membership test for old objects.
    fromOldModule = values.has_key
    classes = classes.keys()
    functions = functions.keys()
    if doLog:
        print
        print ' (reload   %s)' % str(module.__name__)
    # Boom.
    reload(module)
    # Make sure that my traceback printing will at least be recent...
    linecache.clearcache()
    if doLog:
        print ' (cleaning %s): ' % str(module.__name__),
    # Invalidate the OLD class objects so stale references fail loudly:
    # strip bases/dict and route attribute access through __getattr__
    # (a module-level hook defined elsewhere in this file).
    for clazz in classes:
        if getattr(module, clazz.__name__) is clazz:
            print "WARNING: class %s not replaced by reload!" % str(clazz)
        else:
            if doLog:
                sys.stdout.write("x")
                sys.stdout.flush()
            clazz.__bases__ = ()
            clazz.__dict__.clear()
            clazz.__getattr__ = __getattr__
            clazz.__module__ = module.__name__
    if doLog:
        print
        print ' (fixing %s): ' % str(module.__name__),
    modcount = 0
    # Walk every loaded module and swap old references for new ones.
    for mk, mod in sys.modules.items():
        modcount = modcount + 1
        if mod == module or mod is None:
            continue
        if mod.__name__ != '__main__' and not hasattr(mod, '__file__'):
            # It's a builtin module; nothing to replace here.
            continue
        changed = 0
        for k, v in mod.__dict__.items():
            # print "checking for %s.%s" % (mod.__name__, k)
            try:
                # Unhashable values can't be in the old-object map anyway.
                hash(v)
            except TypeError:
                continue
            if fromOldModule(v):
                # print "Found a match! (%s.%s)" % (mod.__name__, k)
                if type(v) == types.ClassType:
                    if doLog:
                        sys.stdout.write("c")
                        sys.stdout.flush()
                    nv = latestClass(v)
                else:
                    if doLog:
                        sys.stdout.write("f")
                        sys.stdout.flush()
                    nv = latestFunction(v)
                changed = 1
                setattr(mod, k, nv)
            else:
                # Replace bases of non-module classes just to be sure.
                if type(v) == types.ClassType:
                    for base in v.__bases__:
                        if fromOldModule(base):
                            latestClass(v)
        if doLog and not changed and ((modcount % 10) == 0):
            sys.stdout.write(".")
            sys.stdout.flush()
    if doLog:
        print
        print ' Rebuilt %s.' % str(module.__name__)
    return module
def setUserInfo(self, usr, userInfo):
    """Cache the most recent userInfo for the given user (best effort).

    Any failure is logged and swallowed so it never disturbs the caller;
    unlike the original bare except, the exception type is now recorded
    (consistent with checkEmptyRooms) and KeyboardInterrupt/SystemExit
    are no longer intercepted.
    """
    try:
        self._latestUserInfo[usr.username] = userInfo
        log.debug('config.setUserInfo: username=%s' % usr.username)
    except Exception:
        log.msg('ERROR in config.setUserInfo: %s' % sys.exc_info()[0])
def _setIP(result):
    # Callback that stores the server's resolved WAN IP address.
    # NOTE(review): this is a closure -- `self` and `resetTime` come from
    # an enclosing scope not visible here; `result` is presumably the
    # outcome of an async IP lookup. Confirm against the enclosing method.
    self.serverIP_wan = str(result).strip()
    if resetTime:
        # Treat the (re)discovery of the IP as the server start time.
        self.startDatetime = datetime.now()
    log.msg('Server IP-address: %s' % self.serverIP_wan)
    log.msg('Fiveserver %s ready' % FiveServerConfig.VERSION)
def __init__(self, train_file, preproc=None, test_file=None, dev_size=3000, log_file_index=-1):
    """Load training/test data, preprocess it, and build the xgb tuner.

    train_file     -- .npy file with the training matrix (loaded via np.load).
    preproc        -- callable (train, test) -> (dtrain, dvalid, dtest);
                      effectively required, see NOTE below.
    test_file      -- optional .npy file; empty array when absent.
    dev_size       -- unused in this constructor (kept for interface
                      compatibility).
    log_file_index -- required non-negative index for the log file.
    """
    self.seed = 0
    # ------- prepare log file -----------------#
    # Explicit raise instead of assert: assert is stripped under -O.
    if log_file_index < 0:
        raise ValueError('log_file_index must be >= 0')
    self.init_log(log_file_index)
    log.msg('****************************************')
    log.msg('*** log file initialized ********')
    # ------- preparing data ----------------------- #
    log.msg('* preparing data')
    try:
        self.train = np.load(train_file)
        self.test = np.load(test_file) if test_file else np.array([])
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # propagate; any load failure becomes one uniform error.
        raise ValueError('Wrong train/test file input')
    if preproc:
        self.dtrain, self.dvalid, self.dtest = preproc(self.train, self.test)
        # Raw arrays no longer needed once the DMatrix-style splits exist.
        del self.train, self.test
        log.msg('data is ready to use.')
    # NOTE(review): if preproc is None, self.dtrain/self.dvalid are never
    # set and the xgb_tuner construction below raises AttributeError --
    # preproc is effectively required. Confirm with callers.
    # ------ initialize the parameters ----------- #
    self.params = {
        'max_delta_step': 0,
        'scale_pos_weight': 1,  # calculated for each fold. #neg / #pos
        'max_depth': 6,
        'min_child_weight': 1,
        'gamma': 0,
        'subsample': 1,
        'colsample_bytree': 1,
        'reg_alpha': 0,
        'reg_lambda': 1,
        'eta': 0.01,
        'objective': "binary:logistic",
        'eval_metric': 'auc',
        'n_jobs': -1,
        'random_seed': self.seed
    }
    self.rounds = 800
    self.esrounds = 50  # early stop rounds.
    # ---- initializing xgb_tuner object ------ #
    self.tuner = xgb_tuner(self.dtrain, self.dvalid, self.params,
                           logging=True, log_file_index=log_file_index,
                           rounds=self.rounds, esrounds=self.esrounds)
    del self.dtrain, self.dvalid
    log.msg('class is ready.')
def tilitin(data): conn = None insert_payment = """ INSERT INTO document (id, number, period_id, date) VALUES (nextval('document_id_seq'), %s, %s, %s) RETURNING id """ insert_row = """ INSERT INTO entry (id, document_id, account_id, debit, amount, description, row_number, flags) VALUES (nextval('entry_id_seq'), %s, (SELECT id FROM account WHERE number = %s), %s, %s, %s, %s, %s) """ try: conn = psycopg2.connect(host=settings.psql.host, user=settings.psql.user, password=settings.psql.passwd, database=settings.psql.db) conn.set_client_encoding('UTF8') cur = conn.cursor() cur.execute("SELECT MAX(number) FROM document WHERE period_id = %s" % (settings.period_id, )) document_number = cur.fetchone() if document_number[0] is None: document_number = 1 else: document_number = document_number[0] + 1 log.msg("Starting with document_number %d" % (document_number, )) for p in data: cur.execute( insert_payment, (document_number, settings.period_id, p['meta']['date'])) document_id = cur.fetchone()[0] log.msg("Inserted payment with document_id %d" % (document_id, )) row_number = 1 for row in p['payments']: if not row['account']: row['account'] = settings.accounts.muut_kulut cur.execute( insert_row, (document_id, str(row['account']), row['debit'], row['amount'], row['description'][:100], row_number, 0)) row_number += 1 log.msg("Inserted %d rows" % (row_number - 1)) if p['meta']['type'] == 'verkkomaksut': verkkomaksut.generate_batch_report(p, document_number) document_number += 1 except psycopg2.DatabaseError, e: if conn: conn.rollback() print 'Error %s' % e sys.exit(1)
def checkEmptyRooms(self):
    """Periodic sweep that deletes empty rooms from every lobby.

    Reschedules itself every 600 seconds via the reactor regardless of
    success or failure; failures are logged with a full traceback.
    """
    log.msg("checkEmptyRooms")
    try:
        for aLobby in self.lobbies:
            log.msg("checkEmptyRooms: Lobby: %s" % aLobby.name)
            # FIX: iterate over a snapshot. deleteRoom() mutates
            # aLobby.rooms, and removing entries while iterating
            # itervalues() raises "dictionary changed size during
            # iteration" (previously masked by the bare except below).
            for room in list(aLobby.rooms.values()):
                roomName = room.name
                log.msg("checkEmptyRooms: Room: %s" % roomName)
                if room.isEmpty():
                    log.msg("checkEmptyRooms: Room empty, trying to delete")
                    aLobby.deleteRoom(room)
                    log.msg("checkEmptyRooms: Room %s deleted" % roomName)
    except Exception:
        log.msg('ERROR in config.py:checkEmptyRooms: %s' % sys.exc_info()[0])
        exc_type, exc_value, exc_traceback = sys.exc_info()
        lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
        log.msg("Lines: %s" % lines)
    reactor.callLater(600, self.checkEmptyRooms)
def dbWriteError(self, error, startTime, poolItem):
    # Errback for failed database writes: log at ALERT level with the
    # full traceback, then return the failure unchanged so downstream
    # errbacks still see it.
    # error: a twisted Failure (has .value and .getTraceback()).
    # startTime/poolItem: unused here; kept so the signature matches the
    #   sibling errback dbReadError.
    log.msg('ALERT: dbWriteError: %s (type: %s)' % (error.value, error.value.__class__))
    log.msg(error.getTraceback())
    return error
def evaluate(self, observer, hash=None, key=None):
    # Fallback implementation: subclasses are expected to override this.
    # Logs a warning plus the arguments it was called with, and withholds
    # the value by returning the DontTell sentinel (defined elsewhere in
    # this module). Note: `hash` shadows the builtin -- kept for
    # signature compatibility with overriders.
    log.msg('observe.py: Dynamic.evaluate called directly --> override this')
    log.msg('observer %s\nhash %s\nkey %s'%(observer,hash,key))
    return DontTell
def logmsg(n, v, s):
    """Log the best value v found for parameter n, along with its score s."""
    message = "best value for parameter : %s is %s , with score %s" % (n, v, s)
    log.msg(message)
async def run():
    """First stage of the UI: show the login screen and perform login,
    then hand off to the main UI (ui_thread) plus the discord connection.

    Waits for the display task to publish disp.glblsec, builds the login
    form, loops until a successful login (or death.die_all), then tears
    the login screen down and runs the main UI. Sets death.die_all when
    the main stage finishes; re-raises after logging on any failure.
    """
    try:
        log.msg('Waiting display setup...')
        # Spin until the display task has created the root section.
        while disp.glblsec is None:
            await asyncio.sleep(0)
        try_login = Flag()
        log.msg('Setting up login screen sections...')
        sec = {}  # SECtion registry
        sec['title'] = disp.glblsec.sub(
            disp.TextBox, 0, 0, 100, 33,
            text='Welcome to TextCord!',
            color=disp.make_color(fg=disp.BLUE, attr=disp.UNDERLINE | disp.BOLD),
            align=disp.TextBox.ALIGN_CENTER, voff=50)
        sec['login'] = disp.glblsec.sub(
            disp.BorderedBox, 15, 33, 70, 50)
        # key 27 is escape
        sec['email'] = sec['login'].sub(
            disp.Activator, 0, 0, 100, 33)
        sec['email_prompt'] = sec['email'].sub(
            disp.TextBox, 0, 0, 25, 100,
            text='Email: ',
            color_active=disp.make_color(attr=disp.BOLD | disp.REVERSE),
            align=disp.TextBox.ALIGN_RIGHT)
        sec['email_input'] = sec['email'].sub(
            disp.InputBox, 25, 0, 75, 100, do_send_input=True)
        sec['email'].add_key_handler(
            ord('\n'), lambda: disp.set_active(sec['passwd']))
        sec['email'].add_key_handler(
            ord('\t'), lambda: disp.set_active(sec['passwd']))
        sec['passwd'] = sec['login'].sub(
            disp.Activator, 0, 33, 100, 33)
        # NOTE(review): the password prompt's keyword arguments were
        # redacted ("******") in the original source; reconstructed by
        # analogy with the email prompt above -- confirm.
        sec['passwd_prompt'] = sec['passwd'].sub(
            disp.TextBox, 0, 0, 25, 100,
            text='Password: ',
            color_active=disp.make_color(attr=disp.BOLD | disp.REVERSE),
            align=disp.TextBox.ALIGN_RIGHT)
        sec['passwd_input'] = sec['passwd'].sub(
            disp.MaskedInput, 25, 0, 75, 100, do_send_input=True)
        sec['passwd'].add_key_handler(
            ord('\n'), lambda: try_login.wave())
        sec['passwd'].add_key_handler(
            ord('\t'), lambda: disp.set_active(sec['btn_quit']))
        sec['btn_quit'] = sec['login'].sub(
            disp.TextBox, 0, 66, 50, 33,
            text='Quit',
            color_active=disp.make_color(attr=disp.REVERSE),
            align=disp.TextBox.ALIGN_CENTER, voff=50)
        sec['btn_quit'].add_key_handler(
            ord('\n'), death.set_die_all)
        sec['btn_quit'].add_key_handler(
            ord('\t'), lambda: disp.set_active(sec['btn_login']))
        sec['btn_login'] = sec['login'].sub(
            disp.TextBox, 50, 66, 50, 33,
            text='Login',
            color_active=disp.make_color(attr=disp.REVERSE),
            align=disp.TextBox.ALIGN_CENTER, voff=50)
        sec['btn_login'].add_key_handler(
            ord('\t'), lambda: disp.set_active(sec['email']))
        sec['btn_login'].add_key_handler(
            ord('\n'), lambda: try_login.wave())
        log.msg('Finalizing screen sections...')
        await disp.set_active(sec['email'])
        await disp.glblsec.draw()
        client = TextcordClient()
        log.msg('Entering deadwait loop...')
        while True:
            if death.die_all:
                # abort
                await client.close()
                return
            if try_login:
                await try_login.lower()
                error = False
                err_text = ''
                try:
                    await client.login(sec['email_input'].text,
                                       sec['passwd_input'].text)
                except discord.LoginFailure:
                    error = True
                    err_text = 'Incorrect login credentials!'
                except discord.HTTPException:
                    error = True
                    err_text = 'Could not connect to server.'
                except Exception:
                    # Narrowed from a bare except; still the catch-all
                    # that produces a user-facing error message.
                    error = True
                    err_text = 'Something went wrong!'
                if not error and not client.is_logged_in:
                    error = True
                    err_text = 'Failed to log in.'
                if error:
                    await disp.glblsec.sub(
                        disp.TextBox, 0, 83, 100, 17,
                        text=err_text,
                        color=disp.make_color(fg=disp.RED, attr=disp.BOLD),
                        align=disp.TextBox.ALIGN_CENTER, voff=50).draw()
                else:
                    break
            await asyncio.sleep(0)
        # we logged in successfully; clear this screen
        log.msg('Logged in.')
        disp.glblsec.disown_all()            # disconnect all from root
        await disp.set_active(disp.glblsec)  # deactivate login screen
        sec.clear()                          # delete all sections
        disp.stdscr.clear()                  # clear the screen
        # start discord connection and main UI
        try:
            done, pending = await asyncio.wait([
                ui_thread(client),
                client.connect()])
            assert not pending
            for future in done:
                exc = future.exception()
                if exc is not None:
                    raise exc
        finally:
            # (the original 'except: raise' was a no-op and was dropped)
            await client.close()
        # finish
        death.die_all = True
    except Exception as e:
        log.msg('App failed!')
        log.msg(str(e))
        log.msg(sys.exc_info()[0])
        log.msg(traceback.format_exc())
        raise
def render_GET(self, request): nodes = self.nodes.values() shuffle(nodes) log.msg("Received a request for nodes, responding...") if "format" in request.args: if request.args["format"][0] == "json": json_list = [] if "type" in request.args and request.args["type"][ 0] == "vendors": print "getting list of vendors" for node in nodes: if node.vendor is True: print "found vendor" node_dic = {} node_dic["ip"] = node.ip node_dic["port"] = node.port json_list.append(node_dic) sig = signing_key.sign(str(json_list)) resp = {"peers": json_list, "signature": hexlify(sig[:64])} request.write(json.dumps(resp, indent=4)) else: for node in nodes[:50]: node_dic = {} node_dic["ip"] = node.ip node_dic["port"] = node.port json_list.append(node_dic) sig = signing_key.sign(str(json_list)) resp = {"peers": json_list, "signature": hexlify(sig[:64])} request.write(json.dumps(resp, indent=4)) elif request.args["format"][0] == "protobuf": proto = peers.PeerSeeds() for node in nodes[:50]: peer = peers.PeerData() peer.ip_address = node.ip peer.port = node.port peer.vendor = node.vendor proto.peer_data.append(peer.SerializeToString()) sig = signing_key.sign("".join(proto.peer_data))[:64] proto.signature = sig uncompressed_data = proto.SerializeToString() request.write(uncompressed_data.encode("zlib")) else: proto = peers.PeerSeeds() if "type" in request.args and request.args["type"][0] == "vendors": for node in nodes: if node.vendor is True: peer = peers.PeerData() peer.ip_address = node.ip peer.port = node.port peer.vendor = node.vendor proto.peer_data.append(peer.SerializeToString()) sig = signing_key.sign("".join(proto.peer_data))[:64] proto.signature = sig uncompressed_data = proto.SerializeToString() request.write(uncompressed_data.encode("zlib")) else: for node in nodes[:50]: peer = peers.PeerData() peer.ip_address = node.ip peer.port = node.port peer.vendor = node.vendor proto.peer_data.append(peer.SerializeToString()) sig = signing_key.sign("".join(proto.peer_data))[:64] proto.signature 
= sig uncompressed_data = proto.SerializeToString() request.write(uncompressed_data.encode("zlib")) request.finish() return server.NOT_DONE_YET
import re

from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy.http import Request
from scrapy.http import HtmlResponse
from scrapy import log

from urlparse import urlparse
from urlparse import urljoin

from android_apps_crawler.items import AppItem
from android_apps_crawler import settings
from android_apps_crawler import custom_parser


class AndroidAppsSpider(Spider):
    """Crawls Android app markets, extracting APK download links.

    The scrape rules (per-domain XPath expressions and custom parser
    function names) come from settings.SCRAPE_RULES.
    """

    name = "android_apps_spider"
    scrape_rules = settings.SCRAPE_RULES

    def __init__(self, market=None, database_dir="../repo/databases/",
                 *args, **kwargs):
        super(AndroidAppsSpider, self).__init__(*args, **kwargs)
        self.allowed_domains = settings.ALLOWED_DOMAINS[market]
        self.start_urls = settings.START_URLS[market]
        # Publish the chosen market to the pipeline via settings.
        settings.MARKET_NAME = market
        settings.DATABASE_DIR = database_dir

    def parse(self, response):
        """Extract app items for the current domain, then follow links."""
        response_domain = urlparse(response.url).netloc
        app_items = []
        cookie = {}
        # First matching XPath rule for this domain wins.
        xpath_rule = self.scrape_rules['xpath']
        for domain_key in xpath_rule.keys():
            if domain_key in response_domain:
                app_items.extend(
                    self.parse_xpath(response, xpath_rule[domain_key]))
                break
        # Likewise for custom parser functions (looked up by name).
        custom_parser_rule = self.scrape_rules['custom_parser']
        for domain_key in custom_parser_rule.keys():
            if domain_key in response_domain:
                parser_func = getattr(custom_parser,
                                      custom_parser_rule[domain_key])
                app_items.extend(parser_func(response))
                break
        # Follow every link on the page, then emit the collected items.
        sel = Selector(response)
        for href in sel.xpath('//a/@href').extract():
            yield Request(urljoin(response.url, href),
                          meta=cookie, callback=self.parse)
        for item in app_items:
            yield item

    def parse_xpath(self, response, xpath):
        """Build AppItems from download URLs matched by the given XPath."""
        app_items = []
        for href in Selector(response).xpath(xpath).extract():
            url = urljoin(response.url, href)
            log.msg("Catch an application: %s" % url, level=log.INFO)
            app_item = AppItem()
            app_item['url'] = url
            app_items.append(app_item)
        return app_items