def get_info(self):
    logging.debug("Checking for StockPickr User %s", self.user)
    sql = "select uid from spr_user where name=%s;"
    self.id = self.db.query(sql, self.user, one=True)
    if not self.id:
        self.db.insert("spr_user", {"name": self.user})
        self.id = self.db.last_insert_id()
def get(self, version_id):
    self.check_version_id(version_id)
    cache_key = version_id + str(self.__class__)
    page = self.cache.get(cache_key)

    if self.cache_bust or not page:
        logging.debug('Cache miss with key: %s' % cache_key)
        retrieved_data = EmailTemplate.fetch_all(self.data_sources[version_id])
        trail_blocks = deduplication.build_unique_trailblocks(
            retrieved_data,
            self.priority_list[version_id],
            excluded=self.exclude_from_deduplication())
        today = datetime.datetime.now()
        date = today.strftime('%A %d %b %Y')

        template_name = self.template_names[version_id] + '.html'
        template = self.resolve_template(template_name)

        ads = {}
        if hasattr(self, 'ad_tag') and self.ad_tag:
            ad_fetcher = AdFetcher(self.ad_tag)
            for name, type in self.ad_config.iteritems():
                ads[name] = ad_fetcher.fetch_type(type)

        page = template.render(ads=ads, date=date, data=self.additional_template_data(), **trail_blocks)
        self.cache.add(cache_key, page, 300)
    else:
        logging.debug('Cache hit with key: %s' % cache_key)

    self.response.out.write(page)
def test_rollback(self):
    return
    try:
        vtgate_conn = get_connection()
        count = 10
        vtgate_conn.begin()
        vtgate_conn._execute(
            "delete from vt_insert_test", {},
            KEYSPACE_NAME, 'master',
            keyranges=[self.keyrange])
        kid_list = shard_kid_map[shard_names[self.shard_index]]
        for x in xrange(count):
            keyspace_id = kid_list[count % len(kid_list)]
            vtgate_conn._execute(
                "insert into vt_insert_test (msg, keyspace_id) values (%(msg)s, %(keyspace_id)s)",
                {'msg': 'test %s' % x, 'keyspace_id': keyspace_id},
                KEYSPACE_NAME, 'master',
                keyspace_ids=[pack_kid(keyspace_id)])
        vtgate_conn.commit()
        vtgate_conn.begin()
        vtgate_conn._execute(
            "delete from vt_insert_test", {},
            KEYSPACE_NAME, 'master',
            keyranges=[self.keyrange])
        vtgate_conn.rollback()
        results, rowcount = vtgate_conn._execute(
            "select * from vt_insert_test", {},
            KEYSPACE_NAME, 'master',
            keyranges=[self.keyrange])[:2]
        logging.debug("ROLLBACK TEST rowcount %d count %d" % (rowcount, count))
        self.assertEqual(
            rowcount, count,
            "Fetched rows(%d) != inserted rows(%d), rollback didn't work" % (rowcount, count))
    except Exception, e:
        logging.debug("Write failed with error %s" % str(e))
        raise
def schema_shell(self):
    """Performs the 'schema-shell' command."""
    schema_shell_home = self.env.get(SCHEMA_SHELL_HOME)
    assert (schema_shell_home is not None), \
        ("Environment variable undefined: %r" % SCHEMA_SHELL_HOME)
    assert os.path.isdir(schema_shell_home), \
        ("Invalid home directory for KijiSchema shell: %r" % schema_shell_home)
    schema_shell_script = os.path.join(schema_shell_home, "bin", "kiji-schema-shell")
    assert os.path.isfile(schema_shell_script), \
        ("KijiSchema shell not found: %r" % schema_shell_script)

    env = dict(self.env)

    classpath = env.get(KIJI_CLASSPATH, "").split(":") + list(self.express.get_classpath())
    env[KIJI_CLASSPATH] = ":".join(classpath)

    java_opts = env.get("JAVA_OPTS", "")
    # FIXME: I cannot find any trace of the Java system property "express.tmpjars"!
    # java_opts += (" -Dexpress.tmpjars=%s" % ???)

    # Relevant for KijiSchema 1.1 only and will be removed in Express 3.0:
    java_opts += " -Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED"
    env["JAVA_OPTS"] = java_opts

    cmd = [schema_shell_script]
    logging.debug(
        "Launching kiji-schema shell with:\n%s\nwith KIJI_CLASSPATH:\n%s",
        " \\\n\t".join(map(repr, cmd)),
        "\n".join(map(tab_indent, classpath)))
    logging.debug("Computed KIJI_CLASSPATH:")
    proc = subprocess.Popen(cmd, env=env)
    try:
        return proc.wait()
    except subprocess.SubprocessError:
        proc.kill()
def jar(self):
    """Performs the 'jar' command."""
    class_name = getattr(self.flags, "class")
    if (class_name is None) and (len(self.args) > 0):
        class_name = self.pop_args_head()
    assert (class_name is not None), ("No class name specified with [--class=]<class>.")

    lib_jars = []
    if self.flags.jars is not None:
        lib_jars.extend(self.flags.jars)

    classpath = list(self.express.get_classpath(lib_jars=lib_jars))

    java_opts = []
    if self.flags.java_opts is not None:
        java_opts = [self.flags.java_opts]

    user_args = list(self.args)
    logging.info("Running java class %r with parameters: %r", class_name, user_args)

    cmd = [
        "java",
        # This property is only needed in kiji-schema v1.1:
        "-Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED",
    ] + java_opts + [
        "-classpath", ":".join(classpath),
        class_name,
    ] + user_args
    logging.debug("Running command:\n%s\n", " \\\n\t".join(map(repr, cmd)))
    return subprocess.call(cmd)
def getContestData(contestCode, expiryTime=None, writeInFile=None):
    logging.debug("In getContestData(" + contestCode + ')')
    expiryTime, writeInFile = getGlobals(expiryTime, writeInFile)
    data = {}
    if expiryTime > 0:
        data = checkInFile('contest/' + contestCode, expiryTime)
        if data is not None:
            return data
        else:
            data = {}

    URL = "https://www.codechef.com/api/contests/" + contestCode
    data = json.loads(requests.get(URL, headers={'User-Agent': 'Mozilla/5.0'}).text)

    # Make start_time and end_time keys directly in data
    data['start_time'] = data['time']['start']
    data['end_time'] = data['time']['end']

    # Removing unnecessary keys.
    keysToRemove = ['problems_data', 'time', 'problemsstats', 'user', 'announcements',
                    'rules', 'autoRefresh', 'banner', 'todos']
    data = removeKeys(data, keysToRemove)

    # From here too.
    for contest in data['problems']:
        data['problems'][contest] = removeKeys(
            data['problems'][contest],
            ['status_url', 'submit_url', 'problem_url', 'allow_submission'])

    if writeInFile:
        writeToFile('contest/' + contestCode, data)
    logging.debug("getContestData() = " + json.dumps(data, indent=4))
    return data
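# The helper removeKeys is used above but not defined in this snippet.
# A minimal sketch of what it is assumed to do (drop the listed keys from the
# dict, ignoring keys that are absent) could look like this:
def removeKeys(data, keys):
    """Return data with the given keys removed; missing keys are ignored."""
    for key in keys:
        data.pop(key, None)
    return data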
def PublishManifest(self, manifest, version, build_id=None):
    """Publishes the manifest as the manifest for the version to others.

    Args:
      manifest: Path to manifest file to publish.
      version: Manifest version string, e.g. 6102.0.0-rc4
      build_id: Optional integer giving build_id of the build that is
        publishing this manifest. If specified and non-negative, build_id
        will be included in the commit message.
    """
    # Note: This commit message is used by master.cfg for figuring out when to
    # trigger slave builders.
    commit_message = 'Automatic: Start %s %s %s' % (self.build_names[0],
                                                    self.branch, version)
    if build_id is not None and build_id >= 0:
        commit_message += '\nCrOS-Build-Id: %s' % build_id

    logging.info('Publishing build spec for: %s', version)
    logging.info('Publishing with commit message: %s', commit_message)
    logging.debug('Manifest contents below.\n%s', osutils.ReadFile(manifest))

    # Copy the manifest into the manifest repository.
    spec_file = '%s.xml' % os.path.join(self.all_specs_dir, version)
    osutils.SafeMakedirs(os.path.dirname(spec_file))
    shutil.copyfile(manifest, spec_file)

    # Actually push the manifest.
    self.PushSpecChanges(commit_message)
def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
    """
    Run the vcpupin command and then check the result.
    """
    if vm_ref == "name":
        vm_ref = vm.name
    elif vm_ref == "uuid":
        vm_ref = vm.get_uuid()

    # Execute virsh vcpupin command.
    cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)

    if cmdResult.exit_status:
        if not status_error:
            # Command failed although this is a positive case.
            raise error.TestFail(cmdResult)
        else:
            # Command failed as expected in the negative case.
            return
    else:
        if status_error:
            # Command succeeded although this is a negative case.
            raise error.TestFail(cmdResult)
        else:
            # Command succeeded in the positive case.
            # "--config" will take effect only after the VM is destroyed.
            pid = None
            vcpu_pid = None
            if options == "--config":
                virsh.destroy(vm.name)
            else:
                pid = vm.get_pid()
                logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                vcpu_pid = vm.get_vcpus_pid()[vcpu]
            # Check the result of vcpupin command.
            check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)
def upsert_edit_project(self, filepath, filename, uuid, version, desc=None, opens_with=None):
    cursor = self.conn.cursor()

    matches = re.search(u'(\.[^\.]+)$', filename)
    file_xtn = ""
    if matches is not None:
        file_xtn = str(matches.group(1))
    else:
        raise ArgumentError("Filename %s does not appear to have a file extension" % filename)

    typenum = self.project_type_for_extension(file_xtn, desc=desc, opens_with=opens_with)

    # Does the project entry already exist? If so leave it (avoiding database bloat).
    cursor.execute("SELECT id FROM edit_projects WHERE filename=%s AND filepath=%s",
                   (filename, filepath, ))
    result = cursor.fetchone()
    if result is not None:
        logging.debug("Edit project {0}/{1} already exists in database, not touching it".format(filepath, filename))
        return result[0]

    try:
        cursor.execute(
            "insert into edit_projects (filename,filepath,type,lastseen,valid) values (%s,%s,%s,now(),true) returning id",
            (filename, filepath, typenum))
    except psycopg2.IntegrityError as e:
        # This is kept in case of race conditions.
        self.conn.rollback()
        cursor.execute(
            "update edit_projects set lastseen=now(), valid=true where filename=%s and filepath=%s returning id",
            (filename, filepath))

    result = cursor.fetchone()
    id = result[0]
    sqlcmd = "update edit_projects set uuid=%s, version=%s where id=%s"
    cursor.execute(sqlcmd, (uuid, version, id))
    self.conn.commit()
    return id
def __public_response(self, messages):
    message = None
    for m in messages:
        pretty_message = "%s [%s %s] %s" % (m.id, m.created_at, m.user.screen_name, m.text)
        logging.info("found public message: %s" % pretty_message)

        if not self.__analyzer.should_respond(m):
            logging.info("not responding")
            continue

        response = TwitterResponseAccessor.get_by_message_id(str(m.id))
        if not response:
            message = m
            break
        else:
            logging.debug("found response to public message %s" % m.id)

    sent_message = None
    if message:
        # TODO: search for username also
        username = message.user.screen_name
        parsed_tweet = parse_tweet(message.text)
        plain_tweet = parsed_tweet.plain_text
        speaker = self.__select_speaker()
        sources, mix = Mixer(speaker).mix_response(plain_tweet, min_results=1, max_length=130 - len(username))
        response_text = "@%s %s" % (username, mix)
        logging.info("responding to public message %s: %s" % (message.id, response_text))
        sent_message = self.__twitter.PostUpdate(response_text, message.id)
        TwitterResponseAccessor.create(
            str(message.id),
            response_id=str(sent_message.id),
            user=username,
            tweet_type=TwitterResponse.MENTION)
        self.__reporter.posted(response_text)

    return sent_message
def act(self, force_act=False, action=None, skip_responses=False):
    """
    returns: (action, response) tuple. response type depends on the action
    that was performed.
    """
    if not force_act:
        config = ConfigurationAccessor.get_or_create()
        if config and (config.is_tweeting is not None) and (not safe_int(config.is_tweeting)):
            logging.debug("config.is_tweeting is False; hiding")
            return ()

    result = []
    responded = False
    if not skip_responses:
        try:
            direct, response = self.respond()
            if (direct or response):
                # A response to a direct message or mention was generated.
                responded = True
                if direct:
                    result.append(direct.AsDict())
                if response:
                    result.append(response.AsDict())
        except Exception, e:
            logging.error(e)
def process_users(users_file, keytab_path):
    with open(users_file) as csvfile:
        r = csv.DictReader(csvfile, delimiter=',')
        for row in r:
            if row['Name'] == '#':
                continue
            d = {
                'username': row['Name'],
                'description': row['Description'],
                'first': row['Name'],
                'last': row['Name'],
                'realm': REALM,
                'ipa_server': IPA_SERVER,
                'path_prefix': keytab_path
            }
            if not user_exists(d['username']):
                create_user(d)
                # When a user is created in IPA, their password auto-expires.
                # We need to explicitly log in or push the expiration to a later
                # date so that the keytab can be retrieved and used later.
                change_krb_expiration(d['username'])
                create_keytab(d)
                for g in BASE_GROUPS:
                    add_group(d['username'], g)
            else:
                logging.debug('User %s already exists' % d['username'])
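# process_users above expects a CSV file with at least "Name" and "Description"
# columns; rows whose Name is "#" are treated as comments and skipped.
# A hypothetical users.csv (column names taken from the code, values invented)
# might look like:
#
#   Name,Description
#   #,this row is ignored
#   alice,Test account for Alice
#   bob,Test account for Bob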
def restore_zk(filename):
    """ Restores Zookeeper data from a fixed file in the local FS.

    Args:
      filename: A str, the path to the temporary Zookeeper backup file.
    """
    handle = kazoo.client.KazooClient(hosts=ZK_DEFAULT_HOST)
    handle.start()
    with open(filename, 'r') as f:
        for line in f.readlines():
            pair = json.loads(line)
            path = pair.keys()[0]
            value = pair.values()[0].decode('base64')
            try:
                handle.create(path, bytes(value), makepath=True)
                logging.debug("Created '{0}'".format(path))
            except kazoo.exceptions.NodeExistsError:
                try:
                    handle.set(path, bytes(value))
                    logging.debug("Updated '{0}'".format(path))
                except kazoo.exceptions.BadArgumentsError:
                    logging.warning("BadArgumentsError for path '{0}'".format(path))
            except kazoo.exceptions.NoNodeError:
                logging.warning("NoNodeError for path '{0}'. Parent nodes are "
                                "missing".format(path))
            except kazoo.exceptions.ZookeeperError:
                logging.warning("ZookeeperError for path '{0}'".format(path))
    handle.stop()
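# As implied by the parsing in restore_zk above, each line of the backup file is
# a one-entry JSON object mapping a znode path to its base64-encoded value.
# A minimal Python 2 sketch of producing such a line (the file name and sample
# data are hypothetical):
import json

def write_backup_line(f, path, value):
    """Append one znode as a single-line JSON object: {"<path>": "<base64 value>"}."""
    f.write(json.dumps({path: value.encode('base64')}) + '\n')

with open('/tmp/zk_backup.json', 'w') as backup_file:
    write_backup_line(backup_file, '/appscale/config', 'some-value')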
def traits(args):
    """
    %prog traits directory

    Make HTML page that reports eye and skin color.
    """
    p = OptionParser(traits.__doc__)
    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(not p.print_help())

    samples = []
    for folder in args:
        targets = iglob(folder, "*-traits.json")
        if not targets:
            continue
        filename = targets[0]
        js = json.load(open(filename))
        js["skin_rgb"] = make_rgb(
            js["traits"]["skin-color"]["L"],
            js["traits"]["skin-color"]["A"],
            js["traits"]["skin-color"]["B"])
        js["eye_rgb"] = make_rgb(
            js["traits"]["eye-color"]["L"],
            js["traits"]["eye-color"]["A"],
            js["traits"]["eye-color"]["B"])
        samples.append(js)

    template = Template(traits_template)
    fw = open("report.html", "w")
    print >> fw, template.render(samples=samples)
    logging.debug("Report written to `{}`".format(fw.name))
    fw.close()
def apply(self, ui):
    logging.debug(self.movement)
    if self.movement == 'last':
        ui.current_buffer.focus_last()
        ui.update()
    else:
        MoveCommand.apply(self, ui)
def graph_user(user, depth=0):
    logging.debug("Searching for %s", user)
    logging.debug("At depth %d", depth)
    followers = following = []
    result = [x.value for x in db.query(select_user % user)]
    if result:
        result = result.pop()
        following = result['following']
        followers = result['followers']

    if not GH.has_node(user):
        logging.debug("Adding %s to graph", user)
        GH.add_node(user)

    for follower in followers:
        if not GH.has_node(follower):
            GH.add_node(follower)
            logging.debug("Adding %s to graph", follower)
            if depth < max_depth:
                graph_user(follower, depth + 1)
        GH.add_edge(follower, user, {'weight': 2})

    for follow in following:
        if not GH.has_node(follow):
            GH.add_node(follow)
            logging.debug("Adding %s to graph", follow)
            if depth < max_depth:
                graph_user(follow, depth + 1)
        if GH.has_edge(follow, user):
            GH[follow][user]['weight'] += 1
        else:
            GH.add_edge(user, follow, {'weight': 1})
def __init__(self, openid_, issued, attrs=None, sreg_=None):
    logging.debug('init janrain openid object')
    self.openid = openid_
    self.issued = issued
    self.attrs = attrs or {}
    self.sreg = sreg_ or {}
    self.is_iname = (xri.identifierScheme(openid_) == 'XRI')
def upsert_prelude_project(self, path=None, filename=None, uuid=None, version=None, nclips=None):
    cursor = self.conn.cursor()
    self.conn.commit()
    # if uuid is None:
    #     raise DataError("You need to pass a valid uuid")

    # Does the project entry already exist? If so leave it (avoiding database bloat).
    cursor.execute("SELECT id FROM prelude_projects WHERE filename=%s AND filepath=%s",
                   (filename, path, ))
    result = cursor.fetchone()
    if result is not None:
        logging.debug("Prelude project {0}/{1} already exists in database, not touching it".format(path, filename))
        return result[0]

    try:
        sqlcmd = """insert into prelude_projects (filepath,filename,uuid,version,clips,lastseen)
                    values (%s,%s,%s,%s,%s,now()) returning id"""
        cursor.execute(sqlcmd, (path, filename, uuid, version, nclips))
    except psycopg2.IntegrityError as e:
        # If we violate unique keys, try to update on filename.
        self.conn.rollback()
        try:
            sqlcmd = """update prelude_projects
                        set filepath=%s, filename=%s, uuid=%s, version=%s, clips=%s, lastseen=now()
                        where filepath=%s and filename=%s returning id"""
            cursor.execute(sqlcmd, (path, filename, uuid, version, nclips, path, filename))
        except psycopg2.IntegrityError as e:
            # If that causes a violation, try to update on uuid.
            self.conn.rollback()
            sqlcmd = """update prelude_projects
                        set filepath=%s, filename=%s, uuid=%s, version=%s, clips=%s, lastseen=now()
                        where uuid=%s returning id"""
            cursor.execute(sqlcmd, (path, filename, uuid, version, nclips, uuid))

    self.conn.commit()
    result = cursor.fetchone()
    return result[0]  # return id of inserted row
def _LoadFromFile(self):
    """Read the version file and set the version components"""
    with open(self.version_file, 'r') as version_fh:
        for line in version_fh:
            if not line.strip():
                continue

            match = self.FindValue('CHROME_BRANCH', line)
            if match:
                self.chrome_branch = match
                logging.debug('Set the Chrome branch number to:%s', self.chrome_branch)
                continue

            match = self.FindValue('CHROMEOS_BUILD', line)
            if match:
                self.build_number = match
                logging.debug('Set the build version to:%s', self.build_number)
                continue

            match = self.FindValue('CHROMEOS_BRANCH', line)
            if match:
                self.branch_build_number = match
                logging.debug('Set the branch version to:%s', self.branch_build_number)
                continue

            match = self.FindValue('CHROMEOS_PATCH', line)
            if match:
                self.patch_number = match
                logging.debug('Set the patch version to:%s', self.patch_number)
                continue

    logging.debug(self.VersionString())
def execute(self):
    targets = self.get_targets(self.is_non_synthetic_python_target)
    with self.invalidated(targets=targets) as invalidation_check:
        if not invalidation_check.invalid_vts:
            logging.debug(self.NOOP_MSG_HAS_TARGET_BUT_NO_SOURCE)
            return

        invalid_tgts = [vt.target for vt in invalidation_check.invalid_vts]
        sources = self._calculate_isortable_python_sources(invalid_tgts)
        if not sources:
            logging.debug(self.NOOP_MSG_HAS_TARGET_BUT_NO_SOURCE)
            return

        isort = self.context.products.get_data(IsortPrep.Isort)
        args = self.get_passthru_args() + sources

        # NB: We execute isort out of process to avoid unwanted side-effects from importing it:
        #   https://github.com/timothycrosley/isort/issues/456
        with pushd(get_buildroot()):
            workunit_factory = functools.partial(self.context.new_workunit,
                                                 name='run-isort',
                                                 labels=[WorkUnitLabel.TOOL, WorkUnitLabel.LINT])
            cmdline, exit_code = isort.run(workunit_factory, args)
            if exit_code != 0:
                raise TaskError('{} ... exited non-zero ({}).'.format(cmdline, exit_code),
                                exit_code=exit_code)
def process_request(self, request):
    # SESSION_KEY = '_auth_user_id'
    # BACKEND_SESSION_KEY = '_auth_user_backend'
    token = request.GET.get('token', False)
    token_id = request.GET.get('id', False)
    timestamp = request.GET.get('timestamp', False)

    if token and token_id and timestamp:
        logging.debug('SSO: user %s login attempt via SSO with timestamp %s and token %s \n' % (
            token_id, timestamp, token))
        if self.check_token(token, token_id, timestamp):
            # Everything passed, authenticate user.
            logging.debug('SSO: user %s token and timestamp pass \n' % token_id)
            logging.debug('SSO: Attempting to authenticate as %s \n' % token_id)
            try:
                user = self.authenticate(token_id)
            except Exception as e:
                logging.debug('SSO: user %s does not exist, trying to create \n' % token_id)
                rooibos_LDAP.new_account_from_ldap(token_id)
            if user.username == token_id:
                try:
                    # THIS WAS THE KEY TO IT WORKING
                    user.backend = settings.SSO_BACKEND
                    login(request, user)
                    logging.debug('SSO: process_request - user.backend = %s' % user.backend)
                    # return None
                    # logging.debug(user.last_login)
                    # logging.debug(request.session['_auth_user_id'])
                    # logging.debug(request.session['_auth_user_backend'])
                except Exception:
                    raise
def call(self, function, params=None):
    self.requestPerMinute += 1
    now = datetime.utcnow()

    if self.requestPerMinute >= self.requestLimit:
        waittime = 60 - now.second
        logging.warning("Limit for request per minute exceeded. Waiting for: {0} sec.".format(waittime))
        time.sleep(waittime)
        now = datetime.utcnow()

    if self.checkMinute != now.minute:
        self.requestPerMinute = 0
        self.checkMinute = now.minute

    payload = ''
    try:
        p = "" if not params else '?' + "&".join(
            ["{key}={value}".format(key=k, value=v) for (k, v) in params.iteritems()])
        url = "{base}.{func}{params}".format(base=self.baseConfig["url"], func=function, params=p)
        logging.debug("{0} {1} API call:{2}".format(self.checkMinute, self.requestPerMinute, url))

        request = urllib2.Request(url, None, self.baseConfig["headers"])
        stream = urllib2.urlopen(request)
        payload = stream.read()
        data = json.loads(payload)

        if isinstance(data, dict) and 'ruid' in data:
            logging.error('Api call failed with error: {0} Code: {1}'.format(data['message'], data['code']))
            return None
        return data
    except Exception as e:
        logging.error('Error: {0} Context: {1}'.format(e, payload))
        return None
def setup_logging(log_level):
    """Initializes the logging system.

    Args:
      log_level: Logging level.
    """
    global _LOGGING_INITIALIZED
    if _LOGGING_INITIALIZED:
        logging.debug("setup_logging: logging system already initialized")
        return

    log_formatter = logging.Formatter(
        fmt="%(asctime)s %(levelname)s %(filename)s:%(lineno)s : %(message)s",
    )

    # Override the log date formatter to include the time zone:
    def format_time(record, datefmt=None):
        time_tuple = time.localtime(record.created)
        tz_name = time.tzname[time_tuple.tm_isdst]
        return "%(date_time)s-%(millis)03d-%(tz_name)s" % dict(
            date_time=time.strftime("%Y%m%d-%H%M%S", time_tuple),
            millis=record.msecs,
            tz_name=tz_name,
        )
    log_formatter.formatTime = format_time

    logging.root.handlers.clear()
    logging.root.setLevel(log_level)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    console_handler.setLevel(log_level)
    logging.root.addHandler(console_handler)

    _LOGGING_INITIALIZED = True
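# A minimal usage sketch for setup_logging above, assuming the module-level flag
# _LOGGING_INITIALIZED is defined as shown; the second call returns early and
# only logs a debug message instead of installing a second handler.
import logging

_LOGGING_INITIALIZED = False

if __name__ == "__main__":
    setup_logging(logging.DEBUG)
    logging.info("logging is now configured")
    setup_logging(logging.DEBUG)  # already initialized; no-op apart from the debug log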
def make_parser():
    """ Construct the command line parser """
    logging.info("Constructing parser")
    description = "Store and retrieve snippets of text"
    parser = argparse.ArgumentParser(description=description)

    subparsers = parser.add_subparsers(help="Available commands")

    # Subparser for the put command
    logging.debug("Constructing put subparser")
    put_parser = subparsers.add_parser("put", help="Store a snippet")
    put_parser.add_argument("name", help="The name of the snippet")
    put_parser.add_argument("snippet", help="The snippet text")
    put_parser.add_argument("filename", default="snippets.csv", nargs="?",
                            help="The snippet filename")
    put_parser.set_defaults(command="put")

    # Subparser for the get command
    logging.debug("Constructing get subparser")
    get_parser = subparsers.add_parser("get", help="Retrieve a snippet")
    get_parser.add_argument("name", help="The name of the snippet")
    get_parser.add_argument("filename", default="snippets.csv", nargs="?",
                            help="The snippet filename")
    get_parser.set_defaults(command="get")

    return parser
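# A small usage sketch for make_parser above: parse a hypothetical "put"
# invocation and inspect the resulting namespace (argparse and logging are the
# imports the function itself relies on).
import argparse
import logging

if __name__ == "__main__":
    parser = make_parser()
    arguments = parser.parse_args(["put", "greeting", "Hello world", "my_snippets.csv"])
    print(vars(arguments))
    # {'name': 'greeting', 'snippet': 'Hello world', 'filename': 'my_snippets.csv', 'command': 'put'}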
def shell(self):
    """Performs the 'shell' command."""
    lib_jars = []
    if self.flags.jars is not None:
        lib_jars.extend(self.flags.jars.split(","))

    env = dict(self.env)

    express_cp = self.express.get_classpath(lib_jars=lib_jars)
    env["EXPRESS_CP"] = ":".join(express_cp)

    dist_cache_paths = self.express.list_paths_for_dist_cache(lib_jars=lib_jars)
    env["TMPJARS"] = ",".join(dist_cache_paths)

    # The EXPRESS_MODE environment variable must be a path to the mode-specific Scala script:
    script_name = self._MODE_SCRIPT.get(self.flags.mode)
    assert (script_name is not None), ("Invalid Express mode: %r." % self.flags.mode)
    script_path = os.path.join(self.express.home_dir, "bin", script_name)
    assert os.path.isfile(script_path), ("Script not found: %r" % script_path)
    env["EXPRESS_MODE"] = script_path

    # The express shell binary needs to be in the same directory as this script.
    shell_path = os.path.join(self.express.home_dir, "bin", "express-shell")
    assert os.path.isfile(shell_path), ("Shell not found: %r" % shell_path)

    cmd = [shell_path] + list(self.args)
    logging.debug(
        "Launching the express shell using the command:\n%s",
        " \\\n\t".join(map(repr, cmd)))
    proc = subprocess.Popen(cmd, env=env)
    try:
        return proc.wait()
    except subprocess.SubprocessError:
        proc.kill()
def execute(self, email_models):
    logging.debug("In Destiny::execute()")
    if not email_models:
        return

    emails_id = []
    destinations = {}
    for destiny in self._plugins.keys():
        destinations.setdefault(destiny, email_models[-1].get(destiny))
    emails_id.append(email_models[-1].email_id())

    for email_model in email_models[:-1]:
        for destiny in self._plugins.keys():
            d_tables = destinations.get(destiny).get("tables")
            for d_table in d_tables:
                for k, v in d_table.iteritems():
                    m_tables = email_model.get(destiny).get("tables")
                    for m_table in m_tables:
                        if k in m_table:
                            d_table.setdefault(k, []).extend(m_table[k])
        emails_id.append(email_model.email_id())

    for destiny, models in destinations.iteritems():
        for forward in self._plugins.get(destiny):
            try:
                forward.execute(models)
            except Exception, e:
                logging.error("!! Error-execute: %s" % (str(e),))
                logging.info("Add emails in queue error: %s" % str(emails_id))
                for email_id in emails_id:
                    self.add_email_error(email_id)
                continue
def refetch(source):
    """Refetch the author's URLs and look for new or updated syndication links
    that might not have been there the first time we looked.

    Args:
      source: models.Source subclass. Changes to property values (e.g. domains,
        domain_urls, last_syndication_url) are stored in source.updates; they
        should be updated transactionally later.

    Return:
      a dict of syndicated_url to a list of new models.SyndicatedPosts
    """
    if not source.updates:
        source.updates = {}

    logging.debug('attempting to refetch h-feed for %s', source.label())
    results = {}
    for url in _get_author_urls(source):
        results.update(_process_author(source, url, refetch=True))

    now = util.now_fn()
    logging.debug('updating source last_hfeed_fetch %s', now)
    source.updates['last_hfeed_fetch'] = now

    return results
def test_restart(self):
    """test_restart tests that when starting a second vttablet with the same
    configuration as another one, it will kill the previous process and take
    over listening on the socket.

    If vttablet listens to other ports (like gRPC), this feature will break.
    We believe it is not widely used, so we're OK with this for now.
    (container based installations usually handle tablet restarts by using a
    different set of servers, and do not rely on this feature at all).
    """
    if environment.topo_server().flavor() != 'zookeeper':
        logging.info("Skipping this test in non-github tree")
        return
    if tablet_62344.grpc_enabled():
        logging.info("Skipping this test as second gRPC port interferes")
        return

    utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    proc1 = tablet_62344.start_vttablet()
    proc2 = tablet_62344.start_vttablet()
    for timeout in xrange(20):
        logging.debug("Sleeping waiting for first process to die")
        time.sleep(1.0)
        proc1.poll()
        if proc1.returncode is not None:
            break
    if proc1.returncode is None:
        self.fail("proc1 still running")
    tablet_62344.kill_vttablet()
def tearDownModule():
    global vtgate_server
    logging.debug("in tearDownModule")
    if utils.options.skip_teardown:
        return

    logging.debug("Tearing down the servers and setup")
    utils.vtgate_kill(vtgate_server)
    tablet.kill_tablets([shard_0_master, shard_0_replica,
                         shard_1_master, shard_1_replica])
    teardown_procs = [shard_0_master.teardown_mysql(),
                      shard_0_replica.teardown_mysql(),
                      shard_1_master.teardown_mysql(),
                      shard_1_replica.teardown_mysql(),
                      ]
    utils.wait_procs(teardown_procs, raise_on_error=False)

    environment.topo_server_teardown()

    utils.kill_sub_processes()
    utils.remove_tmp_files()

    shard_0_master.remove_tree()
    shard_0_replica.remove_tree()
    shard_1_master.remove_tree()
    shard_1_replica.remove_tree()
def extract(vm, remote_path, dest_dir):
    """
    Extract the autotest .tar.bz2 file on the guest, ensuring the final
    destination path will be dest_dir.

    :param vm: VM object
    :param remote_path: Remote file path
    :param dest_dir: Destination dir for the contents
    """
    basename = os.path.basename(remote_path)
    logging.debug("Extracting %s on VM %s", basename, vm.name)
    session.cmd("rm -rf %s" % dest_dir, timeout=240)
    dirname = os.path.dirname(remote_path)
    session.cmd("cd %s" % dirname)
    session.cmd("mkdir -p %s" % os.path.dirname(dest_dir))
    e_cmd = "tar xjvf %s -C %s" % (basename, os.path.dirname(dest_dir))
    output = session.cmd(e_cmd, timeout=240)
    autotest_dirname = ""
    for line in output.splitlines()[1:]:
        autotest_dirname = line.split("/")[0]
        break
    if autotest_dirname != os.path.basename(dest_dir):
        session.cmd("cd %s" % os.path.dirname(dest_dir))
        session.cmd("mv %s %s" % (autotest_dirname,
                                  os.path.basename(dest_dir)))