def save_position(level):
    # dump all the province data into Mongo
    # get all position keys
    if level == 'province':
        positions = r_db.keys('??0000')
    elif level == 'city':
        positions = r_db.keys('????00')
    # process every position
    for position in positions:
        # get the count for the position
        count = r_db.get(position)
        # get the position info
        position_info = m_db.position.find_one({'_id': int(position)})
        if not position_info:
            position_info = {
                '_id': int(position),
                'position': '未知'.decode('utf8')}
        # build the mongo doc
        position_info.update({'count': int(count)})
        # save the doc and clean up the redis data
        if level == 'province':
            ret = m_db.card.province.save(position_info)
            r_db.move(position, 2)
        elif level == 'city':
            parent_pos = int(position) / 10000 * 10000
            parent_info = m_db.position.find_one({'_id': parent_pos})
            position_info['position'] = '{parent} {self}'.format(
                parent=parent_info['position'].encode('utf8'),
                self=position_info['position'].encode('utf8'),)
            ret = m_db.card.city.save(position_info)
            r_db.move(position, 3)
        logbook.info(ret)

def create_user(self, email, customer_id, tenant_id, password, enabled=True):
    name = self.make_id(customer_id, email, "user")
    user = find_first(
        self.get_users(),
        lambda x: getattr(x, "email", None) == email and
        getattr(x, "name", None) == name)
    if user is None:
        user = self.client_keystone.users.create(
            name=name, password=password, email=email,
            tenant_id=tenant_id, enabled=enabled)
        log.info('OpenStack create user. email: {}, user_id: {}, tenant_id: {}',
                 email, user.id, tenant_id)
    else:
        log.info('User for email {} already exists: {}', email, user.id)
        self.client_keystone.users.update_password(user, password)
        user = self.client_keystone.users.update_tenant(user, tenant_id)
    return user

def _check_already_logged_in(self, username, relogin=False):
    horizon_dashboard_url = posixpath.join(self.horizon_url, 'project/')
    try:
        # ekosareva: passing cookies as a request param is a workaround. There is a bug in the
        # requests lib: the response's cookie doesn't override the session's cookie with the
        # same name, so we end up with a 'multiple cookie' CookieConflictError exception.
        init_csrftoken = self.session.cookies.get('csrftoken')
        init_sessionid = self.session.cookies.get('sessionid')
        self.session.cookies.clear()
        logbook.debug("Request project page in horizon for {}. URL: {}",
                      username, horizon_dashboard_url)
        horizon_response = self.session.get(
            horizon_dashboard_url, verify=False, stream=False,
            cookies={'csrftoken': init_csrftoken, 'sessionid': init_sessionid})
    except requests.exceptions.RequestException as e:
        logbook.warning('Request exception happens during getting to Horizon on {} for user "{}": {}',
                        horizon_dashboard_url, username, e)
        raise errors.HorizonRequestError()

    if horizon_response.status_code != 200:
        logbook.warning('Request to {} is not succeed for user "{}": status code = {}',
                        horizon_dashboard_url, username, horizon_response.status_code)
        raise errors.HorizonRequestError()

    if horizon_response.url != horizon_dashboard_url:
        logbook.info("Request was redirected: {}", horizon_response.url)
        self.region = self._get_region(horizon_response.text)
        return False

    if not relogin and self._get_logged_in_username(horizon_response.text) != username:
        self.region = ''
        self.session.cookies.clear()
        logbook.warning('User "{}" has cookies for another user. Perform re-login.', username)
        return self._check_already_logged_in(username, relogin=True)

    return True

def send_email_limit_notification(email, days, language):
    block_date = arrow.utcnow() + datetime.timedelta(days=days)
    subject, body = MessageTemplate.get_rendered_message(
        MessageTemplate.CUSTOMER_BALANCE_LIMIT, language=language,
        block_date=block_date.datetime)
    logbook.info("Sending email with balance limit notification to {}", email)
    send_email.delay(email, subject, body)

def remove_label_due_to_comment(self, remove_label, comment, issue_working_state):
    remove_label_proper = self.get_label_by_name(remove_label)
    if remove_label_proper not in self.known_labels:
        logbook.info(u'Ignoring unknown label %s in comment %s by %s.' %
                     (remove_label, comment.id, comment.user.login))
        self.send_message(
            comment.user, u'Unknown label',
            u'(Your comment)[%s] appears to request that the label `%s` is removed '
            u'from the issue but this does not seems to be a valid label.' %
            (comment.url, remove_label))
        return
    if not self.user_may_alter_labels(comment.user):
        logbook.warning(
            u"Ignoring unathorised attempt to alter labels by %s through comment %s." %
            (comment.user.login, comment.url))
        self.send_message(
            comment.user, u'Unable to alter label',
            u'(Your comment)[%s] appears to request that the label `%s` is removed '
            u'from the issue but you do not have the required authorisation.' %
            (comment.url, remove_label_proper))
    else:
        logbook.info("Removing label %s due to comment %s by %s" %
                     (remove_label_proper, comment.id, comment.user.login))
        self.remove_label(remove_label_proper, issue_working_state)

def create_bucket(bucket, mediatype, title, description='',
                  creator=settings.archivedotorg_creator,
                  date=date.today().strftime("%Y-%m-%d"), keywords=''):
    """Create the bucket in archive.org."""
    if exists(bucket):
        info("Bucket " + bucket + " already exists.")
        return
    debug("Creating bucket " + bucket + " in archive.org.")
    headers = {
        "x-archive-meta-mediatype": mediatype,
        "x-archive-meta-collection": collection[mediatype],
        "x-archive-meta-title": str(title),
        "x-archive-meta-description": str(description),
        "x-archive-meta-creator": str(creator),
        "x-archive-meta-date": str(date),
        "x-archive-meta-subject": str(keywords),
        "x-archive-meta-licenseurl": "http://creativecommons.org/licenses/by-nc/3.0",
    }
    conn.create_bucket(bucket, headers)

def delete_snapshots(self, tenant_id, tenant_name=None):
    snapshots = self.get_snapshots(tenant_id=tenant_id)
    for snapshot in snapshots:
        log.info('Deleting snapshot {} of tenant {} (id: {})',
                 snapshot.id, tenant_name, tenant_id)
        self.client_cinder.volume_snapshots.delete(snapshot.id)
    return len(snapshots)

def do_deferred_changes(cls, deferred_changes):
    logbook.info("Process pending deferred change {}", deferred_changes)
    deferred_changes.customer.update_tariff(deferred_changes.tariff_id,
                                            deferred_changes.user_id,
                                            deferred_changes.comment)
    Deferred.delete_by_customer(deferred_changes.customer_id)

def run_usage_collection(self, end=None):
    # Run usage collection on all tenants present in Keystone.
    db.session.close()
    tenants = Tenant.all_active_tenants()
    usage = {}

    for tenant in tenants:
        try:
            tenant_id = tenant.tenant_id  # session can be closed during next call, so we should cache tenant_id
        except ObjectDeletedError as e:
            logbook.warning("Tenant {} was removed from db (probably during cleanup after test): {}",
                            tenant_id, e)
            continue
        next_run_delay = None
        with TenantMutex(tenant) as mutex:
            if mutex:
                logbook.debug("Processing tenant: {}", tenant_id)
                tenant_usage = self.collect_usage(tenant, mutex, end)
                usage[tenant_id] = tenant_usage
                db.session.commit()
                next_run_delay = (conf.fitter.min_tenant_interval
                                  if tenant_usage else conf.fitter.tenant_interval)
                logbook.debug("Create mutex for tenant {} to prevent very often access to ceilometer. Delay: {}",
                              tenant, next_run_delay)
        if next_run_delay and not conf.test:
            # Re-acquire the mutex with a TTL so the tenant is skipped until the delay expires.
            mutex = TenantMutex(tenant)
            mutex.acquire(ttl_ms=next_run_delay * 1000)

    db.session.close()
    logbook.info("Usage collection run complete.")
    return usage

def register_file():
    beam_id = request.json['beam_id']
    beam = db.session.query(Beam).filter_by(id=beam_id).first()
    if not beam:
        logbook.error('Transporter attempted to post to unknown beam id {}', beam_id)
        abort(http.client.BAD_REQUEST)

    if beam.pending_deletion or beam.deleted:
        abort(http.client.FORBIDDEN)

    file_name = request.json['file_name']
    f = db.session.query(File).filter_by(beam_id=beam_id, file_name=file_name).first()
    if not f:
        logbook.info("Got upload request for a new file: {} @ {}", file_name, beam_id)
        f = File(beam_id=beam_id, file_name=file_name, size=None, status="pending")
        db.session.add(f)
        db.session.commit()
    else:
        logbook.info("Got upload request for an existing file: {} @ {} ({})",
                     file_name, beam_id, f.status)

    if not f.storage_name:
        f.storage_name = "{}/{}-{}".format(
            _assure_beam_dir(beam.id), f.id,
            f.file_name.replace("/", "__").replace("\\", "__"))
        db.session.commit()

    if not beam.combadge_contacted:
        beam.combadge_contacted = True
        db.session.commit()

    return jsonify({'file_id': str(f.id),
                    'should_beam': f.status != 'uploaded',
                    'storage_name': f.storage_name})

def update(object_type, args, object_id):
    """Handle the updating of an object."""
    args = convert_arg_strings_to_dict(args)
    UPDATE_URL = uri_for_resource(object_type, object_id=object_id, args=args)
    if UPDATE_URL is None:
        # special cases
        if object_type == "email_job":
            check_requirements(["move_job"], args)
            UPDATE_URL = "users/current/move_jobs/%s/email_part/" % args['move_job']
        elif object_type == "component_job":
            check_requirements(["move_job", "job_type"], args)
            UPDATE_URL = "users/current/move_jobs/%s/%s_part/" % (args['move_job'],
                                                                  args['job_type'])
            del args["job_type"]
        elif object_type == "email_folder":
            check_requirements(["move_job", "email_account"], args)
            UPDATE_URL = "users/current/move_jobs/%s/accounts/%s/email_folders/%s/" % (
                args['move_job'], args['email_account'], object_id)
            del args["move_job"]
            del args["email_account"]
    introspect_dictionary_for_uris(args)
    put(url=url(UPDATE_URL), data=args)
    logbook.info("%s with id %s was updated." % (object_type, object_id))

def create_flavor(self, params=None):
    can_modify = False
    immutable = params.pop('immutable', False)
    flavor_id = params["flavor_id"]
    flavor = self.get_flavor(flavor_id)
    if not flavor:
        service_info = self.client.service.create_vm(**params)
        logbook.info('Flavor "{}" created with id={}'.format(
            service_info['localized_name']['en'], service_info['service_id']))
        can_modify = True
    else:
        service_info = flavor
        params.pop('flavor_id')
        diff = self.compare_fields(service_info, params, ['localized_name'])
        diff.extend(self.compare_fields(service_info['flavor'], params,
                                        list(service_info['flavor'].keys())))
        if diff:
            diff_str = self.diff_to_str(
                'Flavor "{}" (id:{})'.format(service_info['localized_name']['en'],
                                             service_info['service_id']),
                diff)
            logbook.info(diff_str)
            if not service_info['mutable']:
                logbook.warning('Service is immutable')
            if service_info['mutable'] and self.update:
                self.client.service.update_vm(service_info['service_id'], **params)
    if immutable and can_modify:
        self.client.service.immutable(service_info['service_id'])

def create_service(self, params=None):
    can_modify = self.update
    immutable = params.pop('immutable', False)
    service_list = self.client.service.list()
    service_list = filter(
        lambda service: service['localized_name'] == params['localized_name'],
        service_list['items'])
    service_info = next(service_list, None)
    if not service_info:
        service_info = self.client.service.create(**params)
        logbook.info('Service "{}" created with id={}'.format(
            service_info['localized_name']['en'], service_info['service_id']))
        can_modify = True
    else:
        diff = self.compare_fields(service_info, params,
                                   ['localized_name', 'description'])
        if service_info['measure']['measure_id'] != params['measure']:
            diff.append(('measure', service_info['measure']['measure_id'],
                         params['measure']))
        if diff:
            diff_str = self.diff_to_str(
                'Service <{}>'.format(service_info['service_id']), diff)
            logbook.info(diff_str)
            if not service_info['mutable']:
                logbook.warning('Service is immutable')
            if service_info['mutable'] and self.update:
                self.client.service.update(service_info['service_id'], **params)
    if immutable and can_modify:
        self.client.service.immutable(service_info['service_id'])

def login_os_user(self, username, password):
    if self._check_already_logged_in(username):
        logbook.info('User "{}" is logged in already and has valid cookies.', username)
        return {}

    horizon_auth_login = posixpath.join(self.horizon_url, 'auth/login/')
    data = {
        'region': self.region,
        'username': username,
        'password': password,
        'csrfmiddlewaretoken': self.session.cookies.get('csrftoken')
    }
    try:
        data_without_password = data.copy()
        data_without_password.pop("password")
        logbook.info("Try authorize in horizon: {}, {}",
                     horizon_auth_login, data_without_password)
        horizon_auth_response = self.session.post(horizon_auth_login, data=data,
                                                  verify=False, stream=False)
    except requests.exceptions.RequestException as e:
        logbook.warning('Request exception happens during posting data to {} for user "{}": {}',
                        horizon_auth_login, username, e)
        raise errors.HorizonRequestError()

    if horizon_auth_response.status_code != 200 or horizon_auth_response.url == horizon_auth_login:
        logbook.warning('Unable to login into Horizon on {} for login = {} with status = {}: {}. Response url: {}',
                        horizon_auth_login, username, horizon_auth_response.status_code,
                        horizon_auth_response.text, horizon_auth_response.url)
        raise errors.HorizonUnauthorized()

    cookie = horizon_auth_response.headers.get('Set-Cookie')
    logbook.debug("Horizon cookies: {}", cookie)
    return http.cookies.SimpleCookie(cookie)

def check_customers_for_balance(self, time_now=None, name_prefix=None):
    logbook.info("Celery task: check customers for balance.")
    for customer in Customer.query.filter_by(blocked=False):
        is_send, lifetime = check_account_balance_limit(customer)
        if is_send:
            send_email_limit_notification(customer.email, int(lifetime),
                                          customer.locale_language())

def save_image():
    password = request.forms.get('password')
    if password != config['Password']:
        end(403, "wrong password!")

    upfile = request.files.get('upfile')
    if not upfile:
        end(401, "no file in the request!")

    path = os.path.join(config['MediaRoot'], upfile.raw_filename)
    if not os.path.exists(path):
        filesize = -1
        try:
            filesize = int(request.forms.get('filesize'))
        except TypeError:
            end(400, "missing file size in the request!")
        # save file
        info("upfile path: " + path)
        upfile.save(path)
        # check file size in request against written file size
        if filesize != os.stat(path).st_size:
            end(411, "file sizes do not match!")
    else:
        warn("file " + path + " already exists")

def _populate_db(num_sessions=50, delay_between_sessions=(5, 60),
                 tests_per_session=(1, 50), test_duration=(10, 60),
                 fail_percent=(0, 20)):
    import flux
    flux.current_timeline.set_time(flux.current_timeline.time() - (24 * 60 * 60),
                                   allow_backwards=True)
    flux.current_timeline.set_time_factor(0)
    with _get_client_context() as client:
        for session_index in range(_pick(num_sessions)):
            session_fail_percent = _pick(fail_percent)
            logbook.info('Populating session #{}', session_index + 1)
            if session_index > 0:
                flux.current_timeline.sleep(_pick(delay_between_sessions))
            session = client.report_session_start()
            for test_index in range(_pick(tests_per_session)):
                logbook.info('Populating test #{}:#{}', session_index + 1, test_index + 1)
                test = session.report_test_start()
                flux.current_timeline.sleep(_pick(test_duration))
                if session_fail_percent == 100 or random.randint(0, 100) < session_fail_percent:
                    test.add_error()
                test.report_end()
            session.report_end()

def install_issue_defaults(self, issue):
    """Assign default issue labels, milestone and assignee, if any."""
    defs = self.settings.NEW_ISSUE_DEFAULTS
    patch = {}
    milestone_title = defs.get('milestone')
    if milestone_title:
        milestone = self.github.Milestones.get_or_create_in_repository(
            self.repo_user, self.repo_name, milestone_title)
        patch['milestone'] = milestone.number
    if defs.get('labels') is not None:
        patch['labels'] = map(unicode, defs['labels'])
    if defs.get('assignee') is not None:
        patch['assignee'] = defs['assignee']
    if len(patch):
        if not self.dry_run:
            try:
                issue.patch(**patch)
            except:
                logbook.error(u"Unable to change issue %s with attributes %r" %
                              (issue, patch))
                raise
        logbook.info(u"Installed defaults %r for issue %s." % (patch, issue))

def delete_security_groups(self, tenant_id, tenant_name=None):
    security_groups = self.get_neutron_security_groups(tenant_id=tenant_id)
    for security_group in security_groups:
        log.info('Deleting security group {} of tenant {} (id: {})',
                 security_group['id'], tenant_name, tenant_id)
        self.client_neutron.delete_security_group(security_group['id'])
    return len(security_groups)

def delete_vpns(self, tenant_id, tenant_name=None):
    vpns = self.get_vpns(tenant_id)
    for vpn in vpns:
        log.info("Deleting vpn {} of tenant {} (id: {})",
                 vpn['id'], tenant_name, tenant_id)
        self.client_neutron.delete_vpnservice(vpn['id'])
    return len(vpns)

def current_stat(report_type, report_format, locale=None, force=False):
    if not Report.is_supported(report_type, report_format):
        raise errors.ReportFormatIsNotSupported()

    report_cache = ReportCache()
    report_task = ReportTask()
    report_id = ReportId(None, None, report_type, report_format, locale)

    if not force:
        data = report_cache.get_report(report_id)
        if data:
            if report_format == "json":
                return {
                    "status": "completed",
                    "report": json.loads(data.decode("utf-8"))
                }
            filename = "%s.%s" % (report_type, report_format)
            content_disposition = make_content_disposition(
                filename, bottle.request.environ.get('HTTP_USER_AGENT'))
            return bottle.HTTPResponse(
                body=data,
                content_type=Report.content_types[report_format],
                content_disposition=content_disposition)

    status = report_task.task_status(report_id)
    if not status:
        result = report_file_generate.delay(report_id)
        logbook.info("Created report_file task {} for {}", result.id, report_id)
        report_task.set(report_id, result.id)
        status = "started"

    return {"status": status}

def read_to_json(scale, dht):
    """
    acquire measures and return them as a json serialized string

    :param scale: BeeHiveScale object
    :type scale: BeeHiveScale
    :param dht: DHTSensorController object
    :type dht: DHTSensorController
    :return: json result
    :rtype: str
    """
    _temperature = dht.temperature
    _humidity = dht.humidity
    logbook.info("DHT: {0}°C and {1}%rF".format(_temperature, _humidity))
    return json.dumps({
        "timestamp": datetime.datetime.now(pytz.timezone('Europe/Berlin')).isoformat(),
        "weight": {
            "value": acquire_weights(scale_obj=scale),
            "unit": "kg"
        },
        "temperature": {
            "value": _temperature,
            "unit": "°C"
        },
        "humidity": {
            "value": _humidity,
            "unit": "%"
        }
    })

def connect(self):
    info("Connecting...")
    yield from self.connection_lock
    ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    # sslcontext.options |= ssl.CERT_NONE
    self.reader, self.writer = (
        yield from asyncio.open_connection(self.host, 64738,
                                           server_hostname='', ssl=ssl_context))

    version = Mumble_pb2.Version()
    version.version = Protocol.VERSION_DATA
    version.release = "%d.%d.%d" % (Protocol.VERSION_MAJOR, Protocol.VERSION_MINOR,
                                    Protocol.VERSION_PATCH)
    version.os = platform.system()
    version.os_version = "Mumble %s asyncio" % version.release

    auth = Mumble_pb2.Authenticate()
    auth.username = self.username

    self.pinger = asyncio.Task(self.init_ping())

    message = Mumble_pb2.UserState()
    message.self_mute = True
    message.self_deaf = True

    yield from self.send_protobuf(version)
    yield from self.send_protobuf(auth)
    yield from self.send_protobuf(message)

    asyncio.Task(self.join_channel(self.channel))
    self.connected = True
    yield from self.read_loop()

def set_assignee_due_to_comment(self, new_assignee, comment, issue_working_state):
    if new_assignee:
        new_assignee_proper = self.get_assignee_login_by_name(new_assignee)
        if not new_assignee_proper:
            logbook.info(u'Ignoring unknown assignee %s in comment %s by %s.' %
                         (new_assignee, comment.id, comment.user.login))
            self.send_message(
                comment.user, u'Unknown assignee',
                u'(Your comment)[%s] appears to request that the assignee `%s` is set '
                u'for the issue but this does not seems to be a repository collaborator.' %
                (comment.url, new_assignee))
            return
    else:
        # You can clear the assignee.
        new_assignee_proper = None
    if not self.user_may_set_assignee(comment.user):
        logbook.warning(
            u"Ignoring unathorised attempt to alter assignee by %s through comment %s." %
            (comment.user.login, comment.url))
        self.send_message(
            comment.user, u'Unable to alter assignee',
            u'(Your comment)[%s] appears to request that the assignee `%s` is set '
            u'for the issue but you do not have the required authorisation.' %
            (comment.url, new_assignee_proper))
    else:
        logbook.info("Setting assignee %s due to comment %s by %s" %
                     (new_assignee_proper, comment.id, comment.user.login))
        self.set_assignee(new_assignee_proper, issue_working_state)

def post_token(self):
    session = Session()
    if session.get("logged_in") == "true":
        session.extend()
        return redirect("/auth/login")

    from utils.format_checker import nyu_email_check
    email = request.form.get("email")
    print("email_received:", email)
    if not nyu_email_check(email):
        logbook.info("[GET EMAIL TOKEN] Wrong email format")
        return {"status": False,
                "message": "Email is of wrong format. Please provide NYU email"}

    query = User.select().where(User.email == email)
    if request.form.get("reset_password") == "true" and not query.exists():
        return {"status": False,
                "message": "This email has not been registered yet. Please register first"}
    if request.form.get("reset_password") != "true" and query.exists():
        return {"status": False, "message": "This email has been registered"}

    token = TokenGenerator.generate()
    session["token"] = token
    session["email"] = email
    session.expire(600)

    email_helper = EmailHelper(receiver_email=email)
    email_helper.send_token(token)
    return {"status": True, "message": "A token has been sent to your mail box"}

def setUp(self):
    logbook.info("------------- {} ---------", self)
    super().setUp()
    conf.customer.test_customer = {
        'test_period': {'blocking': 86400},
        "balance": {}
    }

def validate(self, value):
    """
    This method validates given value. It doesn't catch any exception,
    it is a task of internal executor.

    :param object value: The value we have to validate.
    :return: Validated value.
    """
    processed_value = value
    for type_validator in self.validators:
        try:
            processed_value = type_validator(value)
        except ValidateError as exc:
            if self.name != self.TOKEN_NAME:
                name = self.name
                if exc.subname:
                    name += "." + exc.subname
                raise BadParameter(exc, name)
            raise BadRequest(exc)
        except HTTPError as exc:
            raise exc
        except Exception as exc:
            logbook.info(u"Invalid parameter {}: {} ({})".format(self.name, exc, type(exc)))
            logbook.debug("{}", traceback.format_exc())
            if hasattr(type_validator, "__class__"):
                validator_name = type_validator.__class__.__name__
            else:
                validator_name = getattr(type_validator, "__name__", "<unknown type>")
            raise BadRequest(
                _(u"Invalid parameter {}. It should have type {}"),
                self.name, validator_name)
    return processed_value

async def expire_snapshots(ctx, rule):
    """Expire existing snapshots for the rule."""
    logbook.debug('Expire existing snapshots')
    snapshots = await load_snapshots(ctx)
    snapshots = filter_snapshots_by_rule(snapshots, rule)
    snapshots = {s['name']: pendulum.parse(s['creationTimestamp']) for s in snapshots}

    to_keep = expire(snapshots, rule.deltas)
    logbook.info('Out of {} snapshots, we want to keep {}', len(snapshots), len(to_keep))
    for snapshot_name in snapshots:
        if snapshot_name in to_keep:
            logbook.debug('Keeping snapshot {}', snapshot_name)
            continue

        if snapshot_name not in to_keep:
            logbook.info('Deleting snapshot {}', snapshot_name)
            result = await exec(ctx.gcloud.snapshots().delete(
                snapshot=snapshot_name,
                project=ctx.config['gcloud_project']).execute)

def setUpClass(cls):
    if not configs.logging.initialized:
        handler = setup_logbook("system_tests", configs.logging)
        handler.push_application()
        configs.logging.initialized = True
    cls._current_method_name = 'setUpClass'
    logbook.info("--------------- setUpClass {} ---------------", cls)

def aggregate(self, report_id):
    logbook.info("Get detailed customer usage aggregation for {}", report_id)
    customer = Customer.get_by_id(report_id.customer_id)
    if not customer:
        raise Exception("Customer %s not found" % report_id.customer_id)

    with timed("get_usage simple"):
        aggregated_usage = ServiceUsage.get_detailed_usage(
            customer, report_id.start, report_id.end)

    tariffs = {}
    services = set()
    for usage in aggregated_usage:
        tariff = Tariff.get_by_id(usage.tariff_id)
        tariff_report = tariffs.get(usage.tariff_id)
        if tariff_report is None:
            tariff_report = self.tariff_report_type(tariff, customer)
            tariffs[usage.tariff_id] = tariff_report
        tariff_report.add_usage(usage)

    total = Counter()
    for tariff_id, tariff in tariffs.items():
        total_tariff, currency = tariff.aggregate()
        total[currency] += total_tariff
    for t, value in total.items():
        total[t] = decimal_to_string(value)

    logbook.info("Aggregated {} for {}. Services: {}", total, customer, services)
    return self.prepare_result(list(tariffs.values()), total, customer,
                               report_id.start, report_id.end)

def stop_docker_container(name):
    running = _try_get_container(name)
    if running is not None:
        logbook.info("Stopping container {}", running['Id'])
        get_docker_client().stop(running['Id'])
    else:
        logbook.info("Container is not running. Not doing anything.")

def notify_managers_about_new_service_in_tariff(self, customer_id, flavor_name):
    customer = Customer.get_by_id(customer_id)
    if not customer:
        logbook.error("Customer {} not found in notify manager", customer_id)
        return

    logbook.info("notify manager about adding new service to tariff {}",
                 customer.tariff.name)
    from api.admin.user import UserApi
    service_edit_url = urljoin(
        request_base_url(),
        posixpath.join(UserApi.ADMIN_FRONTEND_PATH, "tariffs",
                       str(customer.tariff.tariff_id), "services"))
    customer_url = urljoin(
        request_base_url(),
        posixpath.join(UserApi.ADMIN_FRONTEND_PATH, "index",
                       str(customer.customer_id), "info"))
    subject, body = MessageTemplate.get_rendered_message(
        MessageTemplate.NEW_SERVICE_IN_TARIFF, language=preferred_language(),
        account=customer.email, flavor_name=flavor_name, tariff=customer.tariff.name,
        customer_url=customer_url, service_edit_url=service_edit_url)
    logbook.info("Sending email notification to delete data of {}", customer.email)
    send_email.delay(get_customers_manager(customer), subject, body)

def create_customer(self, params=None):
    confirmed = params.pop('confirmed', False)
    can_modify = self.update
    customer_list = self.client.customer.list(email=params['email'])
    if customer_list['total'] == 0:
        customer_info = self.client.customer.create(**params)
        logbook.info('Customer "{}" created with id={}'.format(
            customer_info['email'], customer_info['customer_id']))
        can_modify = True
    else:
        customer_info = customer_list['items'][0]
        params.pop('email')
        diff = self.compare_fields(customer_info, params, ['locale'])
        diff.extend(self.compare_fields(customer_info['detailed_info'],
                                        params['detailed_info']))
        if diff:
            diff_str = self.diff_to_str(
                'Customer "{}" (id:{})'.format(customer_info['email'],
                                               customer_info['customer_id']),
                diff)
            logbook.info(diff_str)
            if self.update:
                self.client.customer.update(customer_info['customer_id'], **params)
    if confirmed and can_modify:
        self.client.customer.update(customer_info['customer_id'], confirm_email=True)

def wait(num_retries=60, retry_sleep_seconds=1):
    import sqlalchemy
    from flask_app.app import create_app

    app = create_app()
    uri = app.config['SQLALCHEMY_DATABASE_URI']
    for retry in xrange(num_retries):
        logbook.info("Testing database connection... (retry {0}/{1})",
                     retry + 1, num_retries)
        if retry > 0:
            time.sleep(retry_sleep_seconds)
        try:
            sqlalchemy.create_engine(uri).connect()
        except sqlalchemy.exc.OperationalError as e:
            if 'does not exist' in str(e):
                break
            logbook.error("Ignoring OperationalError {0} (db still not available?)", e)
        except Exception as e:
            logbook.error("Could not connect to database ({0.__class__}: {0}). Going to retry...",
                          e, exc_info=True)
        else:
            break
    else:
        raise RuntimeError("Could not connect to database")
    logbook.info("Database connection successful")

def create_news(self, params=None):
    published = params.pop('published', False)
    news_info = self.client.news.create(**params)
    logbook.info('News "{}" created with id={}'.format(
        news_info['subject'], news_info['news_id']))
    if published:
        self.client.news.publish(news_info['news_id'], True)

def delete_ports(self, tenant_id, tenant_name=None):
    ports = self.get_ports(tenant_id=tenant_id)
    for port in ports:
        log.info('Deleting port {} of tenant {} (id: {})',
                 port['id'], tenant_name, tenant_id)
        self.client_neutron.delete_port(port['id'])
    return len(ports)

def delete_volumes(self, tenant_id, tenant_name=None):
    volumes = self.get_volumes(tenant_id=tenant_id)
    for volume in volumes:
        log.info('Deleting volume {} of tenant {} (id: {})',
                 volume.id, tenant_name, tenant_id)
        self.client_cinder.volumes.delete(volume.id)
    return len(volumes)

def delete_subnets(self, tenant_id, tenant_name=None):
    subnets = self.get_subnets(tenant_id=tenant_id)
    for subnet in subnets:
        log.info('Deleting subnet {} of tenant {} (id: {})',
                 subnet['id'], tenant_name, tenant_id)
        self.client_neutron.delete_subnet(subnet['id'])
    return len(subnets)

def delete_instances(self, tenant_id, tenant_name=None):
    servers = self.get_nova_servers(tenant_id=tenant_id)
    for server in servers:
        log.info('Deleting instance {} of tenant {} (id: {})',
                 server.id, tenant_name, tenant_id)
        self.client_nova.servers.delete(server.id)
    return len(servers)

def acquire_weights(scale_obj, sleep_time=settings.WEIGHT_MEASURE_WAIT_TIME,
                    measure_count=settings.WEIGHT_MEASURE_COUNT):
    """
    to ensure accuracy of the weight do a number of measures with sleep time in between

    :param scale_obj: BeeHiveScale object
    :type scale_obj: BeeHiveScale
    :param sleep_time: how long to wait between measures
    :type sleep_time: float
    :param measure_count: number of measures
    :type measure_count: int
    :return: list of measures
    :rtype: list
    """
    logbook.info("start acquire weights")
    _measures = []
    for count in range(measure_count + 1):
        weight = scale_obj.weight
        logbook.info("weight: {weight} kg".format(weight=weight))
        _measures.append(weight)
        if count < measure_count:
            logbook.debug("wait for {0}s".format(sleep_time))
            time.sleep(sleep_time)
    return _measures

def tokens(self):
    """Handle auth requests, patch service catalog endpoint urls"""
    response = self.make_requests_request(bottle.request, urljoin(self.auth_url, 'tokens'))
    if response.status_code != 200 or not self.patch_service_catalog:
        logbook.info('Proxying tokens request to openstack without patching ({})',
                     response.status_code)
        return self.make_bottle_response(response)
    try:
        parsed = response.json()
        for service_dict in parsed.get('access', {}).get('serviceCatalog', []):
            service_name = service_dict['name']
            endpoint = service_dict['endpoints'][0]
            for item in endpoint:
                if item.endswith('URL'):
                    name = service_name + '_' + item[:-3]
                    self.service_mapping[name] = endpoint[item]  # e.g. nova_public, keystone_admin
                    endpoint[item] = self.urljoin(self.fake_auth_url, 'mock', name) + '/'
        dump = json.dumps(parsed)
    except Exception:
        logbook.exception('Error while patching service catalog')
        logbook.warning('Tokens content: {}', response.content)
        raise
    logbook.debug('service mapping is: {}', self.service_mapping)
    headers = self.filter_headers(response.headers)
    headers['Content-Length'] = len(dump)
    return bottle.HTTPResponse(dump, response.status_code, headers)

def set_milestone_due_to_comment(self, new_milestone, comment, issue_working_state):
    if new_milestone:
        new_milestone_proper = self.get_milestone_title_by_title(new_milestone)
        if not new_milestone_proper:
            logbook.info(u'Ignoring unknown milestone %s in comment %s by %s.' %
                         (new_milestone, comment.id, comment.user.login))
            self.send_message(
                comment.user, u'Unknown milestone',
                u'(Your comment)[%s] appears to request that the milestone `%s` is set '
                u'for the issue but this does not seems to be a valid milestone.' %
                (comment.url, new_milestone))
            return
    else:
        # You can clear the milestone.
        new_milestone_proper = None
    if not self.user_may_set_milestone(comment.user):
        logbook.warning(
            u"Ignoring unathorised attempt to alter milestone by %s through comment %s." %
            (comment.user.login, comment.url))
        self.send_message(
            comment.user, u'Unable to alter milestone',
            u'(Your comment)[%s] appears to request that the milestone `%s` is set '
            u'for the issue but you do not have the required authorisation.' %
            (comment.url, new_milestone_proper))
    else:
        logbook.info("Setting milestone %s due to comment %s by %s" %
                     (new_milestone_proper, comment.id, comment.user.login))
        self.set_milestone(new_milestone_proper, issue_working_state)

def test_redis_handler_rpush():
    """
    Test if rpush stores messages in the right order:
    old items should be first on the list.
    """
    import redis
    from logbook.queues import RedisHandler

    null_handler = logbook.NullHandler()
    redis_handler = RedisHandler(key='rpushed', push_method='rpush',
                                 level=logbook.INFO, bubble=True)
    with null_handler.applicationbound():
        with redis_handler:
            logbook.info("old item")
            logbook.info("new item")

    time.sleep(1.5)

    r = redis.Redis(decode_responses=True)
    logs = r.lrange('rpushed', 0, -1)
    assert logs
    assert "old item" in logs[0]
    r.delete('rpushed')

def updated_state_per_label_removal_rules(self, issue, issue_working_state):
    issue_working_state = issue_working_state.copy()
    for trigger_label, labels_to_remove in self.settings.WHEN_LABEL_REMOVE_LABELS.items():
        if trigger_label in issue_working_state['labels']:
            for label in labels_to_remove:
                if label in issue_working_state['labels']:
                    logbook.info("Removing label %s due to label %s being set." %
                                 (label, trigger_label))
                    # This ensures that side effects of removing the label kick in.
                    self.remove_label(label, issue_working_state)
    # Remove conflicting labels.
    backwards = list(reversed(issue_working_state['labels']))
    for n, label in enumerate(backwards):
        if label in self.settings.MUTUALLY_EXCLUSIVE_LABELS:
            for other_label in backwards[n + 1:]:
                if other_label in self.settings.MUTUALLY_EXCLUSIVE_LABELS:
                    logbook.info("Removing label %s due to label %s being set." %
                                 (other_label, label))
                    # This ensures that side effects of removing the label kick in.
                    self.remove_label(other_label, issue_working_state)
            # We've removed all conflicting labels at this stage so we're done.
            break
    return issue_working_state

def ted(ted_path):
    headers = {}
    headers["Accept-Encoding"] = ""
    headers["User-Agent"] = request.headers.get("User-Agent")
    headers["X-Real-IP"] = request.remote_addr
    headers["X-Forwarded-For"] = request.remote_addr
    headers["X-Forwarded-Proto"] = "https"

    logbook.info(request.access_route[-1])
    logbook.info(request.remote_addr)
    logbook.info(request.headers.get("User-Agent"))

    ted_ret = get(
        "{}/{}".format(TED_PREFIX, ted_path),
        headers=headers,
    )
    logbook.info(ted_ret.status_code)
    logbook.info("{}/{}".format(TED_PREFIX, ted_path))
    assert ted_ret.ok

    ted_content = ted_ret.content
    return ted_content.replace(
        "ajax.googleapis.com",
        "cdnjs.cloudflare.com",
    ).replace(
        r'"/images',
        r'"{}/images'.format(TED_PREFIX),
    ).replace(
        r'"/css/i/ted-logo.png',
        r'"{}/css/i/ted-logo.png'.format(TED_PREFIX),
    ).replace(
        r'"/js/ZeroClipboard.min.js',
        r'"{}/js/ZeroClipboard.min.js'.format(TED_PREFIX),
    )

def parse_bank_metadata(bank: Path,
                        queue: mp.Queue) -> Tuple[List[BankMetaData], List[BankMetaData]]:
    setup_logging(queue)
    streamed_flag = False
    memory_flag = False
    ret_memory = []
    ret_streamed = []
    try:
        logbook.info("parsing bank metadata '{bank}'", bank=bank.absolute())
        with bank.open("r", encoding="utf-8") as f:
            for line in f:
                if line.lower().startswith("in memory audio"):
                    memory_flag = True
                    continue
                elif line.lower().startswith("streamed audio"):
                    streamed_flag = True
                    continue
                elif line == "\r\n" or line == "\n":
                    streamed_flag = False
                    memory_flag = False
                    continue
                if memory_flag:
                    ret_memory.append(parse_memory_audio_meta(line))
                elif streamed_flag:
                    ret_streamed.append(parse_streamed_audio_meta(line))
    except Exception as e:
        logbook.exception("error parsing {bank}: {e}", bank=bank, e=repr(e))
    return ret_memory, ret_streamed

def aggregate(self, report_id):
    logbook.info("Get customer usage aggregation for {}", report_id)
    customer = Customer.get_by_id(report_id.customer_id)
    if not customer:
        raise Exception("Customer %s not found" % report_id.customer_id)

    with timed("get_usage simple"):
        aggregated_usage = ServiceUsage.get_usage(customer, report_id.start, report_id.end)

    tariffs = {}
    services = set()
    for usage in aggregated_usage:
        service_id, tariff_id, cost, usage_volume = usage
        services.add(service_id)
        if not tariff_id:
            logbook.error("ServiceUsage {} is not completed. Tariff is not filled", usage)
            continue
        tariff = Tariff.get_by_id(tariff_id)
        tariff_report = tariffs.get(tariff_id)
        if tariff_report is None:
            tariff_report = self.tariff_report_type(tariff, customer)
            tariffs[tariff_id] = tariff_report
        tariff_report.add_usage(usage)

    total = Counter()
    for tariff_id, tariff in tariffs.items():
        total_tariff, currency = tariff.aggregate()
        total[currency] += total_tariff
    for t, value in total.items():
        total[t] = decimal_to_string(value)

    logbook.info("Aggregated {} for {}. Services: {}", total, customer, services)
    return self.prepare_result(list(tariffs.values()), total, customer,
                               report_id.start, report_id.end)

def main():
    logbook.StderrHandler(
        format_string='{record.level_name}: {record.message}').push_application()
    args = docopt(__doc__)
    BASE = path.expanduser(path.join('~', '.local', 'share', 'andrei'))
    makedirs(BASE, exist_ok=True)
    if args['modelize']:
        words = word_list(args['FILE'], args['--filter'])
        logbook.info(textwrap.shorten(
            'Found {} words: {}'.format(len(words), ', '.join(words[:50])),
            width=70, placeholder='...'))
        gen = Generator(words, int(args['STATE_SIZE']))
        gen.dump_model(path.join(BASE, args['NAME']))
        print('Successfully generated model {}'.format(args['NAME']))
    else:
        gen = Generator(path=path.join(BASE, args['--model']))
        pw, ent = gen.generate_password(int(args['--entropy']),
                                        int(args['--min-word-len']),
                                        int(args['--max-word-len']),
                                        args['--specials'], args['--sep'])
        logbook.info('entropy: {:.3f}'.format(ent))
        if args['--clip']:
            pyperclip.copy(pw)
        else:
            print(pw)