def set(self, cursor=None, user_id=None, staff_user_id=None, venue_id=None, manager=None, promo_perm=None, delete=None, **kwargs):
    """Add, promote/demote or remove a venue staff member.

    :param cursor: open DB-API cursor used for all statements
    :param user_id: id of the acting user (unused in the visible body)
    :param staff_user_id: id of the user whose staff record is changed
    :param venue_id: id of the venue the record belongs to
    :param manager: truthy -> make the user a manager (moves the row from
        venue_staff to venue_managers)
    :param promo_perm: truthy -> staff member may manage promotions
    :param delete: truthy -> remove the user from both staff tables
    :returns: True (unconditionally)
    """
    if util.to_bool(delete):
        # Remove the user from both role tables; a user may appear in either.
        qry = {'delete': 'venue_staff', 'where': ('user_id = ?', 'venue_id = ?')}
        cursor.execute(util.query(**qry), (staff_user_id, venue_id))
        qry = {'delete': 'venue_managers', 'where': ('user_id = ?', 'venue_id = ?')}
        cursor.execute(util.query(**qry), (staff_user_id, venue_id))
    elif util.to_bool(manager):
        # Promote to manager only if no manager row exists yet.
        qry = {'select': 'id', 'table': 'venue_managers', 'where': ('user_id = ?', 'venue_id = ?'), 'order_by': 'id', 'limit': 1}
        cursor.execute(util.query(**qry), (staff_user_id, venue_id))
        res = cursor.fetchone()
        if not res:
            # Drop any plain-staff row before inserting the manager row.
            qry = {'delete': 'venue_staff', 'where': ('user_id = ?', 'venue_id = ?')}
            cursor.execute(util.query(**qry), (staff_user_id, venue_id))
            qry = {'insert_into': 'venue_managers', 'columns': ('user_id', 'venue_id', 'time')}
            cursor.execute(util.query(**qry), (staff_user_id, venue_id, util.now()))
    else:
        # Plain staff: insert if absent, otherwise just refresh promo_perm.
        qry = {'select': 'id', 'table': 'venue_staff', 'where': ('user_id = ?', 'venue_id = ?'), 'order_by': 'id', 'limit': 1}
        cursor.execute(util.query(**qry), (staff_user_id, venue_id))
        res = cursor.fetchone()
        if not res:
            qry = {'delete': 'venue_managers', 'where': ('user_id = ?', 'venue_id = ?')}
            cursor.execute(util.query(**qry), (staff_user_id, venue_id))
            qry = {'insert_into': 'venue_staff', 'columns': ('user_id', 'venue_id', 'time', 'promo_perm')}
            cursor.execute(util.query(**qry), (staff_user_id, venue_id, util.now(), 1 if util.to_bool(promo_perm) else 0))
        else:
            # NOTE(review): ('promo_perm') is a plain string, not a 1-tuple —
            # presumably util.query accepts a bare column name here (same
            # pattern is used elsewhere in this file); confirm.
            qry = {'update': 'venue_staff', 'set_values': ('promo_perm'), 'where': ('user_id = ?', 'venue_id = ?')}
            cursor.execute(util.query(**qry), (1 if util.to_bool(promo_perm) else 0, staff_user_id, venue_id))
    return True
def set(self, cursor=None, facebook_id=None, user_id=None, venue_id=None, name=None, address=None, country=None, phone=None, email=None, email_verified=None, category_id=None, headline=None, tonight=None, website=None, facebook=None, twitter=None, v_facebook_id=None, twitter_id=None, twitter_token=None, twitter_secret=None, lat=None, lon=None, official=None, verified=None, customer_spend=None, authenticated=None, creator_version=None, **kwargs):
    """Insert a new venue or update an existing one.

    Only parameters that were actually supplied (not None) are written, so a
    partial update leaves the other columns untouched.

    :param cursor: open DB-API cursor
    :param venue_id: if set, update that venue; otherwise insert a new row
    :param user_id: stored as the venue's creator
    :returns: the new venue id on insert; None on update
    """
    data = {'name': name, 'address': address, 'country': country, 'phone': phone,
            'email': email, 'email_verified': util.to_bool(email_verified),
            'category_id': util.to_int(category_id), 'headline': headline,
            'tonight': tonight, 'website': website, 'facebook': facebook,
            'twitter': twitter, 'facebook_id': v_facebook_id,
            'twitter_id': twitter_id, 'twitter_token': twitter_token,
            'twitter_secret': twitter_secret, 'lat': util.to_float(lat),
            'lon': util.to_float(lon), 'official': util.to_bool(official),
            'verified': util.to_bool(verified),
            'customer_spend': util.to_float(customer_spend),
            'authenticated': util.to_bool(authenticated), 'creator': user_id,
            'creator_version': creator_version}
    columns = []
    values = []
    # Keep only the fields the caller supplied; `is not None` (identity test)
    # is the correct way to check for the None sentinel.
    for key, val in data.iteritems():
        if val is not None:
            columns.append(key)
            values.append(val)
    if venue_id:
        qry = {'update': 'venues', 'set_values': columns, 'where': 'id = ?'}
        values.append(venue_id)
        cursor.execute(util.query(**qry), values)
    else:
        qry = {'insert_into': 'venues', 'columns': columns}
        cursor.execute(util.query(**qry), values)
        # Fetch the identity of the freshly inserted row.
        cursor.execute(util.query(last_id=True))
        return int(cursor.fetchone().identity)
def __init__(self, logger, config):
    """Initialise the BOOTP server state from *config*.

    :param logger: logger object used for all diagnostics
    :param config: configuration object exposing get_bootp_* accessors
    :raises BootpError: if the bind interface cannot be resolved or the
        access-control configuration is invalid
    """
    self.sock = []
    self.log = logger
    self.config = config
    self.uuidpool = {}  # key MAC address value, value UUID value
    self.ippool = {}  # key MAC address string, value assigned IP string
    self.filepool = {}  # key IP string, value pathname
    self.states = {}  # key MAC address string, value client state
    # NOTE(review): name_ is computed but never used in this variant of the
    # constructor — possibly left over from a refactor; confirm.
    name_ = PRODUCT_NAME.split('-')
    name_[0] = 'bootp'
    self.netconfig = get_iface_config(self.config.get_bootp_bind_interface())
    if not self.netconfig:
        raise BootpError('Unable to detect network configuration')
    keys = sorted(self.netconfig.keys())
    self.log.info('Using %s' % ', '.join(map(':'.join, zip(keys, [self.netconfig[k] for k in keys]))))
    access = self.config.get_bootp_acl_type()
    if not access:
        # No ACL configured: accept all clients.
        self.acl = None
    else:
        access = access.lower()
        if access not in self.ACCESS_LOCAL + self.ACCESS_REMOTE:
            raise BootpError('Invalid access mode: %s' % access)
        if not self.config.has_section(access):
            raise BootpError("Missing access section '%s'" % access)
        self.acl = {}
        if access in self.ACCESS_LOCAL:
            # Local modes (uuid/mac): preload the allow/deny map from config.
            for entry in self.config.options(access):
                self.acl[entry.upper()] = \
                    to_bool(self.config.get(access, entry))
    self.access = access
def __init__(self, logger, config):
    """Initialise the BOOTP server state from *config*.

    :param logger: logger object used for all diagnostics
    :param config: configuration object exposing get_bootp_* accessors
    :raises BootpError: if the bind interface cannot be resolved or the
        access-control configuration is invalid
    """
    self.sock = []
    self.log = logger
    self.config = config
    self.uuidpool = {}  # key MAC address value, value UUID value
    self.ippool = {}  # key MAC address string, value assigned IP string
    self.filepool = {}  # key IP string, value pathname
    self.states = {}  # key MAC address string, value client state
    # NOTE(review): name_ is unused below — likely refactor residue; confirm.
    name_ = PRODUCT_NAME.split('-')
    name_[0] = 'bootp'
    self.netconfig = get_iface_config(
        self.config.get_bootp_bind_interface())
    if not self.netconfig:
        raise BootpError('Unable to detect network configuration')
    keys = sorted(self.netconfig.keys())
    self.log.info('Using %s' % ', '.join(
        map(':'.join, zip(keys, [self.netconfig[k] for k in keys]))))
    access = self.config.get_bootp_acl_type()
    if not access:
        # No ACL configured: accept all clients.
        self.acl = None
    else:
        access = access.lower()
        if access not in self.ACCESS_LOCAL + self.ACCESS_REMOTE:
            raise BootpError('Invalid access mode: %s' % access)
        if not self.config.has_section(access):
            raise BootpError("Missing access section '%s'" % access)
        self.acl = {}
        if access in self.ACCESS_LOCAL:
            # Local modes (uuid/mac): preload the allow/deny map from config.
            for entry in self.config.options(access):
                self.acl[entry.upper()] = \
                    to_bool(self.config.get(access, entry))
    self.access = access
def get(self, cursor=None, user_id=None, venue_id=None, getall=None, level=None, from_time=None, until_time=None, **kwargs):
    """Fetch promotions for a venue.

    When *getall* is falsy, returns the single best currently-active
    promotion (or None); otherwise returns every non-hidden promotion as a
    list of dicts.

    :param from_time:/:param until_time: optional window used to also count
        the calling user's own redemptions
    :param level: minimum user level compared against each promotion's level
    """
    # Correlated subquery: total redemptions per promotion.
    red = {'select': 'COUNT(id)', 'table': 'promotion_redemptions',
           'where': 'promotion_id = promotions.id'}
    promo_qry = {'select': ['id', 'title', 'description', 'passcode', 'start', '[end]',
                            'maximum', 'creator', 'level',
                            '(' + util.query(**red) + ') AS redemptions'],
                 'table': 'promotions',
                 'where': ['venue_id = ?', 'hidden != 1'],
                 'order_by': 'id DESC'}
    if from_time and until_time:
        # SECURITY NOTE(review): from_time, until_time and user_id are
        # concatenated directly into the SQL text instead of being bound as
        # parameters — injection risk if any of them is caller-controlled.
        own_red = {'select': 'COUNT(id)', 'table': 'promotion_redemptions',
                   'where': ('promotion_id = promotions.id',
                             'time >= ' + from_time,
                             'time < ' + until_time,
                             'user_id = ' + str(user_id))}
        promo_qry['select'].append('(' + util.query(**own_red) + ') AS own_redemptions')
    if not util.to_bool(getall):
        # Single-result mode: restrict to promotions that are active now,
        # not exhausted, and within the user's level.
        promo_qry['limit'] = 1
        promo_qry['where'].append(str(util.now()) + ' >= start')
        promo_qry['where'].append('([end] = 0 OR [end] > ' + str(util.now()) + ')')
        promo_qry['where'].append('(maximum = 0 OR (' + util.query(**red) + ') < maximum)')
        # SECURITY NOTE(review): level is concatenated into SQL as well.
        promo_qry['where'].append(level + ' >= level')
        promo_qry['order_by'] = 'level DESC, id DESC'
        cursor.execute(util.query(**promo_qry), (venue_id,))
        row = cursor.fetchone()
        if row:
            # Build a column-name -> value dict from the cursor description.
            return {t[0]: val for t, val in zip(cursor.description, row)}
        else:
            return None
    cursor.execute(util.query(**promo_qry), (venue_id,))
    return [util.row_to_dict(cursor, row) for row in cursor.fetchall()]
def set(self, cursor=None, user_id=None, venue_id=None, delete=None, promotion_id=None, title=None, description=None, start=None, end=None, maximum=None, passcode=None, level=None, **kwargs):
    """Create, update or soft-delete a promotion.

    :param delete: truthy together with promotion_id -> mark it hidden
    :param promotion_id: if set (and delete falsy), update that promotion;
        otherwise insert a new one with *user_id* as creator
    :returns: True (unconditionally)
    """
    if util.to_bool(delete) and promotion_id:
        # Soft delete: flag the row as hidden instead of removing it.
        # NOTE(review): ('hidden') is a plain string, not a 1-tuple —
        # presumably util.query accepts a bare column name; confirm.
        qry = {'update': 'promotions', 'set_values': ('hidden'), 'where': 'id = ?'}
        cursor.execute(util.query(**qry), (1, promotion_id))
    elif promotion_id:
        qry = {'update': 'promotions',
               'set_values': ('title', 'description', 'start', '[end]', 'maximum',
                              'passcode', 'venue_id', 'level'),
               'where': 'id = ?'}
        cursor.execute(util.query(**qry), (title, description, start, end, maximum,
                                           passcode, venue_id, level, promotion_id))
    else:
        qry = {'insert_into': 'promotions',
               'columns': ('title', 'description', 'start', '[end]', 'maximum',
                           'creator', 'passcode', 'venue_id', 'level')}
        cursor.execute(util.query(**qry), (title, description, start, end, maximum,
                                           user_id, passcode, venue_id, level))
    return True
def __init__(self, username, password, latitude, longitude, language='de', server_url=_SERVER_URL, skip_upload=False, post_interval=300):
    """Create an AWEKAS uploader thread.

    Required:
      username / password -- AWEKAS credentials
      latitude / longitude -- station position in decimal degrees

    Optional:
      language -- one of de, en, it, fr, nl (default 'de')
      server_url -- endpoint to post to (default: the AWEKAS site)
      skip_upload -- debugging switch: prepare data but do not upload
      post_interval -- seconds between posts; AWEKAS asks for >= 300

    Additional keyword options accepted by the base class include
    max_backlog, log_success, log_failure, timeout, max_tries and
    retry_wait; see RESTThread for their defaults.
    """
    super(AWEKASThread, self).__init__(protocol_name='AWEKAS',
                                       post_interval=post_interval)
    self.username = username
    self.password = password
    self.language = language
    self.server_url = server_url
    self.latitude = float(latitude)
    self.longitude = float(longitude)
    self.skip_upload = to_bool(skip_upload)
def set(self, cursor=None, user_id=None, venue_id=None, following=None, **kwargs):
    """Synchronise the follower relation between a user and a venue.

    Inserts a follower row when *following* is truthy and none exists, and
    deletes the row when *following* is falsy and one exists; otherwise the
    call is a no-op. Always returns True.
    """
    wants_to_follow = util.to_bool(following)
    lookup = {'select': 'id', 'table': 'venue_followers',
              'where': ('user_id = ?', 'venue_id = ?'),
              'order_by': 'id', 'limit': 1}
    cursor.execute(util.query(**lookup), (user_id, venue_id))
    existing = cursor.fetchone()
    if wants_to_follow and not existing:
        insert = {'insert_into': 'venue_followers',
                  'columns': ('user_id', 'venue_id')}
        cursor.execute(util.query(**insert), (user_id, venue_id))
    elif existing and not wants_to_follow:
        remove = {'delete': 'venue_followers',
                  'where': ('user_id = ?', 'venue_id = ?')}
        cursor.execute(util.query(**remove), (user_id, venue_id))
    return True
def __init__(self, logger, parser):
    """Set up the HTTP server and preload the UUID access map.

    :param logger: logger used for diagnostics
    :param parser: configuration parser; reads the 'httpd' port and the
        optional 'uuid' section
    """
    # Bind to localhost on the configured port (default '80').
    # NOTE(review): parser.get is called with a third positional argument —
    # presumably a project wrapper with a default-value parameter, not the
    # stdlib ConfigParser signature; confirm.
    address = ('localhost', int(parser.get('httpd', 'port', '80')))
    HTTPServer.__init__(self, address, self.ReqHandler)
    self.log = logger
    self.uuids = {}  # key: normalised UUID string, value: allowed flag
    access = 'uuid'
    if parser.has_section(access):
        for entry in parser.options(access):
            self.uuids[entry.upper().strip()] = \
                to_bool(parser.get(access, entry))
def __post__(self, env, email, firstname, lastname, gender, language, protected):
    """Update the authenticated user's profile and return the full details.

    Returns a JSONView (HTTP 200) bound to the refreshed user record.
    """
    is_protected = util.to_bool(protected)
    self.app.update_user_details(self.username, email, firstname, lastname,
                                 gender, language, is_protected)
    result = view.JSONView(200)
    details = self.app.get_full_user_details(self.username)
    result.bind(details)
    return result
class BootpServer:
    """BOOTP Server

    Implements bootstrap protocol"""

    ACCESS_LOCAL = ['uuid', 'mac']  # Access modes, defined locally
    ACCESS_REMOTE = ['http']  # Access modes, remotely retrieved
    (ST_IDLE, ST_PXE, ST_DHCP) = range(3)  # Current state

    def __init__(self, logger, config):
        """Initialise server state, the IP pool and the access-control list.

        :param logger: logger object used for all diagnostics
        :param config: configuration object with a get(section, option[, default])
            accessor and has_section/options methods
        :raises BootpError: on missing pool_start, undetectable network
            configuration, a malformed notify list, or invalid ACL settings
        """
        self.sock = []
        self.log = logger
        self.config = config
        self.uuidpool = {}  # key MAC address value, value UUID value
        self.ippool = {}  # key MAC address string, value assigned IP string
        self.filepool = {}  # key IP string, value pathname
        self.states = {}  # key MAC address string, value client state
        # Derive the config section name from the product name, e.g.
        # 'foo-daemon' -> 'bootp_daemon'.
        name_ = PRODUCT_NAME.split('-')
        name_[0] = 'bootp'
        self.bootp_section = '_'.join(name_)
        self.pool_start = self.config.get(self.bootp_section, 'pool_start')
        if not self.pool_start:
            raise BootpError('Missing pool_start definition')
        self.pool_count = int(
            self.config.get(self.bootp_section, 'pool_count', '10'))
        self.netconfig = get_iface_config(self.pool_start)
        if not self.netconfig:
            raise BootpError('Unable to detect network configuration')
        keys = sorted(self.netconfig.keys())
        self.log.info('Using %s' % ', '.join(
            map(':'.join, zip(keys, [self.netconfig[k] for k in keys]))))
        # Optional notification targets: 'host:port;host:port;...'
        nlist = self.config.get(self.bootp_section, 'notify')
        self.notify = []
        if nlist:
            try:
                nlist = nlist.split(';')
                for n in nlist:
                    n = n.strip().split(':')
                    self.notify.append((n[0], int(n[1])))
            except Exception, e:
                raise BootpError('Invalid notification URL: %s' % str(e))
        access = self.config.get(self.bootp_section, 'access')
        if not access:
            # No ACL configured: accept all clients.
            self.acl = None
        else:
            access = access.lower()
            if access not in self.ACCESS_LOCAL + self.ACCESS_REMOTE:
                raise BootpError('Invalid access mode: %s' % access)
            if not self.config.has_section(access):
                raise BootpError("Missing access section '%s'" % access)
            self.acl = {}
            if access in self.ACCESS_LOCAL:
                # Local modes (uuid/mac): preload allow/deny flags from config.
                for entry in self.config.options(access):
                    self.acl[entry.upper()] = \
                        to_bool(self.config.get(access, entry))
        self.access = access
def get(self, cursor=None, venue_id=None, from_time=None, until_time=None, own=None, user_id=None, **kwargs):
    """Count RSVPs for a venue within [from_time, until_time).

    Returns {'maybe': n, 'going': m}; when *own* is truthy both counts are
    restricted to rows belonging to *user_id*.
    """
    only_own = util.to_bool(own)
    params = (venue_id, from_time, until_time)
    # "maybe" means: flagged maybe and explicitly not going.
    maybe_qry = {'select': 'COUNT(id) AS cnt', 'table': 'venue_rsvps',
                 'where': ('venue_id = ?', 'maybe = 1', 'going = 0',
                           'time >= ?', 'time < ?')}
    if only_own:
        maybe_qry['where'] += ('user_id = ?',)
        params += (user_id,)
    cursor.execute(util.query(**maybe_qry), params)
    maybe_count = cursor.fetchone().cnt
    going_qry = {'select': 'COUNT(id) AS cnt', 'table': 'venue_rsvps',
                 'where': ('venue_id = ?', 'going = 1',
                           'time >= ?', 'time < ?')}
    if only_own:
        going_qry['where'] += ('user_id = ?',)
    cursor.execute(util.query(**going_qry), params)
    going_count = cursor.fetchone().cnt
    return {'maybe': maybe_count, 'going': going_count}
def _build(self, input_, core_state):
    """Run a single-step unroll of the agent network.

    :param input_: (action, env_output) pair for one timestep
    :param core_state: recurrent core state carried between calls
    :returns: (outputs, core_state, custom_rnn_output, torso_outputs) with
        the temporary time dimension removed from ``outputs``
    """
    action, env_output = input_
    # Prepend a time dimension of length 1 ([None, ...]) so the inputs match
    # the (time, batch, ...) layout that self.unroll expects.
    env_outputs = environments.StepOutput(
        reward=env_output.reward[None, ...],
        info=nest.map_structure(lambda t: t[None, ...], env_output.info),
        done=to_bool(tf.cast(env_output.done, tf.int64)[None, ...]),
        # NOTE(review): only observation[0] is forwarded (cast to float) and
        # the second slot is replaced with a scalar zero — presumably the
        # second observation component is unused by the torso; confirm.
        observation=(tf.to_float(env_output.observation[0])[None, ...],
                     tf.zeros(())))
    actions = action[None, ...]
    outputs, core_state, custom_rnn_output, torso_outputs = self.unroll(
        actions, env_outputs, core_state)
    # Squeeze the fake time dimension back out of the unroll outputs; the
    # auxiliary outputs are returned as-is.
    return nest.map_structure(lambda t: tf.squeeze(t, 0), outputs), core_state, \
        custom_rnn_output, torso_outputs
def set(self, cursor=None, user_id=None, venue_id=None, maybe=None, going=None, from_time=None, until_time=None, **kwargs):
    """Create or update a user's RSVP for a venue within the given window.

    Only the flags actually supplied (maybe/going) are written. Returns True.
    """
    lookup = {'select': 'id', 'table': 'venue_rsvps',
              'where': ('user_id = ?', 'venue_id = ?', 'time >= ?', 'time < ?'),
              'order_by': 'id', 'limit': 1}
    cursor.execute(util.query(**lookup),
                   (user_id, venue_id, from_time, until_time))
    existing = cursor.fetchone()
    # Collect whichever RSVP flags the caller provided.
    flag_columns = []
    flag_values = []
    if maybe:
        flag_values.append(util.to_bool(maybe))
        flag_columns.append('maybe')
    if going:
        flag_values.append(util.to_bool(going))
        flag_columns.append('going')
    if existing:
        qry = {'update': 'venue_rsvps', 'set_values': flag_columns,
               'where': 'id = ?'}
        cursor.execute(util.query(**qry), flag_values + [existing.id])
    else:
        qry = {'insert_into': 'venue_rsvps',
               'columns': ['user_id', 'venue_id', 'time'] + flag_columns}
        cursor.execute(util.query(**qry),
                       [user_id, venue_id, util.now()] + flag_values)
    return True
def get(self) -> Tuple[Dict, int]:  # type: ignore
    """Build a query filter from the request args and return matches.

    When ``related`` is truthy and no explicit entity12 filter was given,
    the filter is replaced by the set of entity12 values matching the other
    criteria. Returns (payload, 200).
    """
    args = request.args
    entity12 = args.get("entity12")
    related = args.get("related")
    kwargs: Dict[str, Any] = {}
    # Map each supplied query-string parameter to its model field name,
    # preserving the original precedence/order.
    for param, field in (("status", "status"), ("id", "_id"),
                         ("api10", "api10"), ("api14", "api14"),
                         ("entity12", "entity12"), ("entity", "entity")):
        value = args.get(param)
        if value:
            kwargs[field] = value
    if related is not None:
        related = util.to_bool(related)
        if related and not entity12:
            objs = self.model.objects(**kwargs).only("entity12").all()
            # kwargs = {"entity12__in": list({x.entity12 for x in objs})}
            entity12 = util.reduce(list({x.entity12 for x in objs}))
            key = "entity12__in" if isinstance(entity12, (list, set)) else "entity12"
            if entity12:
                kwargs = {key: entity12}
    logger.debug(f"production get: {kwargs}")  # noqa
    return self._get(**kwargs), 200
def _set(name, value):
    """Coerce *value* to the type of the existing module global *name* and store it.

    Raises ValueError for unknown/private names, derived (callable/property)
    values, or a value that cannot be converted to the target type.
    """
    if name.startswith('_') or name not in globals():
        raise ValueError("Invalid configuration value '{}'".format(name))
    target_type = type(globals()[name])
    if target_type in (callable, property):
        raise ValueError('Cannot set derived configuration value')
    try:
        # Booleans need the lenient string parser; everything else uses the
        # type's own constructor.
        coerced = util.to_bool(value) if target_type == bool else target_type(value)
    except ValueError:
        raise ValueError("Invalid type for configuration value '{}', must be: {}".format(name, target_type))
    globals()[name] = coerced
    logging.debug("Configuration value {} = {}".format(name, value))
def set(self, cursor=None, user_id=None, venue_id=None, caption=None, hide=None, post_id=None, image=None, **kwargs):
    """Hide an existing post, or create a new post with an image.

    :param post_id: together with truthy *hide*, soft-deletes that post
    :param image: uploaded file object; its ``.file`` stream is stored
    :returns: the id of the hidden or newly created post
    """
    if post_id and util.to_bool(hide):
        # Soft delete: mark the post hidden instead of removing the row.
        # NOTE(review): ('hidden') is a plain string, not a 1-tuple —
        # presumably util.query accepts a bare column name; also the flag is
        # written as the string '1' here while other call sites use int 1.
        qry = {'update': 'posts', 'set_values': ('hidden'), 'where': 'id = ?'}
        cursor.execute(util.query(**qry), ('1', post_id))
        return post_id
    else:
        qry = {'insert_into': 'posts', 'columns': ('user_id', 'venue_id', 'caption', 'time')}
        cursor.execute(util.query(**qry), (user_id, venue_id, caption, util.now()))
        # Fetch the identity of the freshly inserted row.
        cursor.execute(util.query(last_id=True))
        post_added = int(cursor.fetchone().identity)
        # Persist the uploaded image under the new post id.
        azureutil.store(image.file, 'post', str(post_added))
        return post_added
def _set(name, value):
    """Coerce *value* to the type of the existing module global *name* and store it.

    :raises ValueError: for unknown/private names, derived (callable/property)
        values, or a value that cannot be converted to the target type
    """
    if name.startswith('_') or name not in globals():
        raise ValueError("Invalid configuration value '{}'".format(name))
    # The current global's type decides how the new value is coerced.
    t = type(globals()[name])
    if t in (callable, property):
        raise ValueError('Cannot set derived configuration value')
    try:
        if t == bool:
            # Booleans need the lenient string parser ('true', '1', ...).
            globals()[name] = util.to_bool(value)
        else:
            globals()[name] = t(value)
    except ValueError:
        raise ValueError(
            "Invalid type for configuration value '{}', must be: {}".format(
                name, t))
    logging.debug("Configuration value {} = {}".format(name, value))
def retrieve(self, cursor=None, user_id=None, term=None):
    """Search users by name, or fetch one user's full record.

    :param term: if truthy, a space-separated name fragment; matched with
        LIKE against "forename surname" and a list of brief dicts is
        returned. If falsy, the single record for *user_id* is returned.
    """
    # NOTE(review): util.to_bool is applied to an arbitrary search string
    # here — presumably it just tests truthiness for strings; confirm.
    if util.to_bool(term):
        qry = {'select': ['id', 'facebook_id', 'forename', 'surname'],
               'table': 'users',
               'where': ("CONCAT(forename, \' \', surname) LIKE ?",),
               'order_by': 'surname ASC, forename ASC'}
        # Spaces in the term become wildcards: "jo do" -> "%jo%do%".
        cursor.execute(util.query(**qry), ("%" + term.replace(' ', "%") + "%",))
        return [util.row_to_dict(cursor, row) for row in cursor]
    else:
        qry = {'select': ['id', 'facebook', 'twitter', 'forename', 'surname',
                          'age', 'birth_day', 'birth_month', 'birth_year',
                          'gender', 'employee', 'joined', 'country', 'language',
                          'email', 'top5', 'save_locally', 'last_login',
                          'last_facebook', 'last_twitter'],
               'table': 'users', 'order_by': 'id'}
        qry['select'].append('twitter_id')
        qry['select'].append('twitter_token')
        qry['select'].append('twitter_secret')
        qry.update({'where': 'id = ?', 'limit': 1})
        cursor.execute(util.query(**qry), (user_id,))
        res = cursor.fetchone()
        return util.row_to_dict(cursor, res)
def update_user_details(self, username, email, firstname, lastname, gender, language, protected):
    """Validate and persist a user's profile details.

    Raises InvalidParameterException for the first invalid field (checked in
    the order email, firstname, lastname, gender, language, protected) and
    ConflictException when the email is already taken by another account.
    """
    checks = ((validate_email, email, "email"),
              (validate_firstname, firstname, "firstname"),
              (validate_lastname, lastname, "lastname"),
              (validate_gender, gender, "gender"),
              (validate_language, language, "language"))
    for is_valid, value, label in checks:
        if not is_valid(value):
            raise exception.InvalidParameterException(label)
    # Accept "true"/"false" strings for the protected flag; anything else
    # that is not already a bool is rejected.
    if not isinstance(protected, bool):
        if isinstance(protected, str) and (protected.lower() in ["true", "false"]):
            protected = util.to_bool(protected)
        else:
            raise exception.InvalidParameterException("protected")
    with self.__create_db_connection__() as conn:
        with conn.enter_scope() as scope:
            self.__test_active_user__(scope, username)
            # The email must be free (or unchanged) before updating.
            if not self.__user_db.user_can_change_email(scope, username, email):
                raise exception.ConflictException("Email address is already assigned.")
            self.__user_db.update_user_details(scope, username, email, firstname,
                                               lastname, gender, language, protected)
            scope.complete()
def handle(self, sock, addr, data):
    """Handle one received BOOTP/DHCP datagram and send a reply if warranted.

    :param sock: the UDP socket the datagram arrived on (also used to reply)
    :param addr: (host, port) of the sender
    :param data: raw datagram bytes
    """
    sender = addr
    self.log.info('Sender: %s on socket %s' % (addr, sock.getsockname()))
    if len(data) < DHCPFormatSize:
        # NOTE(review): only logged — processing continues and struct.unpack
        # below would raise on a short packet; confirm whether a return is
        # missing here.
        self.log.error('Cannot be a DHCP or BOOTP request - too small!')
    tail = data[DHCPFormatSize:]
    buf = list(struct.unpack(DHCPFormat, data[:DHCPFormatSize]))
    if buf[BOOTP_OP] != BOOTREQUEST:
        self.log.warn('Not a BOOTREQUEST')
        return
    options = self.parse_options(tail)
    if options is None:
        self.log.warn('Error in option parsing, ignore request')
        return
    # Extras (DHCP options)
    try:
        # Option 53 is the DHCP message type; absent for plain BOOTP.
        dhcp_msg_type = ord(options[53][0])
    except KeyError:
        dhcp_msg_type = None
    server_addr = self.netconfig['address']
    mac_addr = buf[BOOTP_CHADDR][:6]
    gi_addr = buf[BOOTP_GIADDR][:4]
    mac_str = ':'.join(['%02X' % ord(x) for x in mac_addr])
    gi_str = '.'.join(['%d' % ord(x) for x in gi_addr])
    self.log.debug("Gateway address: %s" % gi_str)
    # is the UUID received (PXE mode)
    if 97 in options and len(options[97]) == 17:
        uuid = options[97][1:]
        pxe = True
        self.log.info('PXE UUID has been received')
    # or retrieved from the cache (DHCP mode)
    else:
        uuid = self.uuidpool.get(mac_addr, None)
        pxe = False
        self.log.info('PXE UUID not present in request')
    # Format the 16-byte UUID as the canonical 8-4-4-4-12 hex string.
    # (Python 2-only syntax: unparenthesized tuple after `for ... in`.)
    uuid_str = uuid and ('%s-%s-%s-%s-%s' %
        tuple([hexlify(x) for x in uuid[0:4], uuid[4:6],
               uuid[6:8], uuid[8:10], uuid[10:16]])).upper()
    if uuid_str:
        self.log.info('UUID is %s for MAC %s' % (uuid_str, mac_str))
    hostname = ''
    filename = ''
    # Basic state machine
    currentstate = self.states.setdefault(mac_str, self.ST_IDLE)
    newstate = currentstate
    if currentstate == self.ST_IDLE:
        if pxe and (dhcp_msg_type == DHCP_DISCOVER):
            # BIOS is booting up, and try to locate a DHCP server
            newstate = self.ST_PXE
    elif currentstate == self.ST_PXE:
        if not pxe and (dhcp_msg_type == DHCP_REQUEST):
            # OS is booting up, and confirm a previous DHCP dicovery
            newstate = self.ST_DHCP
    else:  # currentstate == self.ST_DHCP
        if pxe:
            # OS was running but the BIOS is performing a DHCP request:
            # board has been restarted
            newstate = self.ST_PXE
    # if the state has not evolved from idle, there is nothing to do
    if newstate == self.ST_IDLE:
        self.log.info('Request from %s ignored (idle state)' % mac_str)
        sdhcp = self.config.get_bootp_allow_simple_dhcp()
        simple_dhcp = sdhcp and to_bool(sdhcp)
        if not simple_dhcp:
            return
    # construct reply
    buf[BOOTP_OP] = BOOTREPLY
    self.log.info('Client IP: %s' % socket.inet_ntoa(buf[7]))
    if buf[BOOTP_CIADDR] == '\x00\x00\x00\x00':
        self.log.debug('Client needs its address')
        host_data = self.get_host_data_for_mac(mac_str)
        ipaddr = host_data['address']
        self.log.debug("IPADDR: {0}".format(ipaddr))
        if not ipaddr:
            self.log.error("Can't get IP address!")
            return
        ip = None
        if mac_str in self.ippool:
            ip = self.ippool[mac_str]
            self.log.info('Lease for MAC %s already defined as IP %s' %
                          (mac_str, ip))
        else:
            self.ippool[mac_str] = ipaddr
            ip = ipaddr
        if not ip:
            #raise BootpError('No more IP available in definined pool')
            self.log.error("Can not find IP assigned to mac: %s" % mac_str)
            return
        # Compute the subnet broadcast address for the reply destination.
        mask = iptoint(self.netconfig['mask'])
        reply_broadcast = iptoint(ip) & mask
        reply_broadcast |= (~mask) & ((1 << 32) - 1)
        buf[BOOTP_YIADDR] = socket.inet_aton(ip)
        buf[BOOTP_SECS] = 0
        buf[BOOTP_FLAGS] = BOOTP_FLAGS_NONE
        addr = (inttoip(reply_broadcast), addr[1])
        self.log.debug('Reply to: %s:%s' % addr)
    else:
        # Client already knows its address: echo it back.
        buf[BOOTP_YIADDR] = buf[BOOTP_CIADDR]
        ip = socket.inet_ntoa(buf[BOOTP_YIADDR])
    buf[BOOTP_SIADDR] = socket.inet_aton(server_addr)
    # NOTE(review): gi_addr is a 4-byte string, so even 0.0.0.0
    # ('\x00\x00\x00\x00') is truthy — this branch always runs; confirm
    # whether the intent was to test for a non-zero gateway.
    if gi_addr:
        self.log.debug('Reply via gateway: %s' % gi_str)
        buf[BOOTP_GIADDR] = socket.inet_aton(gi_str)
    # sname
    # NOTE(review): host_data is only bound in the "client needs its address"
    # branch above — this line would raise NameError on a renewal where
    # CIADDR was already set; confirm.
    buf[BOOTP_SNAME] = \
        '.'.join([host_data['hostname'], host_data['domain']])
    # file
    buf[BOOTP_FILE] = 'boot_file' in host_data and host_data[
        'boot_file'] or self.config.get_bootp_default_boot_file()
    if not dhcp_msg_type:
        self.log.warn('No DHCP message type found, discarding request')
        return
    if dhcp_msg_type == DHCP_DISCOVER:
        self.log.debug('DHCP DISCOVER')
        dhcp_reply = DHCP_OFFER
        self.log.info('Offering lease for MAC %s: IP %s' %
                      (mac_str, ip))
    elif dhcp_msg_type == DHCP_REQUEST:
        self.log.debug('DHCP REQUEST')
        dhcp_reply = DHCP_ACK
        self.log.info('New lease for MAC %s: IP %s' %
                      (mac_str, ip))
    elif dhcp_msg_type == DHCP_RELEASE:
        self.log.info('DHCP RELEASE')
        if not self.notify:
            return
    elif dhcp_msg_type == DHCP_INFORM:
        self.log.info('DHCP INFORM')
        return
    elif dhcp_msg_type == DHCP_DECLINE:
        self.log.debug('DHCP DECLINE')
        return
    else:
        self.log.error('Unmanaged DHCP message: %d' % dhcp_msg_type)
        return
    # Assemble the reply packet: fixed header + DHCP options.
    pkt = struct.pack(DHCPFormat, *buf)
    pkt += struct.pack('!BBB', DHCP_MSG, 1, dhcp_reply)
    #server = socket.inet_aton(server_addr)
    # FIXME: Hardcoded relay and netmask
    # Add something in lines of:
    # networks:
    #   10.40.13.160:
    #     netmask: 255.255.255.224
    #     gateway: 10.40.13.160
    #     dns: 10.40.1.80, 10.40.1.81
    server = socket.inet_aton('10.40.13.161')
    pkt += struct.pack('!BB4s', DHCP_SERVER, 4, server)
    #mask = socket.inet_aton(self.netconfig['mask'])
    mask = socket.inet_aton('255.255.255.224')
    pkt += struct.pack('!BB4s', DHCP_IP_MASK, 4, mask)
    pkt += struct.pack('!BB4s', DHCP_IP_GATEWAY, 4, server)
    # FIXME: Serving only default DNS for now
    dns = self.config.get_bootp_default_dns()
    if dns:
        if dns.lower() == 'auto':
            dns = self.get_dns_server() or socket.inet_ntoa(server)
        dns = socket.inet_aton(dns)
        pkt += struct.pack('!BB4s', DHCP_IP_DNS, 4, dns)
    pkt += struct.pack('!BBI', DHCP_LEASE_TIME, 4,
                       int(self.config.get_bootp_default_lease_time()))
    pkt += struct.pack('!BB', DHCP_END, 0)
    # do not attempt to produce a PXE-augmented response for
    # regular DHCP requests
    if pxe:
        extra_buf = self.build_pxe_options(options, server)
        if not extra_buf:
            return
    else:
        extra_buf = self.build_dhcp_options(hostname)
    # update the UUID cache
    if pxe:
        self.uuidpool[mac_addr] = uuid
    # send the response
    if gi_addr:
        sock.sendto(pkt + extra_buf, (gi_str, 67))
    else:
        sock.sendto(pkt + extra_buf, addr)
    # update the current state
    if currentstate != newstate:
        self.log.info('Moving from state %d to state %d' %
                      (currentstate, newstate))
        self.states[mac_str] = newstate
self.log.info("Filename for IP %s is '%s'" % (ip, filename)) self.filepool[ip] = filename else: self.log.debug('No filename defined for IP %s' % ip) pkt = struct.pack(DHCPFormat, *buf) pkt += struct.pack('!BBB', DHCP_MSG, 1, dhcp_reply) server = socket.inet_aton(server_addr) pkt += struct.pack('!BB4s', DHCP_SERVER, 4, server) mask = socket.inet_aton(self.config.get( self.bootp_section, 'netmask', self.netconfig['mask'])) pkt += struct.pack('!BB4s', DHCP_IP_MASK, 4, mask) if to_bool(self.config.get(self.bootp_section, 'set_gateway', True)): gateway = socket.inet_aton( self.config.get(self.bootp_section, 'gateway', server_addr)) pkt += struct.pack('!BB4s', DHCP_IP_GATEWAY, 4, gateway) dns = self.config.get(self.bootp_section, 'dns', None) if dns: if dns.lower() == 'auto': dns = self.get_dns_server() or socket.inet_ntoa(server) dns = socket.inet_aton(dns) pkt += struct.pack('!BB4s', DHCP_IP_DNS, 4, dns) pkt += struct.pack('!BBI', DHCP_LEASE_TIME, 4, int(self.config.get(self.bootp_section, 'lease_time', str(24*3600)))) pkt += struct.pack('!BB', DHCP_END, 0)
# Test bootstrap: project imports, a custom pytest marker and shared fixtures.
from db import db
from main import app
from tests.models import TestModel  # noqa
from tests.utils import MockAsyncDispatch
import util
from util.jsontools import load_json

import calc  # noqa

# Disable SQL echo during tests.
ECHO = False

import pytest

# * custom markers
# Skip tests marked @pytest.mark.cionly unless the CI env var is truthy.
# NOTE(review): `os` and `config` are not imported in this visible span —
# presumably imported earlier in the file; confirm.
pytest.mark.cionly = pytest.mark.skipif(
    not util.to_bool(os.getenv("CI")), reason="run on CI only",
)


@pytest.fixture(scope="session")
def celery_config():
    # Celery settings shared by the whole test session.
    return config.CeleryConfig.items()


@pytest.fixture(scope="session")
def celery_worker_parameters():
    # Worker parameters mirror the celery config.
    return config.CeleryConfig.items()


@pytest.fixture(scope="session")
def comment_exists(self, scope, id):
    """Return True when a comment row with the given id exists."""
    hits = execute_scalar(
        scope.get_handle(),
        "select count(id) from object_comment where id=%s", id)
    return util.to_bool(hits)
class BaseConfig:
    """Base configuration"""

    # Load environment overrides from .env before reading any settings.
    load_dotenv(".env")

    """ General """
    FLASK_ENV = os.getenv("FLASK_ENV", "development")
    DEFAULT_COLLECTION_INTERVAL = {"hours": 1}
    ENV_NAME = os.getenv("ENV_NAME", socket.gethostname())
    DEFAULT_PROJECTION = "wgs84"

    """ Sentry """
    SENTRY_ENABLED = to_bool(os.getenv("SENTRY_ENABLED"))
    SENTRY_DSN = os.getenv("SENTRY_DSN", None)
    SENTRY_LEVEL = os.getenv("SENTRY_LEVEL", 40)
    SENTRY_EVENT_LEVEL = os.getenv("SENTRY_EVENT_LEVEL", 40)
    SENTRY_ENV_NAME = os.getenv("SENTRY_ENV_NAME", ENV_NAME)
    SENTRY_RELEASE = f"{project}-{version}"

    """ Datadog """
    DATADOG_ENABLED = to_bool(os.getenv("DATADOG_ENABLED"))
    # Fall back to the DD_* variable names used by the Datadog agent.
    DATADOG_API_KEY = os.getenv("DATADOG_API_KEY", os.getenv("DD_API_KEY", None))
    DATADOG_APP_KEY = os.getenv("DATADOG_APP_KEY", os.getenv("DD_APP_KEY", None))
    DATADOG_DEFAULT_TAGS = {
        "environment": ENVIRONMENT_MAP.get(FLASK_ENV, FLASK_ENV),
        "service_name": project,
        "service_version": version,
    }

    """ Config """
    CONFIG_BASEPATH = "./config"
    # NOTE(review): the next assignment is immediately overwritten by the
    # hard-coded path below, so the COLLECTOR_CONFIG_PATH env var has no
    # effect — confirm which of the two lines is intended.
    COLLECTOR_CONFIG_PATH = abs_path(*os.getenv('COLLECTOR_CONFIG_PATH', CONFIG_BASEPATH+"/collector.yaml").rsplit("/", 1))  # type: ignore # noqa
    COLLECTOR_CONFIG_PATH = abs_path(CONFIG_BASEPATH, "collector.yaml")
    COLLECTOR_CONFIG = load_config(COLLECTOR_CONFIG_PATH)
    PARSER_CONFIG_PATH = abs_path(CONFIG_BASEPATH, "parsers.yaml")
    PARSER_CONFIG = load_config(PARSER_CONFIG_PATH)
    QUERY_PATH = abs_path(CONFIG_BASEPATH, "templates/queries")
    EXPORT_PATH = abs_path(CONFIG_BASEPATH, "templates/exports")

    """ Logging """
    LOG_LEVEL = os.getenv("LOG_LEVEL", 20)
    LOG_FORMAT = os.getenv("LOG_FORMAT", "funcname")
    WEB_LOG_REQUESTS = to_bool(os.getenv("WEB_LOG_REQUESTS", True))
    WEB_LOG_RESPONSES = to_bool(os.getenv("WEB_LOG_RESPONSES", True))
    WEB_LOG_SAMPLE_FRAC = float(os.getenv("WEB_LOG_SAMPLE_FRAC", 0.5))
    WEB_LOG_SLOW_RESPONSE_THRESHOLD = float(os.getenv("WEB_LOG_SLOW_RESPONSE_THRESHOLD", 3))  # seconds # noqa
    CELERY_LOG_LEVEL = os.getenv("CELERY_LOG_LEVEL", LOG_LEVEL)
    CELERY_LOG_FORMAT = os.getenv("CELERY_LOG_FORMAT", LOG_FORMAT)

    """ --------------- Database --------------- """
    DATABASE_DRIVER = os.getenv("DATABASE_DRIVER", "mongodb")
    DATABASE_USERNAME = os.getenv("DATABASE_USERNAME", None)
    DATABASE_PASSWORD = os.getenv("DATABASE_PASSWORD", None)
    DATABASE_HOST = os.getenv("DATABASE_HOST", "localhost")
    DATABASE_PORT = os.getenv("DATABASE_PORT", 27017)
    DATABASE_NAME = os.getenv("DATABASE_NAME", "default")
    DATABASE_AUTHENTICATION_SOURCE = "admin"
    DATABASE_URI = os.getenv("DATABASE_URI", None)
    REPLICA_SET = os.getenv("REPLICA_SET", None)
    # DATABASE_UUID_REPRESENTATION = "standard"
    # DATABASE_CONNECT = os.getenv("DATABASE_CONNECT", False)

    """ Celery """
    BROKER_URL = os.getenv("CELERY_BROKER_URL", "redis://localhost:6379/0")
    CELERY_TASK_LIST = ["celery_queue.tasks"]
    CELERYD_TASK_TIME_LIMIT = to_int(os.getenv(
        "CELERYD_TASK_TIME_LIMIT", 60 * 60 * 12
    ))  # 12 hours
    CELERY_TASK_SERIALIZER = "json"
    CELERY_ACCEPT_CONTENT = ["json"]
    # CELERYD_MAX_TASKS_PER_CHILD = to_int(os.getenv("CELERYD_MAX_TASKS_PER_CHILD", 1000))
    # CELERYD_MAX_MEMORY_PER_CHILD = to_int(os.getenv("CELERYD_MAX_MEMORY_PER_CHILD", 250000))  # 250MB # noqa
    CELERY_ENABLE_REMOTE_CONTROL = False  # required for sqs
    CELERY_SEND_EVENTS = False  # required for sqs
    CELERY_DEFAULT_QUEUE = f"{project}-default"  # sqs queue name
    CELERY_ROUTES = ("celery_queue.routers.hole_direction_router",)
    CELERY_TASK_CREATE_MISSING_QUEUES = to_bool(os.getenv(
        "CELERY_TASK_CREATE_MISSING_QUEUES", False
    ))
    CELERY_TASK_ALWAYS_EAGER = to_bool(os.getenv("CELERY_TASK_ALWAYS_EAGER", False))

    """ Celery Beat """
    CELERYBEAT_SCHEDULER = "redbeat.RedBeatScheduler"
    CELERYBEAT_LOAD_ENDPOINTS: bool = to_bool(os.getenv("CELERYBEAT_LOAD_ENDPOINTS", True))
    CELERYBEAT_MAX_LOOP_INTERVAL: int = to_int(os.getenv("CELERYBEAT_MAX_LOOP_INTERVAL", 30))
    REDBEAT_REDIS_URL = os.getenv("IHS_CRON_URL")
    REDBEAT_KEY_PREFIX = f"{project}:"
    REDBEAT_LOCK_TIMEOUT = CELERYBEAT_MAX_LOOP_INTERVAL * 5

    """ API """
    API_CLIENT_TYPE = os.getenv("IHS_CLIENT_TYPE", "legacy")
    API_BASE_URL = os.getenv("IHS_URL")
    API_USERNAME = os.getenv("IHS_USERNAME")
    API_PASSWORD = os.getenv("IHS_PASSWORD")
    API_APP_KEY = os.getenv("IHS_APP_KEY")
    API_SYNC_WINDOW_MINUTES = os.getenv("IHS_SYNC_WINDOW_MINUTES", 1440 * 7)
    API_HEADERS = {
        "Username": API_USERNAME,
        "Password": API_PASSWORD,
        "Application": API_APP_KEY,
    }
    API_WSDL_DIR = abs_path(CONFIG_BASEPATH, "wsdl")
    # WSDL paths are templates: {version} is substituted at client build time.
    API_WSDLS = {
        "session": abs_path(API_WSDL_DIR, "{version}/Session.wsdl"),
        "querybuilder": abs_path(API_WSDL_DIR, "{version}/QueryBuilder.wsdl"),
        "exportbuilder": abs_path(API_WSDL_DIR, "{version}/ExportBuilder.wsdl"),
    }
    API_DOMAIN = "US"
    TASK_BATCH_SIZE = to_int(os.getenv("IHS_TASK_BATCH_SIZE", 50))
    SIMULATE_EXPENSIVE_TASKS = to_bool(os.getenv("IHS_SIMULATE_EXPENSIVE_TASKS", False))

    @property
    def show(self):
        """List the public configuration attribute names."""
        return [x for x in dir(self) if not x.startswith("_")]

    @property
    def api_params(self):
        """API_* settings keyed without the prefix, lower-cased."""
        return {
            key.lower().replace("api_", ""): getattr(self, key)
            for key in dir(self)
            if key.startswith("API_")
        }

    @property
    def datadog_params(self):
        """DATADOG_* settings keyed without the prefix, lower-cased."""
        return {
            key.lower().replace("datadog_", ""): getattr(self, key)
            for key in dir(self)
            if key.startswith("DATADOG_")
        }

    @property
    def sentry_params(self):
        """SENTRY_* settings keyed without the prefix, lower-cased."""
        return {
            key.lower().replace("sentry_", ""): getattr(self, key)
            for key in dir(self)
            if key.startswith("SENTRY_")
        }

    @property
    def endpoints(self):
        return self.COLLECTOR_CONFIG.endpoints

    @property
    def functions(self):
        return self.COLLECTOR_CONFIG.functions

    @property
    def database_params(self):
        """DATABASE_* settings keyed without the prefix, lower-cased."""
        return {
            key.lower().replace("database_", ""): getattr(self, key)
            for key in dir(self)
            if key.startswith("DATABASE_")
        }

    def database_uri(self, hide_password=False, include_auth_source=True):
        """Assemble the MongoDB-style connection URI from the settings.

        :param hide_password: replace the password with ****** (for display)
        :param include_auth_source: append ?authSource=... when set
        """
        db = self.database_params
        username = db.get("username")
        password = "******" if hide_password else db.get("password")
        auth_source = (
            f"?authSource={db.get('authentication_source')}"
            if include_auth_source
            else ""
        )
        if self.REPLICA_SET:
            # Use '?' only if no query string was started by auth_source.
            replica_set = (
                f"?replicaSet={self.REPLICA_SET}"
                if not auth_source
                else f"&replicaSet={self.REPLICA_SET}"
            )
        else:
            replica_set = ""
        driver = db.get("driver", "")
        host = db.get("host", "")
        port = db.get("port", "")
        dbname = db.get("name", "")
        at = "@" if username else ""
        colon = ":" if username is not None else ""
        username = username or ""
        password = password or ""
        return f"{driver}://{username}{colon}{password}{at}{host}:{port}/{dbname}{auth_source}{replica_set}"  # noqa

    def __repr__(self):
        """ Print configuration summary """
        hr = "-" * shutil.get_terminal_size().columns + "\n"
        tpl = "{name:>25} {value:<50}\n"
        string = ""
        string += tpl.format(name="app config:", value=APP_SETTINGS)
        string += tpl.format(name="flask app:", value=FLASK_APP)
        string += tpl.format(name="flask env:", value=self.FLASK_ENV)
        string += tpl.format(
            name="backend:",
            value=self.database_uri(hide_password=True, include_auth_source=False),
        )
        string += tpl.format(name="broker:", value=self.BROKER_URL)
        # string += tpl.format(name="result broker:", value=self.CELERY_RESULT_BACKEND)
        string += tpl.format(name="collector:", value=self.API_BASE_URL)
        return hr + string + hr
def object_exists(self, scope, guid):
    """Return True when a non-deleted Object row with *guid* exists."""
    handle = scope.get_handle()
    count = execute_scalar(
        handle,
        "select count(guid) from Object where guid=%s and deleted=false",
        guid)
    return util.to_bool(count)
def is_locked(self, scope, guid):
    """Return the 'locked' flag of the Object identified by *guid* as a bool."""
    locked_value = execute_scalar(
        scope.get_handle(),
        "select locked from Object where guid=%s",
        guid)
    return util.to_bool(locked_value)
def __post__(self, env, guid, up):
    """Record a vote on *guid* for the current user, then return the updated voting state."""
    vote_value = util.to_bool(up)
    self.app.vote(self.username, guid, vote_value)
    return self.__get_voting__(guid)
def get(self, cursor=None, user_id=None, term=None, following_only=None,
        my_lat=None, my_lon=None, distance=None, own=None, quiet=None,
        trending=None, from_time=None, until_time=None, promotions=None,
        level=None, around_me=None, **kwargs):
    """Search/list venues for *user_id*, with several mutually exclusive modes.

    Exactly one filter branch is chosen, in priority order: name search
    (term), followed-only, owned/staffed, geo search (my_lat/my_lon/distance,
    optionally sorted by quiet/trending RSVP activity), around-me, or no
    filter at all.  Returns a list of row dicts.

    NOTE(review): several subqueries are built by concatenating request
    values (user_id, level, term-free branches) directly into SQL text --
    injection risk; should be parameterized like the geo placeholders.
    """
    # Subquery: is this venue followed by the requesting user?
    subqry = {'select': 'COUNT(id)', 'table': 'venue_followers',
              'where': ('user_id = ' + str(user_id), 'venue_id = venues.id')}
    # Subquery: redemption count per promotion (used for the maximum cap).
    red = {'select': 'COUNT(id)', 'table': 'promotion_redemptions',
           'where': 'promotion_id = promotions.id'}
    # Subquery: count of currently-active promotions visible at this level.
    promoqry = {'select': 'COUNT(id)', 'table': 'promotions',
                'where': ('venue_id = venues.id',
                          str(util.now()) + ' >= start',
                          '([end] = 0 OR [end] > ' + str(util.now()) + ')',
                          '(maximum = 0 OR (' + util.query(**red) + ') < maximum)',
                          level + ' >= level',
                          'hidden != 1')}
    # Subqueries: the requesting user's manager/staff relation to the venue.
    managerqry = {'select': 'COUNT(id)', 'table': 'venue_managers',
                  'where': ('user_id = ' + str(user_id), 'venue_id = venues.id')}
    staffqry = {'select': 'COUNT(id)', 'table': 'venue_staff',
                'where': ('user_id = ' + str(user_id), 'venue_id = venues.id')}
    staffppqry = {'select': 'SUM(promo_perm)', 'table': 'venue_staff',
                  'where': ('user_id = ' + str(user_id), 'venue_id = venues.id')}
    fields = ['id', 'name', 'address', 'country', 'phone', 'email',
              'email_verified', 'category_id', 'headline', 'tonight',
              'website', 'facebook', 'twitter', 'facebook_id', 'twitter_id',
              'twitter_token', 'twitter_secret', 'lat', 'lon', 'official',
              'verified', 'customer_spend', 'authenticated', 'creator',
              '(' + util.query(**managerqry) + ') AS manager',
              '(' + util.query(**staffqry) + ') AS staff',
              '(' + util.query(**staffppqry) + ') AS promo_perm',
              "(" + util.query(**subqry) + ") AS following",
              '(' + util.query(**promoqry) + ') AS promotions']
    order_by = ('name ASC',)
    if term:
        where = ("name LIKE ?",)
    elif util.to_bool(following_only):
        # NOTE(review): parenthesized string, not a 1-tuple (missing trailing
        # comma) -- unlike the other branches; confirm util.query accepts a
        # plain string here.
        where = ("(" + util.query(**subqry) + ") > 0")
    elif own:
        where = ('(' + util.query(**managerqry) + ') = 1 OR (' +
                 util.query(**staffqry) + ') = 1')
    elif my_lat and my_lon and distance:
        # RSVP-activity subqueries for quiet/trending sorting; each carries
        # two time placeholders (from_time/until_time).
        maybe = {'select': 'COUNT(id)', 'table': 'venue_rsvps',
                 'where': ('maybe = 1', 'venue_id = venues.id', 'going = 0',
                           'time >= ?', 'time < ?')}
        going = {'select': 'COUNT(id)', 'table': 'venue_rsvps',
                 'where': ('going = 1', 'venue_id = venues.id',
                           'time >= ?', 'time < ?')}
        if util.to_bool(quiet):
            # "maybe + going*2" score, ascending = quietest first.
            order_by = ('(' + util.query(**maybe) + ') + (' +
                        util.query(**going) + ') * 2 ASC',)
        elif util.to_bool(trending):
            order_by = ('(' + util.query(**maybe) + ') + (' +
                        util.query(**going) + ') * 2 DESC',)
        else:
            # Squared-distance sort (no sqrt needed for ordering).
            order_by = ('((lat - ?) * (lat - ?) + (lon - ?) * (lon - ?)) ASC',)
        # Squared-distance radius filter: dist^2 <= distance^2.
        where = ('((lat - ?) * (lat - ?) + (lon - ?) * (lon - ?)) <= ? * ?',)
        if util.to_bool(promotions):
            where += ('(' + util.query(**promoqry) + ') > 0',)
        elif util.to_bool(quiet) or util.to_bool(trending):
            # Cap activity-sorted result sets at 12 rows (SQL Server TOP).
            fields[0] = 'TOP(12) id'
    elif util.to_bool(around_me):
        # Venues with at least one recent (8 days = 691200 s), visible,
        # not-heavily-reported post sort first.
        psubqry = {'select': 'COUNT(id)', 'table': 'post_reports',
                   'where': ('post_id = posts.id')}
        post_count = {'select': 'CASE WHEN COUNT(id) > 0 THEN 1 ELSE 0 END',
                      'table': 'posts',
                      'where': ('posts.venue_id = venues.id', 'hidden = 0',
                                '(' + util.query(**psubqry) + ') < 3',
                                'time > ' + str(util.now() - 691200))}
        order_by = ('(' + util.query(**post_count) + ') DESC',) + order_by
        # NOTE(review): this branch never assigns `where`, so building `qry`
        # below raises NameError (or leaks a stale binding) when around_me
        # is the selected mode -- looks like a latent bug; confirm.
    else:
        where = ''
    qry = {'select': fields, 'table': 'venues', 'where': where,
           'order_by': order_by}
    if term:
        cursor.execute(util.query(**qry), ("%" + term + "%",))
        return [util.row_to_dict(cursor, row) for row in cursor]
    else:
        values = tuple()
        if my_lat and my_lon and distance:
            # WHERE placeholders: lat, lat, lon, lon, distance, distance.
            values += (float(my_lat), float(my_lat), float(my_lon),
                       float(my_lon), float(distance), float(distance))
            # to_bool returning None means the flag was absent entirely:
            # distance sort needs 4 more coords, activity sort needs the
            # 4 time bounds of the maybe/going subqueries.
            if util.to_bool(quiet) is None and util.to_bool(trending) is None:
                values += (float(my_lat), float(my_lat), float(my_lon),
                           float(my_lon))
            else:
                values += (from_time, until_time, from_time, until_time)
        cursor.execute(util.query(**qry), values)
        return [util.row_to_dict(cursor, row) for row in cursor]
def handle(self, sock, addr, data):
    """Process one inbound BOOTP/DHCP packet and send a reply when warranted.

    Python 2 only: raw packet bytes arrive as `str`, hence the `ord(x)`
    calls, and the `for x in a, b, c` comprehension syntax below.

    sock/addr/data -- receiving socket, sender address tuple, raw datagram.
    Side effects: mutates self.states, self.ippool, self.uuidpool and sends
    the reply via sock.sendto().
    """
    sender = addr
    self.log.info('Sender: %s on socket %s' % (addr, sock.getsockname()))
    if len(data) < DHCPFormatSize:
        # NOTE(review): no `return` here -- execution falls through and
        # struct.unpack below will raise on the short buffer; looks like a
        # missing early-return, confirm intent.
        self.log.error('Cannot be a DHCP or BOOTP request - too small!')
    tail = data[DHCPFormatSize:]
    buf = list(struct.unpack(DHCPFormat, data[:DHCPFormatSize]))
    if buf[BOOTP_OP] != BOOTREQUEST:
        self.log.warn('Not a BOOTREQUEST')
        return
    options = self.parse_options(tail)
    if options is None:
        self.log.warn('Error in option parsing, ignore request')
        return
    # Extras (DHCP options): option 53 is the DHCP message type.
    try:
        dhcp_msg_type = ord(options[53][0])
    except KeyError:
        dhcp_msg_type = None
    server_addr = self.netconfig['address']
    mac_addr = buf[BOOTP_CHADDR][:6]
    gi_addr = buf[BOOTP_GIADDR][:4]
    mac_str = ':'.join(['%02X' % ord(x) for x in mac_addr])
    gi_str = '.'.join(['%d' % ord(x) for x in gi_addr])
    self.log.debug("Gateway address: %s" % gi_str)
    # is the UUID received (PXE mode)
    if 97 in options and len(options[97]) == 17:
        uuid = options[97][1:]
        pxe = True
        self.log.info('PXE UUID has been received')
    # or retrieved from the cache (DHCP mode)
    else:
        uuid = self.uuidpool.get(mac_addr, None)
        pxe = False
        self.log.info('PXE UUID not present in request')
    # Format the 16-byte UUID as the canonical 8-4-4-4-12 hex string.
    uuid_str = uuid and ('%s-%s-%s-%s-%s' % \
        tuple([hexlify(x) for x in uuid[0:4], uuid[4:6], uuid[6:8],
               uuid[8:10], uuid[10:16]])).upper()
    if uuid_str:
        self.log.info('UUID is %s for MAC %s' % (uuid_str, mac_str))
    hostname = ''
    filename = ''
    # Basic state machine: IDLE -> PXE (on PXE DISCOVER) -> DHCP (on
    # non-PXE REQUEST); a PXE request while in DHCP means the board rebooted.
    currentstate = self.states.setdefault(mac_str, self.ST_IDLE)
    newstate = currentstate
    if currentstate == self.ST_IDLE:
        if pxe and (dhcp_msg_type == DHCP_DISCOVER):
            # BIOS is booting up, and try to locate a DHCP server
            newstate = self.ST_PXE
    elif currentstate == self.ST_PXE:
        if not pxe and (dhcp_msg_type == DHCP_REQUEST):
            # OS is booting up, and confirm a previous DHCP dicovery
            newstate = self.ST_DHCP
    else:  # currentstate == self.ST_DHCP
        if pxe:
            # OS was running but the BIOS is performing a DHCP request:
            # board has been restarted
            newstate = self.ST_PXE
    # if the state has not evolved from idle, there is nothing to do --
    # unless plain (non-PXE) DHCP service is explicitly allowed by config.
    if newstate == self.ST_IDLE:
        self.log.info('Request from %s ignored (idle state)' % mac_str)
        sdhcp = self.config.get_bootp_allow_simple_dhcp()
        simple_dhcp = sdhcp and to_bool(sdhcp)
        if not simple_dhcp:
            return
    # construct reply
    buf[BOOTP_OP] = BOOTREPLY
    self.log.info('Client IP: %s' % socket.inet_ntoa(buf[7]))
    if buf[BOOTP_CIADDR] == '\x00\x00\x00\x00':
        # Client has no address yet: allocate/reuse a lease from the pool.
        self.log.debug('Client needs its address')
        host_data = self.get_host_data_for_mac(mac_str)
        ipaddr = host_data['address']
        self.log.debug("IPADDR: {0}".format(ipaddr))
        if not ipaddr:
            self.log.error("Can't get IP address!")
            return
        ip = None
        if mac_str in self.ippool:
            ip = self.ippool[mac_str]
            self.log.info('Lease for MAC %s already defined as IP %s' % \
                          (mac_str, ip))
        else:
            self.ippool[mac_str] = ipaddr
            ip = ipaddr
        if not ip:
            #raise BootpError('No more IP available in definined pool')
            self.log.error("Can not find IP assigned to mac: %s" % mac_str)
            return
        # Reply to the subnet's broadcast address derived from ip & mask.
        mask = iptoint(self.netconfig['mask'])
        reply_broadcast = iptoint(ip) & mask
        reply_broadcast |= (~mask)&((1<<32)-1)
        buf[BOOTP_YIADDR] = socket.inet_aton(ip)
        buf[BOOTP_SECS] = 0
        buf[BOOTP_FLAGS] = BOOTP_FLAGS_NONE
        addr = (inttoip(reply_broadcast), addr[1])
        self.log.debug('Reply to: %s:%s' % addr)
    else:
        # Client already knows its address: echo it back.
        # NOTE(review): this path never sets host_data, yet host_data is
        # read unconditionally below for SNAME/FILE -- NameError; confirm.
        buf[BOOTP_YIADDR] = buf[BOOTP_CIADDR]
        ip = socket.inet_ntoa(buf[BOOTP_YIADDR])
    buf[BOOTP_SIADDR] = socket.inet_aton(server_addr)
    if gi_addr:
        self.log.debug('Reply via gateway: %s' % gi_str)
        buf[BOOTP_GIADDR] = socket.inet_aton(gi_str)
    # sname: fully-qualified host name for the client.
    buf[BOOTP_SNAME] = \
        '.'.join([host_data['hostname'], host_data['domain']])
    # file: per-host boot file, falling back to the configured default.
    buf[BOOTP_FILE] = 'boot_file' in host_data and host_data['boot_file'] \
        or self.config.get_bootp_default_boot_file()
    if not dhcp_msg_type:
        self.log.warn('No DHCP message type found, discarding request')
        return
    if dhcp_msg_type == DHCP_DISCOVER:
        self.log.debug('DHCP DISCOVER')
        dhcp_reply = DHCP_OFFER
        self.log.info('Offering lease for MAC %s: IP %s' % \
                      (mac_str, ip))
    elif dhcp_msg_type == DHCP_REQUEST:
        self.log.debug('DHCP REQUEST')
        dhcp_reply = DHCP_ACK
        self.log.info('New lease for MAC %s: IP %s' % \
                      (mac_str, ip))
    elif dhcp_msg_type == DHCP_RELEASE:
        self.log.info('DHCP RELEASE')
        if not self.notify:
            return
    elif dhcp_msg_type == DHCP_INFORM:
        self.log.info('DHCP INFORM')
        return
    elif dhcp_msg_type == DHCP_DECLINE:
        self.log.debug('DHCP DECLINE')
        return
    else:
        self.log.error('Unmanaged DHCP message: %d' % dhcp_msg_type)
        return
    # Serialize the BOOTP header, then append DHCP options (TLV encoded).
    pkt = struct.pack(DHCPFormat, *buf)
    pkt += struct.pack('!BBB', DHCP_MSG, 1, dhcp_reply)
    #server = socket.inet_aton(server_addr)
    # FIXME: Hardcoded relay and netmask
    # Add something in lines of:
    # networks:
    #   10.40.13.160:
    #     netmask: 255.255.255.224
    #     gateway: 10.40.13.160
    #     dns: 10.40.1.80, 10.40.1.81
    server = socket.inet_aton('10.40.13.161')
    pkt += struct.pack('!BB4s', DHCP_SERVER, 4, server)
    #mask = socket.inet_aton(self.netconfig['mask'])
    mask = socket.inet_aton('255.255.255.224')
    pkt += struct.pack('!BB4s', DHCP_IP_MASK, 4, mask)
    pkt += struct.pack('!BB4s', DHCP_IP_GATEWAY, 4, server)
    # FIXME: Serving only default DNS for now
    dns = self.config.get_bootp_default_dns()
    if dns:
        if dns.lower() == 'auto':
            dns = self.get_dns_server() or socket.inet_ntoa(server)
        dns = socket.inet_aton(dns)
        pkt += struct.pack('!BB4s', DHCP_IP_DNS, 4, dns)
    pkt += struct.pack('!BBI', DHCP_LEASE_TIME, 4,
                       int(self.config.get_bootp_default_lease_time()))
    pkt += struct.pack('!BB', DHCP_END, 0)
    # do not attempt to produce a PXE-augmented response for
    # regular DHCP requests
    if pxe:
        extra_buf = self.build_pxe_options(options, server)
        if not extra_buf:
            return
    else:
        extra_buf = self.build_dhcp_options(hostname)
    # update the UUID cache
    if pxe:
        self.uuidpool[mac_addr] = uuid
    # send the response: via the relay agent (port 67) when one is present,
    # else directly to the (possibly broadcast-rewritten) sender address.
    if gi_addr:
        sock.sendto(pkt + extra_buf, (gi_str, 67))
    else:
        sock.sendto(pkt + extra_buf, addr)
    # update the current state
    if currentstate != newstate:
        self.log.info('Moving from state %d to state %d' % \
                      (currentstate, newstate))
        self.states[mac_str] = newstate
class BaseConfig:
    """Base configuration

    All values are sourced from environment variables with sensible
    defaults, so subclasses/deployments override via the environment.
    """

    FLASK_ENV = os.getenv("FLASK_ENV", "development")
    ENV_NAME = os.getenv("ENV_NAME", socket.gethostname())

    """ Datadog """
    DATADOG_ENABLED = to_bool(os.getenv("DATADOG_ENABLED", False))
    DATADOG_API_KEY = os.getenv("DATADOG_API_KEY", None)
    DATADOG_APP_KEY = os.getenv("DATADOG_APP_KEY", None)

    """ General """
    CONFIG_BASEPATH = "./config"

    """ Collector """
    COLLECTOR_CONFIG_PATH = make_config_path(CONFIG_BASEPATH, "collector.yaml")
    COLLECTOR_CONFIG = load_config(COLLECTOR_CONFIG_PATH)
    COLLECTOR_BASE_URL = os.getenv("FRACFOCUS_BASE_URL",
                                   "http://fracfocusdata.org")
    COLLECTOR_URL_PATH = os.getenv("FRACFOCUS_URL_PATH",
                                   "/digitaldownload/FracFocusCSV.zip")
    COLLECTOR_DOWNLOAD_PATH = os.getenv("FRACFOCUS_DOWNLOAD_PATH",
                                        "/tmp/fracfocus")
    # Rows written per batch when persisting downloaded records.
    COLLECTOR_WRITE_SIZE = int(os.getenv("FRACFOCUS_WRITE_SIZE", "10000"))
    COLLECTOR_FILE_PREFIX = os.getenv("FRACFOCUS_FILE_PREFIX",
                                      "FracFocusRegistry")

    """ Parser """
    PARSER_CONFIG_PATH = abs_path(CONFIG_BASEPATH, "parsers.yaml")
    PARSER_CONFIG = load_config(PARSER_CONFIG_PATH)

    """ Logging """
    # NOTE(review): default is the int logging.INFO but an env override
    # arrives as a string -- downstream must handle both; confirm.
    LOG_LEVEL = os.getenv("LOG_LEVEL", logging.INFO)
    LOG_FORMAT = os.getenv("LOG_FORMAT", "json")
    WEB_LOG_REQUESTS = to_bool(os.getenv("WEB_LOG_REQUESTS", True))
    WEB_LOG_RESPONSES = to_bool(os.getenv("WEB_LOG_RESPONSES", True))
    # Fraction of requests to sample for logging.
    WEB_LOG_SAMPLE_FRAC = float(os.getenv("WEB_LOG_SAMPLE_FRAC", 0.5))
    WEB_LOG_SLOW_RESPONSE_THRESHOLD = float(
        os.getenv("WEB_LOG_SLOW_RESPONSE_THRESHOLD", 3))  # seconds # noqa

    """ --------------- Sqlalchemy --------------- """
    DATABASE_DIALECT = os.getenv("DATABASE_DIALECT", "postgres")
    DATABASE_DRIVER = os.getenv("DATABASE_DRIVER",
                                get_default_driver(DATABASE_DIALECT))
    DATABASE_USERNAME = os.getenv("DATABASE_USERNAME", "")
    DATABASE_PASSWORD = os.getenv("DATABASE_PASSWORD", "")
    DATABASE_HOST = os.getenv("DATABASE_HOST", "localhost")
    DATABASE_PORT = os.getenv("DATABASE_PORT",
                              get_default_port(DATABASE_DRIVER))
    DATABASE_SCHEMA = os.getenv("DATABASE_SCHEMA", "fracfocus")
    DATABASE_NAME = os.getenv("DATABASE_NAME", "fracfocus")
    # Keyword form consumed by sqlalchemy's make_url().
    DATABASE_URL_PARAMS = {
        "drivername": DATABASE_DRIVER,
        "username": DATABASE_USERNAME,
        "password": DATABASE_PASSWORD,
        "host": DATABASE_HOST,
        "port": DATABASE_PORT,
        "database": DATABASE_NAME,
    }
    SQLALCHEMY_DATABASE_URI = str(make_url(DATABASE_URL_PARAMS))
    # Columns excluded by default when diffing/serializing models.
    DEFAULT_EXCLUSIONS = ["updated_at", "inserted_at"]

    @property
    def show(self):
        """Return every public (non-underscore) attribute name of this config."""
        return [x for x in dir(self) if not x.startswith("_")]

    @property
    def collector_params(self):
        return self.get_params_by_prefix("collector")

    @property
    def datadog_params(self):
        return self.get_params_by_prefix("datadog")

    @property
    def endpoints(self):
        # Delegate to the loaded collector YAML config.
        return self.COLLECTOR_CONFIG.endpoints

    def __repr__(self):
        """ Print noteworthy configuration items """
        # APP_SETTINGS / FLASK_APP are module-level values defined elsewhere
        # in this file.
        hr = "-" * shutil.get_terminal_size().columns + "\n"
        tpl = "{name:>25} {value:<50}\n"
        string = ""
        string += tpl.format(name="app config:", value=APP_SETTINGS)
        string += tpl.format(name="flask app:", value=FLASK_APP)
        string += tpl.format(name="flask env:", value=self.FLASK_ENV)
        string += tpl.format(name="backend:", value=make_url(
            self.DATABASE_URL_PARAMS).__repr__())
        string += tpl.format(name="collector:", value=self.COLLECTOR_BASE_URL)
        return hr + string + hr

    def get_params_by_prefix(self, kw: str):
        """ Return all parameters that begin with the given string.

            Example: kw = "collector"

                Returns:
                    {
                        "base_url": "example.com/api",
                        "path": "path/to/data",
                        "endpoints": {...}
                    }
        """
        if not kw.endswith("_"):
            kw = kw + "_"
        return {
            key.lower().replace(kw.lower(), ""): getattr(self, key)
            for key in dir(self)
            if key.startswith(kw.upper())
        }
def handle(self, sock, addr, data):
    """Process one inbound BOOTP/DHCP packet: parse, run the PXE/DHCP state
    machine, then consult the (optional) remote access-control service.

    Python 2 only: raw packet bytes arrive as `str` (hence `ord(x)`),
    `except X, e` syntax, urllib2/urlparse/httplib imports.

    NOTE(review): this snippet ends after the access-control section; the
    reply-construction half of the handler is not visible in this view.
    """
    self.log.info('Sender: %s on socket %s' % (addr, sock.getsockname()))
    if len(data) < DHCPFormatSize:
        # NOTE(review): no `return` here -- struct.unpack below will raise
        # on the short buffer; looks like a missing early-return, confirm.
        self.log.error('Cannot be a DHCP or BOOTP request - too small!')
    tail = data[DHCPFormatSize:]
    buf = list(struct.unpack(DHCPFormat, data[:DHCPFormatSize]))
    if buf[BOOTP_OP] != BOOTREQUEST:
        self.log.warn('Not a BOOTREQUEST')
        return
    options = self.parse_options(tail)
    if options is None:
        self.log.warn('Error in option parsing, ignore request')
        return
    # Extras (DHCP options): option 53 is the DHCP message type.
    try:
        dhcp_msg_type = ord(options[53][0])
    except KeyError:
        dhcp_msg_type = None
    server_addr = self.netconfig['server']
    mac_addr = buf[BOOTP_CHADDR][:6]
    mac_str = '-'.join(['%02X' % ord(x) for x in mac_addr])
    # is the UUID received (PXE mode)
    if 97 in options and len(options[97]) == 17:
        uuid = options[97][1:]
        pxe = True
        self.log.info('PXE UUID has been received')
    # or retrieved from the cache (DHCP mode)
    else:
        uuid = self.uuidpool.get(mac_addr, None)
        pxe = False
        self.log.info('PXE UUID not present in request')
    # Format the 16-byte UUID as the canonical 8-4-4-4-12 hex string.
    uuid_str = uuid and ('%s-%s-%s-%s-%s' % \
        tuple([hexlify(x) for x in uuid[0:4], uuid[4:6], uuid[6:8],
               uuid[8:10], uuid[10:16]])).upper()
    if uuid_str:
        self.log.info('UUID is %s for MAC %s' % (uuid_str, mac_str))
    hostname = ''
    filename = ''
    # Basic state machine: IDLE -> PXE (on PXE DISCOVER) -> DHCP (on
    # non-PXE REQUEST); a PXE request while in DHCP means a reboot.
    currentstate = self.states.setdefault(mac_str, self.ST_IDLE)
    newstate = currentstate
    if currentstate == self.ST_IDLE:
        if pxe and (dhcp_msg_type == DHCP_DISCOVER):
            # BIOS is booting up, and try to locate a DHCP server
            newstate = self.ST_PXE
    elif currentstate == self.ST_PXE:
        if not pxe and (dhcp_msg_type == DHCP_REQUEST):
            # OS is booting up, and confirm a previous DHCP dicovery
            newstate = self.ST_DHCP
    else:  # currentstate == self.ST_DHCP
        if pxe:
            # OS was running but the BIOS is performing a DHCP request:
            # board has been restarted
            newstate = self.ST_PXE
    # if the state has not evolved from idle, there is nothing to do --
    # unless plain (non-PXE) DHCP service is enabled in the bootp section.
    if newstate == self.ST_IDLE:
        self.log.info('Request from %s ignored (idle state)' % mac_str)
        sdhcp = 'allow_simple_dhcp'
        simple_dhcp = \
            self.config.has_option(self.bootp_section, sdhcp) and \
            to_bool(self.config.get(self.bootp_section, sdhcp))
        if not simple_dhcp:
            return
    # if access control is enable
    if self.access:
        # remote access is always validated on each request
        if self.access in self.ACCESS_REMOTE:
            # need to query a host to grant or reject access
            import urlparse
            import urllib
            netloc = self.config.get(self.access, 'location')
            path = self.config.get(self.access, pxe and 'pxe' or 'dhcp')
            timeout = int(self.config.get(self.access, 'timeout', '5'))
            parameters = {'mac': mac_str}
            if uuid:
                parameters['uuid'] = uuid_str
            if not pxe and mac_str in self.ippool:
                parameters['ip'] = self.ippool[mac_str]
            item = uuid_str or mac_str
            # only bother the authentication host when a state change is
            # required.
            if currentstate != newstate:
                query = urllib.urlencode(parameters)
                urlparts = (self.access, netloc, path, query, '')
                #print "\n"
                #print "urlpart is ", urlparts
                #print "\n"
                url = urlparse.urlunsplit(urlparts)
                self.log.info('Requesting URL: %s' % url)
                import urllib2
                import httplib
                try:
                    #up = urllib2.urlopen(url, timeout=timeout)
                    # Explicit empty ProxyHandler: bypass any environment
                    # proxy settings for this internal request.
                    proxy_handler = urllib2.ProxyHandler({})
                    opener = urllib2.build_opener(proxy_handler)
                    urllib2.install_opener(opener)
                    up = urllib2.urlopen(url, timeout=timeout)
                    for l in up:
                        try:
                            # Look for extra definition within the reply
                            k, v = [x.strip() for x in l.split(':')]
                            k = k.lower()
                            if k == 'client':
                                hostname = v
                            if k == 'file':
                                filename = v
                        except ValueError:
                            pass
                except urllib2.HTTPError, e:
                    # Access denied / server error: fall back to idle.
                    self.log.error('HTTP Error: %s' % str(e))
                    self.states[mac_str] = self.ST_IDLE
                    return
                except urllib2.URLError, e:
                    self.log.critical('Internal error: %s' % str(e))
                    self.states[mac_str] = self.ST_IDLE
                    return
                except httplib.HTTPException, e:
                    self.log.error('Server error: %s' % type(e))
                    self.states[mac_str] = self.ST_IDLE
                    return
def object_exists(self, scope, guid):
    """True when an Object row with *guid* exists and is not flagged deleted."""
    sql = "select count(guid) from Object where guid=%s and deleted=false"
    return util.to_bool(execute_scalar(scope.get_handle(), sql, guid))
def handle(self, sock, addr, data):
    """Process one inbound BOOTP/DHCP packet: parse, run the PXE/DHCP state
    machine, then consult the (optional) remote access-control service.

    Python 2 only: raw packet bytes arrive as `str` (hence `ord(x)`),
    `except X, e` syntax, urllib2/urlparse/httplib imports.

    NOTE(review): near-duplicate of the other handle() in this file; the
    reply-construction half of the handler is not visible in this view.
    """
    self.log.info('Sender: %s on socket %s' % (addr, sock.getsockname()))
    if len(data) < DHCPFormatSize:
        # NOTE(review): no `return` here -- struct.unpack below will raise
        # on the short buffer; looks like a missing early-return, confirm.
        self.log.error('Cannot be a DHCP or BOOTP request - too small!')
    tail = data[DHCPFormatSize:]
    buf = list(struct.unpack(DHCPFormat, data[:DHCPFormatSize]))
    if buf[BOOTP_OP] != BOOTREQUEST:
        self.log.warn('Not a BOOTREQUEST')
        return
    options = self.parse_options(tail)
    if options is None:
        self.log.warn('Error in option parsing, ignore request')
        return
    # Extras (DHCP options): option 53 is the DHCP message type.
    try:
        dhcp_msg_type = ord(options[53][0])
    except KeyError:
        dhcp_msg_type = None
    server_addr = self.netconfig['server']
    mac_addr = buf[BOOTP_CHADDR][:6]
    mac_str = '-'.join(['%02X' % ord(x) for x in mac_addr])
    # is the UUID received (PXE mode)
    if 97 in options and len(options[97]) == 17:
        uuid = options[97][1:]
        pxe = True
        self.log.info('PXE UUID has been received')
    # or retrieved from the cache (DHCP mode)
    else:
        uuid = self.uuidpool.get(mac_addr, None)
        pxe = False
        self.log.info('PXE UUID not present in request')
    # Format the 16-byte UUID as the canonical 8-4-4-4-12 hex string.
    uuid_str = uuid and ('%s-%s-%s-%s-%s' % \
        tuple([hexlify(x) for x in uuid[0:4], uuid[4:6], uuid[6:8],
               uuid[8:10], uuid[10:16]])).upper()
    if uuid_str:
        self.log.info('UUID is %s for MAC %s' % (uuid_str, mac_str))
    hostname = ''
    filename = ''
    # Basic state machine: IDLE -> PXE (on PXE DISCOVER) -> DHCP (on
    # non-PXE REQUEST); a PXE request while in DHCP means a reboot.
    currentstate = self.states.setdefault(mac_str, self.ST_IDLE)
    newstate = currentstate
    if currentstate == self.ST_IDLE:
        if pxe and (dhcp_msg_type == DHCP_DISCOVER):
            # BIOS is booting up, and try to locate a DHCP server
            newstate = self.ST_PXE
    elif currentstate == self.ST_PXE:
        if not pxe and (dhcp_msg_type == DHCP_REQUEST):
            # OS is booting up, and confirm a previous DHCP dicovery
            newstate = self.ST_DHCP
    else:  # currentstate == self.ST_DHCP
        if pxe:
            # OS was running but the BIOS is performing a DHCP request:
            # board has been restarted
            newstate = self.ST_PXE
    # if the state has not evolved from idle, there is nothing to do --
    # unless plain (non-PXE) DHCP service is enabled in the bootp section.
    if newstate == self.ST_IDLE:
        self.log.info('Request from %s ignored (idle state)' % mac_str)
        sdhcp = 'allow_simple_dhcp'
        simple_dhcp = \
            self.config.has_option(self.bootp_section, sdhcp) and \
            to_bool(self.config.get(self.bootp_section, sdhcp))
        if not simple_dhcp:
            return
    # if access control is enable
    if self.access:
        # remote access is always validated on each request
        if self.access in self.ACCESS_REMOTE:
            # need to query a host to grant or reject access
            import urlparse
            import urllib
            netloc = self.config.get(self.access, 'location')
            path = self.config.get(self.access, pxe and 'pxe' or 'dhcp')
            timeout = int(self.config.get(self.access, 'timeout', '5'))
            parameters = {'mac' : mac_str}
            if uuid:
                parameters['uuid'] = uuid_str
            if not pxe and mac_str in self.ippool:
                parameters['ip'] = self.ippool[mac_str]
            item = uuid_str or mac_str
            # only bother the authentication host when a state change is
            # required.
            if currentstate != newstate:
                query = urllib.urlencode(parameters)
                urlparts = (self.access, netloc, path, query, '')
                #print "\n"
                #print "urlpart is ", urlparts
                #print "\n"
                url = urlparse.urlunsplit(urlparts)
                self.log.info('Requesting URL: %s' % url)
                import urllib2
                import httplib
                try:
                    #up = urllib2.urlopen(url, timeout=timeout)
                    # Explicit empty ProxyHandler: bypass any environment
                    # proxy settings for this internal request.
                    proxy_handler = urllib2.ProxyHandler({})
                    opener = urllib2.build_opener(proxy_handler)
                    urllib2.install_opener(opener)
                    up = urllib2.urlopen(url, timeout=timeout)
                    for l in up:
                        try:
                            # Look for extra definition within the reply
                            k, v = [x.strip() for x in l.split(':')]
                            k = k.lower()
                            if k == 'client':
                                hostname = v
                            if k == 'file':
                                filename = v
                        except ValueError:
                            pass
                except urllib2.HTTPError, e:
                    # Access denied / server error: fall back to idle.
                    self.log.error('HTTP Error: %s' % str(e))
                    self.states[mac_str] = self.ST_IDLE
                    return
                except urllib2.URLError, e:
                    self.log.critical('Internal error: %s' % str(e))
                    self.states[mac_str] = self.ST_IDLE
                    return
                except httplib.HTTPException, e:
                    self.log.error('Server error: %s' % type(e))
                    self.states[mac_str] = self.ST_IDLE
                    return
def comment_exists(self, scope, id):
    """Return True when an object_comment row with the given *id* exists."""
    sql = "select count(id) from object_comment where id=%s"
    found = execute_scalar(scope.get_handle(), sql, id)
    return util.to_bool(found)
def build_learner(agent, agent_state, env_outputs, agent_outputs,
                  learning_rate, gradients_buffer=None,
                  episode_finished_mask=None, torso_variables=None,
                  recurrent_variables=None):
    """Build the TF1 learner graph: unroll the agent, compute actor-critic
    losses plus spiking rate/voltage regularizers, and wire up the update op.

    agent -- model with .unroll() and a .core exposing beta/variable_list.
    agent_state -- initial recurrent state for the unroll.
    env_outputs/agent_outputs -- time-major trajectories (reward, done,
        info / action, policy_logits, baseline).
    learning_rate -- scalar for both Adam optimizers.
    gradients_buffer -- optional (grad_holder, var) pairs; when given,
        gradients are accumulated into the buffer instead of being applied.
    episode_finished_mask -- optional per-episode mask excluding finished
        episodes from the losses.
    torso_variables/recurrent_variables -- variable partitions optimized by
        the CNN and recurrent optimizers respectively.

    Returns ((done, infos, frame-counter-update op, analysis tensors),
    optimizer, grads_and_vars).
    """
    analysis_tensors = dict()
    learner_outputs, final_agent_state, custom_rnn_output, torso_outputs = agent.unroll(
        agent_outputs.action, env_outputs, agent_state,
        write_to_collection=True)
    # custom_rnn_output[1][..., 0] is the membrane voltage, [..., 1] the
    # adaptation variable scaling the threshold -- TODO confirm against the
    # core's output spec.
    rnn_v = custom_rnn_output[1][..., 0]
    rnn_thr = FLAGS.thr + agent.core.beta * custom_rnn_output[1][..., 1]
    # Penalize voltage excursions beyond +/- threshold.
    rnn_pos = tf.nn.relu(rnn_v - rnn_thr)
    rnn_neg = tf.nn.relu(-rnn_v - rnn_thr)
    voltage_reg_rnn = tf.reduce_sum(tf.reduce_mean(tf.square(rnn_pos), 1))
    voltage_reg_rnn += tf.reduce_sum(tf.reduce_mean(tf.square(rnn_neg), 1))
    # Regularize firing rates toward a 2% target.
    rnn_rate = tf.reduce_mean(custom_rnn_output[0], (0, 1))
    rnn_mean_rate = tf.reduce_mean(rnn_rate)
    analysis_tensors['rnn_rate'] = rnn_mean_rate
    rate_loss = tf.reduce_sum(tf.square(rnn_rate - .02)) * 1.
    # Torso (CNN) activations published by agent.unroll via a collection.
    torso_from_collection = tf.get_collection('torso_output')[-1]
    conv1_z = torso_from_collection['c1_z']
    conv2_z = torso_from_collection['c2_z']
    lin_z = torso_from_collection['lin_z']
    conv_1_rate = tf.reduce_mean(conv1_z, (0, 1))
    conv_2_rate = tf.reduce_mean(conv2_z, (0, 1))
    linear_rate = tf.reduce_mean(lin_z, (0, 1))
    mean_conv_1_rate = tf.reduce_mean(conv_1_rate)
    mean_conv_2_rate = tf.reduce_mean(conv_2_rate)
    mean_linear_rate = tf.reduce_mean(linear_rate)
    analysis_tensors['conv1_z'] = conv1_z
    analysis_tensors['conv2_z'] = conv2_z
    analysis_tensors['conv1_rate'] = mean_conv_1_rate
    analysis_tensors['conv2_rate'] = mean_conv_2_rate
    analysis_tensors['linear_rate'] = mean_linear_rate
    conv1_v = torso_from_collection['c1_act']
    conv2_v = torso_from_collection['c2_act']
    analysis_tensors['conv1_v'] = conv1_v
    analysis_tensors['conv2_v'] = conv2_v
    # CNN voltage regularizer, two reduction flavours selected by flag.
    # NOTE(review): `voltage_reg` stays unbound if voltage_reg_method is
    # neither 'avg_all' nor 'avg_time' -- `+=` below would then raise.
    conv_pos = tf.nn.relu(conv1_v - FLAGS.thr_scnn)
    conv_neg = tf.nn.relu(-conv1_v - FLAGS.thr_scnn)
    if FLAGS.voltage_reg_method == 'avg_all':
        voltage_reg = tf.reduce_sum(tf.square(tf.reduce_mean(conv_pos, (0, 1))))
        voltage_reg += tf.reduce_sum(
            tf.square(tf.reduce_mean(conv_neg, (0, 1))))
    elif FLAGS.voltage_reg_method == 'avg_time':
        voltage_reg = tf.reduce_sum(
            tf.reduce_mean(tf.square(tf.reduce_mean(conv_pos, 1)), 0))
        voltage_reg += tf.reduce_sum(
            tf.reduce_mean(tf.square(tf.reduce_mean(conv_neg, 1)), 0))
    # Same penalty for the second conv layer.
    conv_pos = tf.nn.relu(conv2_v - FLAGS.thr_scnn)
    conv_neg = tf.nn.relu(-conv2_v - FLAGS.thr_scnn)
    if FLAGS.voltage_reg_method == 'avg_all':
        voltage_reg += tf.reduce_sum(
            tf.square(tf.reduce_mean(conv_pos, (0, 1))))
        voltage_reg += tf.reduce_sum(
            tf.square(tf.reduce_mean(conv_neg, (0, 1))))
    elif FLAGS.voltage_reg_method == 'avg_time':
        voltage_reg += tf.reduce_sum(
            tf.reduce_mean(tf.square(tf.reduce_mean(conv_pos, 1)), 0))
        voltage_reg += tf.reduce_sum(
            tf.reduce_mean(tf.square(tf.reduce_mean(conv_neg, 1)), 0))
    # Combined regularization term.
    reg_loss = rate_loss * FLAGS.rate_cost
    reg_loss += FLAGS.voltage_cost_rnn * voltage_reg_rnn
    reg_loss += FLAGS.voltage_cost_cnn * voltage_reg
    # Shift trajectories: learner predictions at t are scored against
    # env outcomes at t+1; last baseline bootstraps the return.
    bootstrap_value = learner_outputs.baseline[-1]
    agent_outputs = nest.map_structure(lambda t: t[1:], agent_outputs)
    rewards = env_outputs.reward[1:]
    infos = nest.map_structure(lambda t: t[1:], env_outputs.info)
    done = to_bool(tf.cast(env_outputs.done, tf.int64)[1:])
    learner_outputs = nest.map_structure(lambda t: t[:-1], learner_outputs)
    discounts = tf.to_float(~done) * FLAGS.discounting

    def scan_fun(_accumulator, _input):
        # Discounted-return recursion, applied backwards over time.
        _discount, _reward = _input
        return _reward + _discount * _accumulator

    value_targets = tf.stop_gradient(
        tf.scan(scan_fun, (discounts, rewards),
                initializer=bootstrap_value, reverse=True))
    adv = value_targets - learner_outputs.baseline
    # Mask out timesteps after an episode boundary when an explicit
    # finished-episode mask is supplied.
    shifted_done = tf.concat(
        (tf.cast(done[1:], tf.float32), tf.zeros((1, tf.shape(done)[1]))), 0)
    if episode_finished_mask is not None:
        finished_mask = (1 - tf.cumsum(shifted_done, 0)) * (
            1 - tf.cast(episode_finished_mask, tf.float32))[None]
    else:
        finished_mask = None
    pg_loss = compute_policy_gradient_loss(learner_outputs.policy_logits,
                                           agent_outputs.action, adv,
                                           mask=finished_mask)
    value_loss = FLAGS.baseline_cost * compute_baseline_loss(
        value_targets - learner_outputs.baseline, mask=finished_mask)
    entropy_loss = FLAGS.entropy_cost * compute_entropy_loss(
        learner_outputs.policy_logits, mask=finished_mask)
    p_actions = tf.nn.softmax(learner_outputs.policy_logits)
    loss_per_timestep = pg_loss + value_loss + entropy_loss
    total_loss = tf.reduce_sum(loss_per_timestep)
    total_loss += reg_loss
    # CNN-specific loss: entropy term dropped, RL part scaled down.
    total_loss_for_cnn = tf.reduce_sum(
        pg_loss + value_loss) / FLAGS.reg_factor_cnn + reg_loss
    num_env_frames = tf.train.get_global_step()
    # Separate optimizers for recurrent and torso variable partitions.
    optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=FLAGS.epsilon)
    cnn_optimizer = tf.train.AdamOptimizer(learning_rate,
                                           epsilon=FLAGS.epsilon)
    normal_grad_norm = tf.zeros(())
    elig_grad_norm = tf.zeros(())
    rl_elig_grad_norm = tf.zeros(())
    recurrent_grads_and_vars = optimizer.compute_gradients(
        total_loss, var_list=recurrent_variables)
    if FLAGS.ba_cnn or FLAGS.avg_ba_cnn:
        torso_grads_and_vars = cnn_optimizer.compute_gradients(
            total_loss_for_cnn, var_list=torso_variables)
    else:
        torso_grads_and_vars = cnn_optimizer.compute_gradients(
            total_loss, var_list=torso_variables)
    grads_and_vars = [*recurrent_grads_and_vars, *torso_grads_and_vars]
    # Track the squared-gradient norm of the core's "eligibility" variables
    # for diagnostics.
    eligibility_variables = agent.core.variable_list
    for i, (g, v) in enumerate(grads_and_vars):
        for ve in eligibility_variables:
            if ve == v:
                normal_grad_norm += tf.reduce_sum(tf.square(g))
    if gradients_buffer is not None:
        # Accumulate gradients into the caller-provided buffer rather than
        # applying them (delayed/grouped updates).
        add_ops = []
        for g, v in grads_and_vars:
            for g_holder, v_holder in gradients_buffer:
                if v == v_holder:
                    add_ops.append(tf.assign_add(g_holder, g))
        train_op = tf.group(*add_ops)
    else:
        train_op = tf.group(
            optimizer.apply_gradients(recurrent_grads_and_vars),
            cnn_optimizer.apply_gradients(torso_grads_and_vars))
    op = tf.no_op()
    with tf.control_dependencies([train_op, op]):
        # Advance the global frame counter only after the update ran.
        num_env_frames_and_train = num_env_frames.assign_add(
            FLAGS.parallel_episodes * FLAGS.unroll_length *
            FLAGS.num_action_repeats)
    analysis_tensors['normal_grad_norm'] = normal_grad_norm
    analysis_tensors['elig_grad_norm'] = elig_grad_norm
    analysis_tensors['rl_elig_grad_norm'] = rl_elig_grad_norm
    analysis_tensors['pg_loss'] = pg_loss
    analysis_tensors['value_loss'] = value_loss
    analysis_tensors['entropy_loss'] = entropy_loss
    analysis_tensors['rate_loss'] = rate_loss * FLAGS.rate_cost
    analysis_tensors[
        'voltage_loss_rnn'] = voltage_reg_rnn * FLAGS.voltage_cost_rnn
    analysis_tensors['voltage_loss_cnn'] = voltage_reg * FLAGS.voltage_cost_cnn
    analysis_tensors['action_distribution'] = p_actions
    analysis_tensors['value_target'] = value_targets
    return (done, infos, num_env_frames_and_train,
            analysis_tensors), optimizer, grads_and_vars
def set(self, cursor=None, facebook_id=None, twitter_token=None,
        facebook=None, twitter=None, forename=None, surname=None, age=None,
        birth_day=None, birth_month=None, birth_year=None, gender=None,
        employee=None, country=None, language=None, email=None, top5=None,
        twitter_id=None, twitter_secret=None, save_locally=None,
        app_version=None, iphone_model=None, ios_version=None,
        last_facebook=None, last_twitter=None, **kwargs):
    """Upsert a user row keyed by facebook_id and return the user via
    self.retrieve().

    Only non-None fields are written, so absent request parameters leave
    existing column values untouched.  Python 2 only (dict.iteritems).
    Raises cherrypy.HTTPError(403) when facebook_id is missing.
    """
    if not facebook_id:
        raise cherrypy.HTTPError(403)
    # Does this user already exist?
    qry = {'select': 'COUNT(id) AS count', 'table': 'users',
           'where': 'facebook_id = ?'}
    cursor.execute(util.query(**qry), (facebook_id,))
    res = cursor.fetchone().count
    data = {'twitter_id': twitter_id, 'twitter_token': twitter_token,
            'twitter_secret': twitter_secret, 'facebook': facebook,
            'twitter': twitter, 'forename': forename, 'surname': surname,
            'age': util.to_int(age), 'birth_day': util.to_int(birth_day),
            'birth_month': util.to_int(birth_month),
            'birth_year': util.to_int(birth_year), 'gender': gender,
            'employee': util.to_bool(employee), 'country': country,
            'language': language, 'email': email,
            'top5': util.to_bool(top5),
            'save_locally': util.to_bool(save_locally),
            'app_version': app_version, 'iphone_model': iphone_model,
            'ios_version': ios_version, 'last_login': util.now(),
            'last_facebook': util.to_bool(last_facebook),
            'last_twitter': util.to_bool(last_twitter)}
    # Build parallel column/value lists; columns and values stay aligned
    # because both are appended inside the same iteration.
    columns = []
    values = []
    for key, val in data.iteritems():
        if val != None:
            columns.append(key)
            # Booleans are stored as '1'/'0' strings for the DB layer.
            if val is not True and val is not False:
                values.append(val)
            else:
                if val:
                    values.append('1')
                else:
                    values.append('0')
    # facebook_id is the final placeholder: the WHERE clause for UPDATE,
    # or an extra column for INSERT (appended to `columns` below).
    values.append(facebook_id)
    if res:
        qry = {'update': 'users', 'set_values': columns,
               'where': 'facebook_id = ?'}
        cursor.execute(util.query(**qry), values)
    else:
        columns.append('facebook_id')
        columns.append('joined')
        values.append(util.now())
        qry = {'insert_into': 'users', 'columns': columns}
        cursor.execute(util.query(**qry), values)
    # Re-read the id (covers both the update and the insert path).
    qry = {'select': 'id', 'table': 'users', 'where': 'facebook_id = ?',
           'order_by': 'id', 'limit': 1}
    cursor.execute(util.query(**qry), (facebook_id,))
    user_id = cursor.fetchone().id
    return self.retrieve(cursor=cursor, user_id=user_id)
def is_locked(self, scope, guid):
    """Return whether the Object identified by *guid* is locked."""
    sql = "select locked from Object where guid=%s"
    result = execute_scalar(scope.get_handle(), sql, guid)
    return util.to_bool(result)