def __getSizeInGB__(size):
    '''convert size string to numerical size in GB'''
    logger = getMyLogger()

    s = 0
    if size.find('PB') != -1:
        s = float(size.replace('PB', '')) * (1024**2)
    elif size.find('TB') != -1:
        s = float(size.replace('TB', '')) * 1024
    elif size.find('GB') != -1:
        s = float(size.replace('GB', ''))
    elif size.find('MB') != -1:
        s = float(size.replace('MB', '')) / (1024)
    elif size.find('KB') != -1:
        s = float(size.replace('KB', '')) / (1024**2)
    elif size.find('B') != -1:
        s = float(size.replace('B', '')) / (1024**3)
    else:
        ## assuming unit of byte if input argument contains only numerical characters
        try:
            s = float(size) / (1024**3)
        except ValueError:
            logger.error('cannot convert size to GB: %s' % size)
            raise
    return s
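## A minimal usage sketch (illustrative only; not part of the original module):
## __getSizeInGB__ normalizes the size strings reported by the filer to a float
## number of GB.
def _demo_getSizeInGB():
    '''hypothetical demo of __getSizeInGB__'''
    for size_str in ['1.73TB', '300GB', '512']:
        ## '1.73TB' -> 1771.52, '300GB' -> 300.0, bare '512' is taken as bytes
        print('%s -> %f GB' % (size_str, __getSizeInGB__(size_str)))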
def __init__(self, host='localhost', port=25, credential=None, lvl=0):
    self.smtp_host = host
    self.smtp_port = port
    self.credential = credential
    self.logger = getMyLogger(name=self.__class__.__name__, lvl=lvl)
def setProjectRoleConfigActions(db_host, db_uid, db_pass, db_name, actions=[], lvl=0):
    '''set configuration actions in the project database as activated'''
    logger = getMyLogger(lvl=lvl)

    if not mdb:
        logger.error('No MySQL library available. Function disabled.')
    else:
        ## make connection to MySQL, prepare and execute SQL statement
        cnx = __getMySQLConnector__(db_host, db_uid, db_pass, db_name, lvl=lvl)

        if not cnx:
            logger.error('Project DB connection failed')
            return
        else:
            crs = None

            try:
                ## in this case, we are using MySQLdb
                ## - disable autocommit that is by default enabled in MySQLdb package
                cnx.autocommit(False)
            except Exception:
                ## in this case, we are using mysql.connector
                ## - mysql.connector doesn't have the autocommit() function;
                ##   but the transaction is enabled by default, and autocommit set to False.
                ## - we set autocommit to False anyway.
                cnx.autocommit = False

            try:
                ## get the db cursor
                crs = cnx.cursor()

                ## mark the given actions as activated
                qry = 'UPDATE projectmembers SET activated=\'yes\',updated=%s WHERE project_id=%s AND user_id=%s AND created<=%s'
                data = []
                for a in actions:
                    data.append((a.atime, a.pid, a.uid, a.ctime))

                ## execute queries via the db cursor; the transaction *should be* enabled by default
                if data:
                    for d in data:
                        logger.debug(qry % d)
                    crs.executemany(qry, data)

                ## commit the transaction if everything is fine
                cnx.commit()
            except Exception, e:
                logger.exception('Project DB update failed')
                ## something went wrong, roll back the queries
                try:
                    cnx.rollback()
                except Exception, e:
                    logger.exception('Project DB rollback failed')
            else:
                logger.debug('Project DB update succeeded')
            finally:
                ## close the db cursor and connection (assumed cleanup)
                if crs:
                    crs.close()
                cnx.close()
def getProjectRoleConfigActions(db_host, db_uid, db_pass, db_name, lvl=0):
    '''retrieve pending configuration actions in the project database'''
    logger = getMyLogger(lvl=lvl)

    actions = []

    if not mdb:
        logger.error('No MySQL library available. Function disabled.')
    else:
        ## make connection to MySQL, prepare and execute SQL statement
        cnx = __getMySQLConnector__(db_host, db_uid, db_pass, db_name, lvl=lvl)

        if not cnx:
            logger.error('Project DB connection failed')
            return
        else:
            crs = None

            try:
                ## in this case, we are using MySQLdb
                ## - disable autocommit that is by default enabled in MySQLdb package
                cnx.autocommit(False)
            except Exception:
                ## in this case, we are using mysql.connector
                ## - mysql.connector doesn't have the autocommit() function;
                ##   but the transaction is enabled by default, and autocommit set to False.
                ## - we set autocommit to False anyway.
                cnx.autocommit = False

            try:
                ## get the db cursor
                crs = cnx.cursor()

                ## select actions that are not yet activated
                qry = 'SELECT a.user_id,a.project_id,a.role,a.created,a.action,b.calculatedProjectSpace FROM projectmembers as a, projects as b WHERE a.activated=\'no\' AND b.calculatedProjectSpace > 0 AND a.project_id=b.id'
                crs.execute(qry)

                for (uid, pid, role, created, action, pquota) in crs:
                    _a_new = ProjectRoleSettingAction(uid=uid, pid=pid, role=role,
                                                      action=action, ctime=created,
                                                      pquota=pquota)
                    if actions.count(_a_new) > 0:
                        ## when an action on the same uid,pid is found,
                        ## compare the actions' ctime and keep the latest one
                        idx = actions.index(_a_new)
                        if _a_new.ctime > actions[idx].ctime:
                            actions[idx] = _a_new
                    else:
                        ## otherwise, add the action to the list
                        actions.append(_a_new)
            except Exception, e:
                logger.exception('Project DB select failed')
            else:
                logger.debug('Project DB select succeeded')
            finally:
                ## close the db cursor and connection (assumed cleanup)
                if crs:
                    crs.close()
                cnx.close()

    return actions
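## A minimal usage sketch (host names and credentials are hypothetical, and the
## actual role/ACL application step is only indicated by a comment): pending
## actions are polled with getProjectRoleConfigActions() and, once applied,
## marked as activated with setProjectRoleConfigActions(). Setting a.atime
## before the update is an assumption based on the UPDATE statement above.
def _demo_processRoleConfigActions():
    '''hypothetical demo of the poll/apply/activate flow'''
    import time
    actions = getProjectRoleConfigActions('db.example.org', 'db_user', 'db_pass',
                                          'project_db', lvl=3)
    for a in actions:
        ## ... apply the role setting (e.g. adjust the project ACL) here ...
        a.atime = time.strftime('%Y-%m-%d %H:%M:%S')
    setProjectRoleConfigActions('db.example.org', 'db_user', 'db_pass', 'project_db',
                                actions=actions, lvl=3)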
def getProjectOwner(db_host, db_uid, db_pass, db_name, pid, lvl=0):
    '''retrieve project owner information (name and email)'''
    logger = getMyLogger(lvl=lvl)

    owner = {}

    if not mdb:
        logger.error('No MySQL library available. Function disabled.')
    else:
        ## make connection to MySQL, prepare and execute SQL statement
        cnx = __getMySQLConnector__(db_host, db_uid, db_pass, db_name, lvl=lvl)

        if not cnx:
            logger.error('Project DB connection failed')
            return
        else:
            crs = None

            try:
                ## in this case, we are using MySQLdb
                ## - disable autocommit that is by default enabled in MySQLdb package
                cnx.autocommit(False)
            except Exception:
                ## in this case, we are using mysql.connector
                ## - mysql.connector doesn't have the autocommit() function;
                ##   but the transaction is enabled by default, and autocommit set to False.
                ## - we set autocommit to False anyway.
                cnx.autocommit = False

            try:
                ## get the db cursor
                crs = cnx.cursor()

                ## select the owner of the given project (parameterized to avoid
                ## interpolating pid directly into the SQL statement)
                qry = 'SELECT a.id, a.email, a.firstName, a.lastName FROM users as a, projects as b WHERE a.id = b.owner_id AND b.id = %s'
                crs.execute(qry, (pid, ))

                for (owner_id, owner_email, owner_first_name, owner_last_name) in crs:
                    owner['name'] = '%s %s' % (owner_first_name, owner_last_name)
                    owner['email'] = owner_email
            except Exception, e:
                logger.exception('Project DB select failed')
            else:
                logger.debug('Project DB select succeeded')
            finally:
                ## close the db cursor and connection (assumed cleanup)
                if crs:
                    crs.close()
                cnx.close()

    return owner
def __getMySQLConnector__(host, uid, passwd, db, lvl=0):
    '''establishes MySQL connector'''
    logger = getMyLogger(lvl=lvl)

    cnx = None
    config = None

    if mdb.__name__ == 'MySQLdb':
        ### use MySQLdb library
        config = {'user': uid, 'passwd': passwd, 'db': db, 'host': host}
        try:
            cnx = mdb.connect(**config)
        except mdb.Error, e:
            logger.error('db query error %d: %s' % (e.args[0], e.args[1]))
            if cnx:
                cnx.close()

    return cnx
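## Sketch (an assumption, not part of the original source): the snippet above
## only covers the MySQLdb branch. A variant that also handles mysql.connector
## could look like the following; note the different config keys
## ('password'/'database' instead of 'passwd'/'db').
def __getMySQLConnectorAlt__(host, uid, passwd, db, lvl=0):
    '''hypothetical variant of __getMySQLConnector__ covering mysql.connector'''
    logger = getMyLogger(lvl=lvl)
    cnx = None
    if mdb.__name__ == 'MySQLdb':
        config = {'user': uid, 'passwd': passwd, 'db': db, 'host': host}
    else:
        ## mysql.connector (and compatible drivers)
        config = {'user': uid, 'password': passwd, 'database': db, 'host': host}
    try:
        cnx = mdb.connect(**config)
    except mdb.Error, e:
        logger.error('db connection error: %s' % e)
    return cnx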
def __makeProjectDirectoryFS__(fpath, quota, ouid, ogid, lvl):
    '''create a project directory directly on the file system'''
    logger = getMyLogger(lvl=lvl)

    rc = True
    if os.path.exists(fpath):
        logger.warn('directory already exists: %s ... skip creation' % fpath)
    else:
        try:
            os.mkdir(fpath, 0550)
            os.chown(fpath, pwd.getpwnam(ouid).pw_uid, grp.getgrnam(ogid).gr_gid)
        except OSError, e:
            logger.error('cannot create new directory: %s' % fpath)
            rc = False

    return rc
def createProjectDirectory(fpath, quota, type, cfg, lvl=0):
    '''general function for callers to make project directory'''
    logger = getMyLogger(lvl=lvl)

    ouid = cfg.get('PPS', 'PROJECT_DIR_OUID')
    ogid = cfg.get('PPS', 'PROJECT_DIR_OGID')

    rc = True
    if type == StorageType['fs_dir']:
        rc = __makeProjectDirectoryFS__(fpath, quota, ouid, ogid, lvl)
    elif type == StorageType['netapp_volume']:
        filer_admin = cfg.get('PPS', 'FILER_ADMIN')
        filer_mgmt_server = cfg.get('PPS', 'FILER_MGMT_SERVER')
        rc = __makeProjectDirectoryNetApp__(fpath, quota, ouid, ogid,
                                            filer_admin, filer_mgmt_server, lvl)
    else:
        logger.error('unknown storage type: %s' % type)

    return rc
# optional arguments
parg.add_argument('-l', '--loglevel',
                  action='store',
                  dest='verbose',
                  type=int,
                  choices=[0, 1, 2, 3],
                  default=0,
                  help='set one of the following verbosity levels. 0|default:WARNING, 1:ERROR, 2:INFO, 3:DEBUG')

parg.add_argument('-d', '--basedir',
                  action='store',
                  dest='basedir',
                  default=cfg.get('PPS', 'PROJECT_BASEDIR'),
                  help='set the basedir in which the project storages are located')

args = parg.parse_args()

logger = getMyLogger(name=os.path.basename(__file__), lvl=args.verbose)

fs = Nfs4NetApp('', lvl=args.verbose)

for id in os.listdir(args.basedir):
    fs.project_root = os.path.join(args.basedir, id)
    fs.delUsers(users=args.uid, recursive=False)
#!/bin/env python
import sys
import os

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../external/lib/python')
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../')

from utils.Common import getConfig, getMyLogger
from utils.IStorage import StorageType, createProjectDirectory

## load configuration file
cfg = getConfig(os.path.dirname(os.path.abspath(__file__)) + '/etc/config.ini')

fpath = '/project/0000000.03'
quota = '%sGB' % '300'
stype = 'netapp_volume'

logger = getMyLogger(lvl=3)

rc = createProjectDirectory(fpath, quota, StorageType[stype], cfg, lvl=3)

if rc:
    # must refresh the PROJECT_BASEDIR to get access to the newly created volume
    os.listdir(cfg.get('PPS', 'PROJECT_BASEDIR'))
    if not os.path.exists(fpath):
        logger.error('created directory not available: %s' % fpath)
def updateProjectDatabase(roles, db_host, db_uid, db_pass, db_name, lvl=0):
    '''update project roles in the project database'''
    logger = getMyLogger(lvl=lvl)

    if not mdb:
        logger.error('No MySQL library available. Function disabled.')
    else:
        ## make connection to MySQL, prepare and execute SQL statement
        cnx = __getMySQLConnector__(db_host, db_uid, db_pass, db_name, lvl=lvl)

        if not cnx:
            logger.error('Project DB connection failed')
            return
        else:
            crs = None

            try:
                ## in this case, we are using MySQLdb
                ## - disable autocommit that is by default enabled in MySQLdb package
                cnx.autocommit(False)
            except Exception:
                ## in this case, we are using mysql.connector
                ## - mysql.connector doesn't have the autocommit() function;
                ##   but the transaction is enabled by default, and autocommit set to False.
                ## - we set autocommit to False anyway.
                cnx.autocommit = False

            try:
                ## get the db cursor
                crs = cnx.cursor()

                ## delete project users first, followed by inserting new users and roles
                qry1 = 'DELETE FROM acls WHERE project=%s'
                data1 = []
                qry2 = 'INSERT INTO acls (project, user, projectRole) VALUES (%s, %s, %s)'
                data2 = []

                for p, rd_list in roles.iteritems():
                    data1.append((p, ))
                    for rd in rd_list:
                        for k in PROJECT_ROLES:
                            for u in rd[k]:
                                data1.append((p, ))
                                data2.append((p, u, k))

                ## remove duplication
                data1 = list(set(data1))

                ## execute queries via the db cursor; the transaction *should be* enabled by default
                if data1:
                    for d in data1:
                        logger.debug(qry1 % d)
                        crs.execute(qry1, d)

                if data2:
                    for d in data2:
                        logger.debug(qry2 % d)
                        try:
                            crs.execute(qry2, d)
                        except mdb.IntegrityError, ierr:
                            ## catch IntegrityError and allow the update to continue
                            logger.exception('Project DB integrity error: ' + qry2 % d)

                ## commit the transaction if everything is fine
                cnx.commit()
            except Exception, e:
                logger.exception('Project DB update failed')
                print("Error: {}".format(e))
                ## something went wrong, roll back the queries
                try:
                    cnx.rollback()
                except Exception, e_ignore:
                    logger.exception('Project DB rollback failed')
                raise e
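## Illustrative sketch (project id, user names and role names are assumptions,
## not taken from the original source): updateProjectDatabase() expects `roles`
## to map a project id to a list of dictionaries keyed by the names in
## PROJECT_ROLES.
def _demo_updateProjectDatabase():
    '''hypothetical demo showing the expected shape of the `roles` argument'''
    roles = {
        '3010000.01': [
            {'admin': ['userA'], 'contributor': ['userB', 'userC'], 'viewer': []},
        ],
    }
    updateProjectDatabase(roles, 'db.example.org', 'db_user', 'db_pass',
                          'project_db', lvl=3)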
def __makeProjectDirectoryNetApp__(fpath, quota, ouid, ogid, filer_admin, filer_mgmt_server, lvl):
    '''create a project directory directly on the NetApp filer running Data ONTAP'''
    logger = getMyLogger(lvl=lvl)

    def __exec_filer_cmd_ssh__(filer_admin, filer_mgmt_server, cmd, timeout=300, shell=None, lvl=0):
        '''private function for executing filer command via SSH interface'''
        if not shell:
            shell = Shell()
        return shell.cmd1('ssh %s@%s "%s"' % (filer_admin, filer_mgmt_server, cmd),
                          allowed_exit=[0, 255],
                          timeout=timeout)

    quotaGB = __getSizeInGB__(quota)

    if os.path.exists(fpath):
        logger.warn('directory already exists: %s ... skip creation' % fpath)
        return True
    else:
        s = Shell()

        ## 1. find a proper aggregate for allocating storage space for the volume
        cmd = 'storage aggregate show -fields availsize,volcount -stat online'
        logger.debug('cmd listing aggregates: %s' % cmd)
        rc, output, m = __exec_filer_cmd_ssh__(filer_admin, filer_mgmt_server, cmd,
                                               timeout=120, shell=s, lvl=lvl)
        if rc != 0:
            logger.error('%s failed' % cmd)
            logger.error(output)
            return False

        ## parse lines similar to the following one:
        ##
        ##   aggr1a_fc    1.73TB    23
        ##
        ## - field 1: aggregate name
        ## - field 2: available size
        ## - field 3: number of volumes on the same aggregate
        re_aggr_info = re.compile('^(aggr\S+)\s+(\S+[PTGMK]B)\s+([0-9]+)$')

        aggrs = []
        for l in output.split('\n'):
            m = re_aggr_info.match(l.strip())
            if m:
                aggrs.append({'name': m.group(1),
                              'availsize': __getSizeInGB__(m.group(2)),
                              'volcount': int(m.group(3))})
            else:
                pass

        ## pick the aggregate with the largest available size
        g_aggr = sorted(aggrs, key=operator.itemgetter('availsize'), reverse=True)[0]

        if g_aggr['availsize'] <= quotaGB:
            logger.error('aggregate with largest available size smaller than the project quota: %f < %f' %
                         (g_aggr['availsize'], quotaGB))
            return False

        logger.info('selected aggregate: %s' % repr(g_aggr))

        ## 2. create QoS policy group for the project
        qos_policy_group = 'p%s' % fpath.split('/')[-1].replace('.', '_')

        cmd = 'qos policy-group show'
        rc, output, m = __exec_filer_cmd_ssh__(filer_admin, filer_mgmt_server, cmd, shell=s, lvl=lvl)
        if rc != 0:
            logger.error('%s failed' % cmd)
            logger.error(output)
            return False

        re_qos_exist = re.compile('^%s\s+.*' % qos_policy_group)
        ck_qos_exist = False
        for l in output.split('\n'):
            if re_qos_exist.match(l):
                ck_qos_exist = True
                logger.warn('QoS policy group already exists: %s' % qos_policy_group)
                break

        if not ck_qos_exist:
            cmd = 'qos policy-group create -policy-group %s -vserver atreides -max-throughput 6000iops' % qos_policy_group
            logger.debug('cmd creating qos policy group: %s' % cmd)
            rc, output, m = __exec_filer_cmd_ssh__(filer_admin, filer_mgmt_server, cmd, shell=s, lvl=lvl)
            if rc != 0:
                logger.error('%s failed' % cmd)
                logger.error(output)
                return False

        ## 3. create the volume
        vol_name = 'project_%s' % fpath.split('/')[-1].replace('.', '_')

        cmd = 'volume create -vserver atreides -volume %s -aggregate %s -size %s -user %s -group %s -junction-path %s' % (
            vol_name, g_aggr['name'], quota, ouid, ogid, fpath)
        cmd += ' -security-style unix -unix-permissions 0750 -state online -autosize false -foreground true'
        cmd += ' -policy dccn-projects -qos-policy-group %s -space-guarantee none -snapshot-policy none -type RW' % qos_policy_group
        cmd += ' -percent-snapshot-space 0'

        logger.debug('cmd creating volume: %s' % cmd)
        rc, output, m = __exec_filer_cmd_ssh__(filer_admin, filer_mgmt_server, cmd, shell=s, lvl=lvl)
        if rc != 0:
            logger.error('%s failed' % cmd)
            logger.error('%s' % output)
            return False

        ## 4. enable volume efficiency
        cmd = 'volume efficiency on -vserver atreides -volume %s' % vol_name
        logger.debug('cmd enabling volume efficiency: %s' % cmd)
        rc, output, m = __exec_filer_cmd_ssh__(filer_admin, filer_mgmt_server, cmd, shell=s, lvl=lvl)
        if rc != 0:
            logger.error('%s failed' % cmd)
            logger.error('%s' % output)
            return False

        ## 5. modify volume efficiency
        cmd = 'volume efficiency modify -schedule auto -vserver atreides -volume %s' % vol_name
        logger.debug('cmd setting volume efficiency: %s' % cmd)
        rc, output, m = __exec_filer_cmd_ssh__(filer_admin, filer_mgmt_server, cmd, shell=s, lvl=lvl)
        if rc != 0:
            logger.error('%s failed' % cmd)
            logger.error('%s' % output)
            return False

        return True
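## A minimal sketch (illustrative only; not part of the original module) of how
## the aggregate listing is parsed: the regular expression used in step 1 above
## extracts the aggregate name, available size and volume count from a line
## like the one quoted in the comments.
def _demo_parse_aggr_line():
    '''hypothetical demo of the aggregate-line parsing'''
    line = 'aggr1a_fc    1.73TB    23'
    m = re.match('^(aggr\S+)\s+(\S+[PTGMK]B)\s+([0-9]+)$', line)
    if m:
        print('aggregate: %s, availsize: %.2f GB, volcount: %d' %
              (m.group(1), __getSizeInGB__(m.group(2)), int(m.group(3))))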