class FGInstances:
    """Container for cloud instance records parsed from logs or read from
    the FG database, plus the associated user and project metadata.

    ``instance`` maps "<instanceId> <ownerId> <ts>" keys to record dicts
    when built via ``add()`` (or holds whatever ``db.read`` returns after
    ``read_instances()``); ``userinfo`` is a list of user dicts;
    ``projectinfo`` maps ProjectId -> project dict.
    """

    # Fallback project columns merged into records whose owner has no
    # matching project entry.
    # Formerly also considered: "Completed", "Department", "Keywords", "Results"
    default_projectinfo = {"ProjectId": None, "Title": None,
                           "Institution": None, "ProjectLead": None,
                           "Discipline": None}

    def __init__(self):
        # FIX: the containers were mutable class attributes shared by every
        # FGInstances object; they are now per-instance state.
        self.instance = {}
        self.userinfo = []
        self.projectinfo = {}
        self.db = FGDatabase()
        self.in_the_future = datetime.strptime(
            "3000-01-01 00:00:00", '%Y-%m-%d %H:%M:%S')
        self.in_the_past = datetime.strptime(
            "1970-01-01 00:00:00", '%Y-%m-%d %H:%M:%S')
        self.pp = pprint.PrettyPrinter(indent=0)
        self.first_date = datetime.strptime(
            "3000-01-01 00:00:00", '%Y-%m-%d %H:%M:%S')
        self.last_date = datetime.strptime(
            "1981-01-01 00:00:00", '%Y-%m-%d %H:%M:%S')
        self.cloudplatform = None

    def clear(self, opts=None):
        """Reset the named container ('instance', 'userinfo',
        'projectinfo'); with opts=None reset all three."""
        if not opts or opts == "instance":
            self.instance = {}
        if not opts or opts == "userinfo":
            self.userinfo = []
        if not opts or opts == "projectinfo":
            self.projectinfo = {}

    def get_instance(self, index=None):
        return self.get_data(index)

    def get_data(self, index=None, withInfo=False):
        return self.get_instance_with_info(index, withInfo)

    def get_instance_with_info(self, index, withInfo):
        """Return instance record(s), optionally merged with user and
        project information.  Always returns a list of records.

        FIX: the original wrapped the whole per-record merge in a bare
        ``except: pass``, so an unknown ProjectId (KeyError) silently
        dropped the record -- which could yield an empty result and crash
        callers in FGMetrics (``...[0]``), as the old comment warned.
        """
        if isinstance(index, int):  # FIX: py2-only `long` removed
            instances = [self.instance[index]]
        else:
            # NOTE(review): when built via add() this is a dict and the
            # loop below would iterate its keys; callers appear to use
            # withInfo only after read_instances(), which stores the
            # db.read() result -- confirm against FGMetrics.
            instances = self.instance
        if not withInfo:
            return instances
        res = []
        for ins in instances:
            try:
                userinfo = self.get_userinfo({"ownerid": ins["ownerId"],
                                              "username": ins["ownerId"]})
            except (KeyError, TypeError):
                userinfo = None
            if userinfo:
                # Record fields win over user fields (same precedence as
                # the original `dict(userinfo.items() + ins.items())`).
                merged = dict(userinfo)
                merged.update(ins)
                prj_id = userinfo.get("project")
                projectinfo = None
                if prj_id:
                    if isinstance(prj_id, str) and prj_id.startswith("fg"):
                        prj_id = int(prj_id[2:])  # strip 'fg' prefix
                    projectinfo = self.get_projectinfo(prj_id)
                if projectinfo:
                    # Merged record fields win over project fields.
                    ins = dict(projectinfo)
                    ins.update(merged)
                else:
                    # Defaults deliberately applied last (original order).
                    merged.update(self.default_projectinfo)
                    ins = merged
            res.append(ins)
        return res

    def get_userinfo(self, index=None):
        """Look up user info by list position (int), by matching any one
        key/value pair (dict), or return the full list (None).  Unknown
        index types return None."""
        if isinstance(index, int):
            return self.userinfo[index]
        elif isinstance(index, dict):
            for key, val in index.items():  # FIX: py2-only iteritems()
                for userinfo in self.userinfo:
                    if key in userinfo and userinfo[key] == val:
                        return userinfo
            return None
        elif index is None:
            if not self.userinfo:
                self.read_userinfo_from_db()  # lazy load
            return self.userinfo
        else:
            return None

    def get_projectinfo(self, index=None):
        """Look up project info by ProjectId (int), by matching any one
        key/value pair (dict), or return the whole mapping (None)."""
        if isinstance(index, int):
            # FIX: .get() instead of [] -- an unknown ProjectId used to
            # raise KeyError, silently dropping whole records upstream.
            return self.projectinfo.get(index)
        elif isinstance(index, dict):
            for key, val in index.items():
                # FIX: projectinfo is keyed by ProjectId; the original
                # iterated the keys and then did `key in <int>`.
                for projectinfo in self.projectinfo.values():
                    if key in projectinfo and projectinfo[key] == val:
                        return projectinfo
            return None
        elif index is None:
            if not self.projectinfo:
                self.read_projectinfo_from_db()  # lazy load
            return self.projectinfo
        else:
            return None

    def count(self):
        """Number of instance records currently held."""
        return len(self.instance)

    def read_from_db(self):
        self.read_instances()

    def read_instances(self, querydict=None, optional=""):
        """Replace held instances with rows read from the database."""
        self.clear("instance")
        # FIX: mutable default argument replaced by a None sentinel.
        self.instance = self.db.read(querydict or {}, optional)

    def read_projectinfo_from_db(self):
        """Load project rows and index them by ProjectId."""
        self.clear("projectinfo")
        prjinfo_list = self.db.read_projectinfo()
        self.projectinfo = {element["ProjectId"]: element
                            for element in prjinfo_list}

    def read_userinfo_from_db(self):
        self.read_userinfo()

    def read_userinfo(self, querydict=None, optional=""):
        self.clear("userinfo")
        self.userinfo = self.db.read_userinfo(querydict or {}, optional)

    def read_userinfo_detail(self):
        self.clear("userinfo")
        self.userinfo = self.db.get_userinfo_detail()

    def read_cloudplatform(self, refresh=False):
        """Return (and lazily cache) the cloudplatform table."""
        if not self.cloudplatform or refresh:
            self.cloudplatform = self.db.read_cloudplatform()
        return self.cloudplatform

    def get_cloudplatform_id(self, querydict=None):
        """Return the cloudPlatformId of the first row matching every
        key/value in querydict, or None.

        FIX: replaced the exception-as-goto (`ContinueOutOfALoop`)
        construct with a plain all() test; mutable default removed.
        """
        for row in self.read_cloudplatform():
            if all(row[key] == val
                   for key, val in (querydict or {}).items()):
                return row["cloudPlatformId"]
        return None

    def write_to_db(self):
        for key_current in self.instance:
            self.db.write(self.instance[key_current])

    def write_userinfo_to_db(self):
        for userinfo in self.userinfo:
            self.db.write_userinfo(userinfo)

    def add(self, datarecord):
        """Fold one parsed 'print_ccInstance' log record into
        ``self.instance``, maintaining the per-state trace windows and
        the overall [t_start, t_end] interval; other linetypes ignored."""
        if datarecord["linetype"] != "print_ccInstance":
            return
        instanceId = datarecord["instanceId"]
        ownerId = datarecord["ownerId"]
        timestamp = datarecord["ts"]
        status = datarecord["state"].lower()
        t = datarecord["date"]
        # FIX: local no longer shadows the builtin `id`.
        key = instanceId + " " + ownerId + " " + str(timestamp)
        # FIX: dict .get() instead of bare try/except KeyError.
        current = self.instance.get(key, datarecord)
        if "t_end" not in current:
            # First sighting: open every trace window at "the future" so
            # the min()/max() below tighten it correctly.
            f = self.in_the_future
            current["trace"] = {
                "pending": {"start": f, "stop": t},
                "teardown": {"start": f, "stop": t},
                "extant": {"start": f, "stop": t},
            }
            current["t_end"] = current["date"]
            current["t_start"] = current["ts"]  # for naming consistency
            current["duration"] = 0.0
        current["t_end"] = max(current["t_end"], t)
        trace = current["trace"][status]
        trace["start"] = min(trace["start"], t)
        trace["stop"] = max(trace["stop"], t)
        self.instance[key] = current

    def update_traceinfo(self, row):
        """Newer replacement for add(): stamp datetimes on the row, then
        merge it into the held records."""
        res = self.set_datetime(row)
        return self.update_to_list(res)

    def set_datetime(self, row):
        """Initialize t_start/t_end/duration and empty per-state trace
        windows on the row (in place) and return it."""
        future = self.in_the_future
        past = self.in_the_past
        row["t_start"] = row["ts"]
        row["t_end"] = self.get_t_end(row)
        row["duration"] = self.get_t_delta(row)
        # Bounded queues keep only the 10 most recent state timestamps.
        row["trace"] = {
            state: {"start": future, "stop": past, "queue": deque("", 10)}
            for state in ("pending", "extant", "teardown")
        }
        return row

    def update_to_list(self, row):
        """Merge one stamped row into self.instance; a newer row (by
        'date') overwrites the state/timing columns."""
        key = (row["instanceId"] + " " + row["ownerId"]
               + " " + str(row["ts"]))
        res = self.update_trace_datetime(key, row)
        if row["date"] > res["date"]:
            res["state"] = row["state"]
            res["date"] = row["date"]
            res["duration"] = row["duration"]
            res["t_end"] = row["t_end"]
        self.instance[key] = res
        return res

    def update_trace_datetime(self, key, new):
        """Append the new row's date to the per-state queue of the held
        record (or of `new` itself on first sight) and re-derive the
        state's start/stop from the queue extremes."""
        old = self.instance.get(key, new)
        state = new["state"].lower()
        queue = old["trace"][state]["queue"]
        queue.append(new["date"])
        old["trace"][state]["start"] = min(queue)
        old["trace"][state]["stop"] = max(queue)
        return old

    def get_previous_state(self, state):
        """Map a state to its predecessor in the lifecycle
        (pending -> extant -> teardown); None for unknown states."""
        state = state.lower()
        if state == "pending":
            return state
        elif state == "extant":
            return "pending"
        elif state == "teardown":
            return "extant"
        return None

    def get_t_end(self, row):
        # Currently just the record date; an earlier (disabled) variant
        # returned in_the_future for non-Teardown states.
        return row["date"]

    def get_t_delta(self, row):
        """Seconds between t_start and the latest known event, clamped
        at zero; for Teardown rows a recorded t_end may extend it."""
        start = row["t_start"]
        last = row["date"]
        # FIX: row["t_end"] accessed via .get -- the original raised
        # KeyError on Teardown rows without a t_end column.
        if row["state"] == "Teardown" and row.get("t_end"):
            last = max(row["date"], row["t_end"])
        t_delta = (last - start).total_seconds()
        if t_delta < 0:
            t_delta = timedelta(0).total_seconds()
        return t_delta

    def refresh(self):
        """Recompute 'duration' (seconds, stored as a string) for every
        held record; negative spans are clamped to zero."""
        for key in self.instance:
            values = self.instance[key]
            if values["state"] == "Teardown":
                t_delta = values["t_end"] - values["ts"]
            else:
                t_delta = values["date"] - values["ts"]
            if t_delta.total_seconds() < 0:
                t_delta = timedelta(0)  # FIX: was `t_end - t_end`
            values["duration"] = str(t_delta.total_seconds())

    def set_userinfo(self):
        """Best-effort LDAP lookup for every owner; failures skipped."""
        for key in self.instance:
            ownerid = self.instance[key]["ownerId"]
            try:
                new_userinfo = retrieve_userinfo_ldap(ownerid)
            except Exception:
                # FIX: narrowed from bare except; still best-effort.
                continue
            self.add_userinfo(new_userinfo)

    def add_userinfo(self, new_userinfo):
        """Append user info unless an equal entry is already present."""
        if new_userinfo not in self.userinfo:
            self.userinfo.append(new_userinfo)

# NOTE(review): the original file had a dangling ''' immediately after this
# class -- possibly the opener of a disabled region; confirm against the
# full file before relying on the code below being live.
class FGConverter:
    """Convert cloud-platform accounting records (nimbus sqlite3 or
    openstack mysql) into the FG database schema: extract rows from the
    source database, remap them to fg 'instance' records, and write them
    (plus user info) to the destination database."""

    def __init__(self):
        self.s_date = None            # parse window start (datetime)
        self.e_date = None            # parse window end (datetime)
        self.platform = None          # e.g. "nimbus", "openstack"
        self.platform_version = None
        self.hostname = None
        self.confname = None
        self.dbname_nova = None
        self.dbname_keystone = None
        self.query = None             # extraction SQL, built per platform
        self.rows = None              # raw rows from the source database
        self.records = None           # converted rows for the fg database
        self.userinfo = None
        self.cloudplatform = None
        self.argparser = None
        self.db = FGDatabase()        # source database
        self.db_dest = FGDatabase()   # destination (fg) database

    def __del__(self):
        # FIX: guard so a failed __init__ does not raise again in __del__.
        if getattr(self, "db", None) is not None:
            self.db_close()

    def convert_to_fg(self):
        """Full pipeline: check platform, connect, convert, close."""
        self.check_platform()
        self.db_connect()
        self.convert_instance()
        self.convert_userinfo()
        self.db_close()

    def check_platform(self):
        """Dispatch to _check_platform_<platform> to fill defaults and
        build the extraction query."""
        getattr(self, "_check_platform_" + self.platform)()

    def _check_platform_nimbus(self):
        self.platform_version = (self.platform_version
                                 or FGConst.DEFAULT_NIMBUS_VERSION)
        self.db.db_type = self.db.db_type or FGConst.DEFAULT_NIMBUS_DB
        # The '[timestamp]' column annotation is sqlite3-specific
        # (detect_types column parsing).
        self.query = (
            'SELECT t1.time as "t_start [timestamp]",'
            ' t3.time as "t_end [timestamp]",'
            ' t1.uuid as instanceId,'
            ' t2.dn,'
            ' t1.cpu_count as ccvm_cores,'
            ' t1.memory as ccvm_mem,'
            ' t1.vmm as serviceTag'
            ' from create_events t1, user t2, remove_events t3'
            ' on t1.user_id=t2.id and t1.uuid=t3.uuid'
            # NOTE(review): dates are interpolated into the SQL; they come
            # from datetime.strptime in set_parser, so injection-safe here.
            " where t1.time >= '" + str(self.s_date) + "'"
            " and t3.time <= '" + str(self.e_date) + "'")

    def _check_platform_openstack(self):
        if (not self.dbname_nova or not self.dbname_keystone
                or not self.db.dbhost or not self.db.dbuser
                or not self.db.dbpasswd):
            msg = "db info is missing"
            print(msg)
            raise ValueError(msg)
        self.platform_version = (self.platform_version
                                 or FGConst.DEFAULT_OPENSTACK_VERSION)
        self.db.db_type = self.db.db_type or FGConst.DEFAULT_OPENSTACK_DB
        self.db.dbname = self.dbname_nova
        self.query = (
            'SELECT created_at as trace_pending_start,'
            ' launched_at as trace_extant_start,'
            ' terminated_at as trace_teardown_start,'
            ' deleted_at as trace_teardown_stop,'
            ' id,'
            ' user_id as ownerId,'
            ' project_id as accountId,'
            ' image_ref as emiId,'
            ' kernel_id as kernelId,'
            ' ramdisk_id as ramdiskId,'
            ' key_data as keyName,'
            ' vm_state as state,'
            ' memory_mb as ccvm_mem,'
            ' vcpus as ccvm_cores,'
            ' host as serviceTag,'
            ' reservation_id as reservationId,'
            ' created_at as t_start,'
            ' COALESCE(deleted_at, terminated_at, updated_at) as t_end,'
            ' uuid as instanceId,'
            ' access_ip_v4 as ccnet_publicIp,'
            ' ephemeral_gb as ccvm_disk'
            ' from instances'
            " where updated_at >= '" + str(self.s_date) + "'"
            " and updated_at <= '" + str(self.e_date) + "'")
        # alt t_start: COALESCE(launched_at, created_at, scheduled_at)

    def db_connect(self):
        self.db.connect()
        self.db_dest.conf()
        self.db_dest.connect()

    def db_close(self):
        self.db.close()
        self.db_dest.close()

    def read_from_source(self):
        self.rows = self.db.query(self.query)

    def write_to_dest(self):
        self.db_dest.write_instance(self.records)

    def convert_instance(self):
        self.read_from_source()
        self.map_to_fg()
        self.write_to_dest()

    def convert_userinfo(self):
        """Dispatch to _convert_userinfo_of_<platform>."""
        getattr(self, "_convert_userinfo_of_" + self.platform)()

    def _convert_userinfo_of_nimbus(self):
        # Nimbus carries no separate user database to convert.
        return

    def _convert_userinfo_of_openstack(self):
        res = self._read_userinfo_of_nova_with_project_info()
        self.db_dest.write_userinfo(res)

    def _read_userinfo_of_nova_with_project_info(self):
        """Read user/tenant pairs from keystone (Grizzly schema first,
        then Folsom fallback) and enrich each row via LDAP."""
        keystone = self.db
        keystone.dbname = self.dbname_keystone
        keystone.connect()
        # Grizzly schema
        userinfo = keystone.query(
            "select user_id, tenant_id, user.name as user_name,"
            " tenant.name as tenant_name"
            " from user_tenant_membership, tenant, user"
            " where user.id=user_tenant_membership.user_id"
            " and tenant.id=user_tenant_membership.tenant_id")
        if not userinfo:
            # Folsom schema
            userinfo = keystone.query(
                "select user_id, project_id as tenant_id,"
                " user.name as user_name, project.name as tenant_name"
                " from user_project_metadata, project, user"
                " where user.id=user_project_metadata.user_id"
                " and project.id=user_project_metadata.project_id")
        keystone.close()
        records = []
        for row in userinfo:
            try:
                res = FGUtility.retrieve_userinfo_ldap(row["user_name"])
                if not res:
                    res = {}
                res["ownerid"] = row["user_id"]
                res["username"] = row["user_name"]
                res["project"] = row["tenant_name"]
                res["hostname"] = self.hostname
                print(res)
                records.append(res)
            except Exception:
                print(sys.exc_info())
                raise
        return records

    def read_cloudplatform(self):
        """Cache the destination cloudplatform table once."""
        if self.cloudplatform:
            return
        self.cloudplatform = self.db_dest.read_cloudplatform()

    def get_cloudplatform_id(self, querydict=None):
        """Return the cloudPlatformId of the first cached row matching
        every key/value in querydict, else None.

        FIX: replaced the exception-as-goto (`ContinueOutOfALoop`)
        construct with a plain all() test; mutable default removed.
        """
        self.read_cloudplatform()
        for row in self.cloudplatform:
            if all(row[key] == val
                   for key, val in (querydict or {}).items()):
                return row["cloudPlatformId"]
        return None

    def map_to_fg(self):
        """Translate raw source rows (self.rows) into fg 'instance'
        records (self.records); rows that fail to convert are reported
        and skipped (best effort, as before)."""
        whereclause = {"platform": self.platform,
                       "hostname": self.hostname,
                       "version": self.platform_version}
        cloudplatformid = self.get_cloudplatform_id(whereclause)
        records = []
        for row in self.rows:
            record = row
            try:
                # fg schema caps instanceId at 15 characters.
                record["instanceId"] = record["instanceId"][:15]
                record["ts"] = record["t_start"]
                if record["t_end"] and record["t_start"]:
                    record["duration"] = (
                        record["t_end"] - record["t_start"]).total_seconds()
                # Nimbus rows carry an LDAP DN instead of an ownerId;
                # take the CN component when present.
                if "dn" in record and "ownerId" not in record:
                    parts = record["dn"].split("CN=")
                    record["ownerId"] = (parts[1] if len(parts) > 1
                                         else record["dn"])
                record.pop("dn", None)  # originally: try/del/except pass
                record["date"] = record["t_start"]
                record["platform"] = self.platform
                if "state" not in record:
                    record["state"] = "Teardown"
                record["state"] = self.convert_state(record["state"])
                record["cloudPlatformIdRef"] = cloudplatformid
                records.append(record)
            except Exception:
                print(sys.exc_info())
                print(record)
        self.records = records

    def convert_state(self, state):
        """Normalize openstack vm_state values to fg state names; other
        platforms' states pass through unchanged.

        FIX: the original tested `state == "deleted" or "shutoff"`,
        which is always truthy, so every non-active/building state
        (including "error") collapsed to "Teardown".
        """
        if self.platform == "openstack":
            if state == "active":
                return "Extant"
            elif state == "building":
                return "Pending"
            elif state in ("deleted", "shutoff"):
                return "Teardown"
            elif state == "error":
                return state
        return state

    def set_instance_conf(self, confname=""):
        # NOTE(review): returns None; set_parser assigns the result to
        # self.confname, which is therefore always None -- confirm intent.
        if confname and len(confname) > 0:
            self.db_dest.set_conf(confname)
            self.db_dest.update_conf()

    def set_parser(self):
        """Build the CLI argument parser and load its values into self."""
        def_s_date = "19700101"
        def_e_date = "29991231"
        def_conf = "futuregrid.cfg"  # currently unused default
        def_nova = "nova"
        def_keystone = "keystone"
        def_db = "mysql"
        parser = argparse.ArgumentParser()
        parser.add_argument("-s", "--from", dest="s_date", default=def_s_date,
                            help="Start date to begin parsing (type: YYYYMMDD)")
        parser.add_argument("-e", "--to", dest="e_date", default=def_e_date,
                            help="End date to finish parsing (type: YYYYMMDD)")
        parser.add_argument("-p", "--platform", required=True,
                            help="Cloud platform name, required. (e.g. nimbus, openstack, eucalyptus, etc)")
        parser.add_argument("-pv", "--platform_version",
                            help="Cloud platform version. (e.g. 2.9 for nimbus, essex for openstack, and 2.0 or 3.1 for eucalyptus)")
        parser.add_argument("-n", "--hostname", required=True,
                            help="Hostname of the cloud platform, required. (e.g., hotel, sierra, india, alamo, foxtrot)")
        parser.add_argument("--conf", dest="conf",
                            help="futuregrid.cfg filepath (e.g. $HOME/.futuregrid/futuregrid.cfg)")
        parser.add_argument("-db", "--database", default=def_db,
                            help="database type to load (e.g. mysql or sqlite3)")
        # sqlite3 is for nimbus
        parser.add_argument("-i", "--file", dest="input_file",
                            help="the sqlite3 filename with path (e.g. /home/metric/nimbus/alamo/alamo)")
        # mysql is for openstack
        parser.add_argument("-dbn", "--dbname_nova", default=def_nova,
                            help="Database of nova to use")
        parser.add_argument("-dbk", "--dbname_keystone", default=def_keystone,
                            help="Database of keystone to use")
        parser.add_argument("-dh", "--dbhost",
                            help="Connect to database host")
        parser.add_argument("-du", "--dbuser",
                            help="User for login of database")
        parser.add_argument("-dp", "--dbpass",
                            help="Password to use when connecting to database server")
        parser.add_argument("-dP", "--dbport", default=3306,
                            help="Port number to use for connection or 3306 for default")
        args = parser.parse_args()
        print(args)
        try:
            self.s_date = datetime.strptime(args.s_date, "%Y%m%d")
            self.e_date = datetime.strptime(args.e_date, "%Y%m%d")
            self.platform = args.platform
            self.platform_version = args.platform_version
            self.hostname = args.hostname
            self.confname = self.set_instance_conf(args.conf)
            self.db.db_type = args.database
            self.dbname_nova = args.dbname_nova
            self.dbname_keystone = args.dbname_keystone
            self.db.dbhost = args.dbhost
            self.db.dbuser = args.dbuser
            self.db.dbpasswd = args.dbpass
            self.db.dbport = args.dbport
            self.db.set_sqlite3_file(args.input_file)
        except Exception:
            # FIX: the original bare `except: pass` hid bad input
            # entirely; still best-effort, but report the failure.
            print(sys.exc_info()[0])
        self.argparser = parser