def APIAccountGroups(username, token, fileName):
    # list used to store account group metadata
    agobjectlist = []
    # Define the account group dictionary
    aiddict = {}
    # Create the auth
    auth = APIAuthCreate(username, token)
    print('Extracting account group data..')
    # Get the account groups
    ags = APIGetaid(auth)
    # Loop through each account group
    for accountGroups in ags:
        agData = {}
        agFields = ['accountGroupName', 'aid', 'organizationName', 'default']
        for field in agFields:
            if field in accountGroups.keys():
                agData[field] = accountGroups[field]
            else:
                agData[field] = "NULL"
        agobjectlist.append(agData)
    # WRITE TO CSV
    generateCSV(fileName, agobjectlist)
    return
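# The extractors in this file rely on helpers (APIAuthCreate, APIGetaid,
# APIGetTests, APIGetAgents, APIGetRules, APIGetTestData, generateCSV) that
# are defined elsewhere in the repo. The sketch below shows one plausible
# shape for two of them, assuming HTTP basic auth with the API token used as
# the password and csv.DictWriter for output; it is an assumption, not the
# repo's actual implementation.
import csv
from requests.auth import HTTPBasicAuth

def APIAuthCreate(username, token):
    # Assumed: basic auth, with the API token standing in for the password.
    return HTTPBasicAuth(username, token)

def generateCSV(fileName, objectList):
    # Assumed: one CSV row per dict, header taken from the first dict's keys.
    if not objectList:
        return
    with open(fileName, 'w', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=list(objectList[0].keys()))
        writer.writeheader()
        writer.writerows(objectList)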
def APITests(username, token, aid, fileName):
    # Define username and password
    # username = '******'
    # password = '******'
    # list used to store test metadata
    testobjectlist = []
    # Define the test dictionary
    testdict = {}
    # Create the auth
    auth = APIAuthCreate(username, token)
    print('Extracting test data..')
    # Get the tests
    tests = APIGetTests(auth, aid)
    # Loop through each test
    for test in tests:
        testData = {}
        testFields = [
            'enabled', 'createdBy', 'createdDate', 'testId', 'testName',
            'type', 'server', 'interval', 'httpInterval', 'httpTimeLimit',
            'throughputDuration', 'bgpMeasurements', 'alertsEnabled',
            'domain', 'liveShare', 'timeLimit', 'ftpTimeLimit',
            'pageLoadTimeLimit', 'sipTimeLimit', 'dnsServers__serverName'
        ]
        for field in testFields:
            if field in test.keys():
                testData[field] = test[field]
            else:
                testData[field] = "NULL"
        testobjectlist.append(testData)
    # Count numbers of agents assigned to each test
    for testObject in testobjectlist:
        curData = APIGetTestData(auth, testObject['testId'])
        searchCloud = "\"agentType\":\"Cloud\""
        cloudAgents = curData.count(searchCloud)
        searchEnt = "\"agentType\":\"Enterprise\""
        entAgents = curData.count(searchEnt)
        # print("Cloud {}, Ent {}".format(cloudAgents, entAgents))
        testObject['cloudAgents'] = cloudAgents
        testObject['entAgents'] = entAgents
    # WRITE TO CSV
    generateCSV(fileName, testobjectlist)
    return
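# APITests counts agents by substring-matching the raw JSON body of each
# test's detail response, which implies APIGetTestData returns text rather
# than a parsed object. A minimal sketch of the two fetch helpers it calls,
# assuming the ThousandEyes v6 REST endpoints; the exact URLs, parameters,
# and response keys here are assumptions, not confirmed by this file.
# Note that APIUnitCalculator below passes aid as a third argument, so the
# real APIGetTestData likely accepts it as well.
import requests

TE_API = "https://api.thousandeyes.com/v6"  # assumed API root

def APIGetTests(auth, aid):
    # Assumed: return the parsed list of tests for the given account group.
    r = requests.get("{}/tests.json".format(TE_API), auth=auth,
                     params={'aid': aid})
    r.raise_for_status()
    return r.json()['test']

def APIGetTestData(auth, testId):
    # Assumed: return the raw JSON text so callers can count
    # "agentType":"Cloud" / "agentType":"Enterprise" occurrences.
    r = requests.get("{}/tests/{}.json".format(TE_API, testId), auth=auth)
    r.raise_for_status()
    return r.text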
def APIAgentIPList(username, token, aid, fileName):
    # list used to store agent metadata
    # (this is what gets output to CSV at the end)
    agentobjectlist = []
    # Define the agent dictionary
    agentdict = {}
    auth = APIAuthCreate(username, token)
    # Pull agent list for later
    agentsList = APIGetAgents(auth, aid)
    # Something to look at on the console
    print('Extracting agent data..')
    # Loop through each agent
    for agent in agentsList:
        agentData = {}
        # Columns will be output in this order later. Order only affects output.
        agentFields = [
            'agentType', 'agentId', 'agentName', 'location', 'countryId',
            'ipAddresses'
        ]
        for field in agentFields:
            if field in agent.keys():
                if field == 'ipAddresses':
                    # flatten the list of IPs into a single comma-separated cell
                    agentData[field] = ", ".join(agent[field])
                else:
                    agentData[field] = agent[field]
            else:
                agentData[field] = "NULL"
        agentobjectlist.append(agentData)
    # ALL OUTPUT CODE HERE
    # WRITE TO CSV
    generateCSV(fileName, agentobjectlist)
    return
def APIAlertRules(username, token, aid, fileName):
    # Define username and password
    # username = '******'
    # password = '******'
    # list used to store alert rule metadata
    rulesobjectlist = []
    # Define the rules dictionary
    rulesdict = {}
    # Create the auth
    auth = APIAuthCreate(username, token)
    print('Extracting alert rules data..')
    # Get the alert rules
    rules = APIGetRules(auth, aid)
    # Loop through each rule
    for rule in rules:
        rulesData = {}
        ruleFields = [
            'ruleId', 'ruleName', 'expression', 'direction', 'notifyOnClear',
            'default', 'alertType', 'minimumSources', 'minimumSourcesPct',
            'roundsViolatingOutOf', 'throughputDuration',
            'roundsViolatingRequired'
        ]
        for field in ruleFields:
            if field in rule.keys():
                rulesData[field] = rule[field]
            else:
                rulesData[field] = ""
        rulesobjectlist.append(rulesData)
    # WRITE TO CSV
    generateCSV(fileName, rulesobjectlist)
    return
def APIEnterpriseAgents(username, token, aid, fileName):
    # list used to store agent metadata
    agentobjectlist = []
    # Define the agent dictionary
    agentdict = {}
    # Create the auth
    auth = APIAuthCreate(username, token)
    print('Extracting agent data..')
    # Get the agents
    agentlist = APIGetAgents(auth, aid)
    # Loop through each agent
    for agents in agentlist:
        agentData = {}
        agentFields = [
            'agentId', 'agentName', 'location', 'countryId', 'ipAddresses',
            'utilization', 'targetForTests', 'enabled', 'agentType',
            'prefix', 'network'
        ]
        for field in agentFields:
            if field in agents.keys():
                agentData[field] = agents[field]
            else:
                agentData[field] = "NULL"
        agentobjectlist.append(agentData)
    # WRITE TO CSV
    generateCSV(fileName, agentobjectlist)
    return
from __future__ import print_function, division

import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import models

import util
from util import *

args = util.get_args()

# Regenerate the CSV index of the dataset if requested on the command line
if args.generatecsv:
    util.generateCSV(args.data)

liverDataset = util.LiverDataset(csv_file='data.csv',
                                 transform=util.transform_train)
trainLoader = DataLoader(liverDataset, batch_size=args.batchsize,
                         shuffle=True, num_workers=2)

classes = {0: 'no_liver', 1: 'liver'}

# net = models.resnet18(pretrained=True)
net = models.densenet161(pretrained=True)
util.set_parameter_requires_grad(net, args.feature_extract)
# num_ftrs = net.fc.in_features            # resnet exposes its head as .fc
# net.fc = nn.Linear(num_ftrs, num_classes)
num_ftrs = net.classifier.in_features      # densenet uses .classifier instead
net.classifier = nn.Linear(num_ftrs, args.classes)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = net.to(device)
finalconv_name = 'features'

criteria = nn.CrossEntropyLoss()
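# The setup above stops after defining the loss; no training loop is shown.
# A minimal training-step sketch under the same setup, assuming LiverDataset
# yields (image, label) batches and that plain SGD with a fixed learning rate
# is an acceptable stand-in for whatever optimizer the full script uses.
optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                      lr=0.001, momentum=0.9)

net.train()
for inputs, labels in trainLoader:
    inputs, labels = inputs.to(device), labels.to(device)
    optimizer.zero_grad()
    outputs = net(inputs)             # logits over the two classes
    loss = criteria(outputs, labels)  # cross-entropy against integer labels
    loss.backward()
    optimizer.step()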
def APIUnitCalculator(username, token, aid, fileName):
    # list used to store test metadata
    # (this is what gets output to CSV at the end)
    testobjectlist = []
    # Define the test dictionary
    testdict = {}
    auth = APIAuthCreate(username, token)
    # Pull agent list for later
    agentsList = APIGetAgents(auth, aid)
    # Something to look at on the console
    print('Extracting test data..')
    # Get the test list
    tests = APIGetTests(auth, aid)
    # Loop through each test
    for test in tests:
        testData = {}
        # Columns will be output in this order later. Order only affects output.
        testFields = [
            'enabled', 'createdBy', 'createdDate', 'testId', 'testName',
            'type', 'server', 'interval', 'httpInterval',
            'throughputDuration', 'bgpMeasurements', 'alertsEnabled',
            'domain', 'liveShare', 'dnsServers__serverName', 'httpTimeLimit',
            'timeLimit', 'ftpTimeLimit', 'pageLoadTimeLimit', 'sipTimeLimit',
            'duration', 'direction', 'targetAgentId'
        ]
        # Only one of these fields should apply to each test;
        # whichever field is found first is assigned to testData['otherTimeout'].
        comboFields = [
            'timeLimit', 'ftpTimeLimit', 'pageLoadTimeLimit', 'sipTimeLimit'
        ]
        for field in testFields:
            if field in test.keys():
                if field in comboFields:
                    testData['otherTimeout'] = test[field]
                else:
                    testData[field] = test[field]
            elif field not in comboFields:
                testData[field] = "NULL"
        if "otherTimeout" not in testData.keys():
            testData["otherTimeout"] = "NULL"
        testobjectlist.append(testData)
    # Access data within individual tests, calculate consumption
    for testObject in testobjectlist:
        curData = APIGetTestData(auth, testObject['testId'], aid)
        # count cloud agents per test
        searchCloud = "\"agentType\":\"Cloud\""
        cloudAgents = curData.count(searchCloud)
        testObject['cloudAgents'] = cloudAgents
        # count enterprise agents per test
        searchEnt = "\"agentType\":\"Enterprise\""
        entAgents = curData.count(searchEnt)
        testObject['entAgents'] = entAgents
        # count dns servers per test
        dnsServers = curData.count("\"serverName\"")
        # determine if target is Cloud or Enterprise agent (for A2A unit calculation)
        if testObject['type'] == 'agent-to-agent':
            targetAgentType = agentType(testObject['targetAgentId'], agentsList)
        else:
            targetAgentType = "NULL"
        # calculate unit consumption
        if testObject['enabled'] == 1:
            testObject['unitCost31d'] = testCost(
                testObject['type'], testObject['httpTimeLimit'],
                testObject['otherTimeout'], testObject['interval'],
                cloudAgents, entAgents, dnsServers, testObject['duration'],
                testObject['direction'], targetAgentType,
                testObject['httpInterval'])
        else:
            testObject['unitCost31d'] = "NULL"
    # ALL OUTPUT CODE HERE
    # WRITE TO CSV
    generateCSV(fileName, testobjectlist)
    return
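# APIUnitCalculator also depends on an agentType(agentId, agentsList) lookup
# and a testCost(...) pricing function. The pricing math is not reproduced
# here; the lookup is small enough to sketch, assuming agentsList is the
# parsed agent list returned by APIGetAgents above. This is an assumption
# about its shape, not the repo's actual code.
def agentType(agentId, agentsList):
    # Assumed: return the agentType of the matching agent, or "NULL" if the
    # target agent is not visible in this account group.
    for agent in agentsList:
        if agent.get('agentId') == agentId:
            return agent.get('agentType', "NULL")
    return "NULL"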