def main(input_file, settings={}, input_as_model=False, output_file='',
         output_as_model=False, merge_model_file='', level=1, delay=0,
         timeout=0, maxlen=0):

    merge_model = None

    if input_as_model:
        input_file = os.path.join(settings.get('model_path', ''),
                                  input_file)
    else:
        input_file = os.path.join(settings.get('input_path', ''),
                                  input_file)

    if output_as_model:
        output_file = os.path.join(settings.get('model_path', ''),
                                   output_file)
    elif output_file:
        output_file = os.path.join(settings.get('output_path', ''),
                                   output_file)

    if merge_model_file:
        merge_model_file = os.path.join(settings.get('model_path', ''),
                                        merge_model_file)
        with open(merge_model_file, 'r') as f:
            merge_model = Model(json.load(f))

    model = Model(load_input(input_file), level)

    run(model, output_file, output_as_model, merge_model, level, delay,
        timeout, maxlen)
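# --- Illustrative sketch (not part of the original source) ---
# Example of how main() resolves paths from the settings dictionary.
# The directory and file names below are hypothetical; only the keys
# 'input_path', 'output_path', and 'model_path' come from main().
example_settings = {
    'input_path': 'inputs',
    'output_path': 'outputs',
    'model_path': 'models',
}

# Reads inputs/session.json, writes outputs/session_links.json, and
# merges in models/base.json before calling run().
main('session.json',
     settings=example_settings,
     output_file='session_links.json',
     merge_model_file='base.json',
     level=2)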
def performTestForParams(self):
    """Performs the tests for each set of parameters."""
    for params in self.testParams:
        # Generate the model
        m = Model.Model(params['matrix'], initial=params['initial'],
                        seed=params['seed'])

        # Generate the inverse labeling
        il = m.inverseLabeling(params['size'])

        resultSize = sum(len(v) for v in il.values())

        # Make sure the labeling is the correct size
        self.assertEqual(resultSize, params['size'],
                         ('Result Size: {0},'
                          ' Expected Size: {1}.').format(
                              resultSize, params['size']))

        frequencies = self.calculateFrequency(m, il)
        frequencies = np.round(frequencies, decimals=params['precision'])

        expected = np.round(params['matrix'], decimals=params['precision'])

        equal = np.all(frequencies == expected)

        self.assertTrue(equal,
                        ("Calculated next frequencies: {0},"
                         " Expected next frequencies: {1}").format(
                             frequencies, expected))
def setUp(self):
    """Set up the parameters for the individual tests."""
    transitionMatrix = np.array([[0.33, 0.33, 0.34],
                                 [0., 0.5, 0.5],
                                 [0.5, 0.5, 0]])

    if self.id().split('.')[-1] == 'test_current':
        self.testParams = [
            {
                'model': Model.Model(transitionMatrix, initial=0, seed=5),
                'ExpectedCurrentState': 0
            },
            {
                'model': Model.Model(transitionMatrix, initial=1, seed=5),
                'ExpectedCurrentState': 1
            },
        ]
def performTestForParams(self):
    """Performs the tests for each set of parameters."""
    for params in self.testParams:
        model = Model.genModel(params['positional'], **params['keyword'])

        actualMatrix = model.tMatrix

        result = np.all(actualMatrix == params['expectedMatrix'])

        self.assertTrue(result,
                        ('Actual matrix: {0}'
                         ' Expected matrix: {1}').format(
                             actualMatrix, params['expectedMatrix']))
def setUp(self):
    """Set up the parameters for the individual tests."""
    tMatrix = np.array([[0.33, 0.33, 0.34],
                        [0., 0.5, 0.5],
                        [0.5, 0.5, 0]])

    if self.id().split('.')[-1] == 'test_exceptions':
        self.testParams = [
            {
                'model': Model.Model(tMatrix, initial=0, seed=5),
                'positional': 6,
                'keyword': {
                    'ontology': None
                },
                'exception': Model.OntologyError
            },
        ]
def setUp(self):
    """Set up the parameters for the individual tests."""
    tMatrix = np.array([[0.33, 0.33, 0.34],
                        [0., 0.5, 0.5],
                        [0.5, 0.5, 0]])

    ontology = {0: [1], 1: [0], 2: [2]}

    linko = [({1}, set(), set()),
             ({2}, set(), {3, 5}),
             ({1}, set(), set()),
             ({2}, {1}, {5}),
             ({1}, set(), set()),
             ({2}, {1, 3}, set())]

    if self.id().split('.')[-1] == 'test_genlinkograph':
        self.testParams = [
            {
                'model': Model.Model(tMatrix, initial=0, seed=5),
                'ontology': ontology,
                'expectedLinko': linko
            },
        ]
def genSingleOntologyStats(ontNext, ontLink, minLinkoSize, maxLinkoSize,
                           stepLinkoSize, modelNum, runNum, precision=2,
                           seeds=None):
    """Generate the stats on link models for a given ontology.

    inputs:

    ontNext: ontology used to generate the Markov models that create
    the next state.

    ontLink: ontology used for constructing linkographs.

    minLinkoSize: the minimum number of nodes in the linkographs to
    consider.

    maxLinkoSize: the maximum number of nodes in the linkographs to
    consider. Note that the max is not included, to match Python's
    conventions for lists and ranges.

    stepLinkoSize: the step size between minLinkoSize and maxLinkoSize
    for the sizes of the linkographs to consider.

    modelNum: the number of models.

    runNum: the number of linkographs to consider for each linkograph
    size.

    precision: the number of decimal places to use for the Markov
    models.

    seeds: a list of seeds to use for the generated next Markov
    models. The size of the list should be the same as the number of
    models.

    output:

    a modelNum x number_of_linkographs array that records the
    Frobenius norm of the average link Markov model for each model
    and each linkograph size. The (i, j) entry uses the i-th model
    and the j-th linkograph size, constructs runNum linkographs of
    that size, finds the average link Markov model, and records the
    norm of this average.

    """
    linkoSizes = range(minLinkoSize, maxLinkoSize, stepLinkoSize)

    ontSize = len(ontNext)

    absClasses = list(ontNext.keys())
    absClasses.sort()

    results = np.zeros((modelNum, len(linkoSizes)))

    if seeds is None:
        seeds = [time.time()*i for i in range(modelNum)]

    models = []

    # Create the generating models
    for i in range(modelNum):
        m = markel.genModelFromOntology(ontology=ontNext,
                                        precision=2,
                                        seed=seeds[i])
        # Storing the model and the current state
        models.append(m)

    # For each size linkograph, generate the runNum links and
    # calculate the needed statistics.
    for size in linkoSizes:
        print('size: {0}'.format(size))

        for modelIndex, m in enumerate(models):

            linkModels = np.zeros((ontSize, ontSize, runNum))

            for i in range(runNum):
                # Randomize the initial state
                m.state = m.random.randint(1, len(m.absClasses)) - 1

                linko = m.genLinkograph(size, ontology=ontLink)

                newModel = markel.genModelFromLinko(linko,
                                                    precision=precision,
                                                    ontology=None,
                                                    seed=None,
                                                    method='link_predictor',
                                                    linkNum=1)

                linkModels[:, :, i] = newModel.tMatrix

            # Find the matrix norm for the average.
            index = (size - minLinkoSize)//stepLinkoSize
            norm = np.linalg.norm(np.mean(linkModels, axis=-1), ord='fro')
            results[modelIndex][index] = norm

    return results
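# --- Illustrative sketch (not part of the original source) ---
# A minimal example of calling the Frobenius-norm variant above. The
# toy ontology (abstraction classes mapping to lists of linked
# classes) and all parameter values are assumptions chosen only to
# show the shape of the result.
_toy_ont = {'A': ['B'], 'B': ['A', 'C'], 'C': ['C']}
_norms = genSingleOntologyStats(ontNext=_toy_ont, ontLink=_toy_ont,
                                minLinkoSize=10, maxLinkoSize=40,
                                stepLinkoSize=10, modelNum=2, runNum=5)
# _norms[i][j] is the Frobenius norm of the average link Markov model
# for the i-th generating model and the j-th linkograph size.
print(_norms)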
# Read in the ontology
ontFile = open('resources/ontology.json', 'r')
ont = json.load(ontFile)
ontFile.close()

# Get the abstraction classes
absClasses = list(ont.keys())
absClasses.sort()

# Now we will get the Markov model functions
import markov.Model as markel  # For Markov model functions

# We will generate a Markov model with six states based
# off the abstraction classes in absClasses
model = markel.genModel(6, absClasses=absClasses, ontology=ont, seed=42)

# The abstraction classes
model.absClasses

# The ontology
model.ontology

# The transition matrix
model.tMatrix

# The current state
model.current()

# Create a dot representation
dotString = model.toDot()
                    help='precision of the probabilities')

args = parser.parse_args()

# Read in ontology
ont = None
with open(args.ontology[0], 'r') as ontFile:
    ont = json.load(ontFile)

# Get list of absClasses
absClasses = list(ont.keys())
absClasses.sort()

# Generate model
model = markel.genModel(len(ont),
                        absClasses=absClasses,
                        ontology=ont,
                        precision=args.precision,
                        seed=args.seed)

# Get the random variable's state for resetting it.
ranstate = model.random.getstate()

nnorms = np.zeros(args.max - args.min + 1)
tnorms = np.zeros(args.max - args.min + 1)

for i in range(args.min, args.max + 1):
    # Reset the random variable's state
    model.random.setstate(ranstate)

    # Generate a linkograph
parser.add_argument('-r', '--runs', type=int, default=100,
                    help='the number of runs.')
parser.add_argument('-p', '--precision', type=int, default=2,
                    help='the precision of the probabilities.')

args = parser.parse_args()

# Read in the model and the linking ontology
model = markel.readJson(args.model)

ontLink = None
with open(args.ontLink, 'r') as ontLinkFile:
    ontLink = json.load(ontLinkFile)

results = genSingleOntologyStats(model=model,
                                 ontLink=ontLink,
                                 minLinkoSize=args.minimum,
                                 maxLinkoSize=args.maximum,
                                 stepLinkoSize=args.step,
                                 runNum=args.runs,
                                 precision=args.precision)

absClasses = list(model.absClasses)
from collections import Counter
import time

import linkograph.stats as stats
import markov.Model as markel

# Not going to use an ontology, but one needs to be defined for the
# Markov models to produce linkographs.
ont = {}

# Make some simple labels. It does not really matter what they are.
absClasses = [str(i) for i in range(6)]

# Create 1000 models
models = []
for i in range(1000):
    seed = time.time()
    m = markel.genModel(6, absClasses=absClasses, ontology=ont,
                        seed=seed)
    models.append(m)

# Define the total count.
freq = Counter()

# Make a linkograph with a 1000 nodes for each model and find the
# label count.
for m in models:
    linko = m.genLinkograph(1000)
    c = stats.totalLabels(linko)
    freq.update(c)

print(freq)
def genSingleOntologyStats(ontNext, ontLink, minLinkoSize, maxLinkoSize,
                           stepLinkoSize, runNum, precision=2,
                           seeds=None):
    """Generate the stats on link models for a given ontology.

    inputs:

    ontNext: ontology used to generate the Markov models that create
    the next state.

    ontLink: ontology used for constructing linkographs.

    minLinkoSize: the minimum number of nodes in the linkographs to
    consider.

    maxLinkoSize: the maximum number of nodes in the linkographs to
    consider. Note that the max is not included, to match Python's
    conventions for lists and ranges.

    stepLinkoSize: the step size between minLinkoSize and maxLinkoSize
    for the sizes of the linkographs to consider.

    runNum: the number of linkographs to consider for each linkograph
    size.

    precision: the number of decimal places to use for the Markov
    models.

    seeds: a list of seeds to use for the generated next Markov
    models. The size of the list should be the same as the number of
    runs.

    output:

    a numLinkos x ontologySize x ontologySize x 2 array, where
    numLinkos is the number of linkograph sizes considered, that is,
    len(range(minLinkoSize, maxLinkoSize, stepLinkoSize)), and
    ontologySize is the size of the given ontology. The first
    dimension is for the linkograph size. For example, an i in this
    dimension selects the linkograph of size minLinkoSize +
    i*stepLinkoSize. The second and third dimensions give the link in
    the link Markov model. Thus, a (j, k) in these two dimensions
    represents the link (j, k) in the tMatrix of the link Markov
    model. The fourth dimension selects the mean or standard
    deviation: a 0 is the mean and a 1 is the standard deviation.
    Thus, the (i, j, k, 0) entry is the mean over all the links from
    the j-th abstraction class to the k-th abstraction class for
    runNum linkographs of size minLinkoSize + i*stepLinkoSize. A
    similar statement holds for the (i, j, k, 1) entry and the
    standard deviation.

    """
    linkoSizes = range(minLinkoSize, maxLinkoSize, stepLinkoSize)

    ontSize = len(ontNext)

    absClasses = list(ontNext.keys())
    absClasses.sort()

    results = np.zeros((len(linkoSizes), ontSize, ontSize, 2))

    if seeds is None:
        seeds = [time.time() for i in range(runNum)]

    models = []

    # Create the generating models
    for i in range(runNum):
        m = markel.genModelFromOntology(ontology=ontNext,
                                        precision=2,
                                        seed=seeds[i])
        # Storing the model and the current state
        models.append(m)

    # For each size linkograph, generate the runNum links and
    # calculate the needed statistics.
    for size in linkoSizes:
        # linkModels packs the transition matrix for each run into
        # a single matrix.
        linkModels = np.zeros((ontSize, ontSize, runNum))

        print('size: {0}'.format(size))

        for i in range(runNum):
            m = models[i]

            # Randomize the initial state
            m.state = m.random.randint(1, len(m.absClasses)) - 1

            linko = m.genLinkograph(size, ontology=ontLink)

            newModel = markel.genModelFromLinko(linko,
                                                precision=precision,
                                                ontology=None,
                                                seed=None,
                                                method='link_predictor',
                                                linkNum=1)

            linkModels[:, :, i] = newModel.tMatrix

        # Find the mean of each transition across the different runs.
        index = (size - minLinkoSize) // stepLinkoSize
        results[index, :, :, 0] = np.mean(linkModels, axis=-1)

        # Find the standard deviation across the different runs.
        results[index, :, :, 1] = np.std(linkModels, axis=-1)

    return results
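# --- Illustrative sketch (not part of the original source) ---
# A minimal example of calling the mean/standard-deviation variant
# above. The toy ontology and parameter values are assumptions; the
# indexing comments restate the output layout from the docstring.
_toy_ont = {'A': ['B'], 'B': ['A', 'C'], 'C': ['C']}
_linkStats = genSingleOntologyStats(ontNext=_toy_ont, ontLink=_toy_ont,
                                    minLinkoSize=10, maxLinkoSize=40,
                                    stepLinkoSize=10, runNum=5)
# _linkStats[i, j, k, 0] is the mean of the (j, k) transition over the
# runs for linkographs of size minLinkoSize + i*stepLinkoSize, and
# _linkStats[i, j, k, 1] is the corresponding standard deviation.
print(_linkStats[0, :, :, 0])
print(_linkStats[0, :, :, 1])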
def genSingleOntologyStats(metric, ontNext, ontLink, minLinkoSize,
                           maxLinkoSize, stepLinkoSize, modelNum, runNum,
                           precision=2, seeds=None):
    """Generate the stats on link models for a given ontology.

    inputs:

    metric: the function to apply to the generated linkographs.

    ontNext: ontology used to generate the Markov models that create
    the next state.

    ontLink: ontology used for constructing linkographs.

    minLinkoSize: the minimum number of nodes in the linkographs to
    consider.

    maxLinkoSize: the maximum number of nodes in the linkographs to
    consider. Note that the max is not included, to match Python's
    conventions for lists and ranges.

    stepLinkoSize: the step size between minLinkoSize and maxLinkoSize
    for the sizes of the linkographs to consider.

    modelNum: the number of models.

    runNum: the number of linkographs to consider for each linkograph
    size.

    precision: the number of decimal places to use for the Markov
    models.

    seeds: a list of seeds to use for the generated next Markov
    models. The size of the list should be the same as the number of
    models.

    output:

    a modelNum x number_of_linkographs array. The (i, j) entry
    provides the average value of the metric (for example, Shannon
    entropy) for the i-th model and the j-th linkograph size
    considered.

    """
    linkoSizes = range(minLinkoSize, maxLinkoSize, stepLinkoSize)

    ontSize = len(ontNext)

    absClasses = list(ontNext.keys())
    absClasses.sort()

    results = np.zeros((modelNum, len(linkoSizes)))

    if seeds is None:
        seeds = [time.time() * i for i in range(modelNum)]

    models = []

    # Create the generating models
    for i in range(modelNum):
        m = markel.genModelFromOntology(ontology=ontNext,
                                        precision=2,
                                        seed=seeds[i])
        # Storing the model and the current state
        models.append(m)

    # For each size linkograph, generate the runNum links and
    # calculate the needed statistics.
    for size in linkoSizes:
        print('size: {0}'.format(size))

        for modelIndex, m in enumerate(models):
            # Collect the metric value for each run.
            metric_values = np.zeros(runNum)

            for i in range(runNum):
                # Randomize the initial state
                m.state = m.random.randint(1, len(m.absClasses)) - 1

                linko = m.genLinkograph(size, ontology=ontLink)

                value = metric(linko)

                metric_values[i] = value

            # Find the mean across the runs.
            index = (size - minLinkoSize) // stepLinkoSize
            results[modelIndex][index] = np.mean(metric_values)

    return results
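# --- Illustrative sketch (not part of the original source) ---
# A minimal example of calling the metric-based variant above. The
# metric here counts the links in a linkograph by summing the sizes
# of the forelink sets of its (labels, backlinks, forelinks) entries,
# the same entry structure used by the expected linkograph in the
# tests above. The toy ontology and parameter values are assumptions.
def _totalLinks(linko):
    # Each link appears once as a forelink, so summing the forelink
    # set sizes counts every link exactly once.
    return sum(len(entry[2]) for entry in linko)

_toy_ont = {'A': ['B'], 'B': ['A', 'C'], 'C': ['C']}
_metricResults = genSingleOntologyStats(metric=_totalLinks,
                                        ontNext=_toy_ont,
                                        ontLink=_toy_ont,
                                        minLinkoSize=10,
                                        maxLinkoSize=40,
                                        stepLinkoSize=10,
                                        modelNum=2,
                                        runNum=5)
# _metricResults[i][j] is the mean metric value for the i-th model
# and the j-th linkograph size.
print(_metricResults)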
def genSingleOntologyStats(minLinkoSize, maxLinkoSize, stepLinkoSize,
                           model, runNum, precision=2):
    """Generate the stats on link models for a given ontology.

    inputs:

    minLinkoSize: the minimum number of nodes in the linkographs to
    consider.

    maxLinkoSize: the maximum number of nodes in the linkographs to
    consider. Note that the max is not included, to match Python's
    conventions for lists and ranges.

    stepLinkoSize: the step size between minLinkoSize and maxLinkoSize
    for the sizes of the linkographs to consider.

    model: the Markov model used to generate the linkographs. Note
    that the Markov model must have an ontology to generate the
    needed linkographs.

    runNum: the number of linkographs to consider for each linkograph
    size.

    precision: the number of decimal places to use for the Markov
    models.

    output:

    a numLinkos x ontologySize x ontologySize x 2 array, where
    numLinkos is the number of linkograph sizes considered, that is,
    len(range(minLinkoSize, maxLinkoSize, stepLinkoSize)), and
    ontologySize is the size of the ontology used by the given model.
    The first dimension is for the linkograph size. For example, an i
    in this dimension selects the linkograph of size minLinkoSize +
    i*stepLinkoSize. The second and third dimensions give the link in
    the link Markov model. Thus, a (j, k) in these two dimensions
    represents the link (j, k) in the tMatrix of the link Markov
    model. The fourth dimension selects the mean or standard
    deviation: a 0 is the mean and a 1 is the standard deviation.
    Thus, the (i, j, k, 0) entry is the mean over all the links from
    the j-th abstraction class to the k-th abstraction class for
    runNum linkographs of size minLinkoSize + i*stepLinkoSize. A
    similar statement holds for the (i, j, k, 1) entry and the
    standard deviation.

    """
    linkoSizes = range(minLinkoSize, maxLinkoSize, stepLinkoSize)

    ontSize = len(model.ontology)

    results = np.zeros((len(linkoSizes), ontSize, ontSize, 2))

    # For each size linkograph, generate the runNum links and
    # calculate the needed statistics.
    for size in linkoSizes:
        # currentModels packs the transition matrix for each run into
        # a single matrix.
        currentModels = np.zeros((ontSize, ontSize, runNum))

        print('Processing linkographs of size {0}'.format(size))

        for i in range(runNum):
            # Change the state.
            model.state = model.random.randint(1, len(model.absClasses))
            model.state -= 1

            linko = model.genLinkograph(size)

            newModel = markel.genModelFromLinko(linko,
                                                precision=precision,
                                                ontology=model.ontology,
                                                seed=None,
                                                method='link_predictor',
                                                linkNum=1)

            currentModels[:, :, i] = newModel.tMatrix

        # Find the mean of each transition across the different runs.
        index = (size - minLinkoSize) // stepLinkoSize
        results[index, :, :, 0] = np.mean(currentModels, axis=-1)

        # Find the standard deviation across the different runs.
        results[index, :, :, 1] = np.std(currentModels, axis=-1)

    return results
parser.add_argument('--graphGroupSize', type=int, default=10,
                    help='The number of graphs to group.')

args = parser.parse_args()

# Extract the ontology
ont = None
with open(args.ontology[0], 'r') as ontFile:
    ont = json.load(ontFile)

seed = int(math.modf(time.time())[0] * (10**7))

model = markel.genModelFromOntology(ont,
                                    precision=args.precision,
                                    seed=seed)

results = genSingleOntologyStats(minLinkoSize=args.minimum,
                                 maxLinkoSize=args.maximum,
                                 stepLinkoSize=args.step,
                                 model=model,
                                 runNum=args.runs,
                                 precision=args.precision)

# Create graphs for each of the transitions
# legend = []
# for initAbs in model.absClasses:
#     for termAbs in model.absClasses:
#         legend.append(initAbs + ' -> ' + termAbs)
                   'distance for each power is graphed.')

parser.add_argument('-t', '--targetModel', metavar='TARGET_MODEL.json',
                    help=targetModelHelp)

args = parser.parse_args()

# Extract the ontology
ont = None
with open(args.ontology[0], 'r') as ontFile:
    ont = json.load(ontFile)

tModel = None
if args.targetModel:
    tModel = markel.readJson(args.targetModel)

totalEvents = (args.maximum - args.minimum)//args.step

# Record the events over the required number of times for the
# required number of runs. So the array is a number_of_runs x
# total_events size array.
results = np.zeros((args.runs, totalEvents))

# Difference from the supplied transition matrix.
if tModel:
    # The distances are a number_of_runs x total_events size
    # array. The (i, j) entry corresponds to the distance from the
    # tModel of the j-th power of the i-th Markov model's tMatrix.
    distances = np.zeros((args.runs, totalEvents))