def test_15_estimate_complexity(self):
    lg = mod_log.Log(os.getcwd())
    self.assertEqual(lg.estimate_complexity(10, 10, 10, 10), '0.1 seconds')
    self.assertEqual(lg.estimate_complexity(100, 100, 100, 100), '16.7 minutes')
    self.assertEqual(lg.estimate_complexity(100, 100, 100, 1000), '6.9 hours')
    self.assertEqual(lg.estimate_complexity(1000, 1000, 1000, 1000), '115.7 days')
    self.assertEqual(lg.estimate_complexity(9999, 9999, 9999, 9999), '190182.7 years')
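# Minimal usage sketch (not part of the test above): estimate_complexity() takes four size
# parameters and returns a human readable duration string, so it can be used to warn before
# starting a long running job. The threshold check below is an illustrative assumption only.
import os
import cls_log as mod_log

lg_example = mod_log.Log(os.getcwd())
estimate = lg_example.estimate_complexity(500, 500, 500, 500)
print('estimated run time: ' + estimate)
if 'days' in estimate or 'years' in estimate:
    print('warning - consider reducing the problem size')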
def __init__(self, name='', fldr='', running=False):
    self.name = name
    self.fldr = fldr
    self.running = running
    self.characteristics = None   # this optionally can be a vais.Character, skillpoints, rules
    self.ai = None                # this is the external AI object that controls this agent
    self.results = []
    self.coords = {}
    self.coords['x'] = 0
    self.coords['y'] = 0
    self.coords['z'] = 0
    self.coords['t'] = 0
    self.status = 'READY'
    if fldr == '':
        fldr = mod_cfg.fldrs['log_folder']
    if fldr == '':
        print('ERROR - no log folder found')
        exit(1)
    self.mylog = mod_log.Log(fldr)
    self.mylog.record_command('agent', self.name + ' - initialising')

    # log agent name to get list of all agents
    self._log_agent_name('list_agent_names.txt')

    if self.running is True:
        self.start()
def index():
    """
    main function - outputs in following format BEFORE consolidation (which is TODO)
    # filename, word, linenumbers
    # refAction.csv, ActionTypeName, 1
    # refAction.csv, PhysicalType, 1
    # goals.csv, Cleanliness, 11
    """
    lg = mod_log.Log(mod_cfg.fldrs['localPath'])
    lg.record_command('Starting indexing', 'index.py')   # sys.modules[self.__module__].__file__)
    if silent == 'N':
        print('------------------')
        print('Rebuilding Indexes')
        print('------------------')

    with open(ndxFile, "w") as ndx:
        ndx.write('filename, word, linenumbers\n')

    files_to_index = mod_fl.FileList([mod_cfg.fldrs['public_data_path'] + os.sep + 'core'],
                                     ['*.csv'], ignore_files, "files_to_index_filelist.csv")

    if silent == 'N':
        print(format_op_hdr())

    for f in files_to_index.get_list():
        buildIndex(f, ndxFile, silent)

    # now build the one big index file
    consolidate(ndxFile, opIndex)

    lg.record_command('Finished indexing', 'index.py')   #, fle.GetModuleName())

    if silent == 'N':
        print('Done')
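# buildIndex() and consolidate() are called above but not shown in this snippet. The sketch
# below is an illustrative, assumed implementation that matches the documented
# 'filename, word, linenumbers' row format; the real buildIndex may tokenise differently.
import os
from collections import defaultdict

def build_index_sketch(fname, ndx_file):
    """ append one 'filename, word, linenumbers' row per word found in a CSV file """
    word_lines = defaultdict(list)
    with open(fname, 'r') as f:
        for line_num, line in enumerate(f, start=1):
            for word in line.replace(',', ' ').split():
                word_lines[word].append(str(line_num))
    with open(ndx_file, 'a') as ndx:
        for word, line_nums in sorted(word_lines.items()):
            ndx.write(os.path.basename(fname) + ', ' + word + ', ' + ' '.join(line_nums) + '\n')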
def __init__(self, name, fldr):
    self.name = name
    self.fldr = fldr
    self.lstPrograms = []
    self.log_folder = mod_cfg.fldrs['log_folder']
    self.lg = mod_log.Log(self.log_folder)
    self.lg.record_command('program', 'generating program list in - ' + self.log_folder)
    self.list_all_python_programs()
def test_01_new_log(self):
    lg = mod_log.Log(os.getcwd())
    lg.record_process('test_cls_log.py', 'TEST LOG')
    lg.record_process('test_cls_log.py', 'another log entry')
    self.assertEqual(os.path.exists('process.log'), True)
    self.assertEqual(os.path.exists('_sessions.txt'), True)
    sum = mod_log.LogSummary(lg, '')
    extract = sum.extract_logs(os.getcwd() + os.sep + 'process.log', 'test_cls_log.py')
    self.assertEqual(len(extract) > 0, True)
    # ['"2015-10-23 22:24:47","000000054","Dunc[1934 chars],\n'])
def __init__(self, name):
    """
    when using elsewhere include the line below
    super().__init__(self, *arg)
    """
    self.name = name
    log_folder = mod_cfg.fldrs['log_folder']
    self.log = cls_log.Log(log_folder)
    self.log.record_command('enviroment.py', 'Initialising base environment - ' + self.name)
def __init__(self, fldr=os.getcwd(), skills=None, info=None):
    self.fldr = fldr
    self.skills = skills
    self.prompt = ' Aggie ' + aggie_version + ' [EXIT | :LIST | ![add raw data] > '
    if info:
        self.info = info
    else:
        self.info = Info()
    self.status = 'Ready'
    self.lg = mod_log.Log(fldr)
    self.lg.record_source('aggie.py', 'Hello, my name is Aggie.')
    self.lg.record_source('aggie.py', 'base folder is ' + self.fldr)
def __init__(self, name='', fldr='', running=False):
    self.name = name
    self.fldr = fldr
    self.running = running
    self.results = []
    self.status = 'READY'
    if fldr == '':
        fldr = mod_cfg.fldrs['log_folder']
    if fldr == '':
        print('ERROR - no log folder found')
        exit(1)
    self.mylog = cls_log.Log(fldr)
    self.mylog.record_command('agent', self.name + ' - initialising')
    if self.running is True:
        self.start()
def main():
    """
    Example to show AIKIF logging of results.
    Generates a sequence of random grids and runs the Game of Life, saving results
    """
    iterations = 9      # how many simulations to run
    years = 3           # how many times to run each simulation
    width = 22          # grid height
    height = 78         # grid width
    time_delay = 0.03   # delay when printing on screen
    lg = mod_log.Log('test')
    lg.record_process('Game of Life', 'game_of_life_console.py')
    for _ in range(iterations):
        s, e = run_game_of_life(years, width, height, time_delay, 'N')
        lg.record_result("Started with " + str(s) + " cells and ended with " + str(e) + " cells")
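# Once the simulations finish, the recorded results can be read back with the LogSummary
# helper used in the tests in this collection. A hedged sketch follows - the 'result.log'
# file name is an assumption, since extract_logs() is only shown elsewhere reading 'process.log'.
import os
import cls_log as mod_log

lg_readback = mod_log.Log('test')
summary = mod_log.LogSummary(lg_readback, 'test')
result_rows = summary.extract_logs('test' + os.sep + 'result.log', 'game_of_life_console.py')
print(str(len(result_rows)) + ' result rows recorded')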
def test_12_filter_by_program(self):
    # first create some sample log entries from separate programs
    lg = mod_log.Log(cfg.fldrs['log_folder'])
    lg.record_process('test_prog1.py', 'test_prog1.py - recording process')
    lg.record_source('test_prog1.py', 'test_prog1.py - recording source')
    lg.record_command('test_prog1.py', 'test_prog1.py - recording command')
    lg.record_result('test_prog1.py', 'test_prog1.py - recording result')

    lg.record_process('test_prog2.py', 'test_prog2.py - recording process')
    lg.record_source('test_prog2.py', 'test_prog2.py - recording source')
    lg.record_command('test_prog2.py', 'test_prog2.py - recording command')
    lg.record_result('test_prog2.py', 'test_prog2.py - recording result')

    # summarise by program
    sum = mod_log.LogSummary(lg, cfg.fldrs['log_folder'])
    sum.filter_by_program('prog1.py', cfg.fldrs['log_folder'] + os.sep + 'prog1.txt')
    sum.filter_by_program('prog2.py', cfg.fldrs['log_folder'] + os.sep + 'prog2.txt')

    self.assertEqual(os.path.isfile(cfg.fldrs['log_folder'] + os.sep + 'prog1.txt'), True)
    self.assertEqual(os.path.isfile(cfg.fldrs['log_folder'] + os.sep + 'prog2.txt'), True)
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# cls_data.py

import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import cls_log as mod_log
import config as mod_cfg

#lg = mod_log.Log(mod_cfg.fldrs['log_folder'])
lg = mod_log.Log(os.getcwd())


class Data(object):
    """
    This is an experiment to provide a high level interface to the various
    data conversion functions in AIKIF and the toolbox methods.
    For non critical (low bias rating) information, you use this to scan
    large numbers of unknown data sources (e.g. twitter feeds, web sites)
    and shouldn't have to worry too much about file formats / data types.
    """
    def __init__(self, input_data, name='unnamed data', data_type='', src=''):
        self.input_data = input_data
        self.content = {}
        self.name = name
        self.data_type = data_type
        self.src = src
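# Hedged usage sketch for the Data wrapper above - only the constructor shown here is
# exercised, so nothing beyond the attributes it sets is assumed.
tweets = Data(['sample tweet one', 'sample tweet two'],
              name='twitter sample', data_type='list', src='twitter')
print(tweets.name, tweets.data_type, tweets.src)
print(tweets.content)   # starts as an empty dict until conversion methods populate it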
        'value': 'self'
    },
    {
        'label': 'source-type',
        'value': 'comment'
    },
]

sample_rawdata = {
    'metadata': sample_metadata,
    'data': 'You should develop in Python 3 instead of 2 for new projects unless you have a dependent package that only works on version 2'
}

lg = cls_log.Log(os.getcwd())
lg.record_process('bias.py', 'starting bias.py')

bias_schema = open(os.path.join(root_fldr, 'aikif', 'bias.schema'), 'r').readlines()
print(bias_schema)


class Bias(object):
    """
    class to give a rough weighting to a piece of information based on source,
    time, context, etc. This is used when automatically parsing data to ensure
    that a random comment on a forum does not get equal weighting to a peer
    reviewed academic paper. There can be multiple biases, and each user can modify the
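# The sample_metadata list above is truncated at the start of this snippet; from the
# visible entries its shape appears to be a list of {'label': ..., 'value': ...} dicts.
# The labels below are assumed examples for illustration, not the original values.
sample_metadata_example = [
    {'label': 'source-person', 'value': 'self'},
    {'label': 'source-type', 'value': 'comment'},
]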
root_fldr = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(1, root_fldr)
root_path = root_fldr + os.sep + 'samples'

import config
import cls_log
import core_data
import aikif.lib.cls_filelist as fl

password_folder = config.fldrs['pers_credentials']
log_folder = config.fldrs['log_folder']
data_folder = config.fldrs['pers_data']
#op_folder = os.path.join(config.fldrs['localPath'], '_PROD', 'data', 'core')
op_folder = os.path.join(config.fldrs['localPath'], '_DEV', 'data', 'core')

lg = cls_log.Log(log_folder)
usr = '******'

# file extensions for all data files

def main():
    """
    Generates selection of different events from various sources
    print('password_folder = ', password_folder)
    print('log_folder = ', log_folder)
    print('data_folder = ', data_folder)
    print('op_folder = ', op_folder)

    Screenshot folder is a text file as follows:
    Minecraft|C:\\Users\\Duncan\\AppData\\Roaming\\.minecraft\\screenshots
    Steam|E:\\games\\Steam\\userdata
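# The docstring above describes the screenshot folder list as pipe delimited 'Name|Path'
# lines in a text file. A small parsing sketch under that assumption (the file name passed
# in is illustrative only):
def read_screenshot_folders(fname):
    """ return a dict of {application_name: screenshot_folder} from a pipe delimited file """
    folders = {}
    with open(fname, 'r') as f:
        for line in f:
            if '|' in line:
                name, path = line.strip().split('|', 1)
                folders[name] = path
    return folders

# e.g. read_screenshot_folders(os.path.join(data_folder, 'screenshot_folders.txt'))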
import os
# odd issue running this on Windows 10, so using local version
import sys
root_fldr = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + ".." + os.sep + "aikif")
sys.path.append(root_fldr)

#import aikif.cls_log as mod_log
#import aikif.config as mod_cfg
import cls_log as mod_log
import config as mod_cfg

lg = mod_log.Log(mod_cfg.fldrs['log_folder'])
lg.record_command('api_main', 'starting API server in ' + os.getcwd())

from flask import Flask, jsonify, abort, make_response
from flask_restful import Api, Resource, reqparse, fields, marshal   # flask.ext.* namespace was removed; import the extensions directly
from flask_httpauth import HTTPBasicAuth

app = Flask(__name__, static_url_path="")
api = Api(app)
auth = HTTPBasicAuth()

app.config['BASIC_AUTH_USERNAME'] = '******'
app.config['BASIC_AUTH_PASSWORD'] = '******'

base_url = '/aikif/api/v1.0/'   # http://127.0.0.1:5000/aikif/api/v1.0/facts/2
base_url = '/aikif/api/v2.0/'
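# Hedged client-side sketch for calling the API configured above. The facts route comes from
# the example URL in the comment, but the exact route under the final v2.0 base_url, the
# response shape and the (masked) credentials are assumptions.
import requests   # third party package, not part of this module

resp = requests.get('http://127.0.0.1:5000/aikif/api/v2.0/facts/2',
                    auth=('your_username', 'your_password'))
print(resp.status_code)
print(resp.json())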
import os
import sys
import urllib
import urllib.request
import getpass
import socket

root_folder = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + os.sep + "..")
sys.path.append(root_folder)

import config as mod_cfg
import cls_log as mod_log

lg = mod_log.Log(os.getcwd())   # TODO - fix this. not the best way


def load_username_password(fname):
    """ use the config class to read credentials """
    username, password = mod_cfg.read_credentials(fname)
    return username, password    # load_username_password


def get_user_name():
    """ get the username of the person logged on """
    return getpass.getuser()
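# Hedged usage sketch: read_credentials() is only known from the call above, and the
# credentials file path below is purely illustrative.
username, password = load_username_password(os.path.join(root_folder, 'credentials.txt'))
print('connecting as ' + username + ' (' + get_user_name() + ') on ' + socket.gethostname())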
def setUp(self):
    unittest.TestCase.setUp(self)
    self.mylog = mod_log.Log(test_fldr)