コード例 #1
0
def __get_stats( path_run, quiet = False ):
    path_datalib = os.path.join( path_run, 'stats', FILENAME_DATALIB )
    if os.path.exists( path_datalib ):
        return datalib.parse( path_datalib,
                              keycolname = 'step' )

    if not quiet:
        # This can take a long time, so let the user we're not hung
        print 'Converting stats files into datalib format for run', path_run

    tables = __create_tables( path_run, quiet )

    paths = glob.glob( os.path.join(path_run, 'stats', 'stat.*') )
    paths.sort( lambda x, y: __path2step(x) - __path2step(y) )

    for path in paths:
        __add_row( tables, path, quiet )

    if not quiet:
        print '\nwriting %s' % path_datalib

    datalib.write( path_datalib,
                   tables,
                   randomAccess = False )

    return tables
コード例 #2
0
def test__version3():
	"""Round-trip version-3 datalib output.

	Writes the same three tables twice -- once as a fixed-table file and
	once with a single shared schema -- then re-parses both files and
	spot-checks the stored separations.
	"""
	schema_cols = ['ID', 'separation']
	schema_types = ['int', 'float']

	tables = [datalib.Table(name, schema_cols, schema_types)
		  for name in ('1', '2', '3')]
	t1, t2, t3 = tables

	def __fill(table, entries):
		# Append one row per (id, separation) pair.
		for key, sep in entries:
			r = table.createRow()
			r['ID'] = key
			r['separation'] = sep

	__fill(t1, [('2', .02), ('3', .03)])
	__fill(t2, [('3', .04)])

	datalib.write( 'version3__table_fixed.plt', tables )
	datalib.write( 'version3__single_none.plt', tables, randomAccess=False, singleSchema=True)

	# Both on-disk layouts must parse back to identical contents.
	for path in ('version3__table_fixed.plt', 'version3__single_none.plt'):
		parsed = datalib.parse( path, keycolname = 'ID', tablename2key = int )
		assert( parsed[1][2]['separation'] == .02 )
		assert( parsed[1][3]['separation'] == .03 )
		assert( parsed[2][3]['separation'] == .04 )
コード例 #3
0
ファイル: datautil.py プロジェクト: ferpalaria/polyworld
def main():
	"""Command-line entry point.

	Usage: <path_in> <path_out> <tablename> [clauses...]
	Reads one table from a datalib file, applies the sort / rowfilter
	clauses parsed from the remaining arguments, and writes the result.
	"""
	argv = sys.argv[1:]

	if not len(argv):
		usage()

	path_in = argv[0]
	path_out = argv[1]
	tablename = argv[2]

	clauses = parse_clauses( argv[3:] )

	table = datalib.parse( path_in, [tablename] )[tablename]

	# Apply each clause in order; each transform replaces the table.
	for args in clauses:
		mode = args[0]
		if mode == 'sort':
			table = dosort( table, args[1:] )
		elif mode == 'rowfilter':
			table = dorowfilter( table, args[1:] )
		else:
			print 'invalid mode:', mode
			sys.exit( 1 )

	datalib.write( path_out, [table] )
コード例 #4
0
def test__misc():
	"""Ad-hoc smoke test: build keyed tables, read cells back by key,
	iterate with MatrixIterator, and round-trip through datalib
	write/parse using /tmp files."""
	print '-----------------------------------'
	
	COLNAMES = ['T', 'A', 'B', 'C']
	COLTYPES = ['int', 'int', 'int', 'int']
	
	t = datalib.Table('test', COLNAMES, COLTYPES, keycolname = 'T')
	
	row = t.createRow()
	
	# Both row.set() and item assignment should populate cells.
	row.set('T', 0)
	row['A'] = 10
	row.set('B', 11)
	row.set('C', 12)
	
	row = t.createRow()
	row['T'] = 1
	row['A'] = 20
	row['B'] = 21
	row['C'] = 22
	
	# Indexing the table by key column value 'T'.
	print t[0]['A']
	print t[1]['A']
	
	it = iterators.MatrixIterator(t, range(2), ['A','B'])
	for a in it:
	    print a
	
	datalib.write('/tmp/datalib1', t)
	
	print '-----------------------------------'
	
	table = datalib.Table(name='Example 2',
	                      colnames=['Time','A','B'],
	                      coltypes=['int','float','float'],
	                      keycolname='Time')
	row = table.createRow()
	row['Time'] = 1
	row['A'] = 100.0
	row['B'] = 101.0
	row = table.createRow()
	row['Time'] = 10001
	row['A'] = 200.0
	row['B'] = 201.0

	# Sparse keys: only Time=1 and Time=10001 exist in the range.
	it = iterators.MatrixIterator(table, range(1,10002,10000), ['B'])
	for a in it:
	    print a
	
	datalib.write('/tmp/datalib2', table)
	
	tables = datalib.parse('/tmp/datalib2', keycolname = 'Time')
	
	table = tables['Example 2']
	print 'key=',table.keycolname
	
	print tables['Example 2'][1]['A']
コード例 #5
0
def copy_metrics(source_file, target_file):
	"""Copy the datalib tables named by the global *metrics_to_copy*
	from source_file into target_file (appending; existing tables are
	replaced according to the global *overwrite* flag).

	When the global *test* flag is set, only prints what would be done.
	"""
	global test, overwrite, metrics_to_copy
	if test:
		print '  copying metrics (%s) from' % (','.join(metrics_to_copy)), source_file, 'to', target_file
	else:
		tables_to_copy = {}
		source_tables = datalib.parse(source_file)
		for metric in metrics_to_copy:
			tables_to_copy[metric] = source_tables[metric]
		datalib.write(target_file, tables_to_copy, append=True, replace=overwrite)
コード例 #6
0
def copy_metrics(source_file, target_file):
	"""Append the tables listed in the global *metrics_to_copy* from
	source_file to target_file; *overwrite* controls whether existing
	tables in the target are replaced. Dry run when *test* is set.
	"""
	global test, overwrite, metrics_to_copy
	if test:
		print '  copying metrics (%s) from' % (','.join(metrics_to_copy)), source_file, 'to', target_file
	else:
		tables_to_copy = {}
		source_tables = datalib.parse(source_file)
		for metric in metrics_to_copy:
			tables_to_copy[metric] = source_tables[metric]
		datalib.write(target_file, tables_to_copy, append=True, replace=overwrite)
コード例 #7
0
def __get_stats(path_run):
    """Return the run's stats as datalib tables, building the datalib
    file from the individual 'stat.*' files on first use.

    path_run -- run directory containing a 'stats' subdirectory.
    """
    path_datalib = os.path.join(path_run, 'stats', FILENAME_DATALIB)
    # Fast path: already converted by an earlier call.
    if os.path.exists(path_datalib):
        return datalib.parse(path_datalib, keycolname='step')

    tables = __create_tables(path_run)

    paths = glob.glob(os.path.join(path_run, 'stats', 'stat.*'))
    # key= sorts by timestep with one __path2step call per path and is
    # Python-3 compatible (cmp comparators are py2-only).
    paths.sort(key=__path2step)

    for path in paths:
        __add_row(tables, path)

    # BUG FIX: the write used to sit inside the loop above, rewriting
    # the entire datalib file once per stat file. Write once, after all
    # rows have been added.
    datalib.write(path_datalib, tables)

    return tables
コード例 #8
0
def rename_metrics_in_file(file, metrics_to_rename):
    """Rename tables in a datalib file in place.

    metrics_to_rename -- dict mapping old table name -> new table name.
    Honors the global *test* (dry run) and *verbose* flags.
    """
    global test, verbose
    tables = datalib.parse(file)
    not_renamed = []
    renamed = []
    for table in tables.values():
        if table.name in metrics_to_rename:
            renamed.append((table.name, metrics_to_rename[table.name]))
            table.name = metrics_to_rename[table.name]
        else:
            not_renamed.append(table.name)
    # Only rewrite the file if at least one table actually changed name.
    if renamed:
        if test:
            print "renaming", renamed, "in", file
        else:
            datalib.write(file, tables)
    if not_renamed and verbose:
        print "not renaming", not_renamed, "in", file
コード例 #9
0
def __get_stats( path_run ):
    """Return stats tables for a run, converting the per-step 'stat.*'
    files into a single datalib file if it does not exist yet.

    path_run -- run directory containing a 'stats' subdirectory.
    """
    path_datalib = os.path.join( path_run, 'stats', FILENAME_DATALIB )
    # Fast path: datalib file already built by a previous call.
    if os.path.exists( path_datalib ):
        return datalib.parse( path_datalib,
                              keycolname = 'step' )

    tables = __create_tables( path_run )

    paths = glob.glob( os.path.join(path_run, 'stats', 'stat.*') )
    # Sort by timestep; key= beats the cmp comparator (one call per
    # path) and also works under Python 3.
    paths.sort( key = __path2step )

    for path in paths:
        __add_row( tables, path )

    # BUG FIX: this write was indented inside the loop above, rewriting
    # the whole datalib file once per stat file; write once after all
    # rows have been added.
    datalib.write( path_datalib,
                   tables )

    return tables
コード例 #10
0
def rename_metrics_in_file(file, metrics_to_rename):
	"""Rename tables in a datalib file in place, per the old-name ->
	new-name mapping in metrics_to_rename. Global *test* selects a
	dry run that only prints the planned changes.
	"""
	global test
	tables = datalib.parse(file)
	not_renamed = []
	renamed = []
	for table in tables.values():
		if table.name in metrics_to_rename:
			renamed.append((table.name, metrics_to_rename[table.name]))
			table.name = metrics_to_rename[table.name]
		else:
			not_renamed.append(table.name)
	# Rewrite the file only when something actually changed.
	if renamed:
		if test:
			print 'renaming', renamed, 'in', file
		else:
			datalib.write(file, tables)
	if test and not_renamed:
		print 'not renaming', not_renamed, 'in', file
コード例 #11
0
def rename_metrics_in_file(file, metrics_to_rename):
    """Apply the old-name -> new-name mapping in metrics_to_rename to a
    datalib file's tables and rewrite it in place. Honors the global
    *test* (dry run) and *verbose* flags.
    """
    global test, verbose
    tables = datalib.parse(file)
    not_renamed = []
    renamed = []
    for table in tables.values():
        if table.name in metrics_to_rename:
            renamed.append((table.name, metrics_to_rename[table.name]))
            table.name = metrics_to_rename[table.name]
        else:
            not_renamed.append(table.name)
    # Skip the rewrite entirely when no table was renamed.
    if renamed:
        if test:
            print 'renaming', renamed, 'in', file
        else:
            datalib.write(file, tables)
    if not_renamed and verbose:
        print 'not renaming', not_renamed, 'in', file
コード例 #12
0
def test__stream():
	"""Write two tables to stream.plt, then re-parse the file in
	streaming mode, printing each row via the stream_row callback."""
	colnames = ['ID', 'separation']
	coltypes = ['int', 'float']

	t1 = datalib.Table('1', colnames, coltypes)
	t2 = datalib.Table('2', colnames, coltypes)

	def __addrow(table, id, separation):
		# Append a single (ID, separation) row to *table*.
		row = table.createRow()
		row['ID'] = id
		row['separation'] = separation

	__addrow(t1, '1', .01)
	__addrow(t1, '2', .02)
	__addrow(t2, '11', .11)
	__addrow(t2, '12', .12)

	datalib.write( 'stream.plt', [t1,t2] )

	def stream_row( row ):
		# Called once per parsed row instead of building tables.
		print row['ID'], row['separation']

	datalib.parse( 'stream.plt', stream_row = stream_row )
コード例 #13
0
def __get_stats(path_run, quiet=False):
    path_datalib = os.path.join(path_run, 'stats', FILENAME_DATALIB)
    if os.path.exists(path_datalib):
        return datalib.parse(path_datalib, keycolname='step')

    if not quiet:
        # This can take a long time, so let the user we're not hung
        print 'Converting stats files into datalib format for run', path_run

    tables = __create_tables(path_run, quiet)

    paths = glob.glob(os.path.join(path_run, 'stats', 'stat.*'))
    paths.sort(lambda x, y: __path2step(x) - __path2step(y))

    for path in paths:
        __add_row(tables, path, quiet)

    if not quiet:
        print '\nwriting %s' % path_datalib

    datalib.write(path_datalib, tables, randomAccess=False)

    return tables
コード例 #14
0
def move_metrics_in_file(source_file, target_file, metrics_to_move):
	"""Move (rename and append) the tables listed in metrics_to_move
	from source_file into target_file, then unlink source_file only if
	every table in it was moved. Global *test* selects a dry run.

	metrics_to_move -- dict mapping old table name -> new table name.
	"""
	global test
	tables = datalib.parse(source_file)
	not_moving = []
	moving = []
	for table in tables.values():
		if table.name in metrics_to_move:
			moving.append((table.name, metrics_to_move[table.name]))
			table.name = metrics_to_move[table.name]
		else:
			not_moving.append(table.name)
	if moving:
		if test:
			print 'moving', moving, 'from', source_file, 'to', target_file
			if len(not_moving) == 0:
				print 'unlinking', source_file
			else:
				print 'not unlinking', source_file, 'due to unmoved metrics:', not_moving
		else:
			# NOTE: all parsed tables (renamed or not) are appended;
			# the source file is removed only when fully consumed.
			datalib.write(target_file, tables, append=True)
			if len(not_moving) == 0:
				os.unlink(source_file)
	elif test and not_moving:
		print 'not moving', not_moving, 'from', source_file, 'and not unlinking file'
コード例 #15
0
def move_metrics_in_file(source_file, target_file, metrics_to_move):
	"""Rename tables per metrics_to_move and append them to target_file;
	delete source_file afterwards only when nothing was left behind.
	When the global *test* flag is set, just print the planned actions.
	"""
	global test
	tables = datalib.parse(source_file)
	not_moving = []
	moving = []
	for table in tables.values():
		if table.name in metrics_to_move:
			moving.append((table.name, metrics_to_move[table.name]))
			table.name = metrics_to_move[table.name]
		else:
			not_moving.append(table.name)
	if moving:
		if test:
			print 'moving', moving, 'from', source_file, 'to', target_file
			if len(not_moving) == 0:
				print 'unlinking', source_file
			else:
				print 'not unlinking', source_file, 'due to unmoved metrics:', not_moving
		else:
			# All parsed tables are appended; source survives if any
			# table was not part of the move.
			datalib.write(target_file, tables, append=True)
			if len(not_moving) == 0:
				os.unlink(source_file)
	elif test and not_moving:
		print 'not moving', not_moving, 'from', source_file, 'and not unlinking file'
コード例 #16
0
		for line in complexity_all:
			fields = line.split()
			agent_number = fields.pop(0)

			for type in complexities_remaining:
				table = tables[type]
				row = table.createRow()
	
				row.set('AgentNumber', agent_number)
				row.set('Complexity', fields.pop(0))
	
		# --- Write to file and normalize data (eg sort and remove 0's)
		for type in complexities_remaining:
			table = tables[type]
	
			datalib.write(__path(type),
				      table)
	
			data = table.getColumn('Complexity').data
	
			tdata[type] = common_complexity.normalize_complexities(data)

	return complexities

####################################################################################
###
### FUNCTION make_percents()
###
####################################################################################
def make_percents(tables, totals):
	for table in tables.values():
		for row in table.rows():
コード例 #17
0
def computeEvents(metabolismCache, epochLen, path_run, path_output):
    """Bucket birth and contact events into fixed-length epochs and
    write assortative-mating, give, and contact summary tables.

    metabolismCache -- maps agent id -> metabolism type.
    epochLen        -- epoch length in timesteps.
    path_run        -- run directory (reads events/contacts.log).
    path_output     -- datalib file to write.
    """
    birthsDeaths = common_logs.BirthsDeaths(path_run)

    class Epoch:
        # Per-epoch counters for mate/give/contact events, split by
        # whether the two agents share a metabolism.
        def __init__(self, timestep):
            self.timestep = timestep
            self.mate_same = 0
            self.mate_diff = 0
            self.give_same = 0
            self.give_diff = 0
            self.contact_same = 0
            self.contact_diff = 0

    epochs = {}

    def __get_epoch(step):
        # Map a timestep to its epoch, creating the Epoch lazily.
        # Floor division keeps the bucket index an int (plain '/' would
        # yield a float index under Python 3).
        epochIndex = step // epochLen
        try:
            return epochs[epochIndex]
        except KeyError:  # narrowed from a bare except: only a missing key is expected
            epoch = Epoch(epochIndex * epochLen)
            epochs[epochIndex] = epoch
            return epoch

    for agent in birthsDeaths.entries.keys():
        entry = birthsDeaths.getEntry(agent)

        # Only count agents with a known parent that have already died.
        if (entry.parent1 is not None) and (entry.deathTimestep is not None):
            epoch = __get_epoch(entry.birthTimestep)

            parent1Metabolism = metabolismCache[entry.parent1]
            parent2Metabolism = metabolismCache[entry.parent2]

            if parent1Metabolism == parent2Metabolism:
                epoch.mate_same += 1
            else:
                epoch.mate_diff += 1

    def __process_contact_row(row):
        # Streamed per-row callback for the contacts log.
        step = row["Timestep"]
        agent1 = row["Agent1"]
        agent2 = row["Agent2"]
        events = row["Events"]

        epoch = __get_epoch(step)

        agent1Metabolism = metabolismCache[agent1]
        agent2Metabolism = metabolismCache[agent2]

        # Each 'G' in the event string is one give event.
        ngive = events.count("G")

        if agent1Metabolism == agent2Metabolism:
            epoch.contact_same += 1
            epoch.give_same += ngive
        else:
            epoch.contact_diff += 1
            epoch.give_diff += ngive

    datalib.parse(
        os.path.join(path_run, "events/contacts.log"), tablenames=["Contacts"], stream_row=__process_contact_row
    )

    # HACK: DROP LAST EPOCH SINCE IT'S USUALLY JUST ONE STEP
    # (assumes at least one epoch exists, as the original did)
    epochIndexes = list(epochs.keys())
    epochIndexes.sort()
    del epochs[epochIndexes[-1]]

    colnames = ["Timestep", "Ms", "Md", "PercentSame"]
    coltypes = ["int", "int", "int", "float"]
    table = datalib.Table("AssortativeMating", colnames, coltypes)

    for epoch in epochs.values():
        row = table.createRow()
        row["Timestep"] = epoch.timestep
        row["Ms"] = epoch.mate_same
        row["Md"] = epoch.mate_diff
        row["PercentSame"] = 100 * fdiv(epoch.mate_same, epoch.mate_same + epoch.mate_diff)

    colnames = ["Timestep", "Ms", "Md", "Cs", "Cd", "Ab"]
    coltypes = ["int", "int", "int", "int", "int", "float"]
    table_contactNorm = datalib.Table("AssortativeMating_ContactNormalized", colnames, coltypes)

    for epoch in epochs.values():
        row = table_contactNorm.createRow()
        row["Timestep"] = epoch.timestep
        row["Ms"] = epoch.mate_same
        row["Md"] = epoch.mate_diff
        row["Cs"] = epoch.contact_same
        row["Cd"] = epoch.contact_diff
        row["Ab"] = fdiv(fdiv(epoch.mate_same, epoch.contact_same), fdiv(epoch.mate_diff, epoch.contact_diff))

    colnames = ["Timestep", "Gs", "Gd", "Cs", "Cd", "Ps", "Pd", "Bias"]
    coltypes = ["int", "int", "int", "int", "int", "float", "float", "float"]
    table_give = datalib.Table("Give", colnames, coltypes)

    for epoch in epochs.values():
        row = table_give.createRow()
        row["Timestep"] = epoch.timestep
        row["Gs"] = epoch.give_same
        row["Gd"] = epoch.give_diff
        row["Cs"] = epoch.contact_same
        row["Cd"] = epoch.contact_diff
        # *2 because each contact involves two agents that could give.
        row["Ps"] = fdiv(epoch.give_same, epoch.contact_same * 2)
        row["Pd"] = fdiv(epoch.give_diff, epoch.contact_diff * 2)
        row["Bias"] = fdiv(fdiv(epoch.give_same, epoch.contact_same), fdiv(epoch.give_diff, epoch.contact_diff))

    colnames = ["Timestep", "Cs", "Cd", "PercentSame"]
    coltypes = ["int", "int", "int", "float"]
    table_contact = datalib.Table("Contact", colnames, coltypes)

    for epoch in epochs.values():
        row = table_contact.createRow()
        row["Timestep"] = epoch.timestep
        row["Cs"] = epoch.contact_same
        row["Cd"] = epoch.contact_diff
        row["PercentSame"] = 100 * fdiv(epoch.contact_same, epoch.contact_same + epoch.contact_diff)

    datalib.write(path_output, [table, table_contactNorm, table_give, table_contact])
コード例 #18
0
        for line in complexity_all:
            fields = line.split()
            agent_number = fields.pop(0)

            for type in complexities_remaining:
                table = tables[type]
                row = table.createRow()

                row.set('AgentNumber', agent_number)
                row.set('Complexity', fields.pop(0))

        # --- Write to file and normalize data (eg sort and remove 0's)
        for type in complexities_remaining:
            table = tables[type]

            datalib.write(__path(type), table)

            data = table.getColumn('Complexity').data

            tdata[type] = common_complexity.normalize_complexities(data)

    return complexities


####################################################################################
###
### FUNCTION divide(numerator, denominator)
###
####################################################################################
def divide(numerator, denominator):
    if denominator == 0:
コード例 #19
0
def analyze_recent_dir(complexities, recent_dir):
    """Compute complexity data for every epoch under *recent_dir* and
    write the 'Avr', 'Norm', and 'Raw' datalib output files.

    complexities -- list of complexity type names to process.
    recent_dir   -- directory whose all-digit subdirectories are epochs.
    """
    outputpath = os.path.join(recent_dir, OutputFilename)

    print "- recent directory='%s'" % (recent_dir)
    print "- output='%s'" % (outputpath)

    #-----------------------------------------------------------------------------------
    #--
    #-- Find epoch/timestep directories
    #--
    #-----------------------------------------------------------------------------------
    timesteps = []
    # list all of the timesteps, make sure they are all integers (and directories), then sort them by number.
    for potential_timestep in os.listdir(recent_dir):
        if not potential_timestep.isdigit():
            continue  # if timestep IS NOT a digit (note, 0 is considered a digit), skip.
        if not os.path.isdir(os.path.join(recent_dir, potential_timestep)):
            continue  # if the timestep isn't a directory, skip it.

        timesteps.append(int(potential_timestep))  # add timestep to our list

    if len(timesteps) == 0:
        err('No epochs found. Not a valid recent directory.')

    timesteps.sort()  # sort the timesteps, lowest numbers come first.

    #-----------------------------------------------------------------------------------
    #--
    #-- Compute complexities for all timesteps
    #--
    #-- (store values to file in timestep dir)
    #--
    #-----------------------------------------------------------------------------------
    DATA = {}

    print "Final Timestep: %s" % (max(timesteps))
    print datetime.datetime.now()
    print "Processing:",

    for t in timesteps:
        timestep_directory = os.path.join(recent_dir, str(t))
        print '%s at %s...' % (t, datetime.datetime.now())
        sys.stdout.flush()

        DATA[t] = tdata = {}

        complexities_remaining = complexities

        # Prefer pre-computed legacy data when LegacyMode allows it;
        # anything not found there is computed below.
        if LegacyMode != 'off':
            complexities_read = read_legacy_complexities(
                complexities_remaining, timestep_directory, tdata)
            complexities_remaining = list_difference(complexities_remaining,
                                                     complexities_read)
            if len(complexities_remaining) != 0:
                if LegacyMode == 'force':
                    err('Failed to find data for %s' %
                        ','.join(complexities_remaining))

            print '  Legacy =', complexities_read

        if len(complexities_remaining) > 0:
            complexities_computed = compute_complexities(
                complexities_remaining, t, timestep_directory, tdata)
            complexities_remaining = list_difference(complexities_remaining,
                                                     complexities_computed)

        assert (len(complexities_remaining) == 0)

    #-----------------------------------------------------------------------------------
    #--
    #-- Create 'Avr' File
    #--
    #-----------------------------------------------------------------------------------
    AVR = algorithms.avr_table(DATA, complexities, timesteps)

    datalib.write(outputpath, AVR, append=True)

    #-----------------------------------------------------------------------------------
    #--
    #-- Create 'Norm' file
    #--
    #-----------------------------------------------------------------------------------
    # Bins normalized per-row against each row's own min/max.
    tables = compute_bins(DATA, timesteps, complexities, AVR,
                          lambda row: row.get('min'),
                          lambda row: row.get('max'))

    outputpath = os.path.join(recent_dir,
                              OutputFilename2.replace('.', 'Norm.'))

    datalib.write(outputpath, tables)

    #-----------------------------------------------------------------------------------
    #--
    #-- Create 'Raw' file
    #--
    #-----------------------------------------------------------------------------------
    # Bins against global min/max across all rows of each type.
    MAXGLOBAL = dict([(type, float('-inf')) for type in complexities])
    MINGLOBAL = dict([(type, float('inf')) for type in complexities])

    for avr_table in AVR.values():
        for row in avr_table.rows():
            type = avr_table.name

            MAXGLOBAL[type] = max(MAXGLOBAL[type], row.get('max'))
            MINGLOBAL[type] = min(MINGLOBAL[type], row.get('min'))

    tables = compute_bins(DATA, timesteps, complexities, AVR,
                          lambda row: MINGLOBAL[row.table.name],
                          lambda row: MAXGLOBAL[row.table.name])

    outputpath = os.path.join(recent_dir, OutputFilename2.replace('.', 'Raw.'))

    datalib.write(outputpath, tables)
コード例 #20
0
def write_data(dir, tables):
	"""Append *tables* to the Avr file belonging to directory *dir*."""
	datalib.write(get_avr_file(dir), tables, append=True)
コード例 #21
0

print '-----------------------------------'

# Two tables with the same schema; V1 rows hold i*10, V2 rows i*100.
t1 = datalib.Table('V1', ['Time', 'mean'], ['int', 'float'])
t2 = datalib.Table('V2', ['Time', 'mean'], ['int', 'float'])

for i in range(5):
    row = t1.createRow()
    row['Time'] = i
    row['mean'] = float(i * 10)
    row = t2.createRow()
    row['Time'] = i
    row['mean'] = float(i * 100)

# Initial (non-append) write creates/overwrites test.plt.
datalib.write('test.plt', [t1, t2])

print_datalib('test.plt')

#--

# Appending V3 must preserve V1 and V2 already in the file.
t3 = datalib.Table('V3', ['Time', 'mean'], ['int', 'float'])
for i in range(5):
    row = t3.createRow()
    row['Time'] = i
    row['mean'] = float(i * 1000)

datalib.write('test.plt', t3, append=True)

print_datalib('test.plt')
コード例 #22
0
def test__append():
	"""Exercise datalib.write's append/replace semantics against
	test.plt, printing the file contents after each write."""
	print '-----------------------------------'
	
	t1 = datalib.Table('V1', ['Time', 'mean'], ['int', 'float'])
	t2 = datalib.Table('V2', ['Time', 'mean'], ['int', 'float'])
	
	for i in range(5):
		row = t1.createRow()
		row['Time'] = i
		row['mean'] = float(i*10)
		row = t2.createRow()
		row['Time'] = i
		row['mean'] = float(i*100)
	
	# Fresh write: creates test.plt with V1 and V2.
	datalib.write('test.plt', [t1, t2])
	
	print_datalib('test.plt')
	
	#--
	
	# Append a brand-new table V3.
	t3 = datalib.Table('V3', ['Time', 'mean'], ['int', 'float'])
	for i in range(5):
		row = t3.createRow()
		row['Time'] = i
		row['mean'] = float(i*1000)
	
	datalib.write('test.plt', t3, append=True)
	
	print_datalib('test.plt')
	
	#--
	
	# Append a table whose name collides with existing V2 (default
	# replace behavior applies).
	t4 = datalib.Table('V2', ['Time', 'mean'], ['int', 'float'])
	for i in range(5):
		row = t4.createRow()
		row['Time'] = i
		row['mean'] = i*0.1
	
	datalib.write('test.plt', t4, append=True)
	
	print_datalib('test.plt')
	
	#--
	
	# Colliding name again, this time with replace explicitly False.
	t4 = datalib.Table('V2', ['Time', 'mean'], ['int', 'float'])
	for i in range(5):
		row = t4.createRow()
		row['Time'] = i
		row['mean'] = float(i*100)
	
	datalib.write('test.plt', t4, append=True, replace=False)
	
	print_datalib('test.plt')
	
	#--
	
	# Non-append write: the file is replaced wholesale with just V4.
	t4 = datalib.Table('V4', ['Time', 'mean'], ['int', 'float'])
	for i in range(5):
		row = t4.createRow()
		row['Time'] = i
		row['mean'] = i*0.1
	
	datalib.write('test.plt', t4)
	
	print_datalib('test.plt')
コード例 #23
0
ファイル: CalcEatLearn.py プロジェクト: vodenizeka/polyworld
        epoch.goodSecondPieceCount,
        epoch.goodSecondPieceCount + epoch.badSecondPieceCount)
    row['GoodAfterBad'] = __div(epoch.goodAfterBad, epoch.afterBadCount)
    row['GoodAfterGood'] = __div(epoch.goodAfterGood, epoch.afterGoodCount)

# 'Counts' table: per-epoch totals of good/bad eat events.
colnames = ['T', 'All', 'Good', 'Bad']
coltypes = ['int', 'int', 'int', 'int']
table_counts = datalib.Table('Counts', colnames, coltypes)

for tepoch in tepochs:
    epoch = epochs[tepoch]
    row = table_counts.createRow()
    row['T'] = epoch.timestep
    row['All'] = epoch.goodAllCount + epoch.badAllCount
    row['Good'] = epoch.goodAllCount
    row['Bad'] = epoch.badAllCount

# 'Energy' table: per-epoch ratio of good energy to (negated) bad energy.
colnames = ['T', 'GoodOverBad']
coltypes = ['int', 'float']
table_energy = datalib.Table('Energy', colnames, coltypes)

for tepoch in tepochs:
    epoch = epochs[tepoch]
    row = table_energy.createRow()
    row['T'] = epoch.timestep
    row['GoodOverBad'] = __div(epoch.goodAllEnergy, epoch.badAllEnergy * -1,
                               epoch.goodAllEnergy)

# Emit all three tables into the run's events directory.
datalib.write(run + '/events/eatlearn.txt',
              [table_fractionGood, table_counts, table_energy])
コード例 #24
0
ファイル: CalcEatLearn.py プロジェクト: ferpalaria/polyworld
	row['IgnoreFirstEat'] = __div(epoch.goodIgnoreFirstCount, epoch.goodIgnoreFirstCount + epoch.badIgnoreFirstCount )
	row['IgnoreFirstEat_UniqueFood'] = __div( epoch.goodUniqueCount, epoch.goodUniqueCount + epoch.badUniqueCount )
	row['SecondPiece'] = __div( epoch.goodSecondPieceCount, epoch.goodSecondPieceCount + epoch.badSecondPieceCount )
	row['GoodAfterBad'] = __div( epoch.goodAfterBad, epoch.afterBadCount )
	row['GoodAfterGood'] = __div( epoch.goodAfterGood, epoch.afterGoodCount )


# 'Counts' table: per-epoch totals of good/bad eat events.
colnames = ['T', 'All', 'Good', 'Bad']
coltypes = ['int', 'int', 'int', 'int']
table_counts = datalib.Table( 'Counts', colnames, coltypes )

for tepoch in tepochs:
	epoch = epochs[tepoch]
	row = table_counts.createRow()
	row['T'] = epoch.timestep
	row['All'] = epoch.goodAllCount + epoch.badAllCount
	row['Good'] = epoch.goodAllCount
	row['Bad'] = epoch.badAllCount

# 'Energy' table: per-epoch ratio of good energy to (negated) bad energy.
colnames = ['T', 'GoodOverBad']
coltypes = ['int', 'float']
table_energy = datalib.Table( 'Energy', colnames, coltypes )

for tepoch in tepochs:
	epoch = epochs[tepoch]
	row = table_energy.createRow()
	row['T'] = epoch.timestep
	row['GoodOverBad'] = __div( epoch.goodAllEnergy, epoch.badAllEnergy * -1, epoch.goodAllEnergy )

# Emit all three tables into the run's events directory.
datalib.write( run + '/events/eatlearn.txt', [table_fractionGood, table_counts, table_energy] )
コード例 #25
0
def write_data(dir, tables):
	"""Append *tables* to the Avr file for directory *dir*."""
	avr_file = get_avr_file(dir)
# 	print 'dir =', dir
# 	print 'tables =', tables
	datalib.write(avr_file, tables, append=True)
コード例 #26
0
	print '--'

print '-----------------------------------'

# Two tables with the same schema; V1 rows hold i*10, V2 rows i*100.
t1 = datalib.Table('V1', ['Time', 'mean'], ['int', 'float'])
t2 = datalib.Table('V2', ['Time', 'mean'], ['int', 'float'])

for i in range(5):
	row = t1.createRow()
	row['Time'] = i
	row['mean'] = float(i*10)
	row = t2.createRow()
	row['Time'] = i
	row['mean'] = float(i*100)

# Initial (non-append) write creates/overwrites test.plt.
datalib.write('test.plt', [t1, t2])

print_datalib('test.plt')

#--

# Appending V3 must preserve V1 and V2 already in the file.
t3 = datalib.Table('V3', ['Time', 'mean'], ['int', 'float'])
for i in range(5):
	row = t3.createRow()
	row['Time'] = i
	row['mean'] = float(i*1000)

datalib.write('test.plt', t3, append=True)

print_datalib('test.plt')
コード例 #27
0
def analyze_recent_dir(complexities, recent_dir):
	"""Compute complexity data for every epoch directory under
	*recent_dir* and write the 'Avr', 'Norm', and 'Raw' datalib files.

	complexities -- list of complexity type names to process.
	recent_dir   -- directory whose all-digit subdirectories are epochs.
	"""
	outputpath = os.path.join(recent_dir, OutputFilename);
	
	print "- recent directory='%s'" %(recent_dir)
	print "- output='%s'" % (outputpath)
	
	#-----------------------------------------------------------------------------------
	#--
	#-- Find epoch/timestep directories
	#--
	#-----------------------------------------------------------------------------------
	timesteps = []
	# list all of the timesteps, make sure they are all integers (and directories), then sort them by number.
	for potential_timestep in os.listdir( recent_dir ):
		if not potential_timestep.isdigit(): continue					# if timestep IS NOT a digit (note, 0 is considered a digit), skip.
		if not os.path.isdir( os.path.join(recent_dir, potential_timestep) ): continue	# if the timestep isn't a directory, skip it.
	
		timesteps.append( int(potential_timestep) )						# add timestep to our list
	
	if len(timesteps) == 0:
		err('No epochs found. Not a valid recent directory.')

	timesteps.sort()									# sort the timesteps, lowest numbers come first.
	
	#-----------------------------------------------------------------------------------
	#--
	#-- Compute complexities for all timesteps
	#--
	#-- (store values to file in timestep dir)
	#--
	#-----------------------------------------------------------------------------------
	DATA={ }
	
	print "Final Timestep: %s" % ( max(timesteps) )
	print "Processing:",
	
	for t in timesteps:
		timestep_directory = os.path.join(recent_dir, str(t))
		print '%s...\n' % (t),
		sys.stdout.flush()	
	
		DATA[t] = tdata = {}

		complexities_remaining = complexities

		# Prefer pre-computed legacy data when LegacyMode allows it;
		# anything not found there is computed below.
		if LegacyMode != 'off' :
			complexities_read = read_legacy_complexities(complexities_remaining,
								     timestep_directory,
								     tdata)
			complexities_remaining = list_difference(complexities_remaining,
								 complexities_read)
			if len(complexities_remaining) != 0:
				if LegacyMode == 'force':
					err('Failed to find data for %s' % ','.join(complexities_remaining))
			
			print '  Legacy =', complexities_read

		if len(complexities_remaining) > 0:
			complexities_computed = compute_complexities(complexities_remaining,
								     timestep_directory,
								     tdata)
			complexities_remaining = list_difference(complexities_remaining,
								 complexities_computed)

		assert(len(complexities_remaining) == 0)
	
	#-----------------------------------------------------------------------------------
	#--
	#-- Create 'Avr' File
	#--
	#-----------------------------------------------------------------------------------
	AVR = algorithms.avr_table(DATA,
				   complexities,
				   timesteps)
	
	datalib.write(outputpath, AVR, append=True)
	
	
	#-----------------------------------------------------------------------------------
	#--
	#-- Create 'Norm' file
	#--
	#-----------------------------------------------------------------------------------
	# Bins normalized per-row against each row's own min/max.
	tables = compute_bins(DATA,
			      timesteps,
			      complexities,
			      AVR,
			      lambda row: row.get('min'),
			      lambda row: row.get('max'))
	
	outputpath = os.path.join(recent_dir, OutputFilename2.replace( '.', 'Norm.'))
	
	datalib.write(outputpath, tables)
	
	
	#-----------------------------------------------------------------------------------
	#--
	#-- Create 'Raw' file
	#--
	#-----------------------------------------------------------------------------------
	# Bins against global min/max across all rows of each type.
	MAXGLOBAL = dict([(type, float('-inf')) for type in complexities])
	MINGLOBAL = dict([(type, float('inf')) for type in complexities])
	
	for avr_table in AVR.values():
		for row in avr_table.rows():
			type = avr_table.name
	
			MAXGLOBAL[type] = max(MAXGLOBAL[type], row.get('max'));
			MINGLOBAL[type] = min(MINGLOBAL[type], row.get('min'));
	
	tables = compute_bins(DATA,
			      timesteps,
			      complexities,
			      AVR,
			      lambda row: MINGLOBAL[row.table.name],
			      lambda row: MAXGLOBAL[row.table.name])
	
	outputpath = os.path.join(recent_dir, OutputFilename2.replace( '.', 'Raw.'))
	
	datalib.write(outputpath, tables)
コード例 #28
0
ファイル: metabolism.py プロジェクト: vodenizeka/polyworld
def computeEvents( metabolismCache, epochLen, path_run, path_output ):
    """Bucket birth and contact events into epochs of *epochLen* steps
    and write mating/give/contact summary tables to *path_output*.

    metabolismCache -- maps agent id -> metabolism type.
    path_run        -- run directory (reads events/contacts.log).
    """
    birthsDeaths = common_logs.BirthsDeaths( path_run )

    class Epoch:
        # Per-epoch counters for mate/give/contact events, split by
        # whether the two agents share a metabolism.
        def __init__( self, timestep ):
            self.timestep = timestep
            self.mate_same = 0
            self.mate_diff = 0
            self.give_same = 0
            self.give_diff = 0
            self.contact_same = 0
            self.contact_diff = 0
    
    epochs = {}
    
    for agent in birthsDeaths.entries.keys():
        entry = birthsDeaths.getEntry( agent )
        
        # Only count agents with known parents that have already died.
        if (entry.parent1 != None) and (entry.deathTimestep != None):
            # NOTE(review): relies on Python-2 integer '/'; use '//' if ported to Python 3
            epochIndex = entry.birthTimestep / epochLen
            try:
                epoch = epochs[epochIndex]
            except:
                # NOTE(review): bare except -- KeyError is the only expected error here
                epoch = Epoch( epochIndex * epochLen )
                epochs[epochIndex] = epoch
    
            parent1Metabolism = metabolismCache[ entry.parent1 ]
            parent2Metabolism = metabolismCache[ entry.parent2 ]
    
            if parent1Metabolism == parent2Metabolism:
                epoch.mate_same += 1
            else:
                epoch.mate_diff += 1
    
    def __process_contact_row( row ):
        # Streamed per-row callback for the contacts log.
        step = row['Timestep']
        agent1 = row['Agent1']
        agent2 = row['Agent2']
        events = row['Events']

        # NOTE(review): same Python-2 integer division assumption as above
        epochIndex = step / epochLen
        try:
            epoch = epochs[epochIndex]
        except:
            # NOTE(review): bare except -- KeyError is the only expected error here
            epoch = Epoch( epochIndex * epochLen )
            epochs[epochIndex] = epoch

        agent1Metabolism = metabolismCache[ agent1 ]
        agent2Metabolism = metabolismCache[ agent2 ]

        # Each 'G' in the event string is one give event.
        ngive = events.count( 'G' )

        if agent1Metabolism == agent2Metabolism:
            epoch.contact_same += 1
            epoch.give_same += ngive
        else:
            epoch.contact_diff += 1
            epoch.give_diff += ngive

    datalib.parse( os.path.join(path_run, 'events/contacts.log'),
                   tablenames = ['Contacts'],
                   stream_row = __process_contact_row )

    # HACK: DROP LAST EPOCH SINCE IT'S USUALLY JUST ONE STEP
    epochIndexes = list(epochs.keys())
    epochIndexes.sort()
    del epochs[ epochIndexes[-1] ]
    
    colnames = ['Timestep', 'Ms', 'Md', 'PercentSame']
    coltypes = ['int', 'int', 'int', 'float']
    table = datalib.Table( 'AssortativeMating', colnames, coltypes )
    
    for epoch in epochs.values():
        row = table.createRow()
        row['Timestep'] = epoch.timestep
        row['Ms'] = epoch.mate_same
        row['Md'] = epoch.mate_diff
        row['PercentSame'] = 100 * fdiv(epoch.mate_same, epoch.mate_same + epoch.mate_diff)

    colnames = ['Timestep', 'Ms', 'Md', 'Cs', 'Cd', 'Ab']
    coltypes = ['int', 'int', 'int', 'int', 'int', 'float']
    table_contactNorm = datalib.Table( 'AssortativeMating_ContactNormalized', colnames, coltypes )
    
    for epoch in epochs.values():
        row = table_contactNorm.createRow()
        row['Timestep'] = epoch.timestep
        row['Ms'] = epoch.mate_same
        row['Md'] = epoch.mate_diff
        row['Cs'] = epoch.contact_same
        row['Cd'] = epoch.contact_diff
        row['Ab'] = fdiv( fdiv(epoch.mate_same, epoch.contact_same),
                          fdiv(epoch.mate_diff, epoch.contact_diff) );

    colnames = ['Timestep', 'Gs', 'Gd', 'Cs', 'Cd', 'Ps', 'Pd', 'Bias']
    coltypes = ['int', 'int', 'int', 'int', 'int', 'float', 'float', 'float']
    table_give = datalib.Table( 'Give', colnames, coltypes )
    
    for epoch in epochs.values():
        row = table_give.createRow()
        row['Timestep'] = epoch.timestep
        row['Gs'] = epoch.give_same
        row['Gd'] = epoch.give_diff
        row['Cs'] = epoch.contact_same
        row['Cd'] = epoch.contact_diff
        # *2 because each contact involves two agents that could give.
        row['Ps'] = fdiv( epoch.give_same, epoch.contact_same * 2 )
        row['Pd'] = fdiv( epoch.give_diff, epoch.contact_diff * 2 )
        row['Bias'] = fdiv( fdiv(epoch.give_same, epoch.contact_same),
                            fdiv(epoch.give_diff, epoch.contact_diff) );

    colnames = ['Timestep', 'Cs', 'Cd', 'PercentSame']
    coltypes = ['int', 'int', 'int', 'float']
    table_contact = datalib.Table( 'Contact', colnames, coltypes )
    
    for epoch in epochs.values():
        row = table_contact.createRow()
        row['Timestep'] = epoch.timestep
        row['Cs'] = epoch.contact_same
        row['Cd'] = epoch.contact_diff
        row['PercentSame'] = 100 * fdiv( epoch.contact_same, epoch.contact_same + epoch.contact_diff )
    
    datalib.write( path_output, [table, table_contactNorm, table_give, table_contact] )