Example #1
def runBeamDesignProblem():
    """
    Run an example case of the beam designer problem
    """
    p = Params()
    p.nAgents = 8
    p.nDims = 4
    p.nTeams = 2
    p.reps = 32
    p.steps = 100
    p.agentTeams = m.specializedTeams(p.nAgents, p.nTeams)
    p.teamDims = m.teamDimensions(p.nDims, p.nTeams)
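    #specializedTeams assigns each of the nAgents agents to one of the nTeams
    #sub-teams, and teamDimensions gives each sub-team an (approximately) even
    #share of the nDims design variables as a binary team-by-dimension matrix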

    t = teamWorkSharing(p, BeamDesigner)
    return t
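#Usage sketch (not from the original snippet): run the example and inspect the
#returned team; getBestScore() is the minimization objective used throughout
#these examples, so lower is better.
team = runBeamDesignProblem()
print("best beam design score:", team.getBestScore())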
Example #2
def run():
    t0 = timer.time()
    p = Params()
    #    p.reps=2
    # selfBias = 0.5
    # curatedTeams = False
    #    shareAcrossTeams = True

    #change team size and specialization
    p.nAgents = 12
    p.nTeams = 4
    p.nDims = 56
    p.agentTeams = m.specializedTeams(p.nAgents, p.nTeams)
    p.teamDims = m.teamDimensions(p.nDims, p.nTeams)
    p.reps = 16
    p.steps = 500

    pComms = np.linspace(0, 1, 6)

    allTeamObjects = []
    for pComm in pComms:
        if __name__ in ('__main__', 'kaboom.designScienceStudies.i_optimalCommRate'):
            pool = multiprocessing.Pool(processes=16)
            allTeams = pool.starmap(carTeamWorkProcess,
                                    zip(range(p.reps), itertools.repeat(p)))
            print('next. time: ' + str(timer.time() - t0))
            for team in allTeams:
                allTeamObjects.append(team)
            pool.close()
            pool.join()
            print('finished a pComm round')
        else:
            print(__name__)
    # allTeams = [t for tl in allTeamObjects for t in tl]
    print("time to complete: " + str(timer.time() - t0))

    # name="nShares_long_smallTeam"
    # directory = saveResults(allTeamObjects,name)
    # plt.savefig(directory+"/"+name+".pdf")
    # plt.savefig(directory+"/"+name+".png",dpi=300)

    return allTeamObjects, pComms, p
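#Usage sketch (not from the original snippet): run() returns one team object
#per (pComm, rep) pair; summarize performance vs. communication rate the way
#the later examples do (assumes numpy as np, matplotlib.pyplot as plt, and
#kaboom.modelFunctions as m are imported, as elsewhere in these snippets).
allTeamObjects, pComms, p = run()
scores = np.array([t.getBestScore() for t in allTeamObjects]) * -1  #invert: higher is better
pcs = [pc for pc in pComms for _ in range(p.reps)]
m.plotCategoricalMeans(pcs, scores)
plt.xlabel('prob of communication (c)')
plt.ylabel('performance')
plt.show()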
Example #3
def run():

    # ## B 2.3
    # Robustness on two diagonals
    #

    # PROBLEM SET 1
    #problems favor a range of styles from adaptive to mid to innovative

    t0 = timer.time()
    p = Params()

    p.curatedTeams = True

    p.reps = 32

    allTeams = []
    allTeamScores = []
    p.aiScore = 100
    aiRanges = np.linspace(0, 100, 6)

    #    pComm = 0.2

    #the diagonal across preferred style:
    roughnesses = np.logspace(-1, .7, num=6, base=10)  #[.2,.6,1.8,5.4]
    roughnesses = roughnesses[1:]
    speeds = np.logspace(-1, .7, num=6, base=10) / 100  # [.001,.004,.016,.048]
    speeds = speeds[1:]

    problemSet = [roughnesses, speeds]
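    #the problem set pairs each roughness with a speed; the five pairs sweep
    #the diagonal of problems from adaptive-favoring to innovative-favoring
    #preferred styles (see the comments above)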

    for aiRange in aiRanges:
        p.aiRange = aiRange
        teams = []
        teamScores = []
        for i in range(p.reps):
            scores, t = robustnessTest(p, problemSet)
            teams.append(t)
            teamScores.append(scores)
        allTeams.append(teams)
        allTeamScores.append(teamScores)
        print('next')
    print('time: %s' % (timer.time() - t0))

    #
    #teams = [ t for tt in allTeams for t in tt]
    #directory = saveResults(teams,'robustnessTest_diag1')
    #rFile = directory+'/'+'scoreMatrix.obj'
    #rPickle = open(rFile, 'wb')
    #pickle.dump(allTeamScores,rPickle)
    # f = open('/Users/samlapp/SAE_ABM/results/1542634807.0205839robustnessTest/scoreMatrix.obj','rb')
    # sm = pickle.load(f)

    # In[67]:

    #STANDARDIZE for each problem!
    ats = np.array(allTeamScores)
    problemMeans = [np.mean(ats[:, :, i]) for i in range(len(problemSet[0]))]
    problemSds = [np.std(ats[:, :, i]) for i in range(len(problemSet[0]))]
    allTeamScoresStandardized = ats.copy()  #copy so the raw scores are not overwritten
    for j in range(len(allTeamScoresStandardized)):
        for i in range(len(problemSet[0])):
            for k in range(p.reps):
                allTeamScoresStandardized[
                    j, k, i] = (ats[j, k, i] - problemMeans[i]) / problemSds[i]
    np.shape(allTeamScoresStandardized)
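    #equivalently (vectorized): the triple loop above is a per-problem z-score,
    #  allTeamScoresStandardized = (ats - ats.mean(axis=(0, 1))) / ats.std(axis=(0, 1))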

    meanScores = [
        np.mean(t) for teamSet in allTeamScoresStandardized for t in teamSet
    ]
    meanGrouped = [[np.mean(t) for t in teamSet]
                   for teamSet in allTeamScoresStandardized]
    sdScores = [
        np.std(t) for teamSet in allTeamScoresStandardized for t in teamSet
    ]
    sdGrouped = [[np.std(t) for t in teamSet]
                 for teamSet in allTeamScoresStandardized]
    ranges = [t.dAI for teamSet in allTeams for t in teamSet]
    robustness = np.array(meanScores) - np.array(sdScores)
    robustnessGrouped = np.array(meanGrouped) - np.array(sdGrouped)
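    #robustness combines average performance with consistency across the
    #problem set (mean minus standard deviation of a team's standardized scores)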

    # meanScores = np.array(meanScores)*-1

    plt.scatter(ranges, np.array(meanScores), c=[.9, .9, .9])
    # plt.title("9 problem matrix")

    cms = m.plotCategoricalMeans(ranges, meanScores)
    #plt.savefig('results/vii_set1_robustnessInv.pdf')

    stat, pscore = scipy.stats.ttest_ind(meanGrouped[0], meanGrouped[2])
    print("significance: p= " + str(pscore))
    corr, _ = scipy.stats.pearsonr(ranges[0:p.reps * 4],
                                   meanScores[0:p.reps * 4])
    print("Pearson's correlation: %.3f" % corr)

    # In[2]
    # PROBLEM SET 2
    # Now the other diagonal (all 5 problems prefer mid-range style)

    allTeamsD2 = []
    allTeamScoresD2 = []
    #    aiScore = 100
    aiRanges = np.linspace(0, 100, 6)

    #the diagonal across preferred style:
    roughnesses = np.logspace(-1, .7, num=6, base=10)  #[.2,.6,1.8,5.4]
    roughnesses = roughnesses[1:]
    speeds = np.logspace(-1, .7, num=6, base=10) / 100  # [.001,.004,.016,.048]
    speeds = speeds[1:]
    #reverse the order: pair large speed (small space) with small roughness
    speeds = speeds[::-1]

    problemSet = [roughnesses, speeds]

    for aiRange in aiRanges:
        p.aiRange = aiRange
        teams = []
        teamScores = []
        for i in range(p.reps):
            scores, t = robustnessTest(p, problemSet)
            teams.append(t)
            teamScores.append(scores)
        allTeamsD2.append(teams)
        allTeamScoresD2.append(teamScores)
        print('next')
    print('time: %s' % (timer.time() - t0))

    #teams = [ t for tt in allTeamsD2 for t in tt]
    #directory = saveResults(teams,'robustnessTest_diag2')
    #rFile = directory+'/'+'scoreMatrix.obj'
    #rPickle = open(rFile, 'wb')
    #pickle.dump(allTeamScores,rPickle)
    # f = open('/Users/samlapp/SAE_ABM/results/1542634807.0205839robustnessTest/scoreMatrix.obj','rb')
    # sm = pickle.load(f)

    #STANDARDIZE for each problem!
    ats = np.array(allTeamScoresD2)
    problemMeans = [np.mean(ats[:, :, i]) for i in range(len(problemSet[0]))]
    problemSds = [np.std(ats[:, :, i]) for i in range(len(problemSet[0]))]
    allTeamScoresStandardized = ats.copy()  #copy so the raw scores are not overwritten
    for j in range(len(allTeamScoresStandardized)):
        for i in range(len(problemSet[0])):
            for k in range(p.reps):
                allTeamScoresStandardized[
                    j, k, i] = (ats[j, k, i] - problemMeans[i]) / problemSds[i]
    np.shape(allTeamScoresStandardized)

    meanScoresD2 = [
        np.mean(t) for teamSet in allTeamScoresStandardized for t in teamSet
    ]
    meanGroupedD2 = [[np.mean(t) for t in teamSet]
                     for teamSet in allTeamScoresStandardized]
    sdScoresD2 = [
        np.std(t) for teamSet in allTeamScoresStandardized for t in teamSet
    ]
    sdGroupedD2 = [[np.std(t) for t in teamSet]
                   for teamSet in allTeamScoresStandardized]
    ranges = [t.dAI for teamSet in allTeamsD2 for t in teamSet]
    # robustness = np.array(meanScores)-np.array(sdScores)
    # robustnessGrouped = np.array(meanGrouped)-np.array(sdGrouped)

    #flip the scores
    meanScoresD2 = np.array(meanScoresD2) * -1

    plt.scatter(ranges, np.array(meanScoresD2), c=[.9, .9, .9])

    cms = m.plotCategoricalMeans(ranges, np.array(meanScoresD2))

    stat, pscore = scipy.stats.ttest_ind(meanGroupedD2[0], meanGroupedD2[3])
    print("significance: p= " + str(pscore))
    corr, _ = scipy.stats.pearsonr(ranges[0:p.reps * 4],
                                   meanScoresD2[0:p.reps * 4])
    print("Pearson's correlation: %.3f" % corr)

    # plt.scatter(ranges,np.array(meanScoresD2),c=[.9,.9,.9])

    cms = m.plotCategoricalMeans(ranges, np.array(meanScoresD2))
    cms2 = m.plotCategoricalMeans(ranges, np.array(meanScores))
    plt.xlabel('maximum cognitive gap (style diversity)')
    plt.ylabel('performance')
    plt.legend(['problem set 1', 'problem set 2'])

    myPath = os.path.dirname(__file__)
    plt.savefig(myPath + "/results/vii_diverseTeams_2problemsets.pdf")
Example #4
from kaboom import modelFunctions as m
#from kaboom.kaboom import teamWorkProcess

from kaboom.carMakers import carTeamWorkProcess

# Structure vs Composition
# Optimal structure of 32-agent team for 3 allocation strategies

t0 = timer.time()
p = Params()

#change team size and specialization
p.nAgents = 16  #32
p.nDims = 56
p.steps = 3  #00
p.reps = 4

nAgentsPerTeam = [1, 2, 3, 4]  #,8,16,32]#[32,16]# [8,4,3,2,1]

resultMatrix = []
teamObjects = []
for i in range(3):  #range(3):
    if i == 0:  #homogeneous
        p.aiRange = 0
        p.aiScore = 95
        p.curatedTeams = True
    elif i == 1:
        p.aiRange = 70
        p.aiScore = 95
        p.curatedTeams = False
    elif i == 2:  #organic composition: styles drawn randomly from the population
        p.aiScore = None
        p.aiRange = None
        p.curatedTeams = False
Example #5
def run(numberOfCores=4):
    t0 = timer.time()
    p = Params()

    #change team size and specialization
    p.nAgents = 33
    p.nDims = 56
    p.steps = 100  #100
    p.reps = 16

    myPath = os.path.dirname(__file__)
    parentPath = os.path.dirname(myPath)
    paramsDF = pd.read_csv(parentPath + "/SAE/paramDBreduced.csv")
    paramsDF = paramsDF.drop(["used"], axis=1)
    paramsDF.head()

    teams = ['brk', 'c', 'e', 'ft', 'fw', 'ia', 'fsp', 'rsp', 'rt', 'rw', 'sw']
    teamsDict = {i: teams[i] for i in range(len(teams))}
    paramTeams = paramsDF.team
    p.nTeams = len(teams)
    #in the semantic division of the problem, variables are grouped by parts of
    #the car (eg, wheel dimensions; engine; brakes)
    teamDimensions_semantic = [[
        1 if paramTeam == thisTeam else 0 for paramTeam in paramTeams
    ] for thisTeam in teams]
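    #teamDimensions_semantic is a binary (team x variable) membership matrix:
    #row t is 1 for the design variables whose "team" label in the CSV matches
    #teams[t], so each sub-team owns the variables for its part of the car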

    p.agentTeams = m.specializedTeams(p.nAgents, p.nTeams)
    p.teamDims = teamDimensions_semantic

    if __name__ in ('__main__', 'kaboom.IDETC_studies.iv_problemDecomposition'):
        pool = multiprocessing.Pool(processes=numberOfCores)
        teamObjectsSemantic = pool.starmap(
            carTeamWorkProcess,
            zip(range(p.reps), itertools.repeat(p),
                itertools.repeat(CarDesignerWeighted)))
        pool.close()
        pool.join()
    print("finished semantic: " + str(timer.time() - t0))

    #    name="allocation_semantic"
    #    directory = saveResults(teamObjectsSemantic,p,name)

    #NOW WITH BLIND TEAM DIMENSIONS INSTEAD OF SEMANTIC

    #assign dimensions blindly to teams, with an even number per team (as far as possible)
    teamDimensions_blind = m.teamDimensions(p.nDims, p.nTeams)
    p.teamDims = teamDimensions_blind

    if __name__ in ('__main__', 'kaboom.IDETC_studies.iv_problemDecomposition'):
        pool = multiprocessing.Pool(processes=numberOfCores)
        teamObjectsBlind = pool.starmap(
            carTeamWorkProcess,
            zip(range(p.reps), itertools.repeat(p),
                itertools.repeat(CarDesignerWeighted)))
        pool.close()
        pool.join()
    #    name="allocation_blind"
    #    directory = saveResults(teamObjectsBlind,p,name)
    print("finished blind: " + str(timer.time() - t0))

    #invert scores so that it's a maximization problem
    #(in the plot, higher scores are better)
    semanticScores = [t.getBestScore() * -1 for t in teamObjectsSemantic]
    blindScores = [t.getBestScore() * -1 for t in teamObjectsBlind]

    #Plot results:
    plt.boxplot([semanticScores, blindScores],
                labels=["semantic", "blind"],
                showfliers=True)
    plt.ylabel("car design performance")

    plt.savefig(myPath + "/results/iv_problemDecomposition.pdf")
    plt.show()
    plt.clf()

    print("Results figure saved to " + myPath +
          "/results/iv_problemDecomposition.pdf")

    print("effect size:")
    print(h.effectSize(semanticScores, blindScores))
    print("ANOVA p score: ")
    print(h.pScore(semanticScores, blindScores))
Example #6
def run():
    """ Experiment to test how KAI style affects car design performance
    and beam design performance """

    t0 = timer.time()
    p = Params()

    #change team size and specialization
    p.nAgents = 33
    p.nDims = 56
    p.steps = 100
    p.reps = 16

    myPath = os.path.dirname(__file__)
    parentPath = os.path.dirname(myPath)
    paramsDF = pd.read_csv(parentPath + "/SAE/paramDBreduced.csv")
    paramsDF = paramsDF.drop(["used"], axis=1)
    paramsDF.head()

    teams = ['brk', 'c', 'e', 'ft', 'fw', 'ia', 'fsp', 'rsp', 'rt', 'rw', 'sw']
    paramTeams = paramsDF.team
    p.nTeams = len(teams)
    #in the semantic division of the problem, variables are grouped by parts of
    #the car (eg, wheel dimensions; engine; brakes)
    teamDimensions_semantic = [[
        1 if paramTeam == thisTeam else 0 for paramTeam in paramTeams
    ] for thisTeam in teams]

    p.agentTeams = m.specializedTeams(p.nAgents, p.nTeams)
    p.teamDims = teamDimensions_semantic

    styleTeams = []
    flatTeamObjects = []
    aiScores = np.linspace(45, 145, 9)
    for aiScore in aiScores:
        #use a homogeneous team with KAI style of [aiScore]
        p.aiScore = aiScore
        p.aiRange = 0

        teamObjects = []  #save results
        if __name__ in ('__main__', 'kaboom.IDETC_STUDIES.i_teamStyle'):
            pool = multiprocessing.Pool(processes=4)
            allTeams = pool.starmap(
                carTeamWorkProcess,
                zip(range(p.reps), itertools.repeat(p),
                    itertools.repeat(CarDesignerWeighted)))
            for t in allTeams:
                teamObjects.append(t)
                flatTeamObjects.append(t)
            pool.close()
            pool.join()
        print("time to complete: " + str(timer.time() - t0))
        styleTeams.append(teamObjects)

    #    saveResults(flatTeamObjects, p, "carProblem_KaiStyle")

    #invert the scores *-1 to show a maximization (rather than minimization)
    #objective. (Then, in this plot, higher scores are better)
    allScores = [t.getBestScore() * -1 for s in styleTeams for t in s]

    allkai = [kai for kai in aiScores for i in range(p.reps)]
    m.plotCategoricalMeans(allkai, allScores)
    plt.scatter(allkai, allScores, c=[0.9, 0.9, 0.9])
    qFit = np.polyfit(allkai, allScores, 2)
    q = np.poly1d(qFit)
    x = np.linspace(45, 145, 100)
    plt.plot(x, q(x), c='red')
    plt.xticks([int(i) for i in aiScores])
    plt.xlabel("KAI score of homogeneous team")
    plt.ylabel("Car Design Performance")
    plt.savefig(myPath + '/results/i_teamStyle_carProblem.pdf')
    plt.clf()

    #Now test the performance on the beam design problem
    p = Params()
    p.nAgents = 8
    p.nDims = 4
    p.nTeams = 2
    p.reps = 16
    p.steps = 100
    p.agentTeams = m.specializedTeams(p.nAgents, p.nTeams)
    p.teamDims = m.teamDimensions(p.nDims, p.nTeams)

    beamTeams = []
    for aiScore in aiScores:
        teamSet = []
        for i in range(p.reps):
            t = teamWorkSharing(p, BeamDesigner)
            teamSet.append(t)
        beamTeams.append(teamSet)
        print('next')

    #flip scores so that higher is better in the plot
    allScores = [[t.getBestScore() * -1 for t in teams] for teams in beamTeams]
    allAiScores = [ai for ai in aiScores for i in range(p.reps)]
    allScoresFlat = [s for r in allScores for s in r]

    plt.scatter(allAiScores, allScoresFlat, c=[.9, .9, .9])
    m.plotCategoricalMeans(allAiScores, allScoresFlat)

    #quadratic fit
    qm = np.polyfit(allAiScores, allScoresFlat, 2)
    qmodel = np.poly1d(qm)
    x = np.linspace(45, 145, 101)
    plt.plot(x, qmodel(x), c='red')
    plt.xlabel("KAI score of homogeneous team")
    plt.ylabel("Beam Design Performance")

    plt.savefig(myPath + '/results/i_teamStyle_beamProblem.pdf')
    plt.show()
    plt.clf()

    print("Results figure saved to " + myPath +
          "/results/i_teamStyle_beamProblem.pdf")
Example #7
def run():
    t0 = timer.time()
    p = Params()

    #change team size and specialization
    p.nAgents = 12
    p.nTeams = 4
    p.nDims = 12
    p.agentTeams = m.specializedTeams(p.nAgents, p.nTeams)
    p.teamDims = m.teamDimensions(p.nDims, p.nTeams)

    pComms = np.linspace(0, 1, 11)

    p.reps = 32  # 10 #$40 #5

    aiScores = [60, 95, 130]  #100#300
    p.aiRange = 0
    # aiRanges = np.linspace(0,100,10)

    #    meetingTimes = 100

    resultsA14 = []
    for aiScore in aiScores:
        p.aiScore = aiScore
        allTeamObjects = []
        for pComm in pComms:
            if __name__ in ('__main__', 'kaboom.designScienceStudies.ii_comm_v_style'):
                pool = multiprocessing.Pool(processes=4)
                allTeams = pool.starmap(
                    teamWorkProcess, zip(range(p.reps), itertools.repeat(p)))
                print('next. time: ' + str(timer.time() - t0))
                for team in allTeams:
                    allTeamObjects.append(team)

                pool.close()
                pool.join()
        resultsA14.append(allTeamObjects)
        scores = [t.getBestScore() for t in allTeamObjects]
        pcs = [pc for pc in pComms for i in range(p.reps)]
        m.plotCategoricalMeans(pcs, np.array(scores) * -1)
        plt.show()
        # allTeams = [t for tl in allTeamObjects for t in tl]
    print("time to complete: " + str(timer.time() - t0))

    for allTeamObjects in resultsA14:

        allScores = np.array([t.getBestScore() for t in allTeamObjects]) * -1
        kai = allTeamObjects[0].agents[0].kai.KAI
        nS = [t.nMeetings for t in allTeamObjects]
        #     plt.scatter(nS,allScores, c=[.9,.9,.9])
        pC = [pc for pc in pComms for i in range(p.reps)]
        #     plt.show()
        #     plt.scatter(pC,allScores, label=kai)
        c = m.plotCategoricalMeans(pC, allScores)
        plt.plot(pComms, c)
    #     name="A1.5_commRate_vStyle32_kai"+str(kai)
    #     directory = saveResults(allTeamObjects,name)
    plt.legend(aiScores)
    plt.xlabel('prob of communication (c)')
    plt.ylabel('performance')
    myPath = os.path.dirname(__file__)
    plt.savefig(myPath + "/results/ii_commRate_vStyle32_plot.pdf")
Example #8
import itertools
import os

import numpy as np

from kaboom import helperFunctions as h
from kaboom import modelFunctions as m
from kaboom.params import Params
from kaboom.kaboom import teamWorkSharing
#BeamDesigner module path assumed here; adjust if it lives elsewhere in the package
from kaboom.BeamDesigner import BeamDesigner

p = Params()
p.nAgents = 8
p.nDims = 4
p.nTeams = 2
p.reps = 32
p.steps = 50
p.agentTeams = m.specializedTeams(p.nAgents,p.nTeams)
p.teamDims = m.teamDimensions(p.nDims,p.nTeams)

aiScores = np.linspace(45,145,9)
allScores = []
for aiScore in aiScores:
    scores = []
    for i in range(p.reps):
        t= teamWorkSharing(p,BeamDesigner)
        scores.append(t.getBestScore())
    allScores.append(scores)
    print('next')
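#Plotting sketch (not part of the original snippet): summarize the beam scores
#by KAI style, mirroring the plotting pattern used in the other examples.
#Scores are inverted so that higher is better in the plot.
import matplotlib.pyplot as plt

allKai = [ai for ai in aiScores for _ in range(p.reps)]
allScoresFlat = [s * -1 for repScores in allScores for s in repScores]
plt.scatter(allKai, allScoresFlat, c=[.9, .9, .9])
m.plotCategoricalMeans(allKai, allScoresFlat)
plt.xlabel("KAI score of homogeneous team")
plt.ylabel("beam design performance")
plt.show()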
Example #9
        i += 1

    return myTeam



# Do certain sub-teams have a style preference (adaptive/innovative)?

t0 = timer.time()
p=Params()

#change team size and one sub-team's style:
p.nAgents = 33
p.nDims = 56
p.steps = 100 #100
p.reps = 4#16


myPath = os.path.dirname(__file__)
parentPath = os.path.dirname(myPath)
paramsDF = pd.read_csv(parentPath + "/SAE/paramDBreduced.csv")
paramsDF = paramsDF.drop(["used"],axis=1)
paramsDF.head()

#assign the actual specialized teams:
teams = ['brk', 'c', 'e', 'ft', 'fw', 'ia','fsp','rsp', 'rt', 'rw', 'sw']
teamsDict = { i:teams[i] for i in range(len(teams))}
paramTeams = paramsDF.team
p.nTeams = len(teams)
teamDimensions_semantic = [[ 1 if paramTeam == thisTeam else 0 for paramTeam in paramTeams] for thisTeam in teams]
#teamDimensions_blind = m.specializedTeams(p.nAgents,p.nTeams)
p.agentTeams = m.specializedTeams(p.nAgents,p.nTeams)
Example #10
def run():
    t0 = timer.time()
    p=Params()

    p.nAgents = 32
    nAgentsPerTeam = [1,2,3,4,8,16,32]#[32,16]# [8,4,3,2,1]
    p.nDims = 32

    p.reps = 32


    #choose one problem (middle, favors mid-range)
    roughnesses = np.logspace(-1,.7,num=6,base=10)
    speeds = np.logspace(-1,.7,num=6,base=10) / 100
    p.amplitude = roughnesses[3]
    p.AVG_SPEED = speeds[3]


    resultMatrix = []
    teamObjects = []
    for i in range(3):
        if i == 0: #homogeneous
            p.aiRange = 0
            p.aiScore = 95
            p.curatedTeams = True
        elif i == 1:
            p.aiRange = 70
            p.aiScore = 95
            p.curatedTeams = False
        elif i == 2:
            p.aiScore = None
            p.aiRange = None
            p.curatedTeams = False
        scoresA = []
        teams = []
        for subteamSize in nAgentsPerTeam:
            p.nTeams = int(p.nAgents/subteamSize)
            p.agentTeams = m.specializedTeams(p.nAgents,p.nTeams)
            p.teamDims = m.teamDimensions(p.nDims,p.nTeams)
            if __name__ in ('__main__', 'kaboom.designScienceStudies.viii_specialization_composition'):
                pool = multiprocessing.Pool(processes = 4)
                allTeams = pool.starmap(teamWorkProcess, zip(range(p.reps),itertools.repeat(p)))
                scoresA.append([t.getBestScore() for t in allTeams])
                teams.append(allTeams)
                pool.close()
                pool.join()
        resultMatrix.append(scoresA)
        teamObjects.append(teams)
        print("completed one")
    print("time to complete: "+str(timer.time()-t0))

    for i in range(3):
        nAgents = [len(team.agents) for teamSet in teamObjects[i] for team in teamSet]
        nTeams = [len(team.specializations) for teamSet in teamObjects[i] for team in teamSet]
        subTeamSize = [int(len(team.agents)/len(team.specializations)) for teamSet in teamObjects[i] for team in teamSet]
        teamScore =  [team.getBestScore() for teamSet in teamObjects[i] for team in teamSet]
        print("Diverse team, size %s in %s dim space: " % (32,p.nDims))
    #     plt.scatter(subTeamSize,teamScore,label='team size: '+str(teamSizes[i]))
        m.plotCategoricalMeans(subTeamSize,np.array(teamScore)*-1)

    plt.xlabel("subteam size")
    #     plt.xticks([1,4,8,16,32])
    plt.ylabel("performance")
    #     plt.show()
    plt.legend(['homogeneous','heterogeneous70','organic'])

    plt.title("composition vs structure")
    myPath = os.path.dirname(__file__)
    plt.savefig(myPath+"/results/viii_structure_composition.pdf")
Example #11
def run(numberOfCores=4):
    t0 = timer.time()
    p = Params()

    #change team size and one sub-team's style:
    p.nAgents = 33
    p.nDims = 56
    p.steps = 100  #100
    p.reps = 16

    #organic composition: select agents randomly from population
    p.aiScore = None
    p.aiRange = None

    myPath = os.path.dirname(__file__)
    parentDir = os.path.dirname(myPath)
    paramsDF = pd.read_csv(parentDir + "/SAE/paramDBreduced.csv")
    paramsDF = paramsDF.drop(["used"], axis=1)
    paramsDF.head()

    #assign the actual specialized teams:
    teams = ['brk', 'c', 'e', 'ft', 'fw', 'ia', 'fsp', 'rsp', 'rt', 'rw', 'sw']
    paramTeams = paramsDF.team
    p.nTeams = len(teams)
    teamDimensions_semantic = [[
        1 if paramTeam == thisTeam else 0 for paramTeam in paramTeams
    ] for thisTeam in teams]
    #teamDimensions_blind = m.specializedTeams(p.nAgents,p.nTeams)
    p.agentTeams = m.specializedTeams(p.nAgents, p.nTeams)
    p.teamDims = teamDimensions_semantic

    #First run the control group: teams with organic composition
    if __name__ in ('__main__', 'kaboom.IDETC_studies.iii_strategicTeams'):
        pool = multiprocessing.Pool(processes=numberOfCores)
        controlTeams = pool.starmap(
            carTeamWorkProcess,
            zip(range(p.reps), itertools.repeat(p),
                itertools.repeat(CarDesignerWeighted)))
        #            scoresA.append([t.getBestScore() for t in allTeams])
        #            teams.append(allTeams)
        pool.close()
        pool.join()

    controlScores = [t.getBestScore() * -1 for t in controlTeams]

    #Run strategic teams
    subteamsSortedByStyle = [7, 8, 0, 10, 3, 2, 6, 4, 1, 5, 9]
    #    namedSortedTeams = [teams[i] for i in subteamsSortedByStyle]

    strategicTeamObjects = []
    if __name__ in ('__main__', 'kaboom.IDETC_studies.iii_strategicTeams'):
        pool = multiprocessing.Pool(processes=numberOfCores)
        allTeams = pool.starmap(
            teamWorkOrganicSuperteam,
            zip(range(p.reps), itertools.repeat(p),
                itertools.repeat(subteamsSortedByStyle)))
        #            scoresA.append([t.getBestScore() for t in allTeams])
        #            teams.append(allTeams)
        for t in allTeams:
            strategicTeamObjects.append(t)
        pool.close()
        pool.join()
    print("time to complete: " + str(timer.time() - t0))

    strategicScores = [t.getBestScore() * -1 for t in strategicTeamObjects]

    plt.boxplot([np.array(controlScores),
                 np.array(strategicScores)],
                labels=["control", "strategic allocation"],
                showfliers=True)
    plt.ylabel("car design performance")

    plt.savefig(myPath + "/results/iii_carStrategicTeamAssignment.pdf")
    plt.show()
    plt.clf()

    print("Results figure saved to " + myPath +
          "/results/iii_carStrategicTeamAssignment.pdf")

    print("effect size:")
    print(h.effectSize(controlScores, strategicScores))
    print("ANOVA p score: ")
    print(h.pScore(controlScores, strategicScores))