Example #1
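# Excerpt from a larger nightly-benchmark script: helpers such as runCommand,
# message and reSVNRev, the constant NIGHTLY_DIR, and the luceneutil modules
# constants, competition and benchUtil are assumed to be imported or defined
# earlier in that script.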
def main():

    if True:
        os.chdir(constants.BENCH_BASE_DIR)
        for i in range(30):
            try:
                runCommand('hg pull -u > hgupdate.log')
            except RuntimeError:
                message('  retry...')
                time.sleep(60.0)
            else:
                s = open('hgupdate.log', 'r').read()
                if s.find('not updating') != -1:
                    raise RuntimeError('hg did not update: %s' % s)
                else:
                    break
        else:
            raise RuntimeError('failed to run hg pull -u')

        os.chdir('%s/%s' % (constants.BASE_DIR, NIGHTLY_DIR))

        runCommand('svn cleanup')
        open('update.log',
             'ab').write('\n\n[%s]: update' % datetime.datetime.now())
        for i in range(30):
            try:
                runCommand('svn update > update.log 2>&1')
            except RuntimeError:
                message('  retry...')
                time.sleep(60.0)
            else:
                svnRev = int(
                    reSVNRev.search(open('update.log', 'rb').read()).group(1))
                print('SVN rev is %s' % svnRev)
                break
        else:
            raise RuntimeError('svn update failed')

    runCommand('%s clean > clean.log 2>&1' % constants.ANT_EXE)
    runCommand('%s compile > compile.log 2>&1' % constants.ANT_EXE)

    MEDIUM_LINE_FILE = constants.NIGHTLY_MEDIUM_LINE_FILE
    MEDIUM_INDEX_NUM_DOCS = constants.NIGHTLY_MEDIUM_INDEX_NUM_DOCS

    mediumSource = competition.Data('wikimedium', MEDIUM_LINE_FILE,
                                    MEDIUM_INDEX_NUM_DOCS,
                                    constants.WIKI_MEDIUM_TASKS_FILE)

    comp = competition.Competition()
    index = comp.newIndex(NIGHTLY_DIR, mediumSource)
    c = comp.competitor(id, NIGHTLY_DIR, index=index)
    r = benchUtil.RunAlgs(constants.JAVA_COMMAND, True)
    r.compile(c)
Example #2
                 self.stddevReopenTime,
                 self.qtCount,
                 self.totalDocs,
                 self.totalReopens,
                 self.totalSearches,
                 self.totalUpdateTime)


if __name__ == '__main__':

    sourceData = competition.sourceData()
    sourceData.tasksFile = benchUtil.getArg('-tasks', None, True)
    if sourceData.tasksFile is None:
        raise RuntimeError('No tasks file defined: -tasks [file]')

    comp = competition.Competition(randomSeed=0)

    index = comp.newIndex(
        constants.TRUNK_CHECKOUT,
        sourceData,
        postingsFormat='Lucene90',
        idFieldPostingsFormat='Memory',
        grouping=False,
        doDeletions=False,
        addDVFields=True,
    )

    c = competition.Competitor('base', constants.TRUNK_CHECKOUT)

    r = benchUtil.RunAlgs(constants.JAVA_COMMAND, False, False)
    r.compile(c)
Example #3
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import competition

# simple example that runs a benchmark with the WIKI_MEDIUM source and tasks files
# Baseline here is ../lucene-solr versus ../patch
if __name__ == '__main__':
    sourceData = competition.sourceData()
    comp = competition.Competition(
        randomSeed=0xdeadbeef,
        jvmCount=5,  # number of JVM iterations
        taskRepeatCount=32)  # repeat each task within every iteration

    #index = comp.newIndex('lucene-solr-6.6.5', sourceData)
    index = comp.newIndex('lucene-solr-6.6.5',
                          sourceData,
                          facets=(('taxonomy:Date', 'Date'),
                                  ('taxonomy:Month', 'Month'),
                                  ('taxonomy:DayOfYear', 'DayOfYear'),
                                  ('sortedset:Month', 'Month'),
                                  ('sortedset:DayOfYear', 'DayOfYear')))
    # Added by Wenbin
    #directory="MMapDirectory",  # default: MMapDirectory
    #maxConcurrentMerges=3,  # use 1 for spinning-magnets and 3 for fast SSD
    #optimize=True)
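The original snippet stops after the index is created, so the baseline-versus-patch comparison described in the comment above is never actually set up here. A plausible continuation, modeled on the pattern in Example #4 (the competitor names and checkout folder names below are assumptions, not taken from the original):

    # assumed continuation, following Example #4; names are illustrative only
    comp.competitor('baseline', 'lucene-solr', index=index)
    comp.competitor('patch', 'lucene-patch', index=index)
    # run the benchmark and report baseline vs. patch
    comp.benchmark('lucene-solr_vs_patch')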
Example #4
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import competition

# simple example that runs a benchmark with the WIKI_MEDIUM source and tasks files
# Baseline here is ../trunk versus ../patch
if __name__ == '__main__':
    sourceData = competition.sourceData()
    comp = competition.Competition()

    index = comp.newIndex('lucene_baseline', sourceData)
    # create a competitor named baseline with sources in the ../trunk folder
    comp.competitor('baseline', 'lucene_baseline', index=index)

    # reuse the same index here
    # create a competitor named my_modified_version with sources in the ../patch folder
    # note: if no index is specified, luceneutil automatically reuses the index of the
    # base competitor for searching, while the codec used to run this competitor is
    # still taken from this competitor
    comp.competitor('my_modified_version', 'lucene_candidate', index=index)

    # start the benchmark - this can take long depending on your index and machines
    comp.benchmark("trunk_vs_patch")
Example #5
p = player.StatisticPlayer([50, 50, 20, 20])
p1 = player.StatisticPlayer([60, 30, 5, 5])
p2 = player.StatisticPlayer([33, 33, 10, 33])
p3 = player.StatisticPlayer([33, 33, 20, 5])
p4 = player.Player("random")
data = pd.HDFStore('Statistic.h5')
data['results'] = pd.DataFrame()
i = 0
for x in [70]:
    for y in [70]:
        for z in [70]:
            for a in [10, 25, 40, 60, 70]:
                start = time.time()
                p = player.StatisticPlayer([x, y, z, a])
                c = competition.Competition(game_size=4,
                                            games_number=30,
                                            players=[p],
                                            game_type=game.NO_SCREEN)
                c.play()
                c.show_results(dont_show=True)
                data['results'] = data['results'].append(c.results,
                                                         ignore_index=True)
                print(max(c.results['max_score']))
                i = i + 1
                print("finished " + str(i) + ' out of 625 in ' +
                      str(time.time() - start))

data.close()
print("trial")
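Since the accumulated results are persisted to Statistic.h5 through pandas, here is a minimal sketch of how they could be read back later for analysis (assuming the same file path and a pandas install with PyTables support; not part of the original):

# reopen the HDF5 store and inspect the accumulated results
import pandas as pd

with pd.HDFStore('Statistic.h5') as store:
    results = store['results']
print(results.describe())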
Example #6
    team_stats = {}
    for i, name in enumerate(raw_teams.keys()):
        teams.append(team.Team(i, name))
        teams[i].set_scores(raw_teams[name])
        if name not in team_stats:
            team_stats[name] = {}
        team_stats[name]["wins"] = np.zeros((nschedules), dtype=np.int32)
        team_stats[name]["ranks"] = np.zeros((nschedules), dtype=np.int32)

    for team in teams:
        print(team)

    nteams = len(teams)
    scheds = schedules.Schedules(nteams, nweeks, nschedules)
    for j, sched in enumerate(scheds.schedules):
        comp = competition.Competition(sched, teams)
        for i, team in enumerate(comp.teams):
            team_stats[team.name]["wins"][j] = team.wins
            team_stats[team.name]["ranks"][j] = i
        # print(comp)

    for team, stats in team_stats.items():
        team_stats[team]["pdf"] = np.bincount(stats['ranks'],
                                              minlength=nteams) / nschedules
        team_stats[team]["cdf"] = np.cumsum(team_stats[team]["pdf"])
        # print("{:20}".format(team), team_stats[team]["pdf"] )

    # team_stats = sorted(team_stats.items(), key = lambda kv: kv[1]["wins"].mean(), reverse=True)
    team_stats = sorted(team_stats.items(),
                        key=lambda kv: kv[1]["cdf"][3],
                        reverse=True)
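After this sort, team_stats is a list of (name, stats) pairs ordered by each team's probability of finishing in the top four; a minimal sketch of printing the resulting standings from that structure (not part of the original):

    # print mean wins and top-4 probability (cdf at rank index 3) per team
    for name, stats in team_stats:
        print("{:20} mean wins {:6.2f}   P(top 4) {:4.2f}".format(
            name, stats["wins"].mean(), stats["cdf"][3]))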