Code example #1
    def check_bounds(self):
        min_x = 0
        min_y = 0
        max_x = 1000 - self.image.width
        max_y = config.upper_y_bound - self.image.height

        if self.is_ball:
            # the ball leaving the field on the left or right awards a point
            if self.x < min_x:
                util.score(self, 1)
            elif self.x > max_x:
                util.score(self, 2)
            # bounce off the top and bottom edges
            if self.y < min_y:
                self.v_y = -self.v_y
                self.y = min_y
            elif self.y > max_y:
                self.v_y = -self.v_y
                self.y = max_y
        else:
            # anything else is simply clamped to the playing field
            if self.x < min_x:
                self.x = min_x
            elif self.x > max_x:
                self.x = max_x
            if self.y < min_y:
                self.y = min_y
            elif self.y > max_y:
                self.y = max_y
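The listing does not include util.score itself; a minimal sketch of what such a helper could look like for this game, where the scoreboard and the ball's reset method are assumptions rather than project code:

scores = {1: 0, 2: 0}  # hypothetical scoreboard keyed by player number

def score(ball, player_number):
    # credit the point and put the ball back into play
    scores[player_number] += 1
    ball.reset()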
Code example #2
def teamScores(self):
    # player who called gameType
    playerTeam = {
        "players": [self.gameType["player"]],
        "breakdown": [],  # ["bonus", amount]
        "cardsWon": [],
    }

    # and his (potential) teammate
    if "with" in self.gameType and self.gameType["with"] != -1:
        playerTeam["players"].append(self.gameType["with"])

    # opposing team: everyone else
    opposingTeam = {
        "players": [
            i for i in range(len(self.players))
            if i not in playerTeam["players"]
        ],
        "breakdown": [],
        "cardsWon": [],
    }

    self.info("Teams: " + str(playerTeam["players"]) + " vs " +
              str(opposingTeam["players"]))

    # cards won by either team
    for i, p in enumerate(self.players):
        if i in playerTeam["players"]:
            playerTeam["breakdown"] += [[
                "player_" + p["name"],
                score(p["cardsWon"])
            ]]
            playerTeam["cardsWon"] += p["cardsWon"]
        else:
            opposingTeam["breakdown"] += [[
                "player_" + p["name"],
                score(p["cardsWon"])
            ]]
            opposingTeam["cardsWon"] += p["cardsWon"]

    # add talon cards
    talonFlat = self.talon
    if isinstance(talonFlat[0], list):
        talonFlat = [c for pack in self.talon for c in pack]

    if ("king" in self.gameType and len(playerTeam["players"]) == 1
            and self.gameType["king"]
            in self.players[playerTeam["players"][0]]["cardsWon"]):
        # the player played alone and won the called king:
        # the talon joins his card pile
        playerTeam["breakdown"] += [["Talon", score(talonFlat)]]
        playerTeam["cardsWon"] += talonFlat
    else:
        # in every other case it goes to the opposing team
        opposingTeam["breakdown"] += [["Talon", score(talonFlat)]]
        opposingTeam["cardsWon"] += talonFlat

    # handle other contracts
    addBonus([playerTeam, opposingTeam], self.players)

    return [playerTeam, opposingTeam]
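For orientation, each element of the returned pair is a dict of this shape (reconstructed from the code above; the names and amounts are hypothetical, and finalScore is attached later by addfinalScore, shown in code example #17):

{
    "players": [0, 2],                 # indices into self.players
    "breakdown": [["player_Ana", 23],  # ["label", amount] pairs
                  ["Talon", 4]],
    "cardsWon": [...],                 # flat list of card objects won
}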
Code example #3
File: ocr_test.py  Project: YingjingLu/10708
def test(theta, data, alphabet, print_tags, print_score):
    predictions = predict(theta, data, alphabet)

    if print_tags:
        for word in predictions:
            print ''.join(word)

    if print_score:
        print score(predictions, data)
Code example #4
File: ocr_test.py  Project: deborausujono/crfocr
def test(theta, data, alphabet, print_tags, print_score):
    predictions = predict(theta, data, alphabet)

    if print_tags:
        for word in predictions:
            print ''.join(word)

    if print_score:
        print score(predictions, data)
Code example #5
def normalScores(self):
    return [{
        "players": [i],
        "cardsWon": p["cardsWon"],
        "breakdown": [["player_" + p["name"],
                       score(p["cardsWon"])]],
    } for i, p in enumerate(self.players)]
Code example #6
File: genomestats.py  Project: haydnKing/codon-optim
    def from_seqrecord(cls, sr, featuretype='CDS', name=None):

        if not name:
            name = sr.name

        CDS = [f for f in sr.features if f.type == featuretype]

        _data = pd.DataFrame(np.zeros((len(CDS), 64), dtype=int),
                             columns=util.list_codons())

        _second_order = pd.DataFrame(np.zeros((64, 64), dtype=int),
                                     index=util.list_codons(),
                                     columns=util.list_codons())

        _scores = pd.DataFrame(np.zeros((len(CDS), 2)),
                               columns=['first', 'second'])

        _seqs = [util._extract(sr, cds) for cds in CDS]
        for i, seq in enumerate(_seqs):
            _data.loc[i, :] = util.get_bias(seq)
            util.add_second_order(_second_order, seq)

        # calculate scores
        _nd = util.normalise(_data.sum(0))
        _nso = util.so_normalise(_second_order)
        for i, seq in enumerate(_seqs):
            _scores.at[i, 'first'] = util.score(_nd, seq)
            _scores.at[i, 'second'] = util.so_score(_nso, seq)

        return cls(name, _data, _second_order, _scores, _nd, _nso)
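A minimal usage sketch for this constructor, assuming the enclosing class is named GenomeStats (suggested by the file name, not confirmed by the listing) and that the SeqRecord comes from Biopython:

from Bio import SeqIO

record = SeqIO.read("genome.gb", "genbank")  # any annotated GenBank file
stats = GenomeStats.from_seqrecord(record, featuretype='CDS')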
Code example #7
File: train_base.py  Project: ihsgnef/pathologies
def test():
    from args import conf

    parser = argparse.ArgumentParser()
    parser.add_argument('--baseline', required=True)
    args = parser.parse_args()

    # set random seed
    random.seed(conf.seed)
    torch.manual_seed(conf.seed)
    if conf.cuda:
        torch.cuda.manual_seed(conf.seed)

    train, dev_x, dev_y, embedding, conf = load_data(conf)

    checkpoint = torch.load(args.baseline)
    # opt = checkpoint['config']
    model = DocReaderModel(vars(conf), embedding, checkpoint['state_dict'])
    model.cuda()

    dev_batches = BatchGen(
            dev_x, batch_size=conf.batch_size,
            pos_size=conf.pos_size, ner_size=conf.ner_size,
            gpu=conf.cuda, evaluation=True)

    predictions = []
    for batch in dev_batches:
        predictions.extend(model.predict(batch))
    em, f1 = score(predictions, dev_y)

    print(em, f1)
Code example #8
File: ParticleSwarm.py  Project: twistedmove/optunity
    def optimize(self, f, maximize=True, pmap=map):

        @functools.wraps(f)
        def evaluate(d):
            return f(**d)

        if maximize:
            fit = 1.0
        else:
            fit = -1.0

        pop = [self.generate() for _ in range(self.num_particles)]
        best = None

        for g in range(self.num_generations):
            fitnesses = pmap(evaluate, list(map(self.particle2dict, pop)))
            for part, fitness in zip(pop, fitnesses):
                part.fitness = fit * util.score(fitness)
                if not part.best or part.best_fitness < part.fitness:
                    part.best = part.position
                    part.best_fitness = part.fitness
                if not best or best.fitness < part.fitness:
                    best = part.clone()
            for part in pop:
                self.updateParticle(part, best, self.phi1, self.phi2)

        return dict(zip(self.bounds.keys(), best.position)), None
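A hypothetical call for the method above; the constructor arguments are assumptions (per-parameter box constraints that end up in self.bounds, as in optunity's solver), while optimize is exactly as shown:

solver = ParticleSwarm(num_particles=10, num_generations=20,
                       x=(-5, 5), y=(-5, 5))
best_params, _ = solver.optimize(lambda x, y: x ** 2 + y ** 2, maximize=False)
print(best_params)  # e.g. {'x': 0.01, 'y': -0.02}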
Code example #9
def predict(trainx, trainy, testx):
    scorer = skmet.make_scorer(util.score)
    #best_score = np.Inf
    #for degree in range(5):
        #polynomial_features = skpre.PolynomialFeatures(degree)
        #transx = polynomial_features.fit_transform(trainx)
        #for alphaLoc in np.linspace(100*(degree-1),100*(degree+1),num=101):
            #regressor = sklin.Ridge(alpha=alphaLoc)
            #scores = skcv.cross_val_score(regressor, transx, trainy, scoring=scorer, cv=5)
            #print('C-V score for degree=',degree,'and alpha=',alphaLoc,'is', np.mean(scores), '+/-',np.std(scores))

            #if np.mean(scores)<best_score:
                #best_score = np.mean(scores)
                #best_std = np.std(scores)
                #best_degree = degree
                #best_alpha = alphaLoc
        #print('C-V score for degree=',degree,'and alpha=',best_alpha,'is', best_score, '+/-',best_std)
        
        ##print(regressor.alphas)       
        ##print('    alpha=',regressor.alpha_,'was used.') 
    ##clf = sklin.RidgeCV( scoring=scorer, alphas=np.linspace(0, 5), normalize=False, cv=10)
    
    
    #polynomial_features = skpre.PolynomialFeatures(best_degree)
    polynomial_features = skpre.PolynomialFeatures(3)
    transx = polynomial_features.fit_transform(trainx)
    transx_test = polynomial_features.fit_transform(testx)

    #regressor = sklin.Ridge(alpha=best_alpha)
    regressor = sklin.Ridge(alpha=260)
    regressor.fit(transx, trainy)
    testy = regressor.predict(transx_test)
    print("On the training set, RMSE of the model is ",
          util.score(trainy, regressor.predict(transx)))
    # print("The used degree is ", best_degree)
    return testy
Code example #10
def klopScores(self):
    assert self.gameType["name"] == "klop"
    return [{
        "players": [i],
        "cardsWon": p["cardsWon"],
        "breakdown": [["klop", -score(p["cardsWon"])]],
    } for i, p in enumerate(self.players)]
Code example #11
def solve(input_path):
    f = open(input_path)
    player1, player2 = util.parse(f)
    winner_number = util.play_part2_game(player1, player2, 0)
    winner = player1
    if winner_number == 2:
        winner = player2
    return util.score(winner)
Code example #12
File: model.py  Project: unyqhz/sp-2016
    def on_epoch_end(self, callback_data, model, epoch):
        preds = model.get_outputs(self.eval_set)[:, 1]
        idx_file = os.path.join(
            self.data_dir,
            'eval-' + str(self.subj) + '-' + str(0) + '-index.csv')
        labels = np.loadtxt(idx_file, delimiter=',', skiprows=1, usecols=[1])
        logger.display('Eval AUC for subject %d epoch %d: %.4f\n' %
                       (self.subj, epoch, score(labels, preds)))
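The log message labels the value "Eval AUC", so score here presumably computes an area under the ROC curve. A stand-in with the same call shape (an assumption, not the project's code):

from sklearn.metrics import roc_auc_score

def score(labels, preds):
    # AUC of predicted probabilities against binary ground-truth labels
    return roc_auc_score(labels, preds)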
Code example #13
def solve(input_path):
    f = open(input_path)
    player1, player2 = util.parse(f)
    while player1 and player2:
        util.play_round(player1, player2)
    winner = player1
    if player2:
        winner = player2
    return util.score(winner)
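This example and code example #11 both finish with util.score(winner) on the winning deck. A plausible implementation for this kind of card game (an assumption; the listing never shows util.score) weights each card by its position from the bottom of the deck:

def score(deck):
    # bottom card counts once, the next one twice, and so on
    return sum(position * card
               for position, card in enumerate(reversed(list(deck)), start=1))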
Code example #14
    def evaluate(self, input_df, output_df):
        print('  Setting up data')
        input_df, numeric_input_df, output_df = self.setup_data(
            input_df, output_df)
        self.setup_additional_output_data(input_df)

        print('  Fitting model')
        i = self.input_scaler.transform(numeric_input_df.values)
        ref_output = output_df.values
        o = self.model.predict(i)
        #test_output = self.output_scaler.inverse_transform(o[0])
        test_output = self.output_scaler.inverse_transform(o)

        print('  Evaluating model')
        return test_output, score(input_df, ref_output, test_output)
Code example #15
    def evaluate(self, input_df, output_df):
        if self.verbose:
            print('  Setting up data')
        input_df, numeric_input_df, output_df = self.setup_data(
            input_df, output_df)

        if self.verbose:
            print('  Fitting model')
        if self.flatten_output:
            ref_output = output_df.values.flatten()
        else:
            ref_output = output_df.values.reshape((len(output_df), 1))
        test_output = self.model.predict(numeric_input_df.values)

        if self.verbose:
            print('  Evaluating model')
        return test_output, score(input_df, ref_output, test_output)
Code example #16
File: ridge_regression.py  Project: comte-cauchy/lis
def predict(trainx, trainy, testx):
    scorer = skmet.make_scorer(util.score)
    best_score = np.Inf
    for alphaLoc in np.linspace(0, 100, 101):
        regressor = sklin.Ridge(alpha=alphaLoc, fit_intercept=True)
        scores = skcv.cross_val_score(regressor, trainx, trainy, scoring=scorer, cv=10)
        if np.mean(scores) < best_score:
            best_score = np.mean(scores)
            best_alpha = alphaLoc
        print('C-V score for alpha=', alphaLoc, 'is', np.mean(scores), '+/-', np.std(scores))
    #clf = sklin.RidgeCV( scoring=scorer, alphas=np.linspace(0, 5), normalize=False, cv=10)
    regressor = sklin.Ridge(alpha=best_alpha)
    regressor.fit(trainx, trainy)
    testy = regressor.predict(testx)
    print("On the training set, RMSE of the model is ",
          util.score(trainy, regressor.predict(trainx)))
    print("The used alpha is ", regressor.alpha)
    return testy
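One note on the scorer: util.score evidently returns an RMSE-style loss (the loop keeps the alpha with the lowest mean), while make_scorer assumes greater is better by default. The conventional form for a loss metric would be the following, which makes cross_val_score return negated losses so that larger is always better (an alternative, not the author's code):

scorer = skmet.make_scorer(util.score, greater_is_better=False)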
Code example #17
def addfinalScore(self, s):
    if self.gameType["name"] == "klop":
        for team in s:
            team["finalScore"] = sumScores(team)
        return s

    for team in s:
        if self.gameType["player"] in team["players"]:
            # player's team
            if score(team["cardsWon"]) > 0:
                # completed the game
                team["finalScore"] = valueOfGameType(
                    self.gameType) + contractBonus(team)
            else:
                # did not complete the game
                team["finalScore"] = -valueOfGameType(self.gameType)
        else:
            # opposing team
            team["finalScore"] = 0

    # TODO: radlc multiplier

    return s
Code example #18
    predictions = map(Prediction.parse, args.predicted)
    assert len(golden) == len(golden_loc)
    #assert len(golden) == len(predictions)
    if len(predictions) < len(golden):
        n = len(predictions)
        golden = golden[:n]
        golden_loc = golden_loc[:n]

    print "Generating sentences with removed word"
    removed = map(tokenize_words, golden)
    removed = [
        remove_word(words, loc) for words, loc in izip(removed, golden_loc)
    ]

    orig = [' '.join(words) for words in removed]
    s = score(golden, orig)
    print "Score if no predictions are inserted: %s" % s

    best = None
    best_score = s
    loc_thresholds = np.linspace(0, 1, 21)
    word_thresholds = np.linspace(0, 1, 21)
    loss_surface = np.zeros((len(loc_thresholds), len(word_thresholds)))
    for i, loc_threshold in enumerate(loc_thresholds):
        for j, word_threshold in enumerate(word_thresholds):
            print "loc_threshold=%s, word_threshold=%s:" \
                % (loc_threshold, word_threshold)
            predicted = []
            for words, p in izip(removed, predictions):
                if p.location_posterior > loc_threshold:
                    if p.word_posterior > word_threshold:  # insert predicted word
Code example #19
File: linear_regression.py  Project: comte-cauchy/lis
def predict(trainx, trainy, testx):
    clf.fit(trainx, trainy)
    testy = clf.predict(testx)
    print("On the training set, RMSE of the model is ",
          util.score(trainy, clf.predict(trainx)))
    return testy
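This snippet uses clf without defining it, so the estimator is presumably created at module level. A minimal sketch of what that file header might look like (assumed; only the sklin alias is attested elsewhere in these examples):

import sklearn.linear_model as sklin
import util

clf = sklin.LinearRegression()  # hypothetical module-level estimator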
Code example #20
import util
import numpy as np
data_path = "../game_data"
data = util.load_data(data_path)

raw_sensitivities = []
raw_specificities = []

filtered_sensitivities = []
filtered_specificities = []

for sample in data:
    print(data[sample])
    gold_circles = util.get_gold(data[sample]["out_path"])
    if gold_circles is not None:
        raw_score = util.score(gold_circles, data[sample]["raw-circles"])
        raw_sensitivities.append(raw_score[0])
        raw_specificities.append(raw_score[1])

        try:
            filtered_score = util.score(gold_circles,
                                        data[sample]["filtered-circles"])
            filtered_sensitivities.append(filtered_score[0])
            filtered_specificities.append(filtered_score[1])

        except ZeroDivisionError as e:
            print(sample, "does not have filtered circles")

print(("raw sensitivity mean {} std {}".format(np.mean(raw_sensitivities),
                                               np.std(raw_sensitivities))))
print(("raw specificity mean {} std {}".format(np.mean(raw_specificities),
Code example #21
def main():
    from args import args
    # parser = argparse.ArgumentParser()
    # parser.add_argument('--model', required=True)
    # parser.add_argument('--train', required=True)
    # parser.add_argument('--dev', required=True)
    # args.load_model_dir = parser.parse_args().model
    # args.ent_train_dir = parser.parse_args().train
    # args.ent_dev_dir = parser.parse_args().dev
    args.load_model_dir = '/scratch0/shifeng/rawr/drqa/original.pt'
    args.ent_train_dir = 'results/20180217T172242.135276/train.pkl'
    args.ent_dev_dir = 'pkls/original.rawr.dev.pkl'
    args.other_train_dir = 'results/targeted_train_all.pkl'
    out_dir = prepare_output_dir(args, '/scratch0/shifeng/rawr/drqa/')

    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    fh = logging.FileHandler(os.path.join(out_dir, 'output.log'))
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter(fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    log.addHandler(fh)
    log.addHandler(ch)
    log.info('===== {} ====='.format(out_dir))

    with open(os.path.join(out_dir, 'args.pkl'), 'wb') as f:
        pickle.dump(args, f)
    
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    log.info('loading regular data from {}'.format(args.data_file))
    train_reg, dev_reg, dev_y, embedding, opt = load_data(args)
    log.info('{} regular training examples'.format(len(train_reg)))
    log.info('{} regular dev examples'.format(len(dev_reg)))
    # log.info(opt)

    ''' load data for regularization '''
    log.info('loading entropy training data from {}'.format(args.ent_train_dir))
    with open(args.ent_train_dir, 'rb') as f:
        train_ent = pickle.load(f)
        if isinstance(train_ent, dict) and 'reduced' in train_ent:
            train_ent = train_ent['reduced']
        if isinstance(train_ent[0][0], list):
            train_ent = list(itertools.chain(*train_ent))

    log.info('loading targeted training data from {}'.format(args.other_train_dir))
    with open(args.other_train_dir, 'rb') as f:
        other_train_ent = pickle.load(f)
        if isinstance(other_train_ent, dict) and 'reduced' in other_train_ent:
            other_train_ent = other_train_ent['reduced']
        if isinstance(other_train_ent[0][0], list):
            other_train_ent = list(itertools.chain(*other_train_ent))
    train_ent += other_train_ent

    if args.filter_long > 0:
        train_ent = [x for x in train_ent if len(x[5]) < args.filter_long]

    log.info('loading entropy dev data from {}'.format(args.ent_dev_dir))
    with open(args.ent_dev_dir, 'rb') as f:
        dev_ent = pickle.load(f)['reduced']
        if isinstance(dev_ent[0], list):
            # dev_ent = list(itertools.chain(*dev_ent))
            dev_ent = [x[0] for x in dev_ent]
        # if args.filter_long > 0:
        #     dev_ent = [x for x in dev_ent if len(x[5]) > args.filter_long]
    log.info('{} entropy training examples'.format(len(train_ent)))
    log.info('{} entropy dev examples'.format(len(dev_ent)))

    log.info('loading model from {}'.format(args.load_model_dir))
    checkpoint = torch.load(args.load_model_dir)
    # opt = checkpoint['config']
    state_dict = checkpoint['state_dict']
    model = DocReaderModel(vars(opt), embedding, state_dict)
    model.cuda()

    ''' initial evaluation '''
    dev_reg_batches = BatchGen(
            dev_reg, batch_size=args.batch_size,
            pos_size=args.pos_size, ner_size=args.ner_size,
            evaluation=True, gpu=args.cuda)
    dev_ent_batches = BatchGen(
            dev_ent, batch_size=args.batch_size,
            pos_size=args.pos_size, ner_size=args.ner_size,
            evaluation=True, gpu=args.cuda)
    predictions = []
    for batch in dev_reg_batches:
        predictions.extend(model.predict(batch))
    em, f1 = score(predictions, dev_y)
    ents, predictions_r = [], []
    for batch in dev_ent_batches:
        p, _, ss, se, _, _ = model.predict(batch, get_all=True)
        ss = ss.cpu().numpy()
        se = se.cpu().numpy()
        ents.append(scipy.stats.entropy(ss.T).sum() + \
                    scipy.stats.entropy(se.T).sum())
        predictions_r.extend(p)
    ent = sum(ents) / len(ents)
    em_r, f1_r = score(predictions_r, dev_y)
    log.info("[dev EM: {:.5f} F1: {:.5f} Ent: {:.5f}]".format(em, f1, ent))
    log.info("[dev EMR: {:.5f} F1R: {:.5f}]".format(em_r, f1_r))
    best_f1_score = f1

    ''' interleaved training '''
    train_ent_batches = BatchGen(
            train_ent, batch_size=args.batch_size,
            pos_size=args.pos_size, ner_size=args.ner_size, gpu=args.cuda)
    len_train_ent_batches = len(train_ent_batches)
    train_ent_batches = iter(train_ent_batches)
    n_reg = 0
    n_ent = 0
    for epoch in range(args.epochs):
        log.warning('Epoch {}'.format(epoch))
        train_reg_batches = BatchGen(
                train_reg, batch_size=args.batch_size,
                pos_size=args.pos_size, ner_size=args.ner_size, gpu=args.cuda)
        start = datetime.now()

        for i_reg, reg_batch in enumerate(train_reg_batches):
            model.update(reg_batch)
            n_reg += 1
            if n_reg > args.start_ent:
                if i_reg % args.n_reg_per_ent == 0:
                    for j in range(args.n_ent_per_reg):
                        try:
                            model.update_entropy(next(train_ent_batches),
                                    gamma=args.gamma)
                            n_ent += 1
                        except StopIteration:
                            n_ent = 0
                            train_ent_batches = iter(BatchGen(
                                train_ent, batch_size=args.batch_size,
                                pos_size=args.pos_size, ner_size=args.ner_size,
                                gpu=args.cuda))

            if n_reg % args.n_report == 0:
                log.info('epoch [{:2}] batch [{}, {}] loss[{:.5f}] entropy[{:.5f}]'.format(
                    epoch, i_reg, n_ent, model.train_loss.avg,
                    -model.entropy_loss.avg / args.gamma))
        
            # if n_reg % args.n_eval == 0:
        dev_reg_batches = BatchGen(
                dev_reg, batch_size=args.batch_size,
                pos_size=args.pos_size, ner_size=args.ner_size,
                evaluation=True, gpu=args.cuda)
        dev_ent_batches = BatchGen(
                dev_ent, batch_size=args.batch_size,
                pos_size=args.pos_size, ner_size=args.ner_size,
                evaluation=True, gpu=args.cuda)

        ''' regular evaluation '''
        predictions = []
        for batch in dev_reg_batches:
            predictions.extend(model.predict(batch))
        em, f1 = score(predictions, dev_y)

        ''' entropy evaluation '''
        ents, predictions_r = [], []
        for batch in dev_ent_batches:
            p, _, ss, se, _, _ = model.predict(batch, get_all=True)
            ss = ss.cpu().numpy()
            se = se.cpu().numpy()
            ents.append(scipy.stats.entropy(ss.T).sum() + \
                        scipy.stats.entropy(se.T).sum())
            predictions_r.extend(p)
        ent = sum(ents) / len(ents)
        em_r, f1_r = score(predictions_r, dev_y)

        log.info("dev EM: {:.5f} F1: {:.5f} Ent: {:.5f}".format(em, f1, ent))
        log.info("[dev EMR: {:.5f} F1R: {:.5f}]".format(em_r, f1_r))

        ''' save best model '''
        if f1 > best_f1_score:
            best_f1_score = f1
            model_file = os.path.join(out_dir, 'best_model.pt')
            model.save(model_file, epoch)
            log.info('[save best model F1: {:.5f}]'.format(best_f1_score))

        ''' save models '''
        model_file = os.path.join(
                out_dir, 'checkpoint_epoch_{}.pt'.format(epoch))
        model.save(model_file, epoch)
        log.info("[save model {}]".format(model_file))
Code example #23
for i, bs in enumerate(BATCH_SIZE):
    print("\n ({0} of {1})".format(i + 1, number_of_exp))
    config = wv.Config(batch_size=bs)
    attrs = vars(config)
    config_info = ["%s: %s" % item for item in attrs.items()]
    info.append(config_info)
    my_model = wv.SkipGramModel(config)
    embeddings = wv.run_training(my_model,
                                 my_data,
                                 verbose=False,
                                 visualization=False,
                                 debug=False)
    score, report = util.score(index2word,
                               word2index,
                               embeddings,
                               eval_path,
                               verbose=False,
                               raw=True)
    results.append(score)
    print("Score = {}".format(score))
    for result in report:
        print(result)

BATCH_SIZE = list(BATCH_SIZE)
best_result = max(list(zip(results, BATCH_SIZE, info)))
result_string = """In an experiment with {0} batch sizes
the best size is {1} with score = {2}.
\n INFO = {3}""".format(number_of_exp, best_result[1], best_result[0],
                        best_result[2])

file = open("batch_size.txt", "w")
Code example #24
    # input feature batch, output label batch
    ypred = []
    for x in X:
        ypred.append(single_pred(tree, x))
    assert len(ypred) == len(X), "output length should be same as input rows"
    return ypred


if __name__ == "__main__":
    X, y, name = iris['data'], iris['target'], iris['target_names']

    # sklearn implementation
    clf = DecisionTreeClassifier(criterion='entropy')
    clf.fit(X, y)
    ypred = clf.predict(X)
    acc = score(ypred, y)
    print(f'sklearn implementation: acc={acc}')

    # self implementation

    root = subtree(X, y)
    if mode == 'dev':
        traverse = [root]
        i = 0
        while i < len(traverse):
            if traverse[i] is None:
                i += 1
                continue
            node = traverse[i]
            print(node.cut, '->')
            traverse.append(node.left)
Code example #25
File: genomestats.py  Project: haydnKing/codon-optim
    def score(self, seq):
        return util.score(self._normed, seq)
Code example #26
def individualScores(players):
    return [score(p["cardsWon"]) for p in players]
Code example #27
File: train_base.py  Project: ihsgnef/pathologies
def main():
    from args import conf

    parser = argparse.ArgumentParser()
    parser.add_argument('--resume', default=False)
    parser.add_argument('--resume-options', default=False)
    args = parser.parse_args()

    # set random seed
    random.seed(conf.seed)
    torch.manual_seed(conf.seed)
    if conf.cuda:
        torch.cuda.manual_seed(conf.seed)

    # setup logger
    log = logging.getLogger(__name__)
    log.setLevel(logging.DEBUG)
    fh = logging.FileHandler('main.log')
    fh.setLevel(logging.DEBUG)
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.INFO)
    formatter = logging.Formatter(
            fmt='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S')
    fh.setFormatter(formatter)
    ch.setFormatter(formatter)
    log.addHandler(fh)
    log.addHandler(ch)

    train, dev, dev_y, embedding, conf = load_data(conf)
    log.info(conf)
    log.info('[Data loaded.]')

    if args.resume:
        log.info('[loading previous model...]')
        checkpoint = torch.load(args.resume)
        if args.resume_options:
            conf = checkpoint['config']
        state_dict = checkpoint['state_dict']
        model = DocReaderModel(vars(conf), embedding, state_dict)
        epoch_0 = checkpoint['epoch'] + 1
        for i in range(checkpoint['epoch']):
            random.shuffle(list(range(len(train))))  # synchronize random seed
        if conf.reduce_lr:
            for param_group in model.optimizer.param_groups:
                param_group['lr'] *= conf.lr_decay
            log.info('[learning rate reduced by {}]'.format(conf.lr_decay))
    else:
        model = DocReaderModel(vars(conf), embedding)
        epoch_0 = 1

    if conf.cuda:
        model.cuda()

    if args.resume:
        batches = BatchGen(
                dev, batch_size=conf.batch_size,
                pos_size=conf.pos_size, ner_size=conf.ner_size,
                gpu=conf.cuda, evaluation=True)
        predictions = []
        for batch in batches:
            predictions.extend(model.predict(batch))
        em, f1 = score(predictions, dev_y)
        log.info("[dev EM: {} F1: {}]".format(em, f1))
        best_val_score = f1
    else:
        best_val_score = 0.0

    for epoch in range(epoch_0, epoch_0 + conf.epochs):
        log.warning('Epoch {}'.format(epoch))
        # train
        batches = BatchGen(
                train, batch_size=conf.batch_size,
                pos_size=conf.pos_size, ner_size=conf.ner_size,
                gpu=conf.cuda)
        start = datetime.now()
        for i, batch in enumerate(batches):
            model.update(batch)
            if i % conf.log_per_updates == 0:
                log.info('epoch [{0:2}] updates[{1:6}] '
                         'train loss[{2:.5f}] remaining[{3}]'.format(
                    epoch, model.updates, model.train_loss.avg,
                    str((datetime.now() - start) / (i + 1) *
                        (len(batches) - i - 1)).split('.')[0]))
        # eval
        if epoch % conf.eval_per_epoch == 0:
            batches = BatchGen(
                    dev, batch_size=conf.batch_size,
                    pos_size=conf.pos_size, ner_size=conf.ner_size,
                    gpu=conf.cuda, evaluation=True)
            predictions = []
            for batch in batches:
                predictions.extend(model.predict(batch))
            em, f1 = score(predictions, dev_y)
            log.warning("dev EM: {} F1: {}".format(em, f1))
        # save
        if not conf.save_last_only or epoch == epoch_0 + conf.epochs - 1:
            model_file = 'results/baseline_epoch_{}.pt'.format(epoch)
            model.save(model_file, epoch)
            if f1 > best_val_score:  # f1 from the most recent evaluation above
                best_val_score = f1
                copyfile(
                    model_file,
                    os.path.join('results/baseline.pt'))
                log.info('[new best model saved.]')
Code example #28
def contractBonus(team):
    return sumScores(team) - score(team["cardsWon"])
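sumScores is not shown in the listing, but given how breakdown entries are built in code example #2 (["label", amount] pairs), a plausible definition is:

def sumScores(team):
    # total of all breakdown entries: card points plus any bonuses
    return sum(amount for _, amount in team["breakdown"])

Under that reading, contractBonus returns the part of a team's total that comes from bonuses rather than from the cards it won.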