Example #1
File: app.py Project: sshh12/OverwatchML
def predict(battletag=''):

    # Fetch the player's profile blob from the public OWAPI endpoint.
    api_resp = requests.get(
        'https://owapi.net/api/v3/u/{}/blob'.format(
            urllib.parse.quote_plus(battletag)),
        headers={'User-Agent': 'OWAgent'}).text
    api_json = json.loads(api_resp)

    # Predict the overall SR plus per-hero breakdowns.
    overall_sr, (sr_predictions, heros, time_played) = predict_all(api_json)

    # The player's actual competitive rank, or -1 if it is missing
    # from the API payload.
    try:
        actual_sr = int(api_json['us']['stats']['competitive']['overall_stats']['comprank'])
    except (TypeError, KeyError):
        actual_sr = -1

    response = jsonify({
        'battletag': battletag,
        'api': api_json,
        'actualrank': actual_sr,
        'predictedrank': overall_sr,
        'specifics': {
            'sr': sr_predictions,
            'heros': heros,
            'timeplayed': time_played
        }
    })
    # Allow cross-origin requests so a front end can call this endpoint directly.
    response.headers.add('Access-Control-Allow-Origin', '*')

    return response
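
For context, the view above relies on imports and a route registration that the excerpt omits. A minimal sketch of that preamble, assuming a standard Flask app object (the route path is hypothetical, not shown in the source):

import json
import urllib.parse

import requests
from flask import Flask, jsonify

app = Flask(__name__)

# The view would then be registered with something like:
# @app.route('/predict/<battletag>')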
Example #2
def compute_accuracy(X, y, theta, year=0):
    '''
    Compute the fraction of predictions that fall within `year` of the true value.
    '''

    # Round the raw predictions to whole years.
    h_theta = np.round(predict_all(X, theta))
    delta = np.abs(h_theta - y)
    # A prediction counts as correct when it is off by at most `year`.
    accuracy = float(np.sum(delta <= year)) / len(h_theta)

    return accuracy
Example #3
def compute_cost(X, y, theta, lambdav=0):
    '''
    Compute the regularized squared-error cost:

    Cost = (sum((h(X) - y)^2) + lambda * sum(theta^2)) / (2 * m)
    '''

    m = len(X)
    h_theta = predict_all(X, theta)
    delta = np.square(h_theta - y)
    # Note: this regularizes every component of theta, including any intercept term.
    cost = (np.sum(delta) + lambdav * np.sum(np.square(theta))) / (2 * m)

    return cost
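
Examples #2 and #3 both call predict_all(X, theta), and the cost formula above is the regularized squared-error cost of linear regression, so predict_all is presumably the linear hypothesis. A minimal sketch under that assumption (a guess at the helper, not the project's code):

import numpy as np

def predict_all(X, theta):
    # Hypothesis h(X) = X @ theta, one prediction per row of X
    # (assumes X already includes a bias column if the model uses one).
    return np.dot(X, theta)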
Example #4
def main():
    filename = './data/pima-indians-diabetes.csv'
    dataset = load_csv(filename)

    # Hold out roughly a third of the data for testing.
    split_ratio = 0.67
    training_set, test_set = split_dataset(dataset, split_ratio)

    # prepare model: summarize the training data per class
    summaries = summarize_by_class(training_set)

    # test model: predict every test row and score the predictions
    predictions = predict_all(summaries, test_set)
    accuracy = get_accuracy(test_set, predictions)

    print(accuracy)
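
Here predict_all takes per-class summaries rather than a parameter vector, so it presumably maps a single-row classifier over the test set. A minimal sketch, assuming a per-row predict helper exists (the helper name is an assumption):

def predict_all(summaries, test_set):
    # Classify every test row with the single-row predictor.
    return [predict(summaries, row) for row in test_set]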
Example #5
def compute_accuracy_year_2(X, y, theta_year_yes_or_not, theta_year_less,
                            theta_year_more, year1, year_delta):
    '''
    Compute accuracy for a two-stage model: a gating model decides which
    regression (theta_year_less or theta_year_more) to apply per sample,
    and a prediction counts as correct when it is within year_delta.
    '''

    accuracy = 0
    # Stage 1: decide, per sample, which of the two regressions to use.
    year_yes_or_not = np.round(predict_all(X, theta_year_yes_or_not))

    for index in range(len(year_yes_or_not)):
        if year_yes_or_not[index] > year1:
            h_theta = np.round(predict(X[index], theta_year_more))
        else:
            h_theta = np.round(predict(X[index], theta_year_less))

        # Stage 2: count a hit if the prediction is close enough to the truth.
        if np.round(abs(h_theta - y[index])) <= year_delta:
            accuracy += 1.0

    return accuracy / len(X)
Example #6
def compute_cost_year_2(X, y, theta_year_yes_or_not, theta_year_less,
                        theta_year_more, year1, year_delta):
    '''
    Compute the squared-error cost of the same two-stage model.
    '''

    cost = 0
    # Gate each sample to one of the two regressions, as in compute_accuracy_year_2.
    year_yes_or_not = np.round(predict_all(X, theta_year_yes_or_not))

    for index in range(len(year_yes_or_not)):
        if year_yes_or_not[index] > year1:
            h_theta = np.round(predict(X[index], theta_year_more))
        else:
            h_theta = np.round(predict(X[index], theta_year_less))

        delta = np.square(h_theta - y[index])

        cost += delta

    cost = cost / (2.0 * len(X))

    return cost
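
Examples #5 and #6 also call a single-sample predict alongside predict_all. A minimal sketch under the same linear-model assumption as before:

import numpy as np

def predict(x, theta):
    # Hypothesis for one row: h(x) = x . theta
    return float(np.dot(x, theta))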
Example #7
        pass

    def on_button4_clicked(self, data):
        print(
            "Button4",
            data,
        )

        # Build a fresh matplotlib figure for the company-change prediction.
        self.f1 = Figure(figsize=(5, 4), dpi=100)
        self.a1 = self.f1.add_subplot(111)
        x, y = predict.fit_company_change(2867)
        #print(x, y)
        predict.plotchange(self, list_all[3], x, y)
        # Swap the old scrolled window for one wrapping the new canvas.
        self.box3.remove(self.sw1)
        self.canvas2 = FigureCanvas(self.f1)  # a Gtk.DrawingArea
        self.canvas2.set_size_request(400, 400)
        self.sw1 = Gtk.ScrolledWindow()
        self.sw1.add_with_viewport(self.canvas2)
        self.sw1.set_border_width(10)
        self.box3.pack_end(self.sw1, True, True, 0)
        win.show_all()


if __name__ == "__main__":
    # Precompute the prediction data once; button handlers index into list_all.
    list_all = predict.predict_all()
    win = MyWindow()
    win.connect("destroy", Gtk.main_quit)
    win.show_all()
    Gtk.main()
Example #8
def train_with_classifier():
    logging.info("start train with classifier")
    turbo_num = 500
    part_epochs = [(2000, 500, 200), (100, 100, 100)]
    print_everys = [100, 100]
    save_everys = [100, 100]
    learning_rates = [1e-3, 1e-4]
    con_loss_lam = 0

    # Early stopping is effectively disabled here: the trigger further down
    # is commented out.
    early_stopping = None
    # best_loss_batch = 0
    best_valid_loss = 100000
    # best_acc_batch = 0
    best_valid_acc = 0

    start_time = time.time()
    for turbo in range(turbo_num):
        if early_stopping:
            logging.info("early stopping at epoch %d" % turbo)
            break
        # First turbo: longer schedule and higher learning rate; later turbos
        # use the shorter schedule.
        num_epochs = part_epochs[0] if turbo == 0 else part_epochs[1]
        print_every = print_everys[0] if turbo == 0 else print_everys[1]
        save_every = save_everys[0] if turbo == 0 else save_everys[1]
        lr = learning_rates[0] if turbo == 0 else learning_rates[1]

        # Drop the learning rate once, at the start of the second turbo.
        if turbo == 1:
            for param_group in encoder_optimizer.param_groups:
                param_group['lr'] = lr
            for param_group in decoder_optimizer.param_groups:
                param_group['lr'] = lr
            for param_group in classifier_optimizer.param_groups:
                param_group['lr'] = lr

        # Alternate which module trains in each part: part 0 the decoder
        # (plus the encoder on the first turbo), part 1 the classifier,
        # part 2 the encoder against the combined loss.
        for part in range(3):

            logging.info("::::::::::::::::::::::::")
            logging.info("::: Turbo " + str(turbo) + " : Part " + str(part) +
                         " :::")
            logging.info("::::::::::::::::::::::::")

            if part == 0:
                for param in encoder.parameters():
                    param.requires_grad = False if turbo > 0 else True
                for param in decoder.parameters():
                    param.requires_grad = True
                for param in classifier.parameters():
                    param.requires_grad = False
            elif part == 1:
                for param in encoder.parameters():
                    param.requires_grad = False
                for param in decoder.parameters():
                    param.requires_grad = False
                for param in classifier.parameters():
                    param.requires_grad = True
            elif part == 2:
                for param in encoder.parameters():
                    param.requires_grad = True
                for param in decoder.parameters():
                    param.requires_grad = False
                for param in classifier.parameters():
                    param.requires_grad = False

            train_loss1 = 0
            valid_loss1 = 0
            train_loss2 = 0
            valid_loss2 = 0
            batch_per_epoch = num_epochs[part] + 1
            for batch_idx, batch in enumerate(ctrain_loader.next_batch()):
                if batch_idx == batch_per_epoch:
                    break

                seq, seq_len, gold_rel, sub, rel, rel_len, class_rel, labels, \
                v_seq, v_seq_len, v_gold_rel, v_sub, v_rel, v_rel_len, v_class_rel, v_labels, = batch
                encoder_optimizer.zero_grad()
                decoder_optimizer.zero_grad()
                classifier_optimizer.zero_grad()

                loss1, loss2 = None, None
                if part == 0 or part == 2:
                    loss1 = train_epoch(seq, seq_len, rel, rel_len, sub,
                                        gold_rel, encoder, decoder)
                    v_loss1 = train_epoch(v_seq, v_seq_len, v_rel, v_rel_len,
                                          v_sub, v_gold_rel, encoder, decoder)
                    train_loss1 += loss1.data.item()
                    valid_loss1 += v_loss1.data.item()
                if part == 1 or part == 2:
                    loss2 = classify_epoch(seq, seq_len, rel, rel_len, sub,
                                           gold_rel, class_rel, labels,
                                           encoder, decoder, classifier,
                                           classifier_loss)
                    v_loss2 = classify_epoch(v_seq, v_seq_len, v_rel,
                                             v_rel_len, v_sub, v_gold_rel,
                                             v_class_rel, v_labels, encoder,
                                             decoder, classifier,
                                             classifier_loss)
                    train_loss2 += loss2.data.item()
                    valid_loss2 += v_loss2.data.item()

                if part == 0:
                    loss1.backward()
                    decoder_optimizer.step()
                    # The encoder only moves during the first turbo.
                    if turbo == 0:
                        encoder_optimizer.step()
                elif part == 1:
                    loss2.backward()
                    classifier_optimizer.step()
                elif part == 2:
                    # Combined objective; with con_loss_lam = 0 only loss2
                    # actually drives the encoder update.
                    loss_total = con_loss_lam * loss1 + loss2
                    loss_total.backward()
                    encoder_optimizer.step()

                if (batch_idx + 1) % print_every == 0:
                    # Periodically evaluate accuracy on the validation loader.
                    hits, total = predict_all(cand_dataloader=valid_loader,
                                              encoder=encoder,
                                              decoder=decoder,
                                              device=device,
                                              rel_vocab=rel_vocab,
                                              train=True)
                    acc = hits / total
                    logging.info("-----------------------------")
                    logging.info("batch %d in epoch %d" %
                                 (batch_idx + 1, turbo))
                    logging.info("train loss1 is %f" % train_loss1)
                    logging.info("valid loss1 is %f" % valid_loss1)
                    logging.info("train loss2 is %f" % train_loss2)
                    logging.info("valid loss2 is %f" % valid_loss2)
                    logging.info("valid acc is %f" % acc)
                    logging.info("use time: %d s" % (time.time() - start_time))

                    if acc > best_valid_acc:
                        best_valid_acc = acc
                        # best_acc_batch = epoch * batch_per_epoch + batch_idx
                        torch.save(
                            encoder,
                            args.save_path + "/best_encoder.%s.pth" % name)
                        torch.save(
                            decoder,
                            args.save_path + "/best_decoder.%s.pth" % name)
                        torch.save(
                            classifier,
                            args.save_path + "/best_classifier.%s.pth" % name)
                    # elif acc < best_valid_acc:
                    #     if epoch * batch_per_epoch + batch_idx - best_acc_batch >= patience:
                    #         early_stopping = True
                    #         break

                    train_loss1 = 0
                    train_loss2 = 0
                    valid_loss1 = 0
                    valid_loss2 = 0

                if batch_idx % save_every == 0:
                    torch.save(encoder,
                               args.save_path + "/encoder.%s.pth" % name)
                    torch.save(decoder,
                               args.save_path + "/decoder.%s.pth" % name)
                    torch.save(classifier,
                               args.save_path + "/classifier.%s.pth" % name)
Example #9
def train_without_classifier():
    batch_per_epoch = train_loader.t_batch_num
    print_every = 400
    save_every = 3
    # Stop if validation accuracy has not improved for five epochs' worth of batches.
    patience = 5 * batch_per_epoch
    early_stopping = None
    best_acc_batch = 0
    best_valid_acc = 0

    start_time = time.time()
    for epoch in range(1, args.epochs):
        if early_stopping:
            logging.info("early stopping at epoch %d" % epoch)
            break
        pr_train_loss = 0
        pr_valid_loss = 0
        for batch_idx, batch in enumerate(train_loader.next_batch()):
            # seq:      (batch_size, seq_len)
            # seq_len:  (batch,)
            # gold_rel: (batch,)
            # rel:      (batch, neg_size)
            # sub:      (batch,)
            seq, seq_len, gold_rel, sub, rel, rel_len, \
            v_seq, v_seq_len, v_gold_rel, v_sub, v_rel, v_rel_len = batch

            encoder_optimizer.zero_grad()
            decoder_optimizer.zero_grad()
            # The classifier is not trained in this variant, but its optimizer
            # is still zeroed (and stepped below).
            classifier_optimizer.zero_grad()

            loss1 = train_epoch(seq, seq_len, rel, rel_len, sub, gold_rel,
                                encoder, decoder)
            v_loss = train_epoch(v_seq, v_seq_len, v_rel, v_rel_len, v_sub,
                                 v_gold_rel, encoder, decoder)

            loss_total = loss1  # only the sequence loss is optimized in this variant
            loss_total.backward()
            pr_train_loss += loss_total.data.item()
            pr_valid_loss += v_loss.data.item()
            encoder_optimizer.step()
            decoder_optimizer.step()
            classifier_optimizer.step()

            if (batch_idx + 1) % print_every == 0:
                # Periodically evaluate accuracy on the validation loader.
                hits, total = predict_all(cand_dataloader=valid_loader,
                                          encoder=encoder,
                                          decoder=decoder,
                                          device=device,
                                          rel_vocab=rel_vocab,
                                          train=True)
                acc = hits / total
                logging.info("-----------------------------")
                logging.info("batch %d in epoch %d" % (batch_idx + 1, epoch))
                logging.info("train loss is %f" % pr_train_loss)
                logging.info("valid loss is %f" % pr_valid_loss)
                logging.info("valid acc is %f" % acc)
                logging.info("use time: %d s" % (time.time() - start_time))

                if acc > best_valid_acc:
                    best_valid_acc = acc
                    best_acc_batch = epoch * batch_per_epoch + batch_idx
                    torch.save(encoder,
                               args.save_path + "/best_encoder.%s.pth" % name)
                    torch.save(decoder,
                               args.save_path + "/best_decoder.%s.pth" % name)
                elif acc < best_valid_acc:
                    # No improvement within `patience` batches: stop early.
                    if epoch * batch_per_epoch + batch_idx - best_acc_batch >= patience:
                        early_stopping = True
                        break

                pr_train_loss = 0
                pr_valid_loss = 0

        if epoch % save_every == 0:
            torch.save(encoder, args.save_path + "/encoder.%s.pth" % name)
            torch.save(decoder, args.save_path + "/decoder.%s.pth" % name)
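
In Examples #8 and #9, predict_all is an evaluation helper: it runs the model over a candidate dataloader and returns a (hits, total) pair from which the caller computes accuracy. A minimal sketch of that contract; score_batch is a hypothetical stand-in for the project's actual encoder-decoder scoring logic:

def predict_all(cand_dataloader, encoder, decoder, device, rel_vocab, train=False):
    # Accumulate correct predictions (hits) and example counts (total)
    # across the whole dataloader; the caller computes acc = hits / total.
    hits, total = 0, 0
    for batch in cand_dataloader.next_batch():
        batch_hits, batch_total = score_batch(batch, encoder, decoder,
                                              device, rel_vocab, train)
        hits += batch_hits
        total += batch_total
    return hits, total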