Code Example #1
 def evaluate(self, game, player_number):
     winner = game.who_won()
     if winner is None:
         opponent = Game.get_other_player(player_number)
         # Return this player's pawn count minus the opponent's pawn count.
         return Evaluation(
             len(game.state.pawns[player_number]) -
             len(game.state.pawns[opponent]))
     else:
         if winner == player_number:
             return Evaluation(self.win_loss_rewards[0])
         else:
             return Evaluation(self.win_loss_rewards[1])
Code Example #2
    def evaluate(self, data, sess):
        res = []
        all_labels = []
        all_scores = []

        sample = 0
        for idts, idbs, id_labels in data:
            sample += 1
            cur_scores = self.eval_batch(idts, idbs, sess)
            assert len(id_labels) == len(cur_scores)  # equal to 20

            all_labels.append(id_labels)
            all_scores.append(cur_scores)
            ranks = (-cur_scores).argsort()
            ranked_labels = id_labels[ranks]
            res.append(ranked_labels)

        e = Evaluation(res)
        MAP = e.MAP()
        MRR = e.MRR()
        P1 = e.Precision(1)
        P5 = e.Precision(5)
        if 'mlp_dim' in self.args and self.args.mlp_dim != 0:
            loss1 = dev_entropy_loss(all_labels, all_scores)
        else:
            loss1 = devloss1(all_labels, all_scores)
        loss0 = devloss0(all_labels, all_scores)
        loss2 = devloss2(all_labels, all_scores)
        return MAP, MRR, P1, P5, loss0, loss1, loss2
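This example, like #13, #20, and #24 below, feeds `Evaluation` one array of ranked binary relevance labels per query and reads back `MAP()`, `MRR()`, and `Precision(k)`. As a minimal sketch only, assuming 0/1 labels and the textbook metric definitions (the name `RankingEvaluation` and the skip-empty-query convention are illustrative assumptions, not these projects' code):

import numpy as np

class RankingEvaluation:
    """Illustrative sketch: IR metrics over per-query ranked 0/1 labels."""

    def __init__(self, ranked_label_lists):
        self.data = [np.asarray(labels) for labels in ranked_label_lists]

    def MAP(self):
        # Mean average precision; queries without any relevant result are
        # skipped here (a common, but not universal, convention).
        aps = []
        for labels in self.data:
            hits = np.flatnonzero(labels == 1)
            if hits.size:
                aps.append(np.mean([(i + 1.0) / (pos + 1.0)
                                    for i, pos in enumerate(hits)]))
        return float(np.mean(aps)) if aps else 0.0

    def MRR(self):
        # Mean reciprocal rank of the first relevant result per query.
        rrs = [1.0 / (np.flatnonzero(labels == 1)[0] + 1)
               if (labels == 1).any() else 0.0 for labels in self.data]
        return float(np.mean(rrs))

    def Precision(self, k):
        # Fraction of relevant results among the top k, averaged over queries.
        return float(np.mean([labels[:k].mean() for labels in self.data]))

# Usage (illustrative): RankingEvaluation([[1, 0, 0, 1], [0, 1]]).MAP()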
Code Example #3
def run_stochastic_gradient_descent(tx_train, y_train, tx_val, y_val):
    """It performs training and evaluation of least squares with stochastic gradient descent."""

    print('\nTraining with Stochastic Gradient Descent')
    initial_w = np.zeros((tx_train.shape[1]))
    gamma = 0.005
    max_iter = 3000

    # Train the model
    w, _ = least_squares_SGD(y=y_train,
                             tx=tx_train,
                             initial_w=initial_w,
                             max_iters=max_iter,
                             gamma=gamma,
                             verbose=False)

    # Perform predictions
    y_pred = predict_labels(weights=w, data=tx_val, logistic=False)

    # Evaluate
    evaluation = Evaluation(y_actual=y_val, y_pred=y_pred)
    acc = evaluation.get_accuracy()
    f1 = evaluation.get_f1()
    print('Accuracy: {acc}, F1: {f1}'.format(acc=acc, f1=f1))

    return acc, f1
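`Evaluation(y_actual=..., y_pred=...)` here (and in examples #22 and #23) exposes `get_accuracy()` and `get_f1()`. A hypothetical sketch assuming binary labels, where the class name and the `positive` parameter are illustrative, not this project's API:

import numpy as np

class BinaryEvaluation:
    """Illustrative sketch: accuracy and F1 for binary label vectors."""

    def __init__(self, y_actual, y_pred):
        self.y_actual = np.asarray(y_actual)
        self.y_pred = np.asarray(y_pred)

    def get_accuracy(self):
        return float(np.mean(self.y_actual == self.y_pred))

    def get_f1(self, positive=1):
        tp = np.sum((self.y_pred == positive) & (self.y_actual == positive))
        fp = np.sum((self.y_pred == positive) & (self.y_actual != positive))
        fn = np.sum((self.y_pred != positive) & (self.y_actual == positive))
        denom = 2 * tp + fp + fn
        # F1 = 2TP / (2TP + FP + FN); 0.0 when no positives exist at all.
        return float(2 * tp / denom) if denom else 0.0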
Code Example #4
def main():
    # Step 1: Collect data (uncomment this if you don't have data)
    # Change movie names in config.properties for the ones you want
    print("---- Starting Scrapping Module ---")
    scrapper = RTScrapper()
    scrapper.main()
    print("\n")
    print("---- Completed Scrapping of Movie Reviews ----")
    # Step2: Create Complete DataFrame with all the information
    connector = Organizer()
    connector.connectDFtoReview(df_filename="DFM2R.pkl",
                                reviewFolder="ScrappedData")
    print("\n")
    print("---- Starting Raking Module ----")
    # Step3: Run Weighted Page Rank for scores
    # ss = getConfigParams()
    summarySize, Scores_folder = getConfigParams()
    for Measure in MeaureTypes:
        print("-- Using " + str(Measure) + " --")
        ranker = RankSentences(Measuretype=Measure,
                               summarySize=summarySize,
                               filename="CompleteData.pkl")
        ranker.main()
    print("\n")
    print("---- Completed Ranking of Sentences ----")
    # Final step: evaluate
    print("\n")
    print("---- Evaluating Summaries ----")
    evaluate = Evaluation()
    evaluate.main(folderName=Scores_folder)
    print("\n")
    print("---- Completed Evaluation ----")
Code Example #5
def run_experiment_with_rake():
    print "\nBegin experiment using RAKE algorithm..."
    # RAKE: predict keywords with RAKE; take the words with the highest RAKE scores
    rake = RakeKeywordExtractor()
    tweets_rake['keyword'] = tweets_rake.apply(lambda t: rake.extract_keyword(
        rake.extract_candidates(t['text'], incl_scores=True)),
                                               axis=1)

    # RAKE: infer the aspect via aspect mapping, using the highest similarity
    tweets_rake['selected_keyword'] = tweets_rake.apply(
        lambda t: asp.find_nearest_inferred_aspect(t['keyword'], emb)[1],
        axis=1)
    tweets_rake['inferred_aspect'] = tweets_rake.apply(
        lambda t: asp.find_nearest_inferred_aspect(t['keyword'], emb)[0],
        axis=1)
    tweets_rake['gold_aspect'] = tweets_rake.apply(
        lambda t: asp.INVERTED_ASPECTS[t['inferred_aspect']], axis=1)

    tweets_rake.to_csv('dump/result_rake.csv', encoding='utf-8', index=False)

    # RAKE: evaluate with accuracy
    eva_rake = Evaluation(tweets_rake)
    conf_matrix = eva_rake.build_confusion_matrix(tweets_rake)
    print "Confusion matrix:"
    print conf_matrix
    print "Accuracy using RAKE algorithm: {}".format(eva_rake.accuracy())
    print "Average Precision using RAKE algorithm: {}".format(
        eva_rake.average_precision())
    print "Average Recall using RAKE algorithm: {}".format(
        eva_rake.average_recall())
Code Example #6
def main():
    vocab = input('Enter choice for vocabulary: ')
    # vocab = '0'
    ngram = input('Enter choice for NGram: ')
    # ngram = '1'
    delta = input('Enter smoothing delta value between 0 and 1: ')
    # delta = '0.5'
    # training_file = 'OriginalDataSet/training-tweets.txt'
    training_file = input('Enter training file: ')
    # testing_file = 'OriginalDataSet/test-tweets-given.txt'
    testing_file = input('Enter test file: ')

    classifier = Classifier(vocab, ngram, float(delta), training_file, testing_file)
    start = time.time()
    classifier.read_data(False)
    print('Time taken to read: ', time.time() - start)
    start = time.time()
    classifier.create_model()
    print('Time taken to create model: ', time.time() - start)
    start = time.time()
    classifier.train_model()
    print('Time taken to train model: ', time.time() - start)
    # classifier.save_model()
    start = time.time()
    trace_file = classifier.test_model()
    print('Time taken to test model: ', time.time() - start)
    evaluation = Evaluation(trace_file)
    start = time.time()
    evaluation.calculate_performance()
    evaluation.print_to_file()
    print('Time taken to evaluate model: ', time.time() - start)
Code Example #7
def crossValidationIris(k=5):
    testData1 = np.load("iris_data/competitionData.npy")
    testData2 = np.load("iris_data/evaluationData.npy")
    trainData = np.load("iris_data/trainingData.npy")
    wholeData = np.concatenate((trainData, testData1, testData2)).astype(float)
    folds = k
    attTypes = [1, 1, 1, 1]
    trainingSets = np.split(wholeData,
                            [(i + 1) * round(len(wholeData) / folds)
                             for i in range(folds - 1)])

    for fold in trainingSets:  # avoid shadowing the built-in `set`
        print('\n\n\n', fold, '\n\n\n')

    for idx, testData in enumerate(trainingSets):
        print(np.r_[0:idx, idx + 1:len(trainingSets)])
        trainingData = np.concatenate(
            [trainingSets[i] for i in np.r_[0:idx, idx + 1:len(trainingSets)]])
        print('testData: ', len(testData))
        print('trainingData: ', len(trainingData))
        print('K FOLD #' + str(idx))
        start = time.time()
        model = Naive(trainingData, attTypes)
        timer = time.time()
        print('Time to train: ', timer - start)
        start = time.time()
        evalModel = Evaluation(model, testData, CLASS_AMM_IRIS)
        evalModel.normalPrint()
        print('Took ', time.time() - start, 's')
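The `np.r_[0:idx, idx + 1:len(trainingSets)]` expression above concatenates the integer ranges on either side of the held-out fold, i.e. the indices of all training folds. A quick illustration:

import numpy as np

folds = 5
idx = 2  # the held-out fold
print(np.r_[0:idx, idx + 1:folds])  # -> [0 1 3 4], every fold except #2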
Code Example #8
 def __init__(self, name, param=10, link_method=1, granularity=1):
     self.name = name
     self.agent = None
     self.param = param
     self.link_method = link_method
     self.granularity = granularity
     self.evaluation = Evaluation()
Code Example #9
File: main.py  Project: dincydavistech/sunnynlp
def main(args):
    # Load configuration
    config = Configuration(args.yaml_path)

    print("Loading Probase...")
    probase = Probase(config)

    print("Loading dataset...")
    dataset = Data(config)

    print("Loading NLP utility...")
    nlp = NLP('en')

    print("Loading feature extractor...")
    features = Feature(config, probase, nlp=nlp)

    print("Extracting vector features")
    features.extract_vector_features(dataset)

    print("Extracting statistical vector features")
    features.extract_statistical_features(dataset)

    print("Evaluating clasifiers")
    ev = Evaluation(config, dataset)
    ev.full_evaluation(features.X, features.y)
Code Example #10
 def whole_evaluation(self):
     e = Evaluation()
     sql = "SELECT score1, score2, score3, score4, score5 From experiment_system_user Where system_type = 1"
     self.cursor.execute(sql)
     w2v_score = []
     w2v_scores = []
     scores_list = self.cursor.fetchall()
     for score_list in scores_list:
         w2v_score.append(score_list[0])
         w2v_score.append(score_list[1])
         w2v_score.append(score_list[2])
         w2v_score.append(score_list[3])
         w2v_score.append(score_list[4])
         w2v_scores.append(w2v_score)
         w2v_score = []
     sql = "SELECT score1, score2, score3, score4, score5 From experiment_system_user Where system_type = 2"
     self.cursor.execute(sql)
     rstr_score = []
     rstr_scores = []
     scores_list = self.cursor.fetchall()
     for score_list in scores_list:
         rstr_score.append(score_list[0])
         rstr_score.append(score_list[1])
         rstr_score.append(score_list[2])
         rstr_score.append(score_list[3])
         rstr_score.append(score_list[4])
         rstr_scores.append(rstr_score)
         rstr_score = []
     print("w2v_scores:" + str(w2v_scores))
     print("length:" + str(len(w2v_scores)))
     print("rstr_scores:" + str(rstr_scores))
     print("length:" + str(len(rstr_scores)))
     w2v_ndcg = e.average_ndcg(w2v_scores)
     w2v_MAP = e.MAP(w2v_scores)
     rstr_ndcg = e.average_ndcg(rstr_scores)
     rstr_MAP = e.MAP(rstr_scores)
     print("\nNDCG:")
     print(w2v_ndcg)
     print(rstr_ndcg)
     plt.title("Evaluate System Performance NDCG@k")
     plt.xlabel("Top K Recommendation")
     plt.ylabel("NDCG@k")
     plt.plot(range(1, 6), w2v_ndcg, "-v", color='y', label="W2V")
     plt.plot(range(1, 6), rstr_ndcg, "-v", color='m', label="RSTR")
     plt.legend(loc="best")
     # save image
     plt.savefig('image/System_NDCG.png')
     plt.close()
     print("\nMAP:")
     print(w2v_MAP)
     print(rstr_MAP)
     plt.title("Evaluate System Performance MAP@k")
     plt.xlabel("Top K Recommendation")
     plt.ylabel("MAP@k")
     plt.plot(range(1, 6), w2v_MAP, "-v", color='y', label="W2V")
     plt.plot(range(1, 6), rstr_MAP, "-v", color='m', label="RSTR")
     plt.legend(loc="best")
     # save image
     plt.savefig('image/System_MAP.png')
     plt.close()
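The plots above iterate over `range(1, 6)`, so `average_ndcg` and `MAP` evidently return one value per cutoff k = 1..5. A sketch of NDCG@k over graded relevance scores, assuming the exponential-gain DCG formula (function names are illustrative, not this project's API):

import numpy as np

def dcg_at_k(scores, k):
    # Exponential gain (2^rel - 1) discounted by log2 of the rank.
    gains = 2.0 ** np.asarray(scores, dtype=float)[:k] - 1.0
    discounts = np.log2(np.arange(2, gains.size + 2))
    return float(np.sum(gains / discounts))

def ndcg_at_k(scores, k):
    # Normalize by the DCG of the ideal (descending) ordering.
    ideal = dcg_at_k(sorted(scores, reverse=True), k)
    return dcg_at_k(scores, k) / ideal if ideal > 0 else 0.0

def average_ndcg(score_lists):
    # One averaged NDCG value per cutoff k = 1..5, matching range(1, 6).
    return [float(np.mean([ndcg_at_k(s, k) for s in score_lists]))
            for k in range(1, 6)]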
Code Example #11
File: outputter.py  Project: wean/coupon
def run(configFile, name):

    OutputPath.init(configFile)

    thread = ThreadWritableObject(configFile, name)
    thread.start()

    sys.stdout = thread
    sys.errout = thread # XXX: Actually, it does NOT work

    try:

        db = Database(configFile, 'specials')
        db.initialize()

        evaluation = Evaluation(configFile, db)

        evaluation.updateOverdue()

        path = OutputPath.getSharePath()
        sharePath = getProperty(configFile, 'output-share-file')

        cmd = '/bin/rm -f {1} && /bin/ln -s {0} {1}'.format(path, sharePath)
        runCommand(cmd)

        data = evaluation.output()

        with open(path, 'w') as fp:
            fp.write(reprDict(data))

    except KeyboardInterrupt:
        pass
    except Exception, e:
        print 'Error occurs at', datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        traceback.print_exc(file=sys.stdout)
Code Example #12
File: searcher.py  Project: wean/coupon-windows
def run(configFile, name, content, savefile):

    OutputPath.init(configFile)

    try:

        db = Database(configFile, 'specials')
        db.initialize()

        evaluation = Evaluation(configFile, db)

        data = evaluation.search(content)

        if savefile is not None:
            with open(savefile, 'w') as fp:
                fp.write(reprDict(data))
        else:
            print reprDict(data)

        return 0

    except KeyboardInterrupt:
        pass
    except Exception, e:
        print 'Error occurs at', datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        traceback.print_exc(file=sys.stdout)
Code Example #13
    def evaluate(self, data, session):
        # return for each query the labels, ranked results, and scores
        eval_func = self.score_func
        all_ranked_labels = []
        all_ranked_ids = []
        all_ranked_scores = []
        query_ids = []
        all_MAP, all_MRR, all_Pat1, all_Pat5 = [], [], [], []
        for idts, idbs, labels, pid, qids in data:
            scores = eval_func(idts, idbs, session)
            assert len(scores) == len(labels)
            ranks = (-scores).argsort()
            ranked_scores = np.array(scores)[ranks]
            ranked_labels = labels[ranks]
            ranked_ids = np.array(qids)[ranks]
            query_ids.append(pid)
            all_ranked_labels.append(ranked_labels)
            all_ranked_ids.append(ranked_ids)
            all_ranked_scores.append(ranked_scores)
            this_ev = Evaluation([ranked_labels])
            all_MAP.append(this_ev.MAP())
            all_MRR.append(this_ev.MRR())
            all_Pat1.append(this_ev.Precision(1))
            all_Pat5.append(this_ev.Precision(5))

        print 'average all ... ', sum(all_MAP) / len(all_MAP), sum(
            all_MRR) / len(all_MRR), sum(all_Pat1) / len(all_Pat1), sum(
                all_Pat5) / len(all_Pat5)
        return all_MAP, all_MRR, all_Pat1, all_Pat5, all_ranked_labels, all_ranked_ids, query_ids, all_ranked_scores
Code Example #14
 def __init__(self, root):
     self.labelusr = tk.Label(root, text='Student ID:')
     self.labelusr.grid(row=0, sticky=tk.W)
     self.username = tk.StringVar()
     tk.Entry(root, textvariable=self.username).grid(row=0, column=1)
     self.labelpw = tk.Label(root, text='Password:')
     self.labelpw.grid(row=1, sticky=tk.W)
     self.password = tk.StringVar()
     tk.Entry(root, textvariable=self.password, show='*').grid(row=1,
                                                               column=1)
     self.labelcode = tk.Label(root, text='Captcha:')
     self.labelcode.grid(row=2, sticky=tk.W)
     self.code = tk.StringVar()
     tk.Entry(root, textvariable=self.code).grid(row=2, column=1)
     self.button1 = tk.Button(root, text="Log in", command=self.prelogin)
     self.button1.grid(row=3, column=0)
     self.button2 = tk.Button(root, text="Refresh captcha", command=self.prechange)
     self.button2.grid(row=3, column=2)
     self.info = tk.LabelFrame(root, text='Info:                   ')
     self.error = tk.StringVar()
     self.info.grid(row=4, column=1)
     self.Labelerr = tk.Label(self.info,
                              textvariable=self.error,
                              wraplength=130,
                              height=2)  # adjustable height of the info text box
     self.Labelerr.grid()
     self.eva = Evaluation(self.error)
     self.labelimg = tk.Label(root)
     self.labelimg.grid(row=2, column=2)
     self.prechange()
Code Example #15
    def on_epoch_end(self, epoch, logs=None):
        modelName = os.path.join(
            self.foldPath, self.category + "_weights_" + str(epoch) + ".hdf5")
        keras.models.save_model(self.model, modelName)
        print "Saving model to ", modelName

        print "Runing evaluation ........."

        xEval = Evaluation(self.category, None)
        xEval.init_from_model(self.model)

        start = time()
        neScore, categoryDict = xEval.eval(self.multiOut, details=True)
        end = time()
        print "Evaluation Done", str(
            neScore), " cost ", end - start, " seconds!"

        for key in categoryDict.keys():
            scores = categoryDict[key]
            print key, ' score ', sum(scores) / len(scores)

        with open(self.valLog, 'a+') as xfile:
            xfile.write(modelName + ", Socre " + str(neScore) + "\n")
            for key in categoryDict.keys():
                scores = categoryDict[key]
                xfile.write(key + ": " + str(sum(scores) / len(scores)) + "\n")

Code Example #16
    def __init__(self, evaluation=None, param=None, **data_param):
        self.data_param = data_param
        self.model = None
        self.y_pred = None
        self.y_true = None
        self.feature = self.data_param.get('feature_list', None)

        self.Xi_train = self.data_param.get('Xi_train', None)
        self.Xv_train = self.data_param.get('Xv_train', None)
        self.X_train = self.data_param.get('X_train', None)
        self.y_train = self.data_param.get('y_train', None)

        self.Xi_test = self.data_param.get('Xi_test', None)
        self.Xv_test = self.data_param.get('Xv_test', None)
        self.X_test = self.data_param.get('X_test', None)
        self.y_test = self.data_param.get('y_test', None)

        # self.train_x = self.data_param.get('feature_train', None)
        # self.train_y = self.data_param.get('label_train', None)
        # self.test_x = self.data_param.get('feature_test', None)
        # self.test_y = self.data_param.get('label_test', None)
        if evaluation is None:
            self.evaluation = Evaluation(model=self)
        else:
            self.evaluation = evaluation
        # self.param is never initialized upstream; start from an empty dict
        # so that merging overrides does not raise AttributeError.
        self.param = {}
        if isinstance(param, dict):
            self.param.update(param)
Code Example #17
    def __init__(self, config, model_name):
        """
        Initialize model class
        :param config: experiment configuration
        :param model_name: model name
        """
        super(HML, self).__init__()
        self.config = config
        self.device = torch.device("cpu")
        self.model_name = model_name

        self.item_emb = ItemEmbedding(config)
        self.user_emb = UserEmbedding(config)

        self.mp_learner = MetapathLearner(config)
        self.meta_learner = MetaLearner(config)

        self.mp_lr = config['mp_lr']
        self.local_lr = config['local_lr']
        self.emb_dim = self.config['embedding_dim']

        self.cal_metrics = Evaluation()

        self.ml_weight_len = len(self.meta_learner.update_parameters())
        self.ml_weight_name = list(
            self.meta_learner.update_parameters().keys())
        self.mp_weight_len = len(self.mp_learner.update_parameters())
        self.mp_weight_name = list(self.mp_learner.update_parameters().keys())

        self.transformer_liners = self.transform_mp2task()

        self.meta_optimizer = torch.optim.Adam(self.parameters(),
                                               lr=config['lr'])
Code Example #18
File: main.py  Project: taesunwhang/BERT-ResSel
def evaluate_model(args):
  hparams = PARAMS_MAP[args.model]

  hparams = collections.namedtuple("HParams", sorted(hparams.keys()))(**hparams)

  model = Evaluation(hparams)
  model.run_evaluate(args.evaluate)
Code Example #19
    def __init__(self,
                 world,
                 reset_callback=None,
                 reward_callback=None,
                 observation_callback=None,
                 info_callback=None,
                 done_callback=None):

        logger.debug("Simsim Env")

        self._world = world
        self._drones = world.get_drones()
        self._n_drone = world.n_drone
        self._evaluation = Evaluation(world)

        # scenario callbacks
        self.reset_callback = reset_callback
        self.reward_callback = reward_callback
        self.observation_callback = observation_callback
        self.info_callback = info_callback
        self.done_callback = done_callback

        self._action_dim = 4
        # Integer division keeps the observation dimension an int in Python 3
        self._obs_dim = np.reshape(self.get_obs(), -1).shape[0] // self._n_drone
        self._action_max = np.array([20, 20, 0.2,
                                     np.pi / 5])  # environment configuration
        self._action_min = np.array([-20, -20, -0.2,
                                     -np.pi / 5])  # environment configuration
Code Example #20
 def evaluate(self, args, data, cnn):
     res = []
     for idts, idbs, labels in data:
         xt = self.embedding.forward(idts.ravel())
         xt = xt.reshape((idts.shape[0], idts.shape[1], self.embedding.n_d))
         xb = self.embedding.forward(idbs.ravel())
         xb = xb.reshape((idbs.shape[0], idbs.shape[1], self.embedding.n_d))
         titles = Variable(torch.from_numpy(xt)).float()
         bodies = Variable(torch.from_numpy(xb)).float()
         if args.cuda:
             titles = titles.cuda()
             bodies = bodies.cuda()
         outputs = cnn(titles, bodies)
         pos = outputs[0].view(1, outputs[0].size(0))
         scores = torch.mm(pos, outputs[1:].transpose(1, 0)).squeeze()
         if args.cuda:
             scores = scores.data.cpu().numpy()
         else:
             scores = scores.data.numpy()
         assert len(scores) == len(labels)
         ranks = (-scores).argsort()
         ranked_labels = labels[ranks]
         res.append(ranked_labels)
     e = Evaluation(res)
     MAP = e.MAP() * 100
     MRR = e.MRR() * 100
     P1 = e.Precision(1) * 100
     P5 = e.Precision(5) * 100
     return MAP, MRR, P1, P5
Code Example #21
def get_evaluation(evaluation_type, build_dictionaries=False):
    evaluation = Evaluation(build_dictionaries=build_dictionaries)
    if evaluation_type == "-g":
        evaluation.show_precision_recall()
    elif evaluation_type == "-m":
        map_value = evaluation.mean_average_precision()
        print("MAP : %s" % map_value)
Code Example #22
def run_regularized_logistic_regression(tx_train, y_train, tx_val, y_val):
    """It performs training and evaluation of regularized logistic regression."""

    print('\nTraining with regularized logistic regression')
    # Initialize parameters
    initial_w = np.zeros((tx_train.shape[1]))
    gamma = 1e-6
    max_iter = 1000
    lambda_ = 0.00001

    # Train the model
    w, _ = reg_logistic_regression(y=y_train,
                                   tx=tx_train,
                                   initial_w=initial_w,
                                   max_iters=max_iter,
                                   gamma=gamma,
                                   lambda_=lambda_)

    # Perform predictions
    y_pred = predict_labels(weights=w, data=tx_val, logistic=True)

    # Evaluate
    evaluation = Evaluation(y_actual=y_val, y_pred=y_pred)
    acc = evaluation.get_accuracy()
    f1 = evaluation.get_f1()
    print('Accuracy: {acc}, F1: {f1}'.format(acc=acc, f1=f1))

    return acc, f1
Code Example #23
def pipeline(tx_train, y_train, tx_val, y_val, degrees, gamma, lambda_, epochs,
             verbose):
    """ Run the model training and evaluation on the given parameters """

    # Perform data cleaning (missing values, constant features, outliers, standardization)
    data_cleaner = DataCleaning()
    tx_train = data_cleaner.fit_transform(tx_train)
    tx_val = data_cleaner.transform(tx_val)

    # Perform feature engineering
    feature_generator = FeatureEngineering()
    x_train = feature_generator.fit_transform(tx=tx_train, degree=degrees)
    x_val = feature_generator.transform(tx=tx_val)

    # Initialize values
    initial_w = np.zeros(x_train.shape[1])
    # Train model
    w, _ = reg_logistic_regression(y_train, x_train, lambda_, initial_w,
                                   epochs, gamma, verbose)

    # Perform inference on validation
    pred = predict_labels(weights=w, data=x_val, logistic=True)

    evaluator = Evaluation(y_val, pred)
    return evaluator.get_f1(), evaluator.get_accuracy()
Code Example #24
File: models.py  Project: jimmyz42/question-retrieval
def evaluate(all_ranked_labels):
    evaluator = Evaluation(all_ranked_labels)
    MAP = evaluator.MAP()*100
    MRR = evaluator.MRR()*100
    P1 = evaluator.Precision(1)*100
    P5 = evaluator.Precision(5)*100
    return MAP, MRR, P1, P5
Code Example #25
def train(epochs, iterations, outdir, path, batchsize, validsize, model_type):
    # Dataset Definition
    dataloader = DatasetLoader(path)
    print(dataloader)
    t_valid, x_valid = dataloader(validsize, mode="valid")

    # Model & Optimizer Definition
    if model_type == 'ram':
        model = Model()
    elif model_type == 'gan':
        model = Generator()
    else:
        # Fail fast instead of hitting an unbound `model` below
        raise ValueError(f"unknown model_type: {model_type}")
    model.to_gpu()
    optimizer = set_optimizer(model)

    vgg = VGG()
    vgg.to_gpu()
    vgg_opt = set_optimizer(vgg)
    vgg.base.disable_update()

    # Loss Function Definition
    lossfunc = RAMLossFunction()
    print(lossfunc)

    # Evaluation Definition
    evaluator = Evaluation()

    for epoch in range(epochs):
        sum_loss = 0
        for batch in range(0, iterations, batchsize):
            t_train, x_train = dataloader(batchsize, mode="train")

            y_train = model(x_train)
            y_feat = vgg(y_train)
            t_feat = vgg(t_train)
            loss = lossfunc.content_loss(y_train, t_train)
            loss += lossfunc.perceptual_loss(y_feat, t_feat)

            model.cleargrads()
            vgg.cleargrads()
            loss.backward()
            optimizer.update()
            vgg_opt.update()
            loss.unchain_backward()

            sum_loss += loss.data

            if batch == 0:
                serializers.save_npz(f"{outdir}/model_{epoch}.model", model)

                with chainer.using_config('train', False):
                    y_valid = model(x_valid)
                x = x_valid.data.get()
                y = y_valid.data.get()
                t = t_valid.data.get()

                evaluator(x, y, t, epoch, outdir)

        print(f"epoch: {epoch}")
        print(f"loss: {sum_loss / iterations}")
Code Example #26
    def start(origin_path, result_path, eval_path, paint_save_path, actor,
              is_eval_all, is_group_attr):
        attr_name = ['velocity_mean', 'shake']
        origin = DataLoader()
        origin.variable_operate(origin_path,
                                os.path.join(origin_path, 'test.txt'), actor)
        origin_data = origin.variable_get_data()
        origin_xy_data = origin.variable_get_data_with_window(2,
                                                              each_len=2,
                                                              stride=12)
        origin_single_dict = origin.variable_convert_data_to_dict(
            origin_data, stride=12)
        pred = DataLoader()
        pred.operate(result_path, os.path.join(result_path, "data_out.txt"),
                     actor)
        pred_data = pred.get_data()
        pred_xy_data = pred.get_data_with_window(4, 2)
        config = getattr(Config, 'config_ped')
        frame_list = [4, 10]
        eva = Evaluation(frame_list)
        eval_save_path = eval_path
        image_save_path = paint_save_path
        with open(eval_save_path, 'a') as f:
            f.write("skip is %d\n" % config['skip'])
        attr = Attributes(origin_data, origin_xy_data, pred_data, pred_xy_data,
                          config, origin_single_dict, frame_list)
        if is_eval_all:
            attr.operate()
            attr.paint_operate(eva, eval_save_path, save_path=image_save_path)
        else:
            for type_ in attr_name:
                getattr(attr, type_)()
                getattr(attr, "paint_" + type_)(eva,
                                                eval_save_path,
                                                save_path=image_save_path)

        if is_group_attr:
            # smooth & v <= 3
            result_data = attr.get_result_data()

            index1 = attr.select_continuous_attr(result_data, 'velocity_mean',
                                                 0, 3)
            index2 = attr.select_discrete_attr(result_data, 'shakes', 'smooth')
            index = (index1 & index2)
            index = index.to_frame()
            index.columns = ["un_normal"]
            attr.concate_trace_result(index, is_result=True)
            attr_type = [True, False]
            save_path_pie = os.path.join(paint_save_path, 'un_normal_pie.png')
            attr.paint_discrete_pie("un_normal",
                                    attr_type,
                                    save_path=save_path_pie)
            save_path_pie = os.path.join(paint_save_path,
                                         'un_normal_trace.png')
            attr.paint_discrete_trace(eva,
                                      "un_normal",
                                      attr_type,
                                      save_path=save_path_pie,
                                      eval_save_path=eval_save_path)
Code Example #27
 def valence_validation(self):
     self.get_single_validate_data_provider(self.valence_validate_tfrecords)
     predictions = self.get_predictions
     validation = Evaluation(self.single_validate_data_provider,
                             self.batch_size, self.epochs, self.num_classes,
                             self.learning_rate, predictions, 1811,
                             'valence', './ckpt/valence/model.ckpt')
     validation.start_evaluation()
Code Example #28
File: lstm.py  Project: exsmiley/6.806project
def run_epoch(data, model, optimizer, is_training, params):

    batch_size = params['batch_size']
    margin = params['margin']
    loader = torch.utils.data.DataLoader(data,
                                         batch_size=batch_size,
                                         shuffle=True)
    running_loss = 0.0
    predicted = []  # collected across all batches for evaluation mode

    model.train() if is_training else model.eval()
    print(len(loader))
    for i, data in tqdm.tqdm(enumerate(loader)):
        # get the inputs
        if i != len(loader) - 1:
            q_title, q_title_mask, q_body, q_body_mask, c_titles, c_titles_mask, c_bodies, c_bodies_mask, labels = data

            # wrap them in Variable
            q_title, q_title_mask, q_body, q_body_mask = Variable(
                q_title), Variable(q_title_mask), Variable(q_body), Variable(
                    q_body_mask)

            c_titles, c_titles_mask, c_bodies, c_bodies_mask = Variable(
                c_titles), Variable(c_titles_mask), Variable(
                    c_bodies), Variable(c_bodies_mask)

            # zero the parameter gradients
            if is_training:
                optimizer.zero_grad()

            num_c = c_titles.size()[1]

            q_enc = (model(q_body, q_body_mask) +
                     model(q_title, q_title_mask)) / 2

            c_enc = (
                model(c_titles.view(-1, 40), c_titles_mask.view(-1, 40)) +
                model(c_bodies.view(-1, 100), c_bodies_mask.view(-1, 100))) / 2

            q_enc = q_enc.view(batch_size, 1, -1).repeat(1, num_c, 1)
            c_enc = c_enc.view(batch_size, num_c, -1)

            cos_sim = torch.nn.CosineSimilarity(2)
            sims = cos_sim(q_enc, c_enc)

            if is_training:

                loss = max_margin_loss(sims, margin)
                loss.backward()
                optimizer.step()
                running_loss += loss.data[0]
            else:
                # Renamed from `s, i` to avoid shadowing the loop index `i`
                sorted_sims, indices = sims.sort(dim=1, descending=True)
                predicted.extend([labels[x][indices.data[x]]
                                  for x in range(q_title.size()[0])])
    if is_training:
        torch.save(model, 'cnn2.pt')
    return running_loss if is_training else Evaluation(predicted).evaluate()
Code Example #29
 def add_result(self, ranking, sample):
     predicted = RecommendationResult(dict.fromkeys(ranking, 1))
     real = RecommendationResult(sample)
     evaluation = Evaluation(predicted, real, self.repository_size)
     self.precision.append(evaluation.run(Precision()))
     self.recall.append(evaluation.run(Recall()))
     self.fpr.append(evaluation.run(FPR()))
     self.f05.append(evaluation.run(F_score(0.5)))
     self.mcc.append(evaluation.run(MCC()))
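Unlike the other examples, this `Evaluation` accepts metric objects (`Precision()`, `Recall()`, `FPR()`, `F_score(0.5)`, `MCC()`) through a `run()` method. A minimal sketch of that strategy pattern, with set-intersection definitions assumed purely for illustration (the real classes also use `repository_size`, e.g. for FPR):

class Precision:
    """Illustrative metric object: share of recommended items that are relevant."""
    def run(self, predicted, real):
        hits = len(set(predicted) & set(real))
        return hits / len(predicted) if predicted else 0.0

class Recall:
    """Illustrative metric object: share of relevant items that were recommended."""
    def run(self, predicted, real):
        hits = len(set(predicted) & set(real))
        return hits / len(real) if real else 0.0

class MetricEvaluation:
    """Illustrative sketch of the evaluation.run(metric) strategy pattern."""
    def __init__(self, predicted, real):
        self.predicted = list(predicted)
        self.real = list(real)

    def run(self, metric):
        # Delegate the actual computation to the metric object.
        return metric.run(self.predicted, self.real)

# Usage (illustrative):
ev = MetricEvaluation(predicted=["a", "b", "c"], real=["b", "d"])
print(ev.run(Precision()), ev.run(Recall()))  # 0.333..., 0.5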
Code Example #30
File: main.py  Project: emiperez95/aprendAut2
def normalTrain(data, evData, classAmm, modelType, argv, dumpArgv):
    start = time.time()
    model = makeNode(*argv) if modelType == 0 else PoolTree(*argv)
    dumpDir = dumpArgv[0] + "_RUN_" + "WHOLE" + "_{}_{}".format(
        argv[3], argv[5]) + dumpArgv[1]
    timer = time.time() - start
    dumpModel(model, dumpDir)
    score = Evaluation(model, evData, classAmm)
    return model, timer, score