Code Example #1
File: main.py Project: hlee95/unclear-questions
def eval_model(model,
               data,
               model_type,
               use_dev,
               use_title=True,
               use_body=True):
    print "Evaluating %s on %s dataset..." % (model_type.name,
                                              'dev' if use_dev else 'test')
    ranked_scores = []
    num_batches = len(data.dev_data) if use_dev else len(data.test_data)
    for i in xrange(num_batches):
        title, body, similar = data.get_next_eval_feature(use_dev)
        h = run_model(model, title, body, use_title, use_body, model_type)
        candidate_scores = []
        # The candidates are all results after the first one, which is h_q.
        h_q = h[0]
        for c in h[1:]:
            candidate_scores.append(get_cosine_similarity(h_q, c))
        # Sort candidate scores in decreasing order and remember which are the
        # correct similar questions.
        ranked_index = np.array(candidate_scores).argsort()[::-1]
        ranked_score = np.isin(ranked_index, similar).astype(int)
        ranked_scores.append(ranked_score)
    eval_obj = Eval(np.array(ranked_scores))
    print "MAP:", eval_obj.MAP()
    print "MRR:", eval_obj.MRR()
    print "Precision@1:", eval_obj.Precision(1)
    print "Precision@5:", eval_obj.Precision(5)
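For reference, the Eval object above is constructed from one 0/1 relevance array per query, ordered by decreasing model score. Below is a minimal stand-alone sketch of how MAP, MRR, and Precision@k can be computed from such arrays under their standard definitions; it is an illustration only, not the project's Eval class, and the function names are hypothetical.

import numpy as np

def mean_average_precision(ranked_scores):
    # ranked_scores: one 0/1 array per query, ordered by decreasing model score.
    aps = []
    for rs in ranked_scores:
        hits = np.where(np.asarray(rs) == 1)[0]
        if len(hits) == 0:
            aps.append(0.0)
            continue
        # Precision at each rank where a relevant item appears.
        precisions = [(k + 1.0) / (rank + 1.0) for k, rank in enumerate(hits)]
        aps.append(np.mean(precisions))
    return float(np.mean(aps))

def mean_reciprocal_rank(ranked_scores):
    rrs = []
    for rs in ranked_scores:
        hits = np.where(np.asarray(rs) == 1)[0]
        rrs.append(1.0 / (hits[0] + 1.0) if len(hits) else 0.0)
    return float(np.mean(rrs))

def precision_at_k(ranked_scores, k):
    return float(np.mean([np.mean(np.asarray(rs)[:k]) for rs in ranked_scores]))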
Code Example #2
def eval(out_file, tgt_file):
    """
        Given a prediction file and a target (reference) file, calculate
        the metric scores of the predictions against the references.
    """

    with open(out_file, 'r') as infile:
        out = [line[:-1] for line in infile]

    with open(tgt_file, "r") as infile:
        tgt = [line[:-1] for line in infile]

    ## eval
    from collections import defaultdict
    from eval import Eval
    import json
    from json import encoder
    encoder.FLOAT_REPR = lambda o: format(o, '.4f')

    res = defaultdict(lambda: [])
    gts = defaultdict(lambda: [])
    for idx, (out_, tgt_) in enumerate(zip(out, tgt)):
        res[idx] = [out_.encode('utf-8')]

        ## gts
        gts[idx] = [tgt_.encode('utf-8')]

    eval = Eval(gts, res)
    return eval.evaluate()
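A minimal usage sketch for the helper above, assuming hypothetical file names: out.txt holds one prediction per line and tgt.txt the aligned reference per line.

if __name__ == '__main__':
    # Line i of the prediction file is scored against line i of the target file.
    scores = eval('out.txt', 'tgt.txt')
    print(scores)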
Code Example #3
File: main.py Project: openpublicforpapers/NeuralMCS
def main():
    if FLAGS.tvt_strategy == 'holdout':
        train_data, test_data = load_train_test_data()
        print('Training...')
        trained_model = train(train_data, saver)
        if FLAGS.save_model:
            saver.save_trained_model(trained_model)
        if FLAGS.debug:
            print('Debugging: Feed train data for testing and eval')
            test_data = train_data
        print('Testing...')
        test(test_data, trained_model, saver)
        eval = Eval(trained_model, train_data, test_data, saver)
        eval.eval_on_test_data()
    elif '-fold' in FLAGS.tvt_strategy:
        # for fold in range(10):
        #     if _train_model(..., saver):
        #         ...
        raise NotImplementedError()
    else:
        assert False
    overall_time = convert_long_time_to_str(time() - t)
    print(overall_time)
    print(saver.get_log_dir())
    print(basename(saver.get_log_dir()))
    saver.save_overall_time(overall_time)
    saver.close()
Code Example #4
def test(data, dist_calculator, model, saver, sess):
    # Test.
    eval = Eval(FLAGS.dataset, FLAGS.dist_algo,
                FLAGS.sim_kernel, FLAGS.yeta, FLAGS.plot_results)
    m, n = data.m_n()
    test_sim_mat = np.zeros((m, n))
    test_time_mat = np.zeros((m, n))
    run_tf(data, dist_calculator, model, saver, sess,
           'test', 0, 0)  # flush the pipeline
    print('i,j,time,sim,true_sim')
    for i in range(m):
        for j in range(n):
            sim_i_j, test_time = run_tf(
                data, dist_calculator, model, saver, sess,
                'test', i, j)
            test_time *= 1000
            true_sim = eval.get_true_sim(i, j, FLAGS.dist_norm)
            print('{},{},{:.2f}msec,{:.4f},{:.4f}'.format(
                i, j, test_time, sim_i_j, true_sim))
            # assert (0 <= sim_i_j <= 1)
            test_sim_mat[i][j] = sim_i_j
            test_time_mat[i][j] = test_time
    print('Evaluating...')
    results = eval.eval_test(FLAGS.model, test_sim_mat, test_time_mat)
    print('Results generated with {} metrics'.format(len(results)))
    pretty_print_dict(results)
    return results
Code Example #5
def train(train_iter, test_iter, model, args):
    if args.use_cuda:
        model.cuda()

    optimizer = None
    if args.Adam is True:
        print("Adam Training......")
        # optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            model.parameters()),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)

    file = open("./Test_Result.txt", encoding="UTF-8", mode="a", buffering=1)
    best_acc = Best_Result()

    steps = 0
    model_count = 0
    model.train()
    max_dev_acc = -1
    train_eval = Eval()
    dev_eval = Eval()
    test_eval = Eval()
    for epoch in range(1, args.epochs + 1):
        print("\n## Epoch {} of {} ##".format(epoch, args.epochs))
        print("now lr is {}".format(optimizer.param_groups[0].get("lr")))
        random.shuffle(train_iter)
        model.train()
        # train_eval.clear()
        for batch_count, batch_features in enumerate(train_iter):
            model.zero_grad()
            # optimizer.zero_grad()
            logit = model(batch_features)
            # print(logit.size())
            # train_eval.clear_PRF()
            cal_train_acc(batch_features, train_eval, logit, args)
            loss = F.cross_entropy(
                logit.view(logit.size(0) * logit.size(1), logit.size(2)),
                batch_features.label_features)
            # print(loss)
            loss.backward()
            if args.clip_max_norm is not None:
                utils.clip_grad_norm(model.parameters(),
                                     max_norm=args.clip_max_norm)
            optimizer.step()
            steps += 1
            if steps % args.log_interval == 0:
                sys.stdout.write(
                    "\rbatch_count = [{}] , loss is {:.6f} , (correct/ total_num) = acc ({} / {}) = "
                    "{:.6f}%".format(batch_count + 1, loss.data[0],
                                     train_eval.correct_num,
                                     train_eval.gold_num,
                                     train_eval.acc() * 100))
        if steps != 0:
            # print("\n{} epoch dev F-score".format(epoch))
            # print("\n")
            # test_eval.clear()
            eval(test_iter, model, test_eval, file, best_acc, epoch, args)
Code Example #6
def wordspotting():
    # Load the document
    pageNumber = 2700270
    document_image_filename = 'pages/%d.png' % (pageNumber)
    image = Image.open(document_image_filename)
    im_arr = np.asarray(image, dtype='float32')
    # Load the ground truth
    gt = open("GT/%d.gtp" % (pageNumber))
    # Load the codebook
    input_file = open('codebook/codebook.bin', 'r')
    codebook = np.fromfile(input_file, dtype='float32')
    codebook = np.reshape(codebook, (4096, 128))
    words = []
    # Read the ground truth
    for line in gt:
        parts = line.rstrip().split()  # x1 y1 x2 y2 transcription
        word_img = im_arr[int(parts[1]):int(parts[3]), int(parts[0]):int(parts[2])]
        words.append(word(word_img, parts[4], codebook))
    gt.close()
    print "Descriptors computed"
    bof = []
    # Collect the bag-of-features (BoF) of all words
    for word_ in words:
        bof.append(word_.getBof())
    bof = np.array(bof)

    dim = [1, 5, 10, 15, 30, 50, 100, 200]
    for d in dim:
        # Reduce dimensionality to d dimensions
        tsp = TopicSubSpace(d)
        tsp.estimate(bof)
        bof_n = tsp.transform(bof)
        print bof_n.shape
        # Compute pairwise distances between the word BoFs and sort them
        dists = distance.cdist(bof_n, bof_n, 'euclidean')
        dists = argsort(dists, axis=1)

        result_word = np.array([([words[i].getWord() for i in c])
                                for c in dists])
        result_img = np.array([([words[i].getImg() for i in c])
                               for c in dists])

        res = np.zeros(result_word.shape)
        # Mark retrieved entries that match the query word (first entry of each row).
        for i, row in enumerate(result_word):
            for j, retrieved in enumerate(row):
                if row[0] == retrieved:
                    res[i, j] = 1

        print result_word[21, :25]
        print res[21, :25]
        ev = Eval()
        map = ev.mean_avarage_precision(res.tolist())
        print map
Code Example #7
def test(model, data, test_train_links, saver, fold_num):
    print("testing...")
    fold_str = '' if fold_num is None else 'Fold_{}_'.format(fold_num)

    pairs, loss = evaluate(model, data, test_train_links, saver, test=True)
    eval = Eval(model, data, pairs, set_name="test", saver=saver)
    res = eval.eval(fold_str=fold_str)
    if COMET_EXPERIMENT:
        with COMET_EXPERIMENT.test():
            COMET_EXPERIMENT.send_notification(saver.get_f_name(),
                                               status="finished",
                                               additional_data=res)
Code Example #8
def train(train_iter, dev_iter, test_iter, model, args):
    if args.use_cuda:
        model.cuda()

    optimizer = None
    if args.Adam is True:
        print("Adam Training......")
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)
        # optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr,
        #                              weight_decay=args.weight_decay)

    file = open("./Test_Result.txt", encoding="UTF-8", mode="a", buffering=1)
    best_fscore = Best_Result()

    steps = 0
    model_count = 0
    model.train()
    max_dev_acc = -1
    train_eval = Eval()
    dev_eval = Eval()
    test_eval = Eval()
    for epoch in range(1, args.epochs + 1):
        print("\n## Epoch {} of {} ##".format(epoch, args.epochs))
        print("now lr is {}".format(optimizer.param_groups[0].get("lr")))
        random.shuffle(train_iter)
        model.train()
        for batch_count, batch_features in enumerate(train_iter):
            model.zero_grad()
            loss = model.forward(batch_features, train=True)
            loss.backward()
            optimizer.step()
            steps += 1
            if steps % args.log_interval == 0:
                sys.stdout.write(
                    "\rbatch_count = [{}] , loss is {:.6f}".format(
                        batch_count + 1, loss.data[0]))
        if steps != 0:
            dev_eval.clear_PRF()
            eval(dev_iter,
                 model,
                 dev_eval,
                 file,
                 best_fscore,
                 epoch,
                 args,
                 test=False)
        if steps != 0:
            test_eval.clear_PRF()
            eval(test_iter,
                 model,
                 test_eval,
                 file,
                 best_fscore,
                 epoch,
                 args,
                 test=True)
Code Example #9
def main():
    exec_turnoff_print()
    sim_mat_dict, ttsp = load_train_test_joint_sim_mat()
    eval_dict = {'nonorm': Eval(
        DATASET, DIST_ALGO, SIM_KERNEL, NONORM_YETA, plot_results=True),
        'norm': Eval(
            DATASET, DIST_ALGO, SIM_KERNEL, NORM_YETA, plot_results=True)}
    for norm_str, sim_mat in sim_mat_dict.items():
        for dim in DIMS:
            emb = perform_svd(sim_mat, dim)
            evaluate_emb(
                emb, ttsp, eval_dict[norm_str],
                '{}_dist={}_dim={}'.format(MODEL, norm_str, dim))
Code Example #10
def main():
    t = time()
    check_flags()
    print(get_model_info_as_str())
    data_train = SiameseModelData(FLAGS.dataset_train)
    dist_sim_calculator = DistSimCalculator(
        FLAGS.dataset_train, FLAGS.ds_metric, FLAGS.ds_algo)
    model = create_model(FLAGS.model, data_train.input_dim(),
                         data_train, dist_sim_calculator)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    saver = Saver(sess)
    sess.run(tf.global_variables_initializer())
    if FLAGS.dataset_val_test == FLAGS.dataset_train:
        data_val_test = data_train
    else:
        # Generalizability test: val test on unseen train and test graphs.
        data_val_test = SiameseModelData(FLAGS.dataset_val_test)
    eval = Eval(data_val_test, dist_sim_calculator)
    try:
        train_costs, train_times, val_results_dict = \
            train_val_loop(data_train, data_val_test, eval, model, saver, sess)
        best_iter, test_results = \
            test(data_val_test, eval, model, saver, sess, val_results_dict)
        overall_time = convert_long_time_to_str(time() - t)
        print(overall_time)
        saver.save_overall_time(overall_time)
    except:
        traceback.print_exc()
    else:
        return train_costs, train_times, val_results_dict, best_iter, test_results
Code Example #11
def validation(model, data, val_links, saver, max_num_examples=None):
    pairs, loss = evaluate(model,
                           data,
                           val_links,
                           saver,
                           max_num_examples=max_num_examples)
    res, supplement = Eval.eval_pair_list(pairs, FLAGS)
    res["Loss"] = loss
    return res, supplement
Code Example #12
    def train(self, train_file, dev_file, test_file):
        self.hyperParams.show()
        torch.set_num_threads(self.hyperParams.thread)
        reader = Reader(self.hyperParams.maxInstance)

        trainInsts = reader.readInstances(train_file)
        devInsts = reader.readInstances(dev_file)

        trainExamples = self.instance2Example(trainInsts)
        devExamples = self.instance2Example(devInsts)

        print("Training Instance: ", len(trainInsts))
        print("Dev Instance: ", len(devInsts))

        self.createAlphabet(trainInsts)

        self.model = RNNLabeler(self.hyperParams)
        optimizer = torch.optim.Adagrad(self.model.parameters(),
                                        lr=self.hyperParams.learningRate)

        indexes = []
        for idx in range(len(trainExamples)):
            indexes.append(idx)

        for iter in range(self.hyperParams.maxIter):
            print('###Iteration' + str(iter) + "###")
            random.shuffle(indexes)
            for idx in range(len(trainExamples)):
                self.model.zero_grad()
                self.model.LSTMHidden = self.model.init_hidden()
                exam = trainExamples[indexes[idx]]
                tag_scores = self.model(exam.feat)
                loss = torch.nn.functional.cross_entropy(
                    tag_scores, exam.labelIndexs)
                loss.backward()
                optimizer.step()
                if (idx + 1) % self.hyperParams.verboseIter == 0:
                    print('current: ', idx + 1, ", cost:", loss.data[0])

            eval_dev = Eval()
            for idx in range(len(devExamples)):
                predictLabels = self.predict(devExamples[idx])
                devInsts[idx].evalPRF(predictLabels, eval_dev)
            eval_dev.getFscore()
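Here Eval acts as a precision/recall/F-score accumulator: each devInsts[idx].evalPRF call adds that instance's counts, and getFscore reports the aggregate. A minimal sketch of such an accumulator under the standard P/R/F1 definitions (illustrative only; the class name and attribute names below are assumptions, not the project's Eval):

class PRFEval(object):
    def __init__(self):
        self.predict_num = 0   # number of predicted labels/spans
        self.gold_num = 0      # number of gold labels/spans
        self.correct_num = 0   # number of correct predictions

    def getFscore(self):
        p = self.correct_num / float(self.predict_num) if self.predict_num else 0.0
        r = self.correct_num / float(self.gold_num) if self.gold_num else 0.0
        f = 2 * p * r / (p + r) if (p + r) else 0.0
        print("P: {:.4f} R: {:.4f} F: {:.4f}".format(p, r, f))
        return f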
Code Example #13
    def train(self, train_file, dev_file, test_file):
        self.hyperParams.show()
        torch.set_num_threads(self.hyperParams.thread)
        reader = Reader(self.hyperParams.maxInstance)

        trainInsts = reader.readInstances(train_file)
        devInsts = reader.readInstances(dev_file)
        testInsts = reader.readInstances(test_file)
        print("Training Instance: ", len(trainInsts))
        print("Dev Instance: ", len(devInsts))
        print("Test Instance: ", len(testInsts))

        self.createAlphabet(trainInsts, devInsts, testInsts)

        trainExamples = self.instance2Example(trainInsts)
        devExamples = self.instance2Example(devInsts)
        testExamples = self.instance2Example(testInsts)

        self.model = RNNLabeler(self.hyperParams)
        parameters = filter(lambda p: p.requires_grad, self.model.parameters())
        optimizer = torch.optim.Adagrad(parameters,
                                        lr=self.hyperParams.learningRate)

        indexes = []
        for idx in range(len(trainExamples)):
            indexes.append(idx)
        for iter in range(self.hyperParams.maxIter):
            print('###Iteration' + str(iter) + "###")
            random.shuffle(indexes)
            for idx in range(len(trainExamples)):
                self.model.zero_grad()
                self.model.LSTMHidden = self.model.init_hidden()
                exam = trainExamples[indexes[idx]]
                lstm_feats = self.model(exam.feat)
                loss = self.model.crf.neg_log_likelihood(
                    lstm_feats, exam.labelIndexs)
                loss.backward()
                optimizer.step()
                if (idx + 1) % self.hyperParams.verboseIter == 0:
                    print('current: ', idx + 1, ", cost:", loss.data[0])

            eval_dev = Eval()
            for idx in range(len(devExamples)):
                predictLabels = self.predict(devExamples[idx])
                devInsts[idx].evalPRF(predictLabels, eval_dev)
            print('Dev: ', end="")
            eval_dev.getFscore()

            eval_test = Eval()
            for idx in range(len(testExamples)):
                predictLabels = self.predict(testExamples[idx])
                testInsts[idx].evalPRF(predictLabels, eval_test)
            print('Test: ', end="")
            eval_test.getFscore()
Code Example #14
    def eval_test(self):
        total_time = 0.
        mae_record, psnr_record = np.zeros(self.val_dataset.num_persons), np.zeros(self.val_dataset.num_persons)

        # create csv file
        csvfile = open(os.path.join(self.test_out_dir, 'stat.csv'), 'w', newline='')
        csvwriter = csv.writer(csvfile, delimiter=',')
        csvwriter.writerow(['p_id', 'MAE', 'PSNR', 'MAE std', 'PSNR std'])

        global_iter = 0
        for p_id in range(self.val_dataset.num_persons):
            self.evaluator = Eval(self.val_dataset.image_size, self.val_dataset.num_vals[p_id])
            samples, y_imgs = [], []

            for iter_ in range(self.val_dataset.num_vals[p_id]):
                print('p_id: {}, iter: {}'.format(p_id, iter_))

                x_img, y_img = self.val_dataset.val_next_batch(p_id, iter_, which_direction=self.flags.which_direction)
                start_time = time.time()
                imgs = self.model.test_step(x_img, y_img)
                total_time += time.time() - start_time

                # utils.plots(imgs, global_iter, self.val_dataset.image_size, save_file=self.test_out_dir)
                self.plots(imgs, global_iter)

                samples.append(imgs[1])  # imgs[1] == fake_y
                y_imgs.append(y_img)
                global_iter += 1

            # calculate MAE and PSNR
            mae_record[p_id], psnr_record[p_id] = self.evaluator.calculate(samples, y_imgs)
            # write to csv file
            csvwriter.writerow([p_id + 1, mae_record[p_id], psnr_record[p_id]])

        for p_id in range(self.val_dataset.num_persons):
            print('p_id: {}, MAE: {:.2f}, PSNR: {:.2f}'.format(p_id, mae_record[p_id], psnr_record[p_id]))

        print('MAE Avg. {:.2f} and SD. {:.2f}'.format(np.mean(mae_record), np.std(mae_record)))
        print('PSNR Avg. {:.2f} and SD. {:.2f}'.format(np.mean(psnr_record), np.std(psnr_record)))
        print('Average PT: {:.2f} msec.'.format((total_time / np.sum(self.val_dataset.num_vals)) * 1000))

        # write to csv file for mean and std of MAE and PSNR
        csvwriter.writerow(['MEAN', np.mean(mae_record), np.mean(psnr_record), np.std(mae_record), np.std(psnr_record)])
        csvfile.close()
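The evaluator's calculate call above reduces a list of generated samples and their ground-truth images to a per-person MAE and PSNR. A minimal sketch of those two measures under their standard definitions, assuming an 8-bit intensity range (illustrative only; not the project's Eval.calculate):

import numpy as np

def mae_psnr(samples, y_imgs, max_val=255.0):
    # samples, y_imgs: lists of equally shaped arrays (predictions vs. ground truth).
    preds = np.asarray(samples, dtype=np.float64)
    gts = np.asarray(y_imgs, dtype=np.float64)
    mae = float(np.mean(np.abs(preds - gts)))
    mse = float(np.mean((preds - gts) ** 2))
    psnr = 10.0 * np.log10((max_val ** 2) / mse) if mse > 0 else float('inf')
    return mae, psnr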
Code Example #15
    def runLevel(self):
        #start level
        self.isLevelRunning = True
        startTime = time.time() #begin timer

        #create listener
        listen2Me = Listener('user_output.txt', self.levelToRun.getValidShortcutsList())
        listen2Me.start() #start listening user input and stop when shortcuts are done
        self.isLevelRunning = False

        #end level and record time,  user input, and actions
        endTime = time.time() #end timer
        self.elapsedTime = endTime - startTime #record elapsed time

        self.userInput = listen2Me.getCompletedShortcuts()
        self.userActions = len(listen2Me.completedShortcuts) #listener needs to be changed to count all actions

        #create eval object 
        eval1 = Eval(self.levelToRun, self.userInput, self.userActions, self.elapsedTime)
        eval1.evaluate() #run evaluation
Code Example #16
 def testEval1(self):
     lines = [
         'offset = 4 + random + 1', 'location = 1 + origin + offset',
         'origin = 3 + 5', 'random = 2', 'tick=tock+133',
         '   tock=   random+5+   7+ location', ''
     ]
     result = Eval().solve(lines)
     print(result)
     assert (result['origin'] == 8)
     assert (result['random'] == 2)
     assert (result['location'] == 16)
     assert (result['offset'] == 7)
     assert (result['tick'] == 163)
     assert (result['tock'] == 30)
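This test and testEval2 (Code Example #18) pin down the expected behaviour of Eval().solve: each non-empty line is an assignment of the form name = term + term + ..., where a term is an integer literal or another variable, forward references are allowed, and the result is a dict of resolved values. A minimal stand-alone solver sketch inferred from these assertions (illustrative only; the tested Eval class is not shown here, and solve_assignments is a hypothetical name):

def solve_assignments(lines):
    # Parse "name = term + term + ..." assignments; blank lines are ignored.
    exprs = {}
    for line in lines:
        if not line.strip():
            continue
        name, rhs = line.split('=', 1)
        exprs[name.strip()] = [t.strip() for t in rhs.split('+')]

    values = {}
    # Repeatedly resolve whatever can be resolved, so forward references work.
    while len(values) < len(exprs):
        progressed = False
        for name, terms in exprs.items():
            if name in values:
                continue
            try:
                values[name] = sum(int(t) if t.lstrip('-').isdigit() else values[t]
                                   for t in terms)
                progressed = True
            except KeyError:
                continue
        if not progressed:
            raise ValueError('circular or undefined reference')
    return values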
Code Example #17
    def run(self,
            n_epochs=20,
            rebuild_metaphors=True,
            early_stopping_limit=10,
            verbose=True):

        # By default the training and testing data is split for every new run.
        if rebuild_metaphors:
            self.metaphors = MetaphorData(
                self.labelled_data_loc,
                self.w2v_model,
                train_ratio=self.train_ratio,
                validation_ratio=self.validation_ratio)
            self.train, self.test = self.metaphors.split_train_test()

        # Build checkpoint name based on parameters.
        checkpoint_name = os.path.join('modelruns', self.run_directory,
                                       '-'.join(str(n) for n in self.n_hidden))
        checkpoint_name += '-{}'.format(self.train_ratio)
        checkpoint_name += '-{}'.format(self.validation_ratio)
        checkpoint_name += '-{}'.format(self.learning_rate)
        checkpoint_name += '-{}'.format(self.activation.__name__)

        # Run nn training.
        X, probabilities, logits = train_network(
            self.w2v_model,
            self.train,
            checkpoint_name,
            n_epochs=n_epochs,
            n_hidden=self.n_hidden,
            batch_size=self.batch_size,
            learning_rate=self.learning_rate,
            early_stopping_limit=early_stopping_limit,
            verbose=verbose)

        # Standard save and reload, but TODO is it necessary? Seems sess could
        # be returned as well from train_network.
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, checkpoint_name)
            # Z = logits.eval(feed_dict={X: self.test.embedded_sentences})
            Z, probabilities = sess.run(
                [logits, probabilities],
                feed_dict={X: self.test.embedded_sentences})
            y_pred = np.argmax(Z, axis=1)

        self.test.add_predictions(y_pred, probabilities)

        return Eval(self.test)
Code Example #18
 def testEval2(self):
     lines = [
         'offset = 55 + random + 1',
         'location = 1 + origin + offset+33+toe',
         'origin = toe + 5+1+1+1+111110', 'random = 2', 'tick=tock+133',
         '   tock=   random+5+   7+ location', 'toe=0',
         'lol=999999999999999999999'
     ]
     result = Eval().solve(lines)
     print(result)
     assert (result['origin'] == 111118)
     assert (result['random'] == 2)
     assert (result['location'] == 111210)
     assert (result['offset'] == 58)
     assert (result['tick'] == 111357)
     assert (result['tock'] == 111224)
Code Example #19
def main():
    t = time()
    check_flags()
    print(get_model_info_as_str())
    data = SiameseModelData()
    dist_calculator = DistCalculator(
        FLAGS.dataset, FLAGS.dist_metric, FLAGS.dist_algo)
    model = create_model(FLAGS.model, data.input_dim(), data, dist_calculator)
    os.environ["CUDA_VISIBLE_DEVICES"] = str(FLAGS.gpu)
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    saver = Saver(sess)
    sess.run(tf.global_variables_initializer())
    eval = Eval(data, dist_calculator)
    train_costs, train_times, val_results_dict = \
        train_val_loop(data, eval, model, saver, sess)
    best_iter, test_results = \
        test(data, eval, model, saver, sess, val_results_dict)
    overall_time = convert_long_time_to_str(time() - t)
    print(overall_time)
    saver.save_overall_time(overall_time)
    return train_costs, train_times, val_results_dict, best_iter, test_results
Code Example #20
            model_b6 = model_b6.to(device)
            model_200D = model_200D.to(device)
            if len(device_ids) > 1:
                model_b6 = torch.nn.DataParallel(model_b6,
                                                 device_ids=device_ids)
                model_200D = torch.nn.DataParallel(model_200D,
                                                   device_ids=device_ids)
            # checkpoint_model_b6 = torch.load("E:/Ameme/test/checkpoint-epoch3.pth")
            checkpoint200D = torch.load("E:/Ameme/test/checkpoint-epoch2.pth")
            model_200D.load_state_dict(checkpoint200D['state_dict'])
            # model_b6.load_state_dict(checkpoint_model_b6['state_dict'])
            criterion = eval("loss_module." + "BinaryCrossEntropyLoss")
            # metrics
            metrics = [eval("metric_module." + met) for met in ["AUC"]]

            test = Eval([model_200D], criterion, metrics, device)
            processBar = st.progress(0)
            xfold_res = []
            for idx, (_, x) in enumerate(x_fold_dataloader):
                xfold_res.append(test.eval(x)['AUC'])
                # processBar.progress(idx / x_fold * 100)
            st.write(str(np.mean(xfold_res)))
            logger.info("metric: {}", str(np.mean(xfold_res)))

        if distiller_btn:
            distiller_save_path = st.text_input("distiller_save_path",
                                                value="./test/")
            distiller_stop_btn = st.button("distiller_stop")
            distiller_empty = st.empty()
            model_200d = eval("model_module." + "ResNet200D")(num_classes=11)
            device, device_ids = prepare_device(cfg['N_GPU'])
Code Example #21
def train(train_iter, dev_iter, test_iter, model_encoder, model_decoder, args):

    if args.Adam is True:
        print("Adam Training......")
        # Materialize the parameter filters as lists so they can be reused
        # (e.g. by gradient clipping below); a bare filter() iterator would be
        # exhausted after its first use under Python 3.
        model_encoder_parameters = list(filter(lambda p: p.requires_grad, model_encoder.parameters()))
        model_decoder_parameters = list(filter(lambda p: p.requires_grad, model_decoder.parameters()))
        optimizer_encoder = torch.optim.Adam(params=model_encoder_parameters,
                                             lr=args.lr,
                                             weight_decay=args.init_weight_decay)
        optimizer_decoder = torch.optim.Adam(params=model_decoder_parameters,
                                             lr=args.lr,
                                             weight_decay=args.init_weight_decay)
        # optimizer_encoder = torch.optim.Adam(model_encoder.parameters(), lr=args.lr, weight_decay=args.init_weight_decay)
        # optimizer_decoder = torch.optim.Adam(model_decoder.parameters(), lr=args.lr, weight_decay=args.init_weight_decay)

    steps = 0
    model_count = 0
    # for dropout in train / dev / test
    model_encoder.train()
    model_decoder.train()
    time_list = []
    train_eval = Eval()
    dev_eval_seg = Eval()
    dev_eval_pos = Eval()
    test_eval_seg = Eval()
    test_eval_pos = Eval()
    for epoch in range(1, args.epochs+1):
        print("\n## Epoch {} of {} ##\n".format(epoch, args.epochs))
        print("optimizer_encoder now lr is {}".format(optimizer_encoder.param_groups[0].get("lr")))
        print("optimizer_decoder now lr is {} \n".format(optimizer_decoder.param_groups[0].get("lr")))

        # train time
        start_time = time.time()

        # shuffle
        random.shuffle(train_iter)
        # random.shuffle(dev_iter)
        # random.shuffle(test_iter)

        model_encoder.train()
        model_decoder.train()

        for batch_count, batch_features in enumerate(train_iter):

            model_encoder.zero_grad()
            model_decoder.zero_grad()

            maxCharSize = batch_features.char_features.size()[1]
            encoder_out = model_encoder(batch_features)
            decoder_out, state = model_decoder(batch_features, encoder_out, train=True)

            cal_train_acc(batch_features, train_eval, batch_count, decoder_out, maxCharSize, args)

            loss = torch.nn.functional.nll_loss(decoder_out, batch_features.gold_features)
            # loss = F.cross_entropy(decoder_out, batch_features.gold_features)

            loss.backward()

            if args.init_clip_max_norm is not None:
                utils.clip_grad_norm(model_encoder_parameters, max_norm=args.init_clip_max_norm)
                utils.clip_grad_norm(model_decoder_parameters, max_norm=args.init_clip_max_norm)

            optimizer_encoder.step()
            optimizer_decoder.step()

            steps += 1
            if steps % args.log_interval == 0:
                sys.stdout.write("\rbatch_count = [{}] , loss is {:.6f} , (correct/ total_num) = acc ({} / {}) = "
                                 "{:.6f}%".format(batch_count + 1, loss.data[0], train_eval.correct_num,
                                                  train_eval.gold_num, train_eval.acc() * 100))
            # if steps % args.dev_interval == 0:
            #     print("\ndev F-score")
            #     dev_eval_pos.clear()
            #     dev_eval_seg.clear()
            #     eval(dev_iter, model_encoder, model_decoder, args, dev_eval_seg, dev_eval_pos)
            #     # model_encoder.train()
            #     # model_decoder.train()
            # if steps % args.test_interval == 0:
            #     print("test F-score")
            #     test_eval_pos.clear()
            #     test_eval_seg.clear()
            #     eval(test_iter, model_encoder, model_decoder, args, test_eval_seg, test_eval_pos)
            #     print("\n")
        # train time
        end_time = time.time()
        print("\ntrain time cost: ", end_time - start_time, 's')
        time_list.append(end_time - start_time)
        if len(time_list) != 0:
            avg_time = sum(time_list) / len(time_list)
            print("{} - {} epoch avg time {}".format(1, epoch, avg_time))
        model_encoder.eval()
        model_decoder.eval()
        if steps != 0:
            print("\n{} epoch dev F-score".format(epoch))
            dev_eval_pos.clear()
            dev_eval_seg.clear()
            eval(dev_iter, model_encoder, model_decoder, dev_eval_seg)
            # model_encoder.train()
            # model_decoder.train()
        if steps != 0:
            print("{} epoch test F-score".format(epoch))
            test_eval_pos.clear()
            test_eval_seg.clear()
            eval(test_iter, model_encoder, model_decoder, test_eval_seg)
            print("\n")
Code Example #22
File: accuracy.py Project: nachocano/asml
 def __init__(self, alpha=1):
   Eval.__init__(self, alpha)
Code Example #23
class Trainer():
    def __init__(self, args, cuda=None):
        self.args = args
        self.cuda = cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')

        self.current_MIoU = 0
        self.best_MIou = 0
        self.current_epoch = 0
        self.current_iter = 0

        self.batch_idx = 0

        # set TensorboardX
        self.writer = SummaryWriter()

        # Metric definition
        self.Eval = Eval(self.args.num_classes)

        if self.args.loss == 'tanimoto':
            self.loss = tanimoto_loss()
        else:
            self.loss = nn.BCEWithLogitsLoss()

        self.loss.to(self.device)

        # model
        self.model = DeepLab(output_stride=self.args.output_stride,
                             class_num=self.args.num_classes,
                             num_input_channel=self.args.input_channels,
                             pretrained=self.args.imagenet_pretrained
                             and self.args.pretrained_ckpt_file is None,
                             bn_eps=self.args.bn_eps,
                             bn_momentum=self.args.bn_momentum,
                             freeze_bn=self.args.freeze_bn)

        if torch.cuda.device_count() > 1:
            self.model = nn.DataParallel(self.model)
            patch_replication_callback(self.model)
            self.m = self.model.module
        else:
            self.m = self.model
        self.model.to(self.device)

        self.optimizer = torch.optim.SGD(
            params=[
                {
                    "params": self.get_params(self.m, key="1x"),
                    "lr": self.args.lr,
                },
                {
                    "params": self.get_params(self.m, key="10x"),
                    "lr": 10 * self.args.lr,
                },
            ],
            momentum=self.args.momentum,
            weight_decay=self.args.weight_decay,
        )

        self.dataloader = ISICDataLoader(self.args)
        self.epoch_num = ceil(self.args.iter_max /
                              self.dataloader.train_iterations)

        if self.args.input_channels == 3:
            self.train_func = self.train_3ch
            if args.using_bb != 'none':
                if self.args.store_result:
                    self.validate_func = self.validate_crop_store_result
                else:
                    self.validate_func = self.validate_crop
            else:
                self.validate_func = self.validate_3ch
        else:
            self.train_func = self.train_4ch
            self.validate_func = self.validate_4ch

        if self.args.store_result:
            self.validate_one_epoch = self.validate_one_epoch_store_result

    def main(self):
        logger.info("Global configuration as follows:")
        for key, val in vars(self.args).items():
            logger.info("{:16} {}".format(key, val))

        if self.cuda:
            current_device = torch.cuda.current_device()
            logger.info("This model will run on {}".format(
                torch.cuda.get_device_name(current_device)))
        else:
            logger.info("This model will run on CPU")

        if self.args.pretrained_ckpt_file is not None:
            self.load_checkpoint(self.args.pretrained_ckpt_file)

        if self.args.validate:
            self.validate()
        else:
            self.train()

        self.writer.close()

    def train(self):
        for epoch in tqdm(range(self.current_epoch, self.epoch_num),
                          desc="Total {} epochs".format(self.epoch_num)):
            self.current_epoch = epoch
            tqdm_epoch = tqdm(
                self.dataloader.train_loader,
                total=self.dataloader.train_iterations,
                desc="Train Epoch-{}-".format(self.current_epoch + 1))
            logger.info("Training one epoch...")
            self.Eval.reset()

            self.train_loss = []
            self.model.train()
            if self.args.freeze_bn:
                for m in self.model.modules():
                    if isinstance(m, SynchronizedBatchNorm2d):
                        m.eval()

            # Initialize your average meters
            self.train_func(tqdm_epoch)

            MIoU_single_img, MIoU_thresh = \
                self.Eval.Mean_Intersection_over_Union()

            logger.info('Epoch:{}, train MIoU1:{}'.format(
                self.current_epoch, MIoU_thresh))
            tr_loss = sum(self.train_loss) / len(self.train_loss)
            self.writer.add_scalar('train_loss', tr_loss, self.current_epoch)
            tqdm.write("The average loss of train epoch-{}-:{}".format(
                self.current_epoch, tr_loss))
            tqdm_epoch.close()

            if self.current_epoch % 10 == 0:
                state = {
                    'state_dict': self.model.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'best_MIou': self.current_MIoU
                }
                # logger.info("=>saving the final checkpoint...")
                torch.save(state,
                           train_id + '_epoca_' + str(self.current_epoch))

            # validate
            if self.args.validation:
                MIoU, MIoU_thresh = self.validate()
                self.writer.add_scalar('MIoU', MIoU_thresh, self.current_epoch)

                self.current_MIoU = MIoU_thresh
                is_best = MIoU_thresh > self.best_MIou
                if is_best:
                    self.best_MIou = MIoU_thresh
                self.save_checkpoint(is_best, train_id + 'best.pth')

        state = {
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'best_MIou': self.current_MIoU
        }
        logger.info("=>saving the final checkpoint...")
        torch.save(state, train_id + 'final.pth')

    def train_3ch(self, tqdm_epoch):
        for x, y in tqdm_epoch:
            self.poly_lr_scheduler(
                optimizer=self.optimizer,
                init_lr=self.args.lr,
                iter=self.current_iter,
                max_iter=self.args.iter_max,
                power=self.args.poly_power,
            )
            if self.current_iter >= self.args.iter_max:
                logger.info("iteration reached {}!".format(self.args.iter_max))
                break
            self.writer.add_scalar('learning_rate',
                                   self.optimizer.param_groups[0]["lr"],
                                   self.current_iter)
            self.writer.add_scalar('learning_rate_10x',
                                   self.optimizer.param_groups[1]["lr"],
                                   self.current_iter)
            self.train_one_epoch(x, y)

    def train_4ch(self, tqdm_epoch):
        for x, y, target in tqdm_epoch:
            self.poly_lr_scheduler(
                optimizer=self.optimizer,
                init_lr=self.args.lr,
                iter=self.current_iter,
                max_iter=self.args.iter_max,
                power=self.args.poly_power,
            )
            if self.current_iter >= self.args.iter_max:
                logger.info("iteration reached {}!".format(self.args.iter_max))
                break
            self.writer.add_scalar('learning_rate',
                                   self.optimizer.param_groups[0]["lr"],
                                   self.current_iter)
            self.writer.add_scalar('learning_rate_10x',
                                   self.optimizer.param_groups[1]["lr"],
                                   self.current_iter)

            target = target.float()
            x = torch.cat((x, target), dim=1)
            self.train_one_epoch(x, y)

    def train_one_epoch(self, x, y):
        if self.cuda:
            x, y = x.to(self.device), y.to(device=self.device,
                                           dtype=torch.long)

        y[y > 0] = 1.
        self.optimizer.zero_grad()

        # model
        pred = self.model(x)

        y = torch.squeeze(y, 1)
        if self.args.num_classes == 1:
            y = y.to(device=self.device, dtype=torch.float)
            pred = pred.squeeze()
        # loss
        cur_loss = self.loss(pred, y)

        # optimizer
        cur_loss.backward()
        self.optimizer.step()

        self.train_loss.append(cur_loss.item())

        if self.batch_idx % 50 == 0:
            logger.info("The train loss of epoch{}-batch-{}:{}".format(
                self.current_epoch, self.batch_idx, cur_loss.item()))
        self.batch_idx += 1

        self.current_iter += 1

        # print(cur_loss)
        if np.isnan(float(cur_loss.item())):
            raise ValueError('Loss is nan during training...')

    def validate(self):
        logger.info('validating one epoch...')
        self.Eval.reset()
        self.iter = 0

        with torch.no_grad():
            tqdm_batch = tqdm(self.dataloader.valid_loader,
                              total=self.dataloader.valid_iterations,
                              desc="Val Epoch-{}-".format(self.current_epoch +
                                                          1))
            self.val_loss = []
            self.model.eval()
            self.validate_func(tqdm_batch)

            MIoU, MIoU_thresh = self.Eval.Mean_Intersection_over_Union()

            logger.info('validation MIoU1:{}'.format(MIoU))
            v_loss = sum(self.val_loss) / len(self.val_loss)
            print('Miou: ' + str(MIoU) + ' MIoU_thresh: ' + str(MIoU_thresh))

            self.writer.add_scalar('val_loss', v_loss, self.current_epoch)

            tqdm_batch.close()

        return MIoU, MIoU_thresh

    def validate_3ch(self, tqdm_batch):
        for x, y, w, h, name in tqdm_batch:
            self.validate_one_epoch(x, y, w, h, name)

    def validate_4ch(self, tqdm_batch):
        for x, y, target, w, h, name in tqdm_batch:
            target = target.float()
            x = torch.cat((x, target), dim=1)
            self.validate_one_epoch(x, y, w, h, name)

    def validate_crop(self, tqdm_batch):
        for i, (x, y, left, top, right, bottom, w, h,
                name) in enumerate(tqdm_batch):
            self.validate_one_epoch(x, y, w, h, name, left, top, right, bottom)

    def validate_crop_store_result(self, tqdm_batch):
        for i, (x, y, left, top, right, bottom, w, h,
                name) in enumerate(tqdm_batch):
            if self.cuda:
                x, y = x.to(self.device), y.to(device=self.device,
                                               dtype=torch.long)

            # model
            pred = self.model(x)
            if self.args.loss == 'tanimoto':
                pred = (pred - pred.min()) / (pred.max() - pred.min())
            else:
                pred = nn.Sigmoid()(pred)

            pred = pred.squeeze().data.cpu().numpy()
            for i, single_argpred in enumerate(pred):
                pil = Image.fromarray(single_argpred)
                pil = pil.resize((right[i] - left[i], bottom[i] - top[i]))
                img = np.array(pil)
                img_border = cv.copyMakeBorder(img,
                                               top[i].numpy(),
                                               h[i].numpy() -
                                               bottom[i].numpy(),
                                               left[i].numpy(),
                                               w[i].numpy() - right[i].numpy(),
                                               cv.BORDER_CONSTANT,
                                               value=[0, 0, 0])

                if self.args.store_result:
                    img_border *= 255
                    pil = Image.fromarray(img_border.astype('uint8'))
                    pil.save(args.result_filepath +
                             'ISIC_{}.png'.format(name[i]))

                    self.iter += 1

    def validate_one_epoch_store_result(self, x, y, w, h, name):
        if self.cuda:
            x, y = x.to(self.device), y.to(device=self.device,
                                           dtype=torch.long)

        # model
        pred = self.model(x)
        if self.args.loss == 'tanimoto':
            pred = (pred - pred.min()) / (pred.max() - pred.min())
        else:
            pred = nn.Sigmoid()(pred)

        pred = pred.squeeze().data.cpu().numpy()
        for i, single_argpred in enumerate(pred):
            pil = Image.fromarray(single_argpred)
            pil = pil.resize((w[i], h[i]))
            img_border = np.array(pil)
            if self.args.store_result:
                img_border *= 255
                pil = Image.fromarray(img_border.astype('uint8'))
                pil.save(args.result_filepath + 'ISIC_{}.png'.format(name[i]))

                self.iter += 1

    # def validate_crop(self, tqdm_batch):
    #     for i, (x, y, left, top, right, bottom, w, h, name) in enumerate(tqdm_batch):
    #         if self.cuda:
    #             x, y = x.to(self.device), y.to(device=self.device, dtype=torch.long)
    #
    #         pred = self.model(x)
    #         y = torch.squeeze(y, 1)
    #         if self.args.num_classes == 1:
    #             y = y.to(device=self.device, dtype=torch.float)
    #             pred = pred.squeeze()
    #
    #         cur_loss = self.loss(pred, y)
    #         if np.isnan(float(cur_loss.item())):
    #             raise ValueError('Loss is nan during validating...')
    #         self.val_loss.append(cur_loss.item())
    #
    #         pred = pred.data.cpu().numpy()
    #
    #         pred[pred >= 0.5] = 1
    #         pred[pred < 0.5] = 0
    #         print('\n')
    #         for i, single_pred in enumerate(pred):
    #             gt = Image.open(self.args.data_root_path + "ground_truth/ISIC_" + name[i] + "_segmentation.png")
    #             pil = Image.fromarray(single_pred.astype('uint8'))
    #             pil = pil.resize((right[i] - left[i], bottom[i] - top[i]))
    #             img = np.array(pil)
    #             ground_border = np.array(gt)
    #             ground_border[ground_border == 255] = 1
    #             img_border = cv.copyMakeBorder(img, top[i].numpy(), h[i].numpy() - bottom[i].numpy(),
    #                                            left[i].numpy(),
    #                                            w[i].numpy() - right[i].numpy(), cv.BORDER_CONSTANT, value=[0, 0, 0])
    #
    #             iou = self.Eval.iou_numpy(img_border, ground_border)
    #             print(name[i] + ' iou: ' + str(iou))
    #
    #             if self.args.store_result:
    #                 img_border[img_border == 1] = 255
    #                 pil = Image.fromarray(img_border)
    #                 pil.save(args.result_filepath + 'ISIC_{}.png'.format(name[i]))
    #                 # gt.save(args.result_filepath + 'ISIC_ground_{}.png'.format(name[i]))
    #
    #                 self.iter += 1

    def validate_one_epoch(self, x, y, w, h, name, *ltrb):
        if self.cuda:
            x, y = x.to(self.device), y.to(device=self.device,
                                           dtype=torch.long)

        # model
        pred = self.model(x)
        y = torch.squeeze(y, 1)
        if self.args.num_classes == 1:
            y = y.to(device=self.device, dtype=torch.float)
            pred = pred.squeeze()

        cur_loss = self.loss(pred, y)
        if np.isnan(float(cur_loss.item())):
            raise ValueError('Loss is nan during validating...')
        self.val_loss.append(cur_loss.item())

        pred = pred.data.cpu().numpy()

        pred[pred >= 0.5] = 1
        pred[pred < 0.5] = 0
        print('\n')
        for i, single_pred in enumerate(pred):
            gt = Image.open(self.args.data_root_path + "ground_truth/ISIC_" +
                            name[i] + "_segmentation.png")
            pil = Image.fromarray(single_pred.astype('uint8'))

            if self.args.using_bb != 'none' and self.args.input_channels == 3:
                pil = pil.resize(
                    (ltrb[2][i] - ltrb[0][i], ltrb[3][i] - ltrb[1][i]))
                img = np.array(pil)
                img_border = cv.copyMakeBorder(
                    img,
                    ltrb[1][i].numpy(),
                    h[i].numpy() - ltrb[3][i].numpy(),
                    ltrb[0][i].numpy(),
                    w[i].numpy() - ltrb[2][i].numpy(),
                    cv.BORDER_CONSTANT,
                    value=[0, 0, 0])
            else:
                pil = pil.resize((w[i], h[i]))
                img_border = np.array(pil)

            ground_border = np.array(gt)
            ground_border[ground_border == 255] = 1
            iou = self.Eval.IoU_one_class(img_border, ground_border)

            print(name[i] + ' iou: ' + str(iou))

            if self.args.store_result:
                img_border[img_border == 1] = 255
                pil = Image.fromarray(img_border)
                pil.save(args.result_filepath + 'ISIC_{}.png'.format(name[i]))
                # gt.save(args.result_filepath + 'ISIC_ground_{}.png'.format(name[i]))

                self.iter += 1

    def save_checkpoint(self, is_best, filename=None):
        filename = os.path.join(self.args.checkpoint_dir, filename)
        state = {
            'state_dict': self.model.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'best_MIou': self.best_MIou
        }
        if is_best:
            logger.info("=>saving a new best checkpoint...")
            torch.save(state, filename)
        else:
            logger.info("=> The MIoU of val doesn't improve.")

    def load_checkpoint(self, filename):
        try:
            logger.info("Loading checkpoint '{}'".format(filename))
            checkpoint = torch.load(filename)
            if 'module.Resnet101.bn1.weight' in checkpoint['state_dict']:
                checkpoint2 = collections.OrderedDict([
                    (k[7:], v) for k, v in checkpoint['state_dict'].items()
                ])
                self.model.load_state_dict(checkpoint2)
            else:
                self.model.load_state_dict(checkpoint['state_dict'])

            if not self.args.freeze_bn:
                self.current_epoch = checkpoint['epoch']
                self.current_iter = checkpoint['iteration']
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_MIou = checkpoint['best_MIou']
            print(
                "Checkpoint loaded successfully from '{}', MIoU:{})\n".format(
                    self.args.checkpoint_dir, checkpoint['best_MIou']))
            logger.info(
                "Checkpoint loaded successfully from '{}', MIoU:{})\n".format(
                    self.args.checkpoint_dir, checkpoint['best_MIou']))
        except OSError as e:
            logger.info("No checkpoint exists from '{}'. Skipping...".format(
                self.args.checkpoint_dir))
            logger.info("**First time to train**")

    def get_params(self, model, key):
        # For Dilated CNN
        if key == "1x":
            for m in model.named_modules():
                if "Resnet101" in m[0]:
                    if isinstance(m[1], nn.Conv2d):
                        for p in m[1].parameters():
                            yield p
        #
        if key == "10x":
            for m in model.named_modules():
                if "encoder" in m[0] or "decoder" in m[0]:
                    if isinstance(m[1], nn.Conv2d):
                        for p in m[1].parameters():
                            yield p

    def poly_lr_scheduler(self, optimizer, init_lr, iter, max_iter, power):
        new_lr = init_lr * (1 - float(iter) / max_iter)**power
        optimizer.param_groups[0]["lr"] = new_lr
        optimizer.param_groups[1]["lr"] = 10 * new_lr
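Throughout this class, self.Eval accumulates intersection-over-union statistics, and IoU_one_class compares one predicted binary mask against its ground truth. A minimal sketch of binary IoU on numpy masks under the usual definition (illustrative only; iou_one_class below is a hypothetical stand-in, not the project's Eval method):

import numpy as np

def iou_one_class(pred, gt):
    # pred, gt: binary masks (0/1) with identical shapes.
    pred = np.asarray(pred).astype(bool)
    gt = np.asarray(gt).astype(bool)
    union = np.logical_or(pred, gt).sum()
    if union == 0:
        return 1.0  # both masks empty: treat as perfect agreement
    inter = np.logical_and(pred, gt).sum()
    return float(inter) / float(union)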
Code Example #24
    def train(self, train_file, dev_file, test_file):
        self.hyperParams.show()
        torch.set_num_threads(self.hyperParams.thread)
        reader = Reader()

        trainInsts = reader.readInstances(train_file, self.hyperParams.maxInstance)
        devInsts = reader.readInstances(dev_file, self.hyperParams.maxInstance)
        testInsts = reader.readInstances(test_file, self.hyperParams.maxInstance)

        print("Training Instance: ", len(trainInsts))
        print("Dev Instance: ", len(devInsts))
        print("Test Instance: ", len(testInsts))

        self.createAlphabet(trainInsts, devInsts, testInsts)

        trainExamples = self.instance2Example(trainInsts)
        devExamples = self.instance2Example(devInsts)
        testExamples = self.instance2Example(testInsts)

        self.model = RNNLabeler(self.hyperParams)
        parameters = filter(lambda p: p.requires_grad, self.model.parameters())
        optimizer = torch.optim.Adam(parameters, lr=self.hyperParams.learningRate)

        indexes = []
        for idx in range(len(trainExamples)):
            indexes.append(idx)

        batchBlock = len(trainExamples) // self.hyperParams.batch
        for iter in range(self.hyperParams.maxIter):
            print('###Iteration' + str(iter) + "###")
            random.shuffle(indexes)
            self.model.train()
            for updateIter in range(batchBlock):
                #self.model.zero_grad()
                optimizer.zero_grad()
                exams = []
                start_pos = updateIter * self.hyperParams.batch
                end_pos = (updateIter + 1) * self.hyperParams.batch
                for idx in range(start_pos, end_pos):
                    exams.append(trainExamples[indexes[idx]])
                feats, labels = self.getBatchFeatLabel(exams)
                output = self.model(feats, self.hyperParams.batch)
                loss = torch.nn.functional.cross_entropy(output, labels)
                loss.backward()
                optimizer.step()
                if (updateIter + 1) % self.hyperParams.verboseIter == 0:
                    print('current: ', idx + 1, ", cost:", loss.data[0])

            self.model.eval()
            eval_dev = Eval()
            for idx in range(len(devExamples)):
                predictLabel = self.predict(devExamples[idx])
                devInsts[idx].evalACC(predictLabel, eval_dev)
            print("dev: ", end='')
            eval_dev.getACC()

            eval_test = Eval()
            for idx in range(len(testExamples)):
                predictLabel = self.predict(testExamples[idx])
                testInsts[idx].evalACC(predictLabel, eval_test)
            print("test: ", end='')
            eval_test.getACC()
Code Example #25
 def eval_setup(self):
     x = Eval()
     x.load_model()
     x.load_depth_model()
     self.eval = x
Code Example #26
def train(train_iter, dev_iter, test_iter, model_encoder, model_decoder, args):
    # if args.use_cuda:
    #     model_encoder = model_encoder.cuda()
    #     model_decoder = model_decoder.cuda()

    if args.Adam is True:
        print("Adam Training......")
        # Materialize the parameter filters as lists so they can be reused
        # (e.g. by gradient clipping below); a bare filter() iterator would be
        # exhausted after its first use under Python 3.
        model_encoder_parameters = list(filter(lambda p: p.requires_grad,
                                               model_encoder.parameters()))
        model_decoder_parameters = list(filter(lambda p: p.requires_grad,
                                               model_decoder.parameters()))
        optimizer_encoder = torch.optim.Adam(
            params=model_encoder_parameters,
            lr=args.lr,
            weight_decay=args.init_weight_decay)
        optimizer_decoder = torch.optim.Adam(
            params=model_decoder_parameters,
            lr=args.lr,
            weight_decay=args.init_weight_decay)
        # optimizer_encoder = torch.optim.Adam(model_encoder.parameters(), lr=args.lr, weight_decay=args.init_weight_decay)
        # optimizer_decoder = torch.optim.Adam(model_decoder.parameters(), lr=args.lr, weight_decay=args.init_weight_decay)

    steps = 0
    model_count = 0
    # for dropout in train / dev / test
    model_encoder.train()
    model_decoder.train()
    time_list = []
    dev_eval_seg = Eval()
    dev_eval_pos = Eval()
    test_eval_seg = Eval()
    test_eval_pos = Eval()
    for epoch in range(1, args.epochs + 1):
        print("\n## Epoch {} of {} ##\n".format(epoch, args.epochs))
        print("optimizer_encoder now lr is {}".format(
            optimizer_encoder.param_groups[0].get("lr")))
        print("optimizer_decoder now lr is {} \n".format(
            optimizer_decoder.param_groups[0].get("lr")))

        # shuffle
        random.shuffle(train_iter)
        # random.shuffle(dev_iter)
        # random.shuffle(test_iter)

        model_encoder.train()
        model_decoder.train()

        for batch_count, batch_features in enumerate(train_iter):
            model_encoder.zero_grad()
            model_decoder.zero_grad()

            encoder_out = model_encoder(batch_features)
            decoder_out, state, decoder_out_acc = model_decoder(batch_features,
                                                                encoder_out,
                                                                train=True)

            train_acc, correct, total_num = cal_train_acc(
                batch_features, batch_count, decoder_out_acc, args)

            loss = torch.nn.functional.nll_loss(decoder_out,
                                                batch_features.gold_features)

            loss.backward()

            if args.init_clip_max_norm is not None:
                utils.clip_grad_norm(model_encoder_parameters,
                                     max_norm=args.init_clip_max_norm)
                utils.clip_grad_norm(model_decoder_parameters,
                                     max_norm=args.init_clip_max_norm)

            optimizer_encoder.step()
            optimizer_decoder.step()

            steps += 1
            if steps % args.log_interval == 0:
                sys.stdout.write(
                    "\rbatch_count = [{}] , loss is {:.6f} , (correct/ total_num) = acc ({} / {}) = "
                    "{:.6f}%".format(batch_count + 1, loss.data[0], correct,
                                     total_num, train_acc * 100))
            if steps % args.dev_interval == 0:
                print("\ndev F-score")
                dev_eval_pos.clear()
                dev_eval_seg.clear()
                eval(dev_iter, model_encoder, model_decoder, args,
                     dev_eval_seg, dev_eval_pos)
                # model_encoder.train()
                # model_decoder.train()
            if steps % args.test_interval == 0:
                print("test F-score")
                test_eval_pos.clear()
                test_eval_seg.clear()
                eval(test_iter, model_encoder, model_decoder, args,
                     test_eval_seg, test_eval_pos)
                print("\n")
        model_encoder.eval()
        model_decoder.eval()
        if steps != 0:
            print("\none epoch dev F-score")
            dev_eval_pos.clear()
            dev_eval_seg.clear()
            eval(dev_iter, model_encoder, model_decoder, args, dev_eval_seg,
                 dev_eval_pos)
            # model_encoder.train()
            # model_decoder.train()
        if steps != 0:
            print("one epoch test F-score")
            test_eval_pos.clear()
            test_eval_seg.clear()
            eval(test_iter, model_encoder, model_decoder, args, test_eval_seg,
                 test_eval_pos)
            print("\n")
Code example #27
from aug import aug_lv3
from utils import plot_images
if __name__ == '__main__':
    fg_dir = 'foreground/original_fg'
    bg_dir = 'background/cropped_bg'
    # dp == DataProvider
    dp = WallyDataset_ver2(fg_dir, bg_dir, resize=(64, 64))
    # Setting models
    models = Models(n_classes=2, img_shape=(64, 64, 3))
    # Get batch xs , ys

    # Augmentation
    #plot_images(batch_xs , batch_ys)
    # Training

    eval = Eval()
    for step in range(cfg.max_iter):
        batch_xs, batch_ys = dp.next_batch(fg_batchsize=30, bg_batchsize=30, normalization=True)
        #batch_xs = aug_lv3(batch_xs)
        #batch_xs = batch_xs / 255.
        show_progress(step, cfg.max_iter)
        train_cost = models.training(batch_xs, batch_ys, cfg.lr)
        if step % cfg.ckpt == 0:
            print('Validation ... ')
            #pred_op, pred_cls, eval_cost, accuracy =models.eval(dp.val_imgs , dp.val_labs,)
            acc = eval.get_acc(sess_op=models.sess, preds_op=models.pred[:,0], batch_size=60, x_op=models.x_,
                               phase_train=models.phase_train)
            models.save_models('models/{}.ckpt'.format(step))

            print(acc)
            print('train cost : {}'.format(train_cost))
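
Example #27 delegates validation to eval.get_acc, which presumably runs the held-out images through the graph in fixed-size batches. A hypothetical TensorFlow 1.x-style sketch of that kind of batched accuracy computation follows; the parameter names and the one-hot label assumption are illustrative, not the project's actual API.

# Hypothetical sketch of batched accuracy evaluation in TensorFlow 1.x style.
# Eval.get_acc in the original project may differ; for a binary model that
# exposes a single score column (as models.pred[:, 0] suggests), thresholding
# that column would replace the argmax below.
import numpy as np

def batched_accuracy(sess, preds_op, x_op, phase_train, val_imgs, val_labs,
                     batch_size=60):
    n = len(val_imgs)
    correct = 0
    for start in range(0, n, batch_size):
        end = min(start + batch_size, n)
        feed = {x_op: val_imgs[start:end], phase_train: False}
        preds = sess.run(preds_op, feed_dict=feed)
        pred_cls = np.argmax(preds, axis=1)                 # predicted classes
        true_cls = np.argmax(val_labs[start:end], axis=1)   # labels assumed one-hot
        correct += int(np.sum(pred_cls == true_cls))
    return correct / float(n)
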
Code example #28
File: main_cifar.py  Project: SoulDuck/Find_Wally
    test_imgs = test_imgs / 255.
    train_labs = cls2onehot(train_labs, depth=10)
    test_labs = cls2onehot(test_labs, depth=10)
    train_imgs = train_imgs
    test_imgs = test_imgs
    # Setting models
    models = Models(n_classes=10, img_shape=(32, 32, 3))
    # Get batch xs , ys

    # Augmentation
    # batch_xs = aug_lv3(batch_xs)
    # batch_xs = batch_xs / 255.
    # plot_images(batch_xs , batch_ys)

    # Training
    eval = Eval()
    for step in range(cfg.max_iter):
        show_progress(step, cfg.max_iter)
        batch_xs, batch_ys = next_batch(train_imgs, train_labs, 60)

        train_cost = models.training(batch_xs, batch_ys, cfg.lr)
        if step % cfg.ckpt == 0:
            print('Validation ... ')

            pred, pred_cls, eval_cost, accuracy = models.eval(
                test_imgs, test_labs)
            #pred_op, pred_cls, eval_cost, accuracy = models.eval(dp.val_imgs , dp.val_labs,)
            #acc = eval.get_acc(sess_op=models.sess, preds_op=models.pred[:,0], batch_size=60, x_op=models.x_,
            #                   phase_train=models.phase_train)
            print(accuracy)
            models.save_models('models/{}.ckpt'.format(step))
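
The CIFAR example above converts integer labels with cls2onehot before training. A minimal numpy sketch of that conversion, under the assumption that cls2onehot simply produces one-hot rows of the given depth:

# Minimal sketch of an integer-label to one-hot conversion; assumed to match
# what cls2onehot does in the original project.
import numpy as np

def cls2onehot(labels, depth):
    labels = np.asarray(labels, dtype=np.int64)
    onehot = np.zeros((labels.shape[0], depth), dtype=np.float32)
    onehot[np.arange(labels.shape[0]), labels] = 1.0
    return onehot

# cls2onehot([3, 0, 9], depth=10) -> (3, 10) array with a single 1.0 per row
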
Code example #29
    def __init__(self, args, cuda=None):
        self.args = args
        self.cuda = cuda and torch.cuda.is_available()
        self.device = torch.device('cuda' if self.cuda else 'cpu')

        self.current_MIoU = 0
        self.best_MIou = 0
        self.current_epoch = 0
        self.current_iter = 0

        self.batch_idx = 0

        # set TensorboardX
        self.writer = SummaryWriter()

        # Metric definition
        self.Eval = Eval(self.args.num_classes)

        if self.args.loss == 'tanimoto':
            self.loss = tanimoto_loss()
        else:
            self.loss = nn.BCEWithLogitsLoss()

        self.loss.to(self.device)

        # model
        self.model = DeepLab(output_stride=self.args.output_stride,
                             class_num=self.args.num_classes,
                             num_input_channel=self.args.input_channels,
                             pretrained=self.args.imagenet_pretrained
                             and self.args.pretrained_ckpt_file is None,
                             bn_eps=self.args.bn_eps,
                             bn_momentum=self.args.bn_momentum,
                             freeze_bn=self.args.freeze_bn)

        if torch.cuda.device_count() > 1:
            self.model = nn.DataParallel(self.model)
            patch_replication_callback(self.model)
            self.m = self.model.module
        else:
            self.m = self.model
        self.model.to(self.device)

        self.optimizer = torch.optim.SGD(
            params=[
                {
                    "params": self.get_params(self.m, key="1x"),
                    "lr": self.args.lr,
                },
                {
                    "params": self.get_params(self.m, key="10x"),
                    "lr": 10 * self.args.lr,
                },
            ],
            momentum=self.args.momentum,
            weight_decay=self.args.weight_decay,
        )

        self.dataloader = ISICDataLoader(self.args)
        self.epoch_num = ceil(self.args.iter_max /
                              self.dataloader.train_iterations)

        if self.args.input_channels == 3:
            self.train_func = self.train_3ch
            if args.using_bb != 'none':
                if self.args.store_result:
                    self.validate_func = self.validate_crop_store_result
                else:
                    self.validate_func = self.validate_crop
            else:
                self.validate_func = self.validate_3ch
        else:
            self.train_func = self.train_4ch
            self.validate_func = self.validate_4ch

        if self.args.store_result:
            self.validate_one_epoch = self.validate_one_epoch_store_result
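
The trainer above selects between BCEWithLogitsLoss and a tanimoto_loss. A sketch of one common Tanimoto (Jaccard-style) formulation for binary segmentation is given below; the project's tanimoto_loss may differ in details such as smoothing or a dual/complement term, so treat this as illustrative only.

# Sketch of a Tanimoto loss in a common binary-segmentation formulation;
# the project's tanimoto_loss() may differ (e.g. smoothing, complement term).
import torch
import torch.nn as nn

class TanimotoLoss(nn.Module):
    def __init__(self, smooth=1e-6):
        super(TanimotoLoss, self).__init__()
        self.smooth = smooth

    def forward(self, logits, targets):
        probs = torch.sigmoid(logits)                       # model emits raw logits
        probs = probs.view(probs.size(0), -1)
        targets = targets.view(targets.size(0), -1).float()
        intersection = (probs * targets).sum(dim=1)
        denom = (probs * probs).sum(dim=1) + (targets * targets).sum(dim=1) - intersection
        tanimoto = (intersection + self.smooth) / (denom + self.smooth)
        return 1.0 - tanimoto.mean()
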
Code example #30
File: train.py  Project: KT19/pytorch
def train():
    trainset = torchvision.datasets.CIFAR10(root="~/dataset",train=True,transform=transform,download=True)
    trainloader = torch.utils.data.DataLoader(trainset, args.batch_size, shuffle=True)

    evaluator = Eval()
    model = Model("vgg16",cls_num=10)
    model.to(device)
    optimizer = torch.optim.SGD(model.parameters(),lr=args.lr,weight_decay=args.wd,momentum=args.momentum)
    
    if args.mode == "normal":
        criterion = nn.CrossEntropyLoss()
    else:
        criterion = MixUpLoss()
    n_epochs = args.n_epochs

    LOSS_LOG = []
    ACC_LOG = []
    EPOCHS = []
    for epoch in range(1,n_epochs+1,1):
        print("Epoch:[{}]/[{}]".format(epoch,n_epochs))
        if epoch in [int(0.5 * n_epochs), int(0.8 * n_epochs)]:  # integer milestones so the LR drop also triggers for odd n_epochs
            print("change learning rate")
            for param in optimizer.param_groups:
                param["lr"] *= 0.1

        total_loss = []
        for img, label in tqdm(trainloader):
            optimizer.zero_grad()

            img = img.to(device)
            label = label.to(device)

            #mixup
            if args.mode == "mixup":
                img,y1,y2,lam = create_mixup(img, label, alpha=args.alpha)

            #forward
            pred = model(img)
            #backward
            if args.mode == "normal": #normal loss
                loss = criterion(pred,label)
            else: #mixup loss
                loss = criterion(pred, y1, y2, lam)
            
            loss.backward()

            optimizer.step()
            total_loss.append(loss.item())

        acc = evaluator.eval(model)
        mean_loss = np.mean(total_loss)
        print("loss: {}".format(mean_loss))
        print("accuracy: {}[%]".format(acc))

        LOSS_LOG.append(mean_loss)
        ACC_LOG.append(acc)
        EPOCHS.append(epoch)
        df = pd.DataFrame({
            "LOSS": LOSS_LOG,
            "ACCURACY": ACC_LOG,
            "EPOCH": EPOCHS,
        })
        df.to_csv("./"+args.mode+"_train_log.csv")
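
The training loop above relies on create_mixup and MixUpLoss. A minimal sketch of the standard mixup recipe (sample lambda from Beta(alpha, alpha), mix the inputs, and interpolate the two cross-entropy terms) follows; the original project is assumed, but not guaranteed, to implement it this way.

# Minimal sketch of standard mixup; the project's create_mixup / MixUpLoss
# are assumed to follow this recipe but may differ in detail.
import numpy as np
import torch
import torch.nn as nn

def create_mixup(x, y, alpha=1.0):
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    perm = torch.randperm(x.size(0), device=x.device)
    mixed_x = lam * x + (1.0 - lam) * x[perm]
    return mixed_x, y, y[perm], lam

class MixUpLoss(nn.Module):
    def __init__(self):
        super(MixUpLoss, self).__init__()
        self.ce = nn.CrossEntropyLoss()

    def forward(self, pred, y1, y2, lam):
        return lam * self.ce(pred, y1) + (1.0 - lam) * self.ce(pred, y2)
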
Code example #31
    def train(self, train_file, dev_file, test_file, model_file):
        self.hyperParams.show()
        torch.set_num_threads(self.hyperParams.thread)
        reader = Reader()

        trainInsts = reader.readInstances(train_file, self.hyperParams.maxInstance)
        devInsts = reader.readInstances(dev_file, self.hyperParams.maxInstance)
        testInsts = reader.readInstances(test_file, self.hyperParams.maxInstance)

        print("Training Instance: ", len(trainInsts))
        print("Dev Instance: ", len(devInsts))
        print("Test Instance: ", len(testInsts))

        self.createAlphabet(trainInsts, devInsts, testInsts)

        trainExamples = self.instance2Example(trainInsts)
        devExamples = self.instance2Example(devInsts)
        testExamples = self.instance2Example(testInsts)

        self.encoder = Encoder(self.hyperParams)
        self.decoder = Decoder(self.hyperParams)

        indexes = list(range(len(trainExamples)))

        encoder_parameters = filter(lambda p: p.requires_grad, self.encoder.parameters())
        encoder_optimizer = torch.optim.Adam(encoder_parameters, lr=self.hyperParams.learningRate)

        decoder_parameters = filter(lambda p: p.requires_grad, self.decoder.parameters())
        decoder_optimizer = torch.optim.Adam(decoder_parameters, lr=self.hyperParams.learningRate)
        train_num = len(trainExamples)
        batchBlock = train_num // self.hyperParams.batch
        if train_num % self.hyperParams.batch != 0:
            batchBlock += 1
        for iter in range(self.hyperParams.maxIter):
            print('###Iteration' + str(iter) + "###")
            random.shuffle(indexes)
            self.encoder.train()
            self.decoder.train()
            train_eval = Eval()
            for updateIter in range(batchBlock):
                exams = []
                start_pos = updateIter * self.hyperParams.batch
                end_pos = (updateIter + 1) * self.hyperParams.batch
                if end_pos > train_num:
                    end_pos = train_num
                for idx in range(start_pos, end_pos):
                    exams.append(trainExamples[indexes[idx]])
                batchCharFeats, batchBiCharFeats, batchLabel, batch, maxSentSize = self.getBatchFeatLabel(exams)
                encoder_optimizer.zero_grad()
                decoder_optimizer.zero_grad()

                encoderHidden = self.encoder.init_hidden(batch)
                encoderOutput, encoderHidden = self.encoder(batchCharFeats, batchBiCharFeats, encoderHidden)
                loss = 0
                decoderOutput = self.decoder(batch, encoderOutput, exams, bTrain=True)
                for idx in range(batch):
                    exam = exams[idx]
                    for idy in range(exam.size):
                        labelID = getMaxIndex(self.hyperParams, decoderOutput[idx * maxSentSize + idy])
                        if labelID == exam.labelIndexes[idy]:
                            train_eval.correct_num += 1
                        train_eval.gold_num += 1

                loss += torch.nn.functional.nll_loss(decoderOutput, batchLabel)
                loss.backward()
                if (updateIter + 1) % self.hyperParams.verboseIter == 0:
                    print('Current: ', updateIter + 1, ", Cost:", loss.item(), ", ACC:", train_eval.acc())
                encoder_optimizer.step()
                decoder_optimizer.step()

            self.encoder.eval()
            self.decoder.eval()

            dev_eval = Eval()
            for idx in range(len(devExamples)):
                exam = devExamples[idx]
                predict_labels = self.predict(exam)
                devInsts[idx].evalPRF(predict_labels, dev_eval)
            p, r, f = dev_eval.getFscore()
            print("precision: ", p, ", recall: ", r, ", fscore: ", f)

            test_eval = Eval()
            for idx in range(len(testExamples)):
                exam = testExamples[idx]
                predict_labels = self.predict(exam)
                testInsts[idx].evalPRF(predict_labels, test_eval)
            p, r, f = test_eval.getFscore()
            print("precision: ", p, ", recall: ", r, ", fscore: ", f)
        '''
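
The dev and test loops above expect an Eval accumulator that evalPRF fills with gold, predicted and correct counts and getFscore reduces to precision/recall/F1 (the training loop also reads correct_num, gold_num and acc()). A minimal sketch under those assumptions; the project's real class may differ.

# Hypothetical sketch of a precision/recall/F1 accumulator matching the
# getFscore / acc usage above; the real Eval class may differ.
class Eval(object):
    def __init__(self):
        self.gold_num = 0       # gold labels/segments seen
        self.predict_num = 0    # predicted labels/segments
        self.correct_num = 0    # predictions that match gold

    def clear(self):
        self.gold_num = 0
        self.predict_num = 0
        self.correct_num = 0

    def acc(self):
        return self.correct_num / self.gold_num if self.gold_num else 0.0

    def getFscore(self):
        p = self.correct_num / self.predict_num if self.predict_num else 0.0
        r = self.correct_num / self.gold_num if self.gold_num else 0.0
        f = 2 * p * r / (p + r) if (p + r) else 0.0
        return p, r, f
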