コード例 #1
0
def main():
    """Evaluate a trained deeplabGanS2TWithRefine4 checkpoint on the
    domain-B validation split and print average and per-class F1 scores.

    Relies on module-level `args`, `data`, `imageLabelLoader`,
    `ConfusionMatrix` and `update_confusion_matrix`.
    """
    if len(args['device_ids']) > 0:
        torch.cuda.set_device(args['device_ids'][0])

    test_loader = data.DataLoader(imageLabelLoader(args['data_path'],
                                                   dataName=args['domainB'],
                                                   phase='val'),
                                  batch_size=args['batch_size'],
                                  num_workers=args['num_workers'],
                                  shuffle=False)
    gym = deeplabGanS2TWithRefine4()
    gym.initialize(args)
    gym.load(
        '/home/ben/mathfinder/PROJECT/AAAI2017/our_Method/v3/deeplab_feature_adaptation/checkpoints/Lip_to_July_g1/best_Ori_on_B_model.pth'
    )
    gym.eval()
    matrix = ConfusionMatrix(args['label_nums'])
    for i, (image, label) in enumerate(test_loader):
        # BUG FIX: `async` is a reserved keyword since Python 3.7 and made
        # this line a SyntaxError; PyTorch renamed the argument to
        # `non_blocking` with identical semantics.
        label = label.cuda(non_blocking=True)
        # NOTE(review): volatile=True is a no-op in PyTorch >= 0.4; if
        # gradient tracking must be disabled, wrap the loop in
        # torch.no_grad() instead — confirm target PyTorch version.
        target_var = torch.autograd.Variable(label, volatile=True)

        gym.test(False, image)
        output = gym.output

        matrix = update_confusion_matrix(matrix, output.data, label)
    print(matrix.avg_f1score())
    print(matrix.f1score())
コード例 #2
0
def test_model(model_path: str, stop_word_file=r"data\stopwords_eng.txt"):
    """Load a pickled Bayes sentiment classifier and print its test metrics.

    Args:
        model_path: Path to the pickled classifier file.
        stop_word_file: Stop-word list passed to TokenFilter.  The default
            is now a raw string: the original plain literal contained an
            unescaped backslash before 's', an invalid escape sequence
            (DeprecationWarning today, a SyntaxError in future Pythons).
            The raw string has the exact same runtime value.
    """
    print("starting testing")

    token_filter = TokenFilter(stop_word_file)
    tokenizer = Tokenizer()

    # SECURITY NOTE: pickle.load executes arbitrary code on load — only
    # open model files from trusted sources.
    with open(model_path, "rb") as fb:
        bayes = pickle.load(fb)
    cf = ConfusionMatrix(["positive", "negative"])

    # `paths` is assumed to be a module-level iterable of
    # (directory, true_label) pairs — defined elsewhere; TODO confirm.
    for path in paths:
        for file in os.listdir(path[0]):
            with open(path[0] + "/" + file) as f:
                tokens = tokenizer.tokenize(f)
                bag_of_words = create_bag_of_words(token_filter, tokens, True)

                predicted_class = bayes.predict_class(bag_of_words)
                print("Predicted: " + predicted_class + ", Real: " + path[1])

                cf.add_prediction(path[1], predicted_class)

    print(cf)
    print("Accuracy: " + str(cf.accuracy_average()))
    print("Recall: " + str(cf.recall_average()))
    print("Precision: " + str(cf.precision_average()))
    print("F-Measure: " + str(cf.f_measure_average()))
コード例 #3
0
def validate(val_loader, model, criterion, adaptation):
    """Evaluate `model` over `val_loader`, log averaged loss plus
    confusion-matrix metrics, and return `matrix.all_acc()`.

    Args:
        val_loader: DataLoader yielding (images, labels) batches.
        model: Project model exposing .test(adaptation, images) and .output.
        criterion: Loss callable; result is divided by args['batch_size'].
        adaptation: Flag forwarded to model.test (adaptation branch on/off).
    """
    # switch to evaluate mode
    run_time = time.time()
    matrix = ConfusionMatrix(args['label_nums'])
    loss = 0
    for i, (images, labels) in enumerate(val_loader):
        # BUG FIX: `async` is a reserved keyword since Python 3.7 and made
        # this a SyntaxError; PyTorch's replacement keyword is
        # `non_blocking` (same semantics).
        labels = labels.cuda(non_blocking=True)
        # NOTE(review): volatile=True is a no-op in PyTorch >= 0.4; prefer
        # torch.no_grad() around the loop — confirm target version.
        target_var = torch.autograd.Variable(labels, volatile=True)

        model.test(adaptation, images)
        output = model.output
        loss += criterion(output, target_var)/args['batch_size']
        matrix = update_confusion_matrix(matrix, output.data, labels)
    # Average over the number of batches (i is the last enumerate index).
    loss /= (i+1)
    run_time = time.time() - run_time
    logger.info('=================================================')
    logger.info('val:'
          'loss: {0:.4f}\t'
          'accuracy: {1:.4f}\t'
          'fg_accuracy: {2:.4f}\t'
          'avg_precision: {3:.4f}\t'
          'avg_recall: {4:.4f}\t'
          'avg_f1score: {5:.4f}\t'
          'run_time:{run_time:.2f}\t'
          .format(loss.data[0], matrix.accuracy(),
        matrix.fg_accuracy(), matrix.avg_precision(), matrix.avg_recall(), matrix.avg_f1score(),run_time=run_time))
    logger.info('=================================================')
    return matrix.all_acc()
コード例 #4
0
def train(A_train_loader, B_train_loader, model, epoch):
    """Run one adversarial training epoch over labelled domain-A batches,
    pairing each with an unlabelled domain-B batch, and print
    confusion-matrix statistics every args['print_freq'] iterations."""
    # Put the model into training mode before iterating.
    model.train()
    for step, (src_image, src_label) in enumerate(A_train_loader):
        tgt_image = next(iter(B_train_loader))
        model.set_input({'A': src_image, 'A_label': src_label, 'B': tgt_image})
        model.forward()
        model.optimize_parameters()
        prediction = model.output
        if step % args['print_freq'] != 0:
            continue
        # Periodic logging: build a fresh confusion matrix from the
        # current batch only.
        stats = ConfusionMatrix()
        update_confusion_matrix(stats, prediction.data, src_label)
        message = ('Time: {time}\t'
                   'Epoch/Iter: [{epoch}/{iter}]\t'
                   'loss: {loss:.4f}\t'
                   'acc: {accuracy:.4f}\t'
                   'fg_acc: {fg_accuracy:.4f}\t'
                   'avg_prec: {avg_precision:.4f}\t'
                   'avg_rec: {avg_recall:.4f}\t'
                   'avg_f1: {avg_f1core:.4f}\t'
                   'loss_G: {loss_G:.4f}\t'
                   'loss_D: {loss_D:.4f}\t')
        timestamp = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
        print(message.format(
            time=timestamp,
            epoch=epoch,
            iter=step + epoch * len(A_train_loader),
            loss=model.loss_P.data[0],
            accuracy=stats.accuracy(),
            fg_accuracy=stats.fg_accuracy(),
            avg_precision=stats.avg_precision(),
            avg_recall=stats.avg_recall(),
            avg_f1core=stats.avg_f1score(),
            loss_G=model.loss_G.data[0],
            loss_D=model.loss_D.data[0]))
コード例 #5
0
def main():
    """Evaluate a trained deeplabG1G2 checkpoint on the domain-B test
    split and print matrix.all_acc().

    Relies on module-level `args`, `data`, `imageLabelLoader`,
    `ConfusionMatrix` and `update_confusion_matrix`.
    """
    if len(args['device_ids']) > 0:
        torch.cuda.set_device(args['device_ids'][0])

    test_loader = data.DataLoader(imageLabelLoader(args['data_path'], dataName=args['domainB'], phase='test'),
                                   batch_size=args['batch_size'],
                                   num_workers=args['num_workers'], shuffle=False)
    gym = deeplabG1G2()
    gym.initialize(args)
    gym.load('/home/ben/mathfinder/PROJECT/AAAI2017/our_Method/v3/deeplab_feature_adaptation/checkpoints/lr_g1=0.00001_lr_g2=0.00000001_interval_g1=6_interval_d1=6_net_D=lsganMultOutput_D_if_adaptive=True/best_Ori_on_B_model.pth')
    gym.eval()
    matrix = ConfusionMatrix(args['label_nums'])
    for i, (image, label) in enumerate(test_loader):
        # BUG FIX: `async` is a reserved keyword since Python 3.7 and made
        # this a SyntaxError; PyTorch renamed the argument to `non_blocking`.
        label = label.cuda(non_blocking=True)
        # NOTE(review): volatile=True is a no-op in PyTorch >= 0.4; prefer
        # torch.no_grad() — confirm target version.
        target_var = torch.autograd.Variable(label, volatile=True)

        gym.test(image)
        output = gym.output

        matrix = update_confusion_matrix(matrix, output.data, label)
    print(matrix.all_acc())
コード例 #6
0
def train(train_loader, model, criterion, optimizer, epoch):
    """Train `model` for one epoch with plain supervised SGD steps,
    printing timing and confusion-matrix metrics every
    args['print_freq'] iterations.

    Args:
        train_loader: DataLoader yielding (images, labels) batches.
        model: Network with a .forward(input) -> output method.
        criterion: Loss callable; result divided by args['batch_size'].
        optimizer: Optimizer stepping the model parameters.
        epoch: Current epoch index (used only for logging).
    """
    # switch to train mode
    model.train()
    for i, (images, labels) in enumerate(train_loader):
        run_time = time.time()
        # BUG FIX: `async` is a reserved keyword since Python 3.7 and made
        # this a SyntaxError; PyTorch's replacement keyword is `non_blocking`.
        labels = labels.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(images)
        target_var = torch.autograd.Variable(labels)
        # compute output
        output = model.forward(input_var)
        loss = criterion(output, target_var) / args['batch_size']
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % args['print_freq'] == 0:
            matrix = ConfusionMatrix()
            update_confusion_matrix(matrix, output.data, labels)
            run_time = time.time() - run_time
            print('Epoch/Iter: [{epoch}/{iter}]\t'
                  'loss: {loss:.4f}\t'
                  'acc: {accuracy:.4f}\t'
                  'fg_acc: {fg_accuracy:.4f}\t'
                  'avg_prec: {avg_precision:.4f}\t'
                  'avg_rec: {avg_recall:.4f}\t'
                  'avg_f1: {avg_f1:.4f}\t'
                  'run_time:{run_time:.2f}\t'.format(
                      epoch=epoch,
                      iter=i + epoch * len(train_loader),
                      loss=loss.data[0],
                      accuracy=matrix.accuracy(),
                      fg_accuracy=matrix.fg_accuracy(),
                      avg_precision=matrix.avg_precision(),
                      avg_recall=matrix.avg_recall(),
                      # BUG FIX: was passed as `avg_f1core=`, which raised
                      # KeyError because the format string references
                      # the named field {avg_f1}.
                      avg_f1=matrix.avg_f1score(),
                      run_time=run_time))
コード例 #7
0
def main():
    """Adversarial domain-adaptation training entry point.

    Builds loaders for labelled domain A (train/val) and domain B
    (unlabelled train, labelled val), trains a deeplabGanWithRefine model,
    logs metrics every args['print_freq'] steps, and every 1000 iterations
    runs validation and checkpoints the best models by avg_f1score on
    domain B (both without and with adaptation).
    """

    # Ensure the checkpoint directory exists (project helper — presumably
    # wraps os.makedirs; TODO confirm).
    makedirs.mkdirs(os.path.join(args['checkpoints_dir'], args['name']))
    if len(args['device_ids']) > 0:
        torch.cuda.set_device(args['device_ids'][0])

    # Domain A: labelled source-domain loaders (train shuffled, val not).
    A_train_loader = data.DataLoader(imageLabelLoader(args['data_path'],dataName=args['domainA'], phase='train'), batch_size=args['batch_size'],
                                  num_workers=args['num_workers'], shuffle=True)
    A_val_loader = data.DataLoader(imageLabelLoader(args['data_path'], dataName=args['domainA'], phase='val'), batch_size=args['batch_size'],
                                num_workers=args['num_workers'], shuffle=False)

    # Domain B: unlabelled target-domain training images plus a labelled
    # validation split for measuring adaptation quality.
    B_train_loader = data.DataLoader(imageLoader(args['data_path'], dataName=args['domainB'], phase='train+unlabel'),
                                     batch_size=args['batch_size'],
                                     num_workers=args['num_workers'], shuffle=True)
    B_val_loader = data.DataLoader(imageLabelLoader(args['data_path'], dataName=args['domainB'], phase='val'),
                                   batch_size=args['batch_size'],
                                   num_workers=args['num_workers'], shuffle=False)
    model = deeplabGanWithRefine()
    model.initialize(args)

    # multi GPUS
    # model = torch.nn.DataParallel(model,device_ids=args['device_ids']).cuda()
    Iter = 0
    # Optionally resume from a checkpoint path given in args['resume'].
    if args['resume']:
        if os.path.isfile(args['resume']):
            logger.info("=> loading checkpoint '{}'".format(args['resume']))
            model.load(args['resume'])
        else:
            print("=> no checkpoint found at '{}'".format(args['resume']))

    # Best-so-far avg_f1score on domain B, without (Ori) and with (Ada)
    # adaptation; used for checkpoint selection below.
    best_Ori_on_B = 0
    best_Ada_on_B = 0
    model.train()
    for epoch in range(args['n_epoch']):
        # train(A_train_loader, B_train_loader, model, epoch)
        # switch to train mode
        for i, (A_image, A_label) in enumerate(A_train_loader):
            Iter += 1
            # NOTE(review): iter(B_train_loader) builds a fresh iterator
            # every step, so only the first (shuffled) batch of each new
            # iterator is drawn — wasteful worker startup; confirm intended.
            B_image = next(iter(B_train_loader))
            model.set_input({'A': A_image, 'A_label': A_label, 'B': B_image})
            model.optimize_parameters()
            output = model.output
            if i % args['print_freq'] == 0:
                # Per-batch metrics over the current output only.
                matrix = ConfusionMatrix()
                update_confusion_matrix(matrix, output.data, A_label)
                logger.info('Time: {time}\t'
                      'Epoch/Iter: [{epoch}/{Iter}]\t'
                      'loss: {loss:.4f}\t'
                      'loss_R: {loss_R:.4f}\t'
                      'acc: {accuracy:.4f}\t'
                      'fg_acc: {fg_accuracy:.4f}\t'
                      'avg_prec: {avg_precision:.4f}\t'
                      'avg_rec: {avg_recall:.4f}\t'
                      'avg_f1: {avg_f1core:.4f}\t'
                      'loss_G: {loss_G:.4f}\t'
                      'loss_D: {loss_D:.4f}\t'.format(
                    time=time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime()),
                    epoch=epoch, Iter=Iter, loss=model.loss_P.data[0],
                    loss_R=model.loss_R.data[0], accuracy=matrix.accuracy(),
                    fg_accuracy=matrix.fg_accuracy(), avg_precision=matrix.avg_precision(),
                    avg_recall=matrix.avg_recall(), avg_f1core=matrix.avg_f1score(),
                    loss_G=model.loss_G.data[0], loss_D=model.loss_D.data[0]))

            # Periodic validation + checkpointing every 1000 iterations.
            if Iter % 1000 == 0:
                model.eval()
                # Validate on A and B without adaptation, and on B with it.
                acc_Ori_on_A = validate(A_val_loader, model, nn.CrossEntropyLoss(size_average=False), False)
                acc_Ori_on_B = validate(B_val_loader, model, nn.CrossEntropyLoss(size_average=False), False)
                acc_Ada_on_B = validate(B_val_loader, model, nn.CrossEntropyLoss(size_average=False), True)
                prec_Ori_on_B = acc_Ori_on_B['avg_f1score']
                prec_Ada_on_B = acc_Ada_on_B['avg_f1score']

                # Save whenever the unadapted B score improves.
                is_best = prec_Ori_on_B > best_Ori_on_B
                best_Ori_on_B = max(prec_Ori_on_B, best_Ori_on_B)
                if is_best:
                    model.save('best_Ori_on_B', Iter=Iter, epoch=epoch, acc={'acc_Ori_on_A':acc_Ori_on_A, 'acc_Ori_on_B':acc_Ori_on_B, 'acc_Ada_on_B':acc_Ada_on_B})

                # Save whenever the adapted B score improves.
                is_best = prec_Ada_on_B > best_Ada_on_B
                best_Ada_on_B = max(prec_Ada_on_B, best_Ada_on_B)
                if is_best:
                    model.save('best_Ada_on_B', Iter=Iter, epoch=epoch, acc={'acc_Ori_on_A':acc_Ori_on_A, 'acc_Ori_on_B':acc_Ori_on_B, 'acc_Ada_on_B':acc_Ada_on_B})
                # Back to training mode after validation.
                model.train()
コード例 #8
0
from sentiment_classifier import SentimentClassifier

from data import create_bows_from_path
from util.confusion_matrix import ConfusionMatrix
from util.object_util import load_object

# Paths to the serialized model and the held-out test documents.
MODEL_PATH = "../data/model/"
TEST_DATA_PATH = "../data/test/"

# load model and prepare a confusion matrix over its class labels
bayes: SentimentClassifier = load_object(MODEL_PATH + "classifier.model")
cf: ConfusionMatrix = ConfusionMatrix(bayes.classes)

# Score both test partitions.  The original duplicated the positive and
# negative code paths (including dead `... = []` pre-initialisations that
# were immediately overwritten); one loop over (true label, subdirectory)
# pairs does the same work in the same order.
for true_class, subdir in (("positive", "pos/"), ("negative", "neg/")):
    for bow in create_bows_from_path(TEST_DATA_PATH + subdir):
        clazz: str = bayes.predict_class(bow)
        cf.add_prediction(true_class, clazz)