Example #1
def different_classification(csv_file,
                             train_dict,
                             need_classes,
                             batch_size=30,
                             plot=True,
                             verbose=0):
    '''
    train_dict maps each group id to its split: 0 for test, 1 for training.
    '''

    # Load data
    embeddings, labels = loadData(csv_file, dtypes)

    # Prepare data
    X_train, X_test, y_train, y_test = splitGroups(embeddings, labels,
                                                   need_classes, classes,
                                                   train_dict)

    output_size = len(np.unique(list(need_classes.values())))

    # A fixed class count keeps the train/test one-hot widths consistent.
    y_train = to_categorical(y_train, output_size)
    y_test = to_categorical(y_test, output_size)

    classifier(X_train,
               X_test,
               y_train,
               y_test,
               output_size,
               'test',
               batch_size=batch_size,
               plot=plot,
               verbose=verbose)
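
For orientation, a minimal sketch of how this helper might be invoked; the CSV path, class mapping, and split assignment below are hypothetical:

# Hypothetical call: collapse the raw labels into two target classes and
# send group 2 to the test split, groups 1 and 3 to training.
different_classification('embeddings.csv',
                         train_dict={1: 1, 2: 0, 3: 1},
                         need_classes={'cat': 0, 'dog': 1},
                         batch_size=64,
                         plot=False,
                         verbose=1)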
Example #2
def simple_classification(csv_file,
                          need_classes,
                          title,
                          batch_size=30,
                          plot=True,
                          verbose=0):

    # Load data
    embeddings, labels = loadData(csv_file, dtypes)

    # Prepare data
    labels = changeGroups(labels, classes, need_classes)
    labels = to_categorical(labels)

    output_size = len(np.unique(list(need_classes.values())))

    # Split data
    X_train, X_test, y_train, y_test = train_test_split(embeddings,
                                                        labels,
                                                        test_size=0.33)

    classifier(X_train,
               X_test,
               y_train,
               y_test,
               output_size,
               title,
               batch_size=batch_size,
               plot=plot,
               verbose=verbose)
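
Note that train_test_split shuffles at random, so results differ between runs. A sketch of a reproducible, class-balanced variant of the split above, stratifying on the integer ids recovered from the one-hot labels:

from sklearn.model_selection import train_test_split

# Fix the seed and stratify so each class keeps its proportion in both splits.
X_train, X_test, y_train, y_test = train_test_split(
    embeddings,
    labels,
    test_size=0.33,
    stratify=labels.argmax(axis=1),
    random_state=42)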
Example #3
def split_classes(csv_file):

    embeddings, labels = loadData(csv_file, dtypes)

    data_dict = {}

    for cl, cl_id in classes.items():

        embeddings_cl, labels_cl = onlyGroup(embeddings, labels, [cl_id], classes)

        X_train, X_test, y_train, y_test = train_test_split(embeddings_cl,
                                                            labels_cl,
                                                            test_size=250)

        data_dict[cl] = {'train': [X_train, y_train], 'test': [X_test, y_test]}

    return data_dict
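
A sketch of consuming the returned structure; the CSV filename is hypothetical:

data = split_classes('embeddings.csv')
for cl, splits in data.items():
    X_train, y_train = splits['train']
    X_test, y_test = splits['test']
    # test_size=250 above yields a fixed 250-sample test set per class
    print(cl, len(X_train), 'train /', len(X_test), 'test')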
Example #4
def main():
    args = getArguments()
    SPLIT = 100

    #
    # prepare DATA
    #
    print "Load Data"
    X, char_dict = loadData()
    X = X[:100]

    print "load existing model: ", args.model
    autoencoder = load_model(args.model)

    y_hat = autoencoder.predict(X)

    for idx in range(X.shape[0]):
        x1 = decode(np.argmax(X[idx], axis=1), char_dict)
        x2 = decode(np.argmax(y_hat[idx], axis=1), char_dict)
        print "".join(x1), "".join(x2)
        raw_input()
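
The loop above recovers text by taking the argmax over the one-hot character axis and mapping the indices back through char_dict. The same decode step in isolation, with a hypothetical three-character dictionary:

import numpy as np

char_dict = {0: 'a', 1: 'b', 2: 'c'}           # hypothetical mapping
one_hot = np.eye(3)[[0, 2, 1]]                 # one-hot rows encoding "acb"
indices = np.argmax(one_hot, axis=1)           # array([0, 2, 1])
print("".join(char_dict[i] for i in indices))  # prints "acb"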
Example #6
                            os.path.join(args.save_dir, "collect_score"))


if __name__ == '__main__':
    args = parse_args()

    utils.mkdir(args.save_dir)

    # cls and sord
    print("Loading model weight......")
    model = MobileNetV2(num_classes=args.num_classes)

    saved_state_dict = torch.load(args.snapshot)
    model.load_state_dict(saved_state_dict)
    model.cuda(0)

    model.eval()  # Change model to 'eval' mode (BN uses moving mean/var).

    softmax = nn.Softmax(dim=1).cuda(0)

    # test dataLoader
    test_loader = loadData(args.test_data, args.input_size, args.batch_size,
                           args.num_classes, False)

    # testing
    print('Ready to test network......')

    if args.collect_score:
        utils.mkdir(os.path.join(args.save_dir, "collect_score"))
    test(model, test_loader, softmax, args)
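
Since model.eval() only switches layer behavior (e.g. BatchNorm to its running statistics), inference code typically also disables gradient tracking. A minimal sketch of that pairing, with a hypothetical batch structure:

model.eval()
with torch.no_grad():                  # skip autograd bookkeeping at test time
    for images, *rest in test_loader:  # hypothetical batch structure
        outputs = model(images.cuda(0))
        # ... accumulate predictions/metrics from outputs ...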
Example #7
def train():
    """
    :return:
    """
    # create model
    model = MobileNetV2(args.num_classes, width_mult=args.width_mult)

    # load pre-trained weights
    logger.logger.info("Loading PreTrained Weight".center(100, '='))
    utils.load_filtered_stat_dict(
        model, model_zoo.load_url(model_urls["mobilenet_v2"]))

    # loading data
    logger.logger.info("Loading data".center(100, '='))
    train_data_loader, valid_data_loader = loadData(args.train_data,
                                                    args.input_size,
                                                    args.batch_size,
                                                    args.num_classes)
    print()

    # initialize loss function
    cls_criterion = nn.BCEWithLogitsLoss().cuda(0)
    reg_criterion = nn.MSELoss().cuda(0)
    softmax = nn.Softmax(dim=1).cuda(0)
    model.cuda(0)

    # training
    logger.logger.info("Training".center(100, '='))

    # initialize learning rate and step
    lr = args.lr
    step = 0
    for epoch in range(args.epochs + 1):
        print("Epoch:", epoch)
        if epoch > args.unfreeze:
            optimizer = torch.optim.Adam(
                [{
                    "params": get_non_ignored_params(model),
                    "lr": lr
                }, {
                    "params": get_cls_fc_params(model),
                    "lr": lr
                }],
                lr=args.lr)
        else:
            optimizer = torch.optim.Adam(
                [{
                    "params": get_non_ignored_params(model),
                    "lr": 0
                }, {
                    "params": get_cls_fc_params(model),
                    "lr": lr * 5
                }],
                lr=args.lr)
        lr = lr * args.lr_decay
        min_degree_error = 180.
        for i, (images, classify_label, vector_label,
                name) in enumerate(train_data_loader):
            step += 1
            images = images.cuda(0)
            classify_label = classify_label.cuda(0)
            vector_label = vector_label.cuda(0)

            # inference
            x_cls_pred, y_cls_pred, z_cls_pred = model(images)
            logits = [x_cls_pred, y_cls_pred, z_cls_pred]
            loss, degree_error = utils.computeLoss(classify_label,
                                                   vector_label, logits,
                                                   softmax, cls_criterion,
                                                   reg_criterion, args)

            #print(loss)
            # backward
            grad = [torch.tensor(1.0).cuda(0) for _ in range(3)]
            optimizer.zero_grad()
            torch.autograd.backward(loss, grad)
            optimizer.step()

            # save training log and weight
            if (i + 1) % 10 == 0:
                msg = "Epoch: %d/%d | Iter: %d/%d | x_loss: %.6f | y_loss: %.6f | z_loss: %.6f | degree_error:%.3f" % (
                    epoch, args.epochs, i + 1, len(train_data_loader.dataset)
                    // args.batch_size, loss[0].item(), loss[1].item(),
                    loss[2].item(), degree_error.item())
                logger.logger.info(msg)
                valid_degree_error = valid(model, valid_data_loader, softmax)

                # writer summary
                writer.add_scalar("train degrees error", degree_error, step)
                writer.add_scalar("valid degrees error", valid_degree_error,
                                  step)

                # saving snapshot
                if valid_degree_error < min_degree_error:
                    min_degree_error = valid_degree_error
                    logger.logger.info(
                        "A better validation degrees error {}".format(
                            valid_degree_error))
                    torch.save(
                        model.state_dict(),
                        os.path.join(
                            snapshot_dir,
                            output_string + '_epoch_' + str(epoch) + '.pkl'))
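
The backward step above seeds torch.autograd.backward with one gradient per loss term, which is equivalent to backpropagating the sum of the losses. A self-contained sketch of that mechanism:

import torch

x = torch.tensor([1.0, 2.0], requires_grad=True)
losses = [x.sum(), (x ** 2).sum()]           # two scalar losses
seeds = [torch.tensor(1.0) for _ in losses]  # one gradient seed per loss
torch.autograd.backward(losses, seeds)       # same as (sum of losses).backward()
print(x.grad)                                # tensor([3., 5.])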
Example #8
from numpy import empty, inf, zeros, array, abs, count_nonzero
from matplotlib.pyplot import ion, draw, plot, savefig
from cv2 import imwrite, waitKey
from LogisticRegression import LogisticRegression as LogReg
from theano import function, pp, config as cfg
from time import sleep
# cfg.openmp = True
import theano.tensor as T
from dataset import loadData, OneToMany
from visual import visualize

Tr, Ts, _ = loadData('mnist.pkl.gz', True)
m_sample = Tr[0].shape[0]
m_test_sample = Ts[1].shape[0]

x, y = T.dmatrices('x', 'y')
L = LogReg(x, 784, 10)
lam = 0.04

p = L.predict()
l = L.cost(y) + L.regularizer(lam)
gw = T.grad(l, wrt=L.W)
gb = T.grad(l, wrt=L.B)
alpha = 0.05

W_shape = L.weightShapes()[0]
B_shape = L.weightShapes()[1]
VW = zeros(W_shape)
VB = zeros(B_shape)

train = function([x, y], [l, gw, gb])
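
The snippet compiles a function returning the loss and both gradients, and allocates momentum buffers VW and VB, but the update loop itself is not shown. A sketch of such a loop, assuming OneToMany one-hot encodes the labels and L.W / L.B are theano shared variables (zeta is a hypothetical momentum coefficient):

zeta = 0.9                           # hypothetical momentum coefficient
Y_onehot = OneToMany(Tr[1], 10)      # assumed: one-hot encoding of labels
for epoch in range(100):
    l_val, gw_val, gb_val = train(Tr[0], Y_onehot)
    VW = zeta * VW - alpha * gw_val  # classical momentum update
    VB = zeta * VB - alpha * gb_val
    L.W.set_value(L.W.get_value() + VW)
    L.B.set_value(L.B.get_value() + VB)
    print('epoch %d, cost %f' % (epoch, l_val))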
Example #9
save best model parameters
"""

import torch
import zoo

import numpy as np

from dataset import loadData
from crossvalidation import crossval_proc

minibatchsize = 64
NcrossVal = 5

# trainloader, validationloader, testloader
dataloaders = loadData(minibatchsize)

nn = zoo.CNN_class

model_param = {
    #"nClass": 10,
    "hiddenCells": 25
}

train_param = {
    # Adam optimizer
    # "optimizer": torch.optim.Adam,
    # "opt_param": {"lr": 0.001},

    # Stochastic Gradient Descent Optimizer
    "optimizer": torch.optim.SGD,
Example #10
from matplotlib.pyplot import ion, draw, plot, savefig
from ConvNet import LeConvNet
from theano import function, pp, tensor as T
from dataset import loadData, OneToMany
from numpy import zeros, abs, array, count_nonzero
Tr, Ts, _ = loadData('mnist.pkl.gz', True)
m_sample = 10000
m_test_sample = 500
X = Tr[0].reshape((50000, 1, 28, 28))[:m_sample, :, :, :]
Y = Tr[1][:m_sample]
Xt = Ts[0].reshape((10000, 1, 28, 28))[:m_test_sample, :, :, :]
Yt = Ts[1][:m_test_sample]
del Tr, Ts

# Hyper-parameters
alpha = 0.5
zeta = 0.99
lam = 0.01

x = T.tensor4('x')
y = T.dmatrix('y')
LCV = LeConvNet(x)
cost = LCV.cost(y) + LCV.regularization() * lam
params = LCV.params()

grads = [T.grad(cost, wrt=p) for p in params]
V = [zeros(shape) for shape in LCV.paramshapes()]
train = function([x, y], [cost] + grads)

p = LCV.predict()
predict = function([x], p)
print('Functions compiled')
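
A hypothetical check of test error with the compiled predict function, assuming p yields per-class scores so the argmax is the predicted label:

preds = predict(Xt)
n_wrong = count_nonzero(preds.argmax(axis=1) - Yt)
print('test error: %.2f%%' % (100.0 * n_wrong / m_test_sample))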
Example #11
def train(net, bins, alpha, beta, batch_size):
    """
    params: 
          bins: number of bins for classification
          alpha: regression loss weight
          beta: ortho loss weight
    """
    # create model
    if net == "resnet50":
        model = ResNet(torchvision.models.resnet50(pretrained=True),
                       num_classes=bins)
        lr = args.lr_resnet
    else:
        model = MobileNetV2(bins)
        lr = args.lr_mobilenet

    # loading data
    logger.logger.info("Loading data".center(100, '='))
    train_data_loader = loadData(args.train_data, args.input_size, batch_size,
                                 bins)
    valid_data_loader = loadData(args.valid_data, args.input_size, batch_size,
                                 bins, False)

    # initialize cls loss function
    if args.cls_loss == "KLDiv":
        cls_criterion = nn.KLDivLoss(reduction='batchmean').cuda(0)
    elif args.cls_loss == "BCE":
        cls_criterion = nn.BCELoss().cuda(0)

    # initialize reg loss function
    reg_criterion = nn.MSELoss().cuda(0)
    softmax = nn.Softmax(dim=1).cuda(0)
    model.cuda(0)

    # training log
    logger.logger.info("Training".center(100, '='))

    # initialize the step counter (lr was already set per backbone above)
    step = 0

    # validation error
    min_avg_error = 1000.

    # start training
    for epoch in range(args.epochs):
        print("Epoch:", epoch)

        # learning rate initialization
        if net == 'resnet50':
            # the learning-rate groups are identical before and after the
            # unfreeze epoch for the resnet50 backbone
            optimizer = torch.optim.Adam(
                [{
                    "params": get_non_ignored_params(model, net),
                    "lr": lr
                }, {
                    "params": get_cls_fc_params(model),
                    "lr": lr * 10
                }],
                lr=args.lr_resnet)

        else:
            if epoch >= args.unfreeze:
                optimizer = torch.optim.Adam(
                    [{
                        "params": get_non_ignored_params(model, net),
                        "lr": lr
                    }, {
                        "params": get_cls_fc_params(model),
                        "lr": lr
                    }],
                    lr=args.lr_mobilenet)
            else:
                optimizer = torch.optim.Adam(
                    [{
                        "params": get_non_ignored_params(model, net),
                        "lr": lr * 10
                    }, {
                        "params": get_cls_fc_params(model),
                        "lr": lr * 10
                    }],
                    lr=args.lr_mobilenet)

        # reduce lr by lr_decay factor for each epoch
        lr = lr * args.lr_decay
        print("------------")

        for i, (images, cls_v1, cls_v2, cls_v3, reg_v1, reg_v2, reg_v3,
                name) in enumerate(train_data_loader):
            step += 1
            images = images.cuda(0)

            # get classified labels
            cls_v1 = cls_v1.cuda(0)
            cls_v2 = cls_v2.cuda(0)
            cls_v3 = cls_v3.cuda(0)

            # get continuous labels
            reg_v1 = reg_v1.cuda(0)
            reg_v2 = reg_v2.cuda(0)
            reg_v3 = reg_v3.cuda(0)

            # inference
            x_pred_v1, y_pred_v1, z_pred_v1, x_pred_v2, y_pred_v2, z_pred_v2, x_pred_v3, y_pred_v3, z_pred_v3 = model(
                images)

            logits = [
                x_pred_v1, y_pred_v1, z_pred_v1, x_pred_v2, y_pred_v2,
                z_pred_v2, x_pred_v3, y_pred_v3, z_pred_v3
            ]

            loss, degree_error_v1, degree_error_v2, degree_error_v3 = utils.computeLoss(
                cls_v1, cls_v2, cls_v3, reg_v1, reg_v2, reg_v3, logits,
                softmax, cls_criterion, reg_criterion, [
                    bins, alpha, beta, args.cls_loss, args.reg_loss,
                    args.ortho_loss
                ])

            # backward
            grad = [torch.tensor(1.0).cuda(0) for _ in range(3)]
            optimizer.zero_grad()
            torch.autograd.backward(loss, grad)
            optimizer.step()

            # save training log and weight
            if (i + 1) % 100 == 0:
                msg = "Epoch: %d/%d | Iter: %d/%d | x_loss: %.6f | y_loss: %.6f | z_loss: %.6f | degree_error_f:%.3f | degree_error_r:%.3f | degree_error_u:%.3f" % (
                    epoch, args.epochs, i + 1, len(train_data_loader.dataset)
                    // batch_size, loss[0].item(), loss[1].item(),
                    loss[2].item(), degree_error_v1.item(),
                    degree_error_v2.item(), degree_error_v3.item())
                logger.logger.info(msg)

        # Test on validation dataset
        error_v1, error_v2, error_v3 = valid(model, valid_data_loader, softmax,
                                             bins)
        print("Epoch:", epoch)
        print("Validation Error:", error_v1.item(), error_v2.item(),
              error_v3.item())
        logger.logger.info("Validation Error(l,d,f)_{},{},{}".format(
            error_v1.item(), error_v2.item(), error_v3.item()))

        # save the model if it achieves a better validation performance
        avg_error = error_v1.item() + error_v2.item() + error_v3.item()
        if avg_error < min_avg_error:
            min_avg_error = avg_error
            print("Training Info:")
            print("Model:", net, " ", "Number of bins:", bins, " ", "Alpha:",
                  alpha, " ", "Beta:", beta)
            print("Saving Model......")
            torch.save(
                model.state_dict(),
                os.path.join(snapshot_dir, output_string + '_Best_' + '.pkl'))
            print("Saved")
Example #12
# 1. Prepare the data

# Define the code dictionaries
code2idx = {'bend': 0, 'unfold': 1}
idx2code = {
    0: 'zero',
    1: 'one',
    2: 'two',
    3: 'three',
    4: 'four',
    5: 'five',
    6: 'error'
}

# 2. Create the dataset
(x_train, t_train), (x_test, t_test) = loadData()

max_idx_value = 6

one_hot_vec_size = t_train.shape[1]

print("one hot encoding vector size is ", one_hot_vec_size)

# 3. Build the model
model = Sequential()
model.add(Dense(128, input_dim=5, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(one_hot_vec_size, activation='softmax'))

# 4. Configure the training process
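
The snippet stops before step 4. A typical continuation for a softmax classifier like this one would be (a sketch, not the original code):

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train, t_train, epochs=100, batch_size=32,
          validation_data=(x_test, t_test))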
Example #13
    print("Testing Accuracy: {}".format(test_acc/len(test_loader.dataset)))




if __name__ == "__main__":
    args = parse_args()
    img_dir = args.img_dir
    xml_dir = args.xml_dir
    input_size = args.input_size
    batch_size = args.batch_size
    num_classes = args.num_classes
    snapshot = args.snapshot

    #model = ResNet(torchvision.models.resnet50(pretrained=False), num_classes)

    print("Loading weight......")
    #saved_state_dict = torch.load(snapshot)
    #model.load_state_dict(saved_state_dict)

    model = torch.load(snapshot)
    model.cuda(0)
    model.eval()

    test_loader = loadData(img_dir, xml_dir, input_size, batch_size, False)

    print("Start testing...")

    # run train function
    test(img_dir, xml_dir, input_size, batch_size, model, test_loader)
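
Note that torch.load(snapshot) here deserializes the whole pickled module, which requires the original model class to be importable at load time. The commented-out lines show the state_dict alternative; spelled out as a sketch using the names above:

# Sketch of the state_dict-based loading shown commented out above.
model = ResNet(torchvision.models.resnet50(pretrained=False), num_classes)
model.load_state_dict(torch.load(snapshot))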
Example #14
def train(img_dir, xml_dir, epochs, input_size, batch_size, num_classes):
    """
    params: 
          bins: number of bins for classification
          alpha: regression loss weight
          beta: ortho loss weight
    """
    # create model
    model = ResNet(torchvision.models.resnet50(pretrained=True),
                   num_classes=num_classes)

    cls_criterion = nn.CrossEntropyLoss().cuda(1)

    softmax = nn.Softmax(dim=1).cuda(1)
    model.cuda(1)

    # initialize learning rate and step
    lr = 0.001
    step = 0

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # load data
    train_data_loader = loadData(img_dir, xml_dir, input_size, batch_size,
                                 True)
    test_loader = loadData('../yolov3/data/test_imgs',
                           '../yolov3/data/test_anns', 224, 8, False)

    # variables
    history = []
    best_acc = 0.0
    best_epoch = 0

    # start training
    for epoch in range(epochs):
        print("Epoch:", epoch)
        print("------------")

        # reduce lr by lr_decay factor for each epoch
        if epoch % 10 == 0:
            lr = lr * 0.9

        train_loss = 0.0
        train_acc = 0
        val_acc = 0

        model.train()

        for i, (images, labels) in enumerate(train_data_loader):
            if i % 10 == 0:
                print("batch: {}/{}".format(
                    i,
                    len(train_data_loader.dataset) // batch_size))
            images = images.cuda(1)
            labels = labels.cuda(1)

            # backward
            optimizer.zero_grad()
            outputs = model(images)

            loss = cls_criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()

            ret, predictions = torch.max(outputs.data, 1)
            correct_counts = predictions.eq(labels.data.view_as(predictions))

            acc = torch.mean(correct_counts.type(torch.FloatTensor))

            train_acc += acc.item() * images.size(0)

        print("epoch: {:03d}, Training loss: {:.4f}, Accuracy: {:.4f}%".format(
            epoch + 1, train_loss, train_acc / 3096 * 100))

        #if (epoch+1) % 3 == 0:
        #    torch.save(model, 'models/'+'model_'+str(epoch+1)+'.pt')
        print("Start testing...")
        with torch.no_grad():
            model.eval()

            for j, (images, labels) in enumerate(test_loader):
                images = images.cuda(1)
                labels = labels.cuda(1)

                outputs = model(images)

                ret, preds = torch.max(outputs.data, 1)
                cnt = preds.eq(labels.data.view_as(preds))

                acc = torch.mean(cnt.type(torch.FloatTensor))
                val_acc += acc.item() * images.size(0)

            if val_acc > best_acc:
                print("correct testing samples:", val_acc)
                best_acc = val_acc
                torch.save(model,
                           'models/' + 'model_' + str(epoch + 1) + '.pt')
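
The accuracy bookkeeping above relies on torch.max returning a (values, indices) pair and eq producing a boolean tensor. The same pattern in isolation:

import torch

outputs = torch.tensor([[0.1, 0.9], [0.8, 0.2]])
labels = torch.tensor([1, 1])
_, preds = torch.max(outputs, 1)                   # indices of the max logits
correct = preds.eq(labels)                         # tensor([True, False])
acc = torch.mean(correct.type(torch.FloatTensor))  # tensor(0.5000)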
Example #15
if btlnkFeatures:
    (xTrain, yTrain, fnTrain, xTest, yTest, fnTest, xValid, yValid, fnValid,
     input_dim, fileNames) = dataset.loadBottleneckData(
         logger,
         normalize=True,
         sharedVariable=False,
         reshapeToChannels=True,
         n_context_frames=num_context_frames)

else:
    (xTrain, yTrain, fnTrain, xTest, yTest, fnTest, xValid, yValid, fnValid,
     input_dim,
     fileNames) = dataset.loadData(logger,
                                   normalize=normalization,
                                   sharedVariable=False,
                                   reshapeToChannels=True,
                                   n_context_frames=num_context_frames)

if logger.isFirstEpoch():
    logger.printInfo('model   :\t{}'.format(model))
    logger.printInfo('n epochs:\t{}'.format(num_epochs))
    logger.printInfo('l rate  :\t{}'.format(learning_rate))
    logger.printInfo('n cntx  :\t{}'.format(num_context_frames))
    logger.printInfo('n btlnk :\t{}'.format(n_bottleneck))
    logger.printInfo('in_dim  :\t{}'.format(input_dim))

from mlp import build_model
build_model([(xTrain, yTrain), (xValid, yValid), (xTest, yTest)],
            model=model,
            input_dim=input_dim,
Example #16
def train():
    """
    :return:
    """
    # create model
    model = vgg19_bn()

    if torch.cuda.device_count() > 1:
        model = nn.DataParallel(model)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = model.to(device)

    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('The number of parameters of model is', num_params)

    # load pre-trained weights (left disabled in this variant)
    #logger.logger.info("Loading PreTrained Weight".center(100, '='))
    #utils.load_filtered_stat_dict(model, model_zoo.load_url(model_urls["mobilenet_v2"]))

    # loading data
    logger.logger.info("Loading data".center(100, '='))
    train_data_loader, valid_data_loader = loadData(args.train_data,
                                                    args.input_size,
                                                    args.batch_size,
                                                    args.num_classes)
    print()

    # initialize loss function
    cls_criterion = nn.BCEWithLogitsLoss()
    reg_criterion = nn.MSELoss()
    softmax = nn.Softmax(dim=1)
    #model.to(device)

    # training
    logger.logger.info("Training".center(100, '='))

    # initialize learning rate and step
    lr = args.lr
    step = 0

    for epoch in range(args.epochs + 1):
        print("Epoch:", epoch)
        if epoch > args.unfreeze:
            optimizer = torch.optim.Adam([{"params": get_non_ignored_params(model), "lr": lr},
                                          {"params": get_cls_fc_params(model), "lr": lr}], lr=args.lr)
        else:
            optimizer = torch.optim.Adam([{"params": get_non_ignored_params(model), "lr": lr},
                                          {"params": get_cls_fc_params(model), "lr": lr * 10}], lr=args.lr)
        lr = lr * args.lr_decay
        min_degree_error = 180.
        for i, (images, cls_label_f, cls_label_r, cls_label_u, vector_label_f,
                vector_label_r, vector_label_u,
                name) in enumerate(train_data_loader):
            step += 1
            images = images.to(device)
            #classify_label = classify_label.cuda(0)
            #vector_label = vector_label.cuda(0)
            cls_label_f = cls_label_f.to(device)
            cls_label_r = cls_label_r.to(device)
            cls_label_u = cls_label_u.to(device)

            vector_label_f = vector_label_f.to(device)
            vector_label_r = vector_label_r.to(device)
            vector_label_u = vector_label_u.to(device)
            # inference
            (x_cls_pred_f, y_cls_pred_f, z_cls_pred_f,
             x_cls_pred_r, y_cls_pred_r, z_cls_pred_r,
             x_cls_pred_u, y_cls_pred_u, z_cls_pred_u) = model(images)

            logits = [x_cls_pred_f, y_cls_pred_f, z_cls_pred_f,
                      x_cls_pred_r, y_cls_pred_r, z_cls_pred_r,
                      x_cls_pred_u, y_cls_pred_u, z_cls_pred_u]

            loss, degree_error_f, degree_error_r, degree_error_u = utils.computeLoss(cls_label_f, cls_label_r, cls_label_u,
                vector_label_f, vector_label_r, vector_label_u, 
                logits, softmax, cls_criterion, reg_criterion, args)

            #print(loss)
            # backward
            grad = [torch.tensor(1.0).to(device) for _ in loss]  # one seed per loss term
            optimizer.zero_grad()
            torch.autograd.backward(loss, grad)
            optimizer.step()

            # save training log and weight
            if (i + 1) % 50 == 0:
                msg = "Epoch: %d/%d | Iter: %d/%d | x_loss: %.6f | y_loss: %.6f | z_loss: %.6f | degree_error_f:%.3f | degree_error_r:%.3f | degree_error_u:%.3f"  % (
                    epoch, args.epochs, i + 1, len(train_data_loader.dataset) // args.batch_size, loss[0].item()+loss[3].item()+loss[6].item(), loss[1].item()+loss[4].item()+loss[7].item(),
                    loss[2].item()+loss[5].item()+loss[8].item(), degree_error_f.item(), degree_error_r.item(), degree_error_u.item())
                logger.logger.info(msg)
                valid_degree_error_f, valid_degree_error_r, valid_degree_error_u = valid(model, valid_data_loader, softmax)

                # writer summary
                writer.add_scalar("train degrees error", degree_error_f, step)
                writer.add_scalar("valid degrees error", valid_degree_error_f, step)

                # saving snapshot
                if (valid_degree_error_f + valid_degree_error_r +
                        valid_degree_error_u) < min_degree_error:
                    min_degree_error = (valid_degree_error_f +
                                        valid_degree_error_r +
                                        valid_degree_error_u)
                    logger.logger.info(
                        "A better validation degrees error {}".format(
                            min_degree_error))
                    torch.save(
                        model.state_dict(),
                        os.path.join(
                            snapshot_dir,
                            output_string + '_epoch_' + str(epoch) +
                            '_constrain_a=0.075' + '.pkl'))
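
One loading caveat: because the model may be wrapped in nn.DataParallel here, the saved state_dict keys carry a 'module.' prefix that a bare model will not match. A sketch of stripping it on load (the snapshot path is hypothetical):

state = torch.load('snapshot.pkl')  # hypothetical path
state = {k.replace('module.', '', 1): v for k, v in state.items()}
model.load_state_dict(state)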