示例#1
0
def mainClassification():
    """Train the image-classification model.

    Reads config/classification.config, builds the model (restoring
    weights from a checkpoint when one exists), creates the train and
    validation data loaders, and delegates to trainClassification().
    """
    # Get configurations
    config = get_config_from_json('config/classification.config')

    # Loading Model
    model = ClassificationModel(config)

    if torch.cuda.is_available():
        model.cuda()

    model.load()

    # Best-effort resume: restore weights only when the checkpoint file
    # exists; otherwise training starts from the current weights.
    if os.path.isfile(config['model']['checkpoint']):
        model.load_state_dict(torch.load(config['model']['checkpoint']))
        # Fixed typo in the original message ('Weigths').
        print('Weights loaded from checkpoint : %s' %
              config['model']['checkpoint'])

    # Loading Dataset
    train_dataset = ClassificationTrainDataset(config)
    val_dataset = ClassificationValDataset(config)

    # Creating Data Loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['model']['batch_size'])

    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=config['model']['batch_size'])

    # Train model
    trainClassification(config, model, train_loader, val_loader)
示例#2
0
def main():
    """Evaluate the trained CNN model on the held-out test split."""
    # Load config
    config = get_config_from_json('config/model.config')

    # Load data -- only the test split is used here, so discard the
    # train/CV splits instead of binding four unused names.
    *_, X_test, Y_test = load_data(0.18)

    # Generate dataset
    test_dataset = MyDataset(X_test, Y_test)

    # Create Data Loaders
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config.model.batch_size,
                             shuffle=False)

    # Build model
    model = CNNModel()
    model = model.double()
    model.eval()  # inference mode for evaluation

    # Best-effort restore of trained weights.
    if os.path.isfile(config.model.path):
        model.load_state_dict(torch.load(config.model.path))
        print('Loaded checkpoint..')
    else:
        print('checkpoint not found..')

    evaluate(model, test_loader)
示例#3
0
def mainClassification():
    """Train the CNN classification model on the train/CV splits."""
    # Load config
    config = get_config_from_json('config/modelClassification.config')

    # Load data -- the test split is not used during training, so
    # discard it instead of binding two unused names.
    X_train, Y_train, X_CV, Y_CV, *_ = load_data(0.18)

    # Generate dataset
    train_dataset = MyDataset(X_train, Y_train)
    val_dataset = MyDataset(X_CV, Y_CV)

    # Create Data Loaders (shuffle only the training data)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config.model.batch_size,
                              shuffle=True)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=config.model.batch_size,
                            shuffle=False)

    # Build model
    model = CNNModel()
    model = model.double()
    print(model)

    # Train model
    train(model, config, train_loader, val_loader)
def testClassification():
    """Run the VGG16 classifier over the test set and print accuracy
    and average cross-entropy loss.

    Fixes vs. the original: the final accuracy is now computed over the
    FULL prediction lists after the loop (the original reused counters
    that were only refreshed every 5th batch, and raised NameError on an
    empty loader); the progress comprehension no longer shadows the
    loop index `i`; typo 'Weigths' corrected.
    """
    # Get configurations
    config = get_config_from_json('config/classification.config')

    # Loading Model
    model = VGG16(config)

    if torch.cuda.is_available():
        model = model.cuda()

    # Best-effort restore of trained weights.
    if os.path.isfile(config['model']['checkpoint']):
        model.load_state_dict(torch.load(config['model']['checkpoint']))
        print('Weights loaded from checkpoint : %s' %
              config['model']['checkpoint'])
    else:
        print('############### No checkpoint found ####################')

    # Loading Dataset
    test_dataset = ClassificationTestDataset(config)

    # Creating Data Loader
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=config['model']['batch_size'],
                             num_workers=4)

    criterion = nn.CrossEntropyLoss()

    model.eval()
    y_pred, y_true = [], []
    loss_test = 0

    for i, (images, labels) in enumerate(test_loader):
        if torch.cuda.is_available():
            images = Variable(images.cuda())
            labels = Variable(labels.cuda())
        else:
            images = Variable(images)
            labels = Variable(labels)

        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)

        y_pred.extend(predicted.tolist())
        y_true.extend(labels.tolist())

        # Periodic progress report over all predictions seen so far.
        if i % 5 == 0:
            correct = sum(t == p for t, p in zip(y_true, y_pred))
            total = len(y_true)
            print(correct, total)
            print(i, '  ACC : ', 100.0 * correct / total)

        test_loss = criterion(outputs, labels.long())
        loss_test += test_loss.item()

    # Final metrics over the complete test set.
    total = len(y_true)
    correct = sum(t == p for t, p in zip(y_true, y_pred))
    if total:
        print('\nAcc : ', 100.0 * float(correct) / total)
    if len(test_loader):
        print('test loss : ', loss_test / len(test_loader))
示例#5
0
def mainSiamese():
    """Qualitative check of the Siamese model: plot one reference image
    plus nine candidates, each titled with its embedding distance to
    the reference.
    """
    # Load config
    config = get_config_from_json('config/modelSiamese.config')

    # Load data
    [X_train, Y_train, X_CV, Y_CV, X_test, Y_test] = load_data(0.18)

    # Generate dataset
    test_dataset = MySiameseDataset(X_test, Y_test)

    # Create Data Loaders
    test_loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=True)

    # Build model
    model = SiameseModel()
    model = model.double()
    print(model)
    model.eval()

    # Restore trained weights when a checkpoint is available.
    if not os.path.isfile(config.model.path):
        print('checkpoint not found..')
    else:
        model.load_state_dict(torch.load(config.model.path))
        print('Loaded checkpoint..')

    batches = iter(test_loader)
    reference, _, _ = next(batches)

    plt.tight_layout()

    # Reference image in the top row of the 4x3 grid.
    plt.subplot(4, 3, 2)
    plt.axis('off')
    plt.imshow(reference[0][0], 'gray')
    plt.title('Original Image', fontdict={'fontsize': 10})

    # Nine candidates in the remaining grid cells, titled with their
    # pairwise distance to the reference embedding.
    for slot in range(9):
        _, candidate, _ = next(batches)
        emb_ref, emb_cand = model(reference, candidate)

        emb_ref = emb_ref.type(torch.DoubleTensor)
        emb_cand = emb_cand.type(torch.DoubleTensor)

        distance = F.pairwise_distance(emb_ref, emb_cand)

        plt.subplot(4, 3, slot + 4)
        plt.axis('off')
        plt.imshow(candidate[0][0], 'gray')
        plt.title(str(round(distance.item(), 2)),
                  fontdict={'fontsize': 10})

    plt.show()
示例#6
0
def mainLRCN():
    """Train the LRCN model.

    Reads config/lrcn.config, builds the model in double precision
    (restoring checkpoint weights when available), creates the train
    and validation loaders, and delegates to trainLRCN().
    """
    # Get configurations
    config = get_config_from_json('config/lrcn.config')

    # Loading Model
    model = LRCNModel(config)
    model = model.double()

    if torch.cuda.is_available():
        model.cuda()

    model.load()

    print(model)

    # Best-effort resume: restore weights only when the checkpoint file
    # exists; otherwise training starts from the current weights.
    if os.path.isfile(config['model']['checkpoint']):
        model.load_state_dict(torch.load(config['model']['checkpoint']))
        # Fixed typo in the original message ('Weigths').
        print('Weights loaded from checkpoint : %s' %
              config['model']['checkpoint'])

    # Loading Dataset
    train_dataset = LRCNTrainDataset(config)
    val_dataset = LRCNValDataset(config)

    # Creating Data Loader
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=config['model']['batch_size'],
                              num_workers=1)

    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=config['model']['batch_size'],
                            num_workers=1)

    # Train model
    trainLRCN(config, model, train_loader, val_loader)
示例#7
0
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=config['model']['batch_size'],
                            num_workers=1)

    # Train model
    trainLRCN(config, model, train_loader, val_loader)


if __name__ == "__main__":
    mainLRCN()

import matplotlib.pyplot as plt
import numpy as np

# Ad-hoc inspection script: build the classification data loaders and
# pull a single training batch for manual examination.

# Get configurations
config = get_config_from_json('config/classification.config')

# Loading Dataset
train_dataset = ClassificationTrainDataset(config)
val_dataset = ClassificationValDataset(config)

# Creating Data Loader
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=config['model']['batch_size'])
val_loader = DataLoader(dataset=val_dataset,
                        batch_size=config['model']['batch_size'])

# Fetch one batch; x are the inputs and y the targets
# (presumably image tensors / class labels -- confirm against the
# dataset class, which is defined elsewhere).
it = iter(train_loader)
x, y = next(it)

示例#8
0
    os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    import numpy as np

    opt = parse_option()

    Seeds = [0, 1, 2, 3, 4]
    Runs = range(0, 10, 1)

    exp = 'exp-linear-evaluation'
    exp_ckpt = 'linear_eval'
    backname = 'last'

    aug1 = 'magnitude_warp'
    aug2 = 'time_warp'

    config_dict = get_config_from_json('{}/{}_config.json'.format(
        opt.config_dir, opt.dataset_name))

    opt.class_type = config_dict['class_type']
    opt.piece_size = config_dict['piece_size']

    if opt.model_name == 'InterSample':
        model_paras = 'none'
    else:
        model_paras = '{}_{}'.format(opt.piece_size, opt.class_type)

    if aug1 == aug2:
        opt.aug_type = [aug1]
    elif type(aug1) is list:
        opt.aug_type = aug1 + aug2
    else:
        opt.aug_type = [aug1, aug2]
"""
import os
import pandas as pd
from sklearn.metrics import f1_score, accuracy_score

# Pin this process to GPU 3 *before* torch is imported so CUDA only
# ever sees that device.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

import torch

from data_loader.data_loader import load_data
from utils.load_model_optimizer_lr import load_model_op_lr
from utils.get_result import get_result
from utils.utils import get_config_from_json

config_file = 'config1.json'
config = get_config_from_json(config_file)

# Echo the full configuration for the run log.
for k, v in config.items():
    print(k, ': ', v)

# 1. Load the data (single loader when training only, otherwise
#    train/valid/test loaders).
print('loading data.')
if config['only_train']:
    train_loader = load_data(config)
else:
    train_loader, valid_loader, test_loader = load_data(config)
print('loaded data.')

# 2. Load the model, optimizer, loss function and LR scheduler.
sentiment_model, optimizer, criterion, scheduler = load_model_op_lr(config, len(train_loader))