def get_recognition_model():
    from model import model
    model = model.WaterMeterModel()
    checkpoint = torch.load('./models/water_meter_recognition.pth',
                            map_location='cpu')
    state_dict = checkpoint['state_dict']
    model.load_state_dict(state_dict)

    model.eval()
    return model
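
Both loaders in this section assume the checkpoint wraps the weights under a 'state_dict' key. A hedged helper that also accepts a raw state_dict (the function name and file path below are illustrative, not from the original code):

import torch

def load_weights(model, path):
    # Load on CPU first; move the model to a GPU afterwards if needed.
    checkpoint = torch.load(path, map_location='cpu')
    # Some checkpoints are plain state_dicts, others wrap them in a dict.
    state_dict = checkpoint['state_dict'] if 'state_dict' in checkpoint else checkpoint
    model.load_state_dict(state_dict)
    model.eval()
    return model
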
Example #2
def predict(json_map, checkpoint, image_path, model, topk, processor):
    '''Predict the class (or classes) of an image using a trained deep learning model.'''
    # Test out your network!

    if processor == 'CPU':
        checkpoint_information = torch.load(checkpoint, map_location='cpu')
    else:
        checkpoint_information = torch.load(checkpoint)

    model.load_state_dict(checkpoint_information['state_dict'])
    model.eval()
    a = process_image(image_path)
    y = np.expand_dims(a, axis=0)
    if processor == 'CPU':
        img = torch.from_numpy(y)
    else:
        img = torch.from_numpy(y).cuda()
    with torch.no_grad():
        output = model.double()(img)
    ps = torch.exp(output)
    ps_top5 = torch.topk(ps, topk)
    probs = ps_top5[0]
    classes = ps_top5[1]

    import json
    with open(json_map, 'r') as f:
        cat_to_name = json.load(f)

    data_dir = 'flowers'
    train_dir = data_dir + '/train'
    train_transforms = transforms.Compose([
        transforms.RandomRotation(25),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    class_to_idx_dict = train_data.class_to_idx
    key_value_exchange_dict = dict((v, k) for k, v in class_to_idx_dict.items())

    probabilities = probs.data.cpu().numpy().tolist()[0]
    plant_classes = classes.data.cpu().numpy().tolist()[0]
    for i in range(len(probabilities)):
        plant_classes[i] = key_value_exchange_dict[plant_classes[i]]

    return probabilities, plant_classes, cat_to_name
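
A short usage sketch for the values returned above; the JSON map, checkpoint and image paths are placeholders rather than files from the original example:

probs, classes, cat_to_name = predict('cat_to_name.json', 'checkpoint.pth',
                                      'flowers/test/1/image_06743.jpg',
                                      model, topk=5, processor='CPU')
names = [cat_to_name[c] for c in classes]
for name, p in zip(names, probs):
    print('{}: {:.3f}'.format(name, p))
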
def main():
    model = Net()
    if torch.cuda.is_available():
        model.cuda()
    model.apply(weights_init)

    if args.resume:
        if isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}'"
                  .format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Data processing
    # handled directly inside train()
    # dataParser = DataParser(batch_size)
    loss_function = nn.L1Loss()
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    # train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer,milestones=settings.MILESTONES,gamma=0.2)#learning rate decay
    scheduler = lr_scheduler.StepLR(optimizer, step_size=args.stepsize, gamma=args.gamma)

    log = Logger(join(TMP_DIR, '%s-%g-log.txt' % ('SGD', args.lr)))
    sys.stdout = log
    train_loss = []
    train_loss_detail = []

    for epoch in range(args.start_epoch, args.maxepoch):
        if epoch == 0:
            print("Performing initial testing...")
            # left empty for now

        tr_avg_loss, tr_detail_loss = train(model=model, optimizer=optimizer, epoch=epoch,
                                            save_dir=join(TMP_DIR, 'epoch-%d-training-record' % epoch))
        test()

        log.flush()
        # Save checkpoint
        save_file = os.path.join(TMP_DIR, 'checkpoint_epoch{}.pth'.format(epoch))
        save_checkpoint({'epoch': epoch, 'state_dict': model.state_dict(), 'optimizer': optimizer.state_dict()})

        scheduler.step()  # adjust the learning rate automatically
        train_loss.append(tr_avg_loss)
        train_loss_detail += tr_detail_loss
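
save_checkpoint above is defined elsewhere in that repository; a minimal sketch of what such a helper might look like, assuming it simply serializes the dict with torch.save (the filename parameter is an assumption; with this signature, the save_file computed above would be passed explicitly):

def save_checkpoint(state, filename='checkpoint.pth'):
    # Persist the epoch, model weights and optimizer state so training can resume.
    torch.save(state, filename)
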
Example #4
def main():
    """Run inference."""

    from model import model

    parser = argparse.ArgumentParser(
        description='PyTorch RNN Anomaly Detection Model')
    parser.add_argument('--prediction_window_size',
                        type=int,
                        default=10,
                        help='prediction_window_size')
    parser.add_argument(
        '--data',
        type=str,
        default='ecg',
        help='type of the dataset (ecg, gesture, power_demand, space_shuttle, respiration, nyc_taxi)')
    parser.add_argument('--filename',
                        type=str,
                        default='chfdb_chf13_45590.pkl',
                        help='filename of the dataset')
    parser.add_argument('--save_fig',
                        action='store_true',
                        help='save results as figures')
    parser.add_argument(
        '--compensate',
        action='store_true',
        help='compensate anomaly score using anomaly score estimation')
    parser.add_argument('--beta',
                        type=float,
                        default=1.0,
                        help='beta value for f-beta score')
    parser.add_argument('--device',
                        type=str,
                        default='cuda',
                        help='cuda or cpu')

    args_ = parser.parse_args()
    print('-' * 89)
    print("=> loading checkpoint ")
    if args_.device == 'cpu':
        checkpoint = torch.load(str(
            Path('save', args_.data, 'checkpoint',
                 args_.filename).with_suffix('.pth')),
                                map_location=torch.device('cpu'))
    else:
        checkpoint = torch.load(
            str(
                Path('save', args_.data, 'checkpoint',
                     args_.filename).with_suffix('.pth')))
    args = checkpoint['args']
    args.prediction_window_size = args_.prediction_window_size
    args.beta = args_.beta
    args.save_fig = args_.save_fig
    args.compensate = args_.compensate
    args.device = args_.device
    print("=> loaded checkpoint")

    # Set the random seed manually for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    ###############################################################################
    # Load data
    ###############################################################################
    TimeseriesData = preprocess_data.PickleDataLoad(data_type=args.data,
                                                    filename=args.filename,
                                                    augment_test_data=False)
    train_dataset = TimeseriesData.batchify(
        args, TimeseriesData.trainData[:TimeseriesData.length], bsz=1)
    test_dataset = TimeseriesData.batchify(args,
                                           TimeseriesData.testData,
                                           bsz=1)

    ###############################################################################
    # Build the model
    ###############################################################################
    nfeatures = TimeseriesData.trainData.size(-1)
    model = model.RNNPredictor(rnn_type=args.model,
                               enc_inp_size=nfeatures,
                               rnn_inp_size=args.emsize,
                               rnn_hid_size=args.nhid,
                               dec_out_size=nfeatures,
                               nlayers=args.nlayers,
                               res_connection=args.res_connection).to(
                                   args.device)
    model.load_state_dict(checkpoint['state_dict'])

    scores, predicted_scores, precisions, recalls, f_betas = [], [], [], [], []
    targets, mean_predictions, oneStep_predictions, Nstep_predictions = [], [], [], []
    try:
        # For each channel in the dataset
        for channel_idx in range(nfeatures):
            ''' 1. Load mean and covariance if they are pre-calculated, if not calculate them. '''
            # Mean and covariance are calculated on train dataset.
            if 'means' in checkpoint.keys() and 'covs' in checkpoint.keys():
                print('=> loading pre-calculated mean and covariance')
                mean, cov = checkpoint['means'][channel_idx], checkpoint[
                    'covs'][channel_idx]
            else:
                print('=> calculating mean and covariance')
                mean, cov = fit_norm_distribution_param(
                    args, model, train_dataset, channel_idx=channel_idx)
            ''' 2. Train anomaly score predictor using support vector regression (SVR). (Optional) '''
            # An anomaly score predictor is trained
            # given hidden layer output and the corresponding anomaly score on train dataset.
            # Predicted anomaly scores on test dataset can be used for the baseline of the adaptive threshold.
            if args.compensate:
                print('=> training an SVR as anomaly score predictor')
                train_score, _, _, hiddens, _ = anomalyScore(
                    args,
                    model,
                    train_dataset,
                    mean,
                    cov,
                    channel_idx=channel_idx)
                score_predictor = GridSearchCV(SVR(),
                                               cv=5,
                                               param_grid={
                                                   "C": [1e0, 1e1, 1e2],
                                                   "gamma":
                                                   np.logspace(-1, 1, 3)
                                               })
                score_predictor.fit(
                    torch.cat(hiddens, dim=0).numpy(),
                    train_score.cpu().numpy())
            else:
                score_predictor = None
            ''' 3. Calculate anomaly scores'''
            # Anomaly scores are calculated on the test dataset
            # given the mean and the covariance calculated on the train dataset
            print('=> calculating anomaly scores')
            score, sorted_prediction, sorted_error, _, predicted_score = anomalyScore(
                args,
                model,
                test_dataset,
                mean,
                cov,
                score_predictor=score_predictor,
                channel_idx=channel_idx)
            ''' 4. Evaluate the result '''
            # The obtained anomaly scores are evaluated by measuring precision, recall, and f_beta scores.
            # The precision, recall, and f_beta scores are calculated repeatedly,
            # sampling the threshold from 1 to the maximum anomaly score value, either equidistantly or logarithmically.
            print('=> calculating precision, recall, and f_beta')
            precision, recall, f_beta = get_precision_recall(
                args,
                score,
                num_samples=1000,
                beta=args.beta,
                label=TimeseriesData.testLabel.to(args.device))
            print('data: ', args.data, ' filename: ', args.filename,
                  ' f-beta (no compensation): ',
                  f_beta.max().item(), ' beta: ', args.beta)
            if args.compensate:
                precision, recall, f_beta = get_precision_recall(
                    args,
                    score,
                    num_samples=1000,
                    beta=args.beta,
                    label=TimeseriesData.testLabel.to(args.device),
                    predicted_score=predicted_score)
                print('data: ', args.data, ' filename: ', args.filename,
                      ' f-beta    (compensation): ',
                      f_beta.max().item(), ' beta: ', args.beta)

            target = preprocess_data.reconstruct(
                test_dataset.cpu()[:, 0, channel_idx],
                TimeseriesData.mean[channel_idx],
                TimeseriesData.std[channel_idx]).numpy()
            mean_prediction = preprocess_data.reconstruct(
                sorted_prediction.mean(dim=1).cpu(),
                TimeseriesData.mean[channel_idx],
                TimeseriesData.std[channel_idx]).numpy()
            oneStep_prediction = preprocess_data.reconstruct(
                sorted_prediction[:, -1].cpu(),
                TimeseriesData.mean[channel_idx],
                TimeseriesData.std[channel_idx]).numpy()
            Nstep_prediction = preprocess_data.reconstruct(
                sorted_prediction[:, 0].cpu(),
                TimeseriesData.mean[channel_idx],
                TimeseriesData.std[channel_idx]).numpy()
            sorted_errors_mean = sorted_error.abs().mean(dim=1).cpu()
            sorted_errors_mean *= TimeseriesData.std[channel_idx]
            sorted_errors_mean = sorted_errors_mean.numpy()
            score = score.cpu()
            scores.append(score)
            targets.append(target)
            predicted_scores.append(predicted_score)
            mean_predictions.append(mean_prediction)
            oneStep_predictions.append(oneStep_prediction)
            Nstep_predictions.append(Nstep_prediction)
            precisions.append(precision)
            recalls.append(recall)
            f_betas.append(f_beta)

            if args.save_fig:
                save_dir = Path(
                    'result', args.data,
                    args.filename).with_suffix('').joinpath('fig_detection')
                save_dir.mkdir(parents=True, exist_ok=True)
                plt.plot(precision.cpu().numpy(), label='precision')
                plt.plot(recall.cpu().numpy(), label='recall')
                plt.plot(f_beta.cpu().numpy(), label='f_beta')
                plt.legend()
                plt.xlabel('Threshold (log scale)')
                plt.ylabel('Value')
                plt.title('Anomaly Detection on ' + args.data + ' Dataset',
                          fontsize=18,
                          fontweight='bold')
                plt.savefig(
                    str(
                        save_dir.joinpath('fig_f_beta_channel' +
                                          str(channel_idx)).with_suffix(
                                              '.png')))
                plt.close()

                fig, ax1 = plt.subplots(figsize=(15, 5))
                ax1.plot(target,
                         label='Target',
                         color='black',
                         marker='.',
                         linestyle='--',
                         markersize=1,
                         linewidth=0.5)
                ax1.plot(mean_prediction,
                         label='Mean predictions',
                         color='purple',
                         marker='.',
                         linestyle='--',
                         markersize=1,
                         linewidth=0.5)
                ax1.plot(oneStep_prediction,
                         label='1-step predictions',
                         color='green',
                         marker='.',
                         linestyle='--',
                         markersize=1,
                         linewidth=0.5)
                ax1.plot(Nstep_prediction,
                         label=str(args.prediction_window_size) +
                         '-step predictions',
                         color='blue',
                         marker='.',
                         linestyle='--',
                         markersize=1,
                         linewidth=0.5)
                ax1.plot(sorted_errors_mean,
                         label='Absolute mean prediction errors',
                         color='orange',
                         marker='.',
                         linestyle='--',
                         markersize=1,
                         linewidth=1.0)
                ax1.legend(loc='upper left')
                ax1.set_ylabel('Value', fontsize=15)
                ax1.set_xlabel('Index', fontsize=15)
                ax2 = ax1.twinx()
                ax2.plot(
                    score.numpy().reshape(-1, 1),
                    label=
                    'Anomaly scores from \nmultivariate normal distribution',
                    color='red',
                    marker='.',
                    linestyle='--',
                    markersize=1,
                    linewidth=1)
                if args.compensate:
                    ax2.plot(predicted_score,
                             label='Predicted anomaly scores from SVR',
                             color='cyan',
                             marker='.',
                             linestyle='--',
                             markersize=1,
                             linewidth=1)
                ax2.legend(loc='upper right')
                ax2.set_ylabel('anomaly score', fontsize=15)
                plt.title('Anomaly Detection on ' + args.data + ' Dataset',
                          fontsize=18,
                          fontweight='bold')
                plt.tight_layout()
                plt.xlim([0, len(test_dataset)])
                plt.savefig(
                    str(
                        save_dir.joinpath('fig_scores_channel' +
                                          str(channel_idx)).with_suffix(
                                              '.png')))
                plt.close()

    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from evaluation early')

    print('=> saving the results as pickle extensions')
    save_dir = Path('result', args.data, args.filename).with_suffix('')
    save_dir.mkdir(parents=True, exist_ok=True)
    pickle.dump(targets, open(str(save_dir.joinpath('target.pkl')), 'wb'))
    pickle.dump(mean_predictions,
                open(str(save_dir.joinpath('mean_predictions.pkl')), 'wb'))
    pickle.dump(oneStep_predictions,
                open(str(save_dir.joinpath('oneStep_predictions.pkl')), 'wb'))
    pickle.dump(Nstep_predictions,
                open(str(save_dir.joinpath('Nstep_predictions.pkl')), 'wb'))
    pickle.dump(scores, open(str(save_dir.joinpath('score.pkl')), 'wb'))
    pickle.dump(predicted_scores,
                open(str(save_dir.joinpath('predicted_scores.pkl')), 'wb'))
    pickle.dump(precisions, open(str(save_dir.joinpath('precision.pkl')),
                                 'wb'))
    pickle.dump(recalls, open(str(save_dir.joinpath('recall.pkl')), 'wb'))
    pickle.dump(f_betas, open(str(save_dir.joinpath('f_beta.pkl')), 'wb'))
    print('-' * 89)
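
The comments in step 4 describe sweeping a threshold from 1 up to the maximum anomaly score and measuring precision, recall and f-beta at each point. A minimal sketch of that idea, assuming 0/1 labels and equidistant thresholds; the real get_precision_recall used above may differ (for example, it can also sample thresholds logarithmically):

import torch

def sweep_precision_recall(score, label, num_samples=1000, beta=1.0):
    # Sample candidate thresholds between 1 and the maximum anomaly score.
    thresholds = torch.linspace(1.0, score.max().item(), num_samples)
    label = label.bool()
    precisions, recalls, f_betas = [], [], []
    for th in thresholds:
        pred = score > th
        tp = (pred & label).sum().item()
        fp = (pred & ~label).sum().item()
        fn = (~pred & label).sum().item()
        precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        denom = beta ** 2 * precision + recall
        f_beta = (1 + beta ** 2) * precision * recall / denom if denom > 0 else 0.0
        precisions.append(precision)
        recalls.append(recall)
        f_betas.append(f_beta)
    return torch.tensor(precisions), torch.tensor(recalls), torch.tensor(f_betas)
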
eval_dataset = dsets.ImageFolder(EVAL_DATA_PATH,
                                 transform=preprocessing.transform)
train_dataset = dsets.ImageFolder(TRAIN_DATA_PATH,
                                  transform=preprocessing.transform)

eval_dataloader = DataLoader(eval_dataset, batch_size=BATCH_SIZE, shuffle=True)
train_dataloader = DataLoader(train_dataset,
                              batch_size=BATCH_SIZE,
                              shuffle=True)

# Use cross-entropy as the loss and SGD as the optimizer
criterion = NN.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Load the checkpoint from the previous training run
model.load_state_dict(torch.load("./chkpoint_res.bin"))

# Start training
for epoch in range(0, 100):
    model.train()
    with tqdm(train_dataloader, unit="batch") as tepoch:  # progress bar
        correct = 0
        batch = 0
        for data, target in tepoch:
            batch += 1
            tepoch.set_description(f"Epoch {epoch}")
            data, target = data.cuda(), target.cuda()  # move data to the GPU

            optimizer.zero_grad()  # zero the gradients
            output = model(data)  # forward pass
            loss = criterion(output, target)  # compute the loss
            loss.backward()  # backward pass
            optimizer.step()  # update the weights
X, y = sklearn.datasets.make_moons(2000, noise=0.1)
def predict_fn(data,model):
    print('Predicting class labels for the input data...')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data = torch.from_numpy(data.astype('float32'))
    data = data.to(device)
    model.eval()
    # Compute the result of applying the model to the input data.
    out = model(data)
    # The variable `result` should be a numpy array; a single value 0-1
    result = out.cpu().detach().numpy()
    return result

if __name__ == '__main__':
    model.load_state_dict(torch.load('model/model.pth'))
    model.eval()
    x = torch.from_numpy(X).type(torch.FloatTensor)
    ans = model.predict(x).numpy()
    print(ans)
    print(accuracy_score(y, ans))
    tp = np.logical_and(y, ans).sum()
    fp = np.logical_and(1 - y, ans).sum()
    tn = np.logical_and(1 - y, 1 - ans).sum()
    fn = np.logical_and(y, 1 - ans).sum()
    recall = tp / (tp + fn)
    precision = tp / (tp + fp)
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    print(pd.crosstab(y, ans, rownames=['actuals'],
                      colnames=['predictions']))
    print("\n{:<11} {:.3f}".format('Recall:', recall))
Example #7
#!/usr/bin/env python
# coding: utf-8
from utils import testPaths, dataIter, DataIter, srcIndex, \
            trgIndex, mu, sig, homeDirectory, averageFilter, handleAngleData
import torch
import codecs
import csv
from model import model,device
import matplotlib.pyplot as plt
import numpy as np
import copy
import time

model.load_state_dict(torch.load(homeDirectory+'my-model-test.pt'))

for n in range(len(testPaths)):
    locationData = np.load(homeDirectory+'dataset/'+ testPaths[n])
    locationFilePath = homeDirectory + 'predictionData/locations' +testPaths[n][:-4] + '.csv'
    file = codecs.open(locationFilePath, 'w', 'gbk')
    writer = csv.writer(file)
    for i in range(0, np.shape(locationData)[0]):
        writer.writerow(locationData[i,trgIndex])
    file.close()

    angleFilePath = homeDirectory + 'predictionData/angle' +testPaths[n][:-4] + '.csv'
    angleFile = codecs.open(angleFilePath, 'w', 'gbk')
    angleWriter = csv.writer(angleFile)
    predictionFilePath = homeDirectory + 'predictionData/predictions' +testPaths[n][:-4] + '.csv'
    file = codecs.open(predictionFilePath, 'w', 'gbk')
    writer = csv.writer(file)
    # Get the center point and normalize the data
Example #8
parser.add_argument(
    '--model',
    type=str,
    metavar='M',
    help="the model file to be evaluated. Usually it is of the form model_X.pth"
)
parser.add_argument('--outfile',
                    type=str,
                    default='experiment/kaggle.csv',
                    metavar='D',
                    help="name of the output csv file")

args = parser.parse_args()
use_cuda = torch.cuda.is_available()

state_dict = torch.load(args.model)
model.load_state_dict(state_dict)
model.eval()
if use_cuda:
    print('Using GPU')
    model.cuda()
else:
    print('Using CPU')

from data import data_transforms

test_dir = args.data + '/test_images/mistery_category'


def pil_loader(path):
    # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
    with open(path, 'rb') as f:
        img = Image.open(f)
        return img.convert('RGB')
Example #10

### read train & test input and output
def read_input():
    data_inputs = []
    data_outputs = []
    with open("../data/data.txt") as f:
        content = f.readlines()
        for i in content:
            i = i.strip("\n")
            i = i.split()
            i = [float(x) for x in i]
            temp = []
            temp_box_x = int(i[4]) - int(i[2])
            temp_box_y = int(i[5]) - int(i[3])
            temp.append(i[0])
            temp.append(min(temp_box_x, temp_box_y))
            data_outputs.append(i[1:2])
            data_inputs.append(temp)
    return data_inputs, data_outputs


if __name__ == '__main__':
    m = m()
    m.load_state_dict(torch.load('model_1.pt'))
    m.eval()
    train_inputs, train_outputs = read_input()
    for i in range(len(train_inputs)):
        x = torch.tensor(train_inputs[i], dtype=torch.float32)
        print(m(x))
        print(train_outputs[i])
        print("****")
Example #11
        shuffle=False,
        num_workers=args.workers,
        pin_memory=False,
        drop_last=False)

    # 4.4 load model
    model_dir = './imageset/label/model_ir_se50.pth'
    pretrained_dict = torch.load(model_dir)
    model = model.Backbone(num_layers=50, drop_ratio=0.6, mode='ir')
    model_dict = model.state_dict()
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in model_dict
    }
    model_dict.update(pretrained_dict)  # update parameter
    model.load_state_dict(model_dict)
    model = torch.nn.DataParallel(model).to(args.device)

    # 4.5 set loss_function
    loss_function_A = torch.nn.MarginRankingLoss().to(args.device)
    loss_function_B = torch.nn.MSELoss().to(args.device)

    # 4.6 choose optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[30, 60, 100],
                                                     gamma=0.1,
                                                     last_epoch=-1)
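
When only part of a checkpoint matches the model, an alternative to the manual filtering above is PyTorch's strict=False option, which skips missing and unexpected keys instead of raising an error:

model.load_state_dict(pretrained_dict, strict=False)
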
parser = argparse.ArgumentParser(description='Pass in an image, it will show you its class')
parser.add_argument('-i', '--path', type=str, dest='img_path', help='Image file path')
parser.add_argument('-m', '--model', type=str, help='Model file path (*.bin)', default='./chkpoint.bin')
parser.add_argument('-v', '--verbose', action='store_true', help='Show model structure and full output')
args = parser.parse_args()

torch.cuda.set_device(0)  # use GPU 0

from preprocessing import transform

image = Image.open(args.img_path)
x = transform(image)
x.unsqueeze_(0)
x = x.cuda()

from model import model
model = model.cuda()
model.load_state_dict(torch.load(args.model))
model.eval()

output = model(x)

if args.verbose:
    print(model)
    print(output)
else:
    predictions = output.argmax(dim=1, keepdim=True).squeeze()
    print(predictions)
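
A hypothetical invocation of this script (the script name and image path are placeholders):

    python predict.py --path ./images/sample.jpg --model ./chkpoint.bin --verbose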

Example #13
#!/usr/bin/env ipython
import sys

import numpy as np
import torch
import tqdm

from model import NUM_FEAT, model, seq_len
from util import remove_outliers

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("Device", device)

model.load_state_dict(torch.load("model.pt"))
model.to(device)
model.eval()


def predict_step(model, inputs):
    inputs = inputs.to(device)
    src_mask = model.generate_square_subsequent_mask(inputs.size(0)).to(device)
    return model.forward(
        inputs,
        src_mask=src_mask,
    )


def predict(model, seq_len, steps, context):

    seq = context
    n_context = context.shape[0]
Example #14
        for k, v in checkpoint.items():
            # strip the 'module.' prefix that nn.DataParallel adds to parameter names
            if 'module' in k:
                name = k[7:]
            else:
                name = k
            new_state_dcit[name] = v
        model_dict = model.state_dict()
        pretrained_dict = {
            k: v
            for k, v in new_state_dcit.items() if k in model_dict
        }

        for k, v in model_dict.items():
            if k not in pretrained_dict:
                print(k)
        model.load_state_dict(pretrained_dict, strict=True)

    else:
        print("===> no models found at '{}'".format(args.pretrained))

print("===> Setting Optimizer")

optimizer = optim.Adam(model.parameters(), lr=args.lr)


def train(epoch):
    model.train()
    utils.adjust_learning_rate(optimizer, epoch, args.step_size, args.lr,
                               args.gamma)
    print('epoch =', epoch, 'lr = ', optimizer.param_groups[0]['lr'])
    for iteration, (lr_tensor, hr_tensor) in enumerate(training_data_loader,
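
The loop at the top of this example exists because nn.DataParallel saves parameters under a 'module.' prefix. A self-contained sketch of the same idea as a reusable helper (the function name is illustrative):

from collections import OrderedDict

def strip_module_prefix(state_dict):
    # Remove the 'module.' prefix that nn.DataParallel adds to parameter names.
    cleaned = OrderedDict()
    for k, v in state_dict.items():
        cleaned[k[7:] if k.startswith('module.') else k] = v
    return cleaned
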
Example #15
def train_model(model,
                device,
                train_data_loader,
                valid_data_loader,
                criterion,
                optimizer,
                scheduler,
                num_epochs=5):
    """
    training

    Parameters
    --------------
    model : DogClassificationModel
        Network model to be trained.
    device : device
        cuda or cpu
    train_data_loader : dataloader
        dataloader for training
    valid_data_loader : dataloader
        dataloader for validation
    criterion : 
        Loss function.
    optimizer :
        Optimizer.
    scheduler : 
        Learning rate scheduler.
    num_epochs : int
        The number of epochs.

    Returns
    --------------
    model : DogClassificationModel
        Trained model.
    """
    since = time.time()
    model = model.to(device)

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        bar = tqdm(total=len(train_data_loader))
        bar.set_description("Epoch: {}/{}".format(epoch + 1, num_epochs))
        """
        Training Phase
        """
        model.train()

        running_loss = 0.0
        running_corrects = 0

        for j, (inputs, labels) in enumerate(train_data_loader):
            optimizer.zero_grad()
            tmp_loss_item = 0.0

            # training
            with torch.set_grad_enabled(True):
                outputs = model(inputs.to(device))
                torch.cuda.empty_cache()

                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels.to(device))

                # backward + optimize only if in training phase
                loss.backward()
                optimizer.step()

                tmp_loss_item = loss.item()

            # statistics
            running_loss += tmp_loss_item * inputs.size(0)
            running_corrects += torch.sum(preds.to('cpu') == labels.data)

            # progress bar
            bar.update(1)
            # running averages, assuming a fixed mini-batch size of 32
            tmp_loss = running_loss / ((j + 1) * 32)
            tmp_acc = float(running_corrects) / ((j + 1) * 32)
            bar.set_postfix(OrderedDict(loss=tmp_loss, acc=tmp_acc))

        # update learning rate scheduler
        scheduler.step()

        dataset_size = len(train_data_loader.dataset)
        epoch_loss = running_loss / dataset_size
        epoch_acc = running_corrects.double() / dataset_size
        """
        Validation Phase
        """
        model.eval()  # Set model to evaluation mode

        val_running_loss = 0.0
        val_running_corrects = 0

        # Iterate over data.
        for inputs, labels in valid_data_loader:
            val_inputs = inputs.to(device)
            val_labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward pass; gradient tracking is not needed during validation
            with torch.no_grad():
                val_outputs = model(val_inputs)
                _, preds = torch.max(val_outputs, 1)
                loss = criterion(val_outputs, val_labels)

            # statistics
            val_running_loss += loss.item() * val_inputs.size(0)
            val_running_corrects += torch.sum(preds == val_labels.data)

        dataset_size = len(valid_data_loader.dataset)
        val_epoch_loss = val_running_loss / dataset_size
        val_epoch_acc = val_running_corrects.double() / dataset_size

        print('VALIDATION  Loss: {:.4f} Acc: {:.4f}'.format(
            val_epoch_loss, val_epoch_acc))
        print("Elapsed time: {} [sec]".format(time.time() - since))

        # deep copy the model
        if val_epoch_acc > best_acc:
            best_acc = val_epoch_acc
            best_model_wts = copy.deepcopy(model.state_dict())

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
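
A hedged usage sketch for train_model; DogClassificationModel, the two dataloaders and the hyperparameters below are assumptions based on the docstring, not part of the original example:

import torch
from torch import nn, optim

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = DogClassificationModel()  # assumed model class from the same project
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
model = train_model(model, device, train_data_loader, valid_data_loader,
                    criterion, optimizer, scheduler, num_epochs=5)
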
Example #16
print(opt)

cuda = opt.cuda
device = torch.device('cuda' if cuda else 'cpu')

filepath = opt.test_hr_folder

filelist = utils.get_list(filepath, ext='.png') + utils.get_list(filepath,
                                                                 ext='.JPG')
psnr_list = np.zeros(len(filelist))
ssim_list = np.zeros(len(filelist))
time_list = np.zeros(len(filelist))

model = model.model_rtc(upscale=opt.upscale_factor)
model_dict = utils.load_state_dict(opt.checkpoint)
model.load_state_dict(model_dict, strict=True)

i = 0
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)

for imname in filelist:
    # im_gt = cv2.imread(imname, cv2.IMREAD_COLOR)[:, :, [2, 1, 0]]  # BGR to RGB
    im_gt = sio.imread(imname)  # RGB
    im_gt = utils.modcrop(im_gt, opt.upscale_factor)
    # im_l = cv2.imread(opt.test_lr_folder + imname.split('/')[-1].split('.')[0] + 'x' + str(opt.upscale_factor) + ext, cv2.IMREAD_COLOR)[:, :, [2, 1, 0]]  # BGR to RGB
    # im_l = cv2.imread(opt.test_lr_folder + imname.split('/')[-1].split('.')[0] + ext, cv2.IMREAD_COLOR)[:, :, [2, 1, 0]]  # BGR to RGB
    im_l = sio.imread(opt.test_lr_folder + '/' + imname.split('/')[-1])  # RGB
    if len(im_gt.shape) < 3:
        im_gt = im_gt[..., np.newaxis]
        im_gt = np.concatenate([im_gt] * 3, 2)
from model import model
import torch
import torch.nn as nn
from config import config
from dataset.data_preprocess import ImageDataProcess
device = config.device
model_path = "saved/model/model.pth"

#--------Load the model and run it in GPU mode------------------#
model = model.LeNet(config.train_class_num).to(device)
model.load_state_dict(torch.load(model_path))
model.eval()

#-------------------Process Photo--------------------------------#
image_tensor_list = []
image_dir = "data/test/santapepe.jpg"
image_tensor = ImageDataProcess.image_normalize(image_dir)  #image to Tensor
image_tensor_list.append(torch.unsqueeze(image_tensor, 0))
input_tensor = torch.cat(tuple(image_tensor_list), dim=0)  # +1 Dimension
input_tensor = input_tensor.to(device)

#------------------Softmax the result to probability--------------#
outputs_tensor = model(input_tensor)
m_softmax = nn.Softmax(dim=1)
outputs_tensor = m_softmax(outputs_tensor)

#------------------Print the Result--------------#
print(outputs_tensor)
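
To turn the printed probability tensor into a predicted class index, torch.argmax can be applied along the class dimension (a small usage note, not part of the original snippet):

predicted_class = torch.argmax(outputs_tensor, dim=1).item()
print(predicted_class)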
Example #18
class Test_Dataset(Dataset):
    def __init__(self, x):
        self.data = torch.from_numpy(x)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
testset = Test_Dataset(test_data)
test_loader = DataLoader(dataset=testset, batch_size=1, shuffle=False)

model = model()
model.load_state_dict(torch.load("./model.pth"))
model = model.to(device)


def inference(test_loader, device, model, ans_list):
    model.eval()
    with torch.no_grad():
        for data in test_loader:
            data = data.to(device)
            predict = model(data)
            ans = torch.argmax(predict, dim=1)
            ans_list.append(ans.item())


ans_list = []
inference(test_loader, device, model, ans_list)
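
The collected predictions can then be written out; a minimal sketch, assuming an id/label CSV layout (the output path and column names are illustrative):

import csv

with open('predictions.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['id', 'label'])
    for idx, label in enumerate(ans_list):
        writer.writerow([idx, label])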