Пример #1
0
def validate(val_loader, model):
  """Run one evaluation pass over `val_loader`, tracking loss and batch time.

  The model is expected to return `(loss, scores, boxes)` when called with
  an `(image, target)` pair.  Progress is printed every `args.print_freq`
  batches.  Uses the project-local AverageMeter and module-level `args`.
  """
  batch_time = AverageMeter()
  losses = AverageMeter()

  # switch to evaluate mode (disables dropout / freezes batch-norm stats)
  model.eval()

  end = time.time()

  with torch.no_grad():  # gradients are never needed during validation
    for i, (im, gt) in enumerate(val_loader):
      loss, scores, boxes = model((im, gt))
      # .item() replaces the deprecated `loss.data[0]`, which raises on
      # 0-dim loss tensors in PyTorch >= 0.5
      losses.update(loss.item(), im.size(0))
      # measure elapsed time
      batch_time.update(time.time() - end)
      end = time.time()

      if i % args.print_freq == 0:
        print('Test: [{0}/{1}]\t'
              'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
              'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
              .format(
              i, len(val_loader), batch_time=batch_time,
              loss=losses,
              ))
Пример #2
0
def export_onnx(path, batch_size, seq_len):
    """Export the module-level model to ONNX at `path`.

    Traces the network with a dummy all-zero token tensor of shape
    [seq_len, batch_size] and a freshly initialized hidden state.
    """
    print('The model is also exported in ONNX format at {}'.
          format(os.path.realpath(args.onnx_export)))
    model.eval()
    tokens = torch.LongTensor(seq_len * batch_size).zero_()
    dummy_input = tokens.view(-1, batch_size).to(device)
    hidden = model.init_hidden(batch_size)
    torch.onnx.export(model, (dummy_input, hidden), path)
Пример #3
0
def evaluate_model(val_loader, model, criterion, config):
    """Evaluate `model` on `val_loader` and return averaged metrics.

    Returns the means of the loss and accuracy histories, plus precision
    and recall means when `config["precision_recall"]` is truthy.  Inputs
    and targets are moved to the GPU; metrics come from the project-local
    AverageMeter and sklearn-style scoring helpers.
    """
    track_pr = config["precision_recall"]
    validation_loss = AverageMeter()
    validation_accuracy = AverageMeter()
    if track_pr:
        validation_precision = AverageMeter()
        validation_recall = AverageMeter()
    model.eval()
    with torch.no_grad():
        for i, (images, targets) in enumerate(val_loader):
            targets = targets.cuda()
            images = images.cuda()
            # call the module, not .forward(), so forward hooks still run
            output = model(images)
            loss = criterion(output, targets)
            validation_loss.update(loss.item(), images.size(0))
            y_true = targets.detach().cpu().numpy()
            y_score = torch.topk(output, 1).indices.reshape(
                output.size(0)).detach().cpu().numpy()
            acc = accuracy_score(y_true, y_score)
            validation_accuracy.update(acc, images.size(0))
            if track_pr:
                rec = recall_score(y_true, y_score)
                prec = precision_score(y_true, y_score)
                validation_precision.update(prec, images.size(0))
                validation_recall.update(rec, images.size(0))
    if track_pr:
        return (mean(validation_loss.history),
                mean(validation_accuracy.history),
                mean(validation_precision.history),
                mean(validation_recall.history))
    return mean(validation_loss.history), mean(validation_accuracy.history)
def fit_norm_distribution_param(args, model, train_dataset, endPoint=10000):
    """Fit a multivariate Gaussian to multi-step prediction errors.

    For each time step t the model rolls out `args.prediction_window_size`
    free-running predictions; the errors of the predictions that target
    step t are collected, and an empirical (mean, cov) pair is estimated
    over all complete windows.

    Returns (mean, cov); cov is symmetric positive-semidefinite.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    pasthidden = model.init_hidden(1)
    predictions = []
    organized = []
    errors = []
    # torch.no_grad() replaces the removed `Variable(..., volatile=True)`
    # flag (deprecated since PyTorch 0.4); tensors can be fed directly.
    with torch.no_grad():
        for t in range(endPoint):
            out, hidden = model.forward(train_dataset[t].unsqueeze(0), pasthidden)
            predictions.append([])
            organized.append([])
            errors.append([])
            predictions[t].append(out.data.cpu()[0][0][0])
            pasthidden = model.repackage_hidden(hidden)
            # free-running roll-out for the rest of the prediction window
            for prediction_step in range(1, args.prediction_window_size):
                out, hidden = model.forward(out, hidden)
                predictions[t].append(out.data.cpu()[0][0][0])

            if t >= args.prediction_window_size:
                # gather, across past windows, every prediction that targeted step t
                for step in range(args.prediction_window_size):
                    organized[t].append(predictions[step + t - args.prediction_window_size][args.prediction_window_size - 1 - step])
                errors[t] = torch.FloatTensor(organized[t]) - train_dataset[t][0][0]
                if args.cuda:
                    errors[t] = errors[t].cuda()
                errors[t] = errors[t].unsqueeze(0)

    errors_tensor = torch.cat(errors[args.prediction_window_size:], dim=0)
    mean = errors_tensor.mean(dim=0)
    # empirical covariance: E[e e^T] - mean mean^T
    cov = errors_tensor.t().mm(errors_tensor) / errors_tensor.size(0) - mean.unsqueeze(1).mm(mean.unsqueeze(0))
    # cov: positive-semidefinite and symmetric.

    return mean, cov
Пример #5
0
def valid():
    """Evaluate the module-level model on `testing_data_loader`.

    Computes PSNR/SSIM per image after shaving `args.scale` border pixels,
    optionally on the Y channel only (when `args.isY` is set), and prints
    per-image and average scores.
    """
    model.eval()
    avg_psnr, avg_ssim = 0, 0
    for i, batch in enumerate(testing_data_loader):
        lr_tensor, hr_tensor = batch[0], batch[1]
        if args.cuda:
            lr_tensor = lr_tensor.to(device)
            hr_tensor = hr_tensor.to(device)

        with torch.no_grad():
            pre = model(lr_tensor)

        sr_img = utils.tensor2np(pre.detach()[0])
        gt_img = utils.tensor2np(hr_tensor.detach()[0])
        crop_size = args.scale
        cropped_sr_img = utils.shave(sr_img, crop_size)
        cropped_gt_img = utils.shave(gt_img, crop_size)
        # truthiness test instead of `is True`, which silently fails for
        # truthy non-bool flag values (e.g. 1 from argparse)
        if args.isY:
            im_label = utils.quantize(sc.rgb2ycbcr(cropped_gt_img)[:, :, 0])
            im_pre = utils.quantize(sc.rgb2ycbcr(cropped_sr_img)[:, :, 0])
        else:
            im_label = cropped_gt_img
            im_pre = cropped_sr_img

        psnr = utils.compute_psnr(im_pre, im_label)
        ssim = utils.compute_ssim(im_pre, im_label)

        avg_psnr += psnr
        avg_ssim += ssim
        print(
            f" Valid {i}/{len(testing_data_loader)} with PSNR = {psnr} and SSIM = {ssim}"
        )
    print("===> Valid. psnr: {:.4f}, ssim: {:.4f}".format(
        avg_psnr / len(testing_data_loader),
        avg_ssim / len(testing_data_loader)))
Пример #6
0
def train_model(dataloader, model, criterion, optimizer, device, num_epochs,
                dataset_size):
    """Train and test `model` for `num_epochs`, checkpointing the best weights.

    `dataloader` maps phase ('train'/'test') to a DataLoader; `dataset_size`
    maps phase to the number of samples.  The weights with the best test
    accuracy so far are saved to the Config checkpoint path every epoch.
    """
    model.to(device)
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            # BUG FIX: iterate the `dataloader` parameter; the original read
            # the module-level `dataloaders` global, leaving the parameter
            # unused and ignoring whatever the caller passed in.
            for inputs, labels in tqdm(dataloader[phase]):
                inputs = inputs.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()

                # gradients only during the training phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, pred = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(pred == labels.data)

            epoch_loss = running_loss / dataset_size[phase]
            epoch_acc = running_corrects.double() / dataset_size[phase]

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss,
                                                       epoch_acc))

            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        # compute the checkpoint path once; save the best weights every epoch
        ckpt_path = osp.join(Config['root_path'], Config['checkpoint_path'],
                             'model.pth')
        torch.save(best_model_wts, ckpt_path)
        print('Model saved at: {}'.format(ckpt_path))

    time_elapsed = time.time() - since
    print('Time taken to complete training: {:0f}m {:0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best acc: {:.4f}'.format(best_acc))
def inference(test_loader, device, model, ans_list):
    """Run `model` over `test_loader`, appending each argmax label to `ans_list`.

    Each batch is assumed to contain a single sample so that `.item()`
    yields one scalar label per batch.
    """
    model.eval()
    with torch.no_grad():
        for sample in test_loader:
            sample = sample.to(device)
            logits = model(sample)
            label = torch.argmax(logits, dim=1)
            ans_list.append(label.item())
Пример #8
0
def get_recognition_model():
    """Build the WaterMeterModel, load its trained weights, and return it in eval mode."""
    from model import model as model_module

    net = model_module.WaterMeterModel()
    checkpoint = torch.load('./models/water_meter_recognition.pth',
                            map_location='cpu')
    net.load_state_dict(checkpoint['state_dict'])
    # eval() returns the module itself, now in inference mode
    return net.eval()
def predict_fn(data, model):
    """Apply `model` to a NumPy array `data` and return the output as a NumPy array.

    Converts `data` to float32, moves it to GPU when available, and brings
    the model output back to CPU before converting to NumPy.
    """
    print('Predicting class labels for the input data...')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # BUG FIX: convert the `data` argument; the original referenced an
    # undefined module-level name `X`, raising NameError at runtime.
    data = torch.from_numpy(data.astype('float32'))
    data = data.to(device)
    model.eval()
    # Compute the result of applying the model to the input data.
    out = model(data)
    # The variable `result` should be a numpy array; a single value 0-1
    result = out.cpu().detach().numpy()
    return result
Пример #10
0
def evaluate(model, iterator, criterion):
    """Return the mean per-batch loss of `model` over `iterator` (teacher forcing off)."""
    model.eval()
    total = 0
    batches = 0
    with torch.no_grad():
        for batch in iterator:
            output = model(batch.src, batch.trg, 0)  #turn off teacher forcing
            total += criterion(output, batch.trg).item()
            batches += 1
    return total / batches
Пример #11
0
def predict(json_map, checkpoint, image_path, model, topk, processor):
    """Predict the top-`topk` classes of an image with a trained network.

    Loads weights from `checkpoint` (CPU-mapped when `processor == 'CPU'`),
    preprocesses `image_path` via the project-level `process_image`, and
    maps predicted class indices back to folder labels through an
    ImageFolder built from the hard-coded 'flowers/train' directory.

    Returns (probabilities, plant_classes, cat_to_name).
    """
    # map tensors to CPU when no GPU is requested
    if processor == 'CPU':
        checkpoint_information = torch.load(checkpoint, map_location='cpu')
    else:
        checkpoint_information = torch.load(checkpoint)

    model.load_state_dict(checkpoint_information['state_dict'])
    model.eval()
    a = process_image(image_path)
    # add a batch dimension (assumes process_image returns a channel-first
    # array — TODO confirm against process_image)
    y = np.expand_dims(a, axis=0)
    if processor == 'CPU':
        img = torch.from_numpy(y)
    else:
        img = torch.from_numpy(y).cuda()
    # NOTE(review): Variable(..., volatile=True) was removed in PyTorch 0.4;
    # the modern equivalent is calling the model inside torch.no_grad().
    output = model.double()(Variable(img, volatile=True))
    # exp of the output — assumes the network emits log-probabilities
    ps = torch.exp(output)
    ps_top5 = torch.topk(ps, topk)
    probs = ps_top5[0]
    classes = ps_top5[1]

    import json
    with open(json_map, 'r') as f:
        cat_to_name = json.load(f)

    # rebuild the training ImageFolder only to recover class_to_idx.
    # NOTE(review): the transforms are irrelevant for that purpose and the
    # 'flowers' path is hard-coded — consider persisting class_to_idx in
    # the checkpoint instead.
    data_dir = 'flowers'
    train_dir = data_dir + '/train'
    train_transforms = transforms.Compose([
        transforms.RandomRotation(25),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
    xx = train_data.class_to_idx

    # NOTE(review): class_to_idx_dict duplicates xx and is never used
    class_to_idx_dict = train_data.class_to_idx
    # invert the mapping: model index -> dataset folder label
    key_value_exchange_dict = dict((v, k) for k, v in xx.items())

    probabilities = probs.data.cpu().numpy().tolist()[0]
    plant_classes = classes.data.cpu().numpy().tolist()[0]
    for i in range(len(probabilities)):
        plant_classes[i] = key_value_exchange_dict[plant_classes[i]]

    return probabilities, plant_classes, cat_to_name
Пример #12
0
def evaluate(data_source):
    """Return the average per-token loss of the module-level model on `data_source`."""
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(eval_batch_size)
    with torch.no_grad():
        for i in range(0, data_source.size(0) - 1, args.bptt):
            data, targets = get_batch(data_source, i)
            output, hidden = model(data, hidden)
            output_flat = output.view(-1, ntokens)
            # weight each batch's mean loss by its sequence length
            total_loss += len(data) * criterion(output_flat, targets).item()
            # detach the hidden state so the graph does not grow across batches
            hidden = repackage_hidden(hidden)
            # (removed the original no-op `data.to("cpu")` / `targets.to("cpu")`
            # calls: Tensor.to returns a new tensor and both results were
            # discarded)
    return total_loss / len(data_source)
def evaluate_1step_pred(args, model, test_dataset):
    """Average one-step-ahead prediction loss of `model` over `test_dataset`.

    Relies on the module-level `criterion` and `get_batch`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    with torch.no_grad():
        hidden = model.init_hidden(args.eval_batch_size)
        for nbatch, i in enumerate(range(0, test_dataset.size(0) - 1, args.bptt)):

            inputSeq, targetSeq = get_batch(args,test_dataset, i)
            outSeq, hidden = model.forward(inputSeq, hidden)

            loss = criterion(outSeq.view(args.batch_size,-1), targetSeq.view(args.batch_size,-1))
            # detach the hidden state between batches to bound the graph
            hidden = model.repackage_hidden(hidden)
            total_loss+= loss.item()

    # NOTE(review): nbatch is the LAST loop index (batch_count - 1), so this
    # slightly overstates the mean, and nbatch is unbound if the dataset
    # yields no batches; another evaluate() in this file divides by
    # (nbatch + 1) — confirm which is intended.
    return total_loss / nbatch
Пример #14
0
def evaluate_1step_pred(args, model, test_dataset):
    """Average one-step-ahead prediction loss of `model` over `test_dataset`.

    Uses the module-level `criterion` and `get_batch`; divides by the last
    batch index, matching the original implementation.
    """
    model.eval()  # disable dropout for evaluation
    running_loss = 0
    with torch.no_grad():
        hidden = model.init_hidden(args.eval_batch_size)
        batch_starts = range(0, test_dataset.size(0) - 1, args.bptt)
        for nbatch, start in enumerate(batch_starts):
            inputSeq, targetSeq = get_batch(args, test_dataset, start)
            outSeq, hidden = model.forward(inputSeq, hidden)
            batch_loss = criterion(
                outSeq.view(args.batch_size, -1),
                targetSeq.view(args.batch_size, -1))
            hidden = model.repackage_hidden(hidden)
            running_loss += batch_loss.item()

    return running_loss / nbatch
Пример #15
0
    def evaluate(args, model, test_dataset):
        """Professor-forcing style evaluation loss over `test_dataset`.

        Per batch, sums three criterion terms — a free-running loss, a
        teacher-forcing loss, and a hidden-state matching loss — and
        returns the average over batches.  Relies on enclosing-scope
        `criterion` and `get_batch`.
        """
        # Turn on evaluation mode which disables dropout.
        model.eval()
        with torch.no_grad():
            total_loss = 0
            hidden = model.init_hidden(args.eval_batch_size)
            nbatch = 1
            for nbatch, i in enumerate(
                    range(0,
                          test_dataset.size(0) - 1, args.bptt)):
                inputSeq, targetSeq = get_batch(args, test_dataset, i)
                # inputSeq: [ seq_len * batch_size * feature_size ]
                # targetSeq: [ seq_len * batch_size * feature_size ]
                # detached copy of the hidden state for the free-running pass
                hidden_ = model.repackage_hidden(hidden)
                '''Loss1: Free running loss'''
                # feed the model its own previous output, one step at a time
                outVal = inputSeq[0].unsqueeze(0)
                outVals = []
                hids1 = []
                for i in range(inputSeq.size(0)):
                    outVal, hidden_, hid = model.forward(outVal,
                                                         hidden_,
                                                         return_hiddens=True)
                    outVals.append(outVal)
                    hids1.append(hid)
                outSeq1 = torch.cat(outVals, dim=0)
                hids1 = torch.cat(hids1, dim=0)
                loss1 = criterion(
                    outSeq1.contiguous().view(args.batch_size, -1),
                    targetSeq.contiguous().view(args.batch_size, -1))
                '''Loss2: Teacher forcing loss'''
                outSeq2, hidden, hids2 = model.forward(inputSeq,
                                                       hidden,
                                                       return_hiddens=True)
                loss2 = criterion(
                    outSeq2.contiguous().view(args.batch_size, -1),
                    targetSeq.contiguous().view(args.batch_size, -1))
                '''Loss3: Simplified Professor forcing loss'''
                # match free-running hiddens to detached teacher-forced hiddens
                loss3 = criterion(hids1.view(args.batch_size, -1),
                                  hids2.view(args.batch_size, -1).detach())
                '''Total loss = Loss1+Loss2+Loss3'''
                loss = loss1 + loss2 + loss3

                total_loss += loss.item()

        return total_loss / (nbatch + 1)
Пример #16
0
def train_model(dataloader, model, optimizer, device, num_epochs,
                dataset_size):
    """Train and validate `model` each epoch, then save its final weights.

    `dataloader` maps phase ('train'/'val') to a loader whose `.dataset`
    is an (inputs, labels) pair; samples are fed one at a time with an
    added batch dimension.  Uses the module-level `criterion`; the final
    state dict is written to Config['path']/my_model.pth.
    """
    model.to(device)

    for epoch in range(num_epochs):
        print('-' * 15)
        print('Epoch {}/{}'.format(epoch + 1, num_epochs))

        for phase in ['train', 'val']:  #train and validate every epoch
            if phase == 'train':
                model.train()
            else:
                model.eval()

            phase_loss = 0.0

            n_samples = len(dataloader[phase].dataset[0])
            for idx in tqdm(range(n_samples)):
                # one sample at a time, with an added leading batch axis
                sample = dataloader[phase].dataset[0][idx]
                target = dataloader[phase].dataset[1][idx]
                sample = sample.unsqueeze(0).to(device)
                target = target.unsqueeze(0).to(device)
                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):
                    prediction = model(sample)
                    loss = criterion(prediction, target)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                phase_loss += loss.item() * sample.size(0)

            print('{} Loss: {:.4f} '.format(phase,
                                            phase_loss / dataset_size[phase]))

    # persist the final model weights
    with open(osp.join(Config['path'], "my_model.pth"), "wb") as output_file:
        torch.save(model.state_dict(), output_file)
Пример #17
0
def validation():
    """Evaluate the module-level model on `val_loader`, printing loss and accuracy."""
    model.eval()
    validation_loss = 0
    correct = 0
    # construct the loss once instead of once per batch (behavior-identical)
    criterion = torch.nn.CrossEntropyLoss(reduction='mean')
    for data, target in val_loader:
        if use_cuda:
            data, target = data.cuda(), target.cuda()
        output = model(data)
        # sum up batch loss
        validation_loss += criterion(output, target).data.item()
        # get the index of the max log-probability
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    # NOTE(review): this divides a sum of per-batch MEAN losses by the
    # dataset size; with reduction='mean', dividing by the batch count (or
    # switching to reduction='sum') is the usual intent — confirm.
    validation_loss /= len(val_loader.dataset)
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.
          format(validation_loss, correct, len(val_loader.dataset),
                 100. * correct / len(val_loader.dataset)))
Пример #18
0
def validation():
    """Evaluate the module-level model on `val_loader` with summed NLL loss."""
    model.eval()
    total_nll = 0
    n_correct = 0
    with torch.no_grad():
        for data, target in val_loader:
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            # sum up batch loss
            total_nll += F.nll_loss(output, target, reduction='sum').item()
            # index of the max log-probability per sample
            pred = output.data.max(1, keepdim=True)[1]
            n_correct += pred.eq(target.data.view_as(pred)).to(device).sum()

    total_nll /= len(val_loader.dataset)
    print(
        '\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
        format(total_nll, n_correct, len(val_loader.dataset),
               100. * n_correct / len(val_loader.dataset)))
def eval(model, val_idx, input_id, y_true, device, foward_name):
    """Evaluate `model` on the rows selected by `val_idx` and print a report.

    Batches `input_id`/`y_true` rows, forwards them through the project
    `foward` dispatcher, and prints a classification report over the five
    star-rating classes.  (The name shadows the builtin `eval` and `foward`
    is a project-wide misspelling — both kept for caller compatibility.)
    """
    model.eval()
    e_batch = 1024
    val_y_all = list()
    val_pred_y_all = list()
    for n_kousin in range(len(val_idx) // e_batch + 1):
        # BUG FIX: clamp the slice to len(val_idx); the original clamped to
        # len(train_idx), an unrelated module-level global, which truncates
        # validation whenever the training split is smaller.
        batch_input_idx = val_idx[n_kousin * e_batch:min((n_kousin + 1) * e_batch, len(val_idx))]
        val_input = list()
        val_y = list()
        for idx in batch_input_idx:
            val_input.append(input_id[idx])
            val_y.append(y_true[idx])
        val_y_all.extend(val_y)
        batch_val_idx_tensor = torch.tensor(val_input).to(device)
        output = foward(model, foward_name, batch_val_idx_tensor)
        for o in output:
            val_pred_y_all.append(int(torch.argmax(o)))
    # (removed the unused report_dict and val_y_tensor locals)
    print(classification_report(val_y_all, val_pred_y_all, digits=3, target_names=["☆","☆ ☆","☆ ☆ ☆","☆ ☆ ☆ ☆","☆ ☆ ☆ ☆ ☆"]))
def anomalyScore(args,model,test_dataset,mean,cov,endPoint=10000):
    """Mahalanobis-style anomaly score for each step of `test_dataset`.

    For each time step t the model rolls out `args.prediction_window_size`
    free-running predictions; the errors of the predictions targeting t
    are scored against the fitted Gaussian (`mean`, `cov`).

    Returns (scores, organized, errors) where
    scores[t] = (e - mean) cov^{-1} (e - mean)^T for the window error e.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    pasthidden = model.init_hidden(1)
    predictions = []
    organized = []
    errors = []
    # NOTE(review): Variable(..., volatile=True) was removed in PyTorch 0.4;
    # when modernizing, drop the flag and wrap the loop in torch.no_grad().
    for t in range(endPoint):
        out, hidden = model.forward(Variable(test_dataset[t].unsqueeze(0), volatile=True), pasthidden)
        predictions.append([])
        organized.append([])
        errors.append([])
        predictions[t].append(out.data.cpu()[0][0][0])
        pasthidden = model.repackage_hidden(hidden)
        # free-running roll-out for the remainder of the prediction window
        for prediction_step in range(1, args.prediction_window_size):
            out, hidden = model.forward(out, hidden)
            predictions[t].append(out.data.cpu()[0][0][0])

        if t >= args.prediction_window_size:
            # gather every past prediction that targeted step t
            for step in range(args.prediction_window_size):
                organized[t].append(
                    predictions[step + t - args.prediction_window_size][args.prediction_window_size - 1 - step])
            organized[t] =torch.FloatTensor(organized[t]).unsqueeze(0)
            errors[t] = organized[t] - test_dataset[t][0][0]
            if args.cuda:
                errors[t] = errors[t].cuda()
        else:
            # not enough history yet: pad with zero windows/errors
            organized[t] = torch.zeros(1,args.prediction_window_size)
            errors[t] = torch.zeros(1,args.prediction_window_size)
            if args.cuda:
                errors[t] = errors[t].cuda()

    scores = []
    for error in errors:
        mult1 = error-mean.unsqueeze(0) # [ 1 * prediction_window_size ]
        mult2 = torch.inverse(cov) # [ prediction_window_size * prediction_window_size ]
        mult3 = mult1.t() # [ prediction_window_size * 1 ]
        score = torch.mm(mult1,torch.mm(mult2,mult3))
        scores.append(score[0][0])
    return scores, organized, errors
def evaluate(args, model, test_dataset):
    """Average teacher-forced prediction loss of `model` over `test_dataset`.

    Relies on the module-level `criterion` and `get_batch`.  Keeps the
    original normalization by the last batch index.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0.0
    with torch.no_grad():
        hidden = model.init_hidden(args.eval_batch_size)
        for nbatch, i in enumerate(range(0, test_dataset.size(0) - 1, args.bptt)):
            inputSeq, targetSeq = get_batch(test_dataset, i, evaluation=True)
            outSeq, hidden = model.forward(inputSeq, hidden)

            loss = criterion(outSeq.view(args.batch_size, -1),
                             targetSeq.view(args.batch_size, -1))
            # detach the hidden state so the graph does not grow across batches
            hidden = model.repackage_hidden(hidden)
            # .item() replaces the deprecated `total_loss += loss.data` /
            # `total_loss[0]` pattern, which breaks on 0-dim loss tensors
            # in modern PyTorch.
            total_loss += loss.item()

    return total_loss / nbatch
def evaluate(args, model, test_dataset):
    """Professor-forcing style evaluation loss over `test_dataset`.

    Per batch, sums a free-running loss, a teacher-forcing loss, and a
    hidden-state matching loss; returns the average over batches.  Relies
    on the module-level `criterion` and `get_batch`.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    with torch.no_grad():
        total_loss = 0
        hidden = model.init_hidden(args.eval_batch_size)
        nbatch = 1
        for nbatch, i in enumerate(range(0, test_dataset.size(0) - 1, args.bptt)):
            inputSeq, targetSeq = get_batch(args,test_dataset, i)
            # inputSeq: [ seq_len * batch_size * feature_size ]
            # targetSeq: [ seq_len * batch_size * feature_size ]
            # detached copy of the hidden state for the free-running pass
            hidden_ = model.repackage_hidden(hidden)
            '''Loss1: Free running loss'''
            # feed the model its own previous output, one step at a time
            outVal = inputSeq[0].unsqueeze(0)
            outVals=[]
            hids1 = []
            for i in range(inputSeq.size(0)):
                outVal, hidden_, hid = model.forward(outVal, hidden_,return_hiddens=True)
                outVals.append(outVal)
                hids1.append(hid)
            outSeq1 = torch.cat(outVals,dim=0)
            hids1 = torch.cat(hids1,dim=0)
            loss1 = criterion(outSeq1.view(args.batch_size,-1), targetSeq.view(args.batch_size,-1))

            '''Loss2: Teacher forcing loss'''
            outSeq2, hidden, hids2 = model.forward(inputSeq, hidden, return_hiddens=True)
            loss2 = criterion(outSeq2.view(args.batch_size, -1), targetSeq.view(args.batch_size, -1))

            '''Loss3: Simplified Professor forcing loss'''
            # match free-running hiddens to detached teacher-forced hiddens
            loss3 = criterion(hids1.view(args.batch_size,-1), hids2.view(args.batch_size,-1).detach())

            '''Total loss = Loss1+Loss2+Loss3'''
            loss = loss1+loss2+loss3

            total_loss += loss.item()

    return total_loss / (nbatch+1)
Пример #23
0
def evaluate(model, loader, data_len):
    """Evaluate a token-classifier over `loader`.

    Returns (accuracy, mean loss, all targets, all predictions); uses the
    module-level `device`, `criterion` and `np`.
    """
    model = model.eval()
    with torch.no_grad():
        all_targets = []
        all_preds = []
        accurate_preds = 0
        losses = []
        for batch in loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            all_targets.extend(list(batch['targets'].squeeze().numpy()))
            targets = batch['targets'].to(device)
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)
            _, preds = torch.max(outputs, dim=1)
            batch_loss = criterion(outputs, targets)
            all_preds.extend(list(preds.cpu().squeeze().numpy()))
            accurate_preds += torch.sum(preds == targets)
            losses.append(batch_loss.item())
    return accurate_preds / data_len, np.mean(losses), all_targets, all_preds
Пример #24
0
            # --- fragment: body of the per-batch training loop; the enclosing
            # `with tqdm(...) as tepoch:` / epoch loop starts before this view ---
            optimizer.zero_grad()  # zero the accumulated gradients
            output = model(data)  # forward pass
            loss = criterion(output, target)  # compute the loss
            loss.backward()  # backpropagate
            optimizer.step()  # optimizer gradient-descent step

            predictions = output.argmax(dim=1, keepdim=True).squeeze()  # predicted labels
            correct += (predictions == target).sum().item()  # count correct predictions
            accuracy = correct / (BATCH_SIZE * batch)  # running accuracy so far

            tepoch.set_postfix(loss=loss.item(), accuracy=100. * accuracy)

    if epoch % 15 == 0:
        print("Epoch done, evaluating:", epoch)
        torch.save(model.state_dict(), "./chkpoint_res.bin")  # save a checkpoint every 15 epochs
        model.eval()  # switch to evaluation mode for the test pass
        with tqdm(eval_dataloader, unit="batch") as eepoch:
            correct = 0
            batch = 0
            for data, target in eepoch:
                batch += 1
                eepoch.set_description(f"Epoch {epoch}")
                data, target = data.cuda(), target.cuda()
                output = model(data)
                predictions = output.argmax(dim=1, keepdim=True).squeeze()
                correct += (predictions == target).sum().item()
                accuracy = correct / (BATCH_SIZE * batch)

                # NOTE(review): `loss` here is the last TRAINING loss, not an
                # eval loss — shown only as a progress-bar placeholder
                eepoch.set_postfix(loss=loss.item(), accuracy=100. * accuracy)

print('Finished Training')
Пример #25
0
# define the image transforms (ImageNet-style normalization)
transform = albumentations.Compose([
    albumentations.Normalize(mean=[0.45734706, 0.43338275, 0.40058118],
                             std=[0.23965294, 0.23532275, 0.2398498],
                             always_apply=True)
])

# initialize the model
# NOTE(review): `model = model` is a no-op self-assignment — presumably the
# model object is imported/constructed earlier in the file; verify.
model = model
# load the model checkpoint
checkpoint = torch.load(args['model_path'])
# load the trained weights
model.load_state_dict(checkpoint['model_state_dict'])
# put the model in inference mode on the computation device
model.eval().to(config.DEVICE)

image = np.array(Image.open(args['input']).convert('RGB'))
# make a copy of the image for later visualization/use
orig_image = image.copy()
# apply transforms
image = transform(image=image)['image']
# transpose HWC -> CHW for PyTorch
image = np.transpose(image, (2, 0, 1))
# convert to torch tensors
image = torch.tensor(image, dtype=torch.float)
# add batch dimension
image = image.unsqueeze(0).to(config.DEVICE)

# forward pass through the model
outputs = model(image)
def train_model(dataloader, model, criterion, optimizer, device, num_epochs, dataset_size):
    """Train a two-input binary classifier, track per-epoch metrics, and checkpoint.

    `dataloader` maps phase ('train'/'test') to a DataLoader yielding
    (input1, input2, label) triples; `dataset_size` maps phase to sample
    counts.  The best test-accuracy weights are saved every epoch and the
    metric histories are dumped to text files at the end.
    """
    model.to(device)
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    acc_list = []
    loss_list = []
    test_acc_list = []
    test_loss_list = []

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            # BUG FIX: iterate the `dataloader` parameter; the original read
            # the module-level `dataloaders` global, leaving the parameter
            # unused and ignoring whatever the caller passed in.
            for input1, input2, labels in tqdm(dataloader[phase], position=0, leave=True):
                input1 = input1.to(device)
                input2 = input2.to(device)
                labels = labels.to(device)
                optimizer.zero_grad()

                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(input1, input2)
                    outputs = torch.reshape(outputs, (outputs.shape[0],))
                    outputs = outputs.type(torch.DoubleTensor)
                    labels = labels.type(torch.DoubleTensor)

                    # NOTE(review): scores > 0.5 map to class 0 and <= 0.5 to
                    # class 1 — looks inverted for a sigmoid-style output;
                    # confirm the label convention before changing it.
                    pred = []
                    for score in outputs:
                        pred.append(0 if score > 0.5 else 1)
                    pred = torch.FloatTensor(pred)

                    loss = criterion(outputs, labels)

                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                running_loss += loss.item() * input1.size(0)
                running_corrects += torch.sum(pred == labels.data)

            epoch_loss = running_loss / dataset_size[phase]
            epoch_acc = running_corrects.double() / dataset_size[phase]

            if phase == 'train':
                acc_list.append(epoch_acc)
                loss_list.append(epoch_loss)
            elif phase == 'test':
                test_acc_list.append(epoch_acc)
                test_loss_list.append(epoch_loss)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())

        torch.save(best_model_wts, osp.join(Config['root_path'], Config['checkpoint_path'], 'model.pth'))
        print('Model saved at: {}'.format(osp.join(Config['root_path'], Config['checkpoint_path'], 'model.pth')))

    time_elapsed = time.time() - since
    print('Time taken to complete training: {:0f}m {:0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best acc: {:.4f}'.format(best_acc))

    np.savetxt('acc_list.txt', acc_list)
    np.savetxt('test_acc_list.txt', test_acc_list)
    np.savetxt('loss_list.txt', loss_list)
    np.savetxt('test_loss_list.txt', test_loss_list)
def generate_output(args, epoch, model, gen_dataset, disp_uncertainty=True, startPoint=500, endPoint=3500):
    """Roll the model over ``gen_dataset`` and plot its predictions.

    For the first ``startPoint`` steps the model is fed ground-truth
    samples (1-step predictions); from ``startPoint`` on, each output is
    fed back in as the next input (recursive predictions).  When
    ``args.save_fig`` is set, a comparison figure is written under
    ``result/<args.data>/<args.filename>/fig_prediction`` and the
    reconstructed prediction sequence is returned; otherwise the
    function does nothing and returns ``None``.

    NOTE: ``disp_uncertainty`` is currently unused.
    Relies on the module-level ``preprocess_data``, ``TimeseriesData``
    and ``plt`` names.
    """
    if not args.save_fig:
        return None  # same silent no-op as the original fall-through

    # Evaluation mode disables dropout while generating.
    model.eval()
    hidden = model.init_hidden(1)

    predictions = []
    with torch.no_grad():
        for step in range(endPoint):
            if step >= startPoint:
                # Recursive mode: the previous output becomes the input.
                out, hidden = model.forward(out, hidden)
            else:
                # Teacher-forced mode: feed the ground-truth sample.
                out, hidden = model.forward(gen_dataset[step].unsqueeze(0), hidden)
            predictions.append(out.data.cpu()[0][0].unsqueeze(0))

    outSeq = torch.cat(predictions, dim=0)  # [seqLength * feature_dim]

    # Undo the normalisation applied during preprocessing.
    target = preprocess_data.reconstruct(gen_dataset.cpu().numpy(),
                                         TimeseriesData.mean, TimeseriesData.std)
    outSeq = preprocess_data.reconstruct(outSeq.numpy(),
                                         TimeseriesData.mean, TimeseriesData.std)

    plt.figure(figsize=(15, 5))
    for feat in range(target.size(-1)):
        plt.plot(target[:, :, feat].numpy(), label='Target' + str(feat),
                 color='black', marker='.', linestyle='--', markersize=1, linewidth=0.5)
        plt.plot(range(startPoint), outSeq[:startPoint, feat].numpy(),
                 label='1-step predictions for target' + str(feat),
                 color='green', marker='.', linestyle='--', markersize=1.5, linewidth=1)
        plt.plot(range(startPoint, endPoint), outSeq[startPoint:, feat].numpy(),
                 label='Recursive predictions for target' + str(feat),
                 color='blue', marker='.', linestyle='--', markersize=1.5, linewidth=1)

    plt.xlim([startPoint - 500, endPoint])
    plt.xlabel('Index', fontsize=15)
    plt.ylabel('Value', fontsize=15)
    plt.title('Time-series Prediction on ' + args.data + ' Dataset', fontsize=18, fontweight='bold')
    plt.legend()
    plt.tight_layout()
    plt.text(startPoint - 500 + 10, target.min(), 'Epoch: ' + str(epoch), fontsize=15)

    save_dir = Path('result', args.data, args.filename).with_suffix('').joinpath('fig_prediction')
    save_dir.mkdir(parents=True, exist_ok=True)
    plt.savefig(save_dir.joinpath('fig_epoch' + str(epoch)).with_suffix('.png'))
    plt.close()
    return outSeq
    def validate(self, epoch):
        """Run one validation epoch.

        Computes the average loss, mean IoU and pixel accuracy over the
        validation set, logs batch-wise metrics to TensorBoard, and saves
        segmentation maps for the last batch of the epoch.

        Args:
            epoch: current epoch index (used when saving segmentation maps).

        Returns:
            tuple: ``(valid_loss, mIoU, pixel_acc)`` for this epoch.
        """
        print('Validating')
        # Bug fix: the original called ``model.eval()`` on a bare
        # module-level name; this object's model is ``self.model``,
        # which is what the loop below actually runs.
        self.model.eval()
        valid_running_loss = 0.0
        valid_running_inter, valid_running_union = 0, 0
        valid_running_correct, valid_running_label = 0, 0
        # calculate the number of batches
        num_batches = int(
            len(self.valid_dataset) / self.valid_data_loader.batch_size)
        with torch.no_grad():
            prog_bar = tqdm(self.valid_data_loader, total=num_batches)
            counter = 0  # to keep track of batch counter
            for i, data in enumerate(prog_bar):
                counter += 1
                data, target = data[0].to(self.device), data[1].to(self.device)
                outputs = self.model(data)
                outputs = outputs['out']

                # save the validation segmentation maps every...
                # ... last batch of each epoch
                if i == num_batches - 1:
                    draw_seg_maps(data, outputs, epoch, i)

                ##### BATCH-WISE LOSS #####
                loss = self.criterion(outputs, target)
                valid_running_loss += loss.item()
                ###########################

                ##### BATCH-WISE METRICS ####
                correct, labeled, inter, union = eval_metric(
                    outputs, target, self.num_classes)
                valid_running_inter += inter
                valid_running_union += union
                # for pixel accuracy
                valid_running_correct += correct
                valid_running_label += labeled
                #############################

                ##### TENSORBOARD LOGGING #####
                # np.spacing(1) guards against division by zero.
                valid_running_IoU = 1.0 * inter / (np.spacing(1) + union)
                valid_running_mIoU = valid_running_IoU.mean()
                valid_running_pixacc = 1.0 * correct / (np.spacing(1) +
                                                        labeled)
                self.writer.tensorboard_writer(loss,
                                               valid_running_mIoU,
                                               valid_running_pixacc,
                                               self.valid_iters,
                                               phase='valid')
                ###############################

                prog_bar.set_description(
                    desc=
                    f"Loss: {loss:.4f} | mIoU: {valid_running_mIoU:.4f} | PixAcc: {valid_running_pixacc:.4f}"
                )

                self.valid_iters += 1

        ##### PER EPOCH LOSS #####
        valid_loss = valid_running_loss / counter
        ##########################

        ##### PER EPOCH METRICS ######
        # IoU and mIoU
        IoU = 1.0 * valid_running_inter / (np.spacing(1) + valid_running_union)
        mIoU = IoU.mean()
        # pixel accuracy
        pixel_acc = 1.0 * valid_running_correct / (np.spacing(1) +
                                                   valid_running_label)
        ##############################
        return valid_loss, mIoU, pixel_acc
Пример #29
0
def train_model(model,
                device,
                train_data_loader,
                valid_data_loader,
                criterion,
                optimizer,
                scheduler,
                num_epochs=5):
    """
    Train the model and keep the weights of the best-validating epoch.

    Parameters
    --------------
    model : DogClassificationModel
        Network model to be trained.
    device : device
        cuda or cpu
    train_data_loader : dataloader
        dataloader for training
    valid_data_loader : dataloader
        dataloader for validation
    criterion :
        Loss function.
    optimizer :
        Optimizer.
    scheduler :
        Learning rate scheduler.
    num_epochs : int
        The number of epochs.

    Returns
    --------------
    model : DogClassificationModel
        Trained model, loaded with the best validation-accuracy weights.
    """
    since = time.time()
    model = model.to(device)

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        bar = tqdm(total=len(train_data_loader))
        bar.set_description("Epoch: {}/{}".format(epoch + 1, num_epochs))
        """
        Training Phase
        """
        model.train()

        running_loss = 0.0
        running_corrects = 0
        samples_seen = 0  # number of training samples processed so far

        for j, (inputs, labels) in enumerate(train_data_loader):
            optimizer.zero_grad()
            tmp_loss_item = 0.0

            # training
            with torch.set_grad_enabled(True):
                outputs = model(inputs.to(device))
                torch.cuda.empty_cache()

                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels.to(device))

                # backward + optimize only if in training phase
                loss.backward()
                optimizer.step()

                tmp_loss_item = loss.item()

            # statistics
            running_loss += tmp_loss_item * inputs.size(0)
            running_corrects += torch.sum(preds.to('cpu') == labels.data)
            samples_seen += inputs.size(0)

            # progress bar
            bar.update(1)
            # Bug fix: the original averaged via ``// (j + 1)`` (integer
            # division, losing precision) and a hard-coded batch size of
            # 32, which is wrong for the final, possibly smaller batch.
            # Average over the samples actually seen instead.
            tmp_loss = running_loss / samples_seen
            tmp_acc = float(running_corrects) / samples_seen
            bar.set_postfix(OrderedDict(loss=tmp_loss, acc=tmp_acc))

        # Close the bar so validation prints don't interleave with it.
        bar.close()

        # update learning rate scheduler
        scheduler.step()

        dataset_size = len(train_data_loader.dataset)
        epoch_loss = running_loss / dataset_size
        epoch_acc = running_corrects.double() / dataset_size
        """
        Validation Phase
        """
        model.eval()  # Set model to validation mode

        val_running_loss = 0.0
        val_running_corrects = 0

        # Iterate over data.
        for inputs, labels in valid_data_loader:
            val_inputs = inputs.to(device)
            val_labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward pass only; no gradients are tracked here
            with torch.no_grad():
                val_outputs = model(val_inputs)
                _, preds = torch.max(val_outputs, 1)
                loss = criterion(val_outputs, val_labels)

            # statistics
            val_running_loss += loss.item() * val_inputs.size(0)
            val_running_corrects += torch.sum(preds == val_labels.data)

        dataset_size = len(valid_data_loader.dataset)
        val_epoch_loss = val_running_loss / dataset_size
        val_epoch_acc = val_running_corrects.double() / dataset_size

        print('VALIDATION  Loss: {:.4f} Acc: {:.4f}'.format(
            val_epoch_loss, val_epoch_acc))
        print("Elapsed time: {} [sec]".format(time.time() - since))

        # deep copy the model whenever validation accuracy improves
        if val_epoch_acc > best_acc:
            best_acc = val_epoch_acc
            best_model_wts = copy.deepcopy(model.state_dict())

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
# Toy 2-D binary dataset (2000 points, two interleaving half-moons) used
# by the __main__ evaluation block below.  NOTE(review): depends on
# ``sklearn`` being imported elsewhere in this file — confirm.
X, y = sklearn.datasets.make_moons(2000, noise=0.1)
def predict_fn(data, model):
    """Apply *model* to *data* and return the raw outputs.

    Parameters
    ----------
    data : numpy.ndarray
        Input features; cast to float32 before inference.
    model : torch.nn.Module
        Trained model to evaluate.

    Returns
    -------
    numpy.ndarray
        Model outputs, moved to CPU and detached from the autograd graph.
    """
    print('Predicting class labels for the input data...')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Bug fix: the original converted the module-level global ``X`` here,
    # silently ignoring the caller's ``data`` argument.
    data = torch.from_numpy(data.astype('float32'))
    data = data.to(device)
    model.eval()
    # Compute the result of applying the model to the input data.
    with torch.no_grad():
        out = model(data)
    # The result is returned as a numpy array.
    result = out.cpu().detach().numpy()
    return result

if __name__ == '__main__':
    # Restore the trained weights and evaluate on the module-level
    # moons data (X, y).
    model.load_state_dict(torch.load('model/model.pth'))
    model.eval()
    x = torch.from_numpy(X).type(torch.FloatTensor)
    ans = model.predict(x)
    print(ans.numpy())
    # Fix: reuse ``ans`` instead of running a second, redundant forward
    # pass through the model.
    print(accuracy_score(ans, y))
    # Confusion-matrix cells for binary labels 0/1.
    tp = np.logical_and(y, ans).sum()
    fp = np.logical_and(1 - y, ans).sum()
    tn = np.logical_and(1 - y, 1 - ans).sum()
    fn = np.logical_and(y, 1 - ans).sum()
    recall = tp / (tp + fn)
    precision = tp / (tp + fp)
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    print(pd.crosstab(y, ans, rownames=['actuals'],
                      colnames=['predictions']))
    print("\n{:<11} {:.3f}".format('Recall:', recall))
    print("{:<11} {:.3f}".format('Precision:', precision))
    # Bug fix: ``accuracy`` was computed but never reported.
    print("{:<11} {:.3f}".format('Accuracy:', accuracy))
def generate_output(args,
                    epoch,
                    model,
                    gen_dataset,
                    disp_uncertainty=True,
                    startPoint=500,
                    endPoint=3500):
    """Generate and plot model predictions over ``gen_dataset``.

    Ground-truth samples are fed for the first ``startPoint`` steps
    (1-step predictions); afterwards each output is fed back as the
    next input (recursive predictions) up to ``endPoint``.  When
    ``args.save_fig`` is set, a comparison figure is saved under
    ``<args.path_save>/result/<args.data>/<args.filename>`` and the
    reconstructed prediction sequence is returned; otherwise nothing
    happens and ``None`` is returned.

    NOTE(review): ``disp_uncertainty`` is unused — the uncertainty-band
    code below is commented out.  Relies on the module-level
    ``preprocess_data``, ``TimeseriesData`` and ``plt`` names.
    """
    if args.save_fig:
        # Turn on evaluation mode which disables dropout.
        model.eval()
        # Batch size 1: a single sequence is generated.
        hidden = model.init_hidden(1)
        outSeq = []
        upperlim95 = []
        lowerlim95 = []
        with torch.no_grad():
            for i in range(endPoint):
                if i >= startPoint:
                    # if disp_uncertainty and epoch > 40:
                    #     outs = []
                    #     model.train()
                    #     for i in range(20):
                    #         out_, hidden_ = model.forward(out+0.01*Variable(torch.randn(out.size())).cuda(),hidden,noise=True)
                    #         outs.append(out_)
                    #     model.eval()
                    #     outs = torch.cat(outs,dim=0)
                    #     out_mean = torch.mean(outs,dim=0) # [bsz * feature_dim]
                    #     out_std = torch.std(outs,dim=0) # [bsz * feature_dim]
                    #     upperlim95.append(out_mean + 2.58*out_std/np.sqrt(20))
                    #     lowerlim95.append(out_mean - 2.58*out_std/np.sqrt(20))

                    # Recursive mode: previous output becomes the input.
                    out, hidden = model.forward(out, hidden)

                    #print(out_mean,out)

                else:
                    # Teacher-forced mode: feed the ground-truth sample.
                    out, hidden = model.forward(gen_dataset[i].unsqueeze(0),
                                                hidden)
                outSeq.append(out.data.cpu()[0][0].unsqueeze(0))

        outSeq = torch.cat(outSeq, dim=0)  # [seqLength * feature_dim]

        # Undo preprocessing normalisation.  NOTE(review): unlike the
        # sibling variant above, tensors (not numpy arrays) are passed
        # here — assumes ``reconstruct`` accepts tensors; confirm.
        target = preprocess_data.reconstruct(gen_dataset.cpu(),
                                             TimeseriesData.mean,
                                             TimeseriesData.std)
        outSeq = preprocess_data.reconstruct(outSeq, TimeseriesData.mean,
                                             TimeseriesData.std)
        # if epoch>40:
        #     upperlim95 = torch.cat(upperlim95, dim=0)
        #     lowerlim95 = torch.cat(lowerlim95, dim=0)
        #     upperlim95 = preprocess_data.reconstruct(upperlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)
        #     lowerlim95 = preprocess_data.reconstruct(lowerlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)

        plt.figure(figsize=(15, 5))
        # One set of curves per feature dimension.
        for i in range(target.size(-1)):
            plt.plot(target[:, :, i].numpy(),
                     label='Target' + str(i),
                     color='black',
                     marker='.',
                     linestyle='--',
                     markersize=1,
                     linewidth=0.5)
            plt.plot(range(startPoint),
                     outSeq[:startPoint, i].numpy(),
                     label='1-step predictions for target' + str(i),
                     color='green',
                     marker='.',
                     linestyle='--',
                     markersize=1.5,
                     linewidth=1)
            # if epoch>40:
            #     plt.plot(range(startPoint, endPoint), upperlim95[:,i].numpy(), label='upperlim'+str(i),
            #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
            #     plt.plot(range(startPoint, endPoint), lowerlim95[:,i].numpy(), label='lowerlim'+str(i),
            #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
            plt.plot(range(startPoint, endPoint),
                     outSeq[startPoint:, i].numpy(),
                     label='Recursive predictions for target' + str(i),
                     color='blue',
                     marker='.',
                     linestyle='--',
                     markersize=1.5,
                     linewidth=1)

        plt.xlim([startPoint - 500, endPoint])
        plt.xlabel('Index', fontsize=15)
        plt.ylabel('Value', fontsize=15)
        plt.title('Time-series Prediction on ' + args.data + ' Dataset',
                  fontsize=18,
                  fontweight='bold')
        plt.legend()
        plt.tight_layout()
        plt.text(startPoint - 500 + 10,
                 target.min(),
                 'Epoch: ' + str(epoch),
                 fontsize=15)
        save_dir = Path(
            args.path_save + '/result', args.data,
            args.filename).with_suffix('').joinpath('fig_prediction')
        save_dir.mkdir(parents=True, exist_ok=True)
        plt.savefig(
            save_dir.joinpath('fig_epoch' + str(epoch)).with_suffix('.png'))
        #plt.show()
        plt.close()
        return outSeq

    else:
        pass
def generate_output(args,
                    epoch,
                    model,
                    gen_dataset,
                    startPoint=500,
                    endPoint=3500):
    """Generate predictions for the nyc_taxi-style dataset and plot them.

    The model is fed ground-truth samples up to ``startPoint`` and its
    own previous output afterwards.  The de-normalised prediction
    sequence is plotted against the target and saved to
    ``result/nyc_taxi/fig_epoch<epoch>.png``.

    Returns the de-normalised prediction sequence as a numpy array.
    Relies on the module-level ``preprocess_data``, ``TimeseriesData``
    and ``plt`` names.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    hidden = model.init_hidden(1)
    outSeq = []
    # Fix: the original wrapped inputs in Variable(..., volatile=True),
    # which has been a no-op (and a deprecation warning) since
    # PyTorch 0.4; use the torch.no_grad() context instead, matching
    # the sibling implementations in this file.
    with torch.no_grad():
        for i in range(endPoint):
            # NOTE(review): the sibling variants switch to recursive
            # mode at ``i >= startPoint``; ``>`` is kept here to
            # preserve this function's existing behaviour.
            if i > startPoint:
                out, hidden = model.forward(out, hidden)
            else:
                out, hidden = model.forward(gen_dataset[i].unsqueeze(0),
                                            hidden)

            outValue = out.data.cpu()[0][0][0]
            outSeq.append(outValue)

    # Undo the preprocessing normalisation for target and predictions.
    target = preprocess_data.reconstruct(
        gen_dataset.cpu()[:, 0,
                          0].numpy(), TimeseriesData.trainData['seqData_mean'],
        TimeseriesData.trainData['seqData_std'])

    outSeq = preprocess_data.reconstruct(
        np.array(outSeq), TimeseriesData.trainData['seqData_mean'],
        TimeseriesData.trainData['seqData_std'])

    plt.figure(figsize=(15, 5))
    plt.plot(target,
             label='Target',
             color='black',
             marker='.',
             linestyle='--',
             markersize=1,
             linewidth=0.5)
    plt.plot(range(startPoint),
             outSeq[:startPoint],
             label='1-step predictions',
             color='green',
             marker='.',
             linestyle='--',
             markersize=1.5,
             linewidth=1)
    plt.plot(range(startPoint, endPoint, 1),
             outSeq[startPoint:],
             label='Multi-step predictions',
             color='blue',
             marker='.',
             linestyle='--',
             markersize=1.5,
             linewidth=1)
    plt.xlim([1500, endPoint])
    plt.xlabel('Index', fontsize=15)
    plt.ylabel('Value', fontsize=15)

    plt.title('Time-series Prediction on ' + args.data + ' Dataset',
              fontsize=18,
              fontweight='bold')
    plt.legend()
    plt.tight_layout()
    plt.text(1520, 32000, 'Epoch: ' + str(epoch), fontsize=15)
    plt.savefig('result/nyc_taxi/fig_epoch' + str(epoch) + '.png')
    #plt.show()
    plt.close()

    return outSeq
Пример #33
0
def generate_output(args, epoch, model, gen_dataset, scale_norm, data_organization, disp_uncertainty=True, figNumber = 30, startPoint = 50, endPoint = 400):
    """Generate predictions for ``figNumber`` sequences and plot each one.

    For each of the first ``figNumber`` sequences in ``gen_dataset``,
    feeds ground-truth samples up to ``startPoint`` and then recursive
    predictions up to ``endPoint``, de-normalises the results and saves
    one multi-panel figure per sequence.  Returns the de-normalised
    prediction array when ``args.save_fig`` is set, else ``None``.

    NOTE(review): ``disp_uncertainty`` is unused (the uncertainty code
    is commented out).  ``data_type`` and ``DATA_ORGANIZATION`` are not
    defined in this function — presumably module globals; the
    ``data_organization`` parameter is only used for branching below,
    so confirm whether the ``denormalized_data`` calls should use the
    parameter instead of the global.
    """
    
    if args.save_fig:
        # Turn on evaluation mode which disables dropout.
        model.eval()
        
        outSeq = []
        # upperlim95 = []
        # lowerlim95 = []
        
        # Generate one predicted sequence per figure.
        for n in range(figNumber):
            tempOutSeq = []
            hidden = model.init_hidden(1)
            with torch.no_grad():
                for i in range(endPoint):
                    if i>=startPoint:
                        # if disp_uncertainty and epoch > 40:
                        #     outs = []
                        #     model.train()
                        #     for i in range(20):
                        #         out_, hidden_ = model.forward(out+0.01*Variable(torch.randn(out.size())).cuda(),hidden,noise=True)
                        #         outs.append(out_)
                        #     model.eval()
                        #     outs = torch.cat(outs,dim=0)
                        #     out_mean = torch.mean(outs,dim=0) # [bsz * feature_dim]
                        #     out_std = torch.std(outs,dim=0) # [bsz * feature_dim]
                        #     upperlim95.append(out_mean + 2.58*out_std/np.sqrt(20))
                        #     lowerlim95.append(out_mean - 2.58*out_std/np.sqrt(20))
    
                        # Recursive mode: previous output becomes the input.
                        out, hidden = model.forward(out, hidden)
    
                        #print(out_mean,out)
    
                    else:
                        # Teacher-forced mode: feed the ground-truth sample.
                        out, hidden = model.forward(gen_dataset[n][i].unsqueeze(0).unsqueeze(0).float(), hidden)
                    tempOutSeq.append(out)
                    
                tempOutSeq = torch.cat(tempOutSeq, dim=1)
            outSeq.append(tempOutSeq)

        outSeq = torch.cat(outSeq, dim=0) # [seqLength * feature_dim]

        # NOTE(review): uses the globals DATA_ORGANIZATION / data_type
        # rather than the data_organization parameter — confirm intent.
        target = denormalized_data(gen_dataset[:figNumber].cpu().numpy(), scale_norm, DATA_ORGANIZATION, data_type)

        outSeq = denormalized_data(outSeq.cpu().numpy(), scale_norm, DATA_ORGANIZATION, data_type)
  
        # if epoch>40:
        #     upperlim95 = torch.cat(upperlim95, dim=0)
        #     lowerlim95 = torch.cat(lowerlim95, dim=0)
        #     upperlim95 = preprocess_data.reconstruct(upperlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)
        #     lowerlim95 = preprocess_data.reconstruct(lowerlim95.data.cpu().numpy(),TimeseriesData.mean,TimeseriesData.std)

        if data_organization == 'partial_combination':
            # Three stacked panels, one per feature.
            for i in range(target.shape[0]):
                fig = plt.figure(figsize=(15,5))
                plt.axis('off')
                # NOTE(review): the ``b`` keyword was deprecated in
                # Matplotlib 3.5 and removed later (renamed ``visible``).
                plt.grid(b=None)
                plt.title('Time-series Prediction on ' + args.data + ' Dataset', y = 1.05, fontsize=18, fontweight='bold')
                for j in range(3):     
                    ax = fig.add_subplot(3, 1, j+1)
                    ax.plot(target[i,:,j], label='Target'+str(i),
                             color='black', marker='.', linestyle='--', markersize=1, linewidth=0.5)
                    ax.plot(range(startPoint), outSeq[i,:startPoint,j], label='1-step predictions for target'+str(i),
                             color='green', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    # if epoch>40:
                    #     plt.plot(range(startPoint, endPoint), upperlim95[:,i].numpy(), label='upperlim'+str(i),
                    #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    #     plt.plot(range(startPoint, endPoint), lowerlim95[:,i].numpy(), label='lowerlim'+str(i),
                    #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    ax.plot(range(startPoint, endPoint), outSeq[i,startPoint:,j], label='Recursive predictions for target'+str(i),
                             color='blue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    
                    # plt.xlim([startPoint-500, endPoint])
                    plt.xlabel('Index',fontsize=15)
                    plt.ylabel('Value',fontsize=15)
                    plt.legend()
                plt.subplots_adjust(wspace = 0.2, hspace = 0.3)
                # plt.tight_layout()
                # plt.text(startPoint-500+10, target.min(), 'Epoch: '+str(epoch),fontsize=15)                               
                save_dir = Path('result',args.data,args.filename).with_suffix('').joinpath('fig_prediction')
                save_dir.mkdir(parents=True,exist_ok=True)
                plt.savefig(save_dir.joinpath('fig_epoch'+str(epoch)+'_'+str(i+1)).with_suffix('.png'))
                plt.show()
                plt.close()
        elif data_organization == 'full_combination':
            # Two-by-two grid of panels, one per feature.
            for i in range(target.shape[0]): 
                fig = plt.figure(figsize=(15,5))
                plt.axis('off')
                # NOTE(review): same deprecated ``b`` keyword as above.
                plt.grid(b=None)
                plt.title('Time-series Prediction on ' + args.data + ' Dataset', y = 1.05, fontsize=18, fontweight='bold')
                for j in range(4):
                    
                    ax = fig.add_subplot(2, 2, j+1)
                    ax.plot(target[i,:,j], label='Target'+str(i),
                             color='black', marker='.', linestyle='--', markersize=1, linewidth=0.5)
                    ax.plot(range(startPoint), outSeq[i,:startPoint,j], label='1-step predictions for target'+str(i),
                             color='green', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    # if epoch>40:
                    #     plt.plot(range(startPoint, endPoint), upperlim95[:,i].numpy(), label='upperlim'+str(i),
                    #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    #     plt.plot(range(startPoint, endPoint), lowerlim95[:,i].numpy(), label='lowerlim'+str(i),
                    #              color='skyblue', marker='.', linestyle='--', markersize=1.5, linewidth=1)
                    ax.plot(range(startPoint, endPoint), outSeq[i,startPoint:,j], label='Recursive predictions for target'+str(i),
                             color='blue', marker='.', linestyle='--', markersize=1.5, linewidth=1)    
                    
                    # plt.xlim([startPoint-500, endPoint])
                    plt.xlabel('Index',fontsize=15)
                    plt.ylabel('Value',fontsize=15)
                    plt.legend()
                    
                plt.subplots_adjust(wspace = 0.2, hspace = 0.3)
                # plt.tight_layout()
                # plt.text(startPoint-500+10, target.min(), 'Epoch: '+str(epoch),fontsize=15)                
                # plt.show()
                save_dir = Path('result',args.data,args.filename).with_suffix('').joinpath('fig_prediction')
                save_dir.mkdir(parents=True,exist_ok=True)
                plt.savefig(save_dir.joinpath('fig_epoch'+str(epoch)+'_'+str(i+1)).with_suffix('.png'))
                plt.show()
                plt.close()
        return outSeq

    else:
        pass