Example #1
0
class Testing:
    """Evaluate a trained ConvNet checkpoint on a held-out test set.

    Loads the saved state dict into a fresh ConvNet, builds a test
    DataLoader, and immediately runs evaluation (accuracy plus a
    classification report) from the constructor.
    """

    def __init__(self, trainedModel, pathTest, batchSize, imageSize, numClasses=10):
        # numClasses generalizes the previously hard-coded 10-way head;
        # the default keeps existing callers working unchanged.
        self.model = ConvNet(numClasses)
        state_dict = torch.load(trainedModel)
        self.model.load_state_dict(state_dict)
        self.pathTest = pathTest
        self.imageSize = imageSize
        self.batchSize = batchSize
        self.data_size = calculate_data_size(self.pathTest)
        # shuffle=False keeps predictions aligned with dataset order.
        self.data_loader = run_loader('test', self.pathTest, self.batchSize,
                                      self.imageSize, shuffle=False)
        self.test()

    def test(self):
        """Run inference over the test loader; print accuracy and report."""
        self.model.eval()
        correct = 0
        y_hat = []
        y_true = []
        # Inference only: no_grad avoids building autograd graphs.
        with torch.no_grad():
            for X, y in tqdm(self.data_loader):
                out = self.model(X)

                predictions = torch.argmax(out, 1)
                correct += torch.sum(predictions == y).item()
                y_hat.append(predictions)
                y_true.append(y)

        y_hat = torch.cat(y_hat)
        y_true = torch.cat(y_true)
        acc = correct / self.data_size
        print(acc)
        # Fix: sklearn's classification_report signature is
        # (y_true, y_pred) — the arguments were previously swapped,
        # which silently transposes precision and recall.
        print(classification_report(y_true, y_hat))
Example #2
0
def inference_model(network, lstm_out, out_format, model_path):
  """Build the requested architecture, load trained weights, return it.

  Args:
    network: "CNN" or "CRNN".
    lstm_out: LSTM output size (used only by the CRNN).
    out_format: "cartesian" (3-dim output) or "class" (one output per
      DOA class).
    model_path: path to a saved state dict.

  Returns:
    (model, doa_classes) with the model in eval mode on the module-level
    `device`.

  Raises:
    ValueError: on an unrecognized out_format or network.
  """
  doa_classes = DoaClasses()
  if out_format == "cartesian":
    out_dim = 3
  elif out_format == "class":
    out_dim = len(doa_classes.classes)
  else:
    # Previously an unknown format fell through to a NameError on out_dim.
    raise ValueError(f"Unknown out_format: {out_format!r}")

  if network == "CNN":
    model = ConvNet(device, Dropouts(0, 0, 0), out_dim, doa_classes)
  elif network == "CRNN":
    model = CRNN(device, Dropouts(0, 0, 0), out_dim, doa_classes, lstm_out)
  else:
    # Previously an unknown network fell through to a NameError on model.
    raise ValueError(f"Unknown network: {network!r}")

  model.load_state_dict(torch.load(model_path, map_location=device))
  model.eval()
  model.to(device)

  return model, doa_classes
Example #3
0
def main():
    """Load a saved ConvNet checkpoint and prepare it for inference.

    Prints the device in use, the network parameters, and the state
    dict; leaves a no_grad block as the inference entry point.
    """

    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("Using device: ", device)

    model = ConvNet().to(device)

    try:
        # map_location lets a checkpoint saved on GPU load on a CPU-only
        # machine; without it torch.load raises RuntimeError, which the
        # IOError handler below would not catch.
        model.load_state_dict(torch.load(FILE, map_location=device))
        print("Finished loading model.")
        model.eval()
    except IOError:
        print("Failed to load model. Model might not exist.")
        return

    print("Print Network Parameters:")
    for param in model.parameters():
        print(param)

    print("Print model state dict: ", model.state_dict())

    with torch.no_grad():
        print("Perform inference/testing here...")
Example #4
0
def pred_prob(arg_path, field_path, pth_path, doc, device=torch.device('cpu')):
    """Score a document for risk-of-bias reporting probability.

    Downloads the saved args, the pickled torchtext TEXT field, and the
    model checkpoint from the rob-pome GitHub repo, rebuilds the matching
    network, and runs *doc* through it once.

    Args:
        arg_path: repo-relative path of the JSON args file.
        field_path: repo-relative path of the pickled TEXT field.
        pth_path: repo-relative path of the checkpoint (.pth).
        doc: raw document text to score.
        device: torch device for loading/inference (default CPU).

    Returns:
        float: probability of the positive class (RoB reported).

    Raises:
        ValueError: if args specify an unknown net_type.
    """
    # URLs are joined with '/' explicitly — os.path.join would insert
    # backslashes on Windows and break the URL.
    raw_base = 'https://raw.githubusercontent.com/qianyingw/rob-pome/master/rob-app'
    repo_base = 'https://github.com/qianyingw/rob-pome/raw/master/rob-app'

    # Load args
    with urllib.request.urlopen(f"{raw_base}/{arg_path}") as url:
        args = json.loads(url.read().decode())['args']

    # Load TEXT field (downloaded to a temp file, unpickled, removed).
    # NOTE(review): dill.load on downloaded data executes arbitrary
    # pickled code — acceptable only because the source repo is trusted.
    field_path = wget.download(f"{repo_base}/{field_path}")
    with open(field_path, "rb") as fin:
        TEXT = dill.load(fin)
    os.remove(field_path)

    unk_idx = TEXT.vocab.stoi[TEXT.unk_token]  # 0
    pad_idx = TEXT.vocab.stoi[TEXT.pad_token]  # 1

    # Load model
    if args['net_type'] == 'cnn':
        sizes = [int(s) for s in args['filter_sizes'].split(',')]
        model = ConvNet(vocab_size=args['max_vocab_size'] + 2,
                        embedding_dim=args['embed_dim'],
                        n_filters=args['num_filters'],
                        filter_sizes=sizes,
                        output_dim=2,
                        dropout=args['dropout'],
                        pad_idx=pad_idx,
                        embed_trainable=args['embed_trainable'],
                        batch_norm=args['batch_norm'])
    elif args['net_type'] == 'attn':
        model = AttnNet(vocab_size=args['max_vocab_size'] + 2,
                        embedding_dim=args['embed_dim'],
                        rnn_hidden_dim=args['rnn_hidden_dim'],
                        rnn_num_layers=args['rnn_num_layers'],
                        output_dim=2,
                        bidirection=args['bidirection'],
                        rnn_cell_type=args['rnn_cell_type'],
                        dropout=args['dropout'],
                        pad_idx=pad_idx,
                        embed_trainable=args['embed_trainable'],
                        batch_norm=args['batch_norm'],
                        output_attn=False)
    else:
        # Previously an unknown net_type fell through to a NameError.
        raise ValueError(f"Unknown net_type: {args['net_type']!r}")

    # Load checkpoint
    pth_path = wget.download(f"{repo_base}/{pth_path}")
    checkpoint = torch.load(pth_path, map_location=device)
    os.remove(pth_path)
    model.load_state_dict(checkpoint['state_dict'], strict=False)
    model.cpu()

    # Load pre-trained embedding
    pretrained_embeddings = TEXT.vocab.vectors
    model.embedding.weight.data.copy_(pretrained_embeddings)
    model.embedding.weight.data[unk_idx] = torch.zeros(
        args['embed_dim'])  # Zero the initial weights for <unk> tokens
    model.embedding.weight.data[pad_idx] = torch.zeros(
        args['embed_dim'])  # Zero the initial weights for <pad> tokens

    # Tokenization
    tokens = [tok.text.lower() for tok in nlp.tokenizer(doc)]
    idx = [TEXT.vocab.stoi[t] for t in tokens]

    # Pad with pad_idx up to max_token_len, or truncate if too long.
    # (Previously padded a whole max_token_len chunk per loop pass with a
    # hard-coded 1 and relied on the truncation below — same final
    # result, but wasteful and coupled to pad_idx happening to be 1.)
    if len(idx) < args['max_token_len']:
        idx = idx + [pad_idx] * (args['max_token_len'] - len(idx))
    else:
        idx = idx[:args['max_token_len']]

    # Prediction
    model.eval()
    doc_tensor = torch.LongTensor(idx).to(device)
    doc_tensor = doc_tensor.unsqueeze(
        1)  # bec AttnNet input shape is [seq_len, batch_size]
    probs = model(doc_tensor)
    probs = probs.data.cpu().numpy()[0]

    return probs[1]
Example #5
0
        # Forward pass
        outputs = model(images)
        # labels appear to be 2-D (one-hot-like): max over dim 1 recovers
        # class indices for the criterion — TODO confirm label format
        loss = criterion(outputs, torch.max(labels, 1)[1])

        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Log every 100 steps.
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, i + 1, total_step, loss.item()))

# Test the model
# eval mode (batchnorm uses moving mean/var instead of mini-batch mean/var)
model.eval()
with torch.no_grad():
    total = 0
    count = 0
    for i, im_batch in enumerate(test_loader):
        images = im_batch['image']
        images = images.to(device)

        labels = im_batch['arrangement']
        labels = labels.reshape(-1, num_classes)
        labels = labels.float().to(device)

        outputs = model(images)

        # NOTE(review): Tensor.reshape is NOT in-place — this return value
        # is discarded, so the call has no effect. Likely intended:
        # outputs = outputs.reshape(test_batch_size, num_classes)
        outputs.reshape(test_batch_size, num_classes)
        # get class index from one-hot
Example #6
0
        # Validation pass over the current fold, one minibatch at a time.
        for b in minibatch(val_data_curr_fold, batch_size):
            val_batch_pred = val_batch(model, criterion, optimizer, b)
            val_pred.append(val_batch_pred)
        val_pred = np.concatenate(val_pred, axis=0)

        # calculate acc — column 1 presumably holds the target label;
        # TODO confirm against the data-loading code
        train_target = train_data_curr_fold[:, 1].reshape(-1)
        train_acc = cal_acc(train_pred, train_target)
        val_target = val_data_curr_fold[:, 1].reshape(-1)
        val_acc = cal_acc(val_pred, val_target)

        # print stats
        print(
            f"fold: {i}, epoch: {curr_epoch}, train acc: {train_acc}, val acc: {val_acc}"
        )

    # test acc — evaluated once per fold after the epoch loop finishes.
    model = model.eval()
    test_pred = []
    for b in minibatch(test_data, batch_size):
        test_batch_pred = val_batch(model, criterion, optimizer, b)
        test_pred.append(test_batch_pred)
    test_pred = np.concatenate(test_pred, axis=0)
    test_target = test_data[:, 1].reshape(-1)
    test_acc = cal_acc(test_pred, test_target)
    # NOTE(review): "percision" misspelling kept — it matches the printed
    # message below and any downstream log parsing.
    test_f_score, test_percision, test_recall = cal_f(test_pred, test_target)
    print(f"fold: {i}, test acc: {test_acc}")
    print(
        f"fold: {i}, test percision: {test_percision}, test recall: {test_recall}, test f score: {test_f_score}"
    )
Example #7
0
        
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        
        # Log every 100 steps.
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}' 
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))

# Test the model
model.eval()  # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        # predicted = class index with the highest logit per sample
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

    print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
out_name = os.path.join(args.output_dir, args.img_folder)
#--------------------------------#


# Dataset and loader: preprocessing scales/crops images per CLI args.
face_dataset = ImageData(root_dir=args.input_dir,\
                                transform=transforms.Compose([PreprocessData(args.scale_size, args.crop_size, mode= args.mode)]))
dataloader = DataLoader(face_dataset, batch_size=args.batch_size, shuffle=True)

#--------------- Load Weights ----------------#
# ConvNet(3, args.K): 3 presumably input channels (RGB), K landmarks —
# TODO confirm against the model definition.
net = ConvNet(3, args.K).to(device)
checkpoint = torch.load(os.path.join(args.output_dir, args.checkpoint))
net.load_state_dict(checkpoint['model_state_dict'])
#---------------------------------------------#

#-------------visualize----------------------#
net.eval()
with torch.no_grad():

    # One color per landmark channel.
    clr = landmark_colors(args.K)

    for i_batch, sample_batched in enumerate(dataloader):
        i_image = sample_batched['image']
        # NOTE(review): only name[0] is used — assumes batch_size == 1
        # here; verify the CLI default.
        i_name = sample_batched['name'][0].split("/")[-1]
        print(i_name)
        i_image = i_image.float().to(device)
        out = net(i_image)

        #single batch operation
        p = spatialSoft(out, args).squeeze(0)
        # print(p.size())
        pred = torch.mean(p, dim=0)
import cv2
import torch
from torchsummary import summary
from torchvision.transforms import ToTensor
import numpy as np
from tensorboardX import SummaryWriter
from model import ConvNet
from widerface import WIDERFaceDetection
from augmentations import SSDAugmentation

if __name__ == "__main__":
    # Load the detection net and its trained weights (CPU first, then
    # moved to GPU if one is available).
    net = ConvNet()
    net.load_state_dict(torch.load('no_gassuion_epoch240.pth'))
    net = net.eval()
    if torch.cuda.is_available():
        net = net.cuda()
    # Print a layer-by-layer summary for a single 640x640 RGB input.
    # NOTE(review): summary(net.cuda(), ...) forces CUDA even when the
    # availability check above failed — will raise on a CPU-only machine.
    summary(net.cuda(), input_size=(3, 640, 640), batch_size=1, device='cuda')

    WIDERFace_ROOT = r"F:\Datasets\人脸识别\WIDERFACE"
    dataset = WIDERFaceDetection(WIDERFace_ROOT)

    writer = SummaryWriter('eval_log')

    # img = dataset.pull_image(1144)
    img = cv2.imread('2.jpg')
    # cv2.waitKey()
    # _, img = cv2.VideoCapture(0).read()
    # img = cv2.resize(img, (640, 640))

    # Keep an untouched copy for drawing; convert HWC image to a
    # 1xCxHxW float tensor for the network.
    src = img.copy()
    img = ToTensor()(img).unsqueeze(0)