Example #1
            def closure():
                # clamp the updated input image to the valid [0, 1] range
                input_img.data.clamp_(0, 1)

                optimizer.zero_grad()
                # the forward pass records the losses inside the loss modules
                model(input_img)
                style_score = 0
                content_score = 0

                for sl in style_losses:
                    style_score += sl.loss
                for cl in content_losses:
                    content_score += cl.loss

                style_score *= style_weight
                content_score *= content_weight

                loss = style_score + content_score
                loss.backward()

                run[0] += 1
                if run[0] % 50 == 0:
                    print("run {}:".format(run))
                    print('Style Loss : {:4f} Content Loss: {:4f}'.format(
                        style_score.item(), content_score.item()))
                    print()

                return loss
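The closure is written for L-BFGS, which re-evaluates the objective several times per step and therefore needs the whole loss computation as a callable. A minimal driver sketch, where input_img is the image tensor being optimized, run is the same single-element counter list the closure increments, and num_steps is an assumed step budget:

import torch.optim as optim

optimizer = optim.LBFGS([input_img.requires_grad_()])
run = [0]
while run[0] <= num_steps:
    optimizer.step(closure)  # L-BFGS invokes closure() internally

# final clamp so the result stays a valid image
input_img.data.clamp_(0, 1)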
Example #2
def test_model(model, test_loader, title):
    # switch to evaluation mode
    model.eval()

    running_corrects = 0
    y_pred = []
    y_true = []

    for i, data in enumerate(test_loader, 0):

        # get the inputs
        inputs, labels = data

        # move tensors to the GPU if one is in use (Variable is deprecated)
        if use_gpu:
            inputs = inputs.cuda()
            labels = labels.cuda()

        # forward
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)

        running_corrects += torch.sum(preds == labels).item()
        y_pred += list(preds.cpu())
        y_true += list(labels.cpu())

    test_acc = running_corrects * 1.0 / test_dataset_sizes

    print('Test_Acc: {:.4f}'.format(test_acc))
    plt.figure()
    plot_confusion_matrix(y_pred, y_true, title)

    return y_pred
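Both this example and Example #7 assume a plot_confusion_matrix(y_pred, y_true, title) helper that is not shown. A minimal sketch using sklearn and matplotlib; the signature and behavior are assumptions, not the original helper:

import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

def plot_confusion_matrix(y_pred, y_true, title):
    # hypothetical stand-in for the helper the examples assume
    cm = confusion_matrix(y_true, y_pred)
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.title(title)
    plt.colorbar()
    plt.xlabel('Predicted label')
    plt.ylabel('True label')
    plt.show()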
Example #3
def test(model, last_layer_input, clf):
    print(len(test_loader.dataset))
    # put features and targets into tensor variables
    features = torch.Tensor(len(test_loader.dataset), last_layer_input)
    targets = torch.Tensor(len(test_loader.dataset))
    # use for loop to extract the features and target from the test loader
    for i, (in_data, target) in enumerate(test_loader):
        print("iteration")
        print(i)
        from_ = int(i)
        till_ = int(from_ + 1)
        # volatile is gone in modern PyTorch; disable autograd explicitly
        with torch.no_grad():
            feature_batch = model(in_data)
        features[from_:till_] = feature_batch.cpu()
        targets[from_:till_] = target
    features = features.numpy()
    targets = targets.numpy()
    prediction = clf.predict(features)
    print(clf)
    print(prediction)
    print(targets)
    accuracy = (prediction == targets).mean()
    print("classification accuracy is:")
    print(accuracy)
    return targets, prediction
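This test() pairs with train() from Example #5 below: both push images through a truncated network and treat the output as fixed features. A hypothetical end-to-end call, assuming the truncated model emits 512-dimensional features:

clf = train(model, 512)                       # fit the SVM on train features
targets, prediction = test(model, 512, clf)   # evaluate it on the test set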
Example #4
def get_model(cfg):
    model_dict = cfg["model"]
    name = model_dict["arch"]
    model = _get_model_instance(name)
    param_dict = copy.deepcopy(model_dict)
    param_dict.pop("arch")

    if name == "fcn":
        model = model(n_classes=cfg["data"]["n_classes"])
        model.apply(weights_init)
    elif name == "drcn":
        model = model(cfg)
        model.apply(weights_init)
    elif name == "unet":
        model = model(cfg)
        model.apply(weights_init)

    return model
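get_model only reads a couple of keys from the config; a minimal dictionary of the shape it expects might look like the following (the concrete values are placeholders):

cfg = {
    "model": {"arch": "fcn"},    # name resolved by _get_model_instance
    "data": {"n_classes": 21},   # e.g. 21 classes for PASCAL VOC
}
model = get_model(cfg)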
Example #5
def train(model, last_layer_input):
    # Pipeline: iterate over train_loader, run each (image, target) batch
    # through the truncated model, collect the feature vectors, then fit
    # an SVM on the full feature matrix.
    # preallocate feature and target tensors
    print(len(train_loader.dataset))
    features = torch.Tensor(len(train_loader.dataset), last_layer_input)
    targets = torch.Tensor(len(train_loader.dataset))
    clf = svm.SVC(decision_function_shape='ovo')
    for i, (in_data, target) in enumerate(train_loader):
        # this indexing assumes the train loader uses batch_size=4
        from_ = i * 4
        till_ = from_ + 4
        # volatile is gone in modern PyTorch; disable autograd explicitly
        with torch.no_grad():
            feature_batch = model(in_data)
        features[from_:till_] = feature_batch.cpu()
        targets[from_:till_] = target
    # convert features and target to numpy form
    features = features.numpy()
    targets = targets.numpy()
    # then fit SVM on the entire features
    clf.fit(features, targets)
    return clf
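Neither this example nor Example #3 shows how the truncated feature extractor is built. One common construction, offered here only as an assumption, replaces the classifier head of a pretrained torchvision model with an identity:

import torch.nn as nn
import torchvision.models as models

model = models.resnet18(pretrained=True)
last_layer_input = model.fc.in_features   # 512 for resnet18
model.fc = nn.Identity()                  # expose the penultimate features
model.eval()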
Example #6
def test(model):
    img_list = []
    tag_list = []
    seg_list = []
    with torch.no_grad():
        model.to(device)
        model.eval()
        for img, tag in test_loader:
            img = img.to(device)
            # keep a CPU copy so the tensors can be converted to numpy later
            img_list.append(img.cpu())
            tag_list.append(tag)
            output = model(img)
            label = output.argmax(dim=1)
            tmp = label.cpu()
            img_final = torch.from_numpy(label2image(tmp))
            seg_list.append(img_final)
        img = torch.cat(img_list, dim=0)
        tag = torch.cat(tag_list, dim=0)
        seg = torch.cat(seg_list, dim=0)
    score = iou(seg, tag)
    print('IoU:', score)
    pic_pred(img[0:4].numpy(), tag[0:4].numpy(), seg[0:4].numpy())
    return score
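label2image, iou, and pic_pred are helpers assumed by this example. A minimal sketch of label2image as a palette lookup; the palette itself is a placeholder:

import numpy as np

# hypothetical palette: one RGB triple per class index
COLORMAP = np.array([[0, 0, 0], [128, 0, 0], [0, 128, 0]], dtype='uint8')

def label2image(pred):
    # pred: (N, H, W) tensor of class indices -> (N, H, W, 3) uint8 array
    return COLORMAP[pred.numpy()]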
Example #7
def test_model(model, test_loader):
    correct_num = 0.0
    y_predict = []
    y_actual = []
    for i, data in enumerate(test_loader, 0):
        # get the inputs
        inputs, labels = data

        # forward
        outputs = model(inputs)
        _, preds = torch.max(outputs.data, 1)
        correct_num += (preds == labels).sum().item()
        y_predict += list(preds)
        y_actual += list(labels)

    test_accuracy = correct_num * 100.0 / testset_size
    print("test accuracy: {:.4f}".format(test_accuracy))
    plot_confusion_matrix(y_predict, y_actual, "VGG16_finetuned")
    return y_predict
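As in Example #2, evaluation normally runs under model.eval() and torch.no_grad(); a hedged call-site sketch, where testset_size is the module-level global the function reads:

testset_size = len(test_loader.dataset)  # global assumed by test_model
model.eval()
with torch.no_grad():
    y_predict = test_model(model, test_loader)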
Example #8
    def get_style_model_and_losses(cnn,
                                   normalization_mean,
                                   normalization_std,
                                   style_img,
                                   content_img,
                                   content_layers=content_layers_default,
                                   style_layers=style_layers_default):
        cnn = copy.deepcopy(cnn)

        # normalization module
        normalization = Normalization(normalization_mean,
                                      normalization_std).to(device)

        # just in order to have an iterable access to the list of
        # content/style losses
        content_losses = []
        style_losses = []

        # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
        # to put in modules that are supposed to be activated sequentially
        model = nn.Sequential(normalization)

        i = 0  # increment every time we see a conv
        for layer in cnn.children():
            if isinstance(layer, nn.Conv2d):
                i += 1
                name = 'conv_{}'.format(i)
            elif isinstance(layer, nn.ReLU):
                name = 'relu_{}'.format(i)
                # The in-place version doesn't play very nicely with the ContentLoss
                # and StyleLoss we insert below. So we replace with out-of-place
                # ones here.
                layer = nn.ReLU(inplace=False)
            elif isinstance(layer, nn.MaxPool2d):
                name = 'pool_{}'.format(i)
            elif isinstance(layer, nn.BatchNorm2d):
                name = 'bn_{}'.format(i)
            else:
                raise RuntimeError('Unrecognized layer: {}'.format(
                    layer.__class__.__name__))

            model.add_module(name, layer)

            if name in content_layers:
                # add content loss:
                target = model(content_img).detach()
                content_loss = ContentLoss(target)
                model.add_module("content_loss_{}".format(i), content_loss)
                content_losses.append(content_loss)

            if name in style_layers:
                # add style loss:
                target_feature = model(style_img).detach()
                style_loss = StyleLoss(target_feature)
                model.add_module("style_loss_{}".format(i), style_loss)
                style_losses.append(style_loss)

        # now we trim off the layers after the last content and style losses
        for i in range(len(model) - 1, -1, -1):
            if isinstance(model[i], ContentLoss) or isinstance(
                    model[i], StyleLoss):
                break

        model = model[:(i + 1)]

        return model, style_losses, content_losses
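The function relies on Normalization, ContentLoss, and StyleLoss modules defined elsewhere. A sketch in the spirit of the PyTorch neural style transfer tutorial; the originals may differ in detail:

import torch
import torch.nn as nn
import torch.nn.functional as F

class Normalization(nn.Module):
    # normalize an image with the given per-channel mean and std
    def __init__(self, mean, std):
        super().__init__()
        self.mean = mean.clone().view(-1, 1, 1)
        self.std = std.clone().view(-1, 1, 1)

    def forward(self, img):
        return (img - self.mean) / self.std

class ContentLoss(nn.Module):
    # transparent layer: records the MSE to a fixed target, passes input through
    def __init__(self, target):
        super().__init__()
        self.target = target.detach()

    def forward(self, input):
        self.loss = F.mse_loss(input, self.target)
        return input

def gram_matrix(input):
    b, c, h, w = input.size()
    features = input.view(b * c, h * w)
    G = torch.mm(features, features.t())
    return G.div(b * c * h * w)

class StyleLoss(nn.Module):
    # same idea as ContentLoss, but compares Gram matrices of the features
    def __init__(self, target_feature):
        super().__init__()
        self.target = gram_matrix(target_feature).detach()

    def forward(self, input):
        self.loss = F.mse_loss(gram_matrix(input), self.target)
        return input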
Example #9
def train_model(model,
                criterion,
                optimizer,
                train_loader,
                scheduler,
                num_epochs=9):
    since = time.time()

    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        running_loss = 0.0
        running_corrects = 0

        # Iterate over data.
        for i, data in enumerate(train_loader, 0):

            # get the inputs
            inputs, labels = data

            # move tensors to the GPU if one is in use (Variable is deprecated)
            if use_gpu:
                inputs = inputs.cuda()
                labels = labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward
            outputs = model(inputs)
            _, preds = torch.max(outputs.data, 1)
            loss = criterion(outputs, labels)

            # backward + optimize
            loss.backward()
            optimizer.step()

            # statistics
            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels).item()

        # step the LR scheduler once per epoch (it was otherwise never used)
        scheduler.step()

        epoch_loss = running_loss / dataset_sizes
        epoch_acc = running_corrects * 1.0 / dataset_sizes

        print('Loss: {:.4f} Acc: {:.4f}'.format(epoch_loss, epoch_acc))

        # deep copy the model
        if epoch_acc > best_acc:
            best_acc = epoch_acc
            best_model_wts = copy.deepcopy(model.state_dict())

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best Acc: {:.4f}'.format(best_acc))

    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
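A hedged call-site sketch for train_model, assuming use_gpu and dataset_sizes are module-level globals and a conventional SGD + StepLR setup:

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)

dataset_sizes = len(train_loader.dataset)  # global read by train_model
use_gpu = torch.cuda.is_available()        # global read by train_model
model = train_model(model, criterion, optimizer, train_loader,
                    scheduler, num_epochs=9)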