Example #1
def objective(params):
    kernel1 = int(params[0])
    kernel2 = int(params[1])
    kernel3 = int(params[2])
    kernel4 = int(params[3])
    kernel5 = int(params[4])
    kernel6 = int(params[5])
    kernel7 = int(params[6])
    kernel8 = int(params[7])
    kernel9 = int(params[8])
    kernel10 = int(params[9])
    dropout5 = float(params[10])
    dropout6 = float(params[11])

    net = VGG(kernel1=kernel1,
              kernel2=kernel2,
              kernel3=kernel3,
              kernel4=kernel4,
              kernel5=kernel5,
              kernel6=kernel6,
              kernel7=kernel7,
              kernel8=kernel8,
              kernel9=kernel9,
              kernel10=kernel10,
              dropout5=dropout5,
              dropout6=dropout6)

    # Move the model to the GPU whenever CUDA is used, and only wrap it in
    # DataParallel when more than one device is available.
    if use_cuda:
        if torch.cuda.device_count() > 1:
            net = nn.DataParallel(net)
        net.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(),
                          lr=0.1,
                          momentum=0.9,
                          weight_decay=5e-4)

    num_epochs = 50
    for _ in range(num_epochs):
        # Training
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += targets.size(0)
            correct += predicted.eq(targets).cpu().sum().item()
        #print("Train loss: {}".format(train_loss))

    # Validation (gradients are not needed at evaluation time)
    net.eval()
    val_loss = 0
    correct = 0
    total = 0
    with torch.no_grad():
        for batch_idx, (inputs, targets) in enumerate(validloader):
            if use_cuda:
                inputs, targets = inputs.cuda(), targets.cuda()
            outputs = net(inputs)
            loss = criterion(outputs, targets)

            val_loss += loss.item()
            _, predicted = torch.max(outputs, 1)
            total += targets.size(0)
            correct += predicted.eq(targets).cpu().sum().item()
    #print("Validation loss: {}".format(val_loss))

    return val_loss
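
The listing only defines objective; as a sketch of how it might be driven (this driver and the search-space bounds are assumptions, not part of the original example), it could be handed to scikit-optimize's gp_minimize, which calls it with each candidate as the params list the function unpacks:

# Hypothetical driver, not from the original listing; bounds are placeholders.
from skopt import gp_minimize
from skopt.space import Integer, Real

space = [Integer(32, 512, name='kernel{}'.format(k)) for k in range(1, 11)]
space += [Real(0.1, 0.9, name='dropout5'), Real(0.1, 0.9, name='dropout6')]

result = gp_minimize(objective, space, n_calls=20, random_state=0)
print(result.x, result.fun)  # best hyperparameters and their validation loss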
Example #2
                    help='The extension name of the output video')
parser.add_argument('--output', type=str, default='output',
                    help='Directory to save the output image(s)')

# Advanced options
args = parser.parse_args('')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if not os.path.exists(args.output):
    os.mkdir(args.output)
decoder = Decoder('Decoder')
transform = Transform(in_planes=512)
vgg = VGG('VGG19')

decoder.eval()
transform.eval()
vgg.eval()

# decoder.features.load_state_dict(torch.load(args.decoder))
decoder.load_state_dict(torch.load(args.decoder))
transform.load_state_dict(torch.load(args.transform))
vgg.features.load_state_dict(torch.load(args.vgg))

enc_1 = nn.Sequential(*list(vgg.features.children())[:4])  # input -> relu1_1
enc_2 = nn.Sequential(*list(vgg.features.children())[4:11])  # relu1_1 -> relu2_1
enc_3 = nn.Sequential(*list(vgg.features.children())[11:18])  # relu2_1 -> relu3_1
enc_4 = nn.Sequential(*list(vgg.features.children())[18:31])  # relu3_1 -> relu4_1
enc_5 = nn.Sequential(*list(vgg.features.children())[31:44])  # relu4_1 -> relu5_1


enc_1.to(device)
enc_2.to(device)
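
The snippet is truncated after moving the first two encoder stages to the device. Assuming enc_3 through enc_5 are moved the same way, a minimal illustrative helper (not from the original listing) for collecting the intermediate relu*_1 activations would be:

# Hypothetical helper, not part of the original listing.
def encode_with_intermediate(x):
    results = [x]
    for enc in (enc_1, enc_2, enc_3, enc_4, enc_5):
        results.append(enc(results[-1]))
    return results[1:]  # [relu1_1, relu2_1, relu3_1, relu4_1, relu5_1]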
Example #3
import torch
import PIL.Image as Image
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter

from data_loader import get_data_loader
from model import VGG
from focal_loss import FocalLoss

if __name__ == "__main__":
    r = 16
    thresh = 0.9
    # model
    net = VGG(1).cuda()
    net.load_state_dict(torch.load("./modules/vgg-160-1.433.pth.tar")['state_dict'])
    net.eval()

    # test image
    image = Image.open("./predictions/test5.jpg")

    # to patch tensor
    trans = transforms.Compose([transforms.Pad(r, padding_mode='symmetric'), transforms.Grayscale(), transforms.ToTensor()])
    image = torch.squeeze(trans(image))
    h, w = image.shape  # (rows, cols) after Grayscale + ToTensor + squeeze
    print(image.shape)

    patches = [image[i - r:i + r, j - r:j + r] for i in range(r, h - r) for j in range(r, w - r)]
    a = patches[23]
    print(len(patches))

    # predict
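    # Purely illustrative continuation (the original listing ends at "# predict").
    # Assumes, hypothetically, that net maps a (B, 1, 2r, 2r) batch of patches to
    # one score per patch; the batch size of 256 is also an assumption.
    with torch.no_grad():
        scores = []
        for k in range(0, len(patches), 256):
            batch = torch.stack(patches[k:k + 256]).unsqueeze(1).cuda()
            scores.append(torch.sigmoid(net(batch)).view(-1).cpu())
        scores = torch.cat(scores)
    mask = (scores > thresh).float().reshape(h - 2 * r, w - 2 * r)
    print(int(mask.sum().item()), "patches above threshold", thresh)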