コード例 #1
0
    def __init__(self, params):
        """Assemble the network's sub-modules.

        Builds a 3D convolutional encoder, a small 1x1x1-conv head, a
        primitives prediction table, and an AlexNet-based 2D image branch.

        Args:
            params: configuration object; fields read here are useBn, nz,
                shapeLrDecay, primTypes, probLrDecay and PRETRAINED.
        """
        super(Network, self).__init__()
        # 3D volumetric encoder (project helper); the meaning of the
        # positional arguments is defined in vE.convEncoderSimple3d --
        # TODO confirm.
        self.ve = vE.convEncoderSimple3d(3, 4, 1, params.useBn)
        outChannels = self.outChannels = self.ve.output_channels
        # Two 1x1x1 conv + BN + LeakyReLU stages acting as a per-location
        # fully-connected head on the encoder output.
        layers = []
        for i in range(2):
            layers.append(nn.Conv3d(outChannels, outChannels, kernel_size=1))
            layers.append(nn.BatchNorm3d(outChannels))
            layers.append(nn.LeakyReLU(0.2, True))

        self.fc_layers = nn.Sequential(*layers)
        self.fc_layers.apply(netUtils.weightsInit)

        # A function object is (ab)used as a plain attribute container for
        # the primitive-parameter bias terms; calling it always returns 0.
        biasTerms = lambda x: 0

        # Bias toward the identity rotation (w-first quaternion layout --
        # TODO confirm against primitives.Primitives).
        biasTerms.quat = torch.Tensor([1, 0, 0, 0])
        biasTerms.shape = torch.Tensor(
            params.nz).fill_(-3) / params.shapeLrDecay
        # NOTE(review): prob is allocated with size 1, but the loop below
        # indexes it with p in range(len(params.primTypes)); this raises
        # IndexError for more than one primitive type. Verify intended size.
        biasTerms.prob = torch.Tensor(1).fill_(0)
        for p in range(len(params.primTypes)):
            if (params.primTypes[p] == 'Cu'):
                biasTerms.prob[p] = 2.5 / params.probLrDecay

        self.primitivesTable = primitives.Primitives(params, outChannels,
                                                     biasTerms)
        # self.primitivesTable.apply(netUtils.weightsInit)

        # 2D branch: AlexNet with its final classifier layer replaced by a
        # linear projection to the encoder's channel dimension.
        my_alexnet = alexnet(pretrained=params.PRETRAINED)
        features = list(my_alexnet.classifier.children())[:-1]
        features.extend([nn.Linear(4096, outChannels)])
        my_alexnet.classifier = nn.Sequential(*features)
        self.image_2D = my_alexnet
コード例 #2
0
ファイル: lab3.py プロジェクト: seniorkot/ITMO-mobileCV
def process_images(images: list, trt: bool):
    """Classify each PIL image, stamp the predicted label onto it, and
    save the result under ./output/.

    Args:
        images: list of PIL images (must carry a .filename attribute).
        trt: load the pre-converted TensorRT engine instead of the
            eager pretrained AlexNet.

    Prints model-load time, total processing time and CUDA memory stats.
    """
    started = time.time()
    if trt:
        # Pre-converted engine produced earlier via torch2trt.
        model = TRTModule()
        model.load_state_dict(torch.load('alexnet_trt.pth'))
    else:
        model = alexnet(pretrained=True).eval().cuda()
    print("Model load time {}".format(time.time() - started))

    started = time.time()
    for img in images:
        label_index = classify_image(img, model)
        caption = str(label_index) + ': ' + classes[label_index]
        draw = ImageDraw.Draw(img)
        # White strip along the bottom edge, then the caption on top of it.
        draw.rectangle((0, img.height - 20, img.width, img.height),
                       fill=(255, 255, 255))
        draw.text((50, img.height - 15),
                  caption, (0, 0, 0),
                  font=ImageFont.load_default())
        img.save('./output/' + img.filename.split('/')[-1])

    print("Image(s) processing time {}".format(time.time() - started))
    print('Memory allocated: ' + str(torch.cuda.memory_allocated()))
    print('Max memory allocated: ' + str(torch.cuda.max_memory_allocated()))
コード例 #3
0
def pytorch_alexnet():
    """Wrap a pretrained torchvision AlexNet for the model-tools API."""
    from torchvision.models.alexnet import alexnet
    from model_tools.activations.pytorch import load_preprocess_images

    preprocess = functools.partial(load_preprocess_images, image_size=224)
    model = alexnet(pretrained=True)
    return PytorchWrapper(model=model, preprocessing=preprocess)
コード例 #4
0
 def test_alexnet(self):
     """Run AlexNet with pretrained weights through the model-export
     test harness at 1e-3 absolute tolerance."""
     pretrained_weights = model_zoo.load_url(model_urls['alexnet'],
                                             progress=False)
     self.run_model_test(alexnet(), train=False, batch_size=BATCH_SIZE,
                         state_dict=pretrained_weights, atol=1e-3)
コード例 #5
0
    def __init__(self):
        """Truncated AlexNet feature extractor: pretrained weights are
        copied in, then only the first 9 feature layers are kept."""
        super(AlexNetFeatureExtractor, self).__init__()
        self.model = AlexNetFeatures(1000)

        # Transfer ImageNet weights from the full model, then drop the
        # tail of the convolutional feature stack.
        full_model = alexnet(pretrained=True)
        self.model.load_state_dict(full_model.state_dict())
        self.model.features = self.model.features[:9]
コード例 #6
0
ファイル: model.py プロジェクト: younginsong21/PG3_study
    def build_ResNet(self):
        """Load a frozen, headless AlexNet feature extractor.

        NOTE(review): despite the method name, this builds AlexNet,
        not a ResNet.
        """
        backbone = alexnet(pretrained=True).to(device)
        # Drop the classifier; keep only the feature-extraction stages.
        feature_modules = list(backbone.children())[:-1]
        self.alexnet = nn.Sequential(*feature_modules).to(device)

        # Freeze every backbone parameter.
        for param in self.alexnet.parameters():
            param.requires_grad = False
コード例 #7
0
ファイル: AlexNet.py プロジェクト: nilaisr/MCV_CNN_framework
    def __init__(self, cf, num_classes=21, pretrained=False, net_name='alexnet'):
        """AlexNet classifier wrapper.

        Args:
            cf: framework configuration object, forwarded to the base class.
            num_classes: number of output classes for the final layer.
            pretrained: load ImageNet weights when True.
            net_name: identifier string for this network.
        """
        super(AlexNet, self).__init__(cf)

        self.url = 'http://datasets.cvc.uab.es/models/pytorch/basic_vgg16.pth'
        self.pretrained = pretrained
        self.net_name = net_name

        if pretrained:
            # BUG FIX: torchvision can only load the pretrained checkpoint
            # into the stock 1000-way head -- passing num_classes != 1000
            # together with pretrained=True raises a state-dict size
            # mismatch. Build the stock model first, then swap the final
            # classifier layer for our class count.
            self.model = models.alexnet(pretrained=True)
            self.model.classifier[6] = nn.Linear(4096, num_classes)
        else:
            self.model = models.alexnet(pretrained=False,
                                        num_classes=num_classes)
コード例 #8
0
def load_model():
    """Load the eager AlexNet and its TensorRT engine from disk.

    Returns:
        (model, model_trt): the CUDA eval-mode model and the TRT module.
    """
    model_log = log('Load {} ... '.format('alexnet & tensorrt'))

    eager_model = alexnet().eval().cuda()
    eager_model.load_state_dict(torch.load('alexnet.pth'))

    trt_model = TRTModule()
    trt_model.load_state_dict(torch.load('alexnet_trt.pth'))

    model_log.end()
    return (eager_model, trt_model)
コード例 #9
0
ファイル: test___init__.py プロジェクト: bwest25/model-tools
def pytorch_alexnet_resize():
    """Pretrained AlexNet wrapped with a resize-only preprocessing
    pipeline (no center crop)."""
    from torchvision.models.alexnet import alexnet
    from model_tools.activations.pytorch import load_images, torchvision_preprocess
    from torchvision import transforms

    preprocess_one = transforms.Compose(
        [transforms.Resize(224), torchvision_preprocess()])

    def preprocessing(paths):
        loaded = load_images(paths)
        return np.concatenate([preprocess_one(image) for image in loaded])

    return PytorchWrapper(alexnet(pretrained=True), preprocessing,
                          identifier='alexnet-resize')
コード例 #10
0
 def __init__(self, feature_name, pretrained_val=True):
     """Slice an AlexNet into named conv/fc stages for per-layer
     feature extraction.

     Args:
         feature_name: name of the stage whose output will be returned.
         pretrained_val: load ImageNet weights into the backbone.
     """
     super().__init__()
     self.feature_name = feature_name
     backbone = alexnet(pretrained=pretrained_val)
     # Convolutional stages; index boundaries follow torchvision's
     # AlexNet `features` layout.
     self.conv_1 = backbone.features[:3]
     self.conv_2 = backbone.features[3:6]
     self.conv_3 = backbone.features[6:8]
     self.conv_4 = backbone.features[8:10]
     self.conv_5 = backbone.features[10:]
     self.avgpool = backbone.avgpool
     # Fully-connected stages of the classifier.
     self.fc_1 = backbone.classifier[:3]
     self.fc_2 = backbone.classifier[3:6]
     self.fc_3 = backbone.classifier[6:]
     self.eval()
コード例 #11
0
    def __init__(self, bidirectional: bool = True, pretrained: bool = True):
        """AlexNet CNN backbone + 2-layer LSTM + 3-layer FC head.

        Args:
            bidirectional: use a bidirectional LSTM. The hidden size is
                halved so the concatenated forward/backward output keeps
                the same overall width as the unidirectional variant.
            pretrained: load ImageNet weights into the AlexNet backbone.
        """
        super().__init__()

        alex_model = alexnet(pretrained=pretrained)
        self.cnn = nn.Sequential(*alex_model.features, alex_model.avgpool)
        cnn_last_dim = 9216  # flattened AlexNet avgpool output

        lstm_dim = 512
        batch_first = False
        # Both branches build the same LSTM except for direction/hidden
        # size; pick the per-direction hidden size and forward fn first.
        if bidirectional:
            self.lstm_forward = self.bidirectional_forward
            hidden_size = int(lstm_dim / 2)
        else:
            self.lstm_forward = self.normal_forward
            hidden_size = lstm_dim
        self.lstm = nn.LSTM(cnn_last_dim,
                            hidden_size,
                            bidirectional=bidirectional,
                            num_layers=2,
                            batch_first=batch_first)

        # Three-layer classification head, Kaiming-initialised.
        self.fc1 = nn.Linear(lstm_dim * 2, 4096)
        self.relu1 = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(4096, 4096)
        self.relu2 = nn.ReLU(inplace=True)
        self.fc3 = nn.Linear(4096, 2)
        nn.init.kaiming_normal_(self.fc1.weight)
        nn.init.kaiming_normal_(self.fc2.weight)
        nn.init.kaiming_normal_(self.fc3.weight)
コード例 #12
0
ファイル: alexnet_train.py プロジェクト: KqSMea8/public-data
def main(args):
    """Inspect a pretrained AlexNet's parameters.

    NOTE(review): the training code below the early ``return`` is
    currently unreachable -- it looks deliberately short-circuited
    for debugging.
    """
    torch.manual_seed(args.seed)  # Seed the CPU RNG so results are reproducible.

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    model = alexnet(True).to(device)  # Run on CPU or CUDA, whichever is available.
    print(model)
    for i, p in enumerate(model.parameters()):
        print(i, p)
    # Everything below this return never executes.
    return

    # Optimizer: mini-batch SGD with momentum.
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)

    train_loader, test_loader = data_loader(args, use_cuda)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
コード例 #13
0
 def __init__(self, domain_classes, n_classes):
     """AlexNet without a bottleneck, with separate class and domain heads.

     The pretrained conv stack and the first two classifier linear layers
     form the shared feature extractor; the class and domain classifiers
     are attached on top of its 4096-d output.
     """
     super(AlexNetNoBottleneck, self).__init__()
     base = alexnet(pretrained=True)
     self._convs = base.features
     # Shared FC head reusing the pretrained classifier's linear layers.
     self._classifier = nn.Sequential(
         Flatten(),
         nn.Dropout(),
         base.classifier[1],  # pretrained nn.Linear(256 * 6 * 6, 4096)
         nn.ReLU(inplace=True),
         nn.Dropout(),
         base.classifier[4],  # pretrained nn.Linear(4096, 4096)
         nn.ReLU(inplace=True),
     )
     self.features = nn.Sequential(self._convs, self._classifier)
     self.class_classifier = nn.Linear(4096, n_classes)
     # Freshly initialised domain discriminator head.
     self.domain_classifier = nn.Sequential(
         nn.Dropout(),
         nn.Linear(4096, 1024),
         nn.ReLU(inplace=True),
         nn.Dropout(),
         nn.Linear(1024, 1024),
         nn.ReLU(inplace=True),
         nn.Linear(1024, domain_classes),
     )
コード例 #14
0
 def __init__(self, domain_classes, n_classes):
     """Build the network around a pretrained AlexNet backbone."""
     super(AlexNet, self).__init__()
     backbone = alexnet(pretrained=True)
     self.build_self(backbone, domain_classes, n_classes)
コード例 #15
0
import sys
sys.path.insert(0, '.')
import torch
from torch.autograd import Variable
from torchvision.models.alexnet import alexnet
import pytorch_to_caffe

if __name__ == '__main__':
    # Convert torchvision's AlexNet into Caffe prototxt/caffemodel files.
    name = 'alexnet'
    net = alexnet(True)
    # Renamed from `input` to avoid shadowing the builtin.
    dummy_input = Variable(torch.ones([1, 3, 226, 226]))
    pytorch_to_caffe.trans_net(net, dummy_input, name)
    pytorch_to_caffe.save_prototxt('{}.prototxt'.format(name))
    pytorch_to_caffe.save_caffemodel('{}.caffemodel'.format(name))
コード例 #16
0
ファイル: alexnet.py プロジェクト: FelixFu520/classification
 def __init__(self, num_class=1000, in_channels=3, pretrained=False, freeze_bn=False):
     """AlexNet with configurable input channels and output classes.

     NOTE(review): `freeze_bn` is accepted but never used here (stock
     AlexNet has no batch-norm layers) -- confirm whether a base class
     consumes it.
     """
     super(AlexNet, self).__init__()
     self.model = alexnet(pretrained=pretrained)
     # Replace the stem conv so it accepts `in_channels` inputs; this
     # discards any pretrained weights of the first layer.
     stem = torch.nn.Conv2d(in_channels=in_channels, out_channels=64,
                            kernel_size=11, stride=4, padding=2)
     self.model.features[0] = stem
     # Re-head the classifier for `num_class` outputs.
     self.model.classifier[-1] = torch.nn.Linear(4096, num_class, bias=True)
コード例 #17
0
ファイル: sotabench.py プロジェクト: RJT1990/vision
from torchvision.models.alexnet import alexnet
import torchvision.transforms as transforms
from torchbench.image_classification import ImageNet
import PIL
import torch

# Standard ImageNet evaluation preprocessing: resize, center-crop,
# tensor conversion and per-channel normalisation.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

normalize = transforms.Normalize(mean=_IMAGENET_MEAN, std=_IMAGENET_STD)
b0_input_transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    normalize,
])

# Benchmark the pretrained AlexNet on ImageNet using one GPU.
ImageNet.benchmark(model=alexnet(pretrained=True),
                   paper_model_name='AlexNet',
                   input_transform=b0_input_transform,
                   batch_size=256,
                   num_gpu=1)
コード例 #18
0
    # test the output
    print("TEST output")
    for rst_caffe, rst_torch in zip(rsts_caffe, rsts_torchs):
        np.testing.assert_almost_equal(rst_caffe,
                                       rst_torch,
                                       decimal=args.decimal)
    print("TEST output: PASS")


if __name__ == '__main__':
    args = arg_parse()

    if args.model == 'alexnet':
        # Alexnet example
        from torchvision.models.alexnet import alexnet
        net_torch = alexnet(True).eval()
        if args.gpu:
            net_torch.cuda()
        try:
            net_caffe = caffe.Net('alexnet.prototxt', 'alexnet.caffemodel',
                                  caffe.TEST)
        except:
            raise ("Please run alexnet_pytorch_to_caffe.py first")
        shape = get_input_size(net_caffe)
        data_np, data_torch = generate_random(shape, args.gpu)
        test(net_caffe, net_torch, data_np, data_torch, args)

    elif args.model == 'resnet18':
        # ResNet example
        from torchvision.models.resnet import resnet18
        net_torch = resnet18(True).eval()
コード例 #19
0
from torchvision.models.alexnet import alexnet

from converter.pytorch import ModuleParser

# Parse torchvision's pretrained AlexNet into an inference engine for a
# 3x224x224 input using the project's PyTorch module parser.
parser = ModuleParser()
net = alexnet(pretrained=True)
engine = parser.parse(net, (3, 224, 224))
コード例 #20
0
ファイル: test_caffe2.py プロジェクト: gtgalone/pytorch
 def test_alexnet(self):
     """Export AlexNet with pretrained weights and check against the
     Caffe2 backend within 1e-3 absolute tolerance."""
     pretrained_weights = model_zoo.load_url(model_urls['alexnet'],
                                             progress=False)
     self.run_model_test(alexnet(),
                         train=False,
                         batch_size=BATCH_SIZE,
                         state_dict=pretrained_weights,
                         atol=1e-3)
コード例 #21
0
from torchvision.models.alexnet import alexnet
from trajopt.models.critic_nets import Critic, STATE_DIM


class ModelToTest(object):
    """Bundle of a model, a display name, and a sample input tensor."""

    def __init__(self, model, name, x):
        self.x = x
        self.name = name
        self.model = model


if __name__ == '__main__':
    # Create models to test
    models = []
    models.append(
        ModelToTest(model=alexnet(pretrained=True).eval(),
                    name="AlexNet",
                    x=torch.ones((1, 3, 224, 224))))
    models.append(
        ModelToTest(model=Critic(),
                    name="CriticNet",
                    x=torch.ones((1, STATE_DIM))))

    # Evaluate throughput of each non-accelerated, CUDA-accelerated,
    # and TensorRT-accelerated model
    for model in models:
        model_cuda = copy.deepcopy(model.model).cuda()
        x_cuda = model.x.clone().cuda()
        model_trt = torch2trt(model_cuda, [x_cuda])

        print('<=========== {} ===========>'.format(model.name))
コード例 #22
0
ファイル: cal_param.py プロジェクト: alisure-ml/PyTorchGCN
        self.features = vgg.features
        self.classifier = nn.Linear(512, 10, bias=False)
        pass

    pass


def view_model_param(model):
    """Return the total number of scalar parameters in *model*."""
    return sum(np.prod(list(p.data.size())) for p in model.parameters())


if __name__ == '__main__':
    # Reference parameter counts recorded from earlier runs:
    """
    resnet:281548163
    vGG:59903747
    light:22127747
    resnet50 25557032
    vgg16 138357544
    vgg16_bn 138365992
    vgg16 14719808
    vgg16_bn 14728256
    alexnet 61100840
    """
    # Count AlexNet's parameters on the CPU and print the total.
    net = alexnet().to(torch.device("cpu"))
    print(view_model_param(net))
    pass
コード例 #23
0
ファイル: lab3.py プロジェクト: A0405u/mcv
    trt = (sys.argv[1] == "trt")

# Select device

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load regular model

if not trt:

    print("loading model...")

    timest = time.time()

    # model = torch.hub.load('pytorch/vision:v0.8.0', 'wide_resnet101_2', pretrained=True).eval().cuda()
    model = alexnet(pretrained=True).eval().cuda()

    print("model loaded in {}s".format(round(time.time() - timest, 3)))

# Load TRT

else:

    print("loding trt model...")
    timesttrt = time.time()

    try:  # Load from file
        model_trt = TRTModule()
        model_trt.load_state_dict(torch.load(MODEL_TRT_PATH))

    except FileNotFoundError:  # Convert from regular
コード例 #24
0
def performanceTest():
    """Evaluate the trained 3D autoencoder and the 2D image->voxel path.

    Loads the decoder/encoder/image-branch checkpoints, runs them over
    the test set, prints BCE loss and average precision, and logs both
    to TensorBoard every 20 steps.
    """
    writer = SummaryWriter('ShapeNetTest')
    Decoder = DecoderNetwork()
    Encoder = Network_3D()

    # AlexNet with its last classifier layer swapped for a 64-d projection.
    my_alexnet = alexnet(pretrained=False)
    features = list(my_alexnet.classifier.children())[:-1]
    features.extend([nn.Linear(4096, 64)])
    my_alexnet.classifier = nn.Sequential(*features)
    Image_2D = my_alexnet

    Decoder.load_state_dict(torch.load('./ModelDict/decoder_final.pkl'))
    Encoder.load_state_dict(torch.load('./ModelDict/encoder_final.pkl'))
    Image_2D.load_state_dict(torch.load('./ModelDict/image_final.pkl'))

    test_data = TrainDataset(CWD_PATH, loaded=True, test=True)
    testloader = Data.DataLoader(dataset=test_data,
                                 batch_size=BATCH_SIZE,
                                 shuffle=True,
                                 num_workers=2)
    loss_func_1 = nn.BCELoss()

    # BUG FIX: torch.cuda.is_available is a function object, so the
    # original `if torch.cuda.is_available:` was always truthy and tried
    # .cuda() even on CPU-only machines. Call it.
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        Image_2D = Image_2D.cuda()
        Encoder = Encoder.cuda()
        Decoder = Decoder.cuda()
        loss_func_1 = loss_func_1.cuda()

    for step, (image, voxel) in enumerate(testloader):
        if use_cuda:
            image = image.cuda()
            voxel = voxel.cuda()

        # 3D path: encode the voxel grid and reconstruct it.
        feature_vector = Encoder(voxel)
        recon_voxel_3D = Decoder(feature_vector).view(BATCH_SIZE, -1)
        real_voxel = voxel.view(BATCH_SIZE, -1)
        loss_1 = loss_func_1(recon_voxel_3D, real_voxel)

        # BUG FIX: ave_pre was unbound when evaluation() raised, making
        # the later print/log statements fail with NameError.
        ave_pre = None
        try:
            ave_pre = evaluation(
                real_voxel.view(-1).cpu(),
                recon_voxel_3D.view(-1).cpu())
        except BaseException:
            print('eval error')

        # 2D path: predict the voxel grid directly from the image.
        decoder_input = Image_2D(image)
        recon_voxel_2d = Decoder(decoder_input)
        loss_2 = loss_func_1(recon_voxel_2d, real_voxel)
        # BUG FIX: the original referenced the undefined name
        # `recon_voxel`; compare the 2D reconstruction against the
        # ground-truth voxels, mirroring the 3D evaluation above.
        ave_pre_2 = evaluation(
            recon_voxel_2d.view(-1).cpu(),
            real_voxel.view(-1).cpu())

        if step % 200 == 0:
            print('=============3D TEST==================')
            print('+++++++LOSS:' + str(loss_1.item()) + '+++++++')
            print('AP: %.5f' % ave_pre)

            print('=============2D TEST==================')
            print('+++++++LOSS:' + str(loss_2.item()) + '+++++++')
            print('AP: %.5f' % ave_pre_2)

        if step % 20 == 0:
            niter = step
            writer.add_scalar('Test/Loss_3D', loss_1.item(), niter)
            writer.add_scalar('Test/Loss_2D', loss_2.item(), niter)
            writer.add_scalar('Test/AP_3D', ave_pre.item(), niter)
            writer.add_scalar('Test/AP_2D', ave_pre_2.item(), niter)
コード例 #25
0
ファイル: test_models.py プロジェクト: gtgalone/pytorch
 def test_alexnet(self):
     """Export a randomly-initialised AlexNet on an all-ones batch."""
     ones_batch = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
     self.exportTest(toC(alexnet()), toC(ones_batch))
コード例 #26
0
ファイル: Lec6_alexnet.py プロジェクト: wowhyuck/mooc
from glob import glob
from PIL import Image
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from torchvision.models.alexnet import alexnet

## Load the pre-trained AlexNet and inspect its architecture
model = alexnet(pretrained=True)
print(model.modules)

## Replace the final classifier layer so the head matches our classes
num_classes = 4
model.classifier[6] = nn.Linear(4096, num_classes)

## Hyper-parameters
num_epochs = 10
batch_size = 64
learning_rate = 0.001


## Custom Dataset & DataLoader
class FoodDataset(Dataset):
    def __init__(self, file_names, transform=None):
        self.img_list = glob(file_names)
        self.transform = transform

    def __len__(self):
        # Dataset size = number of files matched by the glob in __init__.
        return len(self.img_list)

    def __getitem__(self, index):
        img_name = self.img_list[index]
コード例 #27
0
def preprocess_noisyart_images(
        net_selector,
        im_resize,
        crop,
        mean_sub,
        std_norm,
        range_255,
        batch_size,
        dataset_imgs_path=None,
        feats_out_dir_path=None,
        feats_out_append=None,
        workers=0,
        feature_layer=None,
        verbose=False,
        device=None,
        seed=DEFAULT_SEED):
    """Extract CNN features for the noisyart image dataset and save them.

    Args:
        net_selector: backbone name: 'alexnet', 'vgg16'/'vgg19', or
            'resnet50'/'resnet101'/'resnet152'.
        im_resize, crop, mean_sub, std_norm, range_255: preprocessing
            options forwarded to the loader (also encoded into the
            output file name).
        batch_size: data-loader batch size.
        dataset_imgs_path: image folder (defaults to DEFAULT_IMGS_PATH).
        feats_out_dir_path: output dir (defaults to DEFAULT_FEATS_PATH).
        feats_out_append: optional suffix for the output file name.
        workers: number of data-loading workers.
        feature_layer: layer to extract: 'avgpool'/'pool' (resnet) or
            'fc6'/'fc7' (vgg); ignored for alexnet.
        verbose: print per-batch input statistics.
        device: torch device; None or 'cpu' runs on the CPU.
        seed: RNG seed for reproducibility (None skips seeding).

    Raises:
        ValueError: on an unknown net_selector / backbone variant.
        RuntimeError: on an invalid feature_layer for the chosen net.
    """
    if seed is not None:
        print(f"Using fixed seed = {seed}")
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.backends.cudnn.deterministic = True

    from torchvision.models.alexnet import alexnet
    from torchvision.models.vgg import vgg16
    from torchvision.models.vgg import vgg19
    from torchvision.models.resnet import resnet50
    from torchvision.models.resnet import resnet101
    from torchvision.models.resnet import resnet152

    dataset_imgs_path = DEFAULT_IMGS_PATH if dataset_imgs_path is None else dataset_imgs_path
    feats_out_dir_path = str(
        DEFAULT_FEATS_PATH
    ) if feats_out_dir_path is None else feats_out_dir_path
    feats_out_append = "" if feats_out_append is None else '_' + feats_out_append

    print('\n\n')
    loader = get_loader(batch_size,
                        im_resize,
                        crop,
                        mean_sub,
                        std_norm,
                        range_255,
                        shuffle=False,
                        dataset_imgs_path=dataset_imgs_path,
                        verbose=True,
                        workers=workers)

    # Relative file names (stripped of the dataset root) and class map,
    # stored alongside the features for later lookup.
    dir_path = dataset_imgs_path if dataset_imgs_path.endswith(
        '/') else dataset_imgs_path + '/'
    filenames = [f.split(dir_path)[1] for f, l in loader.dataset.samples]
    class_indices = loader.dataset.class_to_idx

    resize_str = '_r{}'.format(im_resize) if im_resize else ''
    crop_str = '_c{}'.format(crop) if crop else ''
    mean_sub_str = '_meansub' if mean_sub else ''
    std_norm_str = '_stdnorm' if std_norm else ''
    range_255_str = '_range255' if range_255 else ''

    def get_save_path(feat_net_name):
        # The output name encodes every preprocessing option so runs with
        # different settings never overwrite each other.
        from os.path import join
        return join(feats_out_dir_path, '{}{}{}{}{}{}{}'.\
            format(feat_net_name, resize_str, crop_str, mean_sub_str, std_norm_str, range_255_str, feats_out_append))

    savepath = get_save_path(net_selector)
    print('After prediction, features will be saved in: ' + savepath)

    # Build the backbone and truncate it at the requested feature layer.
    if net_selector.startswith('resnet'):
        if net_selector == 'resnet50':
            net = resnet50(pretrained=True)
        elif net_selector == 'resnet101':
            net = resnet101(pretrained=True)
        elif net_selector == 'resnet152':
            net = resnet152(pretrained=True)
        else:
            # BUG FIX: unknown variants previously fell through and
            # crashed later with NameError on `net`.
            raise ValueError("Unknown resnet variant: " + net_selector)

        if feature_layer is None or feature_layer in ['pool', 'avgpool']:
            net.fc = FakeLayer()  # strip the classification head
        else:
            raise RuntimeError(
                "resnet feature_layer can only be 'avgpool' ('pool' or None for short)"
            )

    elif net_selector.startswith('vgg'):
        if net_selector == 'vgg16':
            net = vgg16(pretrained=True)
        elif net_selector == 'vgg19':
            net = vgg19(pretrained=True)
        else:
            raise ValueError("Unknown vgg variant: " + net_selector)

        default_feature_layer = 'fc7'
        feature_layer = default_feature_layer if feature_layer is None else feature_layer

        # classifier[0] is FC6 and classifier[3] is FC7; every layer after
        # the chosen one is replaced with an identity so its output is
        # what the truncated network returns.
        if feature_layer == 'fc6':
            l_index = 0
        elif feature_layer == 'fc7':
            l_index = 3
        else:
            raise RuntimeError(
                "vgg feature_layer can only be 'fc6' or 'fc7' (None for {})".
                format(default_feature_layer))
        for i in range(l_index + 1, len(net.classifier)):
            net.classifier[i] = FakeLayer()

    elif net_selector == 'alexnet':
        net = alexnet(pretrained=True)
        net.classifier = FakeLayer()  # keep only the conv features

    else:
        # BUG FIX: an unknown selector previously reached the prediction
        # loop and crashed with NameError on `net`.
        raise ValueError("Unknown net_selector: " + net_selector)

    print('Start prediction')
    from progressbar import progressbar

    preds = []
    labels = []

    # Fix for: RuntimeError: received 0 items of ancdata
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
    torch.multiprocessing.set_sharing_strategy('file_system')

    from PIL import ImageFile, Image
    ImageFile.LOAD_TRUNCATED_IMAGES = True  # Tolerate truncated files instead of raising mid-epoch.
    Image.MAX_IMAGE_PIXELS = Image.MAX_IMAGE_PIXELS * 4  # Relax the decompression-bomb limit.

    # BUG FIX: the original used `device is not 'cpu'`, an identity
    # comparison against a string literal (implementation-defined and a
    # SyntaxWarning on modern Python); use inequality instead.
    if device is not None and device != 'cpu':
        print("Using CUDA")
        net.to(device)
        net.eval()
        suffix = '\n' if verbose else ''
        for X, Y in progressbar(loader, suffix=suffix):
            if verbose:
                print("\nMax-Val: {}".format(X.max()))
                print("Min-Val: {}".format(X.min()))
                print("Mean:    {}".format(X.mean()))
                print("STD:     {}\n".format(X.std()))
            preds.append(net(X.to(device)).detach().cpu().numpy())
            labels.append(Y)
    else:
        print("Using CPU")
        # NOTE(review): the CPU path never calls net.eval(), matching the
        # original behavior -- confirm whether that is intentional.
        for X, Y in progressbar(loader):
            preds.append(net(X).detach().numpy())
            labels.append(Y)

    preds = np.vstack(preds)
    labels = np.concatenate(labels)

    print('Saving preds to: ' + savepath)
    saved = False

    # Retry the save on MemoryError after a GC pass and a back-off.
    while not saved:
        try:
            np.save(
                savepath, {
                    'feats': preds,
                    'labels': labels,
                    'filenames': filenames,
                    'class_indices': class_indices
                })
            saved = True
        except MemoryError:
            import traceback
            from time import sleep
            traceback.print_exc()
            print('\n\nMemory Error')
            # BUG FIX: message previously said "30 seconds" but the code
            # sleeps for 60; align the message with the actual back-off.
            print("Waiting 60 seconds and will try again..")
            import gc
            gc.collect()
            sleep(60.0)
コード例 #28
0
 def test_alexnet(self):
     """Export a randomly-initialised AlexNet on an all-ones input."""
     ones_input = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
     self.exportTest(toC(alexnet()), toC(ones_input))
コード例 #29
0
def main():
    """Three-stage training pipeline for image-to-voxel 3D reconstruction.

    Stage 1 (AUTO_TRAIN):    train a 3D voxel autoencoder (Encoder + Decoder).
    Stage 2 (REGRESS_TRAIN): freeze the stage-1 encoder and train a 2D AlexNet
                             (Image_2D) to regress its latent vector from an image.
    Stage 3 (TOTAL_TRAIN):   fine-tune Encoder, Decoder and Image_2D jointly.

    Relies on module-level configuration (BATCH_SIZE, GPU_NUM, PRETRAINED,
    LR_1/LR_2/LR_3, EPOCH_1/EPOCH_2/EPOCH_3, AUTO_TRAIN, REGRESS_TRAIN,
    TOTAL_TRAIN, CWD_PATH) and helpers (Visualizer, TrainDataset, Network_3D,
    DecoderNetwork, weights_init, eval.evaluation) defined elsewhere in this file.
    """
    vis = Visualizer(env='env')
    train_data = TrainDataset(CWD_PATH, loaded=True, test=False)
    test_data = TrainDataset(CWD_PATH, loaded=True, test=True)
    trainloader = Data.DataLoader(dataset=train_data,
                                  batch_size=BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=2)
    testloader = Data.DataLoader(dataset=test_data,
                                 batch_size=BATCH_SIZE,
                                 shuffle=True,
                                 num_workers=2)
    # FIX: the original referenced an undefined `valiloader` in stage 1,
    # which raised NameError. No separate validation split is built here,
    # so reuse the test loader.
    # TODO(review): substitute a real validation split if one exists.
    valiloader = testloader

    Encoder = Network_3D()
    Decoder = DecoderNetwork()

    # 2D branch: pretrained AlexNet whose final classifier layer is replaced
    # by a 4096 -> 64 linear projection into the voxel latent space.
    my_alexnet = alexnet(pretrained=PRETRAINED)
    features = list(my_alexnet.classifier.children())[:-1]
    features.extend([nn.Linear(4096, 64)])
    my_alexnet.classifier = nn.Sequential(*features)
    Image_2D = my_alexnet

    Encoder.apply(weights_init)
    Decoder.apply(weights_init)

    # FIX: the original tested `torch.cuda.is_available` (the function object,
    # always truthy) instead of calling it, so CPU-only hosts crashed on
    # .cuda() calls inside the training loops. Evaluate once and reuse.
    use_cuda = torch.cuda.is_available()

    if not use_cuda:
        loss_func_1 = nn.BCELoss()
        loss_func_2 = nn.MSELoss()
    else:
        # FIX: `distributed` is a bool, so the original `distributed > 1`
        # was always False and the DataParallel branch was unreachable.
        distributed = GPU_NUM > 1
        if distributed:
            # NOTE(review): DataParallel prefixes state_dict keys with
            # "module."; saving/loading within one run is consistent, but
            # confirm before loading these checkpoints into bare modules.
            Image_2D = nn.parallel.DataParallel(Image_2D).cuda()
            Encoder = nn.parallel.DataParallel(Encoder).cuda()
            Decoder = nn.parallel.DataParallel(Decoder).cuda()
        else:
            Image_2D = Image_2D.cuda()
            Encoder = Encoder.cuda()
            Decoder = Decoder.cuda()
        loss_func_1 = nn.BCELoss().cuda()
        loss_func_2 = nn.MSELoss().cuda()

    # One optimizer per stage: autoencoder only, image branch only, all three.
    params_1 = [{'params': Encoder.parameters(), 'lr': LR_1},
                {'params': Decoder.parameters(), 'lr': LR_1}]
    params_2 = [{'params': Image_2D.parameters(), 'lr': LR_2}]
    params_3 = [{'params': Encoder.parameters(), 'lr': LR_3},
                {'params': Image_2D.parameters(), 'lr': LR_3},
                {'params': Decoder.parameters(), 'lr': LR_3}]

    optimizer_1 = torch.optim.Adam(params_1)
    optimizer_2 = torch.optim.Adam(params_2)
    optimizer_3 = torch.optim.Adam(params_3)

    if AUTO_TRAIN:
        for epoch in range(EPOCH_1):
            notJointLoss = []
            notJointAP = []
            notJointValiLoss = []
            notJointValiAP = []
            notJointTestLoss = []
            notJointTestAP = []

            for step, (image, voxel) in enumerate(trainloader):
                if use_cuda:
                    voxel = voxel.cuda()
                feature_vector = Encoder(voxel)
                recon_voxel = Decoder(feature_vector).view(BATCH_SIZE, -1)
                real_voxel = voxel.view(BATCH_SIZE, -1)
                loss_1 = loss_func_1(recon_voxel, real_voxel)
                avePre = eval.evaluation(real_voxel.view(-1).cpu(),
                                         recon_voxel.view(-1).cpu())
                # FIX: store python floats, not live (possibly CUDA) tensors;
                # np.mean over a list of CUDA tensors fails, and keeping the
                # tensors alive retains every step's autograd graph.
                notJointLoss.append(loss_1.item())
                notJointAP.append(avePre)

                optimizer_1.zero_grad()
                loss_1.backward()
                optimizer_1.step()

                if step % 200 == 0:
                    print('+++++++EPOCH: ' + str(epoch) + 'LOSS: ' +
                          str(loss_1.item()) + '+++++++')
                    print('AP: %.5f' % avePre)

            # Evaluation passes need no gradients.
            with torch.no_grad():
                for step, (image, voxel) in enumerate(valiloader):
                    if use_cuda:
                        voxel = voxel.cuda()
                    feature_vector = Encoder(voxel)
                    recon_voxel = Decoder(feature_vector).view(BATCH_SIZE, -1)
                    real_voxel = voxel.view(BATCH_SIZE, -1)
                    loss_1 = loss_func_1(recon_voxel, real_voxel)
                    avePre = eval.evaluation(real_voxel.view(-1).cpu(),
                                             recon_voxel.view(-1).cpu())
                    notJointValiLoss.append(loss_1.item())
                    notJointValiAP.append(avePre)

                    if step % 200 == 0:
                        print(
                            '=============WITHOUT JOINT Validation=================='
                        )
                        print('+++++++LOSS:' + str(loss_1.item()) + '+++++++')
                        print('AP: %.5f' % avePre)

                for step, (image, voxel) in enumerate(testloader):
                    if use_cuda:
                        voxel = voxel.cuda()
                    feature_vector = Encoder(voxel)
                    recon_voxel = Decoder(feature_vector).view(BATCH_SIZE, -1)
                    real_voxel = voxel.view(BATCH_SIZE, -1)
                    loss_1 = loss_func_1(recon_voxel, real_voxel)
                    avePre = eval.evaluation(real_voxel.view(-1).cpu(),
                                             recon_voxel.view(-1).cpu())
                    notJointTestLoss.append(loss_1.item())
                    notJointTestAP.append(avePre)

                    if step % 200 == 0:
                        print('=============WITHOUT JOINT TEST==================')
                        print('+++++++LOSS:' + str(loss_1.item()) + '+++++++')
                        print('AP: %.5f' % avePre)

            vis.plot('AUTO_LOSS', [
                np.mean(np.array(notJointLoss)),
                np.mean(np.array(notJointValiLoss)),
                np.mean(np.array(notJointTestLoss))
            ])
            vis.plot('AUTO_AP', [
                np.mean(np.array(notJointAP)),
                np.mean(np.array(notJointValiAP)),
                np.mean(np.array(notJointTestAP))
            ])
            vis.log("epoch:{epoch},lr:{lr},loss:{loss}, AP:{ap_out}\n".format(
                epoch=epoch,
                loss=np.mean(np.array(notJointTestLoss)),
                lr=LR_1,
                ap_out=np.mean(np.array(notJointTestAP))))

        torch.save(Encoder.state_dict(), './ModelDict/encoder.pkl')
        torch.save(Decoder.state_dict(), './ModelDict/decoder.pkl')

    if REGRESS_TRAIN:
        # FIX: load the stage-1 encoder checkpoint once, up front — the
        # original reloaded it on every training step.
        Encoder.load_state_dict(torch.load('./ModelDict/encoder.pkl'))
        for epoch in range(EPOCH_2):
            midLoss = []
            midAP = []
            midTestLoss = []
            midTestAP = []

            for step, (image, voxel) in enumerate(trainloader):
                if use_cuda:
                    image = image.cuda()
                    voxel = voxel.cuda()
                # Only Image_2D is in optimizer_2, so the encoder's latent is
                # a fixed regression target: detach it from the graph.
                feature_vector = Encoder(voxel).detach()
                regress_vector = Image_2D(image)
                loss_2 = loss_func_2(feature_vector,
                                     regress_vector) / 100 / 200
                # FIX: the original computed AP from stale `real_voxel` /
                # `recon_voxel` carried over from stage 1; reconstruct
                # through the decoder as the stage-2 test loop does.
                with torch.no_grad():
                    recon_voxel = Decoder(regress_vector).view(BATCH_SIZE, -1)
                    real_voxel = voxel.view(BATCH_SIZE, -1)
                ap = eval.evaluation(real_voxel.view(-1).cpu(),
                                     recon_voxel.view(-1).cpu())
                midLoss.append(loss_2.item())
                midAP.append(ap)

                optimizer_2.zero_grad()
                loss_2.backward()
                optimizer_2.step()

                if step % 200 == 0:
                    print('+++++++' + str(loss_2.item()) + '+++++++')
                    print('+++++++EPOCH: ' + str(epoch) + 'LOSS: ' +
                          str(loss_2.item()) + '+++++++')
                    print('AP: %.5f' % ap)

            with torch.no_grad():
                for step, (image, voxel) in enumerate(testloader):
                    if use_cuda:
                        image = image.cuda()
                        voxel = voxel.cuda()

                    feature_vector = Encoder(voxel)
                    regress_vector = Image_2D(image)
                    recon_voxel = Decoder(regress_vector).view(BATCH_SIZE, -1)
                    real_voxel = voxel.view(BATCH_SIZE, -1)

                    loss_2 = loss_func_2(feature_vector,
                                         regress_vector) / 100 / 200
                    ap_test = eval.evaluation(real_voxel.view(-1).cpu(),
                                              recon_voxel.view(-1).cpu())
                    midTestLoss.append(loss_2.item())
                    midTestAP.append(ap_test)

                    if step % 50 == 0:
                        print('=============Medium Validation==================')
                        print('+++++++LOSS: ' + str(loss_2.item()) + '+++++++')
                        print('AP: %.5f' % ap_test)

            vis.plot(
                'MID_LOSS',
                [np.mean(np.array(midLoss)),
                 np.mean(np.array(midTestLoss))])
            vis.plot('MID_AP',
                     [np.mean(np.array(midAP)),
                      np.mean(np.array(midTestAP))])
            vis.log("epoch:{epoch},lr:{lr},loss:{loss}, AP:{ap_out}\n".format(
                epoch=epoch,
                loss=np.mean(np.array(midTestLoss)),
                lr=LR_2,
                ap_out=np.mean(np.array(midAP))))

        torch.save(Encoder.state_dict(), './ModelDict/encoder_2.pkl')
        torch.save(Image_2D.state_dict(), './ModelDict/image.pkl')

    if TOTAL_TRAIN:
        # FIX: the original reloaded all three checkpoints on *every* training
        # step, discarding every optimizer_3 update that had just been made.
        # Load once before joint fine-tuning.
        Encoder.load_state_dict(torch.load('./ModelDict/encoder_2.pkl'))
        Decoder.load_state_dict(torch.load('./ModelDict/decoder.pkl'))
        Image_2D.load_state_dict(torch.load('./ModelDict/image.pkl'))
        for epoch in range(EPOCH_3):
            JointLoss = []
            JointAP = []
            JointTestLoss = []
            JointTestAP = []

            for step, (image, voxel) in enumerate(trainloader):
                if use_cuda:
                    image = image.cuda()
                    voxel = voxel.cuda()

                feature_vector = Encoder(voxel)
                regress_vector = Image_2D(image)
                recon_voxel = Decoder(regress_vector).view(BATCH_SIZE, -1)
                real_voxel = voxel.view(BATCH_SIZE, -1)

                ap = eval.evaluation(real_voxel.view(-1).cpu(),
                                     recon_voxel.view(-1).cpu())
                # Joint objective: reconstruction BCE plus a small latent
                # regression penalty.
                loss_3 = loss_func_1(recon_voxel, real_voxel) + loss_func_2(
                    feature_vector, regress_vector) * 0.01 * 0.005
                JointLoss.append(loss_3.item())
                JointAP.append(ap)

                optimizer_3.zero_grad()
                loss_3.backward()
                optimizer_3.step()

                if step % 200 == 0:
                    print('+++++++EPOCH: ' + str(epoch) + 'LOSS: ' +
                          str(loss_3.item()) + '+++++++')
                    print('AP: %.5f' % ap)

            with torch.no_grad():
                for step, (image, voxel) in enumerate(testloader):
                    if use_cuda:
                        image = image.cuda()
                        voxel = voxel.cuda()

                    feature_vector = Encoder(voxel)
                    regress_vector = Image_2D(image)
                    recon_voxel = Decoder(regress_vector).view(BATCH_SIZE, -1)
                    real_voxel = voxel.view(BATCH_SIZE, -1)

                    loss_3 = loss_func_1(recon_voxel, real_voxel) + loss_func_2(
                        feature_vector, regress_vector) * 0.01 * 0.005
                    ap_test = eval.evaluation(real_voxel.view(-1).cpu(),
                                              recon_voxel.view(-1).cpu())
                    JointTestLoss.append(loss_3.item())
                    JointTestAP.append(ap_test)

                    if step % 50 == 0:
                        print('=============FINAL Validation==================')
                        print('+++++++LOSS: ' + str(loss_3.item()) + '+++++++')
                        print('AP: %.5f' % ap_test)

            vis.plot('FINAL_LOSS', [
                np.mean(np.array(JointLoss)),
                np.mean(np.array(JointTestLoss))
            ])
            vis.plot(
                'FINAL_AP',
                [np.mean(np.array(JointAP)),
                 np.mean(np.array(JointTestAP))])
            vis.log("epoch:{epoch},lr:{lr},loss:{loss}, AP:{ap_out}\n".format(
                epoch=epoch,
                loss=np.mean(np.array(JointTestLoss)),
                lr=LR_3,
                ap_out=np.mean(np.array(JointTestAP))))

        torch.save(Decoder.state_dict(), './ModelDict/decoder_final.pkl')
        torch.save(Encoder.state_dict(), './ModelDict/encoder_final.pkl')
        torch.save(Image_2D.state_dict(), './ModelDict/image_final.pkl')
# ==== Code example #30 (score: 0) ====
import torch
from torch2trt import torch2trt
from torchvision.models.alexnet import alexnet

# Stock pretrained AlexNet, in inference mode, on the GPU.
net = alexnet(pretrained=True).eval().cuda()

# A single dummy batch used to trace the network for conversion.
sample = torch.ones((1, 3, 224, 224)).cuda()

# Compile the model into a TensorRT engine, tracing with the sample input.
net_trt = torch2trt(net, [sample])

# Run the original and the TensorRT engine on the same input...
out_ref = net(sample)
out_trt = net_trt(sample)

# ...and report the largest element-wise deviation between the two outputs.
print(torch.max(torch.abs(out_ref - out_trt)).item())
# ==== Code example #31 (score: 0) ====
from model2blender import model2blender
import torch
from torch.autograd import Variable
from torchvision.models.vgg import vgg11
from torchvision.models.alexnet import alexnet

# Dummy image batch used to trace the network graph for visualisation.
dummy_batch = Variable(torch.randn(1, 3, 224, 224), requires_grad=True)

# Render AlexNet in Blender; swap in vgg11() below to render VGG instead.
net = alexnet()
# net = vgg11()
model2blender(net, dummy_batch)