def qmodel(name, device):
    if name == "resnet18":
        return ResNet18().to(device)
    elif name == "resnet34":
        return ResNet34().to(device)
    elif name == "resnet50":
        return ResNet50().to(device)
    elif name == "resnet101":
        return ResNet101().to(device)
    elif name == "resnet152":
        return ResNet152().to(device)
    elif name == "vgg11":
        return VGG("VGG11").to(device)
    elif name == "vgg13":
        return VGG("VGG13").to(device)
    elif name == "vgg16":
        return VGG("VGG16").to(device)
    elif name == "vgg19":
        return VGG("VGG19").to(device)
    elif name == "densenet121":
        return DenseNet121().to(device)
    elif name == "densenet169":
        return DenseNet169().to(device)
    elif name == "densenet201":
        return DenseNet201().to(device)
    elif name == "resnext":
        return ResNeXt29_8x64d().to(device)
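# Minimal usage sketch for the factory above (an illustrative assumption, not part
# of the original snippet; it presumes torch and the project's model classes
# such as VGG are already imported in this module):
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net = qmodel("vgg16", device)  # returns VGG("VGG16") already moved to `device`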
def get_model(model_name, pho_size=299, num_classes=110):
    if model_name == "vgg16":
        model = VGG(num_classes=num_classes, pho_size=299)
    elif model_name == "resnet101":
        model = resnet101(num_classes=num_classes)
    elif model_name == "resnet152":
        model = resnet152(num_classes=num_classes)
    elif model_name == "densenet":
        model = DenseNet(growth_rate=12,
                         block_config=[(100 - 4) // 6 for _ in range(3)],
                         num_classes=num_classes,
                         small_inputs=False,
                         efficient=True,
                         pho_size=pho_size)
    elif model_name == "InceptionResNetV2":
        model = InceptionResNetV2(num_classes=num_classes)
    elif model_name == "InceptionV4":
        model = InceptionV4(num_classes=num_classes)
    elif model_name == "Inception3":
        model = Inception3(num_classes=num_classes)
    elif model_name == "denoise":
        model = get_denoise()
    elif model_name == "Mymodel":
        model = Mymodel()
    elif model_name == 'Comdefend':
        model = ComDefend()
    elif model_name == 'Rectifi':
        model = Rectifi()
    return model
def build_model(args):
    if 'vgg' in args.backbone:
        return VGG(args)
    elif 'res' in args.backbone:
        return build_ResNet(args)
    elif 'dense' in args.backbone:
        return build_DenseNet(args)
    elif 'efficient' in args.backbone:
        return build_EfficientNet(args)
def get_model(model_name):
    if model_name == "vgg16":
        model = VGG(num_classes=110)
    elif model_name == "resnet101":
        model = resnet101(num_classes=1000)
    elif model_name == "densenet":
        model = DenseNet(
            growth_rate=12,
            block_config=[(100 - 4) // 6 for _ in range(3)],
            num_classes=110,
            small_inputs=False,
            efficient=True,
        )
    else:
        model = None
    return model
def get_model(model_name):
    if model_name == 'resnet':
        from model.resnet import ResNet18
        net = ResNet18(10)
    elif model_name == 'lenet':
        from model.lenet import LeNet
        net = LeNet(10)
    elif model_name == 'densenet':
        from model.densenet import DenseNet
        net = DenseNet(growthRate=12, depth=40, reduction=0.5,
                       bottleneck=True, nClasses=10)
    elif model_name == 'vgg':
        from model.vgg import VGG
        net = VGG('VGG16', num_classes=10)
    return net
class MQP_VGG_regression(nn.Module):
    def __init__(self, channel=32):
        super(MQP_VGG_regression, self).__init__()
        self.vgg = VGG()
        self.rfb3_1 = RFB(256, channel)
        self.rfb4_1 = RFB(512, channel)
        self.rfb5_1 = RFB(512, channel)
        self.agg1 = aggregation(channel)
        self.rfb3_2 = RFB(256, channel)
        self.rfb4_2 = RFB(512, channel)
        self.rfb5_2 = RFB(512, channel)
        self.agg2 = aggregation(channel)
        self.HA = HA()
        self.upsample = nn.Upsample(scale_factor=4, mode='bilinear', align_corners=False)

    def forward(self, x):
        x1 = self.vgg.conv1(x)
        x2 = self.vgg.conv2(x1)
        x3 = self.vgg.conv3(x2)
        x3_1 = x3
        x4_1 = self.vgg.conv4_1(x3_1)
        x5_1 = self.vgg.conv5_1(x4_1)
        x3_1 = self.rfb3_1(x3_1)
        x4_1 = self.rfb4_1(x4_1)
        x5_1 = self.rfb5_1(x5_1)
        attention = self.agg1(x5_1, x4_1, x3_1)
        x3_2 = self.HA(attention.sigmoid(), x3)
        x4_2 = self.vgg.conv4_2(x3_2)
        x5_2 = self.vgg.conv5_2(x4_2)
        x3_2 = self.rfb3_2(x3_2)
        x4_2 = self.rfb4_2(x4_2)
        x5_2 = self.rfb5_2(x5_2)
        detection = self.agg2(x5_2, x4_2, x3_2)
        return self.upsample(attention), self.upsample(detection)
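# Hypothetical smoke test for the two-branch network above (not from the original
# code; the 352x352 input size is an assumption): feed one dummy 3-channel image
# and inspect the two upsampled output maps.
import torch

net = MQP_VGG_regression(channel=32)
dummy = torch.randn(1, 3, 352, 352)
attention_map, detection_map = net(dummy)
print(attention_map.shape, detection_map.shape)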
if model_name == 'resnet':
    from model.resnet import ResNet18
    net = ResNet18(10)
elif model_name == 'lenet':
    from model.lenet import LeNet
    net = LeNet(10)
elif model_name == 'densenet':
    from model.densenet import DenseNet
    net = DenseNet(growthRate=12, depth=40, reduction=0.5,
                   bottleneck=True, nClasses=10)
elif model_name == 'vgg':
    from model.vgg import VGG
    net = VGG('VGG16', num_classes=10)

if resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir(save_path), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(save_path + '/%s_ckpt.t7' % model_name)
    net.load_state_dict(checkpoint['net'])

if use_cuda:
    Device = int(sys.argv[3])  # Device = 0
    net.cuda(Device)
    cudnn.benchmark = True

criterion = nn.CrossEntropyLoss()
        sys.stdout.flush()
        self._loss += _loss
        self.model.zero_grad()
        processed_image.requires_grad_(False)
        created_image = recreate_image(processed_image, self.ImageNet, self.reshape)
        images.append(created_image)
        self._loss /= self._n
        self._layer_name = 'output'
        self._save(images)

    def _save(self, x):
        path = '../save/para/[' + self.model.name + ']/'
        if not os.path.exists(path):
            os.makedirs(path)
        if type(x) == list:
            file = self._layer_name + ' (vis), loss = {:.4f}'.format(self._loss)
            _save_multi_img(x, int(np.sqrt(self._n)), path=path + file)
        else:
            file = self._layer_name + ' (vis), loss = {:.4f}'.format(self._loss)
            _save_img(x, path=path + file)
        sys.stdout.write('\r')
        sys.stdout.flush()
        print("Visual saved in: " + path + file + " " * 25)


if __name__ == '__main__':
    from model.vgg import VGG

    vgg = VGG('vgg11', batch_norm=True, load_pre=True)
    vgg._visual('weight', filter_id=0, epoch=100)
from model.vgg import VGG
from swapper import Swapper

TARGET_LAYERS = ['relu3_1', 'relu2_1', 'relu1_1']
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--patch_size', default=3)
    parser.add_argument('--stride', default=1)
    args, unknown = parser.parse_known_args()

    model = VGG(model_type='vgg19').to(device)
    swapper = Swapper(args.patch_size, args.stride).to(device)

    dataset = SwappingDataset('CUFED5')
    data_loader = DataLoader(dataset, batch_size=1, num_workers=4)
    save_path = os.path.join(dataset.path, 'texture')
    os.makedirs(save_path, exist_ok=True)

    print("Offline Swapping...")
    for i, (data) in enumerate(data_loader):
        input_imgs, lowRef_imgs, highRef_imgs, filenames = data
        input_imgs = input_imgs.to(device)
        lowRef_imgs = lowRef_imgs.to(device)
        highRef_imgs = highRef_imgs.to(device)
if __name__ == '__main__':
    import cv2
    import torch
    from model.vgg import VGG
    from model.cnn import CNN

    start_p = time.perf_counter()
    # os.environ["CUDA_VISIBLE_DEVICES"] = "3,2,1"

    # load image for text
    image_path = './test/text/1032838434.jpg'
    img = cv2.imread(image_path)  # ndarray

    # load model
    textModel = VGG(3).cuda()
    checkpoint = torch.load(textPath)
    textModel.load_state_dict(checkpoint)
    print(textModel)

    ocrModel = CNN(1).cuda()
    checkpoint = torch.load(ocrPath)
    ocrModel.load_state_dict(checkpoint)
    print(ocrModel)

    end = time.perf_counter()
    print('Load Model time is {}'.format(end - start_p))

    text = text_ocr(img, textModel, ocrModel)
    end_p = time.perf_counter()
import numpy as np
import web

web.config.debug = False
render = web.template.render('templates', base='base')

from main import text_ocr
from helper.image import read_url_img, base64_to_PIL, get_now
from config import ocrPath, textPath, GPU

# load model
import torch
from model.vgg import VGG
from model.cnn import CNN

textModel = VGG(3).cuda() if GPU else VGG(3)
checkpoint = torch.load(textPath)
textModel.load_state_dict(checkpoint)
print(textModel)

ocrModel = CNN(1).cuda() if GPU else CNN(1)
checkpoint = torch.load(ocrPath)
ocrModel.load_state_dict(checkpoint)
print(ocrModel)

billList = []
root = './test/'
timeOutTime = 5


def job(uid, url, imgString, iscut, isclass, billModel, ip):
    scores, boxes, rate, w, h = detect_box(model, image, scale, maxScale)  # textModel, image is opencv image
    size = (h, w)
    text_lines, scores = detectors.detect(boxes, scores, size,
                                          TEXT_PROPOSALS_MIN_SCORE,
                                          TEXT_PROPOSALS_NMS_THRESH,
                                          TEXT_LINE_NMS_THRESH,
                                          TEXT_LINE_SCORE)
    if len(text_lines) > 0:
        text_lines = text_lines / rate
    return text_lines, scores


if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "3,2,1"
    sys.path.append('../')
    from model.vgg import VGG

    img = cv2.imread('../test/text/1032838434.jpg')
    print(img.shape)
    scale = 900
    maxScale = 1800
    model = VGG(3).cuda()
    checkpoint = torch.load('../weight/text/text.pth.tar')
    model.load_state_dict(checkpoint)
    boxes, scores = detect_lines(model, img, scale=scale, maxScale=maxScale)
    print(scores)
    print(boxes)
    D1 = models.vgg16(pretrained=True).cuda()
    D1 = nn.DataParallel(D1)
    D1.eval()
    D2 = models.resnet50(pretrained=True).cuda()
    D2 = nn.DataParallel(D2)
    D2.eval()
    num_classes = 1000
    img_size = 224
    img_ch = 3
    clipmax = [2.249, 2.429, 2.640]
    clipmin = [-2.118, -2.036, -1.804]
elif args.dataset == 'cifar10':
    D1_path = os.path.join(load_dir, 'D1_network.pth')
    D1 = VGG('VGG16').cuda()
    D1.load_state_dict(torch.load(D1_path))
    D1.eval()
    D2_path = os.path.join(load_dir, 'D2_network.pth')
    D2 = ResNet50().cuda()
    D2.load_state_dict(torch.load(D2_path))
    D2.eval()
    num_classes = 10
    img_size = 32
    img_ch = 3
    clipmax = 1.
    clipmin = 0.
else:
    D2_path = os.path.join(load_dir, 'Netll_mnist.pth')
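# Aside (not part of the original script): the per-channel clip bounds in the first
# branch above are just the pixel range [0, 1] mapped through the standard ImageNet
# normalization (x - mean) / std with mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]:
#   clipmax = [(1.0 - m) / s for m, s in zip(mean, std)]  # -> [2.249, 2.429, 2.640]
#   clipmin = [(0.0 - m) / s for m, s in zip(mean, std)]  # -> [-2.118, -2.036, -1.804]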
if model_name == 'resnet':
    from model.resnet import ResNet18
    net = ResNet18(10)
elif model_name == 'lenet':
    from model.lenet import LeNet
    net = LeNet(10)
elif model_name == 'densenet':
    from model.densenet import DenseNet
    net = DenseNet(growthRate=12, depth=40, reduction=0.5,
                   bottleneck=True, nClasses=10)
elif model_name == 'vgg':
    from model.vgg import VGG
    net = VGG('VGG16')

if resume:
    # Load checkpoint.
    print('==> Resuming from checkpoint..')
    assert os.path.isdir(save_path), 'Error: no checkpoint directory found!'
    checkpoint = torch.load(save_path + '/%s_ckpt.t7' % model_name)
    net.load_state_dict(checkpoint['net'])

if use_cuda:
    Device = int(sys.argv[3])  # Device = 0
    net.cuda(Device)
    cudnn.benchmark = True

criterion = nn.CrossEntropyLoss()
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=0)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=16, shuffle=True, num_workers=0)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# net = MobileNet().cuda()
# net = MobileNetV2(n_class=10, input_size=32).cuda()
# net = ResNet18().cuda()
net = VGG("VGG19").cuda()

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
criterion = criterion.cuda()

for epoch in range(120):
    net.train()
    for i, data in enumerate(trainloader, start=1):
        inputs, labels = data
        inputs = inputs.cuda()
        labels = labels.cuda()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
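        # The snippet is cut off above; a conventional continuation of this
        # training step (an assumption, not from the original source) would be:
        optimizer.zero_grad()   # clear gradients from the previous iteration
        loss.backward()         # backpropagate the cross-entropy loss
        optimizer.step()        # update the VGG parameters with SGD + momentum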
        if args.sbp_dir is None else args.sbp_dir
    gend_dir = os.path.join('../results', args.net,
                            'gend/iter%d/trial_%e' % (args.n_iter, args.init_lr)) \
        if args.gend_dir is None else args.gend_dir
elif args.net == 'vgg':
    if args.data == 'cifar10':
        input_fn = utils.cifar10.input_fn
        NUM_TRAIN = utils.cifar10.NUM_TRAIN
        NUM_TEST = utils.cifar10.NUM_TEST
        n_classes = 10
    elif args.data == 'cifar100':
        input_fn = utils.cifar100.input_fn
        NUM_TRAIN = utils.cifar100.NUM_TRAIN
        NUM_TEST = utils.cifar100.NUM_TEST
        n_classes = 100
    net = VGG(n_classes)
    base_dir = os.path.join('../results', args.net, args.data, 'base') \
        if args.base_dir is None else args.base_dir
    bbd_dir = os.path.join('../results', args.net, args.data, 'bbd/trial') \
        if args.bbd_dir is None else args.bbd_dir
    dbbd_dir = os.path.join('../results', args.net, args.data, 'dbbd/trial') \
        if args.dbbd_dir is None else args.dbbd_dir
    sbp_dir = os.path.join('../results', args.net,
                           '%s/sbp/iter%d/trial_%e' % (args.data, args.n_iter, args.init_lr)) \
        if args.sbp_dir is None else args.sbp_dir
    gend_dir = os.path.join('../results', args.net,
                            '%s/gend/iter%d/trial_%e' % (args.data, args.n_iter, args.init_lr)) \
        if args.gend_dir is None else args.gend_dir
else: