Example #1
    def __init__(self, num_classes=1000):

        super(FCN_32s, self).__init__()

        # Load the model with convolutionalized
        # fully connected layers
        vgg16 = vgg.vgg16(pretrained=True, fully_conv=True)

        # Copy all the feature layers as is
        self.features = vgg16.features

        # TODO: check if Dropout works correctly for
        # fully convolutional mode

        # Remove the final 1x1 classification convolution:
        # it was trained for ImageNet's 1000 classes, and we
        # will classify a different set of classes.
        fully_conv = list(vgg16.classifier.children())
        fully_conv = fully_conv[:-1]
        self.fully_conv = nn.Sequential(*fully_conv)

        # Get a new 1x1 convolution and randomly initialize
        score_32s = nn.Conv2d(4096, num_classes, 1)
        self._normal_initialization(score_32s)
        self.score_32s = score_32s
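
The `_normal_initialization` helper is referenced above but not shown. A minimal sketch, assuming a zero-mean Gaussian for the weights and zeros for the bias (the project's actual scheme may differ):

    def _normal_initialization(self, layer):
        # assumed scheme: small Gaussian weights, zero bias
        layer.weight.data.normal_(0, 0.01)
        layer.bias.data.zero_()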
Example #2
    def __init__(self,
                 nclass,
                 backbone='vgg16',
                 aux=False,
                 pretrained_base=True,
                 norm_layer=nn.BatchNorm2d,
                 **kwargs):
        super(FCN8s, self).__init__()
        self.aux = aux
        if backbone == 'vgg16':
            self.pretrained = vgg16(pretrained=pretrained_base).features
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        self.pool3 = nn.Sequential(*self.pretrained[:17])
        self.pool4 = nn.Sequential(*self.pretrained[17:24])
        self.pool5 = nn.Sequential(*self.pretrained[24:])
        self.head = _FCNHead(512, nclass, norm_layer)
        self.score_pool3 = nn.Conv2d(256, nclass, 1)
        self.score_pool4 = nn.Conv2d(512, nclass, 1)
        if aux:
            self.auxlayer = _FCNHead(512, nclass, norm_layer)

        self.__setattr__('exclusive',
                         ['head', 'score_pool3', 'score_pool4', 'auxlayer']
                         if aux else ['head', 'score_pool3', 'score_pool4'])
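
For orientation, here is one plausible forward pass wiring these pieces together in the classic FCN-8s fashion; a sketch only, assuming `import torch.nn.functional as F` (the repo's actual forward and `_FCNHead` may differ):

    def forward(self, x):
        size = x.shape[2:]
        pool3 = self.pool3(x)
        pool4 = self.pool4(pool3)
        pool5 = self.pool5(pool4)
        # score the deepest features, then fuse the pool4 and pool3 skips
        score = self.head(pool5)
        score = F.interpolate(score, pool4.shape[2:], mode='bilinear',
                              align_corners=True)
        score = score + self.score_pool4(pool4)
        score = F.interpolate(score, pool3.shape[2:], mode='bilinear',
                              align_corners=True)
        score = score + self.score_pool3(pool3)
        # final upsample back to the input resolution (8x overall)
        return F.interpolate(score, size, mode='bilinear', align_corners=True)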
Example #3
    def __init__(self, load_weights=False):
        super(CSRNet, self).__init__()
        self.seen = 0
        self.frontend_feat = [
            64,
            64,
            "M",
            128,
            128,
            "M",
            256,
            256,
            256,
            "M",
            512,
            512,
            512,
        ]
        self.backend_feat = [512, 512, 512, 256, 128, 64]
        self.frontend = make_layers(self.frontend_feat)
        self.backend = make_layers(self.backend_feat,
                                   in_channels=512,
                                   dilation=True)
        self.output_layer = nn.Conv2d(64, 1, kernel_size=1)

        if not load_weights:
            mod = vgg16(pretrained=True)
            pretrain_models = flow.load(
                "vgg_imagenet_pretrain_model/vgg16_oneflow_model")
            mod.load_state_dict(pretrain_models)
            self._initialize_weights()
            # Copy the pretrained VGG-16 weights into the frontend layers
            for i in range(len(self.frontend.state_dict().items())):
                src = list(mod.state_dict().items())[i][1]
                list(self.frontend.state_dict().items())[i][1].copy_(src)
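
`make_layers` is the usual VGG config-list builder. A minimal sketch, assuming 'M' marks a max-pooling stage and `dilation=True` switches the 3x3 convolutions to dilation rate 2 (as in the CSRNet backend):

def make_layers(cfg, in_channels=3, batch_norm=False, dilation=False):
    d_rate = 2 if dilation else 1
    layers = []
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3,
                               padding=d_rate, dilation=d_rate)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)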
Example #4
    def __init__(self):
        self.global_step = opt.load_step
        self.prepare_paths()
        self.data_loader = get_loader(self.data_path, opt.b_size,
                                      opt.scale_size, True, opt.num_workers)

        self.build_model()

        # self.z = Variable(torch.FloatTensor(opt.b_size, opt.h))
        # self.fixed_z = Variable(torch.FloatTensor(opt.b_size, opt.h))
        # self.fixed_z.data.uniform_(-1, 1)
        # self.fixed_x = None

        # self.criterion_perceptual = perceptual_models.PerceptualLoss(model='net-lin', net='vgg', use_gpu=True, spatial=True)
        self.vgg = vgg16(pretrained=True)
        self.vgg.eval()
        self.criterion_l1 = nn.L1Loss()
        self.criterion_l2 = nn.MSELoss()
        self.adversarial_loss = torch.nn.BCELoss()

        if opt.cuda:
            self.set_cuda()
            self.cuda = True
        else:
            self.cuda = False
Example #5
def main():
    # parse the argument
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'data_list',
        help='The path of data list file, which consists of one image path per line'
    )
    parser.add_argument(
        'model',
        help='The model for image classification',
        choices=[
            'alexnet', 'vgg13', 'vgg16', 'vgg19', 'resnet', 'googlenet',
            'inception-resnet-v2', 'inception_v4', 'xception'
        ])
    parser.add_argument(
        'params_path', help='The file which stores the parameters')
    args = parser.parse_args()

    # PaddlePaddle init
    paddle.init(use_gpu=True, trainer_count=1)

    image = paddle.layer.data(
        name="image", type=paddle.data_type.dense_vector(DATA_DIM))

    if args.model == 'alexnet':
        out = alexnet.alexnet(image, class_dim=CLASS_DIM)
    elif args.model == 'vgg13':
        out = vgg.vgg13(image, class_dim=CLASS_DIM)
    elif args.model == 'vgg16':
        out = vgg.vgg16(image, class_dim=CLASS_DIM)
    elif args.model == 'vgg19':
        out = vgg.vgg19(image, class_dim=CLASS_DIM)
    elif args.model == 'resnet':
        out = resnet.resnet_imagenet(image, class_dim=CLASS_DIM)
    elif args.model == 'googlenet':
        out, _, _ = googlenet.googlenet(image, class_dim=CLASS_DIM)
    elif args.model == 'inception-resnet-v2':
        assert DATA_DIM == 3 * 331 * 331 or DATA_DIM == 3 * 299 * 299
        out = inception_resnet_v2.inception_resnet_v2(
            image, class_dim=CLASS_DIM, dropout_rate=0.5, data_dim=DATA_DIM)
    elif args.model == 'inception_v4':
        out = inception_v4.inception_v4(image, class_dim=CLASS_DIM)
    elif args.model == 'xception':
        out = xception.xception(image, class_dim=CLASS_DIM)

    # load parameters
    with gzip.open(args.params_path, 'r') as f:
        parameters = paddle.parameters.Parameters.from_tar(f)

    file_list = [line.strip() for line in open(args.data_list)]
    test_data = [(paddle.image.load_and_transform(image_file, 256, 224, False)
                  .flatten().astype('float32'), ) for image_file in file_list]
    probs = paddle.infer(
        output_layer=out, parameters=parameters, input=test_data)
    lab = np.argsort(-probs)
    for file_name, result in zip(file_list, lab):
        print "Label of %s is: %d" % (file_name, result[0])
Example #6
    def init_net(self):

        net_args = {
            "pretrained": True,
            "n_input_channels": len(self.kwargs["static"]["imagery_bands"])
        }

        # https://pytorch.org/docs/stable/torchvision/models.html
        if self.kwargs["net"] == "resnet18":
            self.model = resnet.resnet18(**net_args)
        elif self.kwargs["net"] == "resnet34":
            self.model = resnet.resnet34(**net_args)
        elif self.kwargs["net"] == "resnet50":
            self.model = resnet.resnet50(**net_args)
        elif self.kwargs["net"] == "resnet101":
            self.model = resnet.resnet101(**net_args)
        elif self.kwargs["net"] == "resnet152":
            self.model = resnet.resnet152(**net_args)
        elif self.kwargs["net"] == "vgg11":
            self.model = vgg.vgg11(**net_args)
        elif self.kwargs["net"] == "vgg11_bn":
            self.model = vgg.vgg11_bn(**net_args)
        elif self.kwargs["net"] == "vgg13":
            self.model = vgg.vgg13(**net_args)
        elif self.kwargs["net"] == "vgg13_bn":
            self.model = vgg.vgg13_bn(**net_args)
        elif self.kwargs["net"] == "vgg16":
            self.model = vgg.vgg16(**net_args)
        elif self.kwargs["net"] == "vgg16_bn":
            self.model = vgg.vgg16_bn(**net_args)
        elif self.kwargs["net"] == "vgg19":
            self.model = vgg.vgg19(**net_args)
        elif self.kwargs["net"] == "vgg19_bn":
            self.model = vgg.vgg19_bn(**net_args)

        else:
            raise ValueError("Invalid network specified: {}".format(
                self.kwargs["net"]))

        #  run type: 1 = fine tune, 2 = fixed feature extractor
        #  - replace run type option with "# of layers to fine tune"
        if self.kwargs["run_type"] == 2:
            layer_count = len(list(self.model.parameters()))
            for layer, param in enumerate(self.model.parameters()):
                if layer <= layer_count - 5:
                    param.requires_grad = False

        # Parameters of newly constructed modules have requires_grad=True by default
        # get existing number for input features
        # set new number for output features to number of categories being classified
        # see: https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
        if "resnet" in self.kwargs["net"]:
            num_ftrs = self.model.fc.in_features
            self.model.fc = nn.Linear(num_ftrs, self.ncats)
        elif "vgg" in self.kwargs["net"]:
            num_ftrs = self.model.classifier[6].in_features
            self.model.classifier[6] = nn.Linear(num_ftrs, self.ncats)
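
A quick way to sanity-check the fixed-feature-extractor path (run_type 2) is to count trainable parameters after init_net runs. A sketch, where `job` stands in for whatever object owns init_net:

job.init_net()
trainable = sum(p.numel() for p in job.model.parameters() if p.requires_grad)
total = sum(p.numel() for p in job.model.parameters())
print("trainable params: {} / {}".format(trainable, total))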
Example #7
 def __init__(self, vgg_is_fixed, retain_upto=4):
     super(VGGNetFeats, self).__init__()
     self.vgg_model = vgg.vgg16(pretrained=True, retain_upto=retain_upto)
     self.normalizer = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                            std=[0.229, 0.224, 0.225])
     self.to_tensor = transforms.ToTensor()
     self.vgg_is_fixed = vgg_is_fixed
     self.retain_upto = retain_upto
     print "VGGNetFeats: ", self._modules.keys()
Example #8
    def __init__(self):
        super(EAST, self).__init__()

        # parameters
        self.TEXT_SCALE = 512
        self.pi = mindspore.Tensor([math.pi], mindspore.float32)

        #network
        self.model = vgg.vgg16()
        
        #for i = 0
        self.split = P.Split(1, 2)
        self.unpool0 = unpool((32, 32))
        self._concat = P.Concat(axis=1)

        #for i = 1
        self.concat1 = P.Concat(axis=1)
        self.conv1_1 = ops.conv_bn_relu(1024, 128, stride=1, kernel_size=1, padding='valid')
        self.conv1_2 = ops.conv_bn_relu(128, 128, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.unpool1 = unpool((64, 64))

        #for i = 2
        self.concat2 = P.Concat(axis=1)
        self.conv2_1 = ops.conv_bn_relu(384, 64, stride=1, kernel_size=1, padding='valid')
        self.conv2_2 = ops.conv_bn_relu(64, 64, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.unpool2 = unpool((128, 128))


        #for i = 3
        self.concat3 = P.Concat(axis=1)
        self.conv3_1 = ops.conv_bn_relu(192, 32, stride=1, kernel_size=1, padding='valid')
        self.conv3_2 = ops.conv_bn_relu(32, 32, stride=1, kernel_size=3, padding='pad', padding_number=1)
        self.conv3_3 = ops.conv_bn_relu(32, 32, stride=1, kernel_size=3, padding='pad', padding_number=1)


        #output
        ## F_score
        self.conv_for_fscore = ops._conv(32, 1, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_fscore = nn.Sigmoid()

        ## geo_map
        self.conv_for_geo_map = ops._conv(32, 4, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_geo_map = nn.Sigmoid()

        ## angle_map
        self.conv_for_angle_map = ops._conv(32, 1, stride=1, kernel_size=1, padding='valid')
        self.sigmoid_for_angle_map = nn.Sigmoid()

        ## F_geometry
        self.concat_for_F_geometry = P.Concat(axis=1)


        ## other
        self.mul = P.Mul()
        self.add = P.TensorAdd()
Example #9
File: art.py Project: dlimx/pAInt
def main(argv):

    art_image = cv2.imread('images/starry_night.jpg')
    user_image = cv2.imread('images/trump.jpg')

    image_shape = user_image.shape

    image = tf.Variable(initial_value=np.random.rand(1, image_shape[0],
                                                     image_shape[1],
                                                     image_shape[2]),
                        dtype=tf.float32,
                        trainable=True,
                        name='output_image')

    sess = tf.Session()

    with tf.variable_scope('vgg'):
        vgg = vgg16(image, reuse=False)
        download_weights_maybe('weights/vgg16_weights.npz')
        vgg.load_weights('weights/vgg16_weights.npz', sess)

    style_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
    content_layers = ['conv3_2', 'conv4_2']

    feature_matrices, gram_matrices = precompute(style_layers,
                                                 content_layers,
                                                 vgg_scope='vgg',
                                                 sess=sess,
                                                 user_image=user_image,
                                                 art_image=art_image)

    content_layer_ops = [vgg.get_layer(layer) for layer in content_layers]
    style_layer_ops = [vgg.get_layer(layer) for layer in style_layers]

    loss = total_loss(image, content_layer_ops, style_layer_ops,
                      feature_matrices, gram_matrices)

    optimizer = tf.train.AdamOptimizer(
        learning_rate=LEARNING_RATE).minimize(loss)

    global_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

    sess.run(tf.variables_initializer(global_vars))

    for step in range(550):
        sys.stdout.flush()
        sys.stdout.write('\r Step %i' % step)

        sess.run(optimizer)
        if step % 50 == 0:
            print "\rLoss for step %i: %f" % (step, sess.run(loss))
            cv2.imwrite('images/result.png',
                        sess.run(image).reshape(image_shape))

    print('Final Loss: %f' % sess.run(loss))
    cv2.imwrite('images/result.png', sess.run(image).reshape(image_shape))
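
The script leans on a `total_loss` helper that is not shown. A minimal sketch in the same TF1 style, assuming Gatys-style losses and hypothetical CONTENT_WEIGHT / STYLE_WEIGHT constants:

def total_loss(image, content_layer_ops, style_layer_ops,
               feature_matrices, gram_matrices):
    # content term: match feature matrices; style term: match Gram matrices
    content_loss = tf.add_n([
        tf.reduce_mean(tf.squared_difference(feature_matrix(op), target))
        for op, target in zip(content_layer_ops, feature_matrices)])
    style_loss = tf.add_n([
        tf.reduce_mean(tf.squared_difference(gram_matrix(feature_matrix(op)),
                                             target))
        for op, target in zip(style_layer_ops, gram_matrices)])
    return CONTENT_WEIGHT * content_loss + STYLE_WEIGHT * style_loss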
Example #10
 def __init__(self, num_classes=40, feature_path=None):
     super(MyVGG, self).__init__()
     net = vgg.vgg16(pretrained=True, model_path=feature_path)
     net.classifier = nn.Sequential()
     self.features = net
     self.classifier = nn.Sequential(
         nn.Linear(512 * 7 * 7, 4096),
         nn.ReLU(True),
         nn.Dropout(),
         nn.Linear(4096, num_classes),
     )
Example #11
def load_model(path):
    net = vgg16(pretrained=False, progress=False)
    classifier = nn.Sequential(
        OrderedDict([('fc1', nn.Linear(25088, 4096)), ('relu1', nn.ReLU()),
                     ('fc2', nn.Linear(4096, 1000)), ('relu2', nn.ReLU()),
                     ('fc3', nn.Linear(1000, 10)),
                     ('output', nn.LogSoftmax(dim=1))]))
    # Replace the default classifier with the 10-class head
    net.classifier = classifier
    net.to(device)
    net.load_state_dict(torch.load(path))
    return net
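
A short usage sketch for this loader; the checkpoint path is hypothetical and `device` is assumed to be defined as in the surrounding module:

net = load_model('checkpoints/vgg16_10class.pth')  # hypothetical path
net.eval()
with torch.no_grad():
    log_probs = net(images.to(device))  # images: [N, 3, 224, 224]
    predictions = log_probs.argmax(dim=1)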
Example #12
File: art.py Project: dlimx/pAInt
def precompute(style_layers, content_layers, vgg_scope, sess, user_image,
               art_image):

    image_shape = art_image.shape
    x = tf.placeholder(dtype=tf.float32, shape=[1] + list(image_shape))

    with tf.variable_scope(vgg_scope, reuse=True):
        vgg = vgg16(x, reuse=True)

    gram_matrices = []
    for layer in style_layers:
        layer_op = vgg.get_layer(layer)
        gram_op = gram_matrix(feature_matrix(layer_op))
        gram = sess.run(
            gram_op, feed_dict={x: art_image.reshape([1] + list(image_shape))})

        shape = list(gram.shape)
        gram_matrices += [tf.constant(gram.reshape(shape[1:]))]

    feature_matrices = []
    if len(content_layers) > 0:

        image_shape = user_image.shape
        x = tf.placeholder(dtype=tf.float32, shape=[1] + list(image_shape))

        with tf.variable_scope(vgg_scope, reuse=True):
            vgg = vgg16(x, reuse=True)

        for layer in content_layers:
            layer_op = vgg.get_layer(layer)
            feature_op = feature_matrix(layer_op)
            feature = sess.run(
                feature_op,
                feed_dict={x: user_image.reshape([1] + list(image_shape))})

            shape = list(feature.shape)
            feature_matrices += [tf.constant(feature.reshape(shape[1:]))]

    return feature_matrices, gram_matrices
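
`feature_matrix` and `gram_matrix` are imported from elsewhere in this project; one plausible TF1 sketch, assuming activations shaped [1, H, W, C]:

def feature_matrix(layer_op):
    # flatten a [1, H, W, C] activation into a [1, H*W, C] matrix
    shape = tf.shape(layer_op)
    return tf.reshape(layer_op, [1, shape[1] * shape[2], shape[3]])

def gram_matrix(features):
    # [1, C, C] Gram matrix of the flattened features
    return tf.matmul(features, features, transpose_a=True)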
Example #13
def precompute_gram_matrices(image, final_endpoint='fc8'):
    with tf.Session() as session:
        end_points = vgg.vgg16(image, final_endpoint=final_endpoint)

        # vgg.py builds its layers inside tf.variable_scope('vgg_16'),
        # so restore the checkpoint variables from that scope
        tf.train.Saver(slim.get_variables('vgg_16')).restore(
            session, vgg.checkpoint_file)

        # gram_matrix is defined later; end_points comes from vgg.py and
        # maps layer names (e.g. 'conv1') to their output tensors
        return dict([(key, gram_matrix(value).eval())
                     for key, value in end_points.items()])
Example #14
def load_model(args):
    if args.model == 'vgg11':
        model = vgg.vgg11().to(device)
    elif args.model == 'vgg13':
        model = vgg.vgg13().to(device)
    elif args.model == 'vgg16':
        model = vgg.vgg16().to(device)
    elif args.model == 'vgg19':
        model = vgg.vgg19().to(device)
    elif args.model == 'modified_vgg11':
        model = modified_vgg.vgg11().to(device)
    elif args.model == 'modified_vgg13':
        model = modified_vgg.vgg13().to(device)
    elif args.model == 'modified_vgg16':
        model = modified_vgg.vgg16().to(device)
    elif args.model == 'modified_vgg19':
        model = modified_vgg.vgg19().to(device)
    else:
        raise ValueError('unknown model: {}'.format(args.model))
    return model
Example #15
    def __init__(self,
                 nclass,
                 backbone='vgg16',
                 aux=False,
                 pretrained_base=True,
                 norm_layer=nn.BatchNorm2d,
                 **kwargs):
        super(FCN32s, self).__init__()
        self.aux = aux
        if backbone == 'vgg16':
            self.pretrained = vgg16(pretrained=pretrained_base).features
        else:
            raise RuntimeError('unknown backbone: {}'.format(backbone))
        self.head = _FCNHead(512, nclass, norm_layer)
        if aux:
            self.auxlayer = _FCNHead(512, nclass, norm_layer)

        self.__setattr__('exclusive',
                         ['head', 'auxlayer'] if aux else ['head'])
Example #16
def main(_):
	x, img = load_image(FLAGS.input)
	# x.shape: (1, 224, 224, 3)
	# img.shape: (224, 224, 3)

	sess = tf.Session()

	print("\nLoading Vgg")
	imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
	vgg = vgg16(imgs, 'vgg.npz', sess)

	print("\nFeedforwarding")
	prob = sess.run(vgg.probs, feed_dict={vgg.imgs: x})[0]  # probs is the output of the network's fc3 layer
	preds = (np.argsort(prob)[::-1])[0:5]
	# np.argsort returns indices sorting values ascending; [::-1] flips to descending
	print('\nTop 5 classes are')
	for p in preds:
	    print(class_names[p], prob[p])

	# Target class
	predicted_class = preds[0]
	# Target layer for visualization
	layer_name = FLAGS.layer_name
	# Number of output classes of model being used
	nb_classes = 3

	cam3 = grad_cam(x, vgg, sess, predicted_class, layer_name, nb_classes)
	np.save('cam.npy',cam3)

	img = img.astype(float)
	img /= img.max()

	# Superimpose the visualization on the image
	new_img = img + 3 * cam3
	new_img /= new_img.max()
	

	# Display and save
	io.imshow(new_img)
	plt.show()
	io.imsave(FLAGS.output, new_img)
	print('-' * 19, 'done', '-' * 19)
Example #17
	def __init__(self, num_classes):
		super().__init__()

		feats = list(vgg.vgg16(pretrained=True).features.children())

		self.feats = nn.Sequential(*feats[0:10])
		self.feat3 = nn.Sequential(*feats[10:17])
		self.feat4 = nn.Sequential(*feats[17:24])
		self.feat5 = nn.Sequential(*feats[24:31])

		self.fconn = nn.Sequential(
			nn.Conv2d(512, 4096, 7, padding=3),
			nn.ReLU(inplace=True),
			nn.Dropout(),
			nn.Conv2d(4096, 4096, 1),
			nn.ReLU(inplace=True),
			nn.Dropout(),
		)
		self.score_feat3 = nn.Conv2d(256, num_classes, 1)
		self.score_feat4 = nn.Conv2d(512, num_classes, 1)
		self.score_fconn = nn.Conv2d(4096, num_classes, 1)
Example #18
    def __init__(self):
        # hyper-parameter
        self.batch_size = 256
        self.lr = 1e-2
        self.num_epochs = 160

        # data
        self.train_set = None
        self.test_set = None
        self.train_loader = None
        self.test_loader = None

        # net
        # self.net = vgg16(num_classes=10).cuda()
        self.net = vgg16(num_classes=10).half().cuda()
        self.init_state_dict = copy.deepcopy(self.net.state_dict())
        self.criterion = nn.CrossEntropyLoss()

        # show
        self.show_log = False
        self.show_vis = False

        # visdom
        if self.show_vis:
            self.vis = visdom.Visdom(port=8096,
                                     env='vgg16-train-test-diff-random-seed')

        # save date
        self.train_batch_loss_list = []
        self.train_batch_acc_list = []
        self.train_epoch_loss_list = []
        self.train_epoch_acc_list = []

        self.val_epoch_loss_list = []
        self.val_epoch_acc_list = []

        self.params_l2_norm_list = []
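
Because the whole net is cast to fp16 with .half(), the inputs must be cast to match, and it is common to keep BatchNorm in fp32 for numerical stability. A sketch of both precautions (assuming `images` and `labels` come from the train loader):

# keep BatchNorm layers in fp32 while the rest of the net runs in fp16
for m in self.net.modules():
    if isinstance(m, nn.BatchNorm2d):
        m.float()

# in the training loop, inputs must match the model's dtype
outputs = self.net(images.half().cuda())
loss = self.criterion(outputs, labels.cuda())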
Example #19
def main(_):
    x, img = load_image(FLAGS.input)

    sess = tf.Session()

    print("\nLoading Vgg")
    imgs = tf.placeholder(tf.float32, [None, 224, 224, 3])
    vgg = vgg16(imgs, 'vgg16_weights.npz', sess)

    print("\nFeedforwarding")
    prob = sess.run(vgg.probs, feed_dict={vgg.imgs: x})[0]
    preds = (np.argsort(prob)[::-1])[0:5]
    print('\nTop 5 classes are')
    for p in preds:
        print(class_names[p], prob[p])

    # Target class
    predicted_class = preds[0]
    # Target layer for visualization
    layer_name = FLAGS.layer_name
    # Number of output classes of model being used
    nb_classes = 1000

    cam3 = grad_cam(x, vgg, sess, predicted_class, layer_name, nb_classes)

    img = img.astype(float)
    img /= img.max()

    # Superimposing the visualization with the image.
    new_img = img + 3 * cam3
    new_img /= new_img.max()

    # Display and save
    io.imshow(new_img)
    plt.show()
    io.imsave(FLAGS.output, new_img)
Example #20
                         transform=transforms.Compose([
                             transforms.ToTensor(),
                             transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                  std=[0.229, 0.224, 0.225])
                         ]))
val_sampler = torch.utils.data.distributed.DistributedSampler(
    val_dataset, num_replicas=hvd.size(), rank=hvd.rank())
val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=args.val_batch_size,
                                         sampler=val_sampler,
                                         **kwargs)

# Set up standard ResNet-50 model.
# model = models.resnet50()
# model = resnet.resnet110()
model = vgg.vgg16()

if args.cuda:
    # Move model to GPU.
    model.cuda()

# Horovod: scale learning rate by the number of GPUs.
# Gradient Accumulation: scale learning rate by batches_per_allreduce
optimizer = optim.SGD(model.parameters(),
                      lr=(args.base_lr * args.batches_per_allreduce *
                          hvd.size()),
                      momentum=args.momentum,
                      weight_decay=args.wd)

# Horovod: (optional) compression algorithm.
Example #21
def Deeplabv3(weights='pascal_voc',
              input_tensor=None,
              input_shape=(512, 512, 3),
              classes=21,
              alpha=1.,
              def_conv=False,
              trainer=False):
    """ Instantiates the Deeplabv3+ architecture

    Optionally loads weights pre-trained
    on PASCAL VOC. This model is available for TensorFlow only,
    and can only be used with inputs following the TensorFlow
    data format `(width, height, channels)`.
    # Arguments
        weights: one of 'pascal_voc' (pre-trained on pascal voc)
            or None (random initialization)
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: shape of the input image, in HxWxC format.
            The PASCAL VOC model was trained on (512, 512, 3) images.
        classes: number of desired classes. If classes != 21,
            the last layer is initialized randomly.
        alpha: width multiplier for the stem convolution, as in the
            MobileNetV2 paper:
                - If `alpha` < 1.0, proportionally decreases the number
                    of filters.
                - If `alpha` > 1.0, proportionally increases the number
                    of filters.
                - If `alpha` = 1, the default number of filters is used.
        def_conv: whether the VGG-16 feature extractor uses
            deformable convolutions.
        trainer: if True, a Flatten + Dense classification head is appended.

    # Returns
        A Keras model instance.
    """

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    OS = 8
    first_block_filters = _make_divisible(32 * alpha, 8)
    x = Conv2D(first_block_filters,
               kernel_size=3,
               strides=(2, 2),
               padding='same',
               use_bias=False,
               name='Conv')(img_input)
    x = BatchNormalization(epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
    x = Activation(relu6, name='Conv_Relu6')(x)

    ####### MAIN FEATURE EXTRACTOR #######
    vgg_bare = vgg16(train=trainer,
                     num_classes=classes,
                     input_shape=input_shape,
                     deformable=def_conv,
                     normalizer=False,
                     full_model=False,
                     last_pooling=True)
    (_, x) = vgg_bare.core(x)
    ######################################
    # end of feature extractor

    # branching for Atrous Spatial Pyramid Pooling

    # Image Feature branch
    #out_shape = int(np.ceil(input_shape[0] / OS))
    b4 = AveragePooling2D(pool_size=(int(np.ceil(input_shape[0] / OS)),
                                     int(np.ceil(input_shape[1] / OS))))(x)
    b4 = Conv2D(256, (1, 1),
                padding='same',
                use_bias=False,
                name='image_pooling')(b4)
    b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
    b4 = Activation('relu')(b4)
    b4 = BilinearUpsampling((int(np.ceil(input_shape[0] / OS)),
                             int(np.ceil(input_shape[1] / OS))))(b4)

    # simple 1x1
    b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)
    b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
    b0 = Activation('relu', name='aspp0_activation')(b0)

    # there are only 2 branches in mobilenetV2. not sure why
    x = Concatenate()([b4, b0])

    x = Conv2D(256, (1, 1),
               padding='same',
               use_bias=False,
               name='concat_projection')(x)
    x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
    x = Activation('relu')(x)
    x = Dropout(0.1)(x)

    x = Conv2D(classes, (1, 1), padding='same',
               name='logits_semantic_custom')(x)
    x = BilinearUpsampling(output_size=(input_shape[0], input_shape[1]))(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    if (trainer):
        x = Flatten(name='flat')(x)
        x = Dense(classes, activation='relu', name='fc')(x)
    model = Model(inputs, x, name='deeplabv3plus')

    # load weights

    #if weights == 'pascal_voc':
    #    if backbone == 'xception':
    #        weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
    #                                WEIGHTS_PATH_X,
    #                                cache_subdir='models')
    #    else:
    #        weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
    #                                WEIGHTS_PATH_MOBILE,
    #                                cache_subdir='models')
    #    model.load_weights(weights_path, by_name=True)
    #elif weights == 'cityscapes':
    #    if backbone == 'xception':
    #        weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5',
    #                                WEIGHTS_PATH_X_CS,
    #                                cache_subdir='models')
    #    else:
    #        weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5',
    #                                WEIGHTS_PATH_MOBILE_CS,
    #                                cache_subdir='models')
    #    model.load_weights(weights_path, by_name=True)
    return model
Example #22
import argparse
import os
import numpy as np
import cv2
import mxnet as mx

import vgg
import gradcam

# Receive image path from command line
parser = argparse.ArgumentParser(description='Grad-CAM demo')
parser.add_argument('img_path', metavar='image_path', type=str, help='path to the image file')

args = parser.parse_args()

# We'll use VGG-16 for visualization
network = vgg.vgg16(pretrained=True, ctx=mx.cpu())
# We'll resize images to 224x224 as part of preprocessing
image_sz = (224, 224)

def preprocess(data):
    """Preprocess the image before running it through the network"""
    data = mx.image.imresize(data, image_sz[0], image_sz[1])
    data = data.astype(np.float32)
    data = data/255
    # These mean values were obtained from
    # https://mxnet.incubator.apache.org/api/python/gluon/model_zoo.html
    data = mx.image.color_normalize(data,
                                    mean=mx.nd.array([0.485, 0.456, 0.406]),
                                    std=mx.nd.array([0.229, 0.224, 0.225]))
    data = mx.nd.transpose(data, (2,0,1)) # Channel first
    return data
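
From here, a minimal inference sketch using the pieces above (the Grad-CAM call itself is omitted; `args.img_path` comes from the parser):

img = mx.image.imread(args.img_path)
data = preprocess(img).expand_dims(axis=0)  # add the batch dimension
probs = network(data).softmax()
print('predicted class id:', int(probs.argmax(axis=1).asscalar()))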
Example #23
from torch.utils.data import DataLoader

from DatasetAB import DatasetAB
from generator import Generator
from vgg import vgg16

dataset = DatasetAB('../faces94/malestaff/tony/',
                    '../faces94/malestaff/voudcx/')
dataloader = DataLoader(dataset)
Attacker = Generator(180, 200, 3, 64, 256)
Victim = vgg16(num_classes=2)

print(Attacker)
print('=' * 35)
print(Victim)

for data in dataloader:
    img_a, img_b = data
    masked_img = Attacker(img_a, img_b)
Example #24
def main_worker(gpu, ngpus_per_node, args):
    perfStats = {}
    for batchSize in [1, 2, 4, 8, 16, 32]:
    # for batchSize in []:
        args.batch_size = batchSize
        args.print_freq = 500

        global best_acc1
        args.gpu = gpu

        # create model
        model = models.__dict__["vgg16"]()

        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model = model.cuda(args.gpu)
        else:
            # DataParallel will divide and allocate batch_size to all available GPUs
            if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
                model.features = torch.nn.DataParallel(model.features)
                model.cuda()
            else:
                model = torch.nn.DataParallel(model).cuda()

        # define loss function (criterion) and optimizer
        criterion = nn.CrossEntropyLoss().cuda(args.gpu)
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay)
        cudnn.benchmark = True

        # Data loading code
        traindir = os.path.join(args.data, 'train')
        valdir = os.path.join(args.data, 'val')
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                        std=[0.229, 0.224, 0.225])

        train_dataset = SyntheticDataset((3, 224, 224), 500)
        

        if args.distributed:
            train_sampler = torch.utils.data.distributed.DistributedSampler(
                train_dataset)
        else:
            train_sampler = None

        train_loader = torch.utils.data.DataLoader(
            train_dataset, batch_size=args.batch_size, shuffle=(
                train_sampler is None),
            num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)

        for epoch in range(args.start_epoch, args.epochs):
            if args.distributed:
                train_sampler.set_epoch(epoch)
            adjust_learning_rate(optimizer, epoch, args)

            perfStats[batchSize] = Perf({0: 'load', 1: 'fp', 2: 'loss', 3: 'zero', 4: 'bp', 5: 'opt', 6: 'total/bat', 7: 'totalCPU'})
            # train for one epoch
            train(train_loader, model, criterion, optimizer, epoch, args, perfStats[batchSize])

            if len(perfStats) == 1:
                perfStats[batchSize].printHeader()
            perfStats[batchSize].printAll(batchSize, 1, 0)

    for batchSize in [1, 2, 4, 8, 16, 32]:
        perfStats = {}
        for splitCount in [1, 2, 4, 8, 16, 32]:
            args.batch_size = batchSize
            args.print_freq = 500

            global best_acc1
            args.gpu = gpu

            if args.gpu is not None:
                print("Use GPU: {} for training".format(args.gpu))

            if args.distributed:
                if args.dist_url == "env://" and args.rank == -1:
                    args.rank = int(os.environ["RANK"])
                if args.multiprocessing_distributed:
                    # For multiprocessing distributed training, rank needs to be the
                    # global rank among all the processes
                    args.rank = args.rank * ngpus_per_node + gpu
                dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                        world_size=args.world_size, rank=args.rank)
            # create model
            # if args.pretrained:
            #     print("=> using pre-trained model '{}'".format(args.arch))
            #     model = models.__dict__[args.arch](pretrained=True)
            # else:
            #     print("=> creating model '{}'".format(args.arch))
            #     model = models.__dict__[args.arch]()
            #     if args.arch == "vgg16":
            #         model = vgg16()
            model = vgg16(splitCount)

            if args.distributed:
                # For multiprocessing distributed, DistributedDataParallel constructor
                # should always set the single device scope, otherwise,
                # DistributedDataParallel will use all available devices.
                if args.gpu is not None:
                    torch.cuda.set_device(args.gpu)
                    model.cuda(args.gpu)
                    # When using a single GPU per process and per
                    # DistributedDataParallel, we need to divide the batch size
                    # ourselves based on the total number of GPUs we have
                    args.batch_size = int(args.batch_size / ngpus_per_node)
                    model = torch.nn.parallel.DistributedDataParallel(
                        model, device_ids=[args.gpu])
                else:
                    model.cuda()
                    # DistributedDataParallel will divide and allocate batch_size to all
                    # available GPUs if device_ids are not set
                    model = torch.nn.parallel.DistributedDataParallel(model)
            elif args.gpu is not None:
                torch.cuda.set_device(args.gpu)
                model = model.cuda(args.gpu)
            else:
                # DataParallel will divide and allocate batch_size to all available GPUs
                if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
                    model.features = torch.nn.DataParallel(model.features)
                    model.cuda()
                else:
                    model = torch.nn.DataParallel(model).cuda()

            # define loss function (criterion) and optimizer
            criterion = nn.CrossEntropyLoss().cuda(args.gpu)

            optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)

            cudnn.benchmark = True

            # Data loading code
            traindir = os.path.join(args.data, 'train')
            valdir = os.path.join(args.data, 'val')
            normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                            std=[0.229, 0.224, 0.225])

            # train_dataset = datasets.ImageFolder(
            #     traindir,
            #     transforms.Compose([
            #         transforms.RandomResizedCrop(224),
            #         transforms.RandomHorizontalFlip(),
            #         transforms.ToTensor(),
            #         normalize,
            #     ]))
            train_dataset = SyntheticDataset((3, 224, 224), 500)
            

            if args.distributed:
                train_sampler = torch.utils.data.distributed.DistributedSampler(
                    train_dataset)
            else:
                train_sampler = None

            train_loader = torch.utils.data.DataLoader(
                train_dataset, batch_size=args.batch_size, shuffle=(
                    train_sampler is None),
                num_workers=args.workers, pin_memory=True, sampler=train_sampler, drop_last=True)

            for epoch in range(args.start_epoch, args.epochs):
                if args.distributed:
                    train_sampler.set_epoch(epoch)
                adjust_learning_rate(optimizer, epoch, args)

                perfStats[splitCount] = Perf({0: 'load', 1: 'fp', 2: 'loss', 3: 'zero', 4: 'bp', 5: 'opt', 6: 'total/bat', 7: 'totalCPU'})
                # train for one epoch
                train(train_loader, model, criterion, optimizer, epoch, args, perfStats[splitCount])

                if len(perfStats) == 1:
                    perfStats[splitCount].printHeader()
                perfStats[splitCount].printAll(batchSize, splitCount, 0)
Example #25
        neptune.log_text('model', model_name)
        neptune.log_text('date_time', date_time)

    neptune.create_experiment(model_name)
    NeptuneLog()

    if model_name == 'vgg11':
        model = vgg.vgg11(pretrained=pretrain_check)
    elif model_name == 'vgg11_bn':
        model = vgg.vgg11_bn(pretrained=pretrain_check)
    elif model_name == 'vgg13':
        model = vgg.vgg13(pretrained=pretrain_check)
    elif model_name == 'vgg13_bn':
        model = vgg.vgg13_bn(pretrained=pretrain_check)
    elif model_name == 'vgg16':
        model = vgg.vgg16(pretrained=pretrain_check)
    elif model_name == 'vgg16_bn':
        model = vgg.vgg16_bn(pretrained=pretrain_check)
    elif model_name == 'vgg19':
        model = vgg.vgg19(pretrained=pretrain_check)
    elif model_name == 'vgg19_bn':
        model = vgg.vgg19_bn(pretrained=pretrain_check)
    model.eval()
    model = torch.nn.DataParallel(model).cuda()

    optimizer = optim.Adam(model.parameters(),
                           lr=learning_rate,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=1e-5)
    scheduler = ReduceLROnPlateau(optimizer,
Example #26
def main():
    # parse the argument
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'model',
        help='The model for image classification',
        choices=['alexnet', 'vgg13', 'vgg16', 'vgg19', 'resnet', 'googlenet'])
    args = parser.parse_args()

    # PaddlePaddle init
    paddle.init(use_gpu=True, trainer_count=7)

    image = paddle.layer.data(
        name="image", type=paddle.data_type.dense_vector(DATA_DIM))
    lbl = paddle.layer.data(
        name="label", type=paddle.data_type.integer_value(CLASS_DIM))

    extra_layers = None
    learning_rate = 0.01
    if args.model == 'alexnet':
        out = alexnet.alexnet(image, class_dim=CLASS_DIM)
    elif args.model == 'vgg13':
        out = vgg.vgg13(image, class_dim=CLASS_DIM)
    elif args.model == 'vgg16':
        out = vgg.vgg16(image, class_dim=CLASS_DIM)
    elif args.model == 'vgg19':
        out = vgg.vgg19(image, class_dim=CLASS_DIM)
    elif args.model == 'resnet':
        out = resnet.resnet_imagenet(image, class_dim=CLASS_DIM)
        learning_rate = 0.1
    elif args.model == 'googlenet':
        out, out1, out2 = googlenet.googlenet(image, class_dim=CLASS_DIM)
        loss1 = paddle.layer.cross_entropy_cost(
            input=out1, label=lbl, coeff=0.3)
        paddle.evaluator.classification_error(input=out1, label=lbl)
        loss2 = paddle.layer.cross_entropy_cost(
            input=out2, label=lbl, coeff=0.3)
        paddle.evaluator.classification_error(input=out2, label=lbl)
        extra_layers = [loss1, loss2]

    cost = paddle.layer.classification_cost(input=out, label=lbl)

    # Create parameters
    parameters = paddle.parameters.create(cost)

    # Create optimizer
    optimizer = paddle.optimizer.Momentum(
        momentum=0.9,
        regularization=paddle.optimizer.L2Regularization(rate=0.0005 *
                                                         BATCH_SIZE),
        learning_rate=learning_rate / BATCH_SIZE,
        learning_rate_decay_a=0.1,
        learning_rate_decay_b=128000 * 35,
        learning_rate_schedule="discexp", )

    train_reader = paddle.batch(
        paddle.reader.shuffle(
            flowers.train(),
            # To use other data, replace the above line with:
            # reader.train_reader('train.list'),
            buf_size=1000),
        batch_size=BATCH_SIZE)
    test_reader = paddle.batch(
        flowers.valid(),
        # To use other data, replace the above line with:
        # reader.test_reader('val.list'),
        batch_size=BATCH_SIZE)

    # Create trainer
    trainer = paddle.trainer.SGD(
        cost=cost,
        parameters=parameters,
        update_equation=optimizer,
        extra_layers=extra_layers)

    # End batch and end pass event handler
    def event_handler(event):
        if isinstance(event, paddle.event.EndIteration):
            if event.batch_id % 1 == 0:
                print "\nPass %d, Batch %d, Cost %f, %s" % (
                    event.pass_id, event.batch_id, event.cost, event.metrics)
        if isinstance(event, paddle.event.EndPass):
            with gzip.open('params_pass_%d.tar.gz' % event.pass_id, 'w') as f:
                trainer.save_parameter_to_tar(f)

            result = trainer.test(reader=test_reader)
            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)

    trainer.train(
        reader=train_reader, num_passes=200, event_handler=event_handler)
Example #27
import torch.nn.functional as F
import torch.optim as optim
# from model.roi_layers import ROIPool
from torchvision.ops import RoIPool as ROIPool
# from skimage import io

import vgg, cv2, time
from mydataset import MyDataset
from image_processing import resizeThermal

# from image_processing import showTensor

rgb_mean = (0.4914, 0.4822, 0.4465)
rgb_std = (0.2023, 0.1994, 0.2010)

raw_vgg16 = vgg.vgg16(pretrained=True)


class MyRRN(nn.Module):
    def __init__(self):
        super(MyRRN, self).__init__()
        self.features = raw_vgg16.features
        # self.roi_pool = ROIPool((7, 7), 1/16)
        self.deconv1 = nn.ConvTranspose2d(in_channels=512,
                                          out_channels=64,
                                          kernel_size=4,
                                          stride=8,
                                          padding=1)
        self.conv1 = nn.Conv2d(in_channels=64,
                               out_channels=1,
                               kernel_size=3,
Example #28
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
        if args.arch == "vgg16":
            model = vgg16()

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }, is_best)
Example #29
indices = range(start_point, end_point)


val_loader = torch.utils.data.DataLoader(
    val_set,
    batch_size=batch_size, shuffle=False,
    num_workers=workers, pin_memory=True,
    sampler=module.rangeSampler(indices))
#========================data loading=========================


#========================create model=========================
if model_name == 'vgg':
    # quantization_factor = [(feature_bitwidth[j], weight_bitwidth[j]) for j in range(16)]
    quantization_factor = [(feature_bitwidth[j], None) for j in range(16)]
    model = vgg.vgg16(pretrained=True, bit_width=quantization_factor)
elif model_name == 'squeeze':
    quantization_factor = [(feature_bitwidth[j], weight_bitwidth[j]) for j in range(26)]
    model = squeeze.squeezenet1_0(pretrained=True, bit_width=quantization_factor)

model = torch.nn.DataParallel(model).cuda()

#========================create model=========================


#========================weight processing=========================
state_dict = model.state_dict()

counter = 0
for module_name, layer in model.named_modules():
    if type(layer) == torch.nn.modules.conv.Conv2d \
Example #30
train_loader = torch.utils.data.DataLoader(train_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           pin_memory=True)

valid_dataset = datasets.ImageFolder(valid_path, transform=valid_transform)

valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           pin_memory=True)

# load prune model
checkpoint = torch.load(args.prune_path)

model = vgg16(pretrained=True).to(args.device)

mask = checkpoint['mask']

# Conv 5-3 [output]
model.features[-3] = conv_post_mask(model.features[-3], mask[0])
# FC 6 [input, output]
model.classifier[0] = linear_mask(model.classifier[0], mask[0], mask[1])
# FC 7 [input]
model.classifier[3] = linear_pre_mask(model.classifier[3], mask[1])

model.load_state_dict(checkpoint['state_dict'])

criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr, weight_decay=1e-4)
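
A brief validation pass to check the accuracy of the pruned-and-restored model; a sketch using the `valid_loader` defined above:

# measure top-1 accuracy of the pruned model on the validation set
model.eval()
correct = 0
with torch.no_grad():
    for images, targets in valid_loader:
        images, targets = images.to(args.device), targets.to(args.device)
        correct += (model(images).argmax(dim=1) == targets).sum().item()
print('top-1 accuracy: {:.2%}'.format(correct / len(valid_dataset)))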
Example #31
def showmodel():
    network = vgg16(num_classes=10)
    network = network.cuda()
    summary(network, (3, 224, 224))