Example #1
    def build_model(self):

        # placeholders for the images and labels
        self.y = tf.placeholder(tf.float32, [self.batch_size, self.y_dim], name='y')
        self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.img_size, self.img_size, 3], name='img')
        self.keep_prob = tf.placeholder("float")

        # choose the model
        if self.type_of_model == 'Alexnet':
            network = AlexNet(self.inputs, self.y_dim, self.keep_prob)
            score = network.fc3
        elif self.type_of_model == 'ResNet':
            network = Resnet_50_101_152.resnet(self.inputs, self.resnet_type, self.y_dim)
            score = tf.squeeze(network, axis=(1, 2))
        elif self.type_of_model == 'VGG19':
            network = VGG19(self.inputs, self.keep_prob, self.y_dim)
            score = network.fc8
        elif self.type_of_model == 'inception_V4':
            score = inception_V4.inference(self.inputs, self.batch_size, self.y_dim)
        else:
            # fail fast: otherwise `score` would be undefined below
            raise ValueError('there is no model named %s' % self.type_of_model)

        softmax_result = tf.nn.softmax(score)

        # define the loss function and the corresponding optimizer;
        # clip the softmax output so log(0) cannot produce NaNs
        cross_entropy = -tf.reduce_sum(
            self.y * tf.log(tf.clip_by_value(softmax_result, 1e-10, 1.0)))
        self.Optimizer = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

        # check whether the predictions on the validation set are correct
        correct_prediction = tf.equal(tf.argmax(softmax_result, 1), tf.argmax(self.y, 1))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
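
    # --- A minimal sketch (not part of the original example) of how the nodes
    # built above might be driven in a TF1 session. `get_batch` is an assumed
    # helper that yields (images, labels) matching the placeholder shapes.
    def train_sketch(self, get_batch, steps=1000):
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for step in range(steps):
                batch_x, batch_y = get_batch()
                sess.run(self.Optimizer,
                         feed_dict={self.inputs: batch_x,
                                    self.y: batch_y,
                                    self.keep_prob: 0.5})
                if step % 100 == 0:
                    acc = sess.run(self.accuracy,
                                   feed_dict={self.inputs: batch_x,
                                              self.y: batch_y,
                                              self.keep_prob: 1.0})
                    print('step %d, train accuracy %.4f' % (step, acc))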
Example #3
class MinistTest(object):
    def __init__(self, model_path, use_cuda=True):
        #self.net = Net()
        self.net = AlexNet()
        self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
        state_dict = torch.load(model_path, map_location=self.device)
        self.net.load_state_dict(state_dict)
        print("Loading weights from {}... Done!".format(model_path))
        self.net.to(self.device)
        self.size = (28, 28)
        self.norm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307, ), (0.3081, )),
        ])

    def preprocess(self, im_crops):
        """
        TODO:
            1. to float with scale from 0 to 1
            2. resize to (64, 128) as Market1501 dataset did
            3. concatenate to a numpy array
            3. to torch Tensor
            4. normalize
        """
        def _resize(im, size):
            return cv2.resize(im.astype(np.float32) / 255., size)

        im_batch = torch.cat(
            [self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops],
            dim=0).float()
        return im_batch

    def inference(self, im_crops):
        im_batch = self.preprocess(im_crops)
        with torch.no_grad():
            im_batch = im_batch.to(self.device)
            features = self.net(im_batch)
        return features.cpu()
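
# --- A hypothetical usage sketch (not part of the original example);
# the checkpoint path and the image file name are placeholder names.
if __name__ == '__main__':
    tester = MinistTest('./model.pt', use_cuda=True)
    im = cv2.imread('digit.png', cv2.IMREAD_GRAYSCALE)  # (H, W) uint8 crop
    features = tester.inference([im])
    print('predicted digit:', features.argmax(dim=1).item())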
Example #4
import tensorflow as tf
from Alexnet import AlexNet

####################################################
# The following lines are needed for RTX GPUs only.
# Note: comment them out if you're using a GTX GPU.
####################################################
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
###################################################

# Model instance
model = AlexNet()

# Dataset download
cifar10_dataset = tf.keras.datasets.cifar10
(train_images, train_labels), (test_images, test_labels) = cifar10_dataset.load_data()

# Data exploration
print("[INFO] # of Training images with shape", train_images.shape)
print("[INFO] # of Testing images with shape", test_images.shape)
print("[INFO] Type of the labels", type(train_labels[0]))
print("[INFO] # of Training labels", len(train_labels))
print("[INFO] # of Testing labels", len(test_labels))

# Resize images to 227x227x3, AlexNet's expected input size.
# We use the default bilinear interpolation for upsampling.
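
# --- The resizing code itself is cut off in this example; a sketch of one
# lazy way to do it with tf.data. tf.image.resize defaults to bilinear
# interpolation; the batch size of 32 is an assumption.
def upscale(image, label):
    return tf.image.resize(image, (227, 227)), label

train_ds = (tf.data.Dataset.from_tensor_slices((train_images, train_labels))
            .map(upscale)
            .batch(32))
test_ds = (tf.data.Dataset.from_tensor_slices((test_images, test_labels))
           .map(upscale)
           .batch(32))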
Example #5
def main():
    # Training settings
    args = params()
    data_dir = cfgs.DataDir
    test_dir = cfgs.TestDir
    train_file = args.train_file
    test_file = args.test_file
    model_dir = cfgs.ModelSaveDir
    model_path = os.path.join(model_dir, cfgs.ModelPrefix)
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    #*******************************************************************************create logger
    log_dir = cfgs.LogDir
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    logger = logging.getLogger()
    log_name = time.strftime('%F-%T', time.localtime()).replace(':', '-') + '.log'
    log_path = os.path.join(log_dir, log_name)
    hdlr = logging.FileHandler(log_path)
    logger.addHandler(hdlr)
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
    #*******************************************************************************load data
    # train_loader = torch.utils.data.DataLoader(
    #     datasets.MNIST('./data', train=True, download=True,
    #                    transform=transforms.Compose([
    #                        transforms.ToTensor(),
    #                        transforms.Normalize((0.1307,), (0.3081,))
    #                    ])),
    #     batch_size=args.batch_size, shuffle=True, **kwargs)
    #     #transforms.Normalize((0.1307,), (0.3081,))
    # test_loader = torch.utils.data.DataLoader(
    #     datasets.MNIST('./data', train=False, transform=transforms.Compose([
    #                        transforms.ToTensor(),
    #                     transforms.Normalize((0.1307,), (0.3081,))
    #                    ])),
    #     batch_size=args.test_batch_size, shuffle=True, **kwargs)
    #***************

    dataset_train = ReadDataset(
        train_file,
        data_dir,
        transf=transforms.Compose([
            transforms.Normalize((0.1307, ), (0.3081, ))
        ]))
    train_loader = DataLoader(dataset_train,
                              args.batch_size,
                              num_workers=1,
                              shuffle=True,
                              pin_memory=True)
    dataset_test = ReadDataset(test_file,
                               test_dir,
                               transf=transforms.Compose([
                                   transforms.Normalize((0.1307, ), (0.3081, ))
                               ]))
    test_loader = DataLoader(dataset_test,
                             args.batch_size,
                             num_workers=1,
                             shuffle=True,
                             pin_memory=True)
    # load model
    model = AlexNet()
    if args.load_num is not None:
        loadpath = model_path + '_' + str(args.load_num) + '.pt'
        state_dict = torch.load(loadpath, map_location=device)
        model.load_state_dict(state_dict)
    model.to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    #optimizer = optim.Adam(model.parameters(),lr=args.lr,weight_decay=5e-4)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     patience=3,
                                                     verbose=True)
    for epoch in range(1, args.epochs + 1):
        train(args, model, device, train_loader, optimizer, epoch, logger)
        ave_loss = test(args, model, device, test_loader, logger)
        if epoch % cfgs.ModelSaveInterval == 0:
            torch.save(model.state_dict(), model_path + "_%d.pt" % epoch)
        scheduler.step(ave_loss)  # let ReduceLROnPlateau react to the test loss

    if args.save_model:
        torch.save(model.state_dict(), model_path + ".pt")
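
# --- train() and test() are called above but not defined in this example.
# A minimal sketch of what train() might look like, matching the call
# signature; the loss function and logging interval are assumptions.
import torch.nn.functional as F

def train(args, model, device, train_loader, optimizer, epoch, logger):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        loss = F.cross_entropy(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            logger.info('epoch %d, batch %d, loss %.4f',
                        epoch, batch_idx, loss.item())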
Example #6

# define the iterator
iterator = Iterator.from_structure(tr_data.output_types,
                                   tr_data.output_shapes)

training_initalize = iterator.make_initializer(tr_data)
testing_initalize = iterator.make_initializer(test_data)

# define the data fetched at each iteration
next_batch = iterator.get_next()
x = tf.placeholder(tf.float32, [None, 227, 227, 3])
y = tf.placeholder(tf.float32, [None, num_classes])
keep_prob = tf.placeholder(tf.float32)

# pass the image data through the AlexNet network
model = AlexNet(x, keep_prob, num_classes, train_layers)

# List of trainable variables of the layers we want to train
var_list = [
    v for v in tf.trainable_variables() if v.name.split('/')[0] in train_layers
]

# the output of the full network graph
score = model.fc8

with tf.name_scope('loss'):
    # loss function
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=score, labels=y))

gradients = tf.gradients(loss, var_list)
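
# --- The example stops after computing the gradients; a minimal sketch of
# the usual next step. The learning rate of 0.001 is an assumption.
with tf.name_scope('train'):
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
    train_op = optimizer.apply_gradients(list(zip(gradients, var_list)))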
Example #7
tr_data = tr_data.data
test_data = test_data.data

# create iterators to feed data into the network
tr_iterator = Iterator.from_structure(tr_data.output_types, tr_data.output_shapes)
tr_initalize = tr_iterator.make_initializer(tr_data)  # tr_data serves as the retrieval database
tr_batch = tr_iterator.get_next()

test_iterator = Iterator.from_structure(test_data.output_types, test_data.output_shapes)
test_initalize = test_iterator.make_initializer(test_data)  # test_data serves as the query images
test_batch = test_iterator.get_next()

# build the model
x = tf.placeholder(tf.float32, [None, 227, 227, 3])
model = AlexNet(x, 1, classnum, skip_layer='')
l = model.fc8  # take the fc8 output as the retrieval feature
fc8 = model.fc8
saver = tf.train.Saver()

epoch = int(len(train_image) / 100) + 1
feature = np.empty(shape=[0, 30])
with tf.Session() as sess:
    sess.run(tr_initalize)
    sess.run(test_initalize)
    sess.run(tf.global_variables_initializer())
    test_batch, label = sess.run(test_batch)
    saver.restore(sess, "./tmp/fruitcheckpoints/model_epoch20.ckpt")  # load the trained parameters

    query_feature = sess.run(l, feed_dict={x: test_batch})
    query_fc8 = sess.run(fc8, feed_dict={x: test_batch})
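
    # --- A sketch of a ranking step, which the example never reaches: rank
    # the database images by Euclidean distance to the first query feature.
    # It assumes `feature` has been filled with the database features (that
    # accumulation loop is not shown here).
    dists = np.linalg.norm(feature - query_feature[0], axis=1)
    top10 = np.argsort(dists)[:10]  # indices of the 10 closest database images
    print('top-10 retrieval indices:', top10)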
Example #8
batch_size = 32

test_transforms = transforms.Compose([
    transforms.Resize((224, 224)),  # fixed size so all images batch cleanly
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

test_dir = './Atestdataset'
test_datasets = datasets.ImageFolder(test_dir, transform=test_transforms)
test_dataloader = torch.utils.data.DataLoader(test_datasets,
                                              batch_size=batch_size,
                                              shuffle=True)

model = AlexNet()
model.load_state_dict(
    torch.load('./Alex.pth', map_location=torch.device('cpu')))
model.eval()  # switch to evaluation mode
total = 0
correct = 0
with torch.no_grad():  # no gradients needed during evaluation
    for images, labels in test_dataloader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print("Accuracy: %.2f%%" % (100.0 * correct / total))
Example #9

iterator = Iterator.from_structure(data.output_types, data.output_shapes)

testing_initalize = iterator.make_initializer(data)
next_batch = iterator.get_next()

# build the query-image pipeline
q_data = Dataset.from_tensor_slices(X_query)
q_data = q_data.map(resize)
q_data = q_data.batch(num_query)

q_iterator = Iterator.from_structure(q_data.output_types, q_data.output_shapes)

query_initalize = q_iterator.make_initializer(q_data)
query_batch = q_iterator.get_next()
# pass the database images through AlexNet
model = AlexNet(x, 1, 10, skip_layer='')
l = model.fc8
fc8 = model.fc8
saver = tf.train.Saver()

epoch = int(num_test / batch_num)  # the batch size is at most 600, so the data is fed in several passes
test_feature = np.empty(shape=[0, 10])

with tf.Session() as sess:
    sess.run(testing_initalize)
    sess.run(query_initalize)
    sess.run(tf.global_variables_initializer())

    query_batch = sess.run(query_batch)
    saver.restore(sess, "./tmp/cifarcheckpoints/cifarmodel_epoch20.ckpt")  # load the trained parameters