Example #1
    def __init__(self,
                 num_classes=1000,
                 gpu_id=0,
                 print_freq=10,
                 epoch_print=10):
        self.num_classes = num_classes
        self.gpu = gpu_id
        self.print_freq = print_freq
        self.epoch_print = epoch_print

        torch.cuda.set_device(self.gpu)

        self.loss_function = nn.CrossEntropyLoss().cuda(self.gpu)

        self.model = model.AlexNet(self.num_classes).cuda(self.gpu)

        self.train_losses = []
        self.train_acc = []
        self.test_losses = []
        self.test_acc = []
        self.best_acc = 0
Example #2

import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader

import model

data_transform = transforms.Compose([
    # ... transform list truncated in the original snippet ...
])

test_dataset = torchvision.datasets.ImageFolder(root='work/data/test/',
                                                transform=data_transform)
test_loader = DataLoader(test_dataset,
                         batch_size=batch_size,
                         shuffle=True,
                         num_workers=0)

# flower class names
data_classes = test_dataset.classes

# choose GPU if available, otherwise CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

net = model.AlexNet()
net.load_state_dict(torch.load('alexnet_flower_500.pkl'))
net.to(device)
net.eval()  # evaluation mode: disables dropout

correct = 0
total = 0
with torch.no_grad():
    for data in test_loader:
        images, labels = data
        # .to() is not in-place; the result must be assigned back
        images, labels = images.to(device), labels.to(device)

        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy on the test set: %.2f%%' % (100.0 * correct / total))
Example #3
import torch
import torch.nn as nn
import torch.nn.functional as F
import model
from torch.autograd import Variable

N = 10
C_in = 3
D_in = 16
H = 224
W = 224
num_classes = 10

if __name__ == "__main__":
    m = model.AlexNet(32)

    x = Variable(torch.randn(N, C_in, H, W))
    class_weights = torch.Tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    # y = Variable(torch.multinomial(class_weights, N, replacement=True),
    #         requires_grad=False)
    y = Variable(torch.rand(N, 64))

    loss_fn = torch.nn.MSELoss()

    y_pred = m(x)  # call the module directly rather than .forward(), so hooks run
    print(y_pred.size())

    loss = loss_fn(y_pred, y)
    print(loss)
Example #4
import sys
import numpy as np
import model as Model
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from timeit import default_timer as timer
from torch.autograd import Variable
from PIL import Image, ImageDraw, ImageFont

trained_model = "weights/human36m.pth.tar"
model = Model.AlexNet(32)
weights = torch.load(trained_model, map_location=lambda storage, loc: storage)
model.load_state_dict(weights["state_dict"])

normalize = transforms.Normalize(mean=[0.00094127, 0.00060294, 0.0005603],
                                 std=[0.02102633, 0.01346872, 0.01251619])

transform = transforms.Compose(
    [transforms.Resize((220, 220)),  # transforms.Scale was renamed Resize in torchvision
     transforms.ToTensor(), normalize])

joint_names = [
    "Hips", "RightUpLeg", "RightLeg", "RightFoot", "RightToeBase", "Site",
    "LeftUpLeg", "LeftLeg", "LeftFoot", "LeftToeBase", "Site", "Spine",
    "Spine1", "Neck", "Head", "Site", "LeftShoulder", "LeftArm", "LeftForeArm",
    "LeftHand", "LeftHandThumb", "Site", "L_Wrist_End", "Site",
    "RightShoulder", "RightArm", "RightForeArm", "RightHand", "RightHandThumb",
    "Site", "R_Wrist_End", "Site"
Example #5
import os
import pickle

import numpy as np
from PIL import Image
from keras.models import Model
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
from sklearn.neighbors import DistanceMetric

import model


def feature_extraction(image_path, batchsize, out_path, out_name, save_flag):
    """
    Extract features from AlexNet.
    Be careful: the total number of images must be a multiple of 1000.
    """

    print('\nLoad AlexNet KERAS model...')
    # path to the weights of the pretrained AlexNet
    alexnet = model.AlexNet(
        '/home/xt/tong/mopsi/feature/keras/alexnet_weights.h5')

    # keep everything up to the second dense layer as the feature extractor
    # (Keras 2 spells the kwargs inputs/outputs; Keras 1 used input/output)
    net = Model(inputs=alexnet.input,
                outputs=alexnet.get_layer('dense_2').output)

    nb_image = len(image_path)
    feature = np.zeros((nb_image, 4096))

    # divide into minibatches of 1000 images
    index = 0
    nb_batch = nb_image // 1000

    # process 1000 images at a time
    for m in range(nb_batch):
        image_batch = np.zeros((1000, 3, 227, 227))

        print('\nLoad 1000-image batch %d...' % m)
        for i, img_path in enumerate(image_path[m * 1000:m * 1000 + 1000]):
            im = Image.open(img_path)
            im = im.resize((227, 227))
            im = np.array(im).astype(np.float32)
            if im.shape == (227, 227):  # grayscale: expand to three channels
                im = to_rgb(im)  # helper defined elsewhere in the original repo
            # subtract the ImageNet per-channel means
            im[:, :, 0] -= 123.68
            im[:, :, 1] -= 116.779
            im[:, :, 2] -= 103.939
            im = im.transpose((2, 0, 1))  # HWC -> CHW
            im = np.expand_dims(im, axis=0)
            image_batch[i] = im

        # extraction of feature
        print "Extract features..."
        feature[m * 1000:m * 1000 + 1000] = net.predict(image_batch,
                                                        batch_size=batchsize)
    """
	# check distance between features
	dis = cosine_similarity(feature)
	print dis
	"""

    # save feature matrix to pkl file
    fp_f = open(os.path.join(out_path, out_name + '_feature.pkl'), 'wb')
    pickle.dump(feature, fp_f)
    fp_f.close()

    # write feature to txt
    fp = open(os.path.join(out_path, out_name + '_feature.txt'), "w")
    for item in feature:
        fp.write(' '.join(map(str, list(item))))
        fp.write('\n')
    fp.close()

    # save cosine distance
    if save_flag[0]:
        print('\nSave cosine distance...')
        cos_dis = cosine_distances(feature)
        fp_cos = open(out_path + 'dis/' + out_name + '_cosine_dis.pkl', 'wb')
        pickle.dump(cos_dis, fp_cos)
        fp_cos.close()

    # save euclidean distance
    if save_flag[1]:
        print('\nSave euclidean distance...')
        euc_dis = euclidean_distances(feature)
        fp_euc = open(out_path + 'dis/' + out_name + '_euclidean_dis.pkl',
                      'wb')
        pickle.dump(euc_dis, fp_euc)
        fp_euc.close()

    # save hamming distance
    if save_flag[2]:
        print('\nSave hamming distance...')
        dist = DistanceMetric.get_metric('hamming')
        ham_dis = dist.pairwise(np.where(feature > 0, 1, 0))
        fp_ham = open(out_path + 'dis/' + out_name + '_hamming_dis.pkl', 'wb')
        pickle.dump(ham_dis, fp_ham)
        fp_ham.close()

    # save jaccard distance
    if save_flag[3]:
        print('\nSave jaccard distance...')
        dist = DistanceMetric.get_metric('jaccard')
        jac_dis = dist.pairwise(feature != 0)
        fp_jac = open(out_path + 'dis/' + out_name + '_jaccard_dis.pkl', 'wb')
        pickle.dump(jac_dis, fp_jac)
        fp_jac.close()

    return feature
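
A hedged usage sketch for the function above; the paths and batch size are placeholders, not from the original repo. Per the docstring, the number of images must be a multiple of 1000, and save_flag toggles, in order, the cosine, euclidean, hamming, and jaccard outputs.

# Hypothetical call -- all paths are placeholders.
image_paths = ['images/%05d.jpg' % i for i in range(2000)]  # multiple of 1000
features = feature_extraction(image_paths,
                              batchsize=100,
                              out_path='output/',
                              out_name='mopsi',
                              save_flag=[True, False, False, False])
print(features.shape)  # (2000, 4096)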
Example #6
import sys
import time

import data_loader
import model

data_root = sys.argv[3]
root = sys.argv[4]
model_path = sys.argv[5]
data_path = sys.argv[6]

if sys.argv[2] == 'cifar10':
    lr = 1e-5
    dataset = data_loader.cifar_10_dataset(data_path + '/cifar-10-batches-py/',
                                           buffer_size=1024,
                                           batch_size=128)
    if sys.argv[1] == '0':
        f = open(root + '/AlexNet.log', 'w')
        model_name = 'AlexNetAtt'
        alexattnet = model.AlexNet(classes=10)

    elif sys.argv[1] == '1':
        f = open(root + '/AlexNetAttDSSpatial.log', 'w')
        model_name = 'AlexNetAttDSSpatial'
        alexattnet = model.AlexAttNet(classes=10)

    elif sys.argv[1] == '2':
        f = open(root + '/AlexNetAttPCSpatial.log', 'w')
        model_name = 'AlexNetAttPCSpatial'
        alexattnet = model.AlexNetAttSpatial(classes=10)

    elif sys.argv[1] == '3':
        f = open(root + '/AlexNetGAP.log', 'w')
        model_name = 'AlexNetAttGAP'
        alexattnet = model.AlexNetGAP(classes=10)
Example #7
import datasets
import model

# NOTE: PROJECT_DIR is assumed to be defined earlier in the original script.
DATA_DIR = '/home/vaibhavg/data/'

AFLW2000_DATA_DIR = DATA_DIR + 'AFLW2000/'
AFLW2000_MODEL_FILE = PROJECT_DIR + 'model/aflw2000_model.h5'
AFLW2000_TEST_SAVE_DIR = DATA_DIR + 'aflw2000_test/'

BIWI_DATA_DIR = DATA_DIR + 'Biwi/kinect_head_pose_db/hpdb/'
BIWI_MODEL_FILE = PROJECT_DIR + 'model/biwi_model.h5'
BIWI_TEST_SAVE_DIR = DATA_DIR + 'biwi_test/'

BIN_NUM = 66
INPUT_SIZE = 64
BATCH_SIZE = 16
EPOCHS = 20

dataset = datasets.Biwi(BIWI_DATA_DIR,
                        'filename_list.txt',
                        batch_size=BATCH_SIZE,
                        input_size=INPUT_SIZE,
                        ratio=0.95)

net = model.AlexNet(dataset,
                    BIN_NUM,
                    batch_size=BATCH_SIZE,
                    input_size=INPUT_SIZE)

net.train(BIWI_MODEL_FILE, max_epoches=EPOCHS, load_weight=False)

net.test(BIWI_TEST_SAVE_DIR)
Example #8
def main():
    global args, best_acc
    args = parser.parse_args()

    # Set paths
    base_path = os.path.normpath(args.data)
    train_path = os.path.join(base_path, "train/images")
    val_path = os.path.join(base_path, "val/images")
    target_path = os.path.join(base_path, "targets")
    print(train_path)

    torch.cuda.manual_seed(1)

    m = model.AlexNet(32)
    m.cuda()

    normalize = data_transforms.Normalize(
        mean=[0.00094127, 0.00060294, 0.0005603],
        std=[0.02102633, 0.01346872, 0.01251619])

    print("Loading data...")
    train_dset = human36m.HUMAN36MPose(
        train_path,
        target_path,
        transform=data_transforms.Compose([
            data_transforms.CropToTarget(20),
            data_transforms.Scale((220, 220)),
            data_transforms.RandomHorizontalFlip(),
            data_transforms.ToTensor(), normalize
        ]))

    val_dset = human36m.HUMAN36MPose(val_path,
                                     target_path,
                                     transform=data_transforms.Compose([
                                         data_transforms.CropToTarget(20),
                                         data_transforms.Scale((220, 220)),
                                         data_transforms.ToTensor(), normalize
                                     ]))

    train_loader = torch.utils.data.DataLoader(train_dset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    # Define loss function and optimizer
    criterion = nn.MSELoss().cuda()
    optimizer = torch.optim.SGD(m.parameters(),
                                args.learning_rate,
                                momentum=args.momentum)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint["epoch"]
            best_acc = checkpoint["best_acc"]
            m.load_state_dict(checkpoint["state_dict"])
            optimizer.load_state_dict(checkpoint["optimizer"])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint["epoch"]))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    print("Starting \"training\"")
    for epoch in range(args.start_epoch, args.epochs):
        # train
        train(train_loader, m, criterion, optimizer, epoch)
        acc = validate(val_loader, m, criterion)

        is_best = acc > best_acc
        best_acc = max(acc, best_acc)
        save_checkpoint(
            {
                "epoch": epoch + 1,
                "state_dict": m.state_dict(),
                "best_acc": best_acc,
                "optimizer": optimizer.state_dict(),
            }, is_best)
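
The snippet stops after the definition of main(); presumably the original script invokes it behind the usual entry-point guard:

if __name__ == "__main__":
    main()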
Example #9
import data_util
import model
import train_util

import torch
import torch.nn as nn
import torch.optim as optim

train_path = ''
val_path = ''

dset_loaders, dset_sizes, dset_classes = data_util.load_data(
    train_path=train_path, val_path=val_path)

print(dset_sizes)
print(dset_classes)

net = model.AlexNet().cuda()

criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), weight_decay=0.0005)
lr_scheduler = train_util.exp_lr_scheduler
lr = 0.001

best_model, best_acc = train_util.train(net, criterion, optimizer,
                                        lr_scheduler, dset_loaders, dset_sizes,
                                        lr, 40)

print('Saving the best model')
filename = 'trained_model_val_{:.2f}.pt'.format(best_acc)
torch.save(best_model.state_dict(), filename)
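
To reuse the saved weights later, the state dict can be loaded back into a fresh network, mirroring the loading pattern in Example #2 (a sketch; filename is whatever the run above produced):

# Reload the best model saved above for inference.
net = model.AlexNet().cuda()
net.load_state_dict(torch.load(filename))
net.eval()  # evaluation mode before running on new data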