Example #1
def evaluate(cnn, loss_fn, batch_eval, data_len):
    # Assumed signature: the snippet begins mid-function, so the name and the
    # arguments (model, loss function, eval DataLoader, total sample count) are inferred.
    total_acc = 0
    val_loss = 0
    i = 0
    for x_batch, y_batch in batch_eval:
        batch_len = len(x_batch)
        outputs = cnn(x_batch)
        loss = loss_fn(outputs, y_batch)
        val_loss += loss.item()
        i += 1
        # Predicted class = index of the largest logit in each row.
        _, prediction = torch.max(outputs.data, 1)
        correct = (prediction == y_batch).sum().item()
        acc = correct / batch_len
        total_acc += acc * batch_len
    # Sample-weighted accuracy over the whole set, and mean loss per batch.
    return (total_acc / data_len), (val_loss / i)


cnn = resnet18().to(device)

# Location of the experimental changes
#################################################################################
'''
Experiments on the choice of optimizer
'''
optimizer = Adam(cnn.parameters(), lr=0.01)  # use the Adam optimizer

# LR schedule: if the training loss has not decreased for 10 epochs, multiply the LR by 0.1

scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                       mode='min',
                                                       factor=0.1,
                                                       patience=100,
                                                       verbose=True)
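
The snippet stops before the training loop that would drive this optimizer/scheduler pair. A minimal sketch of such a loop, assuming the evaluate helper from Example #1 and placeholder names batch_train, batch_eval, val_len, num_epochs (none of these come from the original):

for epoch in range(num_epochs):
    cnn.train()
    for x_batch, y_batch in batch_train:
        x_batch, y_batch = x_batch.to(device), y_batch.to(device)
        optimizer.zero_grad()
        loss = loss_fn(cnn(x_batch), y_batch)
        loss.backward()
        optimizer.step()

    cnn.eval()
    with torch.no_grad():
        val_acc, val_loss = evaluate(cnn, loss_fn, batch_eval, val_len)
    # ReduceLROnPlateau is stepped once per epoch with the monitored metric.
    scheduler.step(val_loss)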

Example #2
import mxnet as mx
import mxnet.ndarray as nd
from mxnet.gluon import data as gdata
import cv2
from net import resnet18


# Read the test image as grayscale and resize to the 48x48 network input size.
img = cv2.imread('test_imgs/2.jpg', cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (48, 48))
print(img.shape)
# Add channel and batch axes: (48, 48) -> (1, 48, 48, 1) in NHWC layout.
img = nd.array(img).expand_dims(axis=2).expand_dims(axis=0)
print(img.shape)

transform_test = gdata.vision.transforms.Compose([
    gdata.vision.transforms.ToTensor(),  # HWC -> CHW, pixel values scaled to [0, 1]
    gdata.vision.transforms.Normalize([0.5],
                                      [0.5])])

img = transform_test(img)
net = resnet18(7)  # 7 output classes
ctx = mx.gpu(0)
# Load the trained weights onto the GPU and run the forward pass there.
net.load_parameters('trained_models/resnet18-epoch10-loss0.008498244381253217.params', ctx=ctx)

pred = net(img.as_in_context(ctx))[0]
idx = nd.argmax(pred, axis=0)
print(idx)
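
The example stops at the raw argmax NDArray. A small illustrative follow-up that converts it to a Python int and maps it to a name (class_names is a placeholder; the real 7-class mapping depends on the training data and is not given in the original):

class_names = ['class_%d' % k for k in range(7)]  # placeholder names only
print('predicted class:', class_names[int(idx.asscalar())])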
Example #3
    prob = out[0][pred].asscalar()
    return 'confidence=%f, class %s' % (prob, label[str(pred)])


def predict_cv(net, ctx, fname, label):
    # Read with OpenCV (BGR, HWC, uint8) and resize to the network input size.
    img = cv2.imread(fname)
    img = cv2.resize(img, (image_size, image_size))
    data, _ = transform(nd.array(img), -1)
    plt.imshow(data.transpose((1, 2, 0)).asnumpy() / 255)
    data = data.expand_dims(axis=0)  # add batch axis: (C, H, W) -> (1, C, H, W)
    out = net(data.as_in_context(ctx))
    out = nd.SoftmaxActivation(out)  # logits -> class probabilities
    pred = int(nd.argmax(out, axis=1).asscalar())
    prob = out[0][pred].asscalar()
    print(prob, pred)
    return 'confidence=%f, class %s' % (prob, label[str(pred)])


if __name__ == '__main__':
    label_path = '/data/datasets/cifar-10/label.txt'
    image_path = '/data/datasets/cifar-10/test/9/9_11.jpg'
    label_dict = get_label(label_path)
    print(label_dict)
    ctx = utils.try_gpu()
    net2 = net_collection.resnet18(10)
    net2.hybridize()
    net2.load_params('models/11_0.87632_0.89242.params', ctx=ctx)
    print(predict_mxnet(net=net2, ctx=ctx, fname=image_path, label=label_dict))
    plt.imshow(plt.imread(image_path))
    plt.show()
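
get_label is not shown in this example. A minimal sketch of what it might look like, assuming label.txt holds one "<index> <name>" pair per line and that keys must be strings because the code indexes with label[str(pred)] (the file format is an assumption, not taken from the original):

def get_label(path):
    # Hypothetical helper: build a dict like {'0': 'airplane', '1': 'automobile', ...}
    label = {}
    with open(path) as f:
        for line in f:
            idx, name = line.strip().split(maxsplit=1)
            label[idx] = name
    return label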
Example #4
                    help='Number of iterations we want to do for GTG')

args = parser.parse_args()

batch_size = 32
# torch.cuda.set_device(args.gpu_id)

# Build the chosen backbone and replace its classifier head to emit nb_classes logits.
if args.net_type == 'bn_inception':
    model = net.bn_inception(pretrained=True, nb_classes=args.nb_classes)
    model.last_linear = nn.Linear(1024, args.nb_classes)
    if args.embed:
        model = net.Inception_embed(model, 1024, args.sz_embedding,
                                    args.nb_classes)

elif args.net_type == 'resnet18':
    model = net.resnet18(pretrained=True)
    model.fc = nn.Linear(512, args.nb_classes)
elif args.net_type == 'resnet34':
    model = net.resnet34(pretrained=True)
    model.fc = nn.Linear(512, args.nb_classes)
elif args.net_type == 'resnet50':
    model = net.resnet50(pretrained=True)
    model.fc = nn.Linear(2048, args.nb_classes)
elif args.net_type == 'resnet101':
    model = net.resnet101(pretrained=True)
    model.fc = nn.Linear(2048, args.nb_classes)
elif args.net_type == 'resnet152':
    model = net.resnet152(pretrained=True)
    model.fc = nn.Linear(2048, args.nb_classes)
elif args.net_type == 'densenet121':
    model = net.densenet121(pretrained=True)
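
The densenet121 branch is cut off before its head is replaced. In torchvision-style DenseNets the classifier attribute is named classifier rather than fc, so a plausible continuation (an assumption about this repo's net module, not taken from the original) would be:

    # assumed continuation of the densenet121 branch
    model.classifier = nn.Linear(1024, args.nb_classes)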
Example #5
train_iter = gdata.DataLoader(train_ds.transform_first(transform_train),
                              batch_size,
                              shuffle=True,
                              last_batch='keep')
valid_iter = gdata.DataLoader(valid_ds.transform_first(transform_test),
                              batch_size,
                              shuffle=True,
                              last_batch='keep')
test_iter = gdata.DataLoader(test_ds.transform_first(transform_test),
                             batch_size,
                             shuffle=False,
                             last_batch='keep')

# 2. define training
ctx = mx.gpu(0)
net = resnet18(num_classes=7)
net.initialize(ctx=ctx, init=init.Xavier())
net.hybridize()  # dynamic -> static

num_epochs = 100
init_lr = 0.01
wd = 5e-4
lr_period = 10
lr_decay = 0.5

load_params = True

trainer = gluon.Trainer(net.collect_params(), 'sgd', {
    'learning_rate': init_lr,
    'momentum': 0.9,
    'wd': wd
})
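
The listing ends before the training loop that would consume these settings. A minimal sketch of a Gluon epoch loop using them, with step decay every lr_period epochs (the loss choice and the per-epoch bookkeeping are illustrative, not from the original):

from mxnet import autograd

loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()

for epoch in range(num_epochs):
    # Step-decay the learning rate every lr_period epochs.
    if epoch > 0 and epoch % lr_period == 0:
        trainer.set_learning_rate(trainer.learning_rate * lr_decay)
    train_loss = 0.0
    for X, y in train_iter:
        X, y = X.as_in_context(ctx), y.as_in_context(ctx)
        with autograd.record():
            l = loss_fn(net(X), y)
        l.backward()
        trainer.step(X.shape[0])
        train_loss += l.mean().asscalar()
    print('epoch %d, train loss %.4f' % (epoch + 1, train_loss / len(train_iter)))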