Example 1
import torch
import torchvision
from torchvision import datasets, models, transforms
import time
import os
from PIL import Image
import sys
import torch.nn.functional as F

from net import simpleconv3
data_transforms = transforms.Compose([
    transforms.Resize(48),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

net = simpleconv3()
net.eval()
modelpath = sys.argv[1]
net.load_state_dict(
    torch.load(modelpath, map_location=lambda storage, loc: storage))

imagepath = sys.argv[2]
image = Image.open(imagepath)
imgblob = data_transforms(image).unsqueeze(0)
# Run inference without tracking gradients (Variable is no longer needed in modern PyTorch)
with torch.no_grad():
    predict = F.softmax(net(imgblob), dim=1)
print(predict)
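
Every example on this page imports simpleconv3 from a local net module that is not shown. The following is a hypothetical PyTorch sketch of such a module, assuming a three-convolution classifier for 48x48 RGB inputs; the channel counts and fully-connected sizes are assumptions, not taken from the source.

import torch.nn as nn
import torch.nn.functional as F

class simpleconv3(nn.Module):
    # hypothetical sketch; layer sizes assume 48x48 RGB inputs
    def __init__(self, nclass=2):
        super(simpleconv3, self).__init__()
        self.conv1 = nn.Conv2d(3, 12, 3, 2)   # 48x48 -> 23x23
        self.bn1 = nn.BatchNorm2d(12)
        self.conv2 = nn.Conv2d(12, 24, 3, 2)  # 23x23 -> 11x11
        self.bn2 = nn.BatchNorm2d(24)
        self.conv3 = nn.Conv2d(24, 48, 3, 2)  # 11x11 -> 5x5
        self.bn3 = nn.BatchNorm2d(48)
        self.fc1 = nn.Linear(48 * 5 * 5, 128)
        self.fc2 = nn.Linear(128, nclass)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = x.view(-1, 48 * 5 * 5)
        x = F.relu(self.fc1(x))
        return self.fc2(x)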
Example 2
    # Augmentation configuration for training: rescaling plus random zoom and
    # horizontal flips (the opening of this call is reconstructed from context,
    # since the snippet begins mid-statement)
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        zoom_range=0.2,
        horizontal_flip=True)

    # Augmentation configuration for validation: only rescaling
    val_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        train_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size)

    val_generator = val_datagen.flow_from_directory(
        validation_data_dir,
        target_size=(img_width, img_height),
        batch_size=batch_size)

    num_train_samples = train_generator.samples
    num_val_samples = val_generator.samples

    tensorboard = TensorBoard(log_dir='./logs')
    callbacks = []
    callbacks.append(tensorboard)
    model = simpleconv3()
    loss = binary_crossentropy
    metrics = [binary_accuracy]
    optimizer = SGD(lr=0.001, decay=1e-6, momentum=0.9)

    model = train_model(model, loss, metrics, optimizer, num_epochs)
    if not os.path.exists('models'):
        os.mkdir('models')
    model.save_weights('models/model.h5')
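
The train_model helper called above is not shown on this page. A minimal sketch of what it might do with these arguments, assuming it can see the generators, sample counts, and callbacks defined earlier; the steps-per-epoch wiring is an assumption.

def train_model(model, loss, metrics, optimizer, num_epochs):
    # hypothetical sketch: compile with the supplied settings, then train
    # on the generators defined above
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)
    model.fit_generator(
        train_generator,
        steps_per_epoch=num_train_samples // batch_size,
        epochs=num_epochs,
        validation_data=val_generator,
        validation_steps=num_val_samples // batch_size,
        callbacks=callbacks)
    return model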
Example 3
import sys
import os
import cv2
import tensorflow as tf

from net import simpleconv3
from dataset import ImageData  # assumed local module providing the ImageData loader

txtfile = sys.argv[1]
batch_size = 64
num_classes = 2
image_size = (48, 48)
learning_rate = 0.0001

debug=False

if __name__=="__main__":
    dataset = ImageData(txtfile, batch_size, num_classes, image_size)
    iterator = dataset.data.make_one_shot_iterator()
    dataset_size = dataset.dataset_size
    batch_images, batch_labels = iterator.get_next()
    Ylogits = simpleconv3(batch_images,True)

    print "Ylogits size=",Ylogits.shape

    Y = tf.nn.softmax(Ylogits)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=batch_labels)
    cross_entropy = tf.reduce_mean(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(batch_labels, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
    saver = tf.train.Saver()
    in_steps = 100
    checkpoint_dir = 'checkpoints/'
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
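
    # A sketch of the TF1-style training loop that would follow; the total
    # step count is an assumption, and in_steps is treated as the logging
    # and checkpointing cadence
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(10000):
            _, loss_val, acc_val = sess.run(
                [train_step, cross_entropy, accuracy])
            if step % in_steps == 0:
                print("step=%d loss=%f acc=%f" % (step, loss_val, acc_val))
                saver.save(sess,
                           checkpoint_dir + 'model.ckpt',
                           global_step=step)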
Example 4
    image_datasets = {
        x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
        for x in ['train', 'val']
    }
    dataloders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=16,
                                       shuffle=True,
                                       num_workers=4)
        for x in ['train', 'val']
    }

    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}

    use_gpu = torch.cuda.is_available()

    modelclc = simpleconv3()
    print(modelclc)
    if use_gpu:
        modelclc = modelclc.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer_ft = optim.SGD(modelclc.parameters(), lr=0.1, momentum=0.9)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft,
                                           step_size=100,
                                           gamma=0.1)

    modelclc = train_model(model=modelclc,
                           criterion=criterion,
                           optimizer=optimizer_ft,
                           scheduler=exp_lr_scheduler,
                           num_epochs=500)
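
This snippet begins mid-script, so data_dir and data_transforms are assumed to be defined earlier. A sketch of what those definitions might look like; the directory layout and transform choices are assumptions, not from the source.

import os
from torchvision import transforms

data_dir = 'data'  # hypothetical root holding data/train/<class>/ and data/val/<class>/
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(48),  # random crop and scale for augmentation
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ]),
    'val': transforms.Compose([
        transforms.Resize((48, 48)),  # deterministic resize for evaluation
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ]),
}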
Example 5
import lasagne
import theano
import theano.tensor as T
import sys
import numpy as np

import matplotlib.pyplot as plt

from net import simpleconv3
from dataset import Dataset

input_var = T.tensor4('X')
target_var = T.ivector('y')

network = simpleconv3(input_var)

prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()

#loss = loss.mean() + 1e-4 * lasagne.regularization.regularize_network_params(
#        network, lasagne.regularization.l2)

test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(
    test_prediction, target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                  dtype=theano.config.floatX)

# create parameter update expressions
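# Continuing from the comment above: a sketch of the update expressions and
# compiled functions; the optimizer choice, learning rate, and momentum are
# assumptions, not from the source
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(
    loss, params, learning_rate=0.01, momentum=0.9)

# training function: updates parameters and returns the training loss
train_fn = theano.function([input_var, target_var], loss, updates=updates)
# validation function: returns deterministic loss and accuracy, no updates
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])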
Example 6
import torch
from torch.autograd import Variable
import torchvision
from torchvision import datasets, models, transforms
import time
import os
from PIL import Image
import sys
import torch.nn.functional as F

## Global variables
## sys.argv[1]: path to the weights file
## sys.argv[2]: path to the image folder

testsize = 48  ## test image size
from net import simpleconv3
net = simpleconv3(2)  ## define the model (2 output classes)
net.eval()  ## inference mode, so layers such as dropout and batchnorm use their eval behavior
torch.set_grad_enabled(False)  ## disable autograd to speed up inference and save memory

## Load the model weights
modelpath = sys.argv[1]
net.load_state_dict(
    torch.load(modelpath, map_location=lambda storage, loc: storage))

## Define the preprocessing pipeline
data_transforms = transforms.Compose([
    transforms.Resize(48),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
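
## A sketch of the per-image inference loop this script builds toward;
## the filename handling and output format are assumptions
imagedir = sys.argv[2]
for imagename in os.listdir(imagedir):
    imagepath = os.path.join(imagedir, imagename)
    image = Image.open(imagepath).convert('RGB')  ## force 3 channels
    imgblob = data_transforms(image).unsqueeze(0)  ## add a batch dimension
    predict = F.softmax(net(imgblob), dim=1)  ## class probabilities
    print(imagename, predict)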