def run():
    """Streamlit page: load a trained VGG19 emotion classifier and run
    face-expression prediction on a user-uploaded image.

    Side effects only (renders to the Streamlit app); no return value.
    """
    # FER-style emotion labels — order assumed to match the classifier head.
    classes = ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise',
               'Neutral')
    crop_size = 44
    # Checkpoint is a dict holding the weights under "model_weights".
    # NOTE(review): hard-coded absolute Windows path — consider making this
    # configurable.
    trained_model = torch.load("C:/Users/Admin/Downloads/model_state.pth.tar")
    model = VGG("VGG19")
    model.load_state_dict(trained_model["model_weights"])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model.to(device)
    model.eval()

    st.title("Facial expression recognition")
    img_file = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])

    if img_file is None:
        st.write('** Please upload an image **')
        # BUG FIX: the original fell through and crashed on
        # Image.open(None) when nothing was uploaded yet.
        return
    original_image = Image.open(img_file, mode='r')
    st.image(original_image, use_column_width=True)
    # BUG FIX: removed `model = 1`, which clobbered the loaded network
    # immediately before it was passed to detect().
    if st.button('Predict'):
        predict_image = detect(model, original_image)
        # detect() returns a BGR (OpenCV) array; convert to RGB for display.
        image = Image.fromarray(cv2.cvtColor(predict_image, cv2.COLOR_BGR2RGB))
        st.image(image, use_column_width=True)
def train():
    """Train a VGG network on CIFAR-10, checkpointing the best test accuracy.

    Hyperparameters come from the module-level `args` namespace
    (batch_size, lrate, epochs, decay, decay_period, model_dir, use_cuda,
    eval, plus whatever VGG consumes from vars(args)).

    Side effects: appends the per-epoch accuracy history to
    "<model_dir>_scores.pkl" and saves the best state dict to
    `args.model_dir`. In `--eval` mode it only evaluates a saved
    checkpoint and terminates the process.
    """
    # Standard CIFAR-10 training augmentation.
    # BUG FIX: the original used RandomCrop(size=32) with no padding, which
    # is a no-op on 32x32 CIFAR images; padding=4 is the canonical recipe
    # and makes the random crop actually augment.
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(size=32, padding=4),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    # Test images are only normalized — no augmentation.
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010)),
    ])

    trainset = torchvision.datasets.CIFAR10(root='./data',
                                            train=True,
                                            download=False,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              num_workers=2)

    testset = torchvision.datasets.CIFAR10(root='./data',
                                           train=False,
                                           download=False,
                                           transform=transform_test)
    # Evaluation order does not affect accuracy; shuffle=False avoids
    # pointless shuffling work (the original shuffled the test set too).
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=2)

    model = VGG(vars(args))
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lrate,
                                momentum=0.9,
                                weight_decay=5e-4)

    if args.use_cuda:
        model = model.cuda()

    if args.eval:
        # Evaluation-only mode: load the checkpoint, report accuracy, stop
        # the process (no training).
        model.load_state_dict(torch.load(args.model_dir))
        model.eval()
        accuracy = model.evaluate(testloader)
        exit()

    total_size = len(trainloader)
    lrate = args.lrate
    best_score = 0.0
    scores = []
    for epoch in range(1, args.epochs + 1):
        model.train()
        for i, (image, label) in enumerate(trainloader):
            # BUG FIX: the original called both model.zero_grad() and
            # optimizer.zero_grad(); they zero the same parameters, so one
            # call suffices.
            optimizer.zero_grad()
            # NOTE: this VGG wrapper returns the loss directly from forward().
            loss = model(image, label)
            loss.backward()
            optimizer.step()
            if i % 100 == 0:
                # .item() extracts the Python scalar instead of formatting
                # a tensor object.
                print('Epoch = %d, step = %d / %d, loss = %.5f lrate = %.5f' %
                      (epoch, i, total_size, loss.item(), lrate))

        # Per-epoch evaluation on the held-out test set.
        model.eval()
        accuracy = model.evaluate(testloader)
        scores.append(accuracy)

        # Persist the full accuracy history every epoch.
        with open(args.model_dir + "_scores.pkl", "wb") as f:
            pkl.dump(scores, f)

        # Keep only the best-performing weights on disk.
        if best_score < accuracy:
            best_score = accuracy
            print('saving %s ...' % args.model_dir)
            torch.save(model.state_dict(), args.model_dir)

        # Step-decay the learning rate every `decay_period` epochs.
        if epoch % args.decay_period == 0:
            lrate *= args.decay
            for param_group in optimizer.param_groups:
                param_group['lr'] = lrate
# ---- Script setup: parse CLI args and prepare the trained FER model ----
args = ap.parse_args()  # `ap` (the argparse parser) is defined earlier in the file

mode = args.mode
# NOTE(review): `assert` is stripped under `python -O`; raising ValueError
# would be safer for input validation.
assert mode in {"mtcnn", "haarcascade"}

# Emotion class labels (FER-style, seven classes).
classes = ('Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral')
crop_size = 44  # side length in pixels used by TenCrop below
image_path = args.input
# Load model checkpoint: a dict holding at least "epoch" and "model_weights".
trained_model = torch.load(args.trained_model)
print("Load weight model with {} epoch".format(trained_model["epoch"]))

model = VGG(args.model_name)
model.load_state_dict(trained_model["model_weights"])
# NOTE(review): `device` is not defined in this fragment — presumably set
# earlier in the file; confirm before relying on it.
model.to(device)
model.eval()

# Test-time augmentation: ten crop_size x crop_size crops per face image,
# stacked into a single tensor batch of shape (10, C, H, W).
transform_test = transforms.Compose([
    transforms.TenCrop(crop_size),
    transforms.Lambda(lambda crops: torch.stack(
        [transforms.ToTensor()(crop) for crop in crops]))
])
def detect():
    original_image = cv2.imread(image_path)
    if mode == "haarcascade":
        gray_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)
        faces = haarcascade_detect.face_detect(gray_image)
    else:
        detector = MTCNN()
# ---- Exemple #4 (snippet separator from the original scrape; the fragment
# below is truncated at its start) ----
                                       transform=transforms.Compose([
                                           transforms.ToTensor(),
                                           normalize,
                                       ]))
# DataLoader over the test set defined above (truncated in this fragment).
# batch_size=1 so each forward pass' stored feature maps correspond to a
# single image for the deconv visualization.
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=2)

# Load the conv model from `model_path` (defined elsewhere in the file).
net = VGG('VGG16')
net = net.cuda()  # NOTE(review): assumes a CUDA device is available
criterion_no_constrain = nn.CrossEntropyLoss()
net.load_state_dict(torch.load(model_path))

net.eval()
store_feat_maps(net)  # store all feature maps and max pooling locations
#
# Build the deconv net
net_decocnv = _vgg16Deconv()
# Tie the deconv net to the trained conv net: copy each Conv2d layer's
# weights (and, where an entry exists in conv2deconv_bias_indices, its bias)
# into the corresponding deconv layer, using the index maps declared on the
# deconv model.
for idx, layer in enumerate(net.features):
    if isinstance(layer, nn.Conv2d):
        net_decocnv.features[net_decocnv.conv2deconv_indices[
            idx]].weight.data = layer.weight.data
        if idx in net_decocnv.conv2deconv_bias_indices:
            net_decocnv.features[net_decocnv.conv2deconv_bias_indices[
                idx]].bias.data = layer.bias.data
net_decocnv = net_decocnv.cuda()
net_decocnv.eval()

# Index of the next image to visualize — presumably 1-based; confirm against
# the consumer of this variable further down the file.
next_img = 1