Example #1
def main():
    args = parser.parse_args()

    id_to_label = dataset.get_labels()
    results = find_inputs(args.data, filter=['results.csv'])
    dfs = []
    for r in results:
        df = pd.read_csv(r[1], index_col=None)
        df = df.set_index('fname')
        dfs.append(df)
    all_df = pd.concat(dfs)

    # average per-model log-probabilities per file, then convert back to probabilities
    avg = all_df.groupby(['fname']).sum()
    avg /= len(dfs)
    avg_arr = avg.to_numpy()  # DataFrame.as_matrix() was removed from pandas; use to_numpy()
    fnames = avg.index.values
    probs_arr = np.exp(avg_arr)
    #probs_arr[:, 0] += 0.3
    #probs_arr[:, 1] += 0.16
    idx = np.argmax(probs_arr, axis=1)
    probs = probs_arr[np.arange(len(idx)), idx]
    labels = np.array(id_to_label)[idx]
    ensemble = pd.DataFrame(data={
        'fname': fnames,
        'label': labels,
        'prob': probs
    },
                            columns=['fname', 'label', 'prob'])
    ensemble.to_csv('./ensemble.csv', index=False)
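A note on the averaging above: summing per-model log-probabilities, dividing by the model count, and exponentiating yields the geometric mean of the individual probabilities, not the arithmetic mean. A minimal sketch with made-up numbers (the three-model array below is hypothetical):

import numpy as np

# hypothetical log-probabilities from 3 models for one sample, 2 classes
logps = np.log(np.array([[0.9, 0.1],
                         [0.8, 0.2],
                         [0.7, 0.3]]))
geo_mean = np.exp(logps.mean(axis=0))  # geometric mean of the probabilities
print(geo_mean)  # ~[0.797, 0.182]; the result need not sum to 1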
Example #2
def do_classify_predict(trainer, dataset, training_args):
    logging.info('*** Test ***')
    predictions, label_ids, metrics = trainer.predict(test_dataset=dataset)

    output_test_file = os.path.join(training_args.output_dir,
                                    "test_results.txt")
    with open(output_test_file, 'w') as writer:
        logging.info('*** Test results are in {} ***'.format(output_test_file))
        writer.write("index\tprediction\n")
        # predictions are raw logits of shape [num_samples, num_labels]; the
        # original wrote the gold label_ids under the "prediction" header,
        # so take the argmax instead (numpy assumed imported as np)
        preds = np.argmax(predictions, axis=1)
        for index, item in enumerate(preds):
            item = dataset.get_labels()[item]
            writer.write('%d\t%s\n' % (index, item))
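If a quick accuracy check is wanted next to the results file, it can be computed from the same arrays; a minimal sketch, reusing the preds array computed above and the gold label_ids returned by trainer.predict:

accuracy = (preds == label_ids).mean()   # fraction of correct predictions
logging.info('accuracy = %.4f', accuracy)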
Example #3
def ExpressionDetection(image):
    detection_model_path = "%s/%s/haarcascade_frontalface_default.xml" % (PATH, "haar_cascade")
    emotion_model_path = '%s/vision/models/fer2013_mini_XCEPTION.102-0.66.hdf5' % PATH
    emotion_labels = get_labels('fer2013')

    # loading models
    face_detection = load_detection_model(detection_model_path)

    emotion_classifier = load_model(emotion_model_path, compile=False)

    # getting input model shapes for inference
    emotion_target_size = emotion_classifier.input_shape[1:3]
    emotion_offsets = (20, 40)
    # read the input image directly as grayscale and detect faces in it
    gray_image = cv2.imread(image, 0)
    faces = detect_faces(face_detection, gray_image)

    for face_coordinates in faces:

        x1, x2, y1, y2 = apply_offsets(face_coordinates, emotion_offsets)
        gray_face = gray_image[y1:y2, x1:x2]
        try:
            gray_face = cv2.resize(gray_face, emotion_target_size)
        except cv2.error:
            # the offset crop fell outside the image; skip this face
            continue

        gray_face = preprocess_input(gray_face, True)
        gray_face = np.expand_dims(gray_face, 0)   # add batch dimension
        gray_face = np.expand_dims(gray_face, -1)  # add channel dimension
        emotion_prediction = emotion_classifier.predict(gray_face)
        emotion_probability = np.max(emotion_prediction)
        emotion_label_arg = np.argmax(emotion_prediction)
        emotion_text = emotion_labels[emotion_label_arg]
        # return the predicted emotion of the first detected face
        return emotion_text
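A hypothetical call (the image path is an assumption); the function returns the first face's label, or None when no face is detected:

emotion = ExpressionDetection('./test_images/face.jpg')
print(emotion)  # e.g. 'happy', or None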
Example #4
def main():
    args = parser.parse_args()

    if args.output:
        output_base = args.output
    else:
        output_base = './output'
    exp_name = '-'.join([
        datetime.now().strftime("%Y%m%d-%H%M%S"),
        args.model,
        args.gp,
        'f'+str(args.fold)])
    output_dir = get_outdir(output_base, 'train', exp_name)

    train_input_root = os.path.join(args.data)
    batch_size = args.batch_size
    num_epochs = args.epochs
    wav_size = (16000,)
    num_classes = len(dataset.get_labels())

    torch.manual_seed(args.seed)

    model = model_factory.create_model(
        args.model,
        in_chs=1,
        pretrained=args.pretrained,
        num_classes=num_classes,
        drop_rate=args.drop,
        global_pool=args.gp,
        checkpoint_path=args.initial_checkpoint)
    #model.reset_classifier(num_classes=num_classes)

    dataset_train = dataset.CommandsDataset(
        root=train_input_root,
        mode='train',
        fold=args.fold,
        wav_size=wav_size,
        format='spectrogram',
    )

    loader_train = data.DataLoader(
        dataset_train,
        batch_size=batch_size,
        pin_memory=True,
        shuffle=True,
        num_workers=args.workers
    )

    dataset_eval = dataset.CommandsDataset(
        root=train_input_root,
        mode='validate',
        fold=args.fold,
        wav_size=wav_size,
        format='spectrogram',
    )

    loader_eval = data.DataLoader(
        dataset_eval,
        batch_size=args.batch_size,
        pin_memory=True,
        shuffle=False,
        num_workers=args.workers
    )

    train_loss_fn = validate_loss_fn = torch.nn.CrossEntropyLoss()
    train_loss_fn = train_loss_fn.cuda()
    validate_loss_fn = validate_loss_fn.cuda()

    opt_params = list(model.parameters())
    if args.opt.lower() == 'sgd':
        optimizer = optim.SGD(
            opt_params, lr=args.lr,
            momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
    elif args.opt.lower() == 'adam':
        optimizer = optim.Adam(
            opt_params, lr=args.lr, weight_decay=args.weight_decay, eps=args.opt_eps)
    elif args.opt.lower() == 'nadam':
        optimizer = nadam.Nadam(
            opt_params, lr=args.lr, weight_decay=args.weight_decay, eps=args.opt_eps)
    elif args.opt.lower() == 'adadelta':
        optimizer = optim.Adadelta(
            opt_params, lr=args.lr, weight_decay=args.weight_decay, eps=args.opt_eps)
    elif args.opt.lower() == 'rmsprop':
        optimizer = optim.RMSprop(
            opt_params, lr=args.lr, alpha=0.9, eps=args.opt_eps,
            momentum=args.momentum, weight_decay=args.weight_decay)
    else:
        raise ValueError('Invalid optimizer: {}'.format(args.opt))
    del opt_params

    if not args.decay_epochs:
        print('No decay epoch set, using plateau scheduler.')
        lr_scheduler = ReduceLROnPlateau(optimizer, patience=10)
    else:
        lr_scheduler = None

    # optionally resume from a checkpoint
    start_epoch = 0 if args.start_epoch is None else args.start_epoch
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
                if 'args' in checkpoint:
                    print(checkpoint['args'])
                new_state_dict = OrderedDict()
                for k, v in checkpoint['state_dict'].items():
                    if k.startswith('module'):
                        name = k[7:] # remove `module.`
                    else:
                        name = k
                    new_state_dict[name] = v
                model.load_state_dict(new_state_dict)
                if 'optimizer' in checkpoint:
                    optimizer.load_state_dict(checkpoint['optimizer'])
                if 'loss' in checkpoint:
                    train_loss_fn.load_state_dict(checkpoint['loss'])
                print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
                start_epoch = checkpoint['epoch'] if args.start_epoch is None else args.start_epoch
            else:
                model.load_state_dict(checkpoint)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            exit(1)

    saver = CheckpointSaver(checkpoint_dir=output_dir)

    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
    else:
        model.cuda()

    # Optionally fine-tune only the final classifier weights for a given number of epochs (possibly fractional)
    if not args.resume and args.ft_epochs > 0.:
        if isinstance(model, torch.nn.DataParallel):
            classifier_params = model.module.get_classifier().parameters()
        else:
            classifier_params = model.get_classifier().parameters()
        if args.opt.lower() == 'adam':
            finetune_optimizer = optim.Adam(
                classifier_params,
                lr=args.ft_lr, weight_decay=args.weight_decay)
        else:
            finetune_optimizer = optim.SGD(
                classifier_params,
                lr=args.ft_lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)

        finetune_epochs_int = int(np.ceil(args.ft_epochs))
        finetune_final_batches = int(np.ceil((1 - (finetune_epochs_int - args.ft_epochs)) * len(loader_train)))
        print('fine-tune epochs: {}, final-epoch batch limit: {}'.format(
            finetune_epochs_int, finetune_final_batches))
        for fepoch in range(0, finetune_epochs_int):
            if fepoch == finetune_epochs_int - 1 and finetune_final_batches:
                batch_limit = finetune_final_batches
            else:
                batch_limit = 0
            train_epoch(
                fepoch, model, loader_train, finetune_optimizer, train_loss_fn, args,
                output_dir=output_dir, batch_limit=batch_limit)

    best_loss = None
    try:
        for epoch in range(start_epoch, num_epochs):
            if args.decay_epochs:
                adjust_learning_rate(
                    optimizer, epoch, initial_lr=args.lr,
                    decay_rate=args.decay_rate, decay_epochs=args.decay_epochs)

            train_metrics = train_epoch(
                epoch, model, loader_train, optimizer, train_loss_fn, args,
                saver=saver, output_dir=output_dir)

            # save a recovery in case validation blows up
            saver.save_recovery({
                'epoch': epoch + 1,
                'arch': args.model,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'loss': train_loss_fn.state_dict(),
                'args': args,
                'gp': args.gp,
                },
                epoch=epoch + 1,
                batch_idx=0)

            step = epoch * len(loader_train)
            eval_metrics = validate(
                step, model, loader_eval, validate_loss_fn, args,
                output_dir=output_dir)

            if lr_scheduler is not None:
                lr_scheduler.step(eval_metrics['eval_loss'])

            rowd = OrderedDict(epoch=epoch)
            rowd.update(train_metrics)
            rowd.update(eval_metrics)
            with open(os.path.join(output_dir, 'summary.csv'), mode='a') as cf:
                dw = csv.DictWriter(cf, fieldnames=rowd.keys())
                if best_loss is None:  # first row; can't test the epoch number since training may resume mid-run
                    dw.writeheader()
                dw.writerow(rowd)

            # save proper checkpoint with eval metric
            best_loss = saver.save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.model,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'args': args,
                'gp': args.gp,
                },
                epoch=epoch + 1,
                metric=eval_metrics['eval_loss'])

    except KeyboardInterrupt:
        pass
    if best_loss is not None:
        print('*** Best loss: {0} (epoch {1})'.format(best_loss[1], best_loss[0]))
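adjust_learning_rate is not shown in this example; a common step-decay helper consistent with the keyword arguments used above would look like the sketch below (an assumption, not necessarily the author's exact implementation):

def adjust_learning_rate(optimizer, epoch, initial_lr, decay_rate=0.1, decay_epochs=30):
    # step decay: scale the learning rate by decay_rate every decay_epochs epochs
    lr = initial_lr * (decay_rate ** (epoch // decay_epochs))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr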
Example #5
def main():
    args = parser.parse_args()

    num_classes = len(get_labels())
    test_time_pool = 0  # 5 if 'dpn' in args.model else 0

    model = model_factory.create_model(args.model,
                                       in_chs=1,
                                       num_classes=num_classes,
                                       global_pool=args.gp,
                                       test_time_pool=test_time_pool)
    #model.reset_classifier(num_classes=num_classes)

    if args.num_gpu > 1:
        model = torch.nn.DataParallel(model,
                                      device_ids=list(range(
                                          args.num_gpu))).cuda()
    else:
        model.cuda()

    if not os.path.exists(args.checkpoint):
        print("=> no checkpoint found at '{}'".format(args.checkpoint))
        exit(1)
    print("=> loading checkpoint '{}'".format(args.checkpoint))
    checkpoint = torch.load(args.checkpoint)
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            args.checkpoint, checkpoint['epoch']))
    else:
        model.load_state_dict(checkpoint)

    csplit = os.path.normpath(args.checkpoint).split(sep=os.path.sep)
    if len(csplit) > 1:
        exp_name = csplit[-2] + '-' + csplit[-1].split('.')[0]
    else:
        exp_name = ''

    if args.output:
        output_base = args.output
    else:
        output_base = './output'

    output_dir = get_outdir(output_base, 'predictions', exp_name)

    dataset = CommandsDataset(root=args.data,
                              mode='test',
                              format='spectrogram')

    loader = data.DataLoader(dataset,
                             batch_size=args.batch_size,
                             pin_memory=True,
                             shuffle=False,
                             num_workers=args.workers)

    model.eval()
    batch_time_m = AverageMeter()
    data_time_m = AverageMeter()
    try:
        # CSV for the raw per-class log-probabilities
        # (the original reused one handle name for both files, leaking the first)
        res_file = open(os.path.join(output_dir, 'results.csv'), mode='w')
        res_writer = csv.writer(res_file)
        res_writer.writerow(['fname'] + dataset.id_to_label)

        # CSV for the submission (predicted label and its probability)
        sub_file = open(os.path.join(output_dir, 'submission.csv'), mode='w')
        sub_writer = csv.writer(sub_file)
        sub_writer.writerow(['fname', 'label', 'prob'])

        end = time.time()
        batch_sample_idx = 0
        for batch_idx, (input, target) in enumerate(loader):
            data_time_m.update(time.time() - end)
            input = input.cuda()
            output = model(input)

            # augmentation reduction
            #reduce_factor = loader.dataset.get_aug_factor()
            #if reduce_factor > 1:
            #    output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2).squeeze(dim=2)
            #    index = index[0:index.size(0):reduce_factor]

            # move data to CPU and collect
            output_logprob = F.log_softmax(output, dim=1).cpu().numpy()
            output = F.softmax(output, dim=1)
            output_prob, output_idx = output.max(1)
            output_prob = output_prob.cpu().numpy()
            output_idx = output_idx.cpu().numpy()
            for i in range(output_logprob.shape[0]):
                index = batch_sample_idx + i
                pred_label = dataset.id_to_label[output_idx[i]]
                pred_prob = output_prob[i]
                filename = dataset.filename(index)
                res_writer.writerow([filename] + list(output_logprob[i]))
                sub_writer.writerow([filename] + [pred_label, pred_prob])

            batch_sample_idx += input.size(0)
            batch_time_m.update(time.time() - end)
            if batch_idx % args.print_freq == 0:
                print('Inference: [{}/{} ({:.0f}%)]  '
                      'Time: {batch_time.val:.3f}s, {rate:.3f}/s  '
                      '({batch_time.avg:.3f}s, {rate_avg:.3f}/s)  '
                      'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
                          batch_sample_idx,
                          len(loader.sampler),
                          100. * batch_idx / len(loader),
                          batch_time=batch_time_m,
                          rate=input.size(0) / batch_time_m.val,
                          rate_avg=input.size(0) / batch_time_m.avg,
                          data_time=data_time_m))

            end = time.time()
            # end iterating through dataset

    except KeyboardInterrupt:
        pass
    except Exception as e:
        print(str(e))
    finally:
        res_file.close()
        sub_file.close()
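The results.csv written here (a fname column followed by one log-probability column per label) is the per-model file that the ensembling script in Example #1 collects and averages. A minimal sketch of reading one such file back (pandas only; the path is an assumption):

import pandas as pd

df = pd.read_csv('./output/predictions/exp/results.csv', index_col='fname')
print(df.iloc[0].idxmax())  # label with the highest log-probability for the first file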
Example #6
import cv2
import numpy as np

from keras.models import load_model
from statistics import mode
from dataset import get_labels
from tool import draw_text
from tool import draw_bounding_box
from tool import apply_offsets
from preprocessor import preprocess_input

use_webcam = True

emotion_model_path = 'emotion_model.hdf5'

emotion_labels = get_labels('fer2013')

face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
emotion_classifier = load_model(emotion_model_path)

emotion_target_size = emotion_classifier.input_shape[1:3]

emotion_window = []

cv2.namedWindow('window_frame')
video_capture = cv2.VideoCapture(0)

cap = None
if use_webcam:
    cap = cv2.VideoCapture(0)
else:
Example #7
def _main():
    """
    main function
    :return:
    """
    show_all_parameters = True

    batch_size = 16
    num_epochs = 20
    # save the model per save_epoch_number epochs
    save_epoch_number = 1
    display_step = 100
    istrain = True

    model_folder = './model/'
    img_folder = './images/'

    if istrain:
        datasetname = '../dataset/COCO/TFRecords/train2017.tfrecords'
        annotationFile = '../dataset/COCO/annotations_trainval2017/annotations/instances_train2017.json'

        #datasetname = '/home/cv/YuSun/Yolo9000/TFRecords/train2017.tfrecords'
        #annotationFile = '/home/cv/dataset/coco/annotations/instances_train2017.json'
        modelfile = './model/model.ckpt'
        image_numbers = 118287
    else:
        datasetname = '../dataset/COCO/TFRecords/val2017.tfrecords'
        annotationFile = '../dataset/COCO/annotations_trainval2017/annotations/instances_val2017.json'

        #datasetname = '/home/cv/YuSun/Yolo9000/TFRecords/val2017.tfrecords'
        #annotationFile = '/home/cv/dataset/coco/annotations/instances_val2017.json'
        modelfile = './model/epoch10.ckpt-81312'
        image_numbers = 5000

    if not os.path.exists(model_folder):
        os.makedirs(model_folder)
    if not os.path.exists(img_folder):
        os.makedirs(img_folder)
    if not os.path.exists(datasetname):
        print('Dataset file (TFRecords) {} does not exist!'.format(datasetname))
    if not os.path.exists(annotationFile):
        print('Annotation file {} does not exist!'.format(annotationFile))
    if not os.path.exists(modelfile + '.index'):
        if not istrain:
            print('Model file {} does not exist!'.format(modelfile))

    if show_all_parameters:
        print(
            '------------------------------------------------------------------------------------------------'
        )
        print('dataset name:{}'.format(datasetname))
        print('annotation file name:{}'.format(annotationFile))
        print('batch size = {}'.format(batch_size))
        print('istrain = {}'.format(istrain))
        if istrain:
            print('epoch num = {}'.format(num_epochs))
            print('Model will be saved in {} for every {} epochs'.format(
                model_folder, save_epoch_number))
            print(
                'The result of loss will be shown for every {} steps.'.format(
                    display_step))
        else:
            print('The model used to test is {}'.format(modelfile))
        print(
            '------------------------------------------------------------------------------------------------'
        )

    #------------------------------------------------------------------------------------------------------------------

    images_batch, ids_batch, shapes_batch = dataset.input_batch(
        datasetname=datasetname, batch_size=batch_size, num_epochs=num_epochs)
    input_labels = tf.placeholder(tf.float32, [None, 13, 13, 5, 85])
    input_images = tf.placeholder(tf.float32, [None, 416, 416, 3])

    norm_imgs = (input_images - 172.5) / 255.0  # shift and scale raw pixel values

    net_output = darknet(norm_imgs, n_last_channels=425)

    prediction = lossfunction.output_to_prediction(darknet_output=net_output)

    loss = lossfunction.compute_loss(net_output, input_labels)

    coco = COCO(annotationFile)

    global_step = tf.Variable(0, trainable=False)
    decay_learning_rate = tf.train.exponential_decay(0.0001,
                                                     global_step=global_step,
                                                     decay_steps=1000,
                                                     decay_rate=0.9)
    train_step = tf.train.AdamOptimizer(decay_learning_rate).minimize(
        loss, global_step=global_step)

    #saver = tf.train.import_meta_graph('./model/yolo2_coco.ckpt.meta')
    saver = tf.train.Saver()

    if istrain:
        with tf.Session() as sess:
            print('Start training...')
            start_time = time.time()
            sess.run(tf.global_variables_initializer())
            #saver.restore(sess=sess, save_path='./model/yolo2_coco.ckpt')

            try:
                for epoch in range(num_epochs):
                    epochtime = time.time()
                    for step in range(int(image_numbers / batch_size)):

                        timages_batch, tids_batch, tshapes_batch = sess.run(
                            [images_batch, ids_batch, shapes_batch])
                        labels = dataset.get_labels(images_ids=tids_batch,
                                                    coco=coco)

                        train_step.run(feed_dict={
                            input_images: timages_batch,
                            input_labels: labels
                        })
                        #train_step.run(feed_dict={input_labels: labels})

                        if step % display_step == 0:
                            tloss, tpreds = sess.run([loss, prediction],
                                                     feed_dict={
                                                         input_images:
                                                         timages_batch,
                                                         input_labels: labels
                                                     })
                            print(
                                'Epoch {:>2}/{}, step = {:>6}/{:>6}, loss = {:.6f}, time = {}'
                                .format(epoch, num_epochs, step,
                                        int(image_numbers / batch_size), tloss,
                                        time.time() - start_time))

                            for i in range(len(timages_batch)):
                                timage = np.uint8(timages_batch[i])

                                tpred = tpreds[i]
                                tshape = [
                                    tshapes_batch[0][i], tshapes_batch[1][i]
                                ]

                                resultimg = dataset.decode_labels(
                                    timage, tpred, tshape, sess)
                                cv2.imwrite(
                                    img_folder +
                                    'epoch{}_step{}_i{}.png'.format(
                                        epoch, step, i), resultimg)
                                if i >= 2:
                                    break
                                #cv2.imshow('testimg', resultimg)
                                #cv2.waitKey(2500)
                                #cv2.destroyAllWindows()
                                #break

                            print(
                                '-------------------------------------------------------------------------------------'
                            )
                    if epoch % save_epoch_number == 0:
                        saver.save(sess,
                                   model_folder + 'epoch{}.ckpt'.format(epoch),
                                   global_step=global_step)
                        print('Model saved in: {}'.format(
                            model_folder + 'epoch{}.ckpt'.format(epoch)))
            except tf.errors.OutOfRangeError:
                print('End training...')
            finally:
                total_time = time.time() - start_time
                saver.save(sess, modelfile, global_step=global_step)
                print('Model saved as: {}, running time: {} s'.format(
                    modelfile, total_time))
                print('Done!')
    else:
        with tf.Session() as sess:
            print('Start testing...')
            sess.run(tf.global_variables_initializer())
            saver.restore(sess=sess, save_path=modelfile)

            for step in range(int(image_numbers / batch_size)):
                print('step: {}'.format(step))

                timages_batch, tids_batch, tshapes_batch = sess.run(
                    [images_batch, ids_batch, shapes_batch])

                tpreds = sess.run(prediction,
                                  feed_dict={input_images: timages_batch})

                for i in range(len(timages_batch)):
                    timage = np.uint8(timages_batch[i])

                    tpred = tpreds[i]
                    tshape = [tshapes_batch[0][i], tshapes_batch[1][i]]

                    resultimg = dataset.decode_labels(timage, tpred, tshape,
                                                      sess)
                    cv2.imwrite(img_folder + 'step{}_i{}.png'.format(step, i),
                                resultimg)

                    # cv2.imshow('testimg', resultimg)
                    # cv2.waitKey()
                    # cv2.destroyAllWindows()
            print('Finished testing. The test results are saved in {}.'.format(
                img_folder))
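The exponential_decay schedule used above works out to lr = 0.0001 * 0.9 ** (global_step / 1000); with the default staircase=False the exponent is fractional, so the rate decays smoothly. A quick check in plain Python:

step = 5000
lr = 0.0001 * 0.9 ** (step / 1000)
print(lr)  # ~5.9e-05 after 5000 steps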