Example #1
0
def main():
    """Run the hourglass-YOLO detector live on the default camera."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--weights', default="hg_yolo-110000", type=str)
    arg_parser.add_argument('--weight_dir',
                            default='../../log512/512stc4md2_bboxwhsm32hm1_hghm3/',
                            type=str)
    arg_parser.add_argument('--gpu', type=str)
    arg_parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')
    opts = arg_parser.parse_args()

    # Device selection: an explicit --cpu flag overrides any --gpu value.
    if opts.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu
    if opts.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''

    # Load the training-time configuration stored alongside the weights.
    get_config(opts.weight_dir)
    network = HOURGLASSYOLONet('visual')
    detector = Detector(network, os.path.join(opts.weight_dir, opts.weights))

    # Stream frames from the default camera into the detector.
    capture = cv2.VideoCapture(-1)
    detector.camera_detector(capture)
Example #2
0
def main(generate_accuracy_csv=False, order=None):
    """Evaluate a trained model and generate its analysis charts.

    :param generate_accuracy_csv: force regeneration of the accuracy CSV
    :param order: attribute ordering forwarded to the quantitative analysis
    """
    # Evaluation configuration (alternatives kept for reference:
    # 'eval/resnet152', 'eval/affact').
    config = get_config('eval/resnet51_s')

    # Pick GPU/CPU device and seed the RNGs.
    device = init_environment(config)

    evaluator = EvalModel(config, device)

    # (Re)compute per-test-set accuracies when forced, or when no cached
    # result file exists yet.
    result_csv = '{}/evaluation_result.csv'.format(config.experiments_dir)
    if generate_accuracy_csv or not os.path.isfile(result_csv):
        accuracy_df = evaluator.evaluate()
        accuracy_df.to_csv(result_csv)

    # Quantitative analysis (accuracy charts), if enabled in the config.
    if config.evaluation.quantitative.enabled:
        evaluator.quantitative_analysis(order)

    # Qualitative analysis (example predictions), if enabled in the config.
    if config.evaluation.qualitative.enabled:
        evaluator.qualitative_analysis()
Example #3
0
 def __init__(self):
     """Read the FVExtractionCnn config section and load the dlib models."""
     conf = config_utils.get_config("FVExtractionCnn")
     self.conf = conf
     # Tunables for face-descriptor extraction, read from the config.
     self.upsample_times = conf["upsampleTimes"]
     self.num_jitters = conf["numJitters"]
     # Input image; assigned later by the caller.
     self.img = None
     # Pre-built dlib models for detection, landmarks, and recognition.
     self.frontal_face_detector = models_utils.get_frontal_face_detector()
     self.shape_predictor = models_utils.get_shape_predictor()
     self.face_rec_model = models_utils.get_face_recog_model()
Example #4
0
 def __init__(self):
     """Read the preprocessing config section and load the dlib models."""
     conf = config_utils.get_config("preprocessing")
     self.conf = conf
     # Input image path and decoded image; assigned later by the caller.
     self.img_path = None
     self.img = None
     # Resize / alignment parameters from the config.
     self.upsample_times = conf["upsampleTimes"]
     self.img_square_size = conf["imgSquareSize"]
     self.anti_aliasing = bool(conf["antiAliasing"])
     self.resize_mode = conf["resizeMode"]
     # Pre-built dlib models for face detection and landmark prediction.
     self.frontal_face_detector = models_utils.get_frontal_face_detector()
     self.shape_predictor = models_utils.get_shape_predictor()
 def __init__(self):
     """Read the FVExtractionLbph config section with the LBPH parameters."""
     conf = config_utils.get_config("FVExtractionLbph")
     self.conf = conf
     # Input image; assigned later by the caller.
     self.img = None
     # Local-binary-pattern parameters from the config.
     self.neighbours = conf["neighbours"]
     self.radius = conf["radius"]
     self.grid_size = conf["gridSize"]
     self.img_size = conf["imgSize"]
     self.n_rows = conf["nRows"]
     self.n_cols = conf["nCols"]
     self.nbp_method_name = conf["lbpMethodName"]
     self.nbp_method_colors = conf["lbpMethodColors"]
Example #6
0
def main():
    """Run training for a specific model."""
    # Load configuration, set up device/seed, prepare the output
    # directory, then hand off to the trainer.
    config = get_config()
    device = init_environment(config)
    create_result_directory(config)
    TrainModel(config, device).train()
def main():
    """Create the test datasets needed for evaluation."""
    config = get_config('dataset/testsetA_config')
    # Ground-truth tables shared by every test set built below.
    df_test_labels, df_test_landmarks, df_test_bounding_boxes = generate_test_dataset(
        config)

    def _materialize(cfg):
        # Apply the AFFACT transformer pipeline and write out the images.
        pipeline = transforms.Compose([AffactTransformer(cfg)])
        _create_test_images(cfg, df_test_labels, df_test_landmarks,
                            df_test_bounding_boxes, pipeline)

    # Test set A: images aligned with the hand-labelled landmarks.
    print('Creating testset A (aligned)')
    _materialize(config)

    # Test set D: images aligned via the face detector's bounding boxes.
    print('Creating testset D (detected bounding boxes)')
    _materialize(get_config('dataset/testsetD_config'))

    # Test set C: face detector -> enlarged bounding box -> 10-crop.
    config = get_config('dataset/testsetC_config')
    print('Creating testset C (10 crop)')
    _materialize(config)

    # Test set T: face detector -> enlarged bounding box -> AFFACT transforms.
    config = get_config('dataset/testsetT_config')
    print('Creating testset T (AFFACT transformations)')
    _materialize(config)
Example #8
0
def create_and_run_model(config_name, eval_path=None, infer=False):
    """Build the model selected by *config_name* and run it.

    Exactly one mode is active: training (neither ``eval_path`` nor
    ``infer`` given), evaluation (``eval_path`` given), or inference
    (``infer`` True — model is built but not invoked here).

    :param config_name: name of the configuration loaded via ``get_config``
    :param eval_path: when set, evaluate the model on this path
    :param infer: when True, build the model for inference only
    :returns: ``(model, config)`` tuple
    :raises Exception: if both ``eval_path`` and ``infer`` are given
    """
    if eval_path is not None and infer:
        raise Exception("Both infer_path and eval_path are set. But cannot infer and evaluate at the same time.")

    mappings = {'CNN_RNN_CTC': CNN_RNN_CTC, 'RNN_CTC': RNN_CTC, 'Encoder_Decoder': Encoder_Decoder}

    config = get_config(config_name)

    # Training mode iff we neither evaluate nor infer.
    TRAIN = eval_path is None and not infer

    if not infer:
        _setup_logging(config, config.restore_previous_model or not TRAIN)

    if TRAIN:
        if config.restore_previous_model:
            logging.info("-"*70 + "\n" + " "*20 + "Continuing with previous model\n" + "-"*70)
        else:
            logging.info("-"*70 + "\n" + " "*25 + "Starting a new model\n" + "-"*70)
            logging.info("Logging model parameters:")
            # ``config`` is either a plain object (vars works) or a
            # namedtuple (vars raises TypeError -> use _asdict).  The
            # original bare ``except:`` also swallowed KeyboardInterrupt
            # and SystemExit; catch only the expected failure.
            try:
                attr = vars(config)
            except TypeError:
                attr = config._asdict()
            for k, v in attr.items():
                if not k.startswith('__'):
                    logging.info(str(k) + ": " + str(v))

    elif eval_path is not None:
        logging.info("\n" + "-"*50 + "\nEvaluating ({})\n".format(config.decoder_type.replace("_", " ").capitalize()) + "-"*50)

    if TRAIN:
        logging.info("\n" + "-"*50 + "\nInitializing Model and Building Graph\n" + "-"*50)
        tic = time.time()

    # Instantiate the model class named in the config.
    model = mappings[config.model](config, eval_path, infer)

    if TRAIN:
        toc = time.time()
        logging.info("Time to initialize model (including to load data): %f secs\n" % (toc - tic))

    # Kick off training or evaluation; in pure-inference mode the caller
    # drives the model itself.
    if TRAIN or eval_path is not None:
        model()

    return model, config
FastAPI Demo

Create the initial user
'''
from database.setup import session_local, engine
from database import models
from data_schemas import schemas
from utils.config_utils import get_config
from utils.user_utils import (
	create_user,
	set_user_admin
)

###############################################################################

# Read the initial-user settings from the config file.
CONFIG = get_config('initial_user.cfg')
INITIAL_USER_CONFIG = CONFIG['initial_user']
# Schema object describing the user that should exist after first startup.
INITIAL_USER = schemas.UserCreate(
	username=INITIAL_USER_CONFIG['username'],
	first_name=INITIAL_USER_CONFIG['first_name'],
	last_name=INITIAL_USER_CONFIG['last_name'],
	email=INITIAL_USER_CONFIG['email'],
	password=INITIAL_USER_CONFIG['password']
)

###############################################################################

def create_initial_user():
	'''Create the initial user'''
	# Open a DB session and make sure all tables exist before inserting.
	# NOTE(review): the function appears truncated in this chunk — the
	# actual user creation (create_user / set_user_admin) is not visible here.
	session = session_local()
	models.Base.metadata.create_all(bind=engine)
Example #10
0
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
import jwt
from sqlalchemy.orm import Session
from starlette.requests import Request
from starlette.status import (HTTP_400_BAD_REQUEST, HTTP_401_UNAUTHORIZED)

# Local Imports
from utils.config_utils import get_config
from utils.main_utils import get_db
from utils.token_utils import create_access_token
from utils.user_utils import (authenticate_user, get_user)
from data_schemas import schemas

###############################################################################

# Security settings section from the config file (exact keys are used
# elsewhere; contents not visible in this chunk).
CONFIG = get_config('security.cfg')
SECURITY = CONFIG['security']

# OAuth2 password flow: clients obtain bearer tokens from the /token endpoint.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/token")  #pylint: disable=invalid-name

router = APIRouter()  #pylint: disable=invalid-name

###############################################################################

# Returned when username/password authentication fails.
AUTHENTICATION_EXCEPTION = HTTPException(
    status_code=HTTP_401_UNAUTHORIZED,
    detail="Incorrect username or password",
    headers={"WWW-Authenticate": "Bearer"},
)

CREDENTIALS_EXCEPTION = HTTPException(status_code=HTTP_401_UNAUTHORIZED,
import dlib

from utils import config_utils

# File-system locations of the dlib model files, from the "paths" config section.
paths = config_utils.get_config("paths")


def get_shape_predictor():
    """Return a dlib landmark predictor loaded from the configured path."""
    model_path = paths["shapePredictor"]
    return dlib.shape_predictor(model_path)


def get_frontal_face_detector():
    """Return dlib's built-in frontal face detector."""
    detector = dlib.get_frontal_face_detector()
    return detector


def get_face_recog_model():
    """Return the dlib face-recognition model loaded from the configured path."""
    model_path = paths["faceRecog1"]
    return dlib.face_recognition_model_v1(model_path)
Example #12
0
def load_img_skimage(img_path):
    """Read *img_path* (luminance+alpha) and resize it to the configured square size."""
    from utils import config_utils
    side = config_utils.get_config("preprocessing")["imgSquareSize"]
    image = imageio.imread(img_path, pilmode='LA')
    return resize(image, (side, side), anti_aliasing=True, mode="constant")
Example #13
0
from fastapi import Depends, FastAPI
from starlette.requests import Request
from starlette.responses import Response
from starlette.testclient import TestClient

# Local Imports
from database.setup import session_local
from data_schemas import schemas
from routers import (token, users)
from utils.config_utils import get_config
from utils.token_utils import create_access_token
import utils.user_utils as user_utils

###############################################################################

# Test-suite settings from the testing config file.
CONFIG = get_config('testing.cfg')
TESTING = CONFIG['testing']
# Standard failure message for assertions that were expected to pass.
PASS_MSG = 'Parameters meant to pass the test produced negative result'


###############################################################################
# Testing class
###############################################################################
class TestFastAPI(unittest.TestCase):
    '''Template class for testing a PostgreSQL database'''
    # NOTE(review): the class continues past this chunk; only __init__ is
    # visible here (setup_fastapi_app is defined later in the class).
    def __init__(self, *args, **kwargs):
        # Build a fresh FastAPI app with its routers attached, wrap it in a
        # synchronous test client, and open a real DB session for fixtures.
        super(TestFastAPI, self).__init__(*args, **kwargs)
        self.app = FastAPI()
        self.setup_fastapi_app()
        self.client = TestClient(self.app)
        self.session = session_local()
Example #14
0
def main():
    """
    Run training for a specific model
    """
    # Load configuration for training
    config = get_config('train/affact_hyperopt')
    # Init environment, use GPU if available, set random seed
    device = init_environment(config)
    # Create result directory
    create_result_directory(config)
    # Create a training instance with the loaded configuration on the loaded device
    # training_instance = TrainModel(config, device)
    # Run the training
    # training_instance.train()

    # NOTE(review): machine-specific absolute path hard-coded here — this
    # script only runs as-is on the original author's machine.
    prefix = "/home/yves/Desktop/uzh/affact/PyAffact/"
    print(prefix + config.dataset.dataset_labels_filename)

    # Trial function executed by ray-tune; rewrites every dataset path to be
    # absolute (tune workers run in their own working directories).
    # ``device`` and ``prefix`` are captured from the enclosing scope.
    def train_hyperopt(config, data_dir=None):
        config.basic.result_directory = prefix + "results"
        config.dataset.dataset_labels_filename = prefix + config.dataset.dataset_labels_filename
        config.dataset.partition_filename = prefix + config.dataset.partition_filename
        config.dataset.dataset_image_folder = prefix + config.dataset.dataset_image_folder
        config.dataset.landmarks_filename = prefix + config.dataset.landmarks_filename
        config.dataset.bounding_boxes_filename = prefix + config.dataset.bounding_boxes_filename
        training_instance = TrainModel(config, device)
        # Run the training
        training_instance.train_resnet_51_hyperopt()

    # NOTE(review): ``data_dir`` is computed but never passed to tune.run —
    # likely leftover from an earlier version.
    data_dir = os.path.abspath("./data_dir")
    # Batch size is the only hyperparameter actually searched over.
    config.preprocessing.dataloader.batch_size = tune.choice([32, 64])
    wandb.init(config=config)
    # config.training.epochs = tune.choice([1, 2])

    # Early-stopping scheduler: kills under-performing trials on "loss".
    scheduler = ASHAScheduler(metric="loss",
                              mode="min",
                              max_t=10,
                              grace_period=1,
                              reduction_factor=2)
    reporter = CLIReporter(
        # parameter_columns=["l1", "l2", "lr", "batch_size"],
        metric_columns=["loss", "accuracy", "training_iteration"])
    print("starting")
    # NOTE(review): partial(train_hyperopt) without bound arguments is a
    # no-op wrapper; passing train_hyperopt directly would be equivalent.
    result = tune.run(partial(train_hyperopt),
                      resources_per_trial={
                          "cpu": 2,
                          "gpu": 1
                      },
                      config=config,
                      num_samples=1,
                      scheduler=scheduler,
                      progress_reporter=reporter)

    # Report the best trial found by the search.
    best_trial = result.get_best_trial("loss", "min", "last")
    print("Best trial config: {}".format(best_trial.config))
    print("Best trial final validation loss: {}".format(
        best_trial.last_result["loss"]))
    print("Best trial final validation accuracy: {}".format(
        best_trial.last_result["accuracy"]))

    # Create the sweep
    # NOTE(review): wandb.sweep expects a sweep-config dict, not a ray-tune
    # ExperimentAnalysis object — this call looks incorrect; confirm intent.
    wandb.sweep(result)
Example #15
0
def main():
    """Compute detection AP for one checkpoint, or sweep every checkpoint
    under a log directory when --auto_all is given."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-ims', '--image_size', default=512, type=int)
    parser.add_argument('-g', '--gpu', type=str)
    parser.add_argument('-c', '--cpu', action='store_true', help='use cpu')
    parser.add_argument('-ds',
                        '--data_source',
                        default='all',
                        type=str,
                        choices=['coco', 'pascal', 'all'])
    parser.add_argument('-ef', '--eval_file', type=str, required=True)
    parser.add_argument('-lf', '--log_file', type=str)
    parser.add_argument('-al', '--auto_all', action='store_true')
    # when calculate single model
    parser.add_argument('--weights', default="hg_yolo-240000", type=str)
    parser.add_argument(
        '--weight_dir',
        default='../log_bbox_hm/0.8_0.08_0.03_conv_fc_l2_0.005_bhm5',
        type=str)
    args = parser.parse_args()
    # Device selection: an explicit --cpu overrides any --gpu value.
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.cpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    if not args.auto_all:
        # Single-checkpoint mode: evaluate one weights file on PASCAL.
        strings = get_config(args.weight_dir)

        net = HOURGLASSYOLONet('eval')
        detector = Detector(net, os.path.join(args.weight_dir, args.weights))
        # data = COCO_VAL()
        data = PASCAL_VAL()
        evaluator = EVALUATOR(detector, data)
        ap = evaluator.eval()
        log = Logger(args.eval_file, level='debug')
        log.logger.info('\n calculate single ap from {} {}\n'.format(
            args.weight_dir, args.weights))
        log.logger.info('Data sc:{}  AP:{}  Weights:{}  {}'.format(
            data.__class__.__name__, ap, args.weights, strings))
    else:
        # Sweep mode: evaluate every checkpoint of every run under --log_file
        # against every selected dataset.
        data_source = ds_config(args)
        log = Logger(args.eval_file, level='debug')
        log.logger.info('\n calculate ap from {}\n'.format(args.eval_file))
        model_start = 'hg_yolo'
        rootdir = '../' + args.log_file
        root_list = os.listdir(rootdir)  # list all directories/files under the folder
        root_list.sort()
        for path in root_list:
            model_dir = os.path.join(rootdir, path)
            models = os.listdir(model_dir)
            # Keep only checkpoint files; dedupe the ".index/.meta/.data"
            # variants by stripping the extension, then sort by step number.
            models = filter(lambda x: x.startswith(model_start), models)
            models = list(set(map(lambda x: x.split('.')[0], models)))
            models.sort(key=lambda x: int(x[8:]))
            for data in data_source:
                for model in models:
                    strings = get_config(model_dir)
                    # TF1 graph must be reset before building a new network,
                    # and the session closed before the next iteration.
                    tf.reset_default_graph()
                    net = HOURGLASSYOLONet('eval')
                    detector = Detector(net, os.path.join(model_dir, model))
                    evaluator = EVALUATOR(detector, data)
                    ap = evaluator.eval()
                    log.logger.info(
                        'Data sc:{}  AP:{:<5.5f}  Weights:{}  {}'.format(
                            data.__class__.__name__, ap, model, strings))
                    detector.sess.close()
                    del net
                    del detector
                    del evaluator
"""
Generate chart to compare relative performance of two models
"""
from evaluation.charts import generate_relative_improvement_chart
from preprocessing.dataset_generator import generate_test_dataset
from utils.config_utils import get_config
import pandas as pd

if __name__ == '__main__':
    # Left/right models to compare; charts show improvement of right vs left.
    config_l = get_config('eval/resnet51_s')
    config_r = get_config('eval/affact')
    left_name = 'ResNet-51'
    right_name = 'AFFACT'

    # Attribute names come from the test-set label columns.
    labels, _, _ = generate_test_dataset(config_l)
    labels = labels.columns.tolist()
    # Cached per-test-set accuracies produced by each model's evaluation run.
    accuracy_df_l = pd.read_csv('{}/evaluation_result.csv'.format(
        config_l.experiments_dir),
                                index_col=0)
    accuracy_df_r = pd.read_csv('{}/evaluation_result.csv'.format(
        config_r.experiments_dir),
                                index_col=0)

    # One figure per test set: display it and export it as a PNG.
    figures = generate_relative_improvement_chart(labels, accuracy_df_l,
                                                  accuracy_df_r, left_name,
                                                  right_name)
    for i, test_set in enumerate(accuracy_df_l.columns):
        figures[i].show()
        # NOTE(review): this statement is cut off in this chunk — the
        # remaining write_image() arguments are not visible here.
        figures[i].write_image(
            '{}/eval_{}_{}_{}_relative_improvement.png'.format(
                config_l.experiments_dir, left_name, right_name, test_set),
Example #17
0
FastAPI Demo

Database setup
'''
# Standard Imports

# PyPi Imports
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

# Local Imports
from utils.config_utils import get_config

###############################################################################
# Database connection settings from the config file.
CONFIG = get_config('database.cfg')
DB_CONFIG = CONFIG['database']
# Assemble the SQLAlchemy URL for a PostgreSQL server via psycopg2.
DB_URL_MASK = 'postgresql+psycopg2://{user}:{pw}@{url}/{db}'
DB_URL = DB_URL_MASK.format(user=DB_CONFIG['POSTGRES_USER'],
                            pw=DB_CONFIG['POSTGRES_PW'],
                            url=DB_CONFIG['POSTGRES_URL'],
                            db=DB_CONFIG['POSTGRES_DB'])
SQLALCHEMY_DATABASE_URL = DB_URL

###############################################################################

# Shared engine, session factory, and declarative base for all models.
engine = create_engine(SQLALCHEMY_DATABASE_URL)  #pylint: disable=invalid-name

session_local = sessionmaker(autocommit=False, autoflush=False, bind=engine)  #pylint: disable=invalid-name

Base = declarative_base()  #pylint: disable=invalid-name
from sklearn import metrics
from data_loader.gnn_data_generator import load_data
from models.text_gnn import TextGNN
from utils import csv_utils
from utils.config_utils import get_config


def evaluate_model(args):
    """Restore the TextGNN model described by *args* and print test metrics."""
    tf.set_random_seed(19)
    session_config = tf.ConfigProto(allow_soft_placement=True)
    session_config.gpu_options.allow_growth = True
    with tf.Session(config=session_config) as sess:
        dataset_path = args['dataset']['path']
        # Class names for the classification report.
        label_dict = csv_utils.read(dataset_path + '/labels.csv')
        data_generator = load_data(dataset_path, args['dataset']['dataset_name'])
        model = TextGNN(sess=sess, data_generator=data_generator, **data_generator,
                        **args['dataset'], **args['model'], **args)
        result, labels = model.test()

        # Per-class precision/recall/F1 ("evaluation").
        print('评估')
        print(metrics.classification_report(labels, result, target_names=label_dict))
        # Confusion matrix.
        print('混淆矩阵')
        print(metrics.confusion_matrix(labels, result))


if __name__ == '__main__':
    # Alternative dataset configs kept for quick switching:
    # config = get_config('gnn/aclImdb')
    # config = get_config('gnn/cnews')
    config = get_config('gnn/cnews_voc')
    config['tag'] = 'base'
    evaluate_model(config)
Example #19
0
        # yaml_utils.write(args['model']['checkpoint_dir'] + '/' + args['dataset']['dataset_name'] + '/' +
        #                  args['model']['name'] + '/' + args['tag'] + '/' + 'best_result.yaml', result)
        print('评估')
        print(
            metrics.classification_report(
                labels, result, target_names=eval_data_generator.get_labels()))
        print('混淆矩阵')
        cm = metrics.confusion_matrix(labels, result)
        print(cm)


if __name__ == '__main__':
    # Alternative model/dataset configs kept for quick switching:
    # config = get_config('adversarial/aclImdb_rnn')
    # config = get_config('adversarial/aclImdb_cnn')
    # config = get_config('adversarial/cnews_rnn')
    # config = get_config('adversarial/cnews_cnn')
    # config = get_config('adversarial/cnews_voc_cnn')
    # config = get_config('adversarial/cnews_voc_rnn')

    # config = get_config('cnn/aclImdb')
    # config = get_config('cnn/cnews')
    # config = get_config('cnn/cnews_voc')
    config = get_config('rnn/aclImdb')
    # config = get_config('rnn/cnews')
    # config = get_config('rnn/cnews_voc')
    # config['tag'] = 'base'
    # config['tag'] = 'embedding_untrainable'
    # Evaluate the RNN model with an LSTM cell on the aclImdb dataset.
    config['tag'] = 'lstm'
    config['model']['rnn_type'] = 'lstm'
    evaluate_model(config)
Example #20
0
            seq_length=args['dataset']['seq_length'])
        model_class = get_model_class_by_name(args['model']['name'])
        model = model_class(sess=sess,
                            train_generator=train_data_generator,
                            eval_generator=eval_data_generator,
                            embedding=embedding,
                            **dataset_info,
                            **args['dataset'],
                            **args['model'],
                            **args)
        model.train()


if __name__ == '__main__':
    # Pin training to GPU #2.
    os.environ['CUDA_VISIBLE_DEVICES'] = '2'
    # Alternative model/dataset configs kept for quick switching:
    config = get_config('adversarial/aclImdb_rnn')
    # config = get_config('adversarial/aclImdb_cnn')
    # config = get_config('adversarial/cnews_rnn')
    # config = get_config('adversarial/cnews_cnn')
    # config = get_config('adversarial/cnews_voc_cnn')
    # config = get_config('adversarial/cnews_voc_rnn')

    # config = get_config('cnn/aclImdb')
    # config = get_config('cnn/cnews')
    # config = get_config('cnn/cnews_voc')
    # config = get_config('rnn/aclImdb')
    # config = get_config('rnn/cnews')
    # config = get_config('rnn/cnews_voc')
    config['tag'] = 'lstm_clear_string'
    # config['tag'] = 'lstm_without_embedding'
    # NOTE(review): block appears truncated here — the call that actually
    # starts training is not visible in this chunk.
    config['model']['rnn_type'] = 'lstm'
Example #21
0
import tensorflow as tf
from model import SkipGram
from utils.config_utils import get_config


def train(args):
    """Train a SkipGram embedding model with the given configuration dict."""
    tf.set_random_seed(19)
    session_conf = tf.ConfigProto(allow_soft_placement=True)
    session_conf.gpu_options.allow_growth = True
    with tf.Session(config=session_conf) as sess:
        SkipGram(sess=sess, **args['dataset'], **args['model'], **args).train()


if __name__ == '__main__':
    # Train word embeddings with the English-corpus configuration.
    config = get_config('english')
    config['tag'] = 'base'
    train(config)