Example #1
def load_test_model():
    # Bail out if the dumped model is not found either directly or under the
    # dataset's models directory.
    if not os.path.isfile(cfg.DUMPED_MODEL) and not os.path.isfile(
            os.path.join(cfg.DATASET_BASE, "models", cfg.DUMPED_MODEL)):
        print("No trained model file!")
        return
    # Move the main, color, and pooling sub-models to the configured GPU and
    # wrap them in a single FeatureExtractor.
    main_model = f_model(model_path=cfg.DUMPED_MODEL).cuda(cfg.GPU_ID)
    color_model = c_model().cuda(cfg.GPU_ID)
    pooling_model = p_model().cuda(cfg.GPU_ID)
    extractor = FeatureExtractor(main_model, color_model, pooling_model)
    return extractor
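
A small caller sketch for the function above; it relies only on the fact that load_test_model() returns None when no trained model file is found:

# Hypothetical caller: bail out early if no trained model could be loaded.
extractor = load_test_model()
if extractor is None:
    raise SystemExit("Train or download a model before extracting features.")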
Example #2
def DataManager(dh: DataHandling = None,
                fr: FeatureRecipe = None,
                fe: FeatureExtractor = None):
    """
        Function linking the first three classes of the pipeline.
        The incoming arguments are ignored; fresh instances are built below.
    """
    # Columns to keep
    klist = [
        'Manufacturer', 'Latest_Launch', 'Horsepower', 'Price_in_thousands'
    ]

    # Load the raw data, prepare it, then select the feature columns.
    dh = DataHandling()
    dh.get_data()
    fr = FeatureRecipe(dh.data)
    fr.prepare_data(0.3)

    fe = FeatureExtractor(fr.data, klist)
    return fe.split_data(0.1)
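
A minimal usage sketch, assuming split_data(0.1) returns the usual four-way train/test split; the variable names below are illustrative, not part of the original pipeline:

# Hypothetical caller; assumes DataManager() returns (X_train, X_test, y_train, y_test).
X_train, X_test, y_train, y_test = DataManager()
print(X_train.shape, X_test.shape)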
Example #3
def init():
    global X, feat, sess, soft_pred, all_alphas, keep

    feat = FeatureExtractor(device='cpu')

    # Rebuild the TF 1.x graph for single-sample inference.
    tf.reset_default_graph()
    X, keep, y, optimizer, loss, lstm_variables, soft_pred, all_alphas = build_model(
        batch_size=1)

    # Locate the registered Azure ML model and prepare to restore its weights.
    model_root = Model.get_model_path('crash-detection')
    saver = tf.train.Saver()

    # Allow soft device placement and grow GPU memory on demand.
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    init = tf.global_variables_initializer()
    sess.run(init)
    saver.restore(sess, os.path.join(model_root, 'model/final_model'))
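
A hedged inference sketch using the globals initialized above; the placeholder names come from the snippet, while the feature shape and the keep-probability value of 1.0 are assumptions:

def predict(features):
    # 'features' is one pre-extracted feature batch (batch_size=1 above);
    # its exact shape depends on build_model and is assumed here.
    probs, alphas = sess.run([soft_pred, all_alphas],
                             feed_dict={X: features, keep: 1.0})
    return probs, alphas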
Example #4
def main(args):
    es = Elasticsearch(f'es:{args.p}')
    create_index(es, args.index)
    image_dir = os.path.join(os.path.dirname(__file__), 'images')
    loader = Dataloader(image_dir, 32)
    fe = FeatureExtractor()

    # For each batch, extract feature vectors and bulk-index them together
    # with the corresponding image paths.
    for i in tqdm(range(len(loader))):
        path, image = loader[i]
        vector = fe.predict(image)
        docs = [{
            '_index': args.index,
            '_source': {
                'path': str(p),
                'vector': list(v)
            }
        } for p, v in zip(path, vector)]
        helpers.bulk(es, docs)

    print("Preparing complete")
Example #5
    validloader = DataLoader(dataset=valid_set,
                             num_workers=4,
                             batch_size=1,
                             shuffle=False)
    generator_net = Generator(upscale_factor=upscale_factor, num_blocks=16)
    discriminator_net = Discriminator()

    # SRGAN-style objectives: adversarial (BCE), content (MSE), and total-variation regularization.
    adversarial_criterion = nn.BCELoss()
    content_criterion = nn.MSELoss()
    tv_reg = TV_Loss()

    generator_optimizer = optim.Adam(generator_net.parameters(),
                                     lr=generator_lr)
    discriminator_optimizer = optim.Adam(discriminator_net.parameters(),
                                         lr=discriminator_lr)
    feature_extractor = FeatureExtractor()

    if torch.cuda.is_available() and opt.cuda:
        generator_net.cuda()
        discriminator_net.cuda()
        adversarial_criterion.cuda()
        content_criterion.cuda()
        feature_extractor.cuda()

    generator_running_loss = 0.0
    generator_losses = []
    discriminator_losses = []
    PSNR_valid = []

    if opt.resume != 0:
        check_point = torch.load(check_points_dir + "check_point_epoch_" +
Example #6
def main():
    # parse arguments
    cfg = parse_arguments(funcs=[add_arguments])

    # get the name of a model
    arch_name = models.utils.set_arch_name(cfg)

    # set a logger
    logger = utils.Logger(cfg, arch_name)

    # construct a model
    logger.print('Building a model ...')
    model, image_size = models.set_model(cfg)

    # profile the model
    dummy_input = torch.randn(1, 3, image_size, image_size)  # avoid shadowing the built-in input()
    macs, params = profile(model, inputs=(dummy_input, ), verbose=False)
    logger.print(
        f'Name: {arch_name}    (Params: {int(params)}, FLOPs: {int(macs)})')

    # set other options
    criterion = nn.CrossEntropyLoss()
    optimizer = set_optimizer(model, cfg)
    lr_scheduler = set_lr_scheduler(optimizer, cfg)

    # load dataset
    loaders = datasets.set_dataset(cfg, image_size)

    # set a trainer
    trainer = Trainer(cfg=cfg,
                      model=model,
                      criterion=criterion,
                      optimizer=optimizer,
                      lr_scheduler=lr_scheduler,
                      loaders=loaders,
                      logger=logger)

    # set device
    trainer.set_device()

    # run
    if cfg.run_type == 'train':
        # set hooks
        if cfg.load is not None:
            if not cfg.resume:
                trainer.register_hooks(loc='before_train', func=[load_init])
            else:
                trainer.register_hooks(loc='before_train', func=[load_resume])
        if cfg.step_location == 'epoch':
            trainer.register_hooks(loc='after_epoch', func=[step_lr_epoch])
        else:
            trainer.register_hooks(loc='after_batch', func=[step_lr_batch])
        trainer.register_hooks(loc='after_epoch',
                               func=[save_train, summarize_reports])

        trainer.train()

    elif cfg.run_type == 'validate':
        # set hooks
        trainer.register_hooks(loc='before_epoch', func=[load_valid])
        trainer.register_hooks(loc='after_epoch', func=[summarize_reports])

        trainer.validate()

    elif cfg.run_type == 'test':
        # set hooks
        trainer.register_hooks(loc='before_epoch', func=[load_valid])
        trainer.register_hooks(loc='after_epoch', func=[save_pred])

        trainer.test()

    elif cfg.run_type == 'analyze':
        # set hooks
        trainer.register_hooks(loc='before_epoch', func=[load_valid])
        # extract features
        from utils import FeatureExtractor
        extractor = FeatureExtractor()
        trainer.register_hooks(loc='before_epoch', func=[extractor.initialize])
        trainer.register_hooks(loc='after_batch',
                               func=[extractor.check_feature])
        trainer.register_hooks(loc='after_epoch',
                               func=[extractor.save_feature])

        trainer.analyze()
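
A hedged sketch of a custom hook in the same style; it assumes hooks are called with the trainer instance and that the optimizer and logger passed to Trainer are exposed as attributes (neither is confirmed by the snippet):

def log_lr(trainer):
    # Hypothetical hook: report the current learning rate after each epoch.
    for group in trainer.optimizer.param_groups:
        trainer.logger.print(f"lr: {group['lr']}")

# Registered the same way as the built-in hooks above.
trainer.register_hooks(loc='after_epoch', func=[log_lr])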
Example #7
    def build_encoder(self):
        # Wrap the configured backbone in a FeatureExtractor; extract_list presumably
        # names the layers whose outputs should be returned.
        model = FeatureExtractor(self.encoder_name, self.extract_list)
        return model
Example #8
import os
from PIL import Image
from flask import Flask, render_template, request, send_from_directory
from tensorflow.keras.applications.mobilenet_v2 import MobileNetV2, preprocess_input
from elasticsearch import Elasticsearch
from utils import FeatureExtractor

import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--index', type=str, default='image')
args = parser.parse_args()

app = Flask(__name__)
fe = FeatureExtractor()
es = Elasticsearch('es:9200')
APP_ROOT = os.path.dirname(os.path.abspath(__file__))


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/query', methods=["POST"])
def query():
    f = request.files['query_image']
    image = Image.open(f.stream)
    image.save(f.filename)

    image = image.resize((224, 224))
    query_vector = fe.extract(image)