Example #1
0
    def sort_dependencies(cls, apps='all'):
        """
        Return a list of models in their order of dependency.

        :param apps: 'all' or a comma-separated string of app labels to
            restrict the sort to.
        :return: list of model classes (only those that define ``sql``)
            ordered so that every model appears after its dependencies.
        """
        dependencies = defaultdict(set)
        model_objects = []
        if apps != 'all':
            apps = apps.split(',')

        # Build the dependency graph, keyed by default table name.
        for c in get_subclasses(ViewedModel):
            if apps != 'all' and c._meta.app_label not in apps:
                continue
            class_string = model_default_table_name(c)
            for dependency in getattr(c, 'dependencies', []):
                m = get_model(app_name=dependency[0], model_name=dependency[1])
                dependencies[class_string].add(model_default_table_name(m))

        flattened = toposort_flatten(dependencies)
        for name in flattened:
            # Split only on the first underscore so model names that contain
            # underscores do not raise ValueError on unpacking.
            # NOTE(review): assumes app labels contain no underscore — confirm.
            app, model = name.split('_', 1)
            if app == 'None' or model == 'viewedmodel':
                continue
            model_object = get_model(app, model)
            if hasattr(model_object, 'sql'):
                model_objects.append(model_object)

        return model_objects
Example #2
0
 def post(self):
     """Retrain the model using the JSON payload of the request."""
     # The model graph misbehaves unless the model is reloaded per request,
     # so fetch a fresh model each time as a workaround.
     helpers.get_model()
     # flask-restplus api models do not express lists of lists well, so the
     # route receives the data as raw JSON instead.
     payload = api.payload
     helpers.retrain_model(payload)
     return
Example #3
0
def main(ARGS):
    """Load a trained PyTorch model and export it as a traced TorchScript module.

    :param ARGS: parsed arguments; must provide ``import_pytorch_model_path``,
        ``model_type``, ``input_size`` and ``export_traced_model_path``.
    :raises AssertionError: if ``import_pytorch_model_path`` is None.
    """
    if ARGS.import_pytorch_model_path is None:
        raise AssertionError("Path should not be None")

    ######### distance_metric = 1 #### if CenterLoss = 0, If Cosface = 1

    ####### Device setup
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    ####### Model setup
    print("Use CUDA: " + str(use_cuda))
    print('Model type: %s' % ARGS.model_type)
    model = get_model(ARGS.model_type, ARGS.input_size)

    # Remap weights onto CPU when CUDA is unavailable.
    state_dict = torch.load(ARGS.import_pytorch_model_path,
                            map_location=None if use_cuda else 'cpu')
    model.load_state_dict(state_dict)

    model.to(device)
    model.eval()

    # Trace with a dummy 112x112 RGB batch and persist the script module.
    example = torch.rand(1, 3, 112, 112)
    traced_script_module = torch.jit.trace(model, example)
    traced_script_module.save(ARGS.export_traced_model_path)
Example #4
0
def main(argv):
    """
    Build a PCA/eigenface model and a CSV dataset from a directory of images.

    Command line example:
    python builddataset.py -i 'img/dir' -o 'out/dir' -w <img width> -h <img height> -n <number of components for the model>

    NOTE: written for Python 2 (print statements).
    """
    # Defaults used when the corresponding option is not given.
    indir = '.\dataset'
    outdir = '.\output'
    w = 320
    h = 243
    nc = 150

    try:
        opts, args = getopt.getopt(
            argv, 'i:o:w:h:n:',
            ['img_dir=', 'out_dir=', 'width=', 'height=', 'n_components='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        # NOTE(review): '-h' is taken by --height, so there is no short help flag.
        if opt in ('-h', '--height'):
            h = int(arg)
        elif opt in ('-w', '--width'):
            w = int(arg)
        elif opt in ('-o', '--out_dir'):
            outdir = arg
        elif opt in ('-i', '--img_dir'):
            indir = arg
        elif opt in ('-n', '--n_components'):
            nc = int(arg)
        else:
            usage()
            sys.exit(2)

    # Load every regular file in the input directory as a (w, h) array.
    files = [join(indir, f) for f in listdir(indir) if isfile(join(indir, f))]
    data = list()
    data_labels = list()
    for file in files:
        data.append(helpers.get_nparray_from_img(file, (w, h)))
        data_labels.append(
            basename(file))  # temporary while we do not have more info

    # Fit the model and persist it next to the CSV dataset.
    model = helpers.get_model(n_components=nc, data=data)
    helpers.dump(model, join(outdir, MODEL_FILE), compress_level=3)

    with open(join(outdir, DATASET_FILE), 'wb') as f:
        for index, eigenface in enumerate(model.transform(data)):
            f.write('"{}","{}","{}"\n'.format(index, data_labels[index],
                                              ' '.join(map(str, eigenface))))

    print ''
    print 'Created {} and {} in directory {}.'.format(MODEL_FILE, DATASET_FILE,
                                                      outdir)
    print 'PCA Explained Variance Ratio: {}'.format(
        sum(model.explained_variance_ratio_))
    print 'Obs.: if this number is not satisfactory try increasing the number of components'
    print ''
Example #5
0
 def infer(self, input):
     """Run inference on *input* and return the result image as an array."""
     # Preprocess first: the processor also records the image and the
     # result shape that the model construction needs below.
     preprocessed = self._imageProcessor.loadAndPreprocess(input)
     im = self._imageProcessor._im
     result_shape = self._imageProcessor._result_shape
     # The model is loaded only after the input has been processed because
     # instantiating it requires the size of the input image.
     model = get_model(self._ctx, 'model/model.onnx', im)
     conf, result_img, blended_img, raw = predict(preprocessed, result_shape, model, im)
     return np.array(result_img)
def test(batch_size, img_size, test_images_path, model_path):
    """Evaluate a saved model on a directory of test images.

    Returns a (predictions dataframe, accuracy) pair for the 'nnet_M' model.
    """
    generator = helpers.generate_test_data(batch_size, img_size,
                                           preprocess_input, test_images_path)
    net = helpers.get_model(model_path)
    raw_scores = helpers.predict(net, generator)
    best = helpers.get_best_guess(raw_scores)
    accuracy = helpers.get_accuracy(best, generator)
    mapping = helpers.get_class_mapping(generator)
    predictions = helpers.final_prediction(mapping, best)
    # Assemble image / true class / prediction rows into a dataframe.
    frame = helpers.create_dataframe(['image', 'class', 'nnet_M'])
    frame = helpers.add_records(frame, generator, predictions, 'nnet_M')

    return frame, accuracy
Example #7
0
def main(argv):
    """
    Command line example:
    python builddataset.py -i 'img/dir' -o 'out/dir' -w <img width> -h <img height> -n <number of components for the model>
    """
    indir = '.\dataset'
    outdir = '.\output'
    w = 320
    h = 243
    nc = 150

    try:
        opts, args = getopt.getopt(argv, 'i:o:w:h:n:', ['img_dir=', 'out_dir=', 'width=', 'height=', 'n_components='])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--height'):
            h = int(arg)
        elif opt in ('-w', '--width'):
            w = int(arg)
        elif opt in ('-o', '--out_dir'):
            outdir = arg
        elif opt in ('-i', '--img_dir'):
            indir = arg
        elif opt in ('-n', '--n_components'):
            nc = int(arg)
        else:
            usage()
            sys.exit(2)

    files = [join(indir, f) for f in listdir(indir) if isfile(join(indir, f))]
    data = list()
    data_labels = list()
    for file in files:
        data.append(helpers.get_nparray_from_img(file, (w, h)))
        data_labels.append(basename(file)) # temporary while we do not have more info

    model = helpers.get_model(n_components=nc, data=data)
    helpers.dump(model, join(outdir, MODEL_FILE), compress_level=3)

    with open(join(outdir, DATASET_FILE), 'wb') as f:
        for index, eigenface in enumerate(model.transform(data)):
            f.write('"{}","{}","{}"\n'.format(index, data_labels[index], ' '.join(map(str, eigenface))))

    print ''
    print 'Created {} and {} in directory {}.'.format(MODEL_FILE, DATASET_FILE, outdir)
    print 'PCA Explained Variance Ratio: {}'.format(sum(model.explained_variance_ratio_))
    print 'Obs.: if this number is not satisfactory try increasing the number of components'
    print ''
def run(rank, world_size):
    """Run one pipeline-parallel training step on this rank.

    Initializes the process group and RPC, wraps the model in a two-stage
    MultiProcessPipe across ranks, then performs a single
    forward/backward/step. Rank 1 computes the loss; other ranks feed
    gradients back through back_helper.
    """
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "10638"
    dist_init(rank, world_size)
    # RPC gets its own rendezvous port, distinct from the process group's.
    os.environ["MASTER_PORT"] = "10639"
    dist.rpc.init_rpc(f"worker{rank}", rank=rank, world_size=world_size)
    initialize_model_parallel(1, world_size)

    model = get_model()
    data, target = get_data()[0]
    loss_fn = get_loss_fun()

    device = torch.device("cuda",
                          rank) if DEVICE == "cuda" else torch.device("cpu")

    # Split the model into two stages (balance [2, 1]) across the workers.
    model = MultiProcessPipe(
        model,
        balance=[2, 1],
        style=MultiProcessPipe.MultiProcess,
        worker_map={
            0: "worker0",
            1: "worker1"
        },  # Needed to convert ranks to RPC worker names
        input_device=device,
    ).to(device)

    # define optimizer and loss function
    optimizer = optim.SGD(model.parameters(), lr=0.001)

    # zero the parameter gradients
    optimizer.zero_grad()

    # outputs and target need to be on the same device
    # forward step
    outputs = model(data.to(device))
    # compute loss
    if rank == 1:
        loss = loss_fn(outputs.to(device), target.to(device))

        # backward + optimize
        loss.backward()
        optimizer.step()
    else:
        # Non-final stages only propagate gradients through the pipe.
        model.back_helper(outputs)

    print(f"Finished Training Step on {rank}")
    dist.rpc.shutdown()

    del model
Example #9
0
def run(rank, world_size):
    """Run one training step through a PipeRPCWrapper-wrapped model.

    Rank 0 drives the whole step; every other rank only serves RPC
    requests and returns after rpc.shutdown().
    """
    torch_pg.init_mpi()
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "10638"
    dist_init(rank, world_size)  # FIXME (supports gloo)
    # RPC uses a separate rendezvous port from the process group.
    os.environ["MASTER_PORT"] = "10639"
    torch.distributed.rpc.init_rpc(f"worker{rank}",
                                   rank=rank,
                                   world_size=world_size)
    initialize_model_parallel(1, world_size, pipeline_backend="mpi")

    if rank == 1:
        # For RPC, all ranks other than 0 just need to call rpc.shutdown()
        torch.distributed.rpc.shutdown()
        return

    model = get_model()
    data, target = get_data()[0]
    loss_fn = get_loss_fun()

    device = torch.device("cuda", rank)

    # Two pipeline stages (balance [2, 1]) mapped onto RPC workers.
    model = fairscale.nn.PipeRPCWrapper(
        model,
        balance=[2, 1],
        worker_map={
            0: "worker0",
            1: "worker1"
        },  # Needed to convert ranks to RPC worker names
        input_device=device,
    ).to(device)

    # We can't directly access the model on each worker, so we need to call
    # foreach_worker with a callback to setup the optimizer
    model.foreach_worker(register_optimizer, {"lr": 0.001}, include_self=True)

    outputs = model(data.to(device))
    loss = loss_fn(outputs.to(device), target.to(device))
    loss.backward()

    # Same as earlier, use foreach_worker to step the optimizer on each rank
    model.foreach_worker(run_optimizer, include_self=True)

    print(f"Finished Training Step on {rank}")

    torch.distributed.rpc.shutdown()

    del model
Example #10
0
    def __init__(self, qr, name):
        # type: (QR, str) -> None
        """
        Create instance of COLL.

        :param qr: query runner bound to this collection
        :param name: collection name; prefixed with the tenant schema when
            the current thread carries a ``tenancy_code`` and a schema is set
        """
        self._where = None
        self._entity = None
        self._none_schema_name = name  # name without any schema prefix
        import threading
        # current_thread() replaces the deprecated currentThread() alias.
        if hasattr(threading.current_thread(),
                   "tenancy_code") and tenancy.get_schema() != "":
            self.name = tenancy.get_schema() + "." + name
        else:
            self.name = name
        self._model = get_model(name)

        self.qr = qr
Example #11
0
from helpers import get_data, get_loss_fun, get_model
import torch
import torch.optim as optim

import fairscale

# Use a GPU when available; RANK selects which CUDA device.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
RANK = 0  # example

model = get_model()
data, target = get_data()[0]
loss_fn = get_loss_fun()

# Split the model into two pipeline stages (balance [2, 1]).
model = fairscale.nn.Pipe(model, balance=[2, 1])

# define optimizer and loss function
optimizer = optim.SGD(model.parameters(), lr=0.001)

# zero the parameter gradients
optimizer.zero_grad()

device = torch.device("cuda",
                      RANK) if DEVICE == "cuda" else torch.device("cpu")

# outputs and target need to be on the same device
# forward step
outputs = model(data.to(device).requires_grad_())
# compute loss
loss = loss_fn(outputs.to(device), target.to(device))

# backward + optimize
# NOTE(review): the snippet appears truncated here — the expected
# loss.backward() / optimizer.step() calls are missing.
Example #12
0
from flask import Flask, Blueprint, render_template
from flask_restplus import Resource, Api, reqparse, fields

from flask import request
from flask import jsonify

# Force the Theano backend before Keras is loaded.
# NOTE(review): relies on `os` being imported earlier in the file — confirm.
os.environ['KERAS_BACKEND'] = 'theano'

app = Flask(__name__, static_url_path='/static')
blueprint = Blueprint('api', __name__, url_prefix='/api')
api = Api(blueprint, doc='/doc/')
app.register_blueprint(blueprint)

# Load the model once at startup so the first request is not slow.
print('Loading Keras model...')
helpers.get_model()

# Swagger schemas for the API payloads.
a_name = api.model('Name',
                   {'name': fields.String('The firstname of the person.')})
a_dataset = api.model(
    'Dataset', {
        'data':
        fields.String(
            'A 2D array containing the dataset, entered as a JSON string.')
    })


@app.route('/')
def index():
    """Serve the landing page."""
    template = 'index.html'
    return render_template(template)
Example #13
0
def main(ARGS):
    """Evaluate a face-recognition model on five LFW-style benchmarks.

    Loads the checkpoint at ``ARGS.model_path`` and runs the identical
    forward-pass evaluation (10 folds, distance_metric=1) on LFW, CALFW,
    CPLFW, CFP_FF and CFP_FP, printing each result.

    :raises AssertionError: if ``ARGS.model_path`` is None.
    """
    if ARGS.model_path is None:
        raise AssertionError("Path should not be None")

    ######### distance_metric = 1 #### if CenterLoss = 0, If Cosface = 1

    ####### Device setup
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    ####### Model setup
    print("Use CUDA: " + str(use_cuda))
    print('Model type: %s' % ARGS.model_type)
    model = get_model(ARGS.model_type, ARGS.input_size)

    if use_cuda:
        model.load_state_dict(torch.load(ARGS.model_path))
    else:
        model.load_state_dict(torch.load(ARGS.model_path, map_location='cpu'))

    model.to(device)
    embedding_size = 512
    model.eval()

    # The five evaluation sections below were identical except for the
    # dataset name and root directory, so they are driven by one table.
    benchmarks = (
        ('LFW', './data/lfw_112'),
        ('CALFW', './data/calfw_112'),
        ('CPLFW', './data/cplfw_112'),
        ('CFP_FF', './data/cfp_112'),
        ('CFP_FP', './data/cfp_112'),
    )
    for eval_type, root_dir in benchmarks:
        _evaluate_benchmark(model, eval_type, root_dir, ARGS,
                            embedding_size, device)


def _evaluate_benchmark(model, eval_type, root_dir, ARGS, embedding_size,
                        device):
    """Evaluate *model* on one benchmark dataset and print the result."""
    dataset, loader = get_evaluate_dataset_and_loader(
        root_dir=root_dir,
        type=eval_type,
        num_workers=ARGS.num_workers,
        input_size=[112, 112],
        batch_size=ARGS.batch_size)

    print('Runnning forward pass on {} images'.format(eval_type))

    # 10-fold evaluation; distance_metric=1 selects the Cosface metric.
    tpr, fpr, accuracy, val, val_std, far = evaluate_forward_pass(
        model,
        loader,
        dataset,
        embedding_size,
        device,
        lfw_nrof_folds=10,
        distance_metric=1,
        subtract_mean=False)

    print_evaluate_result(eval_type, tpr, fpr, accuracy, val, val_std, far)
Example #14
0
def train(rank: int, world_size: int, epochs: int, use_oss: bool):
    """Train the example model under DDP, optionally sharding the optimizer.

    :param rank: rank of this process in the process group
    :param world_size: total number of processes
    :param epochs: number of passes over the dataloader
    :param use_oss: when True, wrap SGD in fairscale's OSS optimizer-state
        sharding instead of using plain torch SGD
    """

    # DDP
    dist_init(rank, world_size)
    device = torch.device("cpu") if DEVICE == "cpu" else rank  # type:ignore

    # Problem statement
    model = get_model().to(device)
    dataloader = get_data(n_batches=1)
    loss_fn = get_loss_fun()

    optimizer: Optional[Union[OSS, torch.optim.SGD]] = None

    if not use_oss:
        optimizer = torch.optim.SGD(params=model.parameters(), lr=1e-4)
    else:
        base_optimizer = torch.optim.SGD
        base_optimizer_arguments = {
            "lr": 1e-4
        }  # any optimizer specific arguments, LR, momentum, etc...
        optimizer = OSS(params=model.parameters(),
                        optim=base_optimizer,
                        broadcast_buffer_size=2**17,
                        **base_optimizer_arguments)

    training_start = time.monotonic()
    # Any relevant training loop, nothing specific to OSS. For example:
    model.train()

    for _ in range(epochs):
        for (data, target) in dataloader:
            data, target = data.to(device), target.to(device)

            # Train
            model.zero_grad()
            outputs = model(data)
            loss = loss_fn(outputs, target)
            loss.backward()

            # if you want to clip the gradients / get the current max:
            max_norm = 1000.0
            norm_type = 1
            # OSS shards the gradients, so clipping must go through the
            # optimizer's own clip_grad_norm instead of the torch utility.
            if not use_oss:
                _total_norm = torch.nn.utils.clip_grad_norm_(
                    model.parameters(), max_norm,
                    norm_type=norm_type)  # type: ignore
            else:
                optimizer = cast(OSS, optimizer)
                _total_norm = optimizer.clip_grad_norm(max_norm,
                                                       norm_type=norm_type)

            optimizer.step()

            print(f"Loss: {loss.item()}")

    training_end = time.monotonic()
    print(
        f"[{dist.get_rank()}] : Training done. {training_end-training_start:.2f} sec"
    )

    if DEVICE == "cuda":
        max_memory = torch.cuda.max_memory_allocated(rank)
        print(f"[{dist.get_rank()}] : Peak memory {max_memory:.1f}MiB")
Example #15
0
    def lookup(self,
               source=None,
               local_field=None,
               foreign_field=None,
               alias=None,
               *args,
               **kwargs):
        # type: (str,str,str,str) -> AGGREGATE
        # type: (COLL,str,str,str) -> AGGREGATE
        """
        Create lookup aggregate (mongodb ``$lookup`` stage).

        :param source: where this collection will lookup; for mongodb that is
            'from'. May be a collection name or a COLL instance.
        :param local_field: field in this collection used for the join; for
            mongodb that is 'localField'
        :param foreign_field: field in the source collection used for the
            join; for mongodb that is 'foreignField'
        :param alias: the alias after lookup; for mongodb that is 'as'
        :param args:
        :param kwargs: may carry source/local_field/foreign_field/alias
            instead of the positional parameters
        :return: self, for chaining
        :raises Exception: if local_field is not among the selected fields,
            a required kwarg is missing, or foreign_field is not a field of
            the source collection
        """
        if self.get_selected_fields().count(local_field) == 0:
            msm_details = ""
            for x in self.get_selected_fields():
                msm_details += x + "\n"
            raise Exception(
                "What is '{0}'?, Your selected fields are:\n {1}".format(
                    local_field, msm_details))

        if args == () and kwargs == {}:
            _source = source
            if source.__class__ is COLL:
                _source = source.name

            kwargs.update(source=_source,
                          local_field=local_field,
                          foreign_field=foreign_field,
                          alias=alias)
        else:
            # dict.has_key() was removed in Python 3; 'in' is equivalent in
            # both Python 2 and 3. Error messages are unchanged.
            for required in ("source", "local_field", "foreign_field",
                             "alias"):
                if required not in kwargs:
                    raise Exception("'{0}' was not found".format(required))
        source_model = None
        if isinstance(source, COLL):
            source_model = source._model
        else:
            source_model = get_model(source)

        # Make the joined alias (and its sub-fields) selectable downstream.
        self._selected_fields = self.get_selected_fields()
        self._selected_fields.append(alias)
        if source_model.get_fields().count(foreign_field) == 0:
            msm_details = ""
            for x in source_model.get_fields():
                msm_details += x + "\n"
            raise Exception(
                "What is '{0}'?\n '{0}'  is not in '{1}'.\n All fields of '{1}' are bellow:\n {2}"
                .format(foreign_field, source, msm_details))
        for x in source_model.get_fields():
            self._selected_fields.append(alias + "." + x)
        self._pipe.append({
            "$lookup": {
                "from": kwargs["source"],
                "localField": kwargs["local_field"],
                "foreignField": kwargs["foreign_field"],
                "as": kwargs["alias"]
            }
        })
        return self
Example #16
0
import random
import base64
import io
from flask_cors import CORS
import json

from torchvision.models.utils import load_state_dict_from_url

from helpers import get_model, from_base64, align_source, pil_to_cv2
from classes import VOC_CLASSES
from blending import instance_segmentation, poisson_blend

app = Flask(__name__)
CORS(app)  # allow cross-origin requests to this API

# Build the model sized to the Pascal VOC class list and load pretrained
# weights (fetched from S3, cached by load_state_dict_from_url) on CPU.
model = get_model(len(VOC_CLASSES))
model.load_state_dict(
    load_state_dict_from_url(
        "https://pytorch-pascal-voc-obj-detect-model.s3.us-east-2.amazonaws.com/voc-seg.pt",
        map_location="cpu"))
model.eval()


def serve_pil_image(pil_img):
    """Stream a PIL image back to the client as a JPEG response."""
    buffer = io.BytesIO()
    pil_img.save(buffer, 'JPEG')
    # Rewind so send_file reads from the start of the encoded bytes.
    buffer.seek(0)
    return send_file(buffer, mimetype='image/jpeg')


def open_image(image_bytes):