# Example #1
    def __init__(self, param):
        """Store the configuration and wire up the pipeline stages.

        The shared ``common_param`` section is merged into both the
        preprocessing and training sections before the Prepro / Train /
        Infer / Logger components are constructed.
        """
        self.param = param
        # Fold the shared settings into each stage-specific section.
        for section in ("prepro_param", "train_param"):
            self.param[section].update(self.param["common_param"])

        train_cfg = self.param["train_param"]
        self.Prepro = Prepro(self.param["prepro_param"])
        self.Train = Train(train_cfg)
        self.Infer = Infer(train_cfg)
        self.Logger = Logger(train_cfg)
# Example #2
            num_workers=int(cfg.WORKERS))

        # Test dataset
        # NOTE(review): this fragment is truncated — the enclosing function
        # and the call whose final argument appears on the line above are
        # outside this snippet.
        testdataset = data.StoryDataset(dir_path,
                                        video_transforms,
                                        is_train=False)

        # Evaluation loader: fixed order (shuffle=False) so metrics are
        # computed over a deterministic sequence; drop_last keeps batch
        # shapes uniform across GPUs.
        testloader = torch.utils.data.DataLoader(
            testdataset,
            batch_size=cfg.TRAIN.ST_BATCH_SIZE * num_gpu,
            drop_last=True,
            shuffle=False,
            num_workers=int(cfg.WORKERS))

        # Mode dispatch: FID eval, FVD eval, checkpoint inference, or training.
        if args.eval_fid:
            algo = Infer(output_dir, 1.0)
            algo.eval_fid2(testloader, video_transforms, image_transforms)

        elif args.eval_fvd:
            algo = Infer(output_dir, 1.0)
            algo.eval_fvd(imageloader, storyloader, testloader, cfg.STAGE)

        # NOTE(review): prefer `args.load_ckpt is not None` over `!= None`.
        elif args.load_ckpt != None:
            # For inference training result
            algo = Infer(output_dir, 1.0, args.load_ckpt)
            algo.inference(imageloader, storyloader, testloader, cfg.STAGE)
        else:
            # For training model
            algo = GANTrainer(output_dir, args, ratio=1.0)
            algo.train(imageloader, storyloader, testloader, cfg.STAGE)
    else:
# Example #3
from argparse import ArgumentParser
from inference import Infer


def _str2bool(value):
    """Parse a truthy/falsy CLI string.

    Needed because ``type=bool`` is a known argparse trap: ``bool("False")``
    is True, so any non-empty value — including "False" — used to enable
    the GPU. Accepts common spellings; anything else is False.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ("1", "true", "yes", "y", "on")


parser = ArgumentParser()

parser.add_argument("modelname", help="name of model to use")
parser.add_argument("imagepath", help="relative path to image")
# `--use_gpu` alone -> True (const); absent -> False (default);
# `--use_gpu <value>` -> parsed by _str2bool (bug fix vs. type=bool).
parser.add_argument("--use_gpu",
                    help="use gpu or not",
                    nargs="?",
                    default=False,
                    const=True,
                    type=_str2bool)
args = parser.parse_args()

infer = Infer(args.use_gpu)

try:
    infer.infer(args.imagepath, args.modelname)
except Exception as exc:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
    print("Something BAD happened!!!")
    print(exc)
from inference import Infer
from models.AlbuNet.AlbuNet import AlbuNet
import segmentation_models_pytorch as smp

# Build the segmentation network first, then hand it to the inference driver.
net = smp.Unet("se_resnext50_32x4d", classes=6)
net.cuda()

engine = Infer(threshold=0.75)
engine.inference(net)
# Example #5
def ajax_index():
	"""Run inference on the uploaded file and render the preview page."""
	# NOTE(review): pathToFile, algo, use_gpu and filename are not defined
	# in this snippet — presumably module-level globals set by an upload
	# handler elsewhere; verify against the full application.
	count, path = Infer().infer(pathToFile, algo, use_gpu)
	
	return render_template('preview.html', count = int(count), original = 'uploads/'+str(filename),newimg = "images/"+path)
# Example #6
import os
from flask import Flask, flash, request, redirect, url_for, render_template
from werkzeug.utils import secure_filename
import json
from argparse import ArgumentParser

#***************************************************To process IMAGE******************************************************************#
from inference import Infer


#***************************************************WEB APP******************************************************************#

# Whitelisted upload extensions (lower-case, without the dot).
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
# Queried once at import time to populate the UI drop-downs.
AVAILABLE_MODELS, AVAILABLE_DEVICES = Infer().getAvailableModelsAndDevices()


def allowed_file(filename):
	"""Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
	if '.' not in filename:
		return False
	extension = filename.rsplit('.', 1)[1].lower()
	return extension in ALLOWED_EXTENSIONS
	
# Flask application object; route handlers are registered below.
app = Flask(__name__)

@app.route("/")
@app.route("/upload")
def upload():
	"""Render the upload page with the available model and device choices."""
	context = {"models": AVAILABLE_MODELS, "devices": AVAILABLE_DEVICES}
	return render_template('upload.html', **context)

@app.route('/ajax/index')
def ajax_index():
	"""Run inference on the uploaded file (truncated in this snippet)."""
	# NOTE(review): pathToFile, algo and use_gpu are not defined here —
	# presumably globals set by an upload handler; the function's return
	# statement has been cut off by the snippet boundary.
	count, path = Infer().infer(pathToFile, algo, use_gpu)
	
from inference import Infer
from datetime import datetime
import os
import segmentation_models_pytorch as smp

# Two Infer instances split the same image folder into two halves
# (batch_id 0 and 1) and write results into the same output directory.
inferers = [
    Infer(
        rez_dir="inferred",
        image_folder="inferred2/overlay",
        batch_size=3,
        num_batches=2,
        batch_id=batch_id,
    )
    for batch_id in (0, 1)
]

model = smp.Unet("se_resnext50_32x4d")
model.cuda()
for inferer in inferers:
    inferer.inference(model)
# Example #8
import os
from datetime import datetime  # bug fix: datetime.now() is used below but was never imported

import segmentation_models_pytorch as smp

# NOTE(review): extract_frames, make_video_from_frames and Infer are not
# defined or imported in this snippet — presumably provided by imports cut
# off by the snippet boundary; verify against the full file.

input_file = "ufc234_gastelum_bisping_1080p_nosound_cut.mp4"
output_file = "test.mp4"
intermediate_dir = "video_frames"
intermediate_dir2 = "video_frames_processed"

# Stage 1: split the source video into individual frames.
print("Frame extraction")
now = datetime.now()
extract_frames(input_file, intermediate_dir)
print(datetime.now() - now)

# Stage 2: run segmentation over the frames in batches to bound memory use.
print("Inference")
num_batches = 70
model = smp.Unet("se_resnext50_32x4d")
model.cuda()
for i in range(0, num_batches):
    inferer1 = Infer(
        rez_dir=intermediate_dir2,
        image_folder=intermediate_dir,
        batch_size=2,
        num_batches=num_batches,
        batch_id=i,
        threshold=0.5,
    )
    inferer1.inference(model)
print(datetime.now() - now)

# Stage 3: reassemble the processed mask frames into a video.
print("Making video")
make_video_from_frames(frame_dir=os.path.join(intermediate_dir2, "mask"),
                       target_path=output_file)
print(datetime.now() - now)
# Example #9
# NOTE(review): PUB is presumably a logger/publisher created earlier in the
# file — its definition is outside this snippet.
PUB.info("Loading new Thread")
PUB.info('OpenCV '+cv2.__version__)

def lambda_handler(event, context):
    """Entry point stub — intentionally a no-op; the real work runs elsewhere.

    Both *event* and *context* are ignored and None is returned.
    """
    return None

# Start the camera stream; failures are only logged.
# NOTE(review): if VideoStream() raises, VS is never bound and the
# `VS.device` access below will fail with a NameError — confirm whether
# the handler should re-raise or exit instead.
try:
    VS = VideoStream().start()
except Exception as err:
    PUB.exception(str(err))
PUB.info('Camera is ' + VS.device)

# Stream frames to an MJPEG file for external consumption.
OUTPUT = FileOutput('/tmp/results.mjpeg', VS.read(), PUB)
OUTPUT.start()

# Classification model used by main_loop below.
model = Infer()
def main_loop():
    """Continuously read camera frames, classify each, and overlay the result.

    The loop body is truncated by the snippet boundary.
    """
    try:
        last_update = time.time()
        results = []
        fps = 0
        # `while 42` is an idiosyncratic spelling of an infinite loop.
        while 42 :
            frame = VS.read()
            # Resize to the square input resolution the model expects.
            frame = cv2.resize(frame, (model.data_shape, model.data_shape))
            try:
                category = model.do(frame)
                results.append(category)
                # Draw "<fps> - <category>" in the top-left corner.
                font = cv2.FONT_HERSHEY_DUPLEX
                title = str(fps) + " - " + category
                cv2.putText(frame, title, (6, 24), font, 0.5, (255, 255, 255), 1)