Code example #1
class DrawArea(QGLWidget):
    def __init__(self):
        self._drawer = Drawer()
        super().__init__()
        self.setMinimumSize(600, 480)
        self.resize(600, 480)

    def paintGL(self):
        self._drawer.paint()

    def initializeGL(self):
        self.qglClearColor(QtGui.QColor(25, 25, 25))
        glLineWidth(1)
        self.qglColor(QtGui.QColor(255, 0, 0))

    def resizeGL(self, w, h):
        glViewport(0, 0, w, h)

    def get_drawer(self):
        return self._drawer

    def mousePressEvent(self, event):
        candidate_point = Point2D(
            (2 * event.x() - self.width()) / self.width(),
            (2 * (self.height() - event.y()) - self.height()) / self.height())
        self._drawer.handle_candidate(candidate_point)
        self.updateGL()
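The only non-obvious arithmetic above is the mapping in mousePressEvent from Qt pixel coordinates (origin at the top-left, y growing downwards) to OpenGL normalized device coordinates in [-1, 1]. Below is a minimal, self-contained sketch of that mapping; the function name and the checks are illustrative, not part of the project.

def pixel_to_ndc(x, y, width, height):
    """Map widget pixel coordinates to OpenGL normalized device coordinates."""
    ndc_x = (2 * x - width) / width
    ndc_y = (2 * (height - y) - height) / height
    return ndc_x, ndc_y

assert pixel_to_ndc(0, 0, 600, 480) == (-1.0, 1.0)      # top-left corner
assert pixel_to_ndc(600, 480, 600, 480) == (1.0, -1.0)  # bottom-right corner
assert pixel_to_ndc(300, 240, 600, 480) == (0.0, 0.0)   # centre of the widget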
Code example #2
File: formater.py Project: mikister/feri-urnik
    def __makeTable(self, personal=False):
        '''
        Builds a string table of the schedule and appends it to
        self.daysSchedual.

        Description:
        Groups 'classes' into the 'day' list; each entry is a dict holding the
        classes that happen on that day ('classes') and the weekday index
        ('num_day'), which tells the Drawer what day it is.

        return: None
        '''

        # Json file has Course and group number
        filters = Filter(self.jsonData)
        drawer = Drawer()
        day = []
        # Date of the first entry, used as the starting point for comparisons
        date = self.classes[0].date
        class_list = []

        for entry in self.classes:
            if not personal or filters.checkGroup(entry):

                # Check whether the schedule has moved on to a new day
                if date < entry.date:
                    day.append({
                        'num_day': date.weekday(),
                        'classes': class_list
                    })
                    class_list = []
                    date = entry.date  # Start a new bucket for the new day

                class_list.append(entry)

        day.append({'num_day': date.weekday(), 'classes': class_list})
        string = ""
        for value in day:
            string += drawer.drawTable(classes=value['classes'],
                                       num_day=value['num_day'])

        self.daysSchedual.append(string)
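Because the loop only ever compares the running date with later entries, __makeTable implicitly assumes self.classes is sorted by date. Under that same assumption, the day-bucketing step can be expressed with itertools.groupby; the sketch below is an illustrative equivalent, not code from the project, and it leaves out the 'personal' filter.

from itertools import groupby

def group_by_day(classes):
    """Group a date-sorted list of class entries into per-day buckets."""
    return [
        {'num_day': day_date.weekday(), 'classes': list(entries)}
        for day_date, entries in groupby(classes, key=lambda entry: entry.date)
    ]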
Code example #3
# Imports inferred from the companion snippet in Code example #6; they are not
# shown in this excerpt:
import cv2
from keras.applications.xception import Xception, decode_predictions, preprocess_input
from utils.drawer import Drawer
from utils.xai_tool import XAITool

model = Xception(weights='imagenet')
input_size = (299, 299)


def decode(model, img_tensor):
    preds = model.predict(img_tensor)
    preds = decode_predictions(preds, top=1)[0][0]
    preds = [preds[1], preds[2]]
    return preds


# Use the Keras default preprocessing; a custom function could be swapped in here
preprocess_input = preprocess_input

# Init drawer and xai_tool
drawer = Drawer()
xai_tool = XAITool(model, input_size, decode, preprocess_input)
cap = cv2.VideoCapture(0)
while True:
    ret_run, frame = cap.read()
    xai_dict = xai_tool.vidCapRun(frame, -1)
    if 'predictions' in xai_dict:
        drawer.singleThread(frame, xai_dict['heatmap'],
                            xai_dict['activations'], xai_tool.layers[-1], -1,
                            xai_dict['predictions'])
    else:
        drawer.singleThread(frame, xai_dict['heatmap'],
                            xai_dict['activations'], xai_tool.layers[-1], -1)
# TEMP
# cap = cv2.VideoCapture(0)
# ret_run, frame = cap.read()
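XAITool presumably prepares each captured frame before calling model.predict; for reference, feeding an OpenCV frame to Xception typically involves the steps below. This is only a sketch under that assumption, not code from the project.

import cv2
import numpy as np
from keras.applications.xception import preprocess_input

def frame_to_tensor(frame, input_size=(299, 299)):
    """Turn a BGR OpenCV frame into a (1, 299, 299, 3) tensor for Xception."""
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)       # OpenCV frames are BGR
    rgb = cv2.resize(rgb, input_size)                  # Xception expects 299x299 inputs
    tensor = np.expand_dims(rgb.astype('float32'), 0)  # add the batch dimension
    return preprocess_input(tensor)                    # Xception scales pixels to [-1, 1]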
Code example #4
from dataclasses import dataclass
import numpy as np

from conmech.problem_solver import Static as StaticProblemSolver
from conmech.problems import Static
from examples.p_slope_contact_law import PSlopeContactLaw
from utils.drawer import Drawer

p_slope = 1.


@dataclass()
class StaticSetup(Static):
    grid_height: ... = 1
    cells_number: ... = (2, 5)
    inner_forces: ... = np.array([-0.2, -0.2])
    outer_forces: ... = np.array([0, 0])
    mu_coef: ... = 4
    lambda_coef: ... = 4
    contact_law: ... = PSlopeContactLaw

    @staticmethod
    def friction_bound(u_nu):
        return 0


if __name__ == '__main__':
    setup = StaticSetup()
    runner = StaticProblemSolver(setup, 'direct')

    state = runner.solve(verbose=True)
    Drawer(state).draw()
Code example #5
File: demo_reid.py Project: gordonjun2/CenterTrack
from detector import Detector

from AIC2018_iamai.ReID.ReID_CNN.Model_Wrapper_reid import ResNet_Loader
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchvision import transforms
from PIL import Image
import numpy as np
from utils.drawer import Drawer
import time

image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge', 'display']
drawer = Drawer()
cos = nn.CosineSimilarity(dim=1, eps=1e-6)


def euclidean_distance(qf, gf):
    # pairwise squared distances: ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g
    gf = gf.transpose(0, 1)
    m = qf.shape[0]
    n = gf.shape[0]
    dist_mat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \
               torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()
    dist_mat.addmm_(qf, gf.t(), beta=1, alpha=-2)  # adds -2 * qf @ gf.t()
    return dist_mat.cpu().numpy()


def cosine_similarity(qf, gf):
    gf = gf.transpose(0, 1)
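euclidean_distance builds the pairwise squared-distance matrix through the expansion ||q - g||^2 = ||q||^2 + ||g||^2 - 2 * q . g^T, with the in-place addmm_ call supplying the -2 * q . g^T term. The standalone check below verifies that identity against torch.cdist on illustrative toy tensors; it is not part of the project.

import torch

q = torch.randn(4, 128)   # query features,   shape (m, d)
g = torch.randn(6, 128)   # gallery features, shape (n, d)
expanded = (q.pow(2).sum(1, keepdim=True)        # ||q||^2 per row, broadcast over columns
            + g.pow(2).sum(1, keepdim=True).t()  # ||g||^2 per row, broadcast over rows
            - 2 * q @ g.t())                     # -2 * q . g^T
assert torch.allclose(expanded, torch.cdist(q, g).pow(2), atol=1e-3)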
Code example #6
import cv2  # needed for cv2.VideoCapture below; not shown in the original excerpt

from utils.drawer import Drawer
from utils.xai_tool import XAITool

from time import time

# For multi-threading
from threading import Thread, Lock

# Setting up the inputs for the tool:
# - model is a Keras model
# - input_size is an (x, y) tuple giving the dims of the CNN's input image
# - decode_predictions is the function Keras uses to decode the predictions
# - preprocess_input is the function Keras uses to preprocess images before they
#   are passed into the model. NOTE: this is an optional arg

# Initialising relevant classes and shared vars
drawer = Drawer()
cap = cv2.VideoCapture(0)
shared_dict = {
    'frame': None,
    'heatmap': None,
    'activations': None,
    'predictions': None,
    'select_layers_list': None,
}
data_lock = Lock()


def xaiProcessing():
    # Need to import the Keras model in the thread itself
    from keras.applications.xception import Xception, decode_predictions, preprocess_input
    model = Xception(weights='imagenet')
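The shared_dict / data_lock pair implies a producer-consumer hand-off: xaiProcessing fills the dictionary from its worker thread, and the display side reads it under the same lock. The excerpt ends before the consumer side, so the following is only a sketch of that pattern under those assumptions, not the project's actual display loop.

def snapshot_shared_state():
    """Take a consistent copy of the worker thread's latest results."""
    with data_lock:
        return dict(shared_dict)  # shallow copy made while holding the lock

# Hypothetical wiring: start the worker, then poll snapshots from the main thread.
# xai_thread = Thread(target=xaiProcessing, daemon=True)
# xai_thread.start()
# while True:
#     state = snapshot_shared_state()
#     if state['frame'] is not None:
#         pass  # hand the snapshot to the Drawer for rendering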
Code example #7
    def __init__(self):
        self._drawer = Drawer()
        super().__init__()
        self.setMinimumSize(600, 480)
        self.resize(600, 480)
Code example #8
    system_configs.update_config(
        configs["system"]
    )  # Update config.py based on retrieved 'system' parameters
    db_configs.update_config(
        configs["db"])  # Update db/base.py based on retrieved 'db' parameters

    print("system config...")
    pprint.pprint(system_configs.full)  # Show 'system' parameters in terminal

    print("db config...")
    pprint.pprint(db_configs.full)  # Show 'db' parameters in terminal

    print("loading parameters at iteration: {}".format(
        args.testiter))  # Show args.testiter in terminal

    print("building neural network...")
    nnet = NetworkFactory()  # Initialise CenterNet's neural network
    print("loading parameters...")
    nnet.load_params(args.testiter)  # To locate CenterNet's pretrained model

    drawer = Drawer()  # Initialise Drawer to add bboxes in frames later

    # nnet.cpu()  # Uncomment if using cpu
    nnet.cuda()  # Comment if using cpu
    nnet.eval_mode()

    if args.file_dir[args.file_dir.rfind('.') + 1:].lower() in video_ext:
        show_video(args.file_dir, nnet, drawer, args.score_min, args.save)
    else:
        show_image(args.file_dir, nnet, drawer, args.score_min, args.save)
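The branch above picks show_video or show_image by slicing everything after the last '.' in args.file_dir and comparing it against video_ext. An equivalent, illustrative helper using os.path.splitext (not code from the project), valid for paths that actually carry an extension:

import os

def is_video(path, video_ext=('mp4', 'mov', 'avi', 'mkv')):
    """True if the path's extension names a supported video format."""
    return os.path.splitext(path)[1].lstrip('.').lower() in video_ext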
Code example #9
def main():
    # Configs
    args = get_args()
    cfg = Config(args.config)
    pose_kwargs = cfg.POSE
    clf_kwargs = cfg.CLASSIFIER
    tracker_kwargs = cfg.TRACKER

    # Initiate video/webcam
    source = args.source if args.source else 0
    video = Video(source)

    ## Initiate trtpose, deepsort and action classifier
    pose_estimator = get_pose_estimator(**pose_kwargs)
    if args.task != 'pose':
        tracker = get_tracker(**tracker_kwargs)
        if args.task == 'action':
            action_classifier = get_classifier(**clf_kwargs)

    ## initiate drawer and text for visualization
    drawer = Drawer(draw_numbers=args.draw_kp_numbers)
    user_text = {
        'text_color': 'green',
        'add_blank': True,
        'Mode': args.task,
        # MaxDist: cfg.TRACKER.max_dist,
        # MaxIoU: cfg.TRACKER.max_iou_distance,
    }

    # loop over the video frames
    for bgr_frame in video:
        rgb_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
        # predict pose estimation
        start_pose = time.time()
        # predictions include keypoints in trtpose order and bboxes (x, y, w, h)
        predictions = pose_estimator.predict(rgb_frame, get_bbox=True)
        # if no keypoints, update the tracker's memory and its age
        if len(predictions) == 0 and args.task != 'pose':
            debug_img = bgr_frame
            tracker.increment_ages()
        else:
            # run tracking (and optionally action recognition) unless the task is plain pose
            if args.task != 'pose':
                # Tracking
                # start_track = time.time()
                predictions = utils.convert_to_openpose_skeletons(predictions)
                predictions, debug_img = tracker.predict(rgb_frame, predictions,
                                                                debug=args.debug_track)
                # end_track = time.time() - start_track

                # Action Recognition
                if len(predictions) > 0 and args.task == 'action':
                    predictions = action_classifier.classify(predictions)

        end_pipeline = time.time() - start_pose
        # add user's desired text on render image
        user_text.update({
            'Frame': video.frame_cnt,
            'Speed': '{:.1f}ms'.format(end_pipeline*1000),
        })

        # draw predicted results on bgr_img with frame info
        render_image = drawer.render_frame(bgr_frame, predictions, **user_text)

        if video.frame_cnt == 1 and args.save_folder:
            # initiate writer for saving rendered video.
            output_suffix = get_suffix(args, cfg)
            output_path = video.get_output_file_path(
                args.save_folder, suffix=output_suffix)
            writer = video.get_writer(render_image, output_path, fps=30)

            if args.debug_track and args.task != 'pose':
                debug_output_path = output_path[:-4] + '_debug.avi'
                debug_writer = video.get_writer(debug_img, debug_output_path)
            print(f'[INFO] Saving video to : {output_path}')
        # show frames
        try:
            if args.debug_track and args.task != 'pose':
                debug_writer.write(debug_img)
                utils.show(debug_img, window='debug_tracking')
            if args.save_folder:
                writer.write(render_image)
            utils.show(render_image, window='webcam' if isinstance(source, int) else osp.basename(source))
        except StopIteration:
            break
    if args.debug_track and args.task != 'pose':
        debug_writer.release()
    if args.save_folder and len(predictions) > 0:
        writer.release()
    video.stop()
Code example #10
    thresh=od_thresh,
    weights=od_weights,
    config=od_config,
    classes_path=od_classes,
)
print("----------OD started----------")

# assuming 7 fps and an nn_budget of 70, the tracker looks about 10 s into the past
tracker = Tracker(max_age=30,
                  nn_budget=70,
                  override_track_class=None,
                  clock=clock)
print("----------Tracker started----------")

GPUtil.showUtilization()
drawer = Drawer()

display = not args.nodisplay

ptz_exists = bool(int(env.get('PTZ_EXISTS', 0)))
ptz = None
if ptz_exists:
    from utils.ptzer import PTZ
    from utils.ui_controller import ui_controller
    manual_override = {"p": 0, "t": 0, "z": 0, "override": False}
    if display:
        ui_thread = Thread(target=ui_controller, args=(manual_override, ))
    ptz_host_url = env.get('PTZ_HOST_URL')
    assert ptz_host_url
    ptz = PTZ(ptz_host_url, flip=flip, manual_override=manual_override)
Code example #11
File: trainer_origin.py Project: el16y2w/Mobile-Pose
    def start(self, fromStep, totalSteps, lr, modeltype, time):
        result = open(
            os.path.join(config.modeloutputFile, time + "training_result.csv"),
            "w")
        result.write(
            "model_name, epochs, learning-rate, train_loss, test_acc\n")
        result.close()
        result = open(
            os.path.join(config.modeloutputFile, time + "training_result.csv"),
            "a+")
        for i in range(fromStep, fromStep + totalSteps + 1):
            # run one training step per data provider
            for j in range(config.datanumber):
                inputs, heatmaps = self.dataTrainProvider[j].drawn()
                res = self.sess.run(
                    [self.trainLoss[j], self.updater[j], self.summaryMerge],
                    feed_dict={
                        self.inputImage: inputs,
                        self.heatmapGT: heatmaps,
                        self.learningRate: lr
                    })
                self.fileWriter.add_summary(res[2], i)
                print(str(i) + " -- TRAIN" + str(j) + " : " + str(res[0]))

            a = str(res[0])

            result.write("{},{},{},{}\n".format(modeltype, i, lr, a))

            if i % Trainer.SAVE_EVERY == 0:
                checkpoint_path = os.path.join(self.savePath, 'model')
                self.saver.save(self.sess, checkpoint_path, global_step=i)

            if i % Trainer.TEST_EVERY == 0:
                inputs, heatmaps = self.dataValProvider[0].drawn()
                res = self.sess.run(
                    [self.output, self.summaryMerge],
                    feed_dict={
                        self.inputImage: inputs,
                        self.heatmapGT: heatmaps,
                        self.learningRate: 0
                    })

                fullscreen_bbox = BBox(0, 1, 0, 1)

                distances = []
                for batch_id in range(inputs.shape[0]):
                    pose_gt, _ = Pose2DInterface.our_approach_postprocessing(
                        heatmaps[batch_id, :, :, :], fullscreen_bbox,
                        self.inputSize)
                    pose_pred, _ = Pose2DInterface.our_approach_postprocessing(
                        res[0][batch_id, :, :, :], fullscreen_bbox,
                        self.inputSize)

                    # pose_pred
                    # all labeled gt joints are used in the loss,
                    # if not detected by the prediction joint location (-1,-1) => (0.5,0.5)
                    tmp = pose_pred.get_joints()
                    tmp[~pose_pred.get_active_joints(), :] = 0.5
                    pose_pred = Pose2D(tmp)

                    distances.append(pose_gt.distance_to(pose_pred))

                summary = tf.Summary(value=[
                    tf.Summary.Value(tag="testset_accuracy",
                                     simple_value=mean(distances))
                ])

                self.fileWriter.add_summary(summary, i)
                result.write("{},{},{},{},{}\n".format(modeltype, i, lr, a,
                                                       mean(distances)))

            if i % Trainer.VIZ_EVERY == 0:
                inputs, heatmaps = self.dataValProvider[0].drawn()
                res = self.sess.run(
                    [self.output, self.summaryMerge],
                    feed_dict={
                        self.inputImage: inputs,
                        self.heatmapGT: heatmaps,
                        self.learningRate: 0
                    })

                currHeatmaps = res[0][0, :, :, :]
                currImage = self._imageFeatureToImage(inputs[0, :, :, :])
                currHeatmapViz = self._heatmapVisualisation(currHeatmaps)
                currHeatmapViz = currHeatmapViz.reshape(
                    (1, currHeatmapViz.shape[0], currHeatmapViz.shape[0], 1))
                currPose = self._toPose(currHeatmaps)
                skeletonViz = np.expand_dims(
                    Drawer.draw_2d_pose(currImage, currPose), 0)

                tmp = tf.summary.image("skeleton_" + str(i),
                                       skeletonViz).eval(session=self.sess)
                self.fileWriter.add_summary(tmp, i)
                tmp = tf.summary.image("heatmap_predicted_" + str(i),
                                       currHeatmapViz).eval(session=self.sess)
                self.fileWriter.add_summary(tmp, i)
        result.close()
Code example #12
File: single.py Project: chuanhao01/XVis_Tool
            preds = decode_predictions(preds, top=1)[0][0]
            preds = [preds[1], preds[2]]
            return preds

    return decode


# Custom preprocess input function if you have one
# def preprocess_input(img_tensor):
#         img_tensor = img_tensor / 255
#         return img_tensor

decode = createDecoder(target_labels)

# Init classes
drawer = Drawer()
cap = cv2.VideoCapture(0)
xai_tool = XVisTool(model,
                    input_size,
                    decoder_func=decode,
                    preprocess_img_func=preprocess_input)

# Showing the default
ori_img = xai_tool.setStillImg(img_path)
xai_dict = xai_tool.stillImgRun(-1)
layers = xai_tool.layers

drawer.singleThread(ori_img, xai_dict['heatmap'], xai_dict['activations'],
                    [layers[-1], -1], xai_dict['predictions'])
cv2.imshow('XVis Single', drawer.mask)
cv2.waitKey(100)
Code example #13
def test():
    setup = Setup()
    runner = SimulationRunner(setup)
    solver = runner.run()
    Drawer(solver).draw()
Code example #14
except:
    print("")
    parser.print_help()
    sys.exit(0)

# load the object detection network
net = jetson.inference.detectNet(opt.network, sys.argv, opt.threshold)

# create video sources & outputs
input = jetson.utils.videoSource(opt.input_URI, argv=sys.argv)
output = jetson.utils.videoOutput(opt.output_URI, argv=sys.argv + is_headless)

# assuming 7 fps and an nn_budget of 70, the tracker looks about 10 s into the past
nn_budget = 70
tracker = Tracker(max_age=30, nn_budget=nn_budget, override_track_class=None)
drawer = Drawer()
# process frames until the user exits
while True:
    tic = time.time()
    # capture the next image
    img = input.Capture()
    np_source = jetson.utils.cudaToNumpy(img)
    np_source = cv2.cvtColor(np_source, cv2.COLOR_RGBA2BGR)
    # detect objects in the image (with overlay)
    #detections = net.Detect(img, overlay=opt.overlay)
    detections = net.Detect(img, overlay='none')
    chosen_track = None

    # print the detections
    print("detected {:d} objects in image".format(len(detections)))
    raw_dets = []
Code example #15
vidInfo = stream.init_src()
# print(f'vid info: {vidInfo}')

if which_od == 'yolov4':
    objDet = YOLOV4(score=0.5,
                    bgr=True,
                    model_image_size=(608, 608),
                    max_batch_size=4,
                    half=True)
elif which_od == 'yolov4_trt':
    objDet = YOLOV4_TRT(
        score=0.5,
        bgr=True,
        engine_path='pytorch_YOLOv4/trt_weights/yolov4_1_608_608.trt')

drawer = Drawer()
show_win_name = 'GRAB YOUR HUMAN'
cv2.namedWindow(show_win_name, cv2.WINDOW_NORMAL)

start_whole = time.time()
try:
    for frame_count in itertools.count():
        if stream.stopped:
            break

        frame = stream.read()
        frame_draw = copy.deepcopy(frame)

        dets = objDet.get_detections_dict([frame], classes=['person'])[0]
        # print(dets)
        drawer.draw_bbs(frame_draw, dets)