Example #1
    def __makeTable(self, personal=False):
        '''
        Builds the schedule table and appends it, as a string, to
        self.daysSchedual.

        Description:
        Arranges 'classes' and 'num_day' in the 'day' list; each entry is a
        list of the classes that happen on that day. 'num_day' tells the
        Drawer which weekday it is.

        return: None
        '''

        # The JSON file holds the course and group number
        filters = Filter(self.jsonData)
        drawer = Drawer()
        day = []
        # Date of the first class; comparisons against it detect day changes
        date = self.classes[0].date
        class_list = []

        for entry in self.classes:
            if not personal or filters.checkGroup(entry):

                # Check whether the schedule has moved on to a new day
                if date < entry.date:
                    day.append({
                        'num_day': date.weekday(),
                        'classes': class_list
                    })
                    class_list = []
                    date = entry.date  # Start collecting under the new date

                class_list.append(entry)

        day.append({'num_day': date.weekday(), 'classes': class_list})
        string = ""
        for value in day:
            string += drawer.drawTable(classes=value['classes'],
                                       num_day=value['num_day'])

        self.daysSchedual.append(string)
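
To exercise __makeTable in isolation, a minimal stand-in for Drawer is enough. This stub is hypothetical and only mirrors the keyword signature used above (classes, num_day); the real Drawer presumably renders a proper table:

class Drawer:
    WEEKDAYS = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

    def drawTable(self, classes, num_day):
        # num_day comes from date.weekday(), so 0 maps to Monday
        header = self.WEEKDAYS[num_day]
        rows = '\n'.join(str(c) for c in classes)
        return header + '\n' + rows + '\n'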
Example #2
# Imports assumed for a runnable snippet; module paths may differ in the
# original project (Drawer and XAITool come from the surrounding codebase).
import cv2
from tensorflow.keras.applications.xception import (Xception,
                                                    decode_predictions,
                                                    preprocess_input)

model = Xception(weights='imagenet')
input_size = (299, 299)  # Xception's expected input resolution


def decode(model, img_tensor):
    # Return [class_name, score] for the top-1 ImageNet prediction
    preds = model.predict(img_tensor)
    preds = decode_predictions(preds, top=1)[0][0]
    return [preds[1], preds[2]]


# Init drawer and xai_tool
drawer = Drawer()
xai_tool = XAITool(model, input_size, decode, preprocess_input)
cap = cv2.VideoCapture(0)
while True:
    ret_run, frame = cap.read()
    if not ret_run:  # Stop once no frame can be grabbed
        break
    xai_dict = xai_tool.vidCapRun(frame, -1)
    if 'predictions' in xai_dict:
        drawer.singleThread(frame, xai_dict['heatmap'],
                            xai_dict['activations'], xai_tool.layers[-1], -1,
                            xai_dict['predictions'])
    else:
        drawer.singleThread(frame, xai_dict['heatmap'],
                            xai_dict['activations'], xai_tool.layers[-1], -1)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # Press 'q' to quit
        break
cap.release()
cv2.destroyAllWindows()
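
The same model and decode helper also work on a single still image; a rough sketch, where 'elephant.jpg' is a placeholder path:

import numpy as np
from tensorflow.keras.preprocessing import image

img = image.load_img('elephant.jpg', target_size=input_size)  # placeholder file
img_tensor = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
print(decode(model, img_tensor))  # [class_name, score] for the top-1 class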
Example #3
import numpy as np
from dataclasses import dataclass

from conmech.problem_solver import Static as StaticProblemSolver
from conmech.problems import Static
from examples.p_slope_contact_law import PSlopeContactLaw
from utils.drawer import Drawer

p_slope = 1.


@dataclass()
class StaticSetup(Static):
    grid_height: ... = 1
    cells_number: ... = (2, 5)
    inner_forces: ... = np.array([-0.2, -0.2])
    outer_forces: ... = np.array([0, 0])
    mu_coef: ... = 4
    lambda_coef: ... = 4
    contact_law: ... = PSlopeContactLaw

    @staticmethod
    def friction_bound(u_nu):
        return 0


if __name__ == '__main__':
    setup = StaticSetup()
    runner = StaticProblemSolver(setup, 'direct')

    state = runner.solve(verbose=True)
    Drawer(state).draw()
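
Note the field: ... = value pattern in StaticSetup: @dataclass only turns annotated class attributes into fields, and the Ellipsis literal satisfies that requirement without committing to a concrete type. An explicitly typed field would read grid_height: float = 1.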
Example #4
    def __init__(self):
        # Presumably a Qt widget: create the drawer, then fix the window geometry
        self._drawer = Drawer()
        super().__init__()
        self.setMinimumSize(600, 480)
        self.resize(600, 480)
Example #5
    # Update config.py / db/base.py with the retrieved 'system' and 'db' parameters
    system_configs.update_config(configs["system"])
    db_configs.update_config(configs["db"])

    print("system config...")
    pprint.pprint(system_configs.full)  # Show 'system' parameters in terminal

    print("db config...")
    pprint.pprint(db_configs.full)  # Show 'db' parameters in terminal

    print("loading parameters at iteration: {}".format(
        args.testiter))  # Show args.testiter in terminal

    print("building neural network...")
    nnet = NetworkFactory()  # Initialise CenterNet's neural network
    print("loading parameters...")
    nnet.load_params(args.testiter)  # To locate CenterNet's pretrained model

    drawer = Drawer()  # Initialise Drawer to add bboxes in frames later

    # nnet.cpu()  # Uncomment if running on CPU
    nnet.cuda()  # Comment out if running on CPU
    nnet.eval_mode()

    # Dispatch on the file extension: video files get per-frame detection
    if args.file_dir[args.file_dir.rfind('.') + 1:].lower() in video_ext:
        show_video(args.file_dir, nnet, drawer, args.score_min, args.save)
    else:
        show_image(args.file_dir, nnet, drawer, args.score_min, args.save)
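
Assuming get_args maps these attributes one-to-one onto command-line flags (hypothetical flag names and values), an invocation might look like:

python demo.py --testiter <iteration> --file_dir path/to/input.mp4 --score_min 0.3 --save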
Example #6
def main():
    # Configs
    args = get_args()
    cfg = Config(args.config)
    pose_kwargs = cfg.POSE
    clf_kwargs = cfg.CLASSIFIER
    tracker_kwargs = cfg.TRACKER

    # Initiate video/webcam
    source = args.source if args.source else 0
    video = Video(source)

    # Initiate trtpose, deepsort and the action classifier
    pose_estimator = get_pose_estimator(**pose_kwargs)
    if args.task != 'pose':
        tracker = get_tracker(**tracker_kwargs)
        if args.task == 'action':
            action_classifier = get_classifier(**clf_kwargs)

    # Initiate drawer and text for visualization
    drawer = Drawer(draw_numbers=args.draw_kp_numbers)
    user_text = {
        'text_color': 'green',
        'add_blank': True,
        'Mode': args.task,
        # MaxDist: cfg.TRACKER.max_dist,
        # MaxIoU: cfg.TRACKER.max_iou_distance,
    }

    # loop over the video frames
    for bgr_frame in video:
        rgb_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2RGB)
        # Predict pose; predictions include keypoints in trtpose order and
        # bboxes as (x, y, w, h)
        start_pose = time.time()
        predictions = pose_estimator.predict(rgb_frame, get_bbox=True)
        # If there are no keypoints, update the tracker's memory and its age
        if len(predictions) == 0 and args.task != 'pose':
            debug_img = bgr_frame
            tracker.increment_ages()
        else:
            # Run tracking (and action recognition) unless the task is plain pose estimation
            if args.task != 'pose':
                # Tracking
                # start_track = time.time()
                predictions = utils.convert_to_openpose_skeletons(predictions)
                predictions, debug_img = tracker.predict(rgb_frame, predictions,
                                                         debug=args.debug_track)
                # end_track = time.time() - start_track

                # Action Recognition
                if len(predictions) > 0 and args.task == 'action':
                    predictions = action_classifier.classify(predictions)

        end_pipeline = time.time() - start_pose
        # add user's desired text on render image
        user_text.update({
            'Frame': video.frame_cnt,
            'Speed': '{:.1f}ms'.format(end_pipeline*1000),
        })

        # draw predicted results on bgr_img with frame info
        render_image = drawer.render_frame(bgr_frame, predictions, **user_text)

        if video.frame_cnt == 1 and args.save_folder:
            # initiate writer for saving rendered video.
            output_suffix = get_suffix(args, cfg)
            output_path = video.get_output_file_path(
                args.save_folder, suffix=output_suffix)
            writer = video.get_writer(render_image, output_path, fps=30)

            if args.debug_track and args.task != 'pose':
                debug_output_path = output_path[:-4] + '_debug.avi'
                debug_writer = video.get_writer(debug_img, debug_output_path)
            print(f'[INFO] Saving video to : {output_path}')
        # show frames
        try:
            if args.debug_track and args.task != 'pose':
                if args.save_folder:  # debug_writer only exists when saving
                    debug_writer.write(debug_img)
                utils.show(debug_img, window='debug_tracking')
            if args.save_folder:
                writer.write(render_image)
            utils.show(render_image, window='webcam' if isinstance(source, int) else osp.basename(source))
        except StopIteration:
            break
    # Release writers only if they were created (needs save_folder and at least one frame)
    if args.save_folder and video.frame_cnt > 0:
        if args.debug_track and args.task != 'pose':
            debug_writer.release()
        writer.release()
    video.stop()
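
A design note on Example #6: the writer is created lazily on the first frame (video.frame_cnt == 1), presumably so get_writer can take the output resolution from an already-rendered frame; that is also why the release calls above only run when save_folder is set and at least one frame was processed.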
Example #7
def test():
    setup = Setup()
    runner = SimulationRunner(setup)
    solver = runner.run()
    Drawer(solver).draw()