]

    # Put models together into the model universe
    model_universe = np.array(AR_models + VAR_models + mixed_models)

    # Use a uniform prior over these models
    model_prior = np.array([1 / len(model_universe)] * len(model_universe))
    """STEP 5: Build and run detector"""

    detector = Detector(data=data,
                        model_universe=model_universe,
                        model_prior=model_prior,
                        cp_model=cp_model,
                        S1=S1,
                        S2=S2,
                        T=T,
                        store_rl=True,
                        store_mrl=True,
                        trim_type="keep_K",
                        threshold=200,
                        notifications=100,
                        save_performance_indicators=True,
                        training_period=250)
    detector.run()
    """STEP 6: Give some results/pictures/summaries"""

    # Store results + real CPs into EvaluationTool object
    EvT = EvaluationTool()
    EvT.add_true_CPs(true_CP_location=true_CP_location,
                     true_CP_model_index=true_CP_location,
                     true_CP_model_label=-1)
    EvT.build_EvaluationTool_via_run_detector(detector)
Example 2
from printer import Printer
from reader import Reader
from preprocessor import Preprocessor

# Read the image
reader = Reader()
img = reader.getImage()
original = img.copy()

# Preprocess the image
pre = Preprocessor(img)
img = pre.getProcessed()
coloredSigns = pre.getLists()

# Detect image Points of Interest (POI)
det = Detector(img, coloredSigns)
det.detectCircles("Blue")
det.detectCircles("Red")
det.detectRectangles("Blue")
det.detectTriangles("Red")

det.detectRectangles("Yellow")
det.detectStop()
ans = det.getDetected()

# Print the Detected POI's into the image
printer = Printer(original)
printer.printToSTDOUT(det.getDetectedSigns())
original = printer.printAllIntoImage(ans)
printer.showAndSave('output.png')
Example 3
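# Constructor that loads a TFLite text-detection model for identity cards;
# nms_threshold and score_threshold filter the raw detections.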
def test_hand_detection_init(self):
    self.text_detection_model = Detector(
        path_to_model='./models/identity_card/model.tflite',
        path_to_labels='./models/identity_card/label_map.pbtxt',
        nms_threshold=0.3,
        score_threshold=0.3)
Example 4
    def __init__(self):

        logger = Logger()
        self.log = logger.logger
        self.error_log = logger.err_logger
        self.camera = None

        try:
            self.log.info("Reading the config...")
            self.config = ConfigReader()
            self.log.info("Config read")
        except:
            self.log.error("Error reading config.ini")
            self.error_log.error("Error reading config.ini", exc_info=True)
            sys.exit()

        try:
            self.folder_path = self.config.get_folder_path_config()
            print("folder_path: {}".format(self.folder_path))
        except:
            self.log.error(
                "Initialisation error: the video folder path is not defined")
            self.error_log.error(
                "Initialisation error: the video folder path is not defined",
                exc_info=True)
            sys.exit()

        self.log.info("Initialising face_model")
        try:
            face_model_address, face_model_protos = self.config.get_model_config(
            )
            print(face_model_address, face_model_protos)
            self.get_face_model = Model(face_model_address,
                                        face_model_protos,
                                        num_classes=2)
            self.get_face_model.get_session()
            self.log.info("face_model initialisation completed")
        except:
            self.log.error("face_model initialisation error")
            self.error_log.error("face_model initialisation error",
                                 exc_info=True)
            sys.exit()

        self.log.info("Initialising camera")
        try:
            self.camera = Camera(self.folder_path)
            self.log.info("Camera initialised")
        except:
            self.log.error("Camera initialisation error")
            self.error_log.error("Camera initialisation error", exc_info=True)
            sys.exit()

        try:
            self.max_boxes_to_draw, self.min_score_thresh = self.config.get_vis_utils_config(
            )
        except:
            self.max_boxes_to_draw, self.min_score_thresh = 10, 0.3

        self.log.info("Initializing detector")
        try:
            self.detector = Detector(self.get_face_model,
                                     self.max_boxes_to_draw,
                                     self.min_score_thresh)
            self.log.info("detector initialized")
        except:
            self.log.error("detector initialization failed")
            self.error_log.error("detector initialization failed",
                                 exc_info=True)
            sys.exit()

        try:
            self.log.info("Initialising embeddings models...")
            self.calculator = Calculate(self.log, self.error_log, self.config)
            self.log.info("embeddings Models initialisation completed")
        except:
            self.log.error("embeddings Models initialisation error")
            self.error_log.error("embeddings Models initialisation error",
                                 exc_info=True)
            sys.exit()

        self.batch_frame_list = []

        # todo set batch_size to config.ini ?
        self.batch_size = 2
        self.flag = True
Example 5
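# TF1 training setup: a Detector with a GAP (global average pooling) head,
# elementwise sigmoid cross-entropy loss, L2 weight decay, and gradients
# scaled by 0.1 for all variables except the conv6/GAP layers.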
val_filenames = list(pd.read_table(val_index_file_path, header=None, names=['filename']).filename)
train_filenames = list(pd.read_table(train_index_file_path, header=None, names=['filename']).filename)
testset = allset.loc[allset['filename'].isin(test_filenames)]
valset = allset.loc[allset['filename'].isin(val_filenames)]
trainset = allset.loc[allset['filename'].isin(train_filenames)]
trainset['filename'] = trainset['filename'].map(lambda x: os.path.join(image_path, x))
valset['filename'] = valset['filename'].map(lambda x: os.path.join(image_path, x))
testset['filename'] = testset['filename'].map(lambda x: os.path.join(image_path, x))
with open(wordset_path) as f:
    wordset = cPickle.load(f)
n_labels = len(wordset)

learning_rate = tf.placeholder(tf.float32, [])
images_tf = tf.placeholder(tf.float32, [None, 224, 224, 3], name="images")
labels_tf = tf.placeholder(tf.float32, [None, 319], name='labels')
detector = Detector(weight_path, n_labels)
p1, p2, p3, p4, conv5, conv6, gap, output = detector.inference(images_tf)
sig_output = tf.nn.sigmoid(output)
loss_tf = -tf.reduce_mean((labels_tf * tf.log(sig_output + 1e-9)) +
                          ((1 - labels_tf) * tf.log(1 - sig_output + 1e-9)))
weights_only = filter(lambda x: x.name.endswith('W:0'), tf.trainable_variables())
weight_decay = tf.reduce_sum(tf.stack([tf.nn.l2_loss(x) for x in weights_only])) * weight_decay_rate
loss_tf += weight_decay
sess = tf.InteractiveSession()
saver = tf.train.Saver(max_to_keep=500)
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
grads_and_vars = optimizer.compute_gradients(loss_tf)
grads_and_vars = map(
    lambda gv: (gv[0], gv[1]) if ('conv6' in gv[1].name or 'GAP' in gv[1].name)
    else (gv[0] * 0.1, gv[1]), grads_and_vars)
train_op = optimizer.apply_gradients(grads_and_vars)
tf.initialize_all_variables().run()

if pretrained_model_path:
Example 6
parser = argparse.ArgumentParser()
parser.add_argument('--input',
                    type=str,
                    help='Path to a video or a sequence of image.',
                    default='data/videos/14.mp4')
parser.add_argument('--algo',
                    type=str,
                    help='Background subtraction method (COLOR).',
                    default='COLOR')
args = parser.parse_args()

if args.algo == 'COLOR':
    lower_blue = np.array([110, 100, 100])
    upper_blue = np.array([120, 255, 255])
    detector = Detector(type="COLOR", color=(lower_blue, upper_blue))

tracker = Tracker(160, 30, 10, 100)
track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                (0, 255, 255), (255, 0, 255), (255, 127, 255), (127, 0, 255),
                (127, 0, 127)]

capture = cv.VideoCapture(cv.samples.findFileOrKeep(args.input))
# capture = cv.VideoCapture('http:192.168.1.106:8080/video')
if not capture.isOpened():
    print('Unable to open: ' + args.input)
    exit(0)

frames = 0
inf = 99999999
corners = [[0, 0], [inf, 0], [inf, inf], [0, inf]]
Example 7
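# MTCNN cascade: PNet is loaded as a fully-convolutional detector (or a
# sliding-window one), and RNet/ONet only for the corresponding test modes.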
min_face_size = 24
stride = 2
slide_window = False
shuffle = False
detectors = [None, None, None]
prefix = [
    'model/MTCNN_model/PNet_landmark/PNet',
    'model/MTCNN_model/RNet_landmark/RNet',
    'model/MTCNN_model/ONet_landmark/ONet'
]
epoch = [18, 14, 16]
batch_size = [2048, 256, 16]
model_path = ['%s-%s' % (x, y) for x, y in zip(prefix, epoch)]
# load pnet model
if slide_window:
    PNet = Detector(P_Net, 12, batch_size[0], model_path[0])
else:
    PNet = FcnDetector(P_Net, model_path[0])
detectors[0] = PNet

# load rnet model
if test_mode in ["RNet", "ONet"]:
    RNet = Detector(R_Net, 24, batch_size[1], model_path[1])
    detectors[1] = RNet

# load onet model
if test_mode == "ONet":
    ONet = Detector(O_Net, 48, batch_size[2], model_path[2])
    detectors[2] = ONet
detectors[1] = detectors[2] = None
mtcnn_detector = MtcnnDetector(detectors=detectors,
Example 8
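# Scene-text OCR pipeline: a CTPN model detects text regions and a CRNN model
# recognizes the characters within them.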
# -*- coding: UTF-8 -*-
from __future__ import print_function

import glob
import os
import argparse
import time

import cv2

from detector import Detector
from recoer import Recoer

detector = Detector('./data/models/ctpn.pb')
recoer = Recoer('./tf_crnn/data/chars/chn.txt', './data/models/crnn.pb')


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--img_dir', default='/home/cwq/data/ICDAR13/Challenge2_Test_Task12_Images')
    parser.add_argument('--output_dir', default='./output')
    parser.add_argument('--viz', action='store_true', default=False)
    args = parser.parse_args()

    if not os.path.exists(args.img_dir):
        print("img_dir not exists")
        exit(-1)

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
Example 9
def test_eigenvalue_cutoff_response_matrix_unfolding(random_state=None,
                                                     cutoff=5,
                                                     num_bins=20,
                                                     plot=False):
    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)

    energies = 1000.0 * random_state.power(0.70, 50000)
    below_zero = energies < 1.0
    energies[below_zero] = 1.0

    detector = Detector(distribution='gaussian',
                        energy_loss='const',
                        make_noise=True,
                        smearing=True,
                        resolution_chamber=1.,
                        noise=0.,
                        response_bins=num_bins,
                        random_state=random_state)
    signal, true_hits, energies_return, detector_matrix = detector.simulate(
        energies)
    eigenvalue_cutoff_results = eigenvalue_cutoff(signal,
                                                  energies,
                                                  detector_matrix,
                                                  cutoff=cutoff)
    eigenvalues, eigenvectors = eigenvalue_cutoff_results[
        0], eigenvalue_cutoff_results[1]

    true, folded, measured = obtain_coefficients(signal,
                                                 energies,
                                                 eigenvalues,
                                                 eigenvectors,
                                                 cutoff=cutoff)
    if true_hits.ndim == 2:
        sum_true_energy = np.sum(true_hits, axis=1)
        true_hits = np.histogram(sum_true_energy,
                                 bins=detector_matrix.shape[0])

    if plot:
        evaluate_unfolding.plot_eigenvalue_coefficients(
            true, folded, measured, eigenvalue_cutoff_results[6])
        # evaluate_unfolding.plot_eigenvalues(eigenvalues, eigenvectors, n_dims=detector_matrix.shape[0])
        evaluate_unfolding.plot_unfolded_vs_true(
            eigenvalue_cutoff_results[2],
            energies_return,
            errors=eigenvalue_cutoff_results[6],
            title="Unfolding X")
        evaluate_unfolding.plot_unfolded_vs_true(
            eigenvalue_cutoff_results[3],
            energies_return,
            errors=eigenvalue_cutoff_results[6],
            title="Unfolding X Other")
        evaluate_unfolding.plot_unfolded_vs_true(
            eigenvalue_cutoff_results[4],
            energies_return,
            errors=eigenvalue_cutoff_results[6],
            title="Unfolding True")
        evaluate_unfolding.plot_unfolded_vs_true(
            eigenvalue_cutoff_results[5],
            energies_return,
            errors=eigenvalue_cutoff_results[6],
            title="Unfolding True 2")
Example 10
def main():
    params = Params()

    if not os.path.isdir(params.dataset_root):
        raise Exception("Unable to load images from " + params.dataset_root +
                        ": not a directory")

    if not os.path.exists(params.output_dir):
        os.mkdir(params.output_dir)

    if not os.path.isdir(params.output_dir):
        raise Exception("Unable to save results to " + params.output_dir +
                        ": not a directory")

    if (params.dataset == "DIC-C2DH-HeLa"):
        path = params.dataset_root + "/" + str(
            list(params.images_idx.keys())[0])
    elif (params.dataset == "PhC-C2DL-PSC"
          and params.nn_method == "DeepWater"):
        path = params.dataset_root + "/" + str(
            list(params.images_idx.keys())[0])
    else:
        path = params.dataset_root
    # seq = []
    print(path)
    images = glob.glob(path + '/*.tif')
    #sort the order of images
    images = [(int(x[-7:-4]), x) for x in images]
    images.sort(key=lambda x: x[0])
    images = [x[1] for x in images]

    preprocessor = Preprocessor(images, params)
    detector = Detector(preprocessor)
    matcher = Matcher(detector)
    drawer = Drawer(matcher, preprocessor)
    masks = preprocessor.get_masks()

    print('Generating all frames and cell states...')
    # based on the contours for all images, tracking trajectory and mitosis image by image
    drawer.load()
    print('Successfully loaded all images')

    if not os.path.exists(f'{path}/gen'):
        os.makedirs(f"{path}/gen")

    counter = 1
    for g in drawer.get_gen_images():
        cv2.imwrite(f'{path}/gen/{counter}.tif', g)
        cv2.imwrite(f'{path}/gen/mask_{counter}.tif', masks[counter - 1])
        counter += 1
    print('Saved all images')

    # Now standby for user to issue commands for retrieval
    # input: num_1  num_2
    # 1st num represents the image number
    # 2nd num represents the cell number in the given image
    # 2nd num is not compulsory, with 1 number it will just show the image
    # press ENTER without any input will end the program
    # all inputs are assumed right
    while True:
        string = input(
            'Input a frame and cell ID (optional) separated by a space...\n')
        if string:
            string = string.split(' ')
            frame = int(string[0])
            if len(string) > 1:
                try:
                    id = int(string[1])
                    display_image = drawer.serve(frame, id)
                except ValueError:
                    print('Not an integer')
                    display_image = drawer.serve(frame)
            else:
                display_image = drawer.serve(frame)
            plt.imshow(display_image)
            plt.axis('off')
            plt.show()
            # cv2.imshow('image', display_image)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()

        else:
            break
Example 11
def test_multiple_datasets_std(random_state=None,
                               method=matrix_inverse_unfolding,
                               num_datasets=20,
                               num_bins=20,
                               noise=True,
                               smearing=True,
                               plot=False):
    if not isinstance(random_state, np.random.RandomState):
        random_state = np.random.RandomState(random_state)

    # Array to hold the arrays of means and stuff
    array_of_unfolding_errors = []

    subset_random_state_seed = random_state.random_integers(low=3000000,
                                                            size=num_datasets)

    # Generate the different datasets based off the random_state
    for seed in subset_random_state_seed:
        subset_random_state = np.random.RandomState(seed)

        # Now generate the datasets
        energies = 1000.0 * subset_random_state.power(0.70, 500)
        below_zero = energies < 1.0
        energies[below_zero] = 1.0

        detector = Detector(distribution='gaussian',
                            energy_loss='const',
                            make_noise=noise,
                            smearing=smearing,
                            resolution_chamber=1.,
                            noise=0.,
                            response_bins=num_bins,
                            random_state=subset_random_state)

        signal, true_hits, energies_return, detector_matrix = detector.simulate(
            energies)

        if method == matrix_inverse_unfolding:
            matrix_inverse_unfolding_results = method(signal, detector_matrix)
            array_of_unfolding_errors.append(
                matrix_inverse_unfolding_results[4])
        else:
            matrix_inverse_unfolding_results = method(signal, detector_matrix)
            array_of_unfolding_errors.append(
                matrix_inverse_unfolding_results[0])

    array_of_unfolding_errors = np.asarray(array_of_unfolding_errors)

    # Different ways of getting the mean and std
    # Mean and std of datasets
    # print(np.mean(array_of_unfolding_errors, axis=1))
    # print(np.std(array_of_unfolding_errors, axis=1))

    # Mean and std of the whole thing
    print("Mean (All): " + str(np.mean(array_of_unfolding_errors)))
    print("Std (All): " + str(np.std(array_of_unfolding_errors)))

    if plot:
        evaluate_unfolding.plot_error_stats(
            np.mean(array_of_unfolding_errors, axis=1),
            np.std(array_of_unfolding_errors, axis=1))
Example 12
def test_hand_detection(self):
    detector = Detector()
    _, scores = detector.detect_objects(self.img)
    self.assertTrue(scores[0] > 0.05)
Example 13
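# Demo entry point: runs the detector on a webcam/video stream (optionally
# saving the output video and reusing preloaded detections) or on a folder of images.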
def demo(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.debug = max(opt.debug, 1)
    detector = Detector(opt)

    if opt.demo == 'webcam' or opt.demo[opt.demo.rfind('.') +
                                        1:].lower() in video_ext:
        cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)
        out = None
        out_name = opt.demo[opt.demo.rfind('/') + 1:]
        if opt.save_video:
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            out = cv2.VideoWriter(
                '../results/{}.mp4'.format(opt.exp_id + '_' + out_name),
                fourcc, opt.save_framerate, (opt.video_w, opt.video_h))
        detector.pause = False
        cnt = 0
        results = {}
        if opt.load_results != '':
            load_results = json.load(open(opt.load_results, 'r'))
        while True:
            cnt += 1
            _, img = cam.read()
            if opt.resize_video:
                try:
                    img = cv2.resize(img, (opt.video_w, opt.video_h))
                except:
                    print('FINISH!')
                    save_and_exit(opt, out, results, out_name)
            if cnt < opt.skip_first:
                continue
            try:
                cv2.imshow('input', img)
            except:
                print('FINISH!')
                save_and_exit(opt, out, results, out_name)
            input_meta = {'pre_dets': []}
            img_id_str = '{}'.format(cnt)
            if opt.load_results:
                input_meta['cur_dets'] = load_results[img_id_str] \
                    if img_id_str in load_results else []
                if cnt == 1:
                    input_meta['pre_dets'] = load_results[img_id_str] \
                        if img_id_str in load_results else []
            ret = detector.run(img, input_meta)
            time_str = 'frame {} |'.format(cnt)
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            results[cnt] = ret['results']
            print(time_str)
            if opt.save_video:
                out.write(ret['generic'])
            if cv2.waitKey(1) == 27:
                print('EXIT!')
                save_and_exit(opt, out, results, out_name)
                return  # esc to quit
        save_and_exit(opt, out, results)
    else:
        # Demo on images, currently does not support tracking
        if os.path.isdir(opt.demo):
            image_names = []
            ls = os.listdir(opt.demo)
            for file_name in sorted(ls):
                ext = file_name[file_name.rfind('.') + 1:].lower()
                if ext in image_ext:
                    image_names.append(os.path.join(opt.demo, file_name))
        else:
            image_names = [opt.demo]

        for (image_name) in image_names:
            ret = detector.run(image_name)
            time_str = ''
            for stat in time_stats:
                time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
            print(time_str)
Example 14
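# COCO evaluation: load the table and training config, restore the trained
# network weights, and wrap everything in the COCO evaluator API.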
# TODO: Set your coco_table_file, coco_anno_root and set_name
coco_table_file = 'data/coco_table.json'
coco_anno_root = '/home1/xyt/dataset/coco17/'
set_name = 'val2017'
# ==================================

# Read train.json/coco_table_file and set current GPU (for nms_cuda)
with open(coco_table_file, 'r') as load_f:
    coco_table = json.load(load_f)
with open('train.json', 'r') as load_f:
    cfg = json.load(load_f)
torch.cuda.set_device(cfg['device'][0])

# Prepare the network
net = Detector(pretrained=False)
device_out = 'cuda:%d' % (cfg['device'][0])
net.load_state_dict(torch.load('net.pkl', map_location=device_out))
net = net.cuda(cfg['device'][0])
net.eval()

# Get eval dataset
dataset_eval = Dataset_CSV(cfg['root_eval'],
                           cfg['list_eval'],
                           cfg['name_file'],
                           size=net.view_size,
                           train=False,
                           normalize=True)

# Prepare API structure
inferencer = api.COCOEvaluator(net, dataset_eval, coco_table['val_image_ids'],
Example 15
model_universe_KL = np.array(model_universe_KL)
model_prior = np.array([1.0/len(model_universe_DPD)]*len(model_universe_DPD))
cp_model = CpModel(cp_intensity)


detector_DPD = Detector(
        data=data,
        model_universe=model_universe_DPD,
        model_prior=model_prior,
        cp_model=cp_model,
        S1=S1,
        S2=S2,
        T=T,
        store_rl=True,
        store_mrl=True,
        trim_type="keep_K",
        threshold=50,
        notifications=100,
        save_performance_indicators=True,
        generalized_bayes_rld=rld_DPD,
        alpha_param_learning=param_learning,
        alpha_param=alpha_param,
        alpha_param_opt_t=100,
        alpha_rld=alpha_rld,
        alpha_rld_learning=rld_learning,
        loss_der_rld_learning="absolute_loss"
        )
detector_DPD.run()

detector_KL = Detector(
        data=data,
        model_universe=model_universe_KL,
Example 16
def detect_n_track():
    pub = rospy.Publisher('bounding_box', String, queue_size=10)
    rospy.init_node('detect_n_track', anonymous=True)
    rate = rospy.Rate(10)  # 10hz

    while not rospy.is_shutdown():
        parser = argparse.ArgumentParser()
        parser.add_argument("--csrt",
                            type=bool,
                            default=False,
                            help="use csrt tracker")
        parser.add_argument("--image_folder", type=str, default="data/samples")
        parser.add_argument("--model_def",
                            type=str,
                            default="config/yolov3-custom.cfg",
                            help="path to model definition file")
        parser.add_argument("--weights_path",
                            type=str,
                            default="weights/yolov3_v2.pth",
                            help="path to weights file")
        parser.add_argument("--class_path",
                            type=str,
                            default="config/classes.names",
                            help="path to class label file")
        parser.add_argument("--conf_thres",
                            type=float,
                            default=0.6,
                            help="object confidence threshold")
        parser.add_argument("--nms_thres",
                            type=float,
                            default=0.1,
                            help="iou threshold for non-maximum suppression")
        parser.add_argument("--img_size",
                            type=int,
                            default=416,
                            help="size of each image dimension")
        opt = parser.parse_args()
        print(opt)

        if opt.csrt:
            OPENCV_OBJECT_TRACKERS = {
                "csrt": cv2.TrackerCSRT_create,
            }
            trackers = cv2.MultiTracker_create()

        yolo = Detector(opt.model_def, opt.weights_path, opt.class_path,
                        opt.conf_thres, opt.nms_thres, opt.img_size)
        tracklets = []
        object_id = 0

        for i in range(0, 50):
            frame = cv2.imread(
                os.path.join(opt.image_folder, "frame{:04d}.jpg".format(i)))
            if opt.csrt:
                frame = imutils.resize(frame, width=400)
            yolo_boxes = yolo.detect(frame)

            for tr in tracklets:
                tr.setActive(False)

            for yolo_box in yolo_boxes:
                match = 0
                for tr in tracklets:
                    if yolo_box[-1] != tr.label:
                        continue

                    if iou(yolo_box[:4], tr.getState()):
                        tr.update(yolo_box[:4])
                        match = 1
                        tr.setActive(True)
                        break

                if match == 0:
                    tr = Tracklet(object_id, yolo_box[-1], yolo_box[:-1])
                    tracklets.append(tr)
                    if opt.csrt:
                        tracker = OPENCV_OBJECT_TRACKERS["csrt"]()
                        trackers.add(tracker, frame,
                                     (yolo_box[0], yolo_box[1], yolo_box[2],
                                      yolo_box[3]))

                    object_id += 1

            for tr in tracklets:
                if tr.active == False:
                    tr.addTimeout()
                if tr.timeout > 10:
                    tracklets.remove(tr)
                    continue

                if tr.active:
                    (x, y, w, h) = tr.getState()
                    x, y, w, h = int(x), int(y), int(w), int(h)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), tr.color, 2)
                    ymin = y
                    ymax = y + h
                    xmin = x
                    xmax = x + w

                if opt.csrt:
                    (success, csrt_boxes) = trackers.update(frame)
                    tr.predict(csrt_boxes[tr.idx])
                else:
                    tr.predict()

                detection_msg = BoundingBox()
                detection_msg.xmin = xmin
                detection_msg.xmax = xmax
                detection_msg.ymin = ymin
                detection_msg.ymax = ymax
                detection_msg.probability = 1
                detection_msg.Class = tr.idx

                detection_results.bounding_boxes.append(detection_msg)

                # data_string = str(tr.idx)+str(x)+str(y)+str(w)+str(h)
                # pub.publish(data_string)
                pub.publish(detection_results)

            # cv2.imshow("Frame", frame)
            # key = cv2.waitKey(1) & 0xFF

        # cv2.destroyAllWindows()
        rate.sleep()
Example 17
def prefetch_test(opt):
    if not opt.not_set_cuda_env:
        os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    Dataset = dataset_factory[opt.test_dataset]
    opt = opts().update_dataset_info_and_set_heads(opt, Dataset)
    #Logger(opt)

    if opt.save_video:
        opt.debug = 1
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = None
        vid_dir = os.path.join(opt.save_dir, 'videos')
        os.makedirs(vid_dir, exist_ok=True)
        print(f'Saving videos to {vid_dir}')
        import wandb
        wandb_run = wandb.init(project='object-motion',
                               resume='allow',
                               id=opt.uuid,
                               dir='/scratch/cluster/jozhang/logs')
        print(f'Logging videos to wandb at {opt.uuid}')
        imgs = []
    print(opt)

    split = 'val' if not opt.trainval else 'test'
    dataset = Dataset(opt, split)
    detector = Detector(opt)

    if opt.load_results != '':
        load_results = json.load(open(opt.load_results, 'r'))
        for img_id in load_results:
            for k in range(len(load_results[img_id])):
                if load_results[img_id][k][
                        'class'] - 1 in opt.ignore_loaded_cats:
                    load_results[img_id][k]['score'] = -1
    else:
        load_results = {}

    data_loader = torch.utils.data.DataLoader(PrefetchDataset(
        opt, dataset, detector.pre_process),
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=1,
                                              pin_memory=True)

    results = {}
    num_iters = len(data_loader) if opt.num_iters < 0 else opt.num_iters
    bar = Bar('{}'.format(opt.exp_id), max=num_iters)
    time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge', 'track']
    avg_time_stats = {t: AverageMeter() for t in time_stats}
    if opt.use_loaded_results:
        for img_id in data_loader.dataset.images:
            results[img_id] = load_results['{}'.format(img_id)]
        num_iters = 0

    # Main: iterate through dataset and prepare tracking results
    for ind, (img_id, pre_processed_images) in enumerate(data_loader):
        # handle early stop
        if ind >= num_iters:
            break

        # handle first frame
        img_id_str = str(int(img_id.numpy().astype(np.int32)[0]))
        if opt.tracking and ('is_first_frame' in pre_processed_images):
            if img_id_str in load_results:
                pre_processed_images['meta']['pre_dets'] = load_results[
                    img_id_str]
            else:
                print('No pre_dets for', img_id_str,
                      '. Use empty initialization.')
                pre_processed_images['meta']['pre_dets'] = []
            detector.reset_tracking()
            vid_id = pre_processed_images["video_id"].item()
            if opt.save_video:
                if out is not None:
                    out.release()
                vid_path = os.path.join(vid_dir, f'track{vid_id}.mp4')
                out = cv2.VideoWriter(
                    vid_path, fourcc, opt.save_framerate,
                    pre_processed_images['image'].shape[1:3][::-1])
                if len(imgs) > 0:
                    wandb.log({
                        f'eval/video_{vid_id-1}':
                        wandb.Video(torch.as_tensor(imgs).permute(0, 3, 1, 2))
                    })
                imgs = []
            print('Start tracking video', vid_id)

        # handle public det
        if opt.public_det:
            if img_id_str in load_results:
                pre_processed_images['meta']['cur_dets'] = load_results[
                    img_id_str]
            else:
                print('No cur_dets for', img_id_str)
                pre_processed_images['meta']['cur_dets'] = []

        # run tracker and store results
        img_ids_for_vid = [v['id'] for v in dataset.video_to_images[vid_id]]
        results_for_vid = {
            k - min(img_ids_for_vid): v
            for k, v in results.items() if k in img_ids_for_vid
        }
        ret = detector.run(pre_processed_images, tracks=results_for_vid)
        results[int(img_id_str)] = ret['results']

        if opt.save_video:
            assert 'generic' in ret, 'images are not in the detector results'
            out.write(ret['generic'])
            imgs.append(cv2.cvtColor(ret['generic'], cv2.COLOR_BGR2RGB))
            # cv2.imwrite(os.path.join(vid_dir, f'frame_{img_id_str}.png'), ret['generic'])

        # logging
        Bar.suffix = '[{0}/{1}]|Tot: {total:} |ETA: {eta:} '.format(
            ind, num_iters, total=bar.elapsed_td, eta=bar.eta_td)
        for t in avg_time_stats:
            avg_time_stats[t].update(ret[t])
            Bar.suffix = Bar.suffix + '|{} {tm.val:.3f}s ({tm.avg:.3f}s) '.format(
                t, tm=avg_time_stats[t])
        if opt.print_iter > 0:
            if ind % opt.print_iter == 0:
                print('{}/{}| {}'.format(opt.task, opt.exp_id, Bar.suffix))
        else:
            bar.next()
    bar.finish()

    if opt.save_video and out is not None:
        out.release()
        wandb.log({
            f'eval/video_{vid_id - 1}':
            wandb.Video(torch.as_tensor(imgs).permute(0, 3, 1, 2))
        })

    # Main: save tracks into dataset specific format and evaluate with external script
    os.makedirs(opt.save_dir, exist_ok=True)
    if opt.save_results:
        print(
            'saving results to',
            opt.save_dir + '/save_results_{}{}.json'.format(
                opt.test_dataset, opt.dataset_version))
        json.dump(
            _to_list(copy.deepcopy(results)),
            open(
                opt.save_dir + '/save_results_{}{}.json'.format(
                    opt.test_dataset, opt.dataset_version), 'w'))
    if opt.save_video:
        wandb_run.finish()
    dataset.run_eval(results, opt.save_dir, opt.uuid)
Example 18
#! /usr/bin/env python
# Main near-duplicate detection "runner"
#
# Written by Parker Moore (pjm336)
# http://www.parkermoore.de

import operator, copy
from detector import Detector
import psycopg2

if __name__ == "__main__":
    # run the program
    detector = Detector('./test')
    print "Checking for duplicates using NDD..."
    duplicates = detector.check_for_duplicates()
    if duplicates:
        print "Duplicates found (Jaccard coefficient > 0.5):"
        print duplicates
    filenames_of_first_one_hundred = []

    try:
        conn = psycopg2.connect(
            "dbname='djangology' user='******' password=''")
    except:
        print "I am unable to connect to the database."

    cur = conn.cursor()
    try:
        cur.execute("""SELECT * from dj_document""")
    except:
        print "I can't SELECT from dj_document"
Example 19
if settings.RANDOM_SEED:
    tf.set_random_seed(settings.RANDOM_SEED)
    np.random.seed(settings.RANDOM_SEED)

# initialize the ice
ice_true = Ice()
ice_true.init(l_abs=settings.L_ABS_TRUE, l_scat=settings.L_SCAT_TRUE)

ice_pred = Ice(trainable=True)
ice_pred.init(l_abs=settings.L_ABS_START, l_scat=settings.L_SCAT_TRUE)

# initialize the detector
detector = Detector(dom_radius=settings.DOM_RADIUS,
                    nx_strings=settings.NX_STRINGS,
                    ny_strings=settings.NY_STRINGS,
                    doms_per_string=settings.DOMS_PER_STRING,
                    l_x=settings.LENGTH_X,
                    l_y=settings.LENGTH_Y,
                    l_z=settings.LENGTH_Z)

# initialize the models
model_true = Model(ice_true, detector)
model_pred = Model(ice_pred, detector)

# save final positions and traveled layer distances as variables for each batch
final_positions_true = []
final_positions_pred = []
traveled_layer_distance_true = []
traveled_layer_distance_pred = []

for i in range(settings.BATCHES_PER_STEP):
Example 20
def model_fn(features, labels, mode, params, config):
    """
    This is a function for creating a computational tensorflow graph.
    The function is in format required by tf.estimator.
    """

    # choose a backbone network
    if params['backbone'] == 'resnet':
        feature_extractor = resnet
    elif params['backbone'] == 'mobilenet':
        feature_extractor = lambda x: mobilenet(x, params['depth_multiplier'])
    elif params['backbone'] == 'shufflenet':
        feature_extractor = lambda x: shufflenet(
            x, str(params['depth_multiplier']))

    # build the main graph
    is_training = mode == tf.estimator.ModeKeys.TRAIN
    detector = Detector(features['images'], feature_extractor, is_training,
                        params)

    # add NMS to the graph
    if not is_training:
        predictions = detector.get_predictions(
            score_threshold=params['score_threshold'],
            iou_threshold=params['iou_threshold'],
            max_boxes_per_class=params['max_boxes_per_class'])

    if mode == tf.estimator.ModeKeys.PREDICT:

        w, h = tf.unstack(tf.to_float(
            features['images_size']))  # original image size
        s = tf.to_float(tf.shape(features['images']))  # size after resizing
        scaler = tf.stack([h / s[1], w / s[2], h / s[1], w / s[2]])
        predictions['boxes'] = scaler * predictions['boxes']

        export_outputs = tf.estimator.export.PredictOutput({
            name: tf.identity(tensor, name)
            for name, tensor in predictions.items()
        })
        return tf.estimator.EstimatorSpec(
            mode,
            predictions=predictions,
            export_outputs={'outputs': export_outputs})

    # add L2 regularization
    with tf.name_scope('weight_decay'):
        add_weight_decay(params['weight_decay'])
        regularization_loss = tf.losses.get_regularization_loss()

    # create localization and classification losses
    losses = detector.get_losses(labels, params)
    tf.losses.add_loss(params['alpha'] * losses['rpn_localization_loss'])
    tf.losses.add_loss(params['beta'] * losses['rpn_classification_loss'])
    tf.losses.add_loss(params['gamma'] * losses['roi_localization_loss'])
    tf.losses.add_loss(params['theta'] * losses['roi_classification_loss'])
    total_loss = tf.losses.get_total_loss(add_regularization_losses=True)

    tf.summary.scalar('regularization_loss', regularization_loss)
    tf.summary.scalar('rpn_localization_loss', losses['rpn_localization_loss'])
    tf.summary.scalar('rpn_classification_loss',
                      losses['rpn_classification_loss'])
    tf.summary.scalar('roi_localization_loss', losses['roi_localization_loss'])
    tf.summary.scalar('roi_classification_loss',
                      losses['roi_classification_loss'])

    if mode == tf.estimator.ModeKeys.EVAL:

        with tf.name_scope('evaluator'):
            evaluator = Evaluator(num_classes=params['num_classes'])
            eval_metric_ops = evaluator.get_metric_ops(labels, predictions)

        return tf.estimator.EstimatorSpec(mode,
                                          loss=total_loss,
                                          eval_metric_ops=eval_metric_ops)

    assert mode == tf.estimator.ModeKeys.TRAIN
    with tf.variable_scope('learning_rate'):
        global_step = tf.train.get_global_step()
        learning_rate = tf.train.cosine_decay(params['initial_learning_rate'],
                                              global_step,
                                              decay_steps=params['num_steps'])
        tf.summary.scalar('learning_rate', learning_rate)

    with tf.variable_scope('optimizer'):
        optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.9)

        if params['backbone'] == 'shufflenet':
            var_list = [
                v for v in tf.trainable_variables()
                if 'Conv1' not in v.name and 'Stage2' not in v.name
            ]
        elif params['backbone'] == 'mobilenet':
            var_list = [
                v for v in tf.trainable_variables()
                if all('Conv2d_%d_' % i not in v.name
                       for i in range(6)) and 'Conv2d_0' not in v.name
            ]
        elif params['backbone'] == 'resnet':
            var_list = [
                v for v in tf.trainable_variables()
                if 'resnet_v1_50/block1/' not in v.name
                and 'resnet_v1_50/conv1/' not in v.name
            ]

        grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
        grads_and_vars = [(3.0 * g, v) if 'thin_feature_maps' in v.name else
                          (g, v) for g, v in grads_and_vars]
        train_op = optimizer.apply_gradients(grads_and_vars, global_step)

    for g, v in grads_and_vars:
        tf.summary.histogram(v.name[:-2] + '_hist', v)
        tf.summary.histogram(v.name[:-2] + '_grad_hist', g)

    with tf.control_dependencies([train_op]), tf.name_scope('ema'):
        ema = tf.train.ExponentialMovingAverage(decay=MOVING_AVERAGE_DECAY,
                                                num_updates=global_step)
        train_op = ema.apply(tf.trainable_variables())

    return tf.estimator.EstimatorSpec(mode, loss=total_loss, train_op=train_op)
Example 21
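# Map the 24 gesture-class indices to ASL letters; J and Z are omitted
# since those signs require motion.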
from utils.fps import Fps

#########
# Not sure where to put this, sorry Mike
numbers = [x for x in range(0, 24)]
letters = [
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P',
    'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y'
]
gesture_dict = dict(zip(numbers, letters))
#########

cap = VideoCaptureThreading()
cap.start()
fps = Fps()
detector = Detector()

cv.namedWindow("main")

while True:
    ret, frame = cap.read()

    if not ret:
        print("no camera found")
        break

    # frame = cv.medianBlur(frame, 3)

    if fps.nbf % 2 == 0:
        rel_boxes, scores = detector.detect_objects(cv.cvtColor(frame, 4))
Example 22
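# Checkpoint evaluation: select the backbone and dataset loader, then
# evaluate either a single checkpoint or a range of saved checkpoints.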
def main(argv=None):  # pylint: disable=unused-argument
    assert args.ckpt > 0 or args.batch_eval
    assert args.detect or args.segment, "Either detect or segment should be True"
    if args.trunk == 'resnet50':
        net = ResNet
        depth = 50
    elif args.trunk == 'resnet101':
        net = ResNet
        depth = 101
    elif args.trunk == 'vgg16':
        net = VGG
        depth = 16

    net = net(config=net_config, depth=depth, training=False)

    if args.dataset == 'voc07' or args.dataset == 'voc07+12':
        loader = VOCLoader('07', 'test')
    elif args.dataset == 'voc12':
        loader = VOCLoader('12', 'val', segmentation=args.segment)
    elif args.dataset == 'coco':
        loader = COCOLoader(args.split)
    else:
        loader = MODDLoader('all')

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                          log_device_placement=False)) as sess:
        detector = Detector(sess,
                            net,
                            loader,
                            net_config,
                            no_gt=args.no_seg_gt)
        if args.dataset == 'coco':
            tester = COCOEval(detector, loader)
        else:
            tester = Evaluation(detector,
                                loader,
                                iou_thresh=args.voc_iou_thresh)
        if not args.batch_eval:
            detector.restore_from_ckpt(args.ckpt)
            tester.evaluate_network(args.ckpt)
        else:
            log.info('Evaluating %s' % args.run_name)
            ckpts_folder = CKPT_ROOT + args.run_name + '/'
            out_file = ckpts_folder + evaluation_logfile

            max_checked = get_last_eval(out_file)
            log.debug("Maximum checked ckpt is %i" % max_checked)
            with open(out_file, 'a') as f:
                start = max(args.min_ckpt, max_checked + 1)
                ckpt_files = glob(ckpts_folder + '*.data*')
                folder_has_nums = np.array(list((map(filename2num,
                                                     ckpt_files))),
                                           dtype='int')
                nums_available = sorted(
                    folder_has_nums[folder_has_nums >= start])
                nums_to_eval = [nums_available[-1]]
                for n in reversed(nums_available):
                    if nums_to_eval[-1] - n >= args.step:
                        nums_to_eval.append(n)
                nums_to_eval.reverse()

                for ckpt in nums_to_eval:
                    log.info("Evaluation of ckpt %i" % ckpt)
                    tester.reset()
                    detector.restore_from_ckpt(ckpt)
                    res = tester.evaluate_network(ckpt)
                    f.write(res)
                    f.flush()
Example 23
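# Demo that handles both video streams and image sequences, logging
# per-frame timing and optionally writing an output video.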
def demo(opt):
    os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str
    opt.debug = max(opt.debug, 1)
    detector = Detector(opt)

    if opt.demo == 'webcam' or \
      opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
        is_video = True
        # demo on video stream
        cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)
    else:
        is_video = False
        # Demo on images sequences
        if os.path.isdir(opt.demo):
            image_names = []
            ls = os.listdir(opt.demo)
            for file_name in sorted(ls):
                ext = file_name[file_name.rfind('.') + 1:].lower()
                if ext in image_ext:
                    image_names.append(os.path.join(opt.demo, file_name))
        else:
            image_names = [opt.demo]

    # Initialize output video
    out = None
    out_name = opt.demo[opt.demo.rfind('/') + 1:]
    print('out_name', out_name)
    if opt.save_video:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('./{}.avi'.format(opt.exp_id + '_' + out_name),
                              fourcc, opt.save_framerate,
                              (opt.video_w, opt.video_h))

    if opt.debug < 5:
        detector.pause = False
    cnt = 0
    results = {}

    while True:
        if is_video:
            _, img = cam.read()
            if img is None:
                save_and_exit(opt, out, results, out_name)
        else:
            if cnt < len(image_names):
                img = cv2.imread(image_names[cnt])
            else:
                save_and_exit(opt, out, results, out_name)
        cnt += 1

        # resize the original video for saving video results
        if opt.resize_video:
            img = cv2.resize(img, (opt.video_w, opt.video_h))

        # skip the first X frames of the video
        if cnt < opt.skip_first:
            continue

        # track or detect the image.
        ret = detector.run(img)

        # log run time
        time_str = 'frame {} |'.format(cnt)
        for stat in time_stats:
            time_str = time_str + '{} {:.3f}s |'.format(stat, ret[stat])
        print(time_str)

        # results[cnt] is a list of dicts:
        #  [{'bbox': [x1, y1, x2, y2], 'tracking_id': id, 'category_id': c, ...}]
        results[cnt] = ret['results']

        # save debug image to video
        if opt.save_video:
            out.write(ret['generic'])
            if not is_video:
                cv2.imwrite('../results/demo{}.jpg'.format(cnt),
                            ret['generic'])

        # esc to quit and finish saving video
        if cv2.waitKey(1) == 27:
            save_and_exit(opt, out, results, out_name)
            return
    save_and_exit(opt, out, results)
Example 24
def test():

    filepath = './example_data/'

    filepath += 'ManchesterByTheSea.csv'

    with open(filepath) as f:
        colnames = f.readline().split(",")
        firstline = f.readline().split(",")

    firstdateindex = float(firstline[0])
    datacols = list(range(1, (len(colnames))))

    data = np.loadtxt(filepath,
                      dtype=np.float64,
                      delimiter=",",
                      skiprows=1,
                      usecols=datacols)
    if len(data.shape) == 1:
        data = data.reshape((data.shape[0], 1))
    T, s1 = data.shape
    s2 = 1
    """Set prior hazard and pruning threshold"""
    prior_hazard = 30
    pruning_threshold = T + 1
    """create hazard model object"""
    cp_model = CpModel(prior_hazard)
    """create model object(s)"""
    lgcp_model = LGCPModel(
        prior_signal_variance=1,
        prior_lengthscale=1,
        custom_kernel=None,
        inference_method='laplace',
        # 'laplace', 'variational_inference', 'sparse_variational_inference',
        refresh_rate=8,
        M_pseudo_input_size=10,
        S1=s1,
        S2=s2,
        auto_prior_update=True,  # put false for now True
    )

    mlgcp_model = mLGCPModel(
        prior_signal_variance=1,
        prior_lengthscale=1,
        custom_kernel=None,
        inference_method='laplace',
        refresh_rate=5,
        M_pseudo_input_size=10,
        S1=s1,
        S2=s2,
        auto_prior_update=True,
    )
    """Single model"""
    detector = Detector(
        data,
        np.array([lgcp_model]),
        np.array([1]),
        cp_model,
        s1,
        s2,
        T,
        threshold=pruning_threshold,  # None for now, pruning_threshold
        trim_type="keep_K",
        store_mrl=True,
        store_rl=True)
    """Multiple models"""
    # detector = Detector(
    #     data,
    #     np.array([lgcp_model, pg_model]),
    #     np.array([1/2,1/2]),
    #     cp_model,
    #     s1,
    #     s2,
    #     T,
    #     threshold = pruning_threshold,
    #     trim_type = "keep_K",
    #     store_mrl = True,
    #     store_rl = True
    # )
    """Measure computation time"""
    start = time.time()
    """run MVBOCPDwMS"""
    print("Running CP detection...")
    j = 0
    for t in range(0, T):
        detector.next_run(data[t, :], t + 1)
        if t >= j * T // 100:
            print(j, "% Complete")
            j += 1

    print("Done. Plotting...")

    end = time.time()
    print("Execution time (seconds): ", end - start)
    """Plot CPs and run lengths"""
    r_distr_plot(
        data,
        T,
        detector,
        s1 * s2,
        dateindex=float(0),
        dateincr=float(1),
        # title="",
        # ylabel="",
        # xlabel=""
    )
Example 25
def run_demo(args):
    cap = open_images_capture(args.input, args.loop)

    log.info('OpenVINO Inference Engine')
    log.info('\tbuild: {}'.format(get_version()))
    core = Core()

    log.info('Reading Object Detection model {}'.format(args.model_od))
    detector_person = Detector(core,
                               args.model_od,
                               device=args.device,
                               label_class=args.person_label)
    log.info('The Object Detection model {} is loaded to {}'.format(
        args.model_od, args.device))

    log.info('Reading Human Pose Estimation model {}'.format(args.model_hpe))
    single_human_pose_estimator = HumanPoseEstimator(core,
                                                     args.model_hpe,
                                                     device=args.device)
    log.info('The Human Pose Estimation model {} is loaded to {}'.format(
        args.model_hpe, args.device))

    delay = int(cap.get_type() in ('VIDEO', 'CAMERA'))
    video_writer = cv2.VideoWriter()

    frames_processed = 0
    presenter = monitors.Presenter(args.utilization_monitors, 25)
    metrics = PerformanceMetrics()

    start_time = perf_counter()
    frame = cap.read()
    if frame is None:
        raise RuntimeError("Can't read an image from the input")

    if args.output and not video_writer.open(
            args.output, cv2.VideoWriter_fourcc(*'MJPG'), cap.fps(),
        (frame.shape[1], frame.shape[0])):
        raise RuntimeError("Can't open video writer")

    while frame is not None:
        bboxes = detector_person.detect(frame)
        human_poses = [
            single_human_pose_estimator.estimate(frame, bbox)
            for bbox in bboxes
        ]

        presenter.drawGraphs(frame)

        colors = [(0, 0, 255), (255, 0, 0), (0, 255, 0), (255, 0, 0),
                  (0, 255, 0), (255, 0, 0), (0, 255, 0), (255, 0, 0),
                  (0, 255, 0), (255, 0, 0), (0, 255, 0), (255, 0, 0),
                  (0, 255, 0), (255, 0, 0), (0, 255, 0), (255, 0, 0),
                  (0, 255, 0)]

        for pose, bbox in zip(human_poses, bboxes):
            cv2.rectangle(frame, (bbox[0], bbox[1]),
                          (bbox[0] + bbox[2], bbox[1] + bbox[3]), (255, 0, 0),
                          2)
            for id_kpt, kpt in enumerate(pose):
                cv2.circle(frame, (int(kpt[0]), int(kpt[1])), 3,
                           colors[id_kpt], -1)

        metrics.update(start_time, frame)

        frames_processed += 1
        if video_writer.isOpened() and (args.output_limit <= 0 or
                                        frames_processed <= args.output_limit):
            video_writer.write(frame)

        if not args.no_show:
            cv2.imshow('Human Pose Estimation Demo', frame)
            key = cv2.waitKey(delay)
            if key == 27:
                break
            presenter.handleKey(key)

        start_time = perf_counter()
        frame = cap.read()

    metrics.log_total()
    for rep in presenter.reportMeans():
        log.info(rep)
Example 26
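# LINE bot webhook: a Flask app that verifies the X-Line-Signature header and
# dispatches incoming events; Detector wraps the word-vector and model files.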
from cv import *
from utils import *
from detector import Detector
from key import LINE_KEY, WEBHOOK_KEY
from flask import Flask, request, abort
from linebot import LineBotApi, WebhookHandler
from linebot.exceptions import InvalidSignatureError

import os
import random
import re

app = Flask(__name__)

line_bot_api = LineBotApi(LINE_KEY)
handler = WebhookHandler(WEBHOOK_KEY)

detector = Detector(128, 'word2vec.wv', 'model.h5')
detect_mode = False


@app.route("/callback", methods=['POST'])
def callback():
    signature = request.headers['X-Line-Signature']
    body = request.get_data(as_text=True)
    app.logger.info("Request body: " + body)
    try:
        handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'OK'

Example 27
def instrument(beam=None,
               sample_path=None,
               sample='',
               angleMons=[45, 135],
               detector_width=[0.5, 0.5],
               detector_height=[0.5, 0.5],
               sourceTosample_x=0.,
               sourceTosample_y=0.,
               sourceTosample_z=0.,
               sampleTodetector_z=[0.5, 0.5],
               number_detectors=2,
               number_pixels_in_height=[256, 256],
               number_pixels_in_width=[256, 256],
               number_of_box_in_height=[3, 3],
               number_of_box_in_width=[3, 3]):
    """

    Parameters
    ----------
    beam
    sample_path:
    sample
    angleMons
    detector_width
    detector_height
    sourceTosample_x
    sourceTosample_y
    sourceTosample_z
    sampleTodetector_z
    number_detectors
    number_pixels_in_height
    number_pixels_in_width
    number_of_box_in_height
    number_of_box_in_width

    Returns
    -------

    """
    if beam is None:
        beam = os.path.join(thisdir, '../../beam/Neutrons_mcvine.dat')
    instrument = mcvine.instrument()
    a_source = mcvine.components.sources.NeutronFromStorage('source', beam)
    instrument.append(a_source, position=(0, 0, 0))

    samplename = sample

    if sample_path is None:
        samplexml = os.path.join(
            thisdir, '../../sample/sampleassembly_%s.xml' % samplename)
    else:
        samplexml = os.path.join(sample_path,
                                 'sampleassembly_%s.xml' % samplename)

    sample = mcvine.components.samples.SampleAssemblyFromXml(
        'sample', samplexml)
    instrument.append(sample,
                      position=(sourceTosample_x, sourceTosample_y,
                                sourceTosample_z),
                      relativeTo=a_source)

    save = mcvine.components.monitors.NeutronToStorage(
        'save', '{}.mcvine'.format(samplename))
    instrument.append(save, position=(0, 0, 0), relativeTo=sample)

    from detector import Detector

    width = detector_width
    height = detector_height

    NpixelsPerPanel = 0
    for i in range(number_detectors):
        angle = np.deg2rad(angleMons[i])
        print('detector_angle: ', angleMons[i])
        pixel_width = detector_width[i] / number_pixels_in_width[
            i] / number_of_box_in_width[i]  # *1.00000001
        pixel_height = height[i] / number_pixels_in_height[
            i] / number_of_box_in_height[i]
        NpixelsPerPanel += number_pixels_in_height[i]\
                           *number_of_box_in_height[i]\
                           *number_pixels_in_width[i]\
                           *number_of_box_in_width[i]

        instrument.append(Detector('detector{}'.format(i + 1),
                                   width[i],
                                   height[i],
                                   pixel_width,
                                   pixel_height,
                                   'detector{}'.format(i + 1),
                                   start_index=NpixelsPerPanel * i),
                          position=(sampleTodetector_z[i] * np.sin(angle), 0,
                                    sampleTodetector_z[i] * np.cos(angle)),
                          orientation=(0, np.rad2deg(angle), 0),
                          relativeTo=sample)

    return instrument
Example 28
def hentAI_detection(dcp_dir=None,
                     in_path=None,
                     is_mosaic=False,
                     is_video=False,
                     save_mask=False):
    # Create new window? Can show loading bar
    # hent_win = new_window()
    # info_label = Label(hent_win, text="Beginning detection")
    # info_label.pack(padx=10,pady=10)
    # hent_win.mainloop()

    if dcp_dir is None:
        error(5)
    if in_path is None:
        error(2)

    # print(save_mask)

    #Import the big guns here. It can take a while for tensorflow, and a laggy initial bringup can look sketchy tbh

    # print('Initializing Detector class')
    detect_instance = Detector(weights_path=weights_path)
    # print('loading weights')
    detect_instance.load_weights()
    if is_mosaic:
        # Copy input folder to decensor_input_original. NAMES MUST MATCH for DCP
        print('copying inputs into input_original dcp folder')
        # print(in_path)
        # print(listdir(in_path))
        for file in listdir(in_path):
            # kinda dumb but check if same file
            shutil.copy(in_path + '/' + file,
                        dcp_dir + '/decensor_input_original/')

    # Run detection
    if is_video:
        print('running video detection')
        loader = Tk()
        loader.title('Running detections')
        load_label = Label(
            loader,
            text=
            'Now running detections. This can take around a minute or so per image. Please wait'
        )
        load_label.pack(side=TOP, fill=X, pady=10, padx=20)
        loader.update()
        detect_instance.run_on_folder(
            input_folder=in_path,
            output_folder=dcp_dir + '/decensor_input/',
            is_video=True,
            orig_video_folder=dcp_dir + '/decensor_input_original/')
        loader.destroy()
    else:
        print('running detection, outputting to dcp input')
        loader = Tk()
        loader.title('Running detections')
        load_label = Label(
            loader,
            text=
            'Now running detections. This can take around a minute or so per image. Please wait'
        )
        load_label.pack(side=TOP, fill=X, pady=10, padx=20)
        loader.update()
        detect_instance.run_on_folder(input_folder=in_path,
                                      output_folder=dcp_dir +
                                      '/decensor_input/',
                                      is_video=False,
                                      save_mask=save_mask)
        loader.destroy()

    # Announce completion, offer to run DCP from DCP directory
    print('Process complete!')
    popup = Tk()
    popup.title('Success!')
    label = Label(
        popup,
        text='Process executed successfully! Now you can run DeepCreamPy.')
    label.pack(side=TOP, fill=X, pady=20, padx=10)
    num_jpgs = detect_instance.get_non_png()
    if (num_jpgs > 0):
        label2 = Label(
            popup,
            text=str(num_jpgs) +
            " files are NOT in .png format, and were not processed.\nPlease convert jpgs to pngs."
        )
        label2.pack(side=TOP, fill=X, pady=10, padx=5)
    # dcprun = Button(popup, text='Run DCP (Only if you have the .exe)', command= lambda: run_dcp(dcp_dir))
    # dcprun.pack(pady=10)
    okbutton = Button(popup, text='Ok', command=popup.destroy)
    okbutton.pack()
    popup.mainloop()
Example 29
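# Test runner: evaluates the detector over every suite under suite_root and
# prints per-suite and combined summaries.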

if __name__ == "__main__":

    suite_root = "all_suites"
    if len(sys.argv) == 2:
        suite_root = str(sys.argv[1])

    print("\nSTARTING TEST")
    print("Suite root:  ", suite_root)
    print("")

    # Label names from obj.names as list
    label_names = open(os.path.join("..", "input",
                                    "obj.names")).read().splitlines()

    d = Detector(None)

    result = Result(suite_root)
    result.combine(test_suite("", suite_root, label_names, d, root=True))

    print("\n\n============================================================")
    print("\nSUMMARY")

    for suite in all_suite_results:
        print(
            "\n~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~\n")
        suite.print()

    print("\n============================================================")
    result.print()