Example #1
def eval(train_file):
    print('TRAIN')
    gold_annotations = read_annotations('../data/TRAIN.annotations', return_sent=True)
    pred_annotations = read_annotations(train_file, return_sent=True)

    print(f'Entities extracted as entirety')
    P, P_miss = get_precision(gold_annotations, pred_annotations, entirety=True)
    R, R_miss = get_recall(gold_annotations, pred_annotations, entirety=True)
    print(f'Precision: {P:.3f}')
    print(f'Recall:    {R:.3f}')
    print(f'F1:        {((2 * R * P) / (P + R)):.3f}')

    print()
    print(f'Entities extracted with "in" method')
    P, P_miss = get_precision(gold_annotations, pred_annotations, entirety=False)
    R, R_miss = get_recall(gold_annotations, pred_annotations, entirety=False)
    print(f'Precision: {P:.3f}')
    print(f'Recall:    {R:.3f}')
    print(f'F1:        {((2 * R * P) / (P + R)):.3f}')

    print(f'precision miss:')
    for t in random.sample(P_miss, 5):
        print(t)
        print(get_dep_path(sent=nlp(t[3][2:-2]), per=t[1], org=t[2]))
    print()
    print(f'recall miss:')
    for t in random.sample(R_miss, 5):
        print(t)
        print(get_dep_path(sent=nlp(t[3][2:-2]), per=t[1], org=t[2]))
Example #2
def load_train_data(args, train_dir, valid_prop=0.10):
    """load training data and write to IO formatted training and validation files"""
    vocab = set()
    tfile = codecs.open(join(args.work_dir, TRAIN_FILE_NAME), 'w', 'utf-8')
    vfile = codecs.open(join(args.work_dir, VALID_FILE_NAME), 'w', 'utf-8')
    txt_files = [f for f in listdir(train_dir) if f.endswith(".txt")]
    random.shuffle(txt_files)
    num_val_files = int(len(txt_files) * valid_prop)
    for findex, txt_file in enumerate(txt_files):
        print("Reading", txt_file)
        rfile = vfile if findex < num_val_files else tfile
        doc_tokens, file_vocab = tokenize_document(join(train_dir, txt_file))
        vocab = vocab.union(file_vocab)
        annotations = read_annotations(join(train_dir, txt_file[:-3] + "ann"))
        for token in doc_tokens:
            ignore_token = False
            for ann in annotations:
                if token.start >= ann.start and token.end <= ann.end:
                    # Change this for IOB annotations
                    if ann.atype == LOC_ANN_TAG:
                        token.encoding = "I-LOC"
                    if ann.atype == PRO_ANN_TAG:
                        ignore_token = True
                    break
            if not ignore_token:
                print(token.text + "\t" + token.encoding, file=rfile)
    tfile.close()
    vfile.close()
    return vocab
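
The "# Change this for IOB annotations" comment above marks where the IO scheme could be switched to IOB. A minimal sketch of that variant (the helper name is hypothetical; it assumes the same start/end/atype attributes on tokens and annotations used here): the first token of a location span gets "B-LOC", later tokens of the same annotation get "I-LOC".

def encode_token_iob(token, annotations, loc_tag, pro_tag):
    """Sketch of an IOB variant of the tagging loop above; returns ignore_token."""
    for ann in annotations:
        if token.start >= ann.start and token.end <= ann.end:
            if ann.atype == loc_tag:
                # B-LOC on the first token of the span, I-LOC afterwards
                token.encoding = "B-LOC" if token.start == ann.start else "I-LOC"
            if ann.atype == pro_tag:
                return True
            break
    return False
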
Example #3
def replace_annotation_in_image(image, mask, new_path, bg_list, num_bg,
                                num_rotate, rotation_limit):
    results = []

    for i in range(num_rotate):
        fg_cut, mask_cut, h_cut, w_cut, rot_path = rotate_image(
            image, mask, rotation_limit, new_path)
        # check if mask and img have the same size, otherwise retry
        mh, mw = mask_cut.shape
        if not (mh == h_cut and mw == w_cut):
            print("fg_cut and mask_cut have different shapes! Skip.")
            continue

        for j in range(num_bg):
            bg_file = bg_list[randint(0, len(bg_list) - 1)]
            print("choose bg: " + bg_file)
            bg_label_path = bg_file.replace("/images/", "/labels/").replace(
                ".jpg", ".txt")
            bg = cv2.imread(bg_file, 1)
            bg_box_list = utils.read_annotations(bg_label_path)
            h, w, _ = bg.shape
            bbox_rand = get_random_position_on_surface(w_cut, h_cut,
                                                       bg_box_list, w, h)
            if not bbox_rand:
                # todo: try again with smaller scaled object?
                print("couldn't place annotation on new background")
            else:
                bg_img, bg_path = place_roi_on_bg(fg_cut, mask_cut, bg,
                                                  bbox_rand, rot_path, j)
                norm_box = utils.abs_to_norm_bbox(bbox_rand)
                result = (bg_img, bg_path, [norm_box])
                results.append(result)

    return results
Example #4
def disambiguate(args):
    '''Method for disambiguation'''
    # load pubmed files
    pub_files = [
        f for f in listdir(args.dir)
        if isfile(join(args.dir, f)) and f.endswith(".ann")
    ]
    for _, pubfile in enumerate(pub_files):
        entities = read_annotations(join(args.dir, pubfile))
        pmid = pubfile.replace(".ann", "")
        write_annotations(args.outdir, entities, pmid, True)
Example #5
def fine_tune_yolo(debug=False):
    video = Video("../datasets/AICity_data/train/S03/c010/frames")
    detection_transform = DetectionTransform()
    classes = utils.load_classes('../config/coco.names')

    hyperparams = parse_model_config('../config/yolov3.cfg')[0]
    learning_rate = float(hyperparams["learning_rate"])
    momentum = float(hyperparams["momentum"])
    decay = float(hyperparams["decay"])
    burn_in = int(hyperparams["burn_in"])

    model = Darknet('../config/yolov3.cfg')
    print(model)
    model.load_weights('../weights/yolov3.weights')
    model.train()
    for module_def, module in zip(model.module_defs, model.module_list):
        if module_def["type"] == "yolo":
            break
        module.train(False)
    if torch.cuda.is_available():
        model = model.cuda()

    optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()),
                     lr=1e-5)
    gt = read_annotations(
        '../datasets/AICity_data/train/S03/c010/m6-full_annotation.xml')
    dataset = YoloDataset(video, gt, classes, transforms=detection_transform)
    data_loader = DataLoader(dataset,
                             batch_size=16,
                             shuffle=True,
                             num_workers=4)

    for epoch in tqdm(range(10), file=sys.stdout, desc='Fine tuning'):
        for images, targets in tqdm(data_loader,
                                    file=sys.stdout,
                                    desc='Running epoch'):
            if torch.cuda.is_available():
                images = images.cuda()
                targets = targets.cuda()

            optimizer.zero_grad()
            loss = model(images, targets)
            loss.backward()
            optimizer.step()

    print('Training finished. Saving weights...')
    model.save_weights('../weights/fine_tuned_yolo_freeze.weights')
    print('Saved weights')
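
Note that the loop above only switches the pre-YOLO modules out of training mode; Module.train(False) does not stop their parameters from receiving gradients, so the requires_grad filter passed to Adam still includes them. If the intent is to actually freeze those layers (as the saved weight filename suggests), a sketch of the extra step, assuming the same Darknet module_defs/module_list structure:

for module_def, module in zip(model.module_defs, model.module_list):
    if module_def["type"] == "yolo":
        break
    module.train(False)          # eval mode for BatchNorm/Dropout
    for p in module.parameters():
        p.requires_grad = False  # exclude from optimizer updates
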
Example #6
def get_y(file, df):
    gold_annotations = read_annotations(file)
    print(
        f'{WORK_FOR} input annotations: {sum([len(annotations) for annotations in gold_annotations.values()])}'
    )
    y = np.zeros(df.shape[0])

    for i, idx in enumerate(df.index):
        sent_id, person, org, _ = idx

        for ann in gold_annotations[sent_id]:
            if (person in ann[0] or ann[0] in person) and (org in ann[2]
                                                           or ann[2] in org):
                y[i] = 1
                break
    return y
Example #7
def main():
    start_frame = 1440
    end_frame = 1789

    gt = read_annotations('../annotations', start_frame, end_frame)

    alg = 'mask_rcnn'

    detections = read_detections('../datasets/AICity_data/train/S03/c010/det/det_{0}.txt'.format(alg))

    kalman = KalmanTracking()
    for i in range(start_frame, end_frame):
        f = Frame(i)
        f.detections = detections[i]
        f.ground_truth = gt[i - start_frame]
        kalman(f)
        print(seq(f.detections).map(lambda d: d.id).to_list())
Example #8
def create_siamese_dataset():
    AICity_dataset_path = '../datasets/AICity_data/train/S03/c010/'
    siamese_dataset_path = '../datasets/siamese_data/'
    annotation_file = 'm6-full_annotation.xml'
    frames = int(2140 * 0.25)
    gt_detections = read_annotations(AICity_dataset_path + annotation_file,
                                     frames)

    for frame in range(frames + 1):
        frame_name = "frame_{:04d}.jpg".format(frame + 1)
        img = cv.imread(AICity_dataset_path + 'frames/' + frame_name)

        for detection in gt_detections[frame]:
            xtl, ytl = detection.top_left
            w = detection.width
            h = detection.height

            class_path = str(detection.label) + '-' + str(detection.id) + '/'
            if not os.path.exists(siamese_dataset_path + class_path):
                os.mkdir(siamese_dataset_path + class_path)
            cropped_img = img[ytl:ytl + h, xtl:xtl + w]
            cv.imwrite(siamese_dataset_path + class_path + frame_name,
                       cropped_img)
Example #9
def main():

    demonstration_list = []
    agents = read_annotations("annotations.txt")
    grid = create_video_grid(VIDEO_WIDTH, VIDEO_HEIGHT, BLOCK_SIZE)
    
    for agent in agents:
        positions = get_agent_positions_in_grid(agent, grid)
        step_list = get_steps_from_position_list(positions)
        demonstration_list.append(step_list)

    grid_width = len(grid[0])
    grid_height = len(grid)
    num_states = grid_width * grid_height
    placeholder_mdp = GridWorldMDP(width = grid_width, height=grid_height, init_loc=(0,0), 
        goal_locs = [], walls = [], name = "placeholder")    
    planner = Planner(placeholder_mdp, sample_rate=5)
    planner._compute_matrix_from_trans_func()

    feature_map = np.eye(num_states)
    reward_array = deep_maxent_irl(
        feature_map, planner, demonstration_list, args.learning_rate, args.num_iterations)  
    reward_matrix = np.reshape(reward_array, (grid_height, grid_width))
    np.save("reward_prior", reward_matrix) 
Example #10
def load_test_data(args, test_dir):
    """load test data and write to IO formatted file"""
    vocab = set()
    tfile = codecs.open(join(args.work_dir, "test-io.txt"), 'w', 'utf-8')
    txt_files = [f for f in listdir(test_dir) if f.endswith(".txt")]
    for _, txt_file in enumerate(txt_files):
        print("Reading", txt_file)
        doc_tokens, file_vocab = tokenize_document(join(test_dir, txt_file))
        vocab = vocab.union(file_vocab)
        annotations = read_annotations(join(test_dir, txt_file[:-3] + "ann"))
        for token in doc_tokens:
            ignore_token = False
            for ann in annotations:
                if token.start >= ann.start and token.end <= ann.end:
                    # Change this for IOB annotations
                    if ann.atype == LOC_ANN_TAG:
                        token.encoding = "I-LOC"
                    if ann.atype == PRO_ANN_TAG:
                        ignore_token = True
                    break
            if not ignore_token:
                print(token.text + "\t" + token.encoding, file=tfile)
    tfile.close()
    return vocab
Example #11
def off_the_shelf_ssd(tracking, debug=False, **kwargs):
    if cuda.is_available():
        torch.set_default_tensor_type('torch.cuda.FloatTensor')
    gt = read_annotations(
        '../datasets/AICity_data/train/S03/c010/m6-full_annotation.xml')
    video = Video("../datasets/AICity_data/train/S03/c010/frames")
    trans = transforms.Compose(
        [transforms.Resize((300, 300)),
         transforms.ToTensor()])

    labels = (  # always index 0
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

    model = build_ssd('test', 300, 21)  # initialize SSD
    model.load_weights('../weights/ssd300_mAP_77.43_v2.pth')
    if torch.cuda.is_available():
        model = model.cuda()

    frames = []

    model.eval()
    with torch.no_grad():
        for i, im in enumerate(video.get_frames()):

            im_tensor = trans(im)
            im_tensor = im_tensor.view((-1, ) + im_tensor.size())
            if torch.cuda.is_available():
                im_tensor = im_tensor.cuda()

            output = model.forward(im_tensor)
            detections = output.data

            w = im.width
            h = im.height
            frame = Frame(i)

            frame.ground_truth = gt[frame.id]

            # skip j = 0, because it's the background class
            for j in (2, 6, 7, 14):
                dets = detections[0, j, :]
                mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
                dets = torch.masked_select(dets, mask).view(-1, 5)
                if dets.size(0) == 0:
                    continue
                boxes = dets[:, 1:]
                scores = dets[:, 0].cpu().numpy()
                cls_dets = np.hstack((boxes.cpu().numpy(),
                                      scores[:,
                                             np.newaxis])).astype(np.float32,
                                                                  copy=False)
                for cls_det in cls_dets:
                    x1 = int(w * cls_det[0])
                    y1 = int(h * cls_det[1])
                    det = Detection(-1,
                                    labels[j - 1], (x1, y1),
                                    width=w * (cls_det[2] - cls_det[0]),
                                    height=h * (cls_det[3] - cls_det[1]),
                                    confidence=cls_det[4])
                    frame.detections.append(det)

            # kalman(frame)
            if tracking is not None:
                tracking(frame, frames, debug=debug)
            frames.append(frame)

            if debug:
                plt.figure()
                for det in frame.detections:
                    rect = patches.Rectangle(det.top_left,
                                             det.width,
                                             det.height,
                                             linewidth=2,
                                             edgecolor='blue',
                                             facecolor='none')
                    plt.gca().add_patch(rect)
                    plt.text(det.top_left[0],
                             det.top_left[1],
                             s='{} ~ {}'.format(det.label, det.id),
                             color='white',
                             verticalalignment='top',
                             bbox={
                                 'color': 'blue',
                                 'pad': 0
                             })
                plt.imshow(im)
                plt.axis('off')
                # plt.savefig('../video/video_ssd_KalmanID/frame_{:04d}'.format(i))
                plt.show()
                plt.close()

        #iou_over_time(frames)
        mAP = mean_average_precision(frames)
        print("SSD mAP:", mAP)
Example #12
#
# Alternatives
# Controller_object (Controller)
# Want_suspect (Want)
# Building_subparts (Building)
# Hedging
# Be_in_agreement_on_action (agreement)
# Disgraceful_situation (situation)
# Change_event_duration (event)
# Intentional_deception (deception)

ANNOTATIONS_PATH = 'annotations.csv'
STOPWORDS_PATH = 'stop_words_FULL.txt'

if __name__ == '__main__':
    annotations = read_annotations(ANNOTATIONS_PATH)
    stopw = load_stopwords(STOPWORDS_PATH)

    res_rows = []

    giuste = 0
    for frame, word, target_synset in annotations:
        # If no mapping is available
        if target_synset is None:
            giuste += 1
            continue

        # Remove the PoS from the word extracted from the annotations
        input_word = word.split('.')[0]
        # Disambiguate the word input_word (the central one in the
        # annotations) using the frame as context
Example #13
def main(cwd, do_amgng, amgng_file, ma_window, ma_recalc_delay,
         do_cla, cla_file, buffer_len, plot):
    global mean_oa, mean_sem
    values = inspect.getargvalues(inspect.currentframe())[3]
    print('using parameters: {}'.format(values))
    annotations_path = os.path.join(cwd, 'annotations.csv')
    anndf = utils.read_annotations(annotations_path, ['Aux'])

    amgng_df = None
    if do_amgng:
        from mgng.amgng import main as amgng_main
        print('Training AMGNG model...')
        out_file = os.path.join(cwd, 'out_amgng_{}'.format(amgng_file))
        full_path = os.path.join(cwd, amgng_file)
        start = datetime.now()
        amgng_main(input_file=full_path, output_file=out_file,
                   buffer_len=buffer_len, index_col='timestamp',
                   skip_rows=[1,2], ma_window=ma_window,
                   ma_recalc_delay=ma_recalc_delay)
        amgng_time = datetime.now() - start
        amgng_df = set_annotations_and_plot(out_file, anndf,
                                            'anomaly_density', plot)
        amgng_df['AnnotationSpans'] = amgng_df.Annotation.copy()
        utils.fill_annotations(amgng_df, 'Annotation', '.*OA.*',
                               spans_field='AnnotationSpans', method='pad',
                               mean=mean_oa, std=mean_sem)
        amgng_df.to_csv(out_file)
        print('Time taken: amgng={}'.format(amgng_time))

    cla_df = None
    if do_cla:
        from cla.swarm import swarm
        from cla.cla import create_model, open_input_file
        from cla.cla import prepare_run, process_row
        print('Training CLA model...')
        full_path = os.path.join(cwd, cla_file)
        out_file = os.path.join(cwd, 'out_cla_{}'.format(cla_file))
        cla_model = {}
        fields, csv_reader, input_handler = open_input_file(full_path)
        start = datetime.now()
        for p in fields:
            swarm_desc = fill_swarm_description(full_path, buffer_len, p)
            model_params = swarm(cwd=cwd, input_file=cla_file,
                                 swarm_description=swarm_desc)
            model = create_model(params=model_params, predictedField=p)
            model_out_file = os.path.join(cwd, '{}_{}'.format(p, cla_file))
            shifter, output_handler = prepare_run(fields=fields,
                                                  predicted_field=p,
                                                  plot=False,
                                                  output_name=model_out_file)
            cla_model[p] = {'model': model, 'shifter': shifter,
                            'output_handler': output_handler,
                            'model_out_file': model_out_file}

        swarm_time = datetime.now() - start
        start = datetime.now()
        for i, row in enumerate(csv_reader):
            for p in fields:
                process_row(row=row, fields=fields, predicted_field=p,
                            model=cla_model[p]['model'],
                            shifter=cla_model[p]['shifter'],
                            output_handler=cla_model[p]['output_handler'],
                            counter=i)
        cla_time = datetime.now() - start

        input_handler.close()
        for i, p in enumerate(fields):
            cla_model[p]['output_handler'].close()
            df = pd.read_csv(cla_model[p]['model_out_file'], parse_dates=True,
                             index_col='timestamp')
            if i == 0:
                cla_df = df
            else:
                cla_df.anomaly_likelihood += df.anomaly_likelihood
        cla_df.anomaly_likelihood /= len(fields)
        cla_df.to_csv(out_file)
        cla_df = set_annotations_and_plot(out_file, anndf,
                                          'anomaly_likelihood', plot)
        cla_df['AnnotationSpans'] = cla_df.Annotation.copy()
        utils.fill_annotations(cla_df, 'Annotation', '.*OA.*',
                               spans_field='AnnotationSpans', method='pad',
                               mean=mean_oa, std=mean_sem)
        cla_df.to_csv(out_file)
        print('Time taken: swarm={}, cla={}'.format(swarm_time, cla_time))

    return amgng_df, cla_df
Example #14
def evaluate_detection(image_list,
                       test_dict,
                       graph_list,
                       label_map,
                       min_threshold=0.2,
                       ignore_labels=False,
                       do_recognition=False,
                       graph_r=None,
                       labels_r=None,
                       save_images=False,
                       logging_dir=None):
    """
    Evaluate a list of detection graphs given a minimum threshold. Analyse results to find a good threshold.
    Optionally use recognition graph to re-label results or ignore the labels completely (just compare bounding boxes).
    :param image_list: list of images (label files exist)
    :param graph_list: list of detection graph files
    :param label_map: label map for detection
    :param min_threshold: minimum threshold to filter detection results
    :param ignore_labels: if true, only compare bounding boxes
    :param do_recognition: if true, re-label detection results
    :param graph_r: graph file for recognition
    :param labels_r: label file for recognition
    :return:
    """
    detector = detect.Detector(detection_threshold=min_threshold)
    if do_recognition:
        recognizer = recognize.TensorflowRecognition()
        recognizer.load_graph(graph_r, labels_r)

    for graph in graph_list:
        detector.load_graph(graph, label_map)
        # evaluate this graph and determine a good threshold

        rec_true = 0
        rec_false = 0
        detect_true = 0
        detect_false = 0
        detect_missing = 0
        num_images = len(image_list)
        sum_threshold = 0.0

        for image in image_list:
            print(image)
            label_path = utils.get_label_path_by_image(image)
            annotation_list = utils.read_annotations(label_path)

            img = cv2.imread(image)
            id_list, score_list, box_list = detector.detect(img)
            detection_list = utils.detection_results_to_annotation_list(
                id_list, score_list, box_list)

            match_list = []
            annotation_unmatched_list = list(range(len(annotation_list)))
            for detection in detection_list:

                best_match = -1
                best_p = 0.0  # minimum likelihood for matching
                label_match = False
                for i in range(len(annotation_list)):
                    annotation = annotation_list[i]
                    p = match_bounding_boxes(detection.bbox, annotation.bbox)
                    if ignore_labels:
                        if p > best_p:
                            best_match = i
                            best_p = p
                    elif not do_recognition:
                        if p > best_p:
                            hyp_list = [(detection.label, detection.prob)]
                            likelihood, score = match_hypotheses(
                                1, hyp_list, annotation.label)
                            # todo: save threshold/score
                            if likelihood > 0:
                                label_match = True
                    else:
                        if p > best_p:
                            n = 3  # todo
                            hyp_list = [(detection.label, detection.prob)
                                        ]  # todo: do recognition
                            likelihood, score = match_hypotheses(
                                n, hyp_list, annotation.label)
                            # todo: save threshold/score
                            if likelihood > 0:
                                label_match = True
                if best_match != -1:
                    sum_threshold += detection.prob  # accumulate thresholds of correct (!) detections (bbox only)
                match_list.append((best_match, best_p, label_match))

            if save_images:
                save_image(img, detection_list, annotation_list, match_list,
                           logging_dir)

            for match in match_list:
                if match[0] == -1:
                    detect_false = detect_false + 1
                else:
                    detect_true = detect_true + 1
                    if match[0] in annotation_unmatched_list:
                        annotation_unmatched_list.remove(match[0])
                    if match[2]:
                        rec_true = rec_true + 1
                    else:
                        rec_false = rec_false + 1
            detect_missing += len(annotation_unmatched_list)

        detected = detect_true + detect_false
        to_detect = detect_true + detect_missing
        rec_total = rec_true + rec_false
        detection_rate = float(detect_true) / float(to_detect)
        correct_detected = float(detect_true) / float(detected)
        recognition_rate = float(rec_true) / float(rec_total)
        avg_threshold = float(sum_threshold) / float(detect_true)
        print(
            "Detected {} of all annotations.\n{} of detections were correct (annotated)."
            .format(detection_rate, correct_detected))
        if do_recognition:
            print("\nRecognition rate: {}\n\n".format(recognition_rate))
        print("average threshold of correct (!) detections: {}".format(
            avg_threshold))
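
match_bounding_boxes is not shown in this snippet; the way its return value is used (a likelihood in [0, 1], higher is better) suggests an IoU-style overlap measure. A minimal sketch, under the assumption (not confirmed by the source) that boxes are (x_min, y_min, x_max, y_max) tuples:

def match_bounding_boxes(box_a, box_b):
    """Sketch: intersection over union of two (x_min, y_min, x_max, y_max) boxes."""
    ix1, iy1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    ix2, iy2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0
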
Example #15
import sys
from utils import read_annotations

gold_annotations = read_annotations(sys.argv[1])
pred_annotations = read_annotations(sys.argv[2])


def get_precision(gold_annotations, pred_annotations, entirety=False):
    TP, FP = 0, 0
    for sent_id, annotations in pred_annotations.items():
        for pred_ann in annotations:
            per, _, org = pred_ann
            T = False
            for gold_ann in gold_annotations[sent_id]:
                if entirety:
                    if per == gold_ann[0] and org == gold_ann[2]:
                        T = True
                        break
                else:
                    if (per in gold_ann[0]
                            or gold_ann[0] in per) and (org in gold_ann[2]
                                                        or gold_ann[2] in org):
                        T = True
                        break
            if T:
                TP += 1
            else:
                FP += 1
    return TP / (TP + FP)
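
Example #1 above also calls a get_recall counterpart. A minimal sketch that mirrors the matching logic of get_precision, iterating over the gold annotations instead (Example #1 additionally expects a list of missed tuples as a second return value, which this sketch omits):

def get_recall(gold_annotations, pred_annotations, entirety=False):
    TP, FN = 0, 0
    for sent_id, annotations in gold_annotations.items():
        for gold_ann in annotations:
            per, _, org = gold_ann
            found = False
            for pred_ann in pred_annotations.get(sent_id, []):
                if entirety:
                    if per == pred_ann[0] and org == pred_ann[2]:
                        found = True
                        break
                elif (per in pred_ann[0] or pred_ann[0] in per) and \
                        (org in pred_ann[2] or pred_ann[2] in org):
                    found = True
                    break
            if found:
                TP += 1
            else:
                FN += 1
    return TP / (TP + FN)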

Example #16
def main():
    video = Video("../datasets/AICity_data/train/S03/c010/vdo.avi")

    gt = read_annotations('../annotations', start_frame, end_frame)
    """
        DETECTIONS
    """
    det_algs = ['yolo3', 'mask_rcnn', 'ssd512']
    for alg in det_algs:
        detections = read_detections(
            '../datasets/AICity_data/train/S03/c010/det/det_{0}.txt'.format(
                alg))
        detections = detections[start_frame:end_frame + 1]

        frames = []

        # roi = cv2.imread('../datasets/AICity_data/train/S03/c010/roi.jpg')

        for im, f in seq(video.get_frames(
                start_frame_number=start_frame)).take(end_frame - start_frame +
                                                      1):
            f.ground_truth = gt[f.id]
            f.detections = detections[f.id]
            frames.append(f)

            if make_video:
                make_video_frame(im, f, frames)

        iou_over_time(frames)
        mAP = mean_average_precision(frames)
        print(alg, " mAP:", mAP)
    """
        DETECTIONS FROM ALTERED GROUND TRUTH 
    """
    frames = []

    for im, f in seq(video.get_frames()).take(end_frame - start_frame + 1):
        f.ground_truth = gt[f.id]
        f.detections = alter_detections(f.ground_truth)
        frames.append(f)

        if make_video:
            make_video_frame(im, f, frames)

    iou_over_time(frames)
    mAP = mean_average_precision(frames)
    print('Random alteration', " mAP:", mAP)
    """
        OPTICAL FLOW 
    """
    of_det_1 = read_optical_flow(
        '../datasets/optical_flow/detection/LKflow_000045_10.png')
    of_det_2 = read_optical_flow(
        '../datasets/optical_flow/detection/LKflow_000157_10.png')

    of_gt_1 = read_optical_flow('../datasets/optical_flow/gt/000045_10.png')
    of_gt_2 = read_optical_flow('../datasets/optical_flow/gt/000157_10.png')

    img_1 = cv2.imread('../datasets/optical_flow/img/000045_10.png')
    img_2 = cv2.imread('../datasets/optical_flow/img/000157_10.png')

    msen_of = msen(of_det_2, of_gt_2)
    pepn_of = pepn(of_det_2, of_gt_2)

    print(msen_of, pepn_of)
    show_optical_flow(of_gt_1)
    show_optical_flow_arrows(img_1, of_gt_1)

    msen_45 = msen(of_det_1, of_gt_1, plot=True)
    pepn_45 = pepn(of_det_1, of_gt_1)
    print("Sequence 045: MSEN", msen_45, "PEPN", pepn_45)

    msen_157 = msen(of_det_2, of_gt_2, plot=True)
    pepn_157 = pepn(of_det_2, of_gt_2)
    print("Sequence 157: MSEN", msen_157, "PEPN", pepn_157)

    show_optical_flow(of_gt_1)
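
msen and pepn are not defined in this snippet. In this kind of KITTI-style optical flow evaluation, MSEN is commonly the mean end-point error over valid (non-occluded) pixels and PEPN the percentage of valid pixels whose error exceeds 3 px. A sketch under the assumption that read_optical_flow returns an HxWx3 array of (u, v, valid) per pixel; the plotting done by the real msen is omitted:

import numpy as np

def msen(flow_det, flow_gt, plot=False):
    """Sketch: mean end-point error over valid (non-occluded) GT pixels."""
    err = np.sqrt((flow_det[..., 0] - flow_gt[..., 0]) ** 2 +
                  (flow_det[..., 1] - flow_gt[..., 1]) ** 2)
    valid = flow_gt[..., 2] > 0
    return float(err[valid].mean())

def pepn(flow_det, flow_gt, threshold=3.0):
    """Sketch: percentage of valid pixels with end-point error above threshold."""
    err = np.sqrt((flow_det[..., 0] - flow_gt[..., 0]) ** 2 +
                  (flow_det[..., 1] - flow_gt[..., 1]) ** 2)
    valid = flow_gt[..., 2] > 0
    return float((err[valid] > threshold).mean() * 100.0)
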
Example #17
def off_the_shelf_yolo(tracking, debug=False, *args, **kwargs):
    video = Video("../datasets/AICity_data/train/S03/c010/frames")
    detection_transform = DetectionTransform()
    classes = utils.load_classes('../config/coco.names')
    gt = read_annotations(
        '../datasets/AICity_data/train/S03/c010/m6-full_annotation.xml')

    model = Darknet('../config/yolov3.cfg')
    model.load_weights('../weights/fine_tuned_yolo_freeze.weights')
    if torch.cuda.is_available():
        model = model.cuda()

    frames = []
    last_im = None

    model.eval()
    with torch.no_grad():
        for i, im in tqdm(enumerate(video.get_frames(start=len(video) // 4)),
                          total=len(video),
                          file=sys.stdout,
                          desc='Yolo'):
            im_tensor = detection_transform(im)

            im_tensor = im_tensor.view((-1, ) + im_tensor.size())
            if torch.cuda.is_available():
                im_tensor = im_tensor.cuda()

            detections = model.forward(im_tensor)
            detections = utils.non_max_suppression(detections,
                                                   80,
                                                   conf_thres=.6,
                                                   nms_thres=0.3)

            frame = Frame(i + (len(video) // 4))
            frame.ground_truth = gt[frame.id]

            for d in detections[0]:
                if int(d[6]) in VALID_LABELS:
                    bbox = d.cpu().numpy()
                    det = Detection(-1,
                                    classes[int(d[6])], (bbox[0], bbox[1]),
                                    width=bbox[2] - bbox[0],
                                    height=bbox[3] - bbox[1],
                                    confidence=d[5])
                    detection_transform.unshrink_detection(det)
                    frame.detections.append(det)

            if tracking is not None:
                last_frame = None if len(frames) == 0 else frames[-1]
                tracking(frame=frame,
                         im=im,
                         last_frame=last_frame,
                         last_im=last_im,
                         frames=frames,
                         debug=False)

            frames.append(frame)

            last_im = im

            if debug:
                plt.figure()
                for det in frame.detections:
                    rect = patches.Rectangle(det.top_left,
                                             det.width,
                                             det.height,
                                             linewidth=2,
                                             edgecolor='blue',
                                             facecolor='none')
                    plt.gca().add_patch(rect)
                    if tracking is None:
                        text = '{}'.format(det.label)
                    else:
                        text = '{} ~ {}'.format(det.label, det.id)
                    plt.text(det.top_left[0],
                             det.top_left[1],
                             s=text,
                             color='white',
                             verticalalignment='top',
                             bbox={
                                 'color': 'blue',
                                 'pad': 0
                             })
                plt.imshow(im)
                plt.axis('off')
                # plt.savefig('../video/video_yolo_fine_tune_good/frame_{:04d}'.format(i))
                plt.show()
                plt.close()
        # iou_over_time(frames)
        mAP = mean_average_precision(frames)
        print("YOLO mAP:", mAP)
Example #18
def main(cwd, do_amgng, amgng_file, ma_window, ma_recalc_delay,
         do_cla, cla_file, buffer_len, plot):
    values = inspect.getargvalues(inspect.currentframe())[3]
    print('using parameters: {}'.format(values))
    annotations_path = os.path.join(cwd, 'annotations.csv')
    anndf = utils.read_annotations(annotations_path, ['Type'], 20000)
    amgng_df = None
    if do_amgng:
        from mgng.amgng import main as amgng_main
        print('Training AMGNG model...')
        out_file = os.path.join(cwd, 'out_amgng_{}'.format(amgng_file))
        full_path = os.path.join(cwd, amgng_file)
        start = datetime.now()
        amgng_main(input_file=full_path, output_file=out_file,
                   buffer_len=buffer_len, index_col='timestamp',
                   skip_rows=[1,2], ma_window=ma_window,
                   ma_recalc_delay=ma_recalc_delay)
        amgng_time = datetime.now() - start

        print('Reading results...')
        amgng_df = pd.read_csv(out_file, parse_dates=True,
                               index_col='timestamp')
        amgng_df['Annotation'] = anndf.Type
        print('Writing annotated results...')
        amgng_df.to_csv(out_file)
        if plot:
            utils.plot_results(amgng_df, ['ECG1'], 'anomaly_score',
                               'anomaly_density', '[rs]')
        print('Time taken: amgng={}'.format(amgng_time))

    cla_df = None
    if do_cla:
        from cla.swarm import swarm
        from cla.cla import main as cla_main
        out_file = os.path.join(cwd, 'out_cla_{}'.format(cla_file))
        print('Training CLA model...')
        full_path = os.path.join(cwd, cla_file)
        SWARM_DESCRIPTION = {
            'includedFields': [
                {
                    'fieldName': 'timestamp',
                    'fieldType': 'datetime',
                },
                {
                    'fieldName': 'ECG1',
                    'fieldType': 'float',
                },
            ],
            'streamDef': {
                'info': 'chfdbchf13 ECG1',
                'version': 1,
                'streams': [
                    {
                        'info': 'chfdbchf13',
                        'source': full_path,
                        'columns': ['*']
                    }
                ]
            },
            'inferenceType': 'TemporalAnomaly',
            'inferenceArgs': {
                'predictionSteps': [1],
                'predictedField': 'ECG1'
            },
            'iterationCount': buffer_len,
            'swarmSize': 'small'
        }
        start = datetime.now()
        swarm(cwd=cwd, input_file=cla_file,
              swarm_description=SWARM_DESCRIPTION)
        swarm_time = datetime.now() - start
        start = datetime.now()
        cla_main(cwd=cwd, input_file=full_path, output_name=out_file, plot=False,
                 predicted_field='ECG1')
        cla_time = datetime.now() - start

        print('Reading results...')
        cla_df = pd.read_csv(out_file, parse_dates=True, index_col='timestamp')
        cla_df['Annotation'] = anndf.Type
        print('Writing annotated results...')
        cla_df.to_csv(out_file)
        if plot:
            utils.plot_results(cla_df, ['ECG1'], 'anomaly_score',
                               'anomaly_likelihood', '[rs]')
        print('Time taken: swarm={}, cla={}'.format(swarm_time, cla_time))
    return amgng_df, cla_df
Example #19
def multiply_dataset(input_dir,
                     output_dir,
                     bg_dir,
                     num_rotate=1,
                     num_illuminate=1,
                     num_scale=1,
                     num_blur=1,
                     num_bg=1):

    print("input: " + input_dir)
    print("output: " + output_dir)

    bg_list = []
    use_bg = False
    if bg_dir is not None and not bg_dir == "" and num_bg > 0:
        print("try to read background data set")
        for dirname, dirnames, filenames in os.walk(bg_dir):
            for filename in filenames:
                file = dirname + '/' + filename
                if file.endswith(".jpg"):
                    l_file = file.replace("/images/",
                                          "/labels/").replace(".jpg", ".txt")
                    if not (os.path.isfile(l_file)):
                        print(
                            "error: Surface label file does not exist! Skipping image."
                        )
                        continue
                    bg_list.append("{}".format(file))
        print("found {} backgrounds".format(len(bg_list)))
        if len(bg_list) > 0:
            use_bg = True
    else:
        print("background change disabled")

    print("multiply image (light, scale, blur, rotate): {}x{}x{}x{}x{} times".
          format(num_illuminate, num_scale, num_blur, num_rotate, num_bg))
    rotation_limit = 80

    for dirname, dirnames, filenames in os.walk(input_dir):
        for filename in filenames:
            file_path = dirname + '/' + filename

            if not os.path.exists(dirname.replace(
                    input_dir, output_dir)):  # creates dir path
                os.makedirs(dirname.replace(input_dir, output_dir))
            roi_path = dirname.replace(input_dir, output_dir).replace(
                "/images", "/rois").replace("/labels", "/rois")
            if not os.path.exists(roi_path):
                os.makedirs(roi_path)

            # deals with all None-Image files
            if imghdr.what(file_path) is None:

                if ".jpg" in filename or ".png" in filename:  # ignore empty images
                    continue

                if dirname.endswith(
                        "/labels"
                ):  # ignores the label files, they will be written later
                    continue
                #if "train.txt" in filename or "test.txt" in filename:  # leaves the files empty to fill it later
                #    train_txt = open(file_path.replace(input_dir, output_dir).replace("test.txt", "train.txt"), 'a+')
                else:  # copy the files without changing
                    old_file = open(file_path, 'r')
                    new_file = open(file_path.replace(input_dir, output_dir),
                                    'w+')
                    new_file.write(old_file.read())
                    old_file.close()  # close the streams
                    new_file.close()
                    continue

            if "mask." in file_path:  # ignore masks
                continue

            if "/rois" in file_path:  # ignore generated rois
                continue

            label_path = file_path.replace("/images/", "/labels/").replace(
                ".jpg", ".txt").replace(".png", ".txt")
            mask_path = file_path.replace(".jpg", "_mask.jpg").replace(
                ".png", "_mask.png")
            if not os.path.isfile(label_path):  # skip images with no labels
                continue

            has_mask = False
            if os.path.isfile(mask_path):  # check mask exists
                has_mask = True

            print(file_path)

            image = cv2.imread(file_path)
            mask = cv2.imread(mask_path, 0)
            annotation_list = utils.read_annotations(label_path)
            new_path = file_path.replace(input_dir, output_dir)

            results = []
            if has_mask and use_bg:
                results = replace_annotation_in_image(image, mask, new_path,
                                                      bg_list, num_bg,
                                                      num_rotate,
                                                      rotation_limit)

            bbox_list = []
            for a in annotation_list:
                bbox = copy.deepcopy(a.bbox)
                bbox_list.append(bbox)
            original = (image, new_path, bbox_list)
            results.append(original)
            for res in results:
                if len(annotation_list) != len(res[2]):
                    print(("ERROR: There are {} annotations, but only {} bounding "
                           "boxes were changed. Skip.").format(
                               len(annotation_list), len(res[2])))
                    continue

                change_whole_image(res[0], res[1], annotation_list, res[2],
                                   num_illuminate, num_scale, num_blur)