Code example #1
import argparse

from scapy.all import get_if_list  # the original script appears to use Scapy

# csv2list, CSUM_TYPES, VXLAN_VNI, VXLAN_PORT, __parse_bcsum and
# __parse_pkt_types are defined elsewhere in the original module.


def __parse_args():
    parser = argparse.ArgumentParser(description="Send few VXLAN packets")
    parser.add_argument("-i", metavar="INTERFACE", dest="interface",
                        action="store", type=str, required=True,
                        help="interface to send UDP packets")
    out_ip = parser.add_mutually_exclusive_group(required=True)
    out_ip.add_argument("-d", metavar="DST-IPv4", dest="dip4", type=str,
                        help="destination IPv4 address")
    out_ip.add_argument("-D", metavar="DST-IPv6", dest="dip6", type=str,
                        help="destination IPv6 address")
    parser.add_argument("-n", metavar="NPACKETS", dest="npackets", type=int,
                        default=4, help="number of UDP packets to send")
    parser.add_argument("-g", metavar="INTERVAL", dest="interval", type=int,
                        default=0, help="seconds to wait between packets")
    parser.add_argument("-l", metavar="SIZE", dest="size", type=int,
                        default=64, help="size of UDP payload in bytes")
    parser.add_argument("-w", dest="wait", action="store_true",
                        help="wait for user input between packets")
    parser.add_argument("-b", metavar="PKT-TYPE", dest="bcsum_type", type=str,
                        help="force bad checksum for IPv4 and/or UDP packets")
    parser.add_argument("-V", metavar="VNI", dest="vni", type=int,
                        default=VXLAN_VNI,
                        help="VxLAN Network Identifier; default=1969")
    parser.add_argument("-P", metavar="VXLAN-PORT", dest="vxlan_port",
                        type=int, default=VXLAN_PORT,
                        help="VxLAN UDP port number; default=4789")
    parser.add_argument("-O", metavar="OUTER-PKT", dest="out_pkt",
                        type=str, default="ipv4",
                        help="outer packet type; ipv4 or ipv6; default=ipv4")
    parser.add_argument("-I", metavar="INNER-PKT", dest="in_pkt",
                        type=str, default="ipv4",
                        help="inner packet type; ipv4 or ipv6; default=ipv4")
    parser.add_argument("-T", metavar="INNER-TRSPT", dest="in_trspt",
                        type=str, default="udp",
                        help="inner transport type; tcp or udp; default=udp")
    args = parser.parse_args()

    if args.interface not in get_if_list():
        # parser.error() raises SystemExit itself; no explicit raise is needed
        parser.error("Interface '%s' not found." % args.interface)

    if args.bcsum_type is not None:
        args.bcsum_type = csv2list(args.bcsum_type)
        if not set(args.bcsum_type) <= set(CSUM_TYPES):
            parser.error("Invalid PKT-TYPE for '-b'. Use one or more of '%s'."
                         % ",".join(CSUM_TYPES))
    args.bcsum = __parse_bcsum(args.bcsum_type)

    err, args = __parse_pkt_types(args)
    if err is not None:
        parser.error(err)

    return args
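A quick way to sanity-check the mutually exclusive -d/-D group above is to hand parse_args an explicit argv. A minimal, self-contained sketch (the flag values are arbitrary examples, not from the original project):

import argparse

parser = argparse.ArgumentParser(description="Send few VXLAN packets")
parser.add_argument("-i", dest="interface", required=True)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-d", dest="dip4")
group.add_argument("-D", dest="dip6")

args = parser.parse_args(["-i", "eth0", "-d", "192.0.2.1"])
print(args.dip4)  # -> 192.0.2.1

Passing both -d and -D (or neither) makes argparse print a usage error and exit, which is why only one destination address ever reaches the rest of the script.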
Code example #2
from utils import csv2list, split_list, list2csv

# Split the backed-up file list roughly in half into validation and test lists.
all_path = '/data1/sap/frcnn_keras/data/mv_test_backup.txt'
l1_path = '/data1/sap/frcnn_keras/data/mv_val.txt'
l2_path = '/data1/sap/frcnn_keras/data/mv_test.txt'
ratio = 0.5

all_list = csv2list(all_path)
size = int(len(all_list) * ratio)
l1, l2 = split_list(all_list, size)

l1 = sorted(l1)
l2 = sorted(l2)

list2csv(l1_path, l1)
list2csv(l2_path, l2)
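This snippet treats every line of the input file as one record. A minimal sketch of the three utils helpers as they are used here (assumptions; the project's actual implementations may differ):

def csv2list(path):
    # Read a text file and return its non-empty lines as a list.
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]

def split_list(items, size):
    # Split a list into a head of `size` items and the remaining tail.
    return items[:size], items[size:]

def list2csv(path, items):
    # Write one item per line.
    with open(path, 'w') as f:
        f.write('\n'.join(items) + '\n')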
Code example #3
def copy_scenes(src_jm, dst_jm, scenes):
    for scene in scenes:
        dst_jm.insert_scene(scene, src_jm.get_scene(scene))


if __name__ == '__main__':
    jm = json_maker([], all_json_path, 0)
    yolo_label = Yolo_label(img_dir, label_dir, cls_list)
    img_names = get_file_list_from_dir(img_dir, is_full_path=False)
    img_names.sort()
    seen_patterns = []
    # 2. For each CSV row, add the instance summary and its instances
    lines = csv2list(csv_path, header=False)
    for line in lines:
        cls, pattern, cam_num = line
        cls = cls_list.index(cls)
        valid_cams = list(map(int, cam_num.split('/')))
        for i, img_name in enumerate(img_names):
            if not check_pattern_exist(img_name, pattern): continue
            cur_cls, cur_cam_num, cur_scene = replace(
                pattern, r'\g<class> \g<cam_num> \g<scene>', img_name).split()
            cur_cam = int(cur_cam_num)  # avoid shadowing the CSV's cam_num field
            if cur_cam not in valid_cams: continue
            cur_pattern = '%s_%s' % (cur_cls, cur_scene)
            if cur_pattern not in seen_patterns:
                scene_num = len(seen_patterns)
                scene_num = "%08d" % (scene_num)
                seen_patterns.append(cur_pattern)
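replace here appears to be a thin wrapper over re.sub with a pattern containing the named groups class, cam_num and scene. A minimal sketch of the idea (the wrapper and the pattern below are assumptions, not the project's actual values):

import re

def replace(pattern, repl, string):
    # Thin wrapper over re.sub, matching how the helper is called above.
    return re.sub(pattern, repl, string)

# A hypothetical file-name pattern with the three expected named groups:
pattern = r'(?P<class>[a-z]+)_cam(?P<cam_num>\d+)_(?P<scene>\d+)\.jpg'
print(replace(pattern, r'\g<class> \g<cam_num> \g<scene>', 'dog_cam2_0001.jpg'))
# -> 'dog 2 0001', which the caller then splits into three fields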
Code example #4
            os.makedirs(folder)

    if is_make_label:
        sv_train = open(all_label_path % ('sv', 'train'), 'w')
        sv_val = open(all_label_path % ('sv', 'val'), 'w')
        sv_test = open(all_label_path % ('sv', 'test'), 'w')

        mv_train = open(all_label_path % ('mv', 'train'), 'w')
        mv_val = open(all_label_path % ('mv', 'val'), 'w')
        mv_test = open(all_label_path % ('mv', 'test'), 'w')

        yolo_label_train = Yolo_label(train_dir, train_dir, ['dummy'])
        yolo_label_val = Yolo_label(val_dir, val_dir, ['dummy'])
        yolo_label_test = Yolo_label(test_dir, test_dir, ['dummy'])

    file_names = csv2list(file_names_csv, header=False)
    num_cls = {}
    src2dst_list = []
    cnt = -1
    for cls_and_pattern in file_names:
        cnt += 1
        #if cnt < 36 : continue
        cls = cls_and_pattern[0].lower()
        patterns = cls_and_pattern[1:4]
        total, num_train, num_val, num_test = map(int, cls_and_pattern[4:])
        if num_train == 0: continue

        if cls not in num_cls:
            num_cls[cls] = 0

        scene_num_set = {cam_idx: set() for cam_idx in cam_num}
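In this project csv2list reads a CSV file into a list of row lists and takes a header flag, as the row indexing above shows. A minimal sketch of that variant (an assumption; the real utils code may differ):

import csv

def csv2list(path, header=True):
    # Read a CSV file into a list of rows; skip the first row if it is a header.
    with open(path, newline='') as f:
        rows = list(csv.reader(f))
    return rows[1:] if header else rows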
Code example #5
import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm

# BaseDataset and csv2list come from the surrounding project.


def evaluate(args, model):
    # Multi-GPU
    device_ids = csv2list(args.gpu_ids, int)
    print('Selected GPUs: {}'.format(device_ids))

    # Device for loading dataset (batches)
    device = torch.device(device_ids[0])

    # Text-to-Text
    text2text = ('t5' in args.model)
    uniqa = ('unified' in args.model)

    # Dataloader
    dataset = BaseDataset('test',
                          tokenizer=args.model,
                          max_seq_len=args.seq_len,
                          text2text=text2text,
                          uniqa=uniqa)

    loader = DataLoader(dataset, batch_size=1, num_workers=args.num_workers)

    # Load model checkpoint file (if specified)
    checkpoint = torch.load(args.ckpt, map_location='cpu')
    # Load model & optimizer
    model.load_weights(checkpoint)

    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    model_size = sum([np.prod(p.size()) for p in model_parameters])

    data_len = len(dataset)
    print('Total Samples: {}'.format(data_len))

    # Inference
    model.eval()
    model.to(device)

    # Store predicted & ground-truth labels
    _ids = []
    preds = []

    total_samples = 0
    # Evaluate on mini-batches
    for batch in tqdm(loader):
        batch = {k: v.to(device) for k, v in batch.items()}

        # Forward Pass
        pred = model.inference(batch)  # e.g. [1] or [0]

        preds.append(pred)

        total_samples += loader.batch_size

        if total_samples >= data_len:
            break

    _ids = [record['_id'] for record in dataset.data]

    output = []

    # Regroup single-sample predictions into pairs,
    # using the first sample's id as the pair id
    for i in range(0, len(preds), 2):
        # PREDICTIONS: pred = {'id': _, 'pred_1': 'True', 'pred_2': 'False'}
        pred_1 = 'True' if preds[i][0] == 1 else 'False'
        pred_2 = 'True' if preds[i + 1][0] == 1 else 'False'
        output.append({'id': _ids[i], 'pred_1': pred_1, 'pred_2': pred_2})

    return output, model_size
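In this last example csv2list parses a comma-separated option string such as '0,1' into a typed list rather than reading a file. A minimal sketch of this variant (an assumption; the project's version may differ):

def csv2list(csv_str, cast=str):
    # Split a comma-separated string and cast each token.
    return [cast(token.strip()) for token in csv_str.split(',')]

device_ids = csv2list('0,1', int)  # -> [0, 1]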